def __init__(self, requirement_string):
    """Internal constructor -- client code should use Requirement.parse()."""
    try:
        super(Requirement, self).__init__(requirement_string)
    except packaging.requirements.InvalidRequirement as exc:
        # Re-raise under the legacy pkg_resources exception type.
        raise RequirementParseError(str(exc))
    # Keep the raw name plus its normalized variants.
    self.unsafe_name = self.name
    safe = safe_name(self.name)
    self.project_name = safe
    self.key = safe.lower()
    # Flattened (operator, version) pairs for legacy consumers.
    self.specs = [(spec.operator, spec.version) for spec in self.specifier]
    self.extras = tuple(safe_extra(extra) for extra in self.extras)
    # Single tuple used for both equality comparison and hashing.
    self.hashCmp = (
        self.key,
        self.specifier,
        frozenset(self.extras),
        str(self.marker) if self.marker else None,
    )
    self.__hash = hash(self.hashCmp)
# Python name() example sources (stray scraped-page header, originally in Chinese)
def config_file(kind="local"):
    """Get the filename of the distutils local, global, or per-user config.

    `kind` must be one of "local", "global", or "user"; raises ValueError
    for anything else.
    """
    if kind == 'local':
        return 'setup.cfg'
    if kind == 'global':
        return os.path.join(
            os.path.dirname(distutils.__file__), 'distutils.cfg'
        )
    if kind == 'user':
        # Per-user file is a dotfile on POSIX (~/.pydistutils.cfg) and a
        # plain name elsewhere (~/pydistutils.cfg).  A conditional
        # expression replaces the fragile old `cond and a or b` idiom.
        dot = '.' if os.name == 'posix' else ''
        return os.path.expanduser(convert_path("~/%spydistutils.cfg" % dot))
    raise ValueError(
        "config_file() type must be 'local', 'global', or 'user'", kind
    )
def _expand(self, *attrs):
    """Fill in scheme defaults, then expand config vars in *attrs.

    Each attribute named in *attrs that is non-None gets distutils
    config-variable substitution applied, plus '~' expansion on POSIX.
    """
    config_vars = self.get_finalized_command('install').config_vars

    if self.prefix:
        # Set default install_dir/scripts from --prefix; copy first so
        # the install command's own mapping is not mutated.
        config_vars = config_vars.copy()
        config_vars['base'] = self.prefix

    scheme = self.INSTALL_SCHEMES.get(os.name, self.DEFAULT_SCHEME)
    for attr, default in scheme.items():
        if getattr(self, attr, None) is None:
            setattr(self, attr, default)

    from distutils.util import subst_vars
    for attr in attrs:
        current = getattr(self, attr)
        if current is None:
            continue
        expanded = subst_vars(current, config_vars)
        if os.name == 'posix':
            expanded = os.path.expanduser(expanded)
        setattr(self, attr, expanded)
def removedirs(name):
    """removedirs(name)

    Super-rmdir; remove a leaf directory and then prune every empty
    ancestor.  The leaf must be removable (its rmdir may raise); after
    that, rightmost path segments are removed one by one until either
    the whole path is consumed or a directory refuses to go (usually
    because it is not empty) -- such errors are silently ignored.
    """
    rmdir(name)
    parent, leaf = path.split(name)
    if not leaf:
        # Trailing separator: split once more to reach the real leaf.
        parent, leaf = path.split(parent)
    while parent and leaf:
        try:
            rmdir(parent)
        except OSError:
            break
        parent, leaf = path.split(parent)
def renames(old, new):
    """renames(old, new)

    Super-rename; create any directories needed for the new path, then
    rename, then prune now-empty directories on the old path.  Note:
    this can fail with the new directory structure already made if you
    lack permission to unlink the leaf directory or file.
    """
    new_dir, new_leaf = path.split(new)
    if new_dir and new_leaf and not path.exists(new_dir):
        makedirs(new_dir)
    rename(old, new)
    old_dir, old_leaf = path.split(old)
    if old_dir and old_leaf:
        try:
            removedirs(old_dir)
        except OSError:
            # Best effort: a non-empty ancestor simply stays.
            pass
def __setattr__(self, name, value):
if hasattr(self, name):
self.__dict__[name] = value
else:
raise AttributeError('Invalid attribute "%s"' % name)
def allocate(self, ip_addr, name, platform, cpus, memory, disk):
    """Decide how many CPUs to use on a discovered node.

    Returns a CPU count to use; 0 means "don't use this node"; a
    negative value means "this allocation doesn't apply" (the scheduler
    then tries the next entry in 'node_allocations').
    """
    # Node must match the configured IP pattern at all.
    if not re.match(self.ip_rex, ip_addr):
        return -1
    if self.platform and not re.search(self.platform, platform):
        return -1
    # Reject nodes that fall short of memory/disk requirements
    # (only when both the requirement and the reported value are set).
    low_memory = self.memory and memory and self.memory > memory
    low_disk = self.disk and disk and self.disk > disk
    if low_memory or low_disk:
        return 0
    if self.cpus == 0:
        return 0
    if self.cpus > 0:
        # Need exactly self.cpus; node must have at least that many.
        return 0 if self.cpus > cpus else self.cpus
    # Negative setting: leave |self.cpus| CPUs unused on the node.
    remaining = cpus + self.cpus
    return remaining if remaining > 0 else 0
def __init__(self, request, client, name, where, cpu, code, args=None, kwargs=None):
    """Record one remote-task job request; args/kwargs are serialized."""
    for attr, value in (('request', request), ('client', client), ('name', name),
                        ('where', where), ('cpu', cpu), ('code', code)):
        setattr(self, attr, value)
    # Serialize call arguments for transmission to the remote server.
    self.args = pycos.serialize(args)
    self.kwargs = pycos.serialize(kwargs)
    # Set when the job finishes; None while pending.
    self.done = None
def __init__(self, name, location):
    """Proxy for a remote dispycos server process.

    NOTE(review): 'location' is accepted but not stored here — confirm
    whether callers rely on that.
    """
    self.name = name
    self.task = None
    self.status = Scheduler.ServerClosed
    # Remote tasks running on this server, plus pending file transfers
    # and results that arrived out of order.
    self.rtasks = {}
    self.xfer_files = []
    self.askew_results = {}
    # Event set when the server becomes available; starts cleared.
    availability = pycos.Event()
    availability.clear()
    self.avail = availability
    self.scheduler = Scheduler._instance
def __init__(self, **kwargs):
    """Create the (singleton) dispycos scheduler.

    Recognized keyword arguments (each popped before the remainder is
    forwarded to pycos.Pycos.instance): 'pulse_interval',
    'ping_interval', 'zombie_period', 'dispycosnode_port', 'clean',
    'nodes'.
    """
    # Register this instance as the class-level singleton.
    self.__class__._instance = self
    # Known nodes and disabled nodes, keyed by address.
    self._nodes = {}
    self._disabled_nodes = {}
    # Addresses of nodes with spare capacity; event signals availability.
    self._avail_nodes = set()
    self._nodes_avail = pycos.Event()
    self._nodes_avail.clear()
    self._shared = False
    self._cur_computation = None
    self.__cur_client_auth = None
    self.__cur_node_allocations = []
    # Timing parameters; defaults expressed relative to MaxPulseInterval.
    self.__pulse_interval = kwargs.pop('pulse_interval', MaxPulseInterval)
    self.__ping_interval = kwargs.pop('ping_interval', 0)
    self.__zombie_period = kwargs.pop('zombie_period', 100 * MaxPulseInterval)
    self._node_port = kwargs.pop('dispycosnode_port', 51351)
    self.__server_locations = set()
    self.__job_scheduler_task = None
    kwargs['name'] = 'dispycos_scheduler'
    clean = kwargs.pop('clean', False)
    nodes = kwargs.pop('nodes', [])
    self.pycos = pycos.Pycos.instance(**kwargs)
    self.__dest_path = os.path.join(self.pycos.dest_path, 'dispycos', 'dispycosscheduler')
    if clean:
        # NOTE(review): rmtree raises if the path does not exist — confirm
        # clean=True is only used when a previous run left files behind.
        shutil.rmtree(self.__dest_path)
    self.pycos.dest_path = self.__dest_path
    # System tasks driving computation scheduling, client requests,
    # timers and status processing.
    self.__computation_sched_event = pycos.Event()
    self.__computation_scheduler_task = SysTask(self.__computation_scheduler_proc, nodes)
    self.__client_task = SysTask(self.__client_proc)
    self.__timer_task = SysTask(self.__timer_proc)
    Scheduler.__status_task = self.__status_task = SysTask(self.__status_proc)
    # Make the client task discoverable by name over the network.
    self.__client_task.register('dispycos_scheduler')
    # Broadcast to find dispycosnodes listening on the configured port.
    self.pycos.discover_peers(port=self._node_port)
def __node_allocate(self, node):
for node_allocate in self.__cur_node_allocations:
cpus = node_allocate.allocate(node.addr, node.name, node.platform, node.cpus,
node.avail_info.memory, node.avail_info.disk)
if cpus < 0:
continue
return min(cpus, node.cpus)
return node.cpus
def __discover_node(self, msg, task=None):
    """Task: locate the dispycos_node task at msg.location and register it.

    Retries up to 10 times (0.1s apart) to locate the remote task; on
    success, any previous incarnation of the node is discarded (with
    abandonment notifications sent to the computation's status task, if
    one is set) and node info is (re)fetched.
    """
    for _ in range(10):
        node_task = yield Task.locate('dispycos_node', location=msg.location,
                                      timeout=MsgTimeout)
        if not isinstance(node_task, Task):
            # Not found yet; brief pause, then retry.
            yield task.sleep(0.1)
            continue
        self._disabled_nodes.pop(msg.location.addr, None)
        node = self._nodes.pop(msg.location.addr, None)
        if node:
            logger.warning('Rediscovered dispycosnode at %s; discarding previous incarnation!',
                           msg.location.addr)
            self._disabled_nodes.pop(node.addr, None)
            if self._cur_computation:
                status_task = self._cur_computation.status_task
            else:
                status_task = None
            if status_task:
                # Tell the client every remote task / server / node from the
                # previous incarnation is abandoned.
                for server in node.servers.values():
                    for rtask, job in server.rtasks.values():
                        status = pycos.MonitorException(rtask, (Scheduler.TaskAbandoned, None))
                        status_task.send(status)
                    status_task.send(DispycosStatus(Scheduler.ServerAbandoned,
                                                    server.task.location))
                info = DispycosNodeInfo(node.name, node.addr, node.cpus, node.platform,
                                        node.avail_info)
                status_task.send(DispycosStatus(Scheduler.NodeAbandoned, info))
        node = self._disabled_nodes.get(msg.location.addr, None)
        if not node:
            # First contact with this node: create a fresh record.
            node = Scheduler._Node(msg.name, msg.location.addr)
            self._disabled_nodes[msg.location.addr] = node
        node.task = node_task
        yield self.__get_node_info(node, task=task)
        raise StopIteration
def __setattr__(self, name, value):
if hasattr(self, name):
self.__dict__[name] = value
else:
raise AttributeError('Invalid attribute "%s"' % name)
def inventory_data_nodir(input_dir):
    """Inventory data, assuming a flat directory structure."""
    training_names = ls(os.path.join(input_dir, '*_train.data'))
    for idx in range(len(training_names)):
        full = training_names[idx]
        reversed_full = full[::-1]
        # Keep the substring between the last path separator and the
        # last underscore: the dataset's base name.
        training_names[idx] = full[-reversed_full.index(filesep):
                                   -reversed_full.index('_') - 1]
        check_dataset(input_dir, training_names[idx])
    return training_names
def inventory_data_dir(input_dir):
    """Inventory data, assuming a one-level directory hierarchy."""
    # This supports subdirectory structures obtained by concatenating bundles.
    training_names = ls(input_dir + '/*/*_train.data')
    for idx in range(len(training_names)):
        full = training_names[idx]
        reversed_full = full[::-1]
        # Dataset base name: between the last path separator and last '_'.
        training_names[idx] = full[-reversed_full.index(filesep):
                                   -reversed_full.index('_') - 1]
        check_dataset(os.path.join(input_dir, training_names[idx]), training_names[idx])
    return training_names
def addpackage(sitedir, name, known_paths):
    """Process a .pth file within a sitedir.

    Add each path line (combined with sitedir) to known_paths and
    sys.path; execute lines starting with 'import'.  Returns the
    updated known_paths (or None if it was None on entry, after
    initializing the global path info).
    """
    if known_paths is None:
        _init_pathinfo()
        reset = 1
    else:
        reset = 0
    fullname = os.path.join(sitedir, name)
    try:
        # BUG FIX: mode 'rU' was deprecated and removed in Python 3.11
        # (raises ValueError there); plain text mode already gives
        # universal newlines on Python 3.  IOError is an alias of OSError.
        f = open(fullname, "r")
    except OSError:
        # Missing/unreadable .pth files are silently ignored.
        return
    with f:
        for line in f:
            if line.startswith("#"):
                continue
            if line.startswith("import"):
                # NOTE: executes arbitrary code from the .pth file --
                # standard site.py behavior; only safe for trusted dirs.
                exec(line)
                continue
            line = line.rstrip()
            entry, entry_case = makepath(sitedir, line)
            if entry_case not in known_paths and os.path.exists(entry):
                sys.path.append(entry)
                known_paths.add(entry_case)
    if reset:
        known_paths = None
    return known_paths
def setquit():
    """Define new built-ins 'quit' and 'exit'.
    These are simply strings that display a hint on how to exit.
    """
    # Pick the platform's EOF key chord from the path separator.
    eof = {':': 'Cmd-Q', '\\': 'Ctrl-Z plus Return'}.get(os.sep, 'Ctrl-D (i.e. EOF)')

    class Quitter(object):
        def __init__(self, name):
            self.name = name

        def __repr__(self):
            return 'Use %s() or %s to exit' % (self.name, eof)

        def __call__(self, code=None):
            # Shells like IDLE catch the SystemExit, but listen when their
            # stdin wrapper is closed.
            try:
                sys.stdin.close()
            except:
                pass
            raise SystemExit(code)

    builtins.quit = Quitter('quit')
    builtins.exit = Quitter('exit')
def __init__(self, name, data, files=(), dirs=()):
    # Identifier for this text block (presumably used when registering
    # it, e.g. as a built-in name -- TODO confirm against callers).
    self.__name = name
    # Inline text; looks like a fallback if the files can't be read --
    # NOTE(review): confirm, consumer code is not visible here.
    self.__data = data
    # Candidate file names and directories to search for the text.
    self.__files = files
    self.__dirs = dirs
    # Cached list of lines; None until built lazily elsewhere.
    self.__lines = None
def __init__(self, name, mode):
mode = {
"r": os.O_RDONLY,
"w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC,
}[mode]
if hasattr(os, "O_BINARY"):
mode |= os.O_BINARY
self.fd = os.open(name, mode, 0o666)
def _init_write_gz(self):
    """Initialize for writing with gzip compression.

    Sets up a raw-deflate compressor and emits the gzip member header
    (RFC 1952), including the stream's file name.
    """
    # Raw deflate: negative wbits suppresses the zlib header/trailer
    # because the gzip framing is written by hand below.
    self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED,
                                     -self.zlib.MAX_WBITS,
                                     self.zlib.DEF_MEM_LEVEL,
                                     0)
    # 4-byte little-endian MTIME field.
    timestamp = struct.pack("<L", int(time.time()))
    # Header bytes: magic \037\213, CM=\010 (deflate), FLG=\010 (FNAME
    # present), MTIME, XFL=\002 (max compression), OS=\377 (unknown).
    self.__write(b"\037\213\010\010" + timestamp + b"\002\377")
    # FNAME conventionally omits a ".gz" suffix.
    if self.name.endswith(".gz"):
        self.name = self.name[:-3]
    # RFC1952 says we must use ISO-8859-1 for the FNAME field.
    self.__write(self.name.encode("iso-8859-1", "replace") + NUL)