def __init__(self, home_dir=os.path.join(os.path.expanduser(path='~'), '.poco'),
             argv=sys.argv[1:]):
    """Fill state"""
    # NOTE(review): both defaults are evaluated once at import time, so
    # home_dir and argv are frozen when the module loads; later changes to
    # sys.argv are ignored.
    StateHolder.home_dir = home_dir
    StateHolder.config_file = os.path.join(StateHolder.home_dir, 'config')
    # Parse CLI arguments against the module docstring (docopt usage text).
    StateHolder.args = docopt(__doc__, version=__version__, argv=argv)
    ColorPrint.set_log_level(StateHolder.args)
    # Default the <project> positional to the current directory's name.
    if StateHolder.args.get('<project>') is None:
        StateHolder.args['<project>'] = FileUtils.get_directory_name()
    StateHolder.name = StateHolder.args.get('<project>')
    StateHolder.offline = StateHolder.args.get("--offline")
    if StateHolder.args.get("--developer"):
        StateHolder.developer_mode = StateHolder.args.get("--developer")
    self.config_handler = ConfigHandler()
    """Parse config if exists """
    if ConfigHandler.exists():
        self.config_handler.read()
    else:
        # No config file: fall back to the working directory in developer mode.
        StateHolder.work_dir = os.getcwd()
        StateHolder.developer_mode = True
# Python docopt() usage examples (snippets collected from various projects)
def main():
    """Evaluate a word representation on an analogy task.

    Loads the test set, builds the requested representation and prints the
    additive and multiplicative analogy accuracies.
    """
    args = docopt("""
    Usage:
        analogy_eval.py [options] <representation> <representation_path> <task_path>
    Options:
        --neg NUM Number of negative samples; subtracts its log from PMI (only applicable to PPMI) [default: 1]
        --w+c Use ensemble of word and context vectors (not applicable to PPMI)
        --eig NUM Weighted exponent of the eigenvalue matrix (only applicable to SVD) [default: 0.5]
    """)
    data = read_test_set(args['<task_path>'])
    xi, ix = get_vocab(data)
    representation = create_representation(args)
    accuracy_add, accuracy_mul = evaluate(representation, data, xi, ix)
    # BUG FIX: the original used the Python 2-only `print` statement (a
    # SyntaxError on Python 3); the call form below prints the same
    # space-separated output on Python 3.
    print(args['<representation>'], args['<representation_path>'],
          '\t%0.3f' % accuracy_add, '\t%0.3f' % accuracy_mul)
def main():
    """Convert a text vectors file into a .npy matrix plus a .vocab file.

    Rows are ordered by the sorted vocabulary so that the matrix and the
    saved vocabulary stay aligned.
    """
    args = docopt("""
    Usage:
        text2numpy.py <path>
    """)
    path = args['<path>']
    matrix = read_vectors(path)
    iw = sorted(matrix.keys())
    # Every row shares the dimensionality of the first vector.
    new_matrix = np.zeros(shape=(len(iw), len(matrix[iw[0]])), dtype=np.float32)
    # FIX: dropped the redundant `if word in matrix` guard -- iw is built
    # from matrix's own keys, so membership always holds.
    for i, word in enumerate(iw):
        new_matrix[i, :] = matrix[word]
    np.save(path + '.npy', new_matrix)
    save_vocabulary(path + '.vocab', iw)
def main():
    """Factorize a PPMI matrix with sparse truncated SVD and save the factors.

    Writes ut/s/vt arrays and the word/context vocabularies next to
    <output_path>.
    """
    args = docopt("""
    Usage:
        pmi2svd.py [options] <pmi_path> <output_path>
    Options:
        --dim NUM Dimensionality of eigenvectors [default: 500]
        --neg NUM Number of negative samples; subtracts its log from PMI [default: 1]
    """)
    pmi_path = args['<pmi_path>']
    output_path = args['<output_path>']
    dim = int(args['--dim'])
    neg = int(args['--neg'])
    # neg shifts the PMI values (subtracts log(neg)); normalization is left off.
    explicit = PositiveExplicit(pmi_path, normalize=False, neg=neg)
    # sparsesvd expects a CSC matrix, hence the tocsc() conversion.
    ut, s, vt = sparsesvd(explicit.m.tocsc(), dim)
    np.save(output_path + '.ut.npy', ut)
    np.save(output_path + '.s.npy', s)
    np.save(output_path + '.vt.npy', vt)
    save_vocabulary(output_path + '.words.vocab', explicit.iw)
    save_vocabulary(output_path + '.contexts.vocab', explicit.ic)
def main():
    """Export SGNS embeddings to a plain-text vectors file.

    With --w+c, word and context vectors are combined into an ensemble
    embedding before export.
    """
    args = docopt("""
    Usage:
        sgns2text.py [options] <sgns_path> <output_path>
    Options:
        --w+c Use ensemble of word and context vectors
    """)
    sgns_path = args['<sgns_path>']
    output_path = args['<output_path>']
    w_c = args['--w+c']
    if w_c:
        sgns = EnsembleEmbedding(Embedding(sgns_path + '.words', False), Embedding(sgns_path + '.contexts', False), True)
    else:
        sgns = Embedding(sgns_path + '.words', True)
    with open(output_path, 'w') as f:
        for i, w in enumerate(sgns.iw):
            # BUG FIX: replaced the Python 2-only `print >>f, ...` statement
            # with an equivalent f.write() that works on Python 2 and 3.
            f.write('%s %s\n' % (w, ' '.join(str(x) for x in sgns.m[i])))
def main():
    """Compute a PMI matrix from a counts file and save it with vocabularies."""
    args = docopt("""
    Usage:
        counts2pmi.py [options] <counts> <output_path>
    Options:
        --cds NUM Context distribution smoothing [default: 1.0]
    """)
    counts_path = args['<counts>']
    vectors_path = args['<output_path>']
    # cds = 1.0 means no context-distribution smoothing.
    cds = float(args['--cds'])
    counts, iw, ic = read_counts_matrix(counts_path)
    pmi = calc_pmi(counts, cds)
    save_matrix(vectors_path, pmi)
    save_vocabulary(vectors_path + '.words.vocab', iw)
    save_vocabulary(vectors_path + '.contexts.vocab', ic)
def main():
    """Tally word and context frequencies from a counts file.

    Each input line is "<count> <word> <context>"; the totals are written as
    count vocabularies sorted by descending frequency.
    """
    args = docopt("""
    Usage:
        counts2pmi.py <counts>
    """)
    counts_path = args['<counts>']
    words = Counter()
    contexts = Counter()
    with open(counts_path) as f:
        for line in f:
            count, word, context = line.strip().split()
            count = int(count)
            words[word] += count
            contexts[context] += count
    # BUG FIX: `lambda (x, y): y` relies on Python 2-only tuple parameter
    # unpacking (a SyntaxError on Python 3); index the (key, count) pair
    # instead.
    words = sorted(words.items(), key=lambda kv: kv[1], reverse=True)
    contexts = sorted(contexts.items(), key=lambda kv: kv[1], reverse=True)
    save_count_vocabulary(counts_path + '.words.vocab', words)
    save_count_vocabulary(counts_path + '.contexts.vocab', contexts)
def main():
    """Export SVD embeddings to a plain-text vectors file.

    With --w+c, word and context SVD vectors are combined into an ensemble;
    --eig controls the weighting exponent of the eigenvalue matrix.
    """
    args = docopt("""
    Usage:
        svd2text.py [options] <svd_path> <output_path>
    Options:
        --w+c Use ensemble of word and context vectors
        --eig NUM Weighted exponent of the eigenvalue matrix [default: 0.5]
    """)
    svd_path = args['<svd_path>']
    output_path = args['<output_path>']
    w_c = args['--w+c']
    eig = float(args['--eig'])
    if w_c:
        svd = EnsembleEmbedding(SVDEmbedding(svd_path, False, eig, False), SVDEmbedding(svd_path, False, eig, True), True)
    else:
        svd = SVDEmbedding(svd_path, True, eig)
    with open(output_path, 'w') as f:
        for i, w in enumerate(svd.iw):
            # BUG FIX: replaced the Python 2-only `print >>f, ...` statement
            # with an equivalent f.write() that works on Python 2 and 3.
            f.write('%s %s\n' % (w, ' '.join(str(x) for x in svd.m[i])))
def main(argv=None):
    """Initialize a Hugo project for Jupyter notebooks when --init is given.

    Must be run from the project root (checked via config.toml); copies the
    packaged fabfile into place and lists the available fab commands.
    """
    args = docopt(__doc__, argv=argv, version='1.0.3')
    assert 'config.toml' in (p.name for p in Path().iterdir()), "config.toml not found in directory. Are you sure you're in the project's root?"
    if not args['--init']:
        return
    Path('./notebooks/').mkdir(exist_ok=True)
    # Copy the packaged fabfile template into the project root.
    with open(resource_filename('hugo_jupyter', '__fabfile.py')) as fp:
        Path('fabfile.py').write_text(fp.read())
    print(dedent("""
    Successfully initialized. From this directory, the following commands are available.
    Just remember to prepend them with `fab`
    """))
    run(('fab', '-l'))
def main():
    """Run the release."""
    options = docopt(HELP)
    release_steps = [ensure_not_dirty,
                     ensure_master,
                     git_pull,
                     ensure_passing_tests,
                     bump_mdk_versions]
    # Execute each step in order, announcing it by number and name.
    for number, step in enumerate(release_steps, start=1):
        print("Step {}: {}".format(number, step.__name__))
        step(options)
    print("""\
The release has been committed and tagged locally.
You can now push it upstream by running:
git push origin master --tags
""")
def main():
    """Run a single same-stats animation between two shapes.

    Usage: run <shape_start> <shape_end> [<iters>][<decimals>]
    Optional iterations, decimals and frames fall back to 100000 / 2 / 100.
    """
    arguments = docopt(__doc__, version='Same Stats 1.0')
    if arguments['run']:
        with plot_settings():
            it = 100000
            de = 2
            frames = 100
            if arguments['<iters>']:
                it = int(arguments['<iters>'])
            if arguments['<decimals>']:
                de = int(arguments['<decimals>'])
            # BUG FIX: this guard previously re-tested '<decimals>', so a
            # user-supplied <frames> value was only honoured when <decimals>
            # was also given (and crashed if <frames> was absent).
            if arguments['<frames>']:
                frames = int(arguments['<frames>'])
            shape_start = arguments['<shape_start>']
            shape_end = arguments['<shape_end>']
            if shape_start in INITIAL_DATASETS and shape_end in ALL_TARGETS:
                do_single_run(shape_start, shape_end, iterations=it, decimals=de, num_frames=frames)
            else:
                print("************* One of those shapes isn't correct:")
                print("shape_start must be one of ", INITIAL_DATASETS)
                print("shape_end must be one of ", ALL_TARGETS)
def command():
    """Query the 12306 left-ticket API and pretty-print available trains."""
    arguments = docopt(__doc__)
    from_sta = stations.get(arguments['<from>'])
    to_sta = stations.get(arguments['<to>'])
    date = arguments['<date>']
    # Collect the single-letter flags the user turned on (train-type filter).
    options = ''.join(key for key, value in arguments.items() if value is True)
    url = 'https://kyfw.12306.cn/otn/leftTicket/query?leftTicketDTO.train_date={}&leftTicketDTO.from_station={}&leftTicketDTO.to_station={}&purpose_codes=ADULT'.format(date, from_sta, to_sta)
    resp = requests.get(url, verify=False)
    available_trains = resp.json()['data']
    TrainsResult(available_trains, options).pretty_print()
def __init__(self, docstring=None):
    """Parse the optional docopt docstring and set up the device-class table."""
    self.options = docopt.docopt(docstring) if docstring is not None else {}
    # Maps a class name to (module name, human-readable description).
    self.umap_class_dict = {
        'audio': ('audio', 'Headset'),
        'billboard': ('billboard', 'A billboard, requires USB 2.1 and higher'),
        'cdc_acm': ('cdc_acm', 'Abstract Control Model device (like serial modem)'),
        'cdc_dl': ('cdc_dl', 'Direct Line Control device (like modem)'),
        'ftdi': ('ftdi', 'USB<->RS232 FTDI chip'),
        'hub': ('hub', 'USB hub'),
        'keyboard': ('keyboard', 'Keyboard'),
        'mass_storage': ('mass_storage', 'Disk on key'),
        'mtp': ('mtp', 'Android phone'),
        'printer': ('printer', 'Printer'),
        'smartcard': ('smartcard', 'USB<->smart card interface'),
    }
    # Iterating a dict yields its keys, so sorted(dict) == sorted(dict.keys()).
    self.umap_classes = sorted(self.umap_class_dict)
    self.logger = self.get_logger()
    self.num_processed = 0
    self.fuzzer = None
    self.setup_packet_received = False
def main():
    # type: () -> typing.Any
    """Parse the command line options and launch the requested command.

    If the command is 'help' then print the help message for the subcommand; if
    no subcommand is given, print the standard help message.
    """
    # wrap=six.PY3: only wrap the output streams for ANSI handling on Python 3.
    colorama.init(wrap=six.PY3)
    doc = usage.get_primary_command_usage()
    # Subcommands are only allowed when the primary usage mentions <command>.
    allow_subcommands = '<command>' in doc
    args = docopt(doc, version=settings.version,
                  options_first=allow_subcommands)
    # Install the logging excepthook only if nothing else replaced the default.
    if sys.excepthook is sys.__excepthook__:
        sys.excepthook = log.excepthook
    try:
        log.enable_logging(log.get_log_level(args))
        # Arguments after the subcommand (index 2) or after the program name
        # (index 1) when no subcommand was given.
        default_args = sys.argv[2 if args.get('<command>') else 1:]
        if (args.get('<command>') == 'help' and
                None not in settings.subcommands):
            subcommand = next(iter(args.get('<args>', default_args)), None)
            return usage.get_help_usage(subcommand)
        argv = [args.get('<command>')] + args.get('<args>', default_args)
        return _run_command(argv)
    except exc.InvalidCliValueError as e:
        # Surface CLI-value errors as a message rather than a traceback.
        return str(e)
def _run_command(argv):
    # type: (typing.List[str]) -> typing.Any
    """Run the command with the given CLI options and exit.

    Command functions are expected to have a __doc__ string that is parseable
    by docopt.

    Args:
        argv: The list of command line arguments supplied for a command. The
            first argument is expected to be the name of the command to be run.
            Note that this is different than the full arguments parsed by
            docopt for the entire program.

    Returns:
        The command function's return value, normalized to 0 when falsy.

    Raises:
        ValueError: Raised if the user attempted to run an invalid command.
    """
    command_name, argv = _get_command_and_argv(argv)
    _LOGGER.info('Running command "%s %s" with args: %s', settings.command,
                 command_name, argv)
    subcommand = _get_subcommand(command_name)
    func = call.get_callable(subcommand)
    doc = usage.format_usage(subcommand.__doc__)
    args = _get_parsed_args(command_name, doc, argv)
    # Falsy results (including None) become exit status 0.
    return call.call(func, args) or 0
def _get_command_and_argv(argv):
    """Extract the command name and arguments to pass to docopt.

    Args:
        argv: The argument list being used to run the command.

    Returns:
        A tuple containing the name of the command and the arguments to pass
        to docopt.
    """
    head = argv[0]
    if not head:
        # Empty first token: drop it and keep the rest.
        return head, argv[1:]
    if head == settings.command:
        # The primary command name is implicit; strip its first occurrence.
        argv.remove(head)
    return head, argv
def _get_parsed_args(command_name, doc, argv):
    # type: (str, str, typing.List[str]) -> typing.Dict[str, typing.Any]
    """Parse the docstring with docopt.

    Args:
        command_name: The name of the subcommand to parse.
        doc: A docopt-parseable string.
        argv: The list of arguments to pass to docopt during parsing.

    Returns:
        The docopt results dictionary. If the subcommand has the same name as
        the primary command, the subcommand value will be added to the
        dictionary.
    """
    _LOGGER.debug('Parsing docstring: """%s""" with arguments %s.', doc, argv)
    args = docopt(doc, argv=argv)
    # When the subcommand shadows the primary command, mark it as selected so
    # downstream dispatch sees a consistent dictionary.
    if command_name == settings.command:
        args[command_name] = True
    return args
def main():
    """BIC clustering command-line entry point (currently 'tune' mode only)."""
    arguments = docopt(__doc__, version='BIC clustering')
    db_yml = expanduser(arguments['--database'])
    protocol_name = arguments['<database.task.protocol>']
    subset = arguments['--subset']
    if arguments['tune']:
        experiment_dir = arguments['<experiment_dir>']
        # Tuning defaults to the development subset when none is requested.
        if subset is None:
            subset = 'development'
        application = BICClustering(experiment_dir, db_yml=db_yml)
        application.tune(protocol_name, subset=subset)
    # NOTE(review): the 'apply' mode below is not implemented yet; kept as a
    # sketch of the intended behavior.
    # if arguments['apply']:
    #     tune_dir = arguments['<tune_dir>']
    #     if subset is None:
    #         subset = 'test'
    #     application = BICClustering.from_tune_dir(
    #         tune_dir, db_yml=db_yml)
    #     application.apply(protocol_name, subset=subset)
def cmd():
    """Entry point: run main() and translate known failures into messages.

    Unknown exceptions still propagate; the database is always closed.
    """
    try:
        main(docopt(__doc__, version=__version__))
    except ImportError as e:
        # lxml or html5lib not found
        print(e.msg)
        print('After installing one of them, please try again by using `mpsign update [user]`')
    except UserNotFound:
        print('User not found.')
    except InvalidBDUSSException:
        # BUG FIX: the original re-raised before printing, making this
        # message unreachable dead code.
        print('BDUSS not valid')
    except KeyboardInterrupt:
        print('Operation cancelled by user.')
    finally:
        # BUG FIX: db.close() previously ran only when no exception escaped
        # (the bare `raise e` for other exceptions skipped it entirely).
        db.close()
def cli():
    """command-line interface

    Dispatches to the pretty-printer matching the first truthy docopt flag
    or command; falls back to the user summary.
    """
    args = docopt(__doc__)
    # FIX: replaced `== True` comparisons with truthiness tests (PEP 8 E712);
    # docopt flags are plain booleans, so behavior is unchanged.
    if args["-q"]:
        pprint_user_ask(args["<user>"], int(args["--depth"]))
    elif args["-r"]:
        pprint_user_answer(args["<user>"], int(args["--depth"]))
    elif args["-a"]:
        pprint_user_article(args["<user>"], int(args["--depth"]))
    elif args["post"]:
        pprint_post(args['<url>'])
    elif args["question"]:
        pprint_question(args['<url>'])
    elif args["column"]:
        pprint_column(args['<url>'])
    elif args["answer"]:
        pprint_answer(args['<url>'])
    elif args["collection"]:
        pprint_collection(args['<url>'])
    else:
        pprint_user_base(args['<user>'])
def _main():
    """Validate and process the template files given on the command line."""
    print('kitty version: %s' % get_distribution('kittyfuzzer').version)
    opts = docopt.docopt(__doc__)
    template_files = opts['<FILE>']
    # Pick the processor: dump the template tree, or run the tester.
    if opts['--tree']:
        processor = TemplateTreePrinter()
    else:
        processor = TemplateTester(opts['--fast'])
    processor.verbose = opts['--verbose']
    try:
        validate_files(template_files)
        for template_file in template_files:
            process_file(template_file, processor)
    except Exception as e:
        print(e)
def main(argv=None):
    """Process a .tcx workout file at the given handlebar resistance lever.

    Reads INPUT from docopt arguments; exits with status -1 when the file
    does not end in .tcx. --lever must be in 1..10 and defaults to 5.
    """
    arguments = docopt(__doc__)
    inputfilename = arguments["INPUT"]
    if not inputfilename.endswith('.tcx'):
        print("input file %s has no .tcx extention" % inputfilename)
        # BUG FIX: use sys.exit() -- the bare exit() builtin is injected by
        # the site module and is not guaranteed to exist when scripts run
        # with -S or under frozen interpreters.
        sys.exit(-1)
    if arguments['--lever']:
        lever = int(arguments['--lever'])
        # BUG FIX: explicit validation instead of assert, which is silently
        # stripped when Python runs with -O.
        if not 1 <= lever <= 10:
            sys.exit('Handlebar resistance lever position must be between 1 and 10')
        sys.stderr.write('Handlebar resistance lever position = %d \n' % lever)
        sys.stderr.flush()
    else:
        lever = 5
        sys.stderr.write('Assuming default handlebar resistance lever postion at 5\n')
        sys.stderr.flush()
    process_file(inputfilename, lever)
def cli():
    """Command-line interface"""
    arguments = docopt(__doc__)
    date = arguments['<date>']
    from_station = stations.get(arguments['<from>'])
    to_station = stations.get(arguments['<to>'])
    # Flags the user enabled, concatenated into an option string.
    options = ''.join(key for key, value in arguments.items() if value is True)
    url = ('https://kyfw.12306.cn/otn/leftTicket/query?'
           'leftTicketDTO.train_date={}&'
           'leftTicketDTO.from_station={}&leftTicketDTO.to_station={}&purpose_codes=ADULT').format(
        date, from_station, to_station
    )
    payload = requests.get(url, verify=False).json()
    if 'data' in payload:
        TrainsCollection(payload['data'], options).pretty_print()
    else:
        print('????')
def _process_cmd_line(argv):
    """Memory graph diffing.
    Usage:
        graph_diffing.py [--verbose] <dump>... [-n <neg_dump>]
        graph_diffing.py (--help | --version)
    Options:
        <dump> The list of positive memory dumps.
        -n <neg_dump> The negative memory dump.
        -h --help Shows this message.
        -v --verbose Shows details.
        --version Shows the current version.
    """
    # NOTE: the docstring above doubles as the docopt grammar -- do not
    # reword it without re-checking argument parsing.
    # initializing the parser object
    args = docopt(_process_cmd_line.__doc__, argv=argv, version=__version__)
    # checking arguments
    if args['--verbose']:
        print(args)
    # Returns (positive dumps list, negative dump or None, verbose flag).
    return args['<dump>'], args['-n'], args['--verbose']
def _process_cmd_line(argv):
    '''Minidump parser.
    By David I. Urbina based on Brendan Dolan-Gavitt 'minidump.py' parser.
    Usage:
        minidump.py <minidump>
        minidump.py (-h | --help | --version)
    Options:
        <minidump> The minidump file.
        -h --help Shows this help.
        --version Shows the current version.
    '''
    # NOTE: the docstring above is also the docopt grammar -- keep wording
    # in sync with argument parsing.
    # initializing the parser object
    args = docopt(_process_cmd_line.__doc__, argv=argv, version=__version__)
    # Returns only the minidump file path.
    return args['<minidump>']
def _process_cmd_line(argv):
    '''Minidump converter.
    Usage:
        minidump_convert.py [options] <dump>
        minidump_convert.py (-h | --help | --version)
    Options:
        <dump> The minidump file.
        -m Extract non-standard modules.
        -M Extract all modules.
        -s Extract segments.
        -c Extract core.
        -v --verbose Verbose.
        -h --help Shows this help.
        --version Shows the current version.
    '''
    # NOTE: the docstring above is also the docopt grammar -- keep wording
    # in sync with argument parsing.
    # initializing the parser object
    args = docopt(_process_cmd_line.__doc__, argv=argv, version=__version__)
    # Returns (dump path, -m, -M, -s, -c, verbose) in that order.
    return (args['<dump>'], args['-m'], args['-M'], args['-s'],
            args['-c'], args['--verbose'])
def _process_cmd_line(argv):
    """Memory graph generator.
    Usage:
        graph_generator.py [--verbose] <dump>
        graph_generator.py (--help | --version)
    Options:
        <dump> The memory dump file.
        -h --help Shows this message.
        -v --verbose Shows details.
        --version Shows the current version.
    """
    # NOTE: the docstring above doubles as the docopt grammar -- do not
    # reword it without re-checking argument parsing.
    # initializing the parser object
    args = docopt(_process_cmd_line.__doc__, argv=argv, version=__version__)
    # checking arguments
    if args['--verbose']:
        print(args)
    # Returns (dump path, verbose flag).
    return args['<dump>'], args['--verbose']
def _main():
    """Entry point: print the kitty version, then run the selected processor."""
    version = get_distribution('kittyfuzzer').version
    print('kitty version: %s' % version)
    opts = docopt.docopt(__doc__)
    files = opts['<FILE>']
    # Tree printing takes precedence over the (optionally fast) tester.
    processor = TemplateTreePrinter() if opts['--tree'] else TemplateTester(opts['--fast'])
    processor.verbose = opts['--verbose']
    try:
        validate_files(files)
        for path in files:
            process_file(path, processor)
    except Exception as err:
        print(err)
def main():
    """Translating KITTI data into RecordIO"""
    arguments = docopt.docopt(__doc__)
    data_root = arguments['--data']
    out_root = arguments['--out']
    # Load both splits up front, then write one brick file per split.
    X_train, Y_train = grab_images_labels(data_root, 'train')
    X_val, Y_val = grab_images_labels(data_root, 'val')
    for split, images, labels in (('train', X_train, Y_train),
                                  ('val', X_val, Y_val)):
        writer = Writer(os.path.join(out_root, split + '.brick'))
        writer.write(images, labels)
        writer.close()
        print(' * Finished writing %s.' % split)
def query_by_command(self):
    """command-line interface"""
    arguments = docopt(__doc__)
    from stations import stations
    from_station = stations.get(arguments['<from>'])
    to_station = stations.get(arguments['<to>'])
    all_train_type = ['-d', '-g', '-t', '-k', '-z']
    # Selected train types ('-g' -> 'G'); default to all when none is chosen.
    train_type = [flag[1:].upper() for flag in all_train_type if arguments[flag]]
    if not train_type:
        train_type = [flag[1:].upper() for flag in all_train_type]
    date = arguments['<date>']
    # Build the 12306 left-ticket query URL.
    url = 'https://kyfw.12306.cn/otn/lcxxcx/query?purpose_codes=ADULT&queryDate={}&from_station={}&to_station={}'.format(date,from_station, to_station)
    import requests
    requests.packages.urllib3.disable_warnings()
    response = requests.get(url, verify=False)
    rows = response.json()['data']['datas']
    from ticketSearch import TrainCollection
    TrainCollection(rows, train_type).print_pretty()