def main():
    maker = readmemaker.ReadmeMaker(PROJECT_NAME, OUTPUT_DIR)
    intro_root = Path(os.path.join("pages", "introduction"))
    maker.write_file(intro_root.joinpath("badges.txt"))
    maker.set_indent_level(0)
    maker.write_chapter("Summary")
    maker.write_file(intro_root.joinpath("summary.txt"))
    maker.write_chapter("Features")
    maker.write_file(intro_root.joinpath("features.txt"))
    write_examples(maker)
    maker.write_file(
        maker.doc_page_root_dir_path.joinpath("installation.rst"))
    maker.set_indent_level(0)
    maker.write_chapter("Documentation")
    maker.write_line_list([
        "http://{:s}.rtfd.io/".format(PROJECT_NAME),
    ])
    return 0
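# A minimal sketch of how a readmemaker-based script like main() above is
# usually invoked; the sys import and the placement of this guard at the end
# of the module are assumptions, not part of the original snippet.
if __name__ == "__main__":
    import sys
    sys.exit(main())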
def write_examples(maker):
    maker.set_indent_level(0)
    maker.write_chapter("Examples")
    examples_root = Path("pages").joinpath("examples")
    maker.inc_indent_level()
    maker.write_chapter("Load a CSV table")
    maker.write_file(examples_root.joinpath("load_csv.txt"))
    maker.write_chapter("Get loaded table data as pandas.DataFrame instance")
    maker.write_file(examples_root.joinpath("as_dataframe.txt"))
    maker.write_chapter("For more information")
    maker.write_line_list([
        "More examples are available at ",
        "http://{:s}.rtfd.io/en/latest/pages/examples/index.html".format(
            PROJECT_NAME),
    ])
def test_normal(
        self, tmpdir, table_text, filename,
        table_name, expected_tabletuple_list):
    file_path = Path(str(tmpdir.join(filename)))
    file_path.parent.makedirs_p()
    with open(file_path, "w") as f:
        f.write(table_text)
    loader = ptr.JsonTableFileLoader(file_path)
    #loader.table_name = table_name
    load = False
    for tabledata in loader.load():
        print("[actual]\n{}".format(ptw.dump_tabledata(tabledata)))
        assert tabledata in expected_tabletuple_list
        load = True
    assert load
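# A standalone sketch of the loader usage exercised by the test above, using
# only the calls the test itself shows (ptr.JsonTableFileLoader(), load(),
# ptw.dump_tabledata()); the "sample.json" filename is a hypothetical example.
loader = ptr.JsonTableFileLoader("sample.json")
for tabledata in loader.load():
    print(ptw.dump_tabledata(tabledata))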
def test_normal(
        self, tmpdir, test_id, table_text, filename,
        table_name, expected_tabledata_list):
    file_path = Path(str(tmpdir.join(filename)))
    file_path.parent.makedirs_p()
    with io.open(file_path, "w", encoding="utf-8") as f:
        f.write(table_text)
    loader = ptr.HtmlTableFileLoader(file_path)
    loader.table_name = table_name
    for tabledata, expected in zip(loader.load(), expected_tabledata_list):
        print("--- test {} ---".format(test_id))
        print("[expected]\n{}".format(ptw.dump_tabledata(expected)))
        print("[actual]\n{}".format(ptw.dump_tabledata(tabledata)))
        print("")
        assert tabledata == expected
def test_normal(
        self, tmpdir, test_id, table_text, filename,
        table_name, expected_tabledata_list):
    file_path = Path(str(tmpdir.join(filename)))
    file_path.parent.makedirs_p()
    with open(file_path, "w") as f:
        f.write(table_text)
    loader = ptr.MediaWikiTableFileLoader(file_path)
    loader.table_name = table_name
    load = False
    for tabledata, expected in zip(loader.load(), expected_tabledata_list):
        print("--- test {} ---".format(test_id))
        print("[tabledata]\n{}".format(tabledata))
        print("[expected]\n{}".format(expected))
        print("")
        assert tabledata == expected
        load = True
    assert load
def test_normal(
        self, tmpdir, test_id, table_text, filename, expected):
    file_path = Path(str(tmpdir.join(filename)))
    file_path.parent.makedirs_p()
    with io.open(file_path, "w", encoding="utf-8") as f:
        f.write(table_text)
    loader = ptr.LtsvTableFileLoader(file_path)
    for tabledata in loader.load():
        print("test-id={}".format(test_id))
        print("[expected]\n{}".format(ptw.dump_tabledata(expected)))
        print("[actual]\n{}".format(ptw.dump_tabledata(tabledata)))
        assert tabledata == expected
def test_normal(
        self, tmpdir,
        test_id, table_text, filename, header_list, expected):
    file_path = Path(str(tmpdir.join(filename)))
    file_path.parent.makedirs_p()
    with io.open(file_path, "w", encoding="utf-8") as f:
        f.write(table_text)
    loader = ptr.TsvTableFileLoader(file_path)
    loader.header_list = header_list
    for tabledata in loader.load():
        print("test-id={}".format(test_id))
        print(ptw.dump_tabledata(tabledata))
        assert tabledata in expected
def test_normal(
        self, tmpdir, test_id, table_text, filename,
        table_name, expected_tabledata_list):
    file_path = Path(str(tmpdir.join(filename)))
    file_path.parent.makedirs_p()
    with open(file_path, "w") as f:
        f.write(table_text)
    loader = ptr.MarkdownTableFileLoader(file_path)
    loader.table_name = table_name
    load = False
    for tabledata, expected in zip(loader.load(), expected_tabledata_list):
        print("--- test {} ---".format(test_id))
        print("[tabledata]\n{}".format(tabledata))
        print("[expected]\n{}".format(expected))
        print("")
        assert tabledata == expected
        load = True
    assert load
def test_normal(
        self, tmpdir,
        test_id, tabledata, filename, header_list, expected):
    file_path = Path(str(tmpdir.join(filename)))
    file_path.parent.makedirs_p()
    con = SimpleSQLite(file_path, "w")
    con.create_table_from_tabledata(tabledata)
    loader = ptr.SqliteFileLoader(file_path)
    loader.header_list = header_list
    for tabledata in loader.load():
        print("test-id={}".format(test_id))
        print(ptw.dump_tabledata(tabledata))
        assert tabledata in expected
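# A sketch of the SQLite round trip the test above relies on: write table data
# with SimpleSQLite, then read it back through ptr.SqliteFileLoader. The
# "sample.sqlite" filename and the pre-built tabledata object are assumptions.
con = SimpleSQLite("sample.sqlite", "w")
con.create_table_from_tabledata(tabledata)
for loaded in ptr.SqliteFileLoader("sample.sqlite").load():
    print(ptw.dump_tabledata(loaded))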
def save_weights(fname, params, metadata=None):
    """ assumes all params have unique names.
    """
    # Includes batchnorm params now
    names = [par.name for par in params]
    if len(names) != len(set(names)):
        raise ValueError('need unique param names')
    param_dict = {param.name: param.get_value(borrow=False)
                  for param in params}
    if metadata is not None:
        param_dict['metadata'] = pickle.dumps(metadata)
    logging.info('saving {} parameters to {}'.format(len(params), fname))
    # try to avoid half-written files
    fname = Path(fname)
    if fname.exists():
        tmp_fname = Path(fname.stripext() + '.tmp.npz')  # TODO yes, this is a hack
        np.savez_compressed(str(tmp_fname), **param_dict)
        tmp_fname.rename(fname)
    else:
        np.savez_compressed(str(fname), **param_dict)
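# save_weights() stores each parameter under its name via np.savez_compressed,
# so restoring is a plain np.load over the archive. A sketch assuming
# Theano-style shared variables (objects exposing .name and .set_value(),
# matching the get_value(borrow=False) call above).
def load_weights(fname, params):
    archive = np.load(str(fname))
    for par in params:
        par.set_value(archive[par.name])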
# rhythmbox_playlists_writer.py -- from project migrate-itunes-to-rhythmbox (author: phauer)
def write(playlists: List[Playlist], target_path: Path, target_library_root: str, source_library_root: str, exclude_playlist_folders: bool = True) -> None:
    persistent_id_to_playlist_dict = create_persistent_id_to_playlist_dict(playlists)
    filtered_playlist = filter_playlists_if_necessary(playlists, exclude_playlist_folders)
    root = etree.Element("rhythmdb-playlists")
    for playlist in filtered_playlist:
        name = create_playlist_name(playlist, persistent_id_to_playlist_dict)
        attributes = {'name': name, 'show-browser': 'true', 'browser-position': "231",
                      'search-type': "search-match", 'type': "static"}
        playlist_element = etree.SubElement(root, "playlist", attributes)
        for song in playlist.tracks:
            if song.location_escaped is not None:
                transformed_location = transform_to_rhythmbox_path(song.location_escaped, target_library_root, source_library_root)
                location_element = etree.SubElement(playlist_element, "location")
                location_element.text = transformed_location
            else:
                print(" Can't convert the track [{} - {}] in playlist '{}' because there is no file location defined. It's probably a remote file."
                      .format(song.artist, song.name, playlist.name))
    common.write_to_file(root, target_path, add_standalone_to_xml_declaration=False)
def find_workspace_path():
    """ Look for a workspace root somewhere in the upper directories
    hierarchy
    """
    head = os.getcwd()
    tail = True
    while tail:
        tsrc_path = os.path.join(head, ".tsrc")
        if os.path.isdir(tsrc_path):
            return path.Path(head)
        tbuild_yml_path = os.path.join(head, "tbuild.yml")
        if os.path.exists(tbuild_yml_path):
            return path.Path(head)
        else:
            head, tail = os.path.split(head)
    raise tsrc.Error("Could not find current workspace")
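# find_workspace_path() either returns a path.Path or raises tsrc.Error once
# the directory hierarchy is exhausted, so callers typically wrap it; a minimal
# sketch (the handling style shown here is an assumption).
try:
    workspace_path = find_workspace_path()
except tsrc.Error as error:
    print("not inside a workspace: {}".format(error))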
def run_bear(bearcls, instance, input, **options):
    """
    Analyze `input` with :class:`Unleashed` Bear `instance`.

    :param bearcls:
        The original coala Bear class.
    :param input:
        Either a file ``path.Path`` instance or a ``str`` of input data.
    """
    if isinstance(input, Path):
        filename = input
        data = input.lines()
    else:
        filename = ':bearsh-input:'
        data = [line + '\n' for line in str(input).split('\n')]
    return bearcls.run(instance, filename, data, **options)
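# run_bear() accepts either a path.Path (read via .lines()) or a plain string
# of source text; a hedged usage sketch in which MyBear, bear_instance and the
# keyword option are hypothetical placeholders.
result_from_file = run_bear(MyBear, bear_instance, Path("example.py"))
result_from_text = run_bear(MyBear, bear_instance, "x = 1\n", some_option=True)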
def from_json(cls, json_filename, db_filename='crashes.sqlite'):
    """
    Parses JSON creating a database.
    """
    json_filename = Path(json_filename)
    db_filename = Path(db_filename)
    if not db_filename.exists():
        pass
    elif db_filename.mtime > json_filename.mtime:
        return Corpus(db_filename)
    # Autovivify the corpus
    corpus = Corpus(db_filename)
    # Parse the JSON.
    data = load_oracle_data(json_filename, should_parse=False)
    crashes, _oracle_all, crash2bucket, _total_ids, _total_buckets = data
    for report_id, bucket_id in crash2bucket.items():
        if report_id not in crashes:
            continue
        corpus.insert_crash(report_id, crashes[report_id], bucket_id)
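# The core of from_json() is an mtime-based cache check: the SQLite corpus is
# rebuilt only when the database is missing or older than the JSON dump. The
# same check in isolation, as a sketch (the function name and arguments are
# hypothetical; only the .exists() and .mtime attributes used above are relied on).
def is_cache_fresh(json_filename, db_filename):
    json_filename, db_filename = Path(json_filename), Path(db_filename)
    return db_filename.exists() and db_filename.mtime > json_filename.mtime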
def save(ctx, dest="docs.html", format="html"):
    """Save/update docs under destination directory."""
    print("STEP: Generate docs in HTML format")
    build(ctx, builder=format)
    print("STEP: Save docs under %s/" % dest)
    source_dir = Path(ctx.config.sphinx.destdir)/format
    Path(dest).rmtree_p()
    source_dir.copytree(dest)
    # -- POST-PROCESSING: Polish up.
    for part in [".buildinfo", ".doctrees"]:
        partpath = Path(dest)/part
        if partpath.isdir():
            partpath.rmtree_p()
        elif partpath.exists():
            partpath.remove_p()

# -----------------------------------------------------------------------------
# TASK CONFIGURATION:
# -----------------------------------------------------------------------------
def test_command_line(virtualenv, tmpdir):
    wheels = Path(DIST_DIR).files(pattern="*.whl")
    assert len(wheels) == 1
    virtualenv.run("pip install %s" % wheels[0])
    expected_version = "3.10.1"
    for executable_name in ["cmake", "cpack", "ctest"]:
        output = virtualenv.run(
            "%s --version" % executable_name, capture=True).splitlines()[0]
        assert output == "%s version %s" % (executable_name, expected_version)
    test_script = tmpdir.join("test_cmake.cmake")
    test_script.write(textwrap.dedent(r"""
        message("${CMAKE_COMMAND}")
        """))
    output = virtualenv.run("cmake -P %s" % str(test_script), capture=True)
    expected = os.path.realpath(virtualenv.virtualenv).replace(os.sep, "/")
    assert output[:len(expected)].lower() == expected.lower()
def write_examples(maker):
    maker.set_indent_level(0)
    maker.write_chapter("Usage")
    usage_root = Path("pages").joinpath("usage")
    maker.inc_indent_level()
    maker.write_chapter("Create SQLite database from files")
    maker.write_line_list([
        ".. image:: docs/gif/usage_example.gif",
    ])
    maker.write_chapter("Create SQLite database from URL")
    maker.write_file(usage_root.joinpath("url", "usage.txt"))
    maker.inc_indent_level()
    maker.write_chapter("For more information")
    maker.write_line_list([
        "More examples are available at ",
        "http://{:s}.rtfd.io/en/latest/pages/{:s}/index.html".format(
            PROJECT_NAME.lower(), maker.examples_dir_name),
    ])
def write_examples(maker):
    maker.set_indent_level(0)
    maker.write_chapter("Examples")
    example_root = Path("pages").joinpath("examples")
    maker.inc_indent_level()
    maker.write_chapter("Validate a filename")
    maker.write_file(example_root.joinpath("validate_filename_code.txt"))
    maker.write_chapter("Sanitize a filename")
    maker.write_file(example_root.joinpath("sanitize_filename_code.txt"))
    maker.write_chapter("Sanitize a variable name")
    maker.write_file(example_root.joinpath("sanitize_var_name_code.txt"))
    maker.write_chapter("For more information")
    maker.write_line_list([
        "More examples are available at ",
        "http://pathvalidate.rtfd.io/en/latest/pages/examples/index.html",
    ])
def write_examples(maker):
    maker.set_indent_level(0)
    maker.write_chapter("Usage")
    usage_root = Path("pages").joinpath("usage")
    maker.inc_indent_level()
    maker.write_chapter("Set traffic control (``tcset`` command)")
    maker.write_file(usage_root.joinpath("tcset", "description.txt"))
    maker.write_file(usage_root.joinpath("tcset", "basic_usage.rst"))
    maker.write_file(usage_root.joinpath("tcdel", "header.rst"))
    maker.write_file(usage_root.joinpath("tcdel", "usage.rst"))
    maker.write_file(usage_root.joinpath("tcshow", "header.rst"))
    maker.write_file(usage_root.joinpath("tcshow", "usage.rst"))
    maker.write_chapter("For more information")
    maker.write_line_list([
        "More examples are available at ",
        "http://{:s}.rtfd.io/en/latest/pages/usage/index.html".format(
            PROJECT_NAME),
    ])
def export_result(self, _):
    filename = QtWidgets.QFileDialog.getSaveFileName()[0]
    filepath = Path(filename)
    if not filepath.exists() and filepath != '':
        report = filepath if filepath.ext == ".html" else filepath.dirname() / filepath.namebase + ".html"
        raw = filepath.dirname() / filepath.namebase + ".csv"
        html_file = filepath.dirname() / filepath.namebase + ".html"
        html_file.write_bytes(self.report.generate())
        report.write_text(self.report.generate())
        f = raw.open("w")
        for addr, infos in self.results.iteritems():
            f.write(u"0x%x,%s,%d,%s,0x%x,0x%x\n" % (addr, to_status_name(infos.status), infos.k,
                                                    infos.dependency, infos.alive_branch, infos.dead_branch))
        f.close()
        self.log("[info]", "Export done in %s and %s" % (report.basename(), raw.basename()))
    else:
        self.log("[error]", "File already exists.. (do not save)")
def save_config_clicked(self, infile=True):
    raw_config = self.configuration_textarea.toPlainText()
    if raw_config == "":
        print "Press Generate button first"
    else:
        try:
            json_data = json.loads(raw_config)
            self.core.configuration.Clear()
            json2pb(self.core.configuration, json_data)
            if infile:
                json_data = pb2json(self.core.configuration)
                filename = QtWidgets.QFileDialog.getSaveFileName()[0]
                filepath = Path(filename)
                if filepath != '':
                    bytes = json.dumps(json_data, indent=4)
                    filepath.write_bytes(bytes)
                else:
                    print "Invalid file given %s" % str(filepath)
        except KeyError as e:
            print "invalid key:" + e.message
def dump_trace(self):
    filename = QtWidgets.QFileDialog.getSaveFileName()[0]
    filepath = Path(filename)
    if not filepath.exists() and filepath != '':
        try:
            index = self.traces_tab.currentIndex()
            trace = self.core.traces[self.id_map[index]]
            f = filepath.open("w")
            for line in trace.to_string_generator():
                f.write(line + "\n")
            f.close()
            print "Writing done"
        except KeyError:
            print "Trace not found"
    else:
        print "File already exists.. (do not dump)"
def save_weights(fname, params, history=None):
    param_dict = convert2dict(params)
    logging.info('saving {} parameters to {}'.format(len(params), fname))
    fname = Path(fname)
    filename, ext = osp.splitext(fname)
    history_file = osp.join(osp.dirname(fname), 'history.npy')
    np.save(history_file, history)
    logging.info("Save history to {}".format(history_file))
    if ext == '.npy':
        np.save(filename + '.npy', param_dict)
    else:
        f = gzip.open(fname, 'wb')
        pickle.dump(param_dict, f, protocol=pickle.HIGHEST_PROTOCOL)
        f.close()
def __init__(self, initial, final, nsteps=10):
    path.Path.__init__(self)
    assert isinstance(nsteps, int)
    self._molecules = [initial]
    ci = initial.getCoordinates()
    cf = final.getCoordinates()
    delta = (cf - ci) / (nsteps - 1)
    # only generate the inner range
    for k in range(1, nsteps - 1):
        m2 = Molecule.fromMolecule(initial)
        m2.setCoordinates(ci + k * delta)
        self._molecules.append(m2)
    self._molecules.append(final)
    assert self.getNumBeads() == nsteps
def get_seg_path(self, word):
    path = Path(word)
    while not path.is_ended():
        child = path.get_fringe_word()
        parts = child.split("'")
        if len(parts) == 2 and len(parts[0]) > 0 and self.lang == 'eng':
            path.expand(child, parts[0], 'APOSTR')
        else:
            parts = child.split('-')
            if len(parts) > 1:
                p1, p2 = parts[0], child[len(parts[0]) + 1:]
                path.expand(child, (p1, p2), 'HYPHEN')
            else:
                parent, type_ = self.predict(child)
                path.expand(child, parent, type_)
    return path
def get_seg_path(self, w):
    path = Path(w)
    while not path.is_ended():
        child = path.get_fringe_word()
        parts = child.split("'")
        if len(parts) == 2 and len(parts[0]) > 0 and self.base.lang == 'eng':
            path.expand(child, parts[0], 'APOSTR')
        else:
            parts = child.split('-')
            if len(parts) > 1:
                p1, p2 = parts[0], child[len(parts[0]) + 1:]
                path.expand(child, (p1, p2), 'HYPHEN')
            else:
                parent, type_ = self.predict(child)
                path.expand(child, parent, type_)
    return path
def write_examples(maker):
    maker.set_indent_level(0)
    maker.write_chapter("Usage")
    intro_root = Path(os.path.join("pages", "introduction"))
    maker.write_file(intro_root.joinpath("usage.txt"))
def cli(config):
    global down_cache, src_cache
    if config:
        read_config(Path(config))
    else:
        read_config()
    down_cache = DirMap(cfg.cache.dir)
    src_cache = DirMap(cfg.source.dir)
def __init__(self, string):
    self.uri, filename = string.split("::")
    self.filename = Path(filename)
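# The constructor above splits a "URI::filename" spec into a download URI and a
# local file name; a usage sketch in which the class name Source and the URL
# are hypothetical.
src = Source("https://example.com/pkg-1.0.tar.gz::pkg-1.0.tar.gz")
print(src.uri)       # -> https://example.com/pkg-1.0.tar.gz
print(src.filename)  # -> pkg-1.0.tar.gz (as a Path)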