Python cmp() usage examples (source code)
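The snippets below are collected from open-source projects. Most of them call filecmp.cmp(f1, f2, shallow=True) from the standard library, which returns True when the two files compare equal: with shallow=True, two files with identical os.stat() signatures (file type, size, modification time) are reported equal without reading their contents, while shallow=False always compares contents. A minimal sketch with hypothetical file names:

import filecmp

# shallow=False compares file contents instead of relying on the os.stat() signatures
if filecmp.cmp("report_v1.csv", "report_v2.csv", shallow=False):
    print("files are byte-identical")
else:
    print("files differ")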

test_file_handler.py (project: PlasoScaffolder, author: ClaudiaSaxer)
def testCopyFile(self):
    """Tests if the copying of a file none existing beforhand works."""
    expected_content = "this is test content."

    with tempfile.TemporaryDirectory() as tmpdir:
      source = os.path.join(tmpdir, self.file)
      destination = os.path.join(tmpdir, "copy", self.file)

      with open(source, "a") as f:
        f.write(expected_content)

      handler = file_handler.FileHandler()
      self.assertFalse(os.path.exists(destination))
      handler.CopyFile(source, destination)
      self.assertTrue(os.path.exists(destination))
      self.assertTrue(filecmp.cmp(destination, source))
recipe-551777.py (project: code, author: ActiveState)
def true_duplicates(files):
    """
    Compare the given files, breaking them down into groups with identical
    content.
    """
    while len(files) > 1:
        next_set = []
        this_set = []
        master = files[0]
        this_set.append(master)
        for other in files[1:]:
            if filecmp.cmp(master, other, False):
                this_set.append(other)
            else:
                next_set.append(other)
        if len(this_set) > 1:
            yield this_set
        files = next_set
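
A hedged usage sketch for the generator above (the file paths are hypothetical): each yielded group contains paths whose contents are byte-identical, because the third argument to filecmp.cmp() disables the shallow os.stat() check.

import filecmp  # required by true_duplicates()

candidates = ["backup/a.bin", "backup/b.bin", "backup/c.bin"]  # hypothetical paths
for group in true_duplicates(candidates):
    print("identical contents:", group)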
test_i_aws_encryption_sdk_cli.py (project: aws-encryption-sdk-cli, author: awslabs)
def test_file_to_file_decrypt_required_encryption_context_success(tmpdir, required_encryption_context):
    plaintext = tmpdir.join('source_plaintext')
    ciphertext = tmpdir.join('ciphertext')
    decrypted = tmpdir.join('decrypted')
    with open(str(plaintext), 'wb') as f:
        f.write(os.urandom(1024))

    encrypt_args = encrypt_args_template().format(
        source=str(plaintext),
        target=str(ciphertext)
    )
    decrypt_args = decrypt_args_template().format(
        source=str(ciphertext),
        target=str(decrypted)
    ) + ' --encryption-context ' + required_encryption_context

    aws_encryption_sdk_cli.cli(shlex.split(encrypt_args, posix=not is_windows()))
    aws_encryption_sdk_cli.cli(shlex.split(decrypt_args, posix=not is_windows()))

    assert filecmp.cmp(str(plaintext), str(decrypted))
test_i_aws_encryption_sdk_cli.py (project: aws-encryption-sdk-cli, author: awslabs)
def test_file_to_file_cycle(tmpdir):
    plaintext = tmpdir.join('source_plaintext')
    ciphertext = tmpdir.join('ciphertext')
    decrypted = tmpdir.join('decrypted')
    with open(str(plaintext), 'wb') as f:
        f.write(os.urandom(1024))

    encrypt_args = encrypt_args_template().format(
        source=str(plaintext),
        target=str(ciphertext)
    )
    decrypt_args = decrypt_args_template().format(
        source=str(ciphertext),
        target=str(decrypted)
    )

    aws_encryption_sdk_cli.cli(shlex.split(encrypt_args, posix=not is_windows()))
    aws_encryption_sdk_cli.cli(shlex.split(decrypt_args, posix=not is_windows()))

    assert filecmp.cmp(str(plaintext), str(decrypted))
test_i_aws_encryption_sdk_cli.py (project: aws-encryption-sdk-cli, author: awslabs)
def test_file_to_file_cycle_target_through_symlink(tmpdir):
    plaintext = tmpdir.join('source_plaintext')
    output_dir = tmpdir.mkdir('output')
    os.symlink(str(output_dir), str(tmpdir.join('output_link')))
    ciphertext = tmpdir.join('output_link', 'ciphertext')
    decrypted = tmpdir.join('decrypted')
    with open(str(plaintext), 'wb') as f:
        f.write(os.urandom(1024))

    encrypt_args = encrypt_args_template().format(
        source=str(plaintext),
        target=str(ciphertext)
    )
    decrypt_args = decrypt_args_template().format(
        source=str(ciphertext),
        target=str(decrypted)
    )

    aws_encryption_sdk_cli.cli(shlex.split(encrypt_args, posix=not is_windows()))
    aws_encryption_sdk_cli.cli(shlex.split(decrypt_args, posix=not is_windows()))

    assert filecmp.cmp(str(plaintext), str(decrypted))
test_i_aws_encryption_sdk_cli.py (project: aws-encryption-sdk-cli, author: awslabs)
def test_file_to_file_cycle_with_caching(tmpdir):
    plaintext = tmpdir.join('source_plaintext')
    ciphertext = tmpdir.join('ciphertext')
    decrypted = tmpdir.join('decrypted')
    with open(str(plaintext), 'wb') as f:
        f.write(os.urandom(1024))

    encrypt_args = encrypt_args_template(caching=True).format(
        source=str(plaintext),
        target=str(ciphertext)
    )
    decrypt_args = decrypt_args_template().format(
        source=str(ciphertext),
        target=str(decrypted)
    )

    aws_encryption_sdk_cli.cli(shlex.split(encrypt_args, posix=not is_windows()))
    aws_encryption_sdk_cli.cli(shlex.split(decrypt_args, posix=not is_windows()))

    assert filecmp.cmp(str(plaintext), str(decrypted))
test_i_aws_encryption_sdk_cli.py (project: aws-encryption-sdk-cli, author: awslabs)
def test_file_to_dir_cycle(tmpdir):
    inner_dir = tmpdir.mkdir('inner')
    plaintext = tmpdir.join('source_plaintext')
    ciphertext = inner_dir.join('source_plaintext.encrypted')
    decrypted = tmpdir.join('decrypted')
    with open(str(plaintext), 'wb') as f:
        f.write(os.urandom(1024))

    encrypt_args = encrypt_args_template().format(
        source=str(plaintext),
        target=str(inner_dir)
    )
    decrypt_args = decrypt_args_template().format(
        source=str(ciphertext),
        target=str(decrypted)
    )

    aws_encryption_sdk_cli.cli(shlex.split(encrypt_args, posix=not is_windows()))
    assert os.path.isfile(str(ciphertext))
    aws_encryption_sdk_cli.cli(shlex.split(decrypt_args, posix=not is_windows()))

    assert filecmp.cmp(str(plaintext), str(decrypted))
judge.py (project: Play.Excel, author: abhijithasokan)
def validate(self, pid):
        """Compare the output file with the key.

        Args:
            pid: The id of the problem, used to map to the correct key.

        Returns:
            "AC" (Accepted) if the comparison succeeds,
            "WA" (Wrong Answer) if it fails.
        """
        if filecmp.cmp(self.cwd + "/tmp/output.txt",
                       self.cwd + "/env/key/key" + str(pid) + ".txt"):
            return "AC"
        return "WA"
main.py (project: betaPika, author: alchemistake)
def delete_identical_files():
    for battle_folder in os.listdir(download_path):
        battle_folder_path = os.path.join(os.curdir, download_folder_name, battle_folder)

        if not battle_folder.startswith('.') and os.path.isdir(battle_folder_path):
            battle_files = os.listdir(battle_folder_path)

            previous_battle_file = os.path.join(os.curdir, "main.py")
            for battle_file in battle_files:
                current_battle_file = os.path.join(os.curdir, download_folder_name, battle_folder, battle_file)
                if filecmp.cmp(previous_battle_file, current_battle_file, shallow=0):
                    os.remove(current_battle_file)
                    print current_battle_file, "Deleted, copy of another file"
                else:
                    previous_battle_file = current_battle_file
    print "Identical file search complete."
validate_mac_table_files.py (project: cluster-genesis, author: open-power-ref-design-toolkit)
def compare_files(file1, file2, log):
    """Compare two files

    Use Python's filecmp module to compare two files and log/print
    results.

    Args:
        file1 (string): Path of first file to compare
        file2 (string): Path of second file to compare
        log (:obj:`Logger`): Log file object.

    Returns:
        boolean: True if they seem equal, False otherwise
    """
    if filecmp.cmp(file1, file2):
        msg = ("Two MAC Address Table Files Are Identical! '%s' & '%s'"
               % (file1, file2))
        log.error(msg)
        print("Error: " + msg)
        return True
    else:
        return False
mx.py (project: mx, author: graalvm)
def _sorted_unique_jdk_configs(configs):
    path_seen = set()
    unique_configs = [c for c in configs if c.home not in path_seen and not path_seen.add(c.home)]

    def _compare_configs(c1, c2):
        if c1 == _default_java_home:
            if c2 != _default_java_home:
                return 1
        elif c2 == _default_java_home:
            return -1
        if c1 in _extra_java_homes:
            if c2 not in _extra_java_homes:
                return 1
        elif c2 in _extra_java_homes:
            return -1
        return VersionSpec.__cmp__(c1.version, c2.version)
    return sorted(unique_configs, cmp=_compare_configs, reverse=True)
_amt_utils_test.py (project: AutoMergeTool, author: xgouchet)
def test_single_conflict_unsolved(self):
        """Tests a walker against a file with a single conflict, without solving it"""

        # Given a file to merge
        file = CW_PATH.format('single_conflict')
        walker = ConflictsWalker(file, 'test', REPORT_NONE, False)

        # When walking the conflicts
        self.assertTrue(walker.has_more_conflicts())
        self.assertFalse(walker.has_more_conflicts())
        walker.end(False)

        # Then check the output
        self.assertTrue(filecmp.cmp(walker.merged, file))
        self.assertEqual(walker.get_merge_status(), ERROR_CONFLICTS)
        os.remove(walker.merged)
_amt_utils_test.py (project: AutoMergeTool, author: xgouchet)
def test_single_conflict_rewritten(self):
        """Tests a walker against a file with a single conflict, without solving it"""

        # Given a file to merge
        file = CW_PATH.format('single_conflict')
        walker = ConflictsWalker(file, 'test', REPORT_NONE, False)

        # When walking the conflicts
        self.assertTrue(walker.has_more_conflicts())
        conflict = walker.next_conflict()
        conflict.rewrite(RESOLUTION)
        self.assertFalse(walker.has_more_conflicts())
        walker.end(False)

        # Then check the output
        self.assertTrue(filecmp.cmp(walker.merged, CW_PATH.format('single_conflict_resolved')))
        self.assertEqual(walker.get_merge_status(), ERROR_CONFLICTS)
        os.remove(walker.merged)
_amt_utils_test.py (project: AutoMergeTool, author: xgouchet)
def test_single_conflict_solved(self):
        """Tests a walker against a file with a single conflict, and solving it"""

        # Given a file to merge
        file = CW_PATH.format('single_conflict')
        walker = ConflictsWalker(file, 'test', REPORT_NONE, False)

        # When walking the conflicts
        self.assertTrue(walker.has_more_conflicts())
        conflict = walker.next_conflict()
        conflict.resolve(RESOLUTION)
        self.assertFalse(walker.has_more_conflicts())
        walker.end(False)

        # Then check the output
        self.assertTrue(filecmp.cmp(walker.merged, CW_PATH.format('single_conflict_resolved')))
        self.assertEqual(walker.get_merge_status(), SUCCESS)
        os.remove(walker.merged)
test_functional.py (project: pymongo-schema, author: pajachiet)
def test02_transform():
    base_output = "output_fctl_data_dict"
    outputs = {}
    extensions = ['html', 'xlsx', 'tsv', 'md']
    for ext in extensions:
        outputs[ext] = "{}.{}".format(base_output, ext)

    exp = os.path.join(TEST_DIR, 'resources', 'expected', 'data_dict')
    argv = ['transform', SCHEMA_FILE, '--output', base_output, '--columns',
            'Field_compact_name', 'Field_name', 'Full_name', 'Description', 'Count', 'Percentage',
            'Types_count',
            '--formats'] + extensions
    main(argv)

    assert filecmp.cmp(outputs['tsv'], "{}.tsv".format(exp))
    assert filecmp.cmp(outputs['md'], "{}.md".format(exp))
    with open(outputs['html']) as out_fd, \
            open("{}.html".format(exp)) as exp_fd:
        assert out_fd.read().replace(' ', '') == exp_fd.read().replace(' ', '')
    res = [cell.value for row in load_workbook(outputs['xlsx']).active for cell in row]
    exp = [cell.value for row in load_workbook("{}.xlsx".format(exp)).active for cell in row]
    assert res == exp
    for output in outputs.values():
        os.remove(output)
test_functional.py (project: pymongo-schema, author: pajachiet)
def test04_transform_default_cols():
    base_output = "output_fctl_data_dict_default"
    outputs = {}
    extensions = ['html', 'xlsx', 'tsv', 'md']
    for ext in extensions:
        outputs[ext] = "{}.{}".format(base_output, ext)

    exp = os.path.join(TEST_DIR, 'resources', 'expected', 'data_dict_default')
    argv = ['transform', SCHEMA_FILE, '--output', base_output, '--formats'] + extensions
    main(argv)

    assert filecmp.cmp(outputs['tsv'], "{}.tsv".format(exp))
    assert filecmp.cmp(outputs['md'], "{}.md".format(exp))
    with open(outputs['html']) as out_fd, \
            open("{}.html".format(exp)) as exp_fd:
        assert out_fd.read().replace(' ', '') == exp_fd.read().replace(' ', '')
    res = [cell.value for row in load_workbook(outputs['xlsx']).active for cell in row]
    exp = [cell.value for row in load_workbook("{}.xlsx".format(exp)).active for cell in row]
    assert res == exp
    for output in outputs.values():
        os.remove(output)
test_functional.py (project: pymongo-schema, author: pajachiet)
def test06_compare():
    base_output = "output_fctl_diff"
    outputs = {}
    extensions = ['html', 'xlsx', 'tsv', 'md']
    for ext in extensions:
        outputs[ext] = "{}.{}".format(base_output, ext)

    exp = os.path.join(TEST_DIR, 'resources', 'functional', 'expected', 'diff')
    exp_schema = os.path.join(TEST_DIR, 'resources', 'input', 'test_schema2.json')
    argv = ['compare', SCHEMA_FILE, exp_schema, '--output', base_output, '--formats'] + extensions
    main(argv)

    assert filecmp.cmp(outputs['tsv'], "{}.tsv".format(exp))
    assert filecmp.cmp(outputs['md'], "{}.md".format(exp))
    with open(outputs['html']) as out_fd, \
            open("{}.html".format(exp)) as exp_fd:
        assert out_fd.read().replace(' ', '') == exp_fd.read().replace(' ', '')
    res = [cell.value for row in load_workbook(outputs['xlsx']).active for cell in row]
    exp = [cell.value for row in load_workbook("{}.xlsx".format(exp)).active for cell in row]
    assert res == exp
    for output in outputs.values():
        os.remove(output)
test_functional.py (project: pymongo-schema, author: pajachiet)
def test07_compare_detailed():
    base_output = "output_fctl_detailed_diff"
    outputs = {}
    extensions = ['html', 'xlsx', 'tsv', 'md']
    for ext in extensions:
        outputs[ext] = "{}.{}".format(base_output, ext)

    exp = os.path.join(TEST_DIR, 'resources', 'functional', 'expected', 'detailed_diff')
    exp_schema = os.path.join(TEST_DIR, 'resources', 'input', 'test_schema2.json')
    argv = ['compare', SCHEMA_FILE, exp_schema, '--output', base_output, '--detailed_diff',
            '--formats'] + extensions
    main(argv)

    assert filecmp.cmp(outputs['tsv'], "{}.tsv".format(exp))
    assert filecmp.cmp(outputs['md'], "{}.md".format(exp))
    with open(outputs['html']) as out_fd, \
            open("{}.html".format(exp)) as exp_fd:
        assert out_fd.read().replace(' ', '') == exp_fd.read().replace(' ', '')
    res = [cell.value for row in load_workbook(outputs['xlsx']).active for cell in row]
    exp = [cell.value for row in load_workbook("{}.xlsx".format(exp)).active for cell in row]
    assert res == exp
    for output in outputs.values():
        os.remove(output)
test_GroupIO.py (project: pbtranscript, author: PacificBiosciences)
def test_all(self):
        """Test All"""
        expected_r = GroupRecord(name="group1",
                                 members=["member0", "member1", "member2"])
        with GroupReader(GROUP_FN_1) as reader:
            records = [r for r in reader]
            self.assertEqual(len(records), 1)
            self.assertEqual(records[0], expected_r)

        expected_r = GroupRecord(name="PB.1.1",
                                 members="i0_HQ_sampleb92221|c8319/f2p0/463,i0_HQ_sampleb92221|c28/f4p0/460,i0_HQ_sampleb92221|c524/f2p0/462,i0_HQ_sampleb92221|c539/f2p0/460,i0_HQ_sampleb92221|c7864/f22p0/462,i0_HQ_sampleb92221|c7959/f2p0/461,i0_HQ_sampleb92221|c8090/f3p0/462,i0_HQ_sampleb92221|c8099/f3p0/459,i0_HQ_sampleb92221|c8136/f2p0/461,i0_HQ_sampleb92221|c428/f2p0/459".split(','))
        with GroupReader(GROUP_FN_2) as reader:
            records = [r for r in reader]
            self.assertEqual(len(records), 51)
            self.assertEqual(records[0], expected_r)
            out_fn = op.join(OUT_DIR, "test_GroupWriter.txt")
            with GroupWriter(out_fn) as writer:
                for r in records:
                    writer.writeRecord(r)

            self.assertTrue(filecmp.cmp(out_fn, GROUP_FN_2))
test_Summary.py (project: pbtranscript, author: PacificBiosciences)
def test_write(self):
        """Test ClusterSummary.write."""
        obj = ClusterSummary()
        obj.num_consensus_isoforms = 97
        obj.num_total_bases = 97 * 3945

        outFN = op.join(self.testDir, "out/test_ClusterSummary.txt")
        stdoutFN = op.join(self.testDir, "stdout/test_ClusterSummary.txt")
        obj.write(outFN)
        self.assertTrue(filecmp.cmp(outFN, stdoutFN))

        outFN = op.join(self.testDir, "out/test_ClusterSummary.json")
        stdoutFN = op.join(self.testDir, "stdout/test_ClusterSummary.json")
        obj.write(outFN)

        rm_version_string(outFN, outFN + "tmp1")
        rm_version_string(stdoutFN, outFN + "tmp2")
        _compare_reports(self, outFN, stdoutFN)
        #self.assertTrue(filecmp.cmp(outFN + "tmp1", outFN + "tmp2"))
test_CollapsingUtils.py (project: pbtranscript, author: PacificBiosciences)
def test_concatenate_sam(self):
        """Test concatenate_sam(in_sam_files, out_sam)"""
        in_sam_files = [op.join(_SIV_DIR_, f)
                        for f in ["chunk0.sam", "chunk1.sam"]]
        out_sam = op.join(_OUT_DIR_, 'test concatenated.sam')
        expected_sam = op.join(_SIV_DIR_, "sorted-gmap-output.sam")
        concatenate_sam(in_sam_files, out_sam)

        self.assertTrue(op.exists(out_sam))
        self.assertTrue(op.exists(expected_sam))

        #self.assertTrue(filecmp.cmp(out_sam, expected_sam))
        out = [l for l in open(out_sam, 'r') if not l.startswith('@PG')]
        exp = [l for l in open(expected_sam, 'r') if not l.startswith('@PG')]
        # test everything other than @PG are identical
        self.assertEqual(out, exp)

        # chunk01.sam and chunk02.sam has identical PG ID in their SAM headers
        # test concatenated @PG IDs are not conflicting
        pg_ids = [x[3:] for pg in [l for l in open(out_sam, 'r') if l.startswith('@PG')]
                  for x in pg.split('\t') if x.startswith('ID:')]
        self.assertEqual(len(pg_ids), len(set(pg_ids)))
        self.assertEqual(len(pg_ids), 2)
build_utils.py (project: fritzchecksum, author: mementum)
def prepare_issfile(self):
        # Create temp file
        ofilehandle, ofilepath = tempfile.mkstemp()  # open temporary file
        ofile = os.fdopen(ofilehandle, 'w')  # wrap fhandle in "file object"

        ifilepath = self.getissfile()
        ifile = open(ifilepath)  # open original file
        for line in ifile:
            line = self.replace_lines(line)
            ofile.write(line)

        ofile.close()  # close temp file
        ifile.close()  # close original file

        equal = filecmp.cmp(ifilepath, ofilepath, shallow=False)
        if not equal:
            os.remove(ifilepath)  # remove original file
            shutil.move(ofilepath, ifilepath)  # move new file
        else:
            os.remove(ofilepath)  # remove temp file
OTA_server.py (project: pycom-libraries, author: pycom)
def get_diff_list(left, right, ignore=['.DS_Store', 'pymakr.conf']):
    left_paths = get_all_paths(left, ignore=ignore)
    right_paths = get_all_paths(right, ignore=ignore)
    new_files = right_paths.difference(left_paths)
    to_delete = left_paths.difference(right_paths)
    common = left_paths.intersection(right_paths)

    to_update = []
    for f in common:
        if not filecmp.cmp(os.path.join(left, f),
                           os.path.join(right, f),
                           shallow=False):
            to_update.append(f)

    return (to_delete, new_files, (to_update))


# Searches the current working directory for a file starting with "firmware_"
# followed by a version number higher than `current_ver` as per LooseVersion.
# Returns None if such a file does not exist.
# Parameters
#    path - the path to the directory to be searched
#    current_ver - the result must be higher than this version
#
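
The function described by the comment above is not included in this excerpt; what follows is a hypothetical sketch matching that description (the function name and the exact filename pattern are assumptions, not the project's actual code):

from distutils.version import LooseVersion
import os
import re


def find_newer_firmware(path, current_ver):
    # Hypothetical sketch: return the name of the "firmware_<version>" file in
    # `path` with the highest version above `current_ver`, or None if there is none.
    best_name, best_ver = None, LooseVersion(current_ver)
    for name in os.listdir(path):
        match = re.match(r'firmware_(\d[\d.]*)', name)
        if match and LooseVersion(match.group(1).rstrip('.')) > best_ver:
            best_name = name
            best_ver = LooseVersion(match.group(1).rstrip('.'))
    return best_name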
s3_to_hive_operator.py (project: incubator-airflow-old, author: apache)
def _check_file_equality(self, fn_1, fn_2, ext):
        # gz files contain mtime and filename in the header that
        # causes filecmp to return False even if contents are identical
        # Hence decompress to test for equality
        if(ext == '.gz'):
            with gzip.GzipFile(fn_1, 'rb') as f_1,\
                 NamedTemporaryFile(mode='wb') as f_txt_1,\
                 gzip.GzipFile(fn_2, 'rb') as f_2,\
                 NamedTemporaryFile(mode='wb') as f_txt_2:
                shutil.copyfileobj(f_1, f_txt_1)
                shutil.copyfileobj(f_2, f_txt_2)
                f_txt_1.flush()
                f_txt_2.flush()
                return filecmp.cmp(f_txt_1.name, f_txt_2.name, shallow=False)
        else:
            return filecmp.cmp(fn_1, fn_2, shallow=False)
test_compression.py (project: incubator-airflow-old, author: apache)
def test_uncompress_file(self):
        # Testing txt file type
        self.assertRaisesRegexp(NotImplementedError,
                                "^Received .txt format. Only gz and bz2.*",
                                compression.uncompress_file,
                                **{'input_file_name': None,
                                   'file_extension': '.txt',
                                   'dest_dir': None
                                   })
        # Testing gz file type
        fn_txt = self._get_fn('.txt')
        fn_gz = self._get_fn('.gz')
        txt_gz = compression.uncompress_file(fn_gz, '.gz', self.tmp_dir)
        self.assertTrue(filecmp.cmp(txt_gz, fn_txt, shallow=False),
                        msg="Uncompressed file doest match original")
        # Testing bz2 file type
        fn_bz2 = self._get_fn('.bz2')
        txt_bz2 = compression.uncompress_file(fn_bz2, '.bz2', self.tmp_dir)
        self.assertTrue(filecmp.cmp(txt_bz2, fn_txt, shallow=False),
                        msg="Uncompressed file doest match original")
image_featurizer_test.py (project: pic2vec, author: datarobot)
def compare_featurizer_class(featurizer,
                             scaled_size,
                             featurized_data,
                             downsample_size,
                             image_column_headers,
                             automatic_downsample,
                             csv_path,
                             image_list,
                             depth,
                             featurized=False):
    """Check the necessary assertions for a featurizer image."""
    assert featurizer.scaled_size == scaled_size
    assert np.allclose(featurizer.features, featurized_data, atol=ATOL)
    assert featurizer.downsample_size == downsample_size
    assert featurizer.image_column_headers == image_column_headers
    assert featurizer.auto_sample == automatic_downsample
    assert featurizer.csv_path == csv_path
    assert featurizer.image_list == image_list
    assert featurizer.depth == depth
    if featurized:
        assert filecmp.cmp('{}_full'.format(csv_path), CHECK_CSV.format(featurizer.model_name))
        assert featurizer.full_dataframe.equals(pd.read_csv(CHECK_CSV.format(featurizer.model_name)))
test_endtoend_tooldog.py (project: ToolDog, author: bio-tools)
def test_from_biotools_to_galaxy(self, name, json_path, xml_path):
        # Open json to be the content of the requests_mock
        json_answer = main.json_from_file(json_path)
        with requests_mock.mock() as m:
            m.get('https://bio.tools/api/tool/' + name + '/version/1.0',\
                  json=json_answer)
            json = main.json_from_biotools(name, '1.0')
            biotool = main.json_to_biotool(json)
            tmp_file = 'tmp_test_xml.xml'
            main.write_xml(biotool,tmp_file)
            tmp_file_list = glob("tmp_*.xml")
            try:
                for temp_file in tmp_file_list:
                    if len(tmp_file_list) > 1:
                        xml_path = os.path.splitext(json_path)[0] + \
                                   str(re.findall('\d+', temp_file)[0]) + '.xml' 
                    self.assertTrue(filecmp.cmp(xml_path,temp_file))
            finally:
                for temp_file in tmp_file_list:
                    os.remove(temp_file)
test_endtoend_tooldog.py (project: ToolDog, author: bio-tools)
def test_from_biotools_to_cwl(self, name, json_path, cwl_path):
        # Open json to be the content of the requests_mock
        json_answer = main.json_from_file(json_path)
        with requests_mock.mock() as m:
            m.get('https://bio.tools/api/tool/' + name + '/version/1.0',\
                  json=json_answer)
            json = main.json_from_biotools(name, '1.0')
            biotool = main.json_to_biotool(json)
            tmp_file = name + '_tmp_test_cwl.cwl'
            main.write_cwl(biotool,tmp_file)
            tmp_file_list = glob(name + "_tmp_*.cwl")
            print (tmp_file_list)
            try:
                for temp_file in tmp_file_list:
                    if len(tmp_file_list) > 1:
                        cwl_path = os.path.splitext(json_path)[0] + \
                                   str(re.findall('\d+', temp_file)[0]) + '.cwl' 
                    self.assertTrue(filecmp.cmp(cwl_path,temp_file))
            finally:
                pass
                for temp_file in tmp_file_list:
                    os.remove(temp_file)


###########  Main  ###########
bundletool_experimental.py (project: rules_apple, author: bazelbuild)
def _copy_file(self, src, dest, executable, bundle_root):
    """Copies a file into the bundle.

    Args:
      src: The path to the file or directory that should be added.
      dest: The path relative to the bundle root where the file should be
          stored.
      executable: A Boolean value indicating whether or not the file(s) should
          be made executable.
      bundle_root: The bundle root directory into which the files should be
          added.
    """
    full_dest = os.path.join(bundle_root, dest)
    if (os.path.isfile(full_dest) and
        not filecmp.cmp(full_dest, src, shallow=False)):
      raise BundleConflictError(dest)

    self._makedirs_safely(os.path.dirname(full_dest))
    shutil.copy(src, full_dest)
    os.chmod(full_dest, 0755 if executable else 0644)
collectstatic.py (project: aioweb, author: kreopt)
def recursive_overwrite(src, dest, ignore=None):
    if os.path.isdir(src):
        if not os.path.isdir(dest):
            os.makedirs(dest, exist_ok=True)
        files = os.listdir(src)
        if ignore is not None:
            ignored = ignore(src, files)
        else:
            ignored = set()
        for f in files:
            if f not in ignored:
                recursive_overwrite(os.path.join(src, f),
                                    os.path.join(dest, f),
                                    ignore)
    else:
        if not os.path.exists(dest) or not filecmp.cmp(src, dest):
            print('copy {}'.format(src))
            shutil.copyfile(src, dest)
            shutil.copystat(src, dest)
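
A hedged usage sketch (the paths and ignore pattern are hypothetical); shutil.ignore_patterns() returns a callable compatible with the ignore(src, files) call above:

import shutil

# Copy only files that are new or whose contents changed; identical files are skipped.
recursive_overwrite("assets_src", "public/static", ignore=shutil.ignore_patterns("*.map"))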

