Example source code for Python testing()
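The snippets below are collected from the test suites of several open-source projects (keras-rcnn, MetaXcan, allennlp, radar, DeepProfiler) and show numpy.testing assertions in real use. For orientation, here is a minimal standalone sketch of the three helpers that appear most often; the sample arrays and tolerance arguments are illustrative, not taken from any of the projects:

import numpy
import numpy.testing

a = numpy.array([1.0, 2.0, 3.0])
b = a + 1e-9

# Exact element-wise equality of shapes and values.
numpy.testing.assert_array_equal(a, a)
# Element-wise equality up to a relative/absolute tolerance, for float results.
numpy.testing.assert_allclose(a, b, rtol=1e-6)
# Equality up to a fixed number of decimal places.
numpy.testing.assert_almost_equal(a, b, decimal=6)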

test_common.py (project: keras-rcnn, author: broadinstitute)
def test_smooth_l1():
    output = keras.backend.variable(
        [[[2.5, 0.0, 0.4, 0.0],
          [0.0, 0.0, 0.0, 0.0],
          [0.0, 2.5, 0.0, 0.4]],
         [[3.5, 0.0, 0.0, 0.0],
          [0.0, 0.4, 0.0, 0.9],
          [0.0, 0.0, 1.5, 0.0]]]
    )

    target = keras.backend.zeros_like(output)

    x = keras_rcnn.backend.smooth_l1(output, target)

    numpy.testing.assert_approx_equal(keras.backend.eval(x), 8.645)

    weights = keras.backend.variable(
        [[2, 1, 1],
         [0, 3, 0]]
    )

    x = keras_rcnn.backend.smooth_l1(output, target, weights=weights)

    numpy.testing.assert_approx_equal(keras.backend.eval(x), 7.695)
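
The expected totals 8.645 and 7.695 follow from the standard smooth L1 definition (0.5 * x**2 where |x| < 1, |x| - 0.5 otherwise), summed over each box and optionally weighted per anchor. The following NumPy sketch reproduces both numbers; it is a reference for the test values only, and keras_rcnn.backend.smooth_l1 itself may differ in implementation detail:

import numpy

def smooth_l1_reference(output, target, weights=None):
    # Element-wise smooth L1, reduced to one value per box (last axis).
    diff = numpy.abs(output - target)
    loss = numpy.where(diff < 1.0, 0.5 * diff ** 2, diff - 0.5).sum(axis=-1)
    if weights is not None:
        loss = loss * weights
    return loss.sum()

output = numpy.array([[[2.5, 0.0, 0.4, 0.0],
                       [0.0, 0.0, 0.0, 0.0],
                       [0.0, 2.5, 0.0, 0.4]],
                      [[3.5, 0.0, 0.0, 0.0],
                       [0.0, 0.4, 0.0, 0.9],
                       [0.0, 0.0, 1.5, 0.0]]])
weights = numpy.array([[2, 1, 1],
                       [0, 3, 0]])
print(smooth_l1_reference(output, numpy.zeros_like(output)))           # 8.645
print(smooth_l1_reference(output, numpy.zeros_like(output), weights))  # 7.695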

test_gwas_utilities.py (project: MetaXcan, author: hakyimlab)
def assert_gwas_1(unit_test, gwas):
    expected_snp = pandas.Series(["rs1666", "rs1", "rs2", "rs3", "rs4", "rs6", "rs7", "rs7666", "rs8", "rs9"], dtype=numpy.str)
    numpy.testing.assert_array_equal(gwas[SNP], expected_snp)

    expected_effect = pandas.Series(["A", "C", "C", "G", "A", "G", "T", "A", "A", "A"], dtype=numpy.str)
    numpy.testing.assert_array_equal(gwas[EFFECT_ALLELE], expected_effect)

    expected_non_effect = pandas.Series(["G", "T", "T", "A", "G", "A", "C", "G", "G", "G"], dtype=numpy.str)
    numpy.testing.assert_array_equal(gwas[NON_EFFECT_ALLELE], expected_non_effect)

    expected_zscore = pandas.Series([0.3, -0.2, 0.5, 1.3, -0.3, 2.9, 4.35, 1.3, 0.09, 0.09], dtype=numpy.float32)
    numpy.testing.assert_allclose(gwas[ZSCORE], expected_zscore, rtol=0.001)

    expected_chromosome = pandas.Series(["chr1", "chr1", "chr1", "chr1", "chr1", "chr1", "chr1", "chr1", "chr1", "chr1"], dtype=numpy.str)
    numpy.testing.assert_array_equal(gwas[CHROMOSOME], expected_chromosome)

    expected_position = pandas.Series([0, 1, 5, 20, 30, 42, 43, 45, 50, 70])
    numpy.testing.assert_array_equal(gwas[POSITION], expected_position)

test_gwas_utilities.py (project: MetaXcan, author: hakyimlab)
def assert_gwas_2(unit_test, gwas):
    expected_snp = pandas.Series(["rsC", "rs1666", "rs1", "rs2",  "rs4", "rsB", "rsA", "rs7666", "rs8", "rs9"], dtype=numpy.str)
    numpy.testing.assert_array_equal(gwas[SNP], expected_snp)

    expected_effect = pandas.Series(["T", "A", "C", "C", "A", "G", "G", "A", "A", "A"], dtype=numpy.str)
    numpy.testing.assert_array_equal(gwas[EFFECT_ALLELE], expected_effect)

    expected_non_effect = pandas.Series(["C", "G", "T", "T", "G", "A", "A", "G", "G", "G"], dtype=numpy.str)
    numpy.testing.assert_array_equal(gwas[NON_EFFECT_ALLELE], expected_non_effect)

    expected_zscore = pandas.Series([4.35, 0.3, -0.2, 1.3, -0.3, 2.9, 1.3, 1.3, 0.09, 0.09], dtype=numpy.float32)
    numpy.testing.assert_allclose(gwas[ZSCORE], expected_zscore, rtol=0.001)

    expected_chromosome = pandas.Series(["chr1", "chr1", "chr1", "chr1", "chr1", "chr1", "chr1", "chr1", "chr1", "chr1"], dtype=numpy.str)
    numpy.testing.assert_array_equal(gwas[CHROMOSOME], expected_chromosome)

    expected_position = pandas.Series([None, None, None, None, None, None, None, None, None, None])
    numpy.testing.assert_array_equal(gwas[POSITION], expected_position)

test_prediction_model.py (project: MetaXcan, author: hakyimlab)
def test_load_model(self):
        snp_model = PredictionModel.load_model("tests/_td/dbs/test_1.db")

        e_e = SampleData.dataframe_from_extra(SampleData.sample_extra_2())
        numpy.testing.assert_array_equal(snp_model.extra[PredictionModel.WDBEQF.K_GENE], e_e[PredictionModel.WDBEQF.K_GENE])
        numpy.testing.assert_array_equal(snp_model.extra[PredictionModel.WDBEQF.K_GENE_NAME], e_e[PredictionModel.WDBEQF.K_GENE_NAME])
        numpy.testing.assert_array_equal(snp_model.extra[PredictionModel.WDBEQF.K_N_SNP_IN_MODEL], e_e[PredictionModel.WDBEQF.K_N_SNP_IN_MODEL])
        numpy.testing.assert_array_equal(snp_model.extra[PredictionModel.WDBEQF.K_PRED_PERF_R2], e_e[PredictionModel.WDBEQF.K_PRED_PERF_R2])
        numpy.testing.assert_array_equal(snp_model.extra[PredictionModel.WDBEQF.K_PRED_PERF_PVAL], e_e[PredictionModel.WDBEQF.K_PRED_PERF_PVAL])
        numpy.testing.assert_array_equal(snp_model.extra[PredictionModel.WDBEQF.K_PRED_PERF_QVAL], e_e[PredictionModel.WDBEQF.K_PRED_PERF_QVAL])

        e_w = SampleData.dataframe_from_weights(SampleData.sample_weights_2())
        numpy.testing.assert_array_equal(snp_model.weights[PredictionModel.WDBQF.K_RSID], e_w[PredictionModel.WDBQF.K_RSID])
        numpy.testing.assert_array_equal(snp_model.weights[PredictionModel.WDBQF.K_GENE], e_w[PredictionModel.WDBQF.K_GENE])
        numpy.testing.assert_array_equal(snp_model.weights[PredictionModel.WDBQF.K_WEIGHT], e_w[PredictionModel.WDBQF.K_WEIGHT])
        numpy.testing.assert_array_equal(snp_model.weights[PredictionModel.WDBQF.K_NON_EFFECT_ALLELE], e_w[PredictionModel.WDBQF.K_NON_EFFECT_ALLELE])
        numpy.testing.assert_array_equal(snp_model.weights[PredictionModel.WDBQF.K_EFFECT_ALLELE], e_w[PredictionModel.WDBQF.K_EFFECT_ALLELE])

test_matrix_manager.py (project: MetaXcan, author: hakyimlab)
def test_from_load(self):
        m = MatrixManager.load_matrix_manager("tests/_td/cov/cov.txt.gz")
        snps, cov = m.get("ENSG00000239789.1")
        self.assertEqual(snps, cov_data.SNPS_ENSG00000239789_1)
        numpy.testing.assert_array_almost_equal(cov, cov_data.COV_ENSG00000239789_1)

        n = m.n_snps("ENSG00000239789.1")
        self.assertEqual(n, len(cov_data.SNPS_ENSG00000239789_1))

        with self.assertRaises(Exceptions.InvalidArguments) as ctx:
            snps, cov = m.get("ENSG00000183742.8", ["rs7806506", "rs12718973"])

        self.assertTrue("whitelist" in ctx.exception.message) #?

        whitelist = ["rs3094989", "rs7806506", "rs12536095", "rs10226814"]
        snps, cov = m.get("ENSG00000183742.8", whitelist)
        self.assertEqual(snps, cov_data.SNPS_ENSG00000183742_8_w)
        numpy.testing.assert_array_almost_equal(cov, cov_data.COV_ENSG00000183742_8_w)

        snps, cov = m.get("ENSG00000004766.11")
        self.assertEqual(snps, cov_data.SNPS_ENSG00000004766_11)
        numpy.testing.assert_array_almost_equal(cov, cov_data.COV_ENSG00000004766_11)

        n = m.n_snps("ENSG00000004766.11")
        self.assertEqual(n, len(cov_data.COV_ENSG00000004766_11))

util_test.py (project: allennlp, author: allenai)
def test_get_sequence_lengths_from_binary_mask(self):
        binary_mask = torch.ByteTensor([[1, 1, 1, 0, 0, 0],
                                        [1, 1, 0, 0, 0, 0],
                                        [1, 1, 1, 1, 1, 1],
                                        [1, 0, 0, 0, 0, 0]])
        lengths = util.get_lengths_from_binary_sequence_mask(binary_mask)
        numpy.testing.assert_array_equal(lengths.numpy(), numpy.array([3, 2, 6, 1]))
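
The expected lengths [3, 2, 6, 1] are simply the per-row sums of the mask. A minimal sketch of an equivalent computation, assuming the helper reduces over the last dimension and returns long values (AllenNLP's actual implementation may differ):

import torch

def lengths_from_mask(mask):
    # Cast to long before summing so rows longer than 255 cannot overflow
    # 8-bit arithmetic (the behaviour covered by the next test).
    return mask.long().sum(-1)

mask = torch.ByteTensor([[1, 1, 1, 0, 0, 0],
                         [1, 1, 0, 0, 0, 0],
                         [1, 1, 1, 1, 1, 1],
                         [1, 0, 0, 0, 0, 0]])
print(lengths_from_mask(mask))  # tensor([3, 2, 6, 1])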

util_test.py (project: allennlp, author: allenai)
def test_get_sequence_lengths_converts_to_long_tensor_and_avoids_variable_overflow(self):
        # Tests that the following weird behaviour in Pytorch 0.1.12
        # doesn't happen for our sequence masks:
        #
        # mask = torch.ones([260]).byte()
        # mask.sum() # equals 260.
        # var_mask = torch.autograd.Variable(mask)
        # var_mask.sum() # equals 4, due to 8 bit precision - the sum overflows.
        binary_mask = Variable(torch.ones(2, 260).byte())
        lengths = util.get_lengths_from_binary_sequence_mask(binary_mask)
        numpy.testing.assert_array_equal(lengths.data.numpy(), numpy.array([260, 260]))

util_test.py (project: allennlp, author: allenai)
def test_weighted_sum_works_on_simple_input(self):
        batch_size = 1
        sentence_length = 5
        embedding_dim = 4
        sentence_array = numpy.random.rand(batch_size, sentence_length, embedding_dim)
        sentence_tensor = Variable(torch.from_numpy(sentence_array).float())
        attention_tensor = Variable(torch.FloatTensor([[.3, .4, .1, 0, 1.2]]))
        aggregated_array = util.weighted_sum(sentence_tensor, attention_tensor).data.numpy()
        assert aggregated_array.shape == (batch_size, embedding_dim)
        expected_array = (0.3 * sentence_array[0, 0] +
                          0.4 * sentence_array[0, 1] +
                          0.1 * sentence_array[0, 2] +
                          0.0 * sentence_array[0, 3] +
                          1.2 * sentence_array[0, 4])
        numpy.testing.assert_almost_equal(aggregated_array, [expected_array], decimal=5)
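
For this simple 3D case the expected value is just the attention-weighted combination of the sentence vectors, which the assertion spells out term by term. An equivalent NumPy one-liner, offered only as a sketch of the reduction (util.weighted_sum also supports the higher-order inputs exercised in the next test):

import numpy

def weighted_sum_reference(sentence_array, attention_array):
    # sentence_array: (batch, length, dim); attention_array: (batch, length).
    return numpy.einsum("bl,bld->bd", attention_array, sentence_array)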

util_test.py (project: allennlp, author: allenai)
def test_weighted_sum_handles_higher_order_input(self):
        batch_size = 1
        length_1 = 5
        length_2 = 6
        length_3 = 2
        embedding_dim = 4
        sentence_array = numpy.random.rand(batch_size, length_1, length_2, length_3, embedding_dim)
        attention_array = numpy.random.rand(batch_size, length_1, length_2, length_3)
        sentence_tensor = Variable(torch.from_numpy(sentence_array).float())
        attention_tensor = Variable(torch.from_numpy(attention_array).float())
        aggregated_array = util.weighted_sum(sentence_tensor, attention_tensor).data.numpy()
        assert aggregated_array.shape == (batch_size, length_1, length_2, embedding_dim)
        expected_array = (attention_array[0, 3, 2, 0] * sentence_array[0, 3, 2, 0] +
                          attention_array[0, 3, 2, 1] * sentence_array[0, 3, 2, 1])
        numpy.testing.assert_almost_equal(aggregated_array[0, 3, 2], expected_array, decimal=5)

util_test.py (project: allennlp, author: allenai)
def test_flatten_and_batch_shift_indices(self):
        indices = numpy.array([[[1, 2, 3, 4],
                                [5, 6, 7, 8],
                                [9, 9, 9, 9]],
                               [[2, 1, 0, 7],
                                [7, 7, 2, 3],
                                [0, 0, 4, 2]]])
        indices = Variable(torch.LongTensor(indices))
        shifted_indices = util.flatten_and_batch_shift_indices(indices, 10)
        numpy.testing.assert_array_equal(shifted_indices.data.numpy(),
                                         numpy.array([1, 2, 3, 4, 5, 6, 7, 8, 9,
                                                      9, 9, 9, 12, 11, 10, 17, 17,
                                                      17, 12, 13, 10, 10, 14, 12]))
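
The expected array makes the helper's behaviour visible: each batch entry's indices are shifted by batch_index * sequence_length (10 here) so that they address rows of a flattened (batch * sequence_length) tensor, and the result is returned as a 1D vector. A rough PyTorch sketch of that behaviour, not AllenNLP's exact implementation:

import torch

def flatten_and_shift(indices, sequence_length):
    batch_size = indices.size(0)
    # One offset per batch entry, broadcast over the remaining index dimensions.
    offsets = torch.arange(batch_size).long() * sequence_length
    offsets = offsets.view(batch_size, *([1] * (indices.dim() - 1)))
    return (indices + offsets).view(-1)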

util_test.py (project: allennlp, author: allenai)
def test_flattened_index_select(self):
        indices = numpy.array([[1, 2],
                               [3, 4]])
        targets = torch.ones([2, 6, 3]).cumsum(1) - 1
        # Make the second batch double its index so they're different.
        targets[1, :, :] *= 2
        indices = Variable(torch.LongTensor(indices))
        targets = Variable(targets)

        selected = util.flattened_index_select(targets, indices)

        assert list(selected.size()) == [2, 2, 2, 3]

        ones = numpy.ones([3])
        numpy.testing.assert_array_equal(selected[0, 0, 0, :].data.numpy(), ones)
        numpy.testing.assert_array_equal(selected[0, 0, 1, :].data.numpy(), ones * 2)
        numpy.testing.assert_array_equal(selected[0, 1, 0, :].data.numpy(), ones * 3)
        numpy.testing.assert_array_equal(selected[0, 1, 1, :].data.numpy(), ones * 4)

        numpy.testing.assert_array_equal(selected[1, 0, 0, :].data.numpy(), ones * 2)
        numpy.testing.assert_array_equal(selected[1, 0, 1, :].data.numpy(), ones * 4)
        numpy.testing.assert_array_equal(selected[1, 1, 0, :].data.numpy(), ones * 6)
        numpy.testing.assert_array_equal(selected[1, 1, 1, :].data.numpy(), ones * 8)

        # Check we only accept 2D indices.
        with pytest.raises(ConfigurationError):
            util.flattened_index_select(targets, torch.ones([3, 4, 5]))

util_test.py (project: allennlp, author: allenai)
def test_bucket_values(self):
        indices = torch.LongTensor([1, 2, 7, 1, 56, 900])
        bucketed_distances = util.bucket_values(indices)
        numpy.testing.assert_array_equal(bucketed_distances.numpy(),
                                         numpy.array([1, 2, 5, 1, 8, 9]))
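
The expected output is consistent with a scheme that keeps small values in identity buckets and maps larger values into logarithmic buckets capped at the last bucket index. The sketch below is inferred from that expectation and reproduces it; the real util.bucket_values may use different defaults or formula details:

import math
import torch

def bucket_values_sketch(values, num_identity_buckets=4, num_total_buckets=10):
    # Logarithmic bucket index for large values, capped at the last bucket.
    log_bucket = (values.float().log() / math.log(2)).floor().long() + (num_identity_buckets - 1)
    log_bucket = log_bucket.clamp(max=num_total_buckets - 1)
    # Small values keep their own value as the bucket index.
    use_identity = (values <= num_identity_buckets).long()
    return use_identity * values + (1 - use_identity) * log_bucket

print(bucket_values_sketch(torch.LongTensor([1, 2, 7, 1, 56, 900])))
# tensor([1, 2, 5, 1, 8, 9])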

nosetester.py (project: radar, author: amoose136)
def get_package_name(filepath):
    """
    Given a path where a package is installed, determine its name.

    Parameters
    ----------
    filepath : str
        Path to a file. If the determination fails, "numpy" is returned.

    Examples
    --------
    >>> np.testing.nosetester.get_package_name('nonsense')
    'numpy'

    """

    fullpath = filepath[:]
    pkg_name = []
    while 'site-packages' in filepath or 'dist-packages' in filepath:
        filepath, p2 = os.path.split(filepath)
        if p2 in ('site-packages', 'dist-packages'):
            break
        pkg_name.append(p2)

    # if package name determination failed, just default to numpy/scipy
    if not pkg_name:
        if 'scipy' in fullpath:
            return 'scipy'
        else:
            return 'numpy'

    # otherwise, reverse to get correct order and return
    pkg_name.reverse()

    # don't include the outer egg directory
    if pkg_name[0].endswith('.egg'):
        pkg_name.pop(0)

    return '.'.join(pkg_name)

nosetester.py (project: radar, author: amoose136)
def __init__(self, package=None, raise_warnings="release", depth=0):
        # Back-compat: 'None' used to mean either "release" or "develop"
        # depending on whether this was a release or develop version of
        # numpy. Those semantics were fine for testing numpy, but not so
        # helpful for downstream projects like scipy that use
        # numpy.testing. (They want to set this based on whether *they* are a
        # release or develop version, not whether numpy is.) So we continue to
        # accept 'None' for back-compat, but it's now just an alias for the
        # default "release".
        if raise_warnings is None:
            raise_warnings = "release"

        package_name = None
        if package is None:
            f = sys._getframe(1 + depth)
            package_path = f.f_locals.get('__file__', None)
            if package_path is None:
                raise AssertionError
            package_path = os.path.dirname(package_path)
            package_name = f.f_locals.get('__name__', None)
        elif isinstance(package, type(os)):
            package_path = os.path.dirname(package.__file__)
            package_name = getattr(package, '__name__', None)
        else:
            package_path = str(package)

        self.package_path = package_path

        # Find the package name under test; this name is used to limit coverage
        # reporting (if enabled).
        if package_name is None:
            package_name = get_package_name(package_path)
        self.package_name = package_name

        # Set to "release" in constructor in maintenance branches.
        self.raise_warnings = raise_warnings

test_illumination_statistics.py (project: DeepProfiler, author: jccaicedo)
def test_init(illumination_stats):
    histogram = numpy.zeros((3, 2**16), dtype=numpy.float64)

    assert illumination_stats.depth == 2 ** 16
    assert illumination_stats.channels == ["DNA", "ER", "Mito"]
    assert illumination_stats.name == ""
    assert illumination_stats.down_scale_factor == 2
    assert illumination_stats.median_filter_size == 3
    numpy.testing.assert_array_equal(illumination_stats.hist, histogram)
    assert illumination_stats.count == 0
    assert illumination_stats.expected == 1
    assert illumination_stats.mean_image is None
    assert illumination_stats.original_image_size is None

test_illumination_statistics.py (project: DeepProfiler, author: jccaicedo)
def test_add_to_mean_no_scaling(illumination_stats):
    numpy.random.seed(8)
    image = numpy.random.randint(256, size=(16, 16, 3), dtype=numpy.uint16)

    illumination_stats.down_scale_factor = 1
    illumination_stats.addToMean(image)

    assert illumination_stats.mean_image.shape == (16, 16, 3)
    # This method rescales the input image and normalizes pixels according to
    # the data type. We restore the values in this test to match the input for comparison.
    result_mean = illumination_stats.mean_image #* (2 ** 16)
    numpy.testing.assert_array_equal(numpy.round(result_mean).astype(numpy.uint16), image)

test_illumination_statistics.py (project: DeepProfiler, author: jccaicedo)
def test_add_to_mean_with_scaling(illumination_stats):
    numpy.random.seed(8)
    image = numpy.random.randint(256, size=(16, 16, 3), dtype=numpy.uint16)

    illumination_stats.addToMean(image)

    assert illumination_stats.mean_image.shape == (8, 8, 3)
    result_mean = illumination_stats.mean_image
    assert result_mean.sum() > 0
    #numpy.testing.assert_array_equal(result_mean.astype(numpy.uint16), image)

test_illumination_statistics.py (project: DeepProfiler, author: jccaicedo)
def test_process_image(illumination_stats):
    numpy.random.seed(8)
    image = numpy.random.randint(256, size=(16, 16, 3), dtype=numpy.uint16)

    illumination_stats.processImage(0, image, None)

    histogram1 = numpy.histogram(image[:, :, 0], bins=2 ** 16, range=(0, 2 ** 16))[0]
    histogram2 = numpy.histogram(image[:, :, 1], bins=2 ** 16, range=(0, 2 ** 16))[0]
    histogram3 = numpy.histogram(image[:, :, 2], bins=2 ** 16, range=(0, 2 ** 16))[0]

    assert illumination_stats.count == 1
    numpy.testing.assert_array_equal(illumination_stats.hist[0], histogram1)
    numpy.testing.assert_array_equal(illumination_stats.hist[1], histogram2)
    numpy.testing.assert_array_equal(illumination_stats.hist[2], histogram3)

test_compression.py (project: DeepProfiler, author: jccaicedo)
def test_init(compress, out_dir):
    stats = {"original_size": [16, 16]}
    channels = ["DNA", "ER", "Mito"]
    control_distribution = numpy.zeros((3, 2 ** 8), dtype=numpy.float64)

    assert compress.stats == stats
    assert compress.channels == channels
    assert compress.out_dir == out_dir
    assert compress.count == 0
    assert compress.expected == 1
    assert not compress.metadata_control_filter("x")
    numpy.testing.assert_array_equal(compress.controls_distribution, control_distribution)
    assert compress.source_format == "tiff"
    assert compress.target_format == "png"
    assert compress.output_shape == [16, 16]

test_compression.py (project: DeepProfiler, author: jccaicedo)
def test_set_control_samples_filter(compress):
    test_filter = lambda x: True
    control_distribution = numpy.zeros((3, 2 ** 8), dtype=numpy.float64)

    compress.set_control_samples_filter(test_filter)

    assert compress.metadata_control_filter(1)
    numpy.testing.assert_array_equal(compress.controls_distribution, control_distribution)

