Example source code: Python add() in practice
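
The entries below collect uses of numpy's np.add from a range of open-source projects. As a quick reference, here is a minimal sketch of the call forms that recur throughout: plain element-wise addition, the out= parameter for writing into a preallocated array, and the reduce/accumulate methods that every ufunc provides.

import numpy as np

a = np.array([1.0, 2.0, 3.0])
b = np.array([10.0, 20.0, 30.0])

print(np.add(a, b))            # element-wise sum, same as a + b -> [11. 22. 33.]

out = np.empty_like(a)
np.add(a, b, out=out)          # write into a preallocated array, avoiding a temporary
print(out)                     # [11. 22. 33.]

print(np.add.reduce(a))        # 6.0  (np.add is a ufunc, so reduce/accumulate work)
print(np.add.accumulate(a))    # [1. 3. 6.]
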

training.py (project: treecat, author: posterior)
def add_row(self, row_id):
        logger.debug('TreeCatTrainer.add_row %d', row_id)
        assert row_id not in self._added_rows, row_id
        self._added_rows.add(row_id)

        # These are used for scratch work, so we create them each step.
        np.add(self._vert_ss, self._vert_prior, out=self._vert_probs)
        np.add(self._feat_ss, self._feat_prior, out=self._feat_probs)
        np.add(self._meas_ss, self._meas_prior, out=self._meas_probs)

        treecat_add_row(
            self._table.feature_types,
            self._table.ragged_index,
            self._table.data[row_id, :],
            self._tree.tree_grid,
            self._program,
            self._assignments[row_id, :],
            self._vert_ss,
            self._edge_ss,
            self._feat_ss,
            self._meas_ss,
            self._vert_probs,
            self._edge_probs,
            self._feat_probs,
            self._meas_probs, )
test_reduce.py (project: npstreams, author: LaurentRDC)
def test_out_parameter(self):
        """ Test that the kwargs ``out`` is correctly passed to reduction function """

        with self.subTest('axis = -1'):
            not_out = last(ireduce_ufunc(self.source, np.add, axis = -1))
            out = np.empty_like(self.source[0])
            last(ireduce_ufunc(self.source, ufunc = np.add, out = out))

            self.assertTrue(np.allclose(not_out, out))

        with self.subTest('axis != -1'):
            not_out = last(ireduce_ufunc(self.source, np.add, axis = 2))
            out = np.empty_like(self.source[0])
            from_out = last(ireduce_ufunc(self.source, ufunc = np.add, out = out, axis = 2))

            self.assertTrue(np.allclose(not_out, from_out))
7_2_manydicts_server.py (project: Tencent2017_Final_Rank28_code, author: Dojocat-GO)
def proc(csv_na,con):
    dicts=[]
    for i in range(0,len(con)):
        dicts.append(dict())
    sum=0
    f=csv.DictReader(open(csv_na))
    for rec in f:
        rec['single']='1'
        #print(csv_na,rec['clickTime'])
        label=int(rec['label'])
        for i in range(0,len(con)):
            k=rec[con[i][0]]+'#'+rec[con[i][1]]
            if k in dicts[i]:
                dicts[i][k]=np.add(dicts[i][k],[label,1])
            else:
                dicts[i][k]=[label,1]
        sum+=1
    return  dicts,sum
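
Each value accumulated above is a [label_sum, impression_count] pair per feature-pair key, so a per-key rate can be read off once the file has been scanned. A minimal post-processing sketch (the click-through-rate interpretation is an assumption about how this project later uses the counts; rate_per_key is a hypothetical helper name):

def rate_per_key(dicts):
    """Turn the accumulated [label_sum, count] pairs into a label rate per key."""
    return [{k: v[0] / v[1] for k, v in d.items()} for d in dicts]
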
ocr.py (project: OCR, author: OrangeGuo)
def train(self, training_data_array):
        for data in training_data_array:
            # Forward pass: compute hidden- and output-layer activations
            y1 = np.dot(np.mat(self.theta1), np.mat(data.y0).T)
            sum1 = y1 + np.mat(self.input_layer_bias)
            y1 = self.sigmoid(sum1)

            y2 = np.dot(np.array(self.theta2), y1)
            y2 = np.add(y2, self.hidden_layer_bias)
            y2 = self.sigmoid(y2)

            # Compute output-layer and (backpropagated) hidden-layer errors
            actual_vals = [0] * 10
            actual_vals[data.label] = 1
            output_errors = np.mat(actual_vals).T - np.mat(y2)
            hidden_errors = np.multiply(np.dot(np.mat(self.theta2).T, output_errors), self.sigmoid_prime(sum1))

            # Update the weights and biases
            self.theta1 += self.LEARNING_RATE * np.dot(np.mat(hidden_errors), np.mat(data.y0))
            self.theta2 += self.LEARNING_RATE * np.dot(np.mat(output_errors), np.mat(y1).T)
            self.hidden_layer_bias += self.LEARNING_RATE * output_errors
            self.input_layer_bias += self.LEARNING_RATE * hidden_errors
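
The train method above relies on sigmoid and sigmoid_prime helpers that are not part of this excerpt. A minimal sketch of what such helpers conventionally look like, written as free functions for brevity (the OCR project's actual method definitions may differ):

import numpy as np

def sigmoid(z):
    """Logistic activation, applied element-wise."""
    return 1.0 / (1.0 + np.exp(-z))

def sigmoid_prime(z):
    """Derivative of the logistic function, evaluated at the pre-activation z."""
    s = sigmoid(z)
    return np.multiply(s, 1.0 - s)
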
seqLib.py (project: nrc, author: IcarPA-TBlab)
def getComplexRepr(sequenza, K):
    """
    Build the representation obtained by summing the individual representations
    into a single matrix. K is the list of k-mer sizes.
    """
    m_init=getMatrice(sequenza, K[0])
    dim=2**K[-1]
    #out=np.zeros((dim, dim))
    for ki in K[1:]:
        m_init=espandiMatrice(m_init)
        # divide the elements by two to reduce the impact of the shorter
        # and therefore more common terms
        m_init /= 2.0
        temp=getMatrice(sequenza, ki)
        m_init=np.add(m_init,temp)
    return m_init
test_multiarray.py (project: radar, author: amoose136)
def test_ufunc_override_normalize_signature(self):
        # 2016-01-29: NUMPY_UFUNC_DISABLED
        return

        # gh-5674
        class SomeClass(object):
            def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
                return kw

        a = SomeClass()
        kw = np.add(a, [1])
        assert_('sig' not in kw and 'signature' not in kw)
        kw = np.add(a, [1], sig='ii->i')
        assert_('sig' not in kw and 'signature' in kw)
        assert_equal(kw['signature'], 'ii->i')
        kw = np.add(a, [1], signature='ii->i')
        assert_('sig' not in kw and 'signature' in kw)
        assert_equal(kw['signature'], 'ii->i')
test_ufunc.py (project: radar, author: amoose136)
def test_forced_sig(self):
        a = 0.5*np.arange(3, dtype='f8')
        assert_equal(np.add(a, 0.5), [0.5, 1, 1.5])
        assert_equal(np.add(a, 0.5, sig='i', casting='unsafe'), [0, 0, 1])
        assert_equal(np.add(a, 0.5, sig='ii->i', casting='unsafe'), [0, 0, 1])
        assert_equal(np.add(a, 0.5, sig=('i4',), casting='unsafe'), [0, 0, 1])
        assert_equal(np.add(a, 0.5, sig=('i4', 'i4', 'i4'),
                                            casting='unsafe'), [0, 0, 1])

        b = np.zeros((3,), dtype='f8')
        np.add(a, 0.5, out=b)
        assert_equal(b, [0.5, 1, 1.5])
        b[:] = 0
        np.add(a, 0.5, sig='i', out=b, casting='unsafe')
        assert_equal(b, [0, 0, 1])
        b[:] = 0
        np.add(a, 0.5, sig='ii->i', out=b, casting='unsafe')
        assert_equal(b, [0, 0, 1])
        b[:] = 0
        np.add(a, 0.5, sig=('i4',), out=b, casting='unsafe')
        assert_equal(b, [0, 0, 1])
        b[:] = 0
        np.add(a, 0.5, sig=('i4', 'i4', 'i4'), out=b, casting='unsafe')
        assert_equal(b, [0, 0, 1])
test_ufunc.py (project: radar, author: amoose136)
def test_safe_casting(self):
        # In old versions of numpy, in-place operations used the 'unsafe'
        # casting rules. In versions >= 1.10, 'same_kind' is the
        # default and an exception is raised instead of a warning
        # when 'same_kind' is not satisfied.
        a = np.array([1, 2, 3], dtype=int)
        # Non-in-place addition is fine
        assert_array_equal(assert_no_warnings(np.add, a, 1.1),
                           [2.1, 3.1, 4.1])
        assert_raises(TypeError, np.add, a, 1.1, out=a)

        def add_inplace(a, b):
            a += b

        assert_raises(TypeError, add_inplace, a, 1.1)
        # Make sure that explicitly overriding the exception is allowed:
        assert_no_warnings(np.add, a, 1.1, out=a, casting="unsafe")
        assert_array_equal(a, [2, 3, 4])
test_scalarmath.py (project: radar, author: amoose136)
def _test_type_repr(self, t):
        finfo = np.finfo(t)
        last_fraction_bit_idx = finfo.nexp + finfo.nmant
        last_exponent_bit_idx = finfo.nexp
        storage_bytes = np.dtype(t).itemsize*8
        # could add some more types to the list below
        for which in ['small denorm', 'small norm']:
            # Values from http://en.wikipedia.org/wiki/IEEE_754
            constr = np.array([0x00]*storage_bytes, dtype=np.uint8)
            if which == 'small denorm':
                byte = last_fraction_bit_idx // 8
                bytebit = 7-(last_fraction_bit_idx % 8)
                constr[byte] = 1 << bytebit
            elif which == 'small norm':
                byte = last_exponent_bit_idx // 8
                bytebit = 7-(last_exponent_bit_idx % 8)
                constr[byte] = 1 << bytebit
            else:
                raise ValueError('hmm')
            val = constr.view(t)[0]
            val_repr = repr(val)
            val2 = t(eval(val_repr))
            if not (val2 == 0 and val < 1e-100):
                assert_equal(val, val2)
test_core.py (project: radar, author: amoose136)
def test_addsumprod(self):
        # Tests add, sum, product.
        (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
        assert_equal(np.add.reduce(x), add.reduce(x))
        assert_equal(np.add.accumulate(x), add.accumulate(x))
        assert_equal(4, sum(array(4), axis=0))
        assert_equal(4, sum(array(4), axis=0))
        assert_equal(np.sum(x, axis=0), sum(x, axis=0))
        assert_equal(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0))
        assert_equal(np.sum(x, 0), sum(x, 0))
        assert_equal(np.product(x, axis=0), product(x, axis=0))
        assert_equal(np.product(x, 0), product(x, 0))
        assert_equal(np.product(filled(xm, 1), axis=0), product(xm, axis=0))
        s = (3, 4)
        x.shape = y.shape = xm.shape = ym.shape = s
        if len(s) > 1:
            assert_equal(np.concatenate((x, y), 1), concatenate((xm, ym), 1))
            assert_equal(np.add.reduce(x, 1), add.reduce(x, 1))
            assert_equal(np.sum(x, 1), sum(x, 1))
            assert_equal(np.product(x, 1), product(x, 1))
test_core.py (project: radar, author: amoose136)
def test_datafriendly_add(self):
        # Test keeping data w/ (inplace) addition
        x = array([1, 2, 3], mask=[0, 0, 1])
        # Test add w/ scalar
        xx = x + 1
        assert_equal(xx.data, [2, 3, 3])
        assert_equal(xx.mask, [0, 0, 1])
        # Test iadd w/ scalar
        x += 1
        assert_equal(x.data, [2, 3, 3])
        assert_equal(x.mask, [0, 0, 1])
        # Test add w/ array
        x = array([1, 2, 3], mask=[0, 0, 1])
        xx = x + array([1, 2, 3], mask=[1, 0, 0])
        assert_equal(xx.data, [1, 4, 3])
        assert_equal(xx.mask, [1, 0, 1])
        # Test iadd w/ array
        x = array([1, 2, 3], mask=[0, 0, 1])
        x += array([1, 2, 3], mask=[1, 0, 0])
        assert_equal(x.data, [1, 4, 3])
        assert_equal(x.mask, [1, 0, 1])
test_old_ma.py (project: radar, author: amoose136)
def test_testAddSumProd(self):
        # Test add, sum, product.
        (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
        self.assertTrue(eq(np.add.reduce(x), add.reduce(x)))
        self.assertTrue(eq(np.add.accumulate(x), add.accumulate(x)))
        self.assertTrue(eq(4, sum(array(4), axis=0)))
        self.assertTrue(eq(4, sum(array(4), axis=0)))
        self.assertTrue(eq(np.sum(x, axis=0), sum(x, axis=0)))
        self.assertTrue(eq(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0)))
        self.assertTrue(eq(np.sum(x, 0), sum(x, 0)))
        self.assertTrue(eq(np.product(x, axis=0), product(x, axis=0)))
        self.assertTrue(eq(np.product(x, 0), product(x, 0)))
        self.assertTrue(eq(np.product(filled(xm, 1), axis=0),
                           product(xm, axis=0)))
        if len(s) > 1:
            self.assertTrue(eq(np.concatenate((x, y), 1),
                               concatenate((xm, ym), 1)))
            self.assertTrue(eq(np.add.reduce(x, 1), add.reduce(x, 1)))
            self.assertTrue(eq(np.sum(x, 1), sum(x, 1)))
            self.assertTrue(eq(np.product(x, 1), product(x, 1)))
archipack_polylib.py (project: bpy_lambda, author: bcongdon)
def _as_spline(curve, geometry):
        """
            add a spline into a blender curve
            @curve : blender curve
        """
        if hasattr(geometry, 'exterior'):
            # Polygon
            Io._add_spline(curve, geometry.exterior)
            for geom in geometry.interiors:
                Io._add_spline(curve, geom)
        elif hasattr(geometry, 'geoms'):
            # Multi and Collections
            for geom in geometry.geoms:
                Io._as_spline(curve, geom)
        else:
            # LinearRing, LineString and Shape
            Io._add_spline(curve, geometry)
test_constant.py (project: ngraph, author: NervanaSystems)
def test_cputensor_fusion():
    """TODO."""
    M = ng.make_axis(length=1)
    N = ng.make_axis(length=3)

    np_a = np.array([[1, 2, 3]], dtype=np.float32)
    np_b = np.array([[3, 2, 1]], dtype=np.float32)
    np_d = np.multiply(np_b, np.add(np_a, 2))

    a = ng.constant(np_a, [M, N])
    b = ng.constant(np_b, [M, N])
    c = ng.constant(2)
    d = ng.multiply(b, ng.add(a, c))

    with executor(d) as ex:
        result = ex()
    print(result)
    assert np.array_equal(result, np_d)
test_kernel_generator.py (project: ngraph, author: NervanaSystems)
def test_4d_elementwise(transformer_factory, input_axes):

    # Limiting maximum absolute value for tensors elements to 7.9.
    # See description in function test_exit_condition above

    is_flex = is_flex_factory(transformer_factory)
    clip_val = 7.9 if is_flex else 0

    x_val = rng.randn_abs_clip(input_axes, clip_max=clip_val)
    y_val = rng.randn_abs_clip(input_axes, clip_max=clip_val)
    x = ng.constant(x_val, input_axes)
    y = ng.constant(y_val, input_axes)

    out = ng.add(x, y)

    with executor(out) as ex:
        graph_val = ex()

    np_val = np.add(x_val, y_val)

    ng.testing.assert_allclose(graph_val, np_val, rtol=1e-4)
random.py (project: ngraph, author: NervanaSystems)
def discrete_uniform(self, low, high, quantum, axes, dtype=None):
        """
        Returns a tensor initialized with a discrete uniform distribution.

        Arguments:
            low: The lower limit of the values.
            high: The upper limit of the values.
            quantum: Distance between values.
            axes: The axes of the tensor.
            dtype: The dtype of the tensor. Defaults to the generator's dtype.

        Returns:
            The tensor.

        """
        if dtype is None:
            dtype = self.dtype

        n = math.floor((high - low) / quantum)
        result = np.array(self.rng.random_integers(
            0, n, ng.make_axes(axes).lengths), dtype=dtype)
        np.multiply(result, quantum, result)
        np.add(result, low, result)
        return result
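
The two in-place ufunc calls at the end are equivalent to result = result * quantum + low, written so that no temporary array is allocated. A stand-alone numpy-only sketch of the same sampling step, with plain shapes in place of ngraph axes (the Generator-based RNG is an assumption here; the original uses the legacy random_integers API):

import math
import numpy as np

def discrete_uniform_np(rng, low, high, quantum, shape, dtype=np.float64):
    """Sample uniformly from the grid low, low + quantum, ..., up to at most high."""
    n = math.floor((high - low) / quantum)
    result = rng.integers(0, n + 1, size=shape).astype(dtype)  # inclusive upper bound
    np.multiply(result, quantum, out=result)  # scale onto the quantum grid
    np.add(result, low, out=result)           # shift so the grid starts at low
    return result

samples = discrete_uniform_np(np.random.default_rng(0), low=-1.0, high=1.0,
                              quantum=0.25, shape=(2, 3))
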
get_centroid.py (project: BioIR, author: nlpaueb)
def get_centroid_idf(text, emb, idf, stopwords, D):
    # Computing Terms' Frequency
    tf = defaultdict(int)
    tokens = bioclean(text)
    for word in tokens:
        if word in emb and word not in stopwords:
            tf[word] += 1

    # Computing the centroid
    centroid = np.zeros((1, D))
    div = 0

    for word in tf:
        if word in idf:
            p = tf[word] * idf[word]
            centroid = np.add(centroid, emb[word]*p)
            div += p
    if div != 0:
        centroid = np.divide(centroid, div)
    return centroid
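
In vector form the function above computes centroid = sum_w tf(w) * idf(w) * emb[w], divided by sum_w tf(w) * idf(w). A stand-alone sketch over already-tokenised input (bioclean and the real word embeddings are external to this excerpt, so pre-cleaned tokens and a dimensionality argument are assumed; stop-word filtering is omitted for brevity):

import numpy as np
from collections import Counter

def centroid_idf(tokens, emb, idf, dim):
    """tf-idf weighted centroid of the embeddings of the given tokens."""
    tf = Counter(t for t in tokens if t in emb and t in idf)
    centroid = np.zeros(dim)
    div = 0.0
    for word, count in tf.items():
        weight = count * idf[word]
        centroid = np.add(centroid, weight * emb[word])
        div += weight
    return centroid / div if div else centroid
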
batchdispenser.py (project: LSTM_PIT, author: snsun)
def apply_cmvn(utt, mean, variance, reverse=False):
    """Apply mean and variance normalisation based on previously computed statistics.

    Args:
        utt: The utterance feature numpy matrix.
        mean: A numpy array containing the mean of the features.
        variance: A numpy array containing the variance of the features.
        reverse: If True, undo the normalisation instead of applying it.

    Returns:
        A numpy array containing the mean and variance normalized features
    """
    if not reverse:
        #return mean and variance normalised utterance
        return np.divide(np.subtract(utt, mean), np.sqrt(variance))
    else:
        #reversed normalization
        return np.add(np.multiply(utt, np.sqrt(variance)), mean)
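
A minimal usage sketch for the function above, assuming utt is a (frames, features) matrix and that the per-dimension statistics are computed from the utterance itself (in the original pipeline they come from precomputed CMVN statistics):

import numpy as np

utt = np.random.randn(200, 40)        # 200 frames of 40-dimensional features (illustrative shapes)
mean = np.mean(utt, axis=0)
variance = np.var(utt, axis=0)

normalised = apply_cmvn(utt, mean, variance)                    # zero mean, unit variance
restored = apply_cmvn(normalised, mean, variance, reverse=True)
assert np.allclose(restored, utt)
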
saliency.py (project: saliency, author: shuuchen)
def sumNormalizedFeatures(features, levels=9, startSize=(1983*8, 1088*8)):
    """
        Normalizes the feature maps in argument features and combines them into one.
        Arguments:
            features    : list of feature maps (images)
            levels      : the levels of the Gaussian pyramid used to
                            calculate the feature maps.
            startSize   : the base size of the Gaussian pyramid used to
                            calculate the feature maps.
        returns:
            a combined feature map.
    """
    # integer division keeps the size integral (cv2.resize expects an int tuple)
    commonWidth = startSize[0] // 2**(levels//2 - 1)
    commonHeight = startSize[1] // 2**(levels//2 - 1)
    commonSize = commonWidth, commonHeight
    logger.info("Size of conspicuity map: %s", commonSize)
    consp = N(cv2.resize(features[0][1], commonSize))
    for f in features[1:]:
        resized = N(cv2.resize(f[1], commonSize))
        consp = cv2.add(consp, resized)
    return consp
test_multiarray.py (project: krpcScripts, author: jwvanderbeck)
def test_ufunc_override_normalize_signature(self):
        # 2016-01-29: NUMPY_UFUNC_DISABLED
        return

        # gh-5674
        class SomeClass(object):
            def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
                return kw

        a = SomeClass()
        kw = np.add(a, [1])
        assert_('sig' not in kw and 'signature' not in kw)
        kw = np.add(a, [1], sig='ii->i')
        assert_('sig' not in kw and 'signature' in kw)
        assert_equal(kw['signature'], 'ii->i')
        kw = np.add(a, [1], signature='ii->i')
        assert_('sig' not in kw and 'signature' in kw)
        assert_equal(kw['signature'], 'ii->i')
test_ufunc.py (project: krpcScripts, author: jwvanderbeck)
def test_forced_sig(self):
        a = 0.5*np.arange(3, dtype='f8')
        assert_equal(np.add(a, 0.5), [0.5, 1, 1.5])
        assert_equal(np.add(a, 0.5, sig='i', casting='unsafe'), [0, 0, 1])
        assert_equal(np.add(a, 0.5, sig='ii->i', casting='unsafe'), [0, 0, 1])
        assert_equal(np.add(a, 0.5, sig=('i4',), casting='unsafe'), [0, 0, 1])
        assert_equal(np.add(a, 0.5, sig=('i4', 'i4', 'i4'),
                                            casting='unsafe'), [0, 0, 1])

        b = np.zeros((3,), dtype='f8')
        np.add(a, 0.5, out=b)
        assert_equal(b, [0.5, 1, 1.5])
        b[:] = 0
        np.add(a, 0.5, sig='i', out=b, casting='unsafe')
        assert_equal(b, [0, 0, 1])
        b[:] = 0
        np.add(a, 0.5, sig='ii->i', out=b, casting='unsafe')
        assert_equal(b, [0, 0, 1])
        b[:] = 0
        np.add(a, 0.5, sig=('i4',), out=b, casting='unsafe')
        assert_equal(b, [0, 0, 1])
        b[:] = 0
        np.add(a, 0.5, sig=('i4', 'i4', 'i4'), out=b, casting='unsafe')
        assert_equal(b, [0, 0, 1])
test_ufunc.py (project: krpcScripts, author: jwvanderbeck)
def test_safe_casting(self):
        # In old versions of numpy, in-place operations used the 'unsafe'
        # casting rules. In versions >= 1.10, 'same_kind' is the
        # default and an exception is raised instead of a warning
        # when 'same_kind' is not satisfied.
        a = np.array([1, 2, 3], dtype=int)
        # Non-in-place addition is fine
        assert_array_equal(assert_no_warnings(np.add, a, 1.1),
                           [2.1, 3.1, 4.1])
        assert_raises(TypeError, np.add, a, 1.1, out=a)

        def add_inplace(a, b):
            a += b

        assert_raises(TypeError, add_inplace, a, 1.1)
        # Make sure that explicitly overriding the exception is allowed:
        assert_no_warnings(np.add, a, 1.1, out=a, casting="unsafe")
        assert_array_equal(a, [2, 3, 4])
test_scalarmath.py (project: krpcScripts, author: jwvanderbeck)
def _test_type_repr(self, t):
        finfo = np.finfo(t)
        last_fraction_bit_idx = finfo.nexp + finfo.nmant
        last_exponent_bit_idx = finfo.nexp
        storage_bytes = np.dtype(t).itemsize*8
        # could add some more types to the list below
        for which in ['small denorm', 'small norm']:
            # Values from http://en.wikipedia.org/wiki/IEEE_754
            constr = np.array([0x00]*storage_bytes, dtype=np.uint8)
            if which == 'small denorm':
                byte = last_fraction_bit_idx // 8
                bytebit = 7-(last_fraction_bit_idx % 8)
                constr[byte] = 1 << bytebit
            elif which == 'small norm':
                byte = last_exponent_bit_idx // 8
                bytebit = 7-(last_exponent_bit_idx % 8)
                constr[byte] = 1 << bytebit
            else:
                raise ValueError('hmm')
            val = constr.view(t)[0]
            val_repr = repr(val)
            val2 = t(eval(val_repr))
            if not (val2 == 0 and val < 1e-100):
                assert_equal(val, val2)
test_core.py (project: krpcScripts, author: jwvanderbeck)
def test_addsumprod(self):
        # Tests add, sum, product.
        (x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
        assert_equal(np.add.reduce(x), add.reduce(x))
        assert_equal(np.add.accumulate(x), add.accumulate(x))
        assert_equal(4, sum(array(4), axis=0))
        assert_equal(4, sum(array(4), axis=0))
        assert_equal(np.sum(x, axis=0), sum(x, axis=0))
        assert_equal(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0))
        assert_equal(np.sum(x, 0), sum(x, 0))
        assert_equal(np.product(x, axis=0), product(x, axis=0))
        assert_equal(np.product(x, 0), product(x, 0))
        assert_equal(np.product(filled(xm, 1), axis=0), product(xm, axis=0))
        s = (3, 4)
        x.shape = y.shape = xm.shape = ym.shape = s
        if len(s) > 1:
            assert_equal(np.concatenate((x, y), 1), concatenate((xm, ym), 1))
            assert_equal(np.add.reduce(x, 1), add.reduce(x, 1))
            assert_equal(np.sum(x, 1), sum(x, 1))
            assert_equal(np.product(x, 1), product(x, 1))
test_core.py (project: krpcScripts, author: jwvanderbeck)
def test_datafriendly_add(self):
        # Test keeping data w/ (inplace) addition
        x = array([1, 2, 3], mask=[0, 0, 1])
        # Test add w/ scalar
        xx = x + 1
        assert_equal(xx.data, [2, 3, 3])
        assert_equal(xx.mask, [0, 0, 1])
        # Test iadd w/ scalar
        x += 1
        assert_equal(x.data, [2, 3, 3])
        assert_equal(x.mask, [0, 0, 1])
        # Test add w/ array
        x = array([1, 2, 3], mask=[0, 0, 1])
        xx = x + array([1, 2, 3], mask=[1, 0, 0])
        assert_equal(xx.data, [1, 4, 3])
        assert_equal(xx.mask, [1, 0, 1])
        # Test iadd w/ array
        x = array([1, 2, 3], mask=[0, 0, 1])
        x += array([1, 2, 3], mask=[1, 0, 0])
        assert_equal(x.data, [1, 4, 3])
        assert_equal(x.mask, [1, 0, 1])
test_old_ma.py (project: krpcScripts, author: jwvanderbeck)
def test_testAddSumProd(self):
        # Test add, sum, product.
        (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
        self.assertTrue(eq(np.add.reduce(x), add.reduce(x)))
        self.assertTrue(eq(np.add.accumulate(x), add.accumulate(x)))
        self.assertTrue(eq(4, sum(array(4), axis=0)))
        self.assertTrue(eq(4, sum(array(4), axis=0)))
        self.assertTrue(eq(np.sum(x, axis=0), sum(x, axis=0)))
        self.assertTrue(eq(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0)))
        self.assertTrue(eq(np.sum(x, 0), sum(x, 0)))
        self.assertTrue(eq(np.product(x, axis=0), product(x, axis=0)))
        self.assertTrue(eq(np.product(x, 0), product(x, 0)))
        self.assertTrue(eq(np.product(filled(xm, 1), axis=0),
                           product(xm, axis=0)))
        if len(s) > 1:
            self.assertTrue(eq(np.concatenate((x, y), 1),
                               concatenate((xm, ym), 1)))
            self.assertTrue(eq(np.add.reduce(x, 1), add.reduce(x, 1)))
            self.assertTrue(eq(np.sum(x, 1), sum(x, 1)))
            self.assertTrue(eq(np.product(x, 1), product(x, 1)))
ensemble_loader.py (project: carvana-challenge, author: chplushsieh)
def __getitem__(self, idx):

        img_name = self.img_names[idx]
        ensembled = np.zeros(const.img_size)

        for i, pred_dir in enumerate(self.pred_dirs):
            pred_path = os.path.join(const.OUTPUT_DIR, pred_dir, const.PROBS_DIR_NAME, img_name + '.npy')
            img_prob = np.load(pred_path)

            weighted_img_prob = np.multiply(img_prob, self.weights[i])
            ensembled = np.add(ensembled, weighted_img_prob)

        # save into new output/ folder
        submit.save_ensembled_prob_map(self.ensemble_dir, img_name, ensembled)
        #plt.imshow(ensembled)
        #plt.show()

        return img_name, ensembled
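
The loop above is a weighted sum of per-model probability maps. When the maps are stacked into a single array, the same ensembling can be written as one contraction; a minimal sketch, assuming probs has shape (n_models, H, W) and weights is a length-n_models sequence (ensemble_probs is a hypothetical helper, not part of the project):

import numpy as np

def ensemble_probs(probs, weights):
    """Weighted sum over the model axis: result[h, w] = sum_i weights[i] * probs[i, h, w]."""
    weights = np.asarray(weights, dtype=probs.dtype)
    return np.tensordot(weights, probs, axes=1)
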
rle_ensemble_loader.py (project: carvana-challenge, author: chplushsieh)
def __getitem__(self, idx):

        img_name = self.img_names[idx]

        ensembled_mask = np.zeros((const.img_size[0], const.img_size[1]))

        for i, submission in enumerate(self.submissions):

            rle = submission[img_name]
            mask = run_length.decode(rle)
            weighted_mask = np.multiply(mask, self.weights[i])
            ensembled_mask = np.add(ensembled_mask, weighted_mask)

        ensembled_mask[ ensembled_mask > 0.5 ] = 1
        ensembled_mask[ ensembled_mask <= 0.5 ] = 0

        # plt.imshow(ensembled_mask)
        # plt.show()

        ensembled_rle = run_length.encode(ensembled_mask)

        return img_name, ensembled_rle
variations.py (project: GASP-python, author: henniggroup)
def compute_num_adds(self, cell, composition_space, random):
        """
        Computes the number of atoms (or stoichiometries worth of atoms) to add
        or remove. Returns a non-zero integer.

        Args:
            cell: the Cell of the parent organism

            composition_space: the CompositionSpace of the search

            random: a copy of Python's built in PRNG
        """

        num_adds = int(round(random.gauss(self.mu_num_adds,
                                          self.sigma_num_adds)))
        # keep trying until we get a valid number
        while num_adds == 0 or \
            (composition_space.objective_function == 'epa' and num_adds*-1 >=
             cell.num_sites/composition_space.endpoints[0].num_atoms) or \
            (composition_space.objective_function == 'pd' and
                num_adds*-1 >= cell.num_sites):
            num_adds = int(round(random.gauss(self.mu_num_adds,
                                              self.sigma_num_adds)))
        return num_adds
stockstats.py (project: stockstats, author: jealous)
def _get_boll(cls, df):
        """ Get Bollinger bands.

        boll_ub means the upper band of the Bollinger bands
        boll_lb means the lower band of the Bollinger bands
        boll_ub = MA + K * σ
        boll_lb = MA - K * σ
        M = BOLL_PERIOD
        K = BOLL_STD_TIMES
        :param df: data
        :return: None
        """
        moving_avg = df['close_{}_sma'.format(cls.BOLL_PERIOD)]
        moving_std = df['close_{}_mstd'.format(cls.BOLL_PERIOD)]
        df['boll'] = moving_avg
        moving_avg = list(map(np.float64, moving_avg))
        moving_std = list(map(np.float64, moving_std))
        # noinspection PyTypeChecker
        df['boll_ub'] = np.add(moving_avg,
                               np.multiply(cls.BOLL_STD_TIMES, moving_std))
        # noinspection PyTypeChecker
        df['boll_lb'] = np.subtract(moving_avg,
                                    np.multiply(cls.BOLL_STD_TIMES,
                                                moving_std))
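
The same bands can be reproduced outside the class with a rolling mean and standard deviation. A minimal pandas sketch, assuming a close price Series and the common defaults of a 20-period window and K = 2 (stockstats' own BOLL_PERIOD and BOLL_STD_TIMES may be configured differently):

import numpy as np
import pandas as pd

def bollinger_bands(close, period=20, k=2.0):
    """Middle, upper and lower Bollinger bands for a price series."""
    ma = close.rolling(period).mean()    # middle band: simple moving average
    std = close.rolling(period).std()    # rolling standard deviation (sigma)
    return pd.DataFrame({
        'boll': ma,
        'boll_ub': np.add(ma, k * std),        # MA + K * sigma
        'boll_lb': np.subtract(ma, k * std),   # MA - K * sigma
    })
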

