Python add() usage examples

Array.py (project: Neural_Network, author: Shoeboxam)
def __add__(self, other):
        """Implicitly broadcast lesser operand to a higher conformable dimension"""
        if type(self) in self._types or type(other) in self._types:
            return super().__add__(other)

        # Stimuli are vectorized but bias units remain 1-D; to compute wx + b, the bias must be broadcast
        if self.ndim == 2 and other.ndim == 1:
            return Array(np.add(self, np.tile(other[..., np.newaxis], self.shape[1])))
        if self.ndim == 1 and other.ndim == 2:
            return Array(np.add(np.tile(self[..., np.newaxis], other.shape[1]), other))

        if self.ndim == 3 and other.ndim == 2:
            return Array(np.add(self, np.tile(other[..., np.newaxis], self.shape[2])))
        if self.ndim == 2 and other.ndim == 3:
            return Array(np.add(np.tile(self[..., np.newaxis], other.shape[2]), other))
        return np.add(self, other)
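
The tiling trick above can be reproduced with plain NumPy. A minimal sketch of the 2-D/1-D case, with made-up shapes (not taken from the project):

import numpy as np

wx = np.arange(6, dtype=float).reshape(3, 2)  # vectorized stimuli: (neurons, batch)
b = np.array([10., 20., 30.])                 # 1-D bias: (neurons,)

# Tile the bias across the batch axis so the shapes conform, then add.
tiled = np.tile(b[..., np.newaxis], wx.shape[1])  # shape (3, 2)
assert np.array_equal(np.add(wx, tiled), wx + b[:, np.newaxis])
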
training.py (project: treecat, author: posterior)
def logprob_dc(counts, prior, axis=None):
    """Non-normalized log probability of a Dirichlet-Categorical distribution.

    See https://en.wikipedia.org/wiki/Dirichlet-multinomial_distribution
    """
    # Note that this excludes the factorial(counts) term, since we explicitly
    # track permutations in assignments.
    return gammaln(np.add(counts, prior, dtype=np.float32)).sum(axis)
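
A quick check of the helper, with arbitrary values (gammaln comes from scipy.special):

import numpy as np
from scipy.special import gammaln

counts = np.array([[2, 0], [1, 3]])
prior = 0.5  # symmetric Dirichlet concentration

# Sum of log-gamma over (counts + prior); axis=None reduces over all entries.
print(gammaln(np.add(counts, prior, dtype=np.float32)).sum(None))
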
training.py (project: treecat, author: posterior)
def set_edges(self, edges):
        TreeTrainer.set_edges(self, edges)
        V, E, K, M = self._VEKM
        assignments = self._assignments[sorted(self._added_rows), :]
        for e, v1, v2 in self._tree.tree_grid.T:
            self._edge_ss[e, :, :] = count_pairs(assignments, v1, v2, M)
        np.add(self._edge_ss, self._edge_prior, out=self._edge_probs)
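
The final line writes the sum into a preallocated buffer via np.add's `out` argument, avoiding a temporary array. The same pattern in isolation (shapes are illustrative, not treecat's):

import numpy as np

edge_ss = np.zeros((4, 3, 3))       # per-edge sufficient statistics
edge_prior = np.full((3, 3), 0.5)   # broadcast against the leading axis
edge_probs = np.empty_like(edge_ss)

np.add(edge_ss, edge_prior, out=edge_probs)  # no intermediate allocation
assert np.allclose(edge_probs, edge_ss + edge_prior)
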
training.py (project: treecat, author: posterior)
def add_row(self, row_id):
        logger.debug('TreeGaussTrainer.add_row %d', row_id)
        assert row_id not in self._added_rows, row_id
        self._added_rows.add(row_id)

        treegauss_add_row(
            self._data[row_id, :],
            self._tree.tree_grid,
            self._program,
            self._latent[row_id, :, :],
            self._vert_ss,
            self._edge_ss,
            self._feat_ss, )
numerics.py (project: npstreams, author: LaurentRDC)
def isum(arrays, axis = -1, dtype = None, ignore_nan = False):
    """ 
    Streaming sum of array elements.

    Parameters
    ----------
    arrays : iterable
        Arrays to be summed. 
    axis : int or None, optional
        Reduction axis. Default is to sum the arrays in the stream as if 
        they had been stacked along a new axis, then sum along this new axis.
        If None, arrays are flattened before summing. If `axis` is an int larger than
        the number of dimensions in the arrays of the stream, arrays are summed
        along the new axis.
    dtype : numpy.dtype, optional
        The type of the yielded array and of the accumulator in which the elements
        are summed. The dtype of the input arrays is used by default unless they
        have an integer dtype of less precision than the default platform integer.
        In that case, if the input arrays are signed then the platform integer is
        used, while if they are unsigned then an unsigned integer of the same
        precision as the platform integer is used.
    ignore_nan : bool, optional
        If True, NaNs are ignored. Default is propagation of NaNs.

    Yields
    ------
    online_sum : ndarray
    """    
    yield from ireduce_ufunc(arrays, ufunc = np.add, axis = axis, ignore_nan = ignore_nan, dtype = dtype)
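
A usage sketch, assuming `isum` and `last` are exported from npstreams as in this project:

import numpy as np
from npstreams import isum, last

stream = (np.full((2, 2), i, dtype=float) for i in range(4))
total = last(isum(stream))  # default axis=-1: sum across the stream
print(total)                # [[6. 6.], [6. 6.]]
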
test_reduce.py (project: npstreams, author: LaurentRDC)
def test_no_side_effects(self):
        """ Test that no arrays in the stream are modified """
        for arr in self.source:
            arr.setflags(write = False)
        out = last(ireduce_ufunc(self.source, np.add))
test_reduce.py (project: npstreams, author: LaurentRDC)
def test_single_array(self):
        """ Test ireduce_ufunc on a single array, not a sequence """
        source = np.ones((16, 16), dtype = int)  # np.int was removed in NumPy 1.24; use the builtin int
        out = last(ireduce_ufunc(source, np.add, axis = -1))
        self.assertTrue(np.allclose(source, out))
test_reduce.py (project: npstreams, author: LaurentRDC)
def test_output_shape(self):
        """ Test output shape """
        for axis in (0, 1, 2, 3, None):
            with self.subTest('axis = {}'.format(axis)):
                from_numpy = np.add.reduce(self.stack, axis = axis)
                out = last(ireduce_ufunc(self.source, np.add, axis = axis))
                self.assertSequenceEqual(from_numpy.shape, out.shape)
                self.assertTrue(np.allclose(out, from_numpy))
test_reduce.py (project: npstreams, author: LaurentRDC)
def test_ignore_nan(self):
        """ Test that ignore_nan is working """
        for axis in (0, 1, 2, 3, None):
            with self.subTest('axis = {}'.format(axis)):
                out = last(ireduce_ufunc(self.source, np.add, axis = axis, ignore_nan = True))
                self.assertFalse(np.any(np.isnan(out)))
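
These tests compare against np.add.reduce on a pre-built stack. The equivalence they rely on, restated in plain NumPy:

import numpy as np

stack = np.random.rand(8, 8, 5)             # 5 arrays stacked along axis -1
arrays = [stack[..., i] for i in range(5)]

# Summing the arrays elementwise equals reducing the stack along the new axis.
assert np.allclose(np.add.reduce(stack, axis=-1), sum(arrays))
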

# Dynamic generation of tests on binary ufuncs
7_2_manydicts_server.py (project: Tencent2017_Final_Rank28_code, author: Dojocat-GO)
def con_two_dict(dic1, dic2):
    """Merge two parallel lists of dicts, adding values elementwise for shared keys."""
    dic = [item.copy() for item in dic1]
    for i in range(len(dic)):
        for k in dic2[i].keys():
            if k in dic[i]:
                dic[i][k] = np.add(dic[i][k], dic2[i][k])
            else:
                dic[i][k] = dic2[i][k]
    return dic
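
For example, with made-up inputs:

import numpy as np

d1 = [{'a': np.array([1, 2])}, {'x': 1}]
d2 = [{'a': np.array([10, 20]), 'b': 3}, {'y': 2}]
merged = con_two_dict(d1, d2)
print(merged[0]['a'])  # [11 22]: shared keys are added
print(merged[1])       # {'x': 1, 'y': 2}: disjoint keys are unioned
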
chart_utils.py (project: simple_rl, author: david-abel)
def compute_conf_intervals(data, cumulative=False):
    '''
    Args:
        data (list): A 3D matrix, [algorithm][instance][episode]
        cumulative (bool) *opt
    '''

    confidence_intervals_each_alg = [] # [alg][conf_inv_for_episode]

    for i, all_instances in enumerate(data):

        num_instances = len(data[i])
        num_episodes = len(data[i][0])

        all_instances = np.array(all_instances)
        alg_i_ci = []
        total_so_far = np.zeros(num_instances)
        for j in range(num_episodes):
            # Compute datum for confidence interval.
            episode_j_all_instances = all_instances[:, j]

            if cumulative:
                # Keep a running total across episodes.
                total_so_far = np.add(episode_j_all_instances, total_so_far)
                episode_j_all_instances = total_so_far

            # Compute the interval and add it to list.
            conf_interv = compute_single_conf_interval(episode_j_all_instances)
            alg_i_ci.append(conf_interv)

        confidence_intervals_each_alg.append(alg_i_ci)

    return confidence_intervals_each_alg
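
The cumulative branch keeps a running total with np.add; np.cumsum along the episode axis yields the same sequence of summed vectors. A minimal check:

import numpy as np

all_instances = np.array([[1., 2., 3.],
                          [4., 5., 6.]])  # [instance][episode]

total = np.zeros(2)
running = []
for j in range(3):
    total = np.add(all_instances[:, j], total)
    running.append(total)

assert np.allclose(np.column_stack(running), np.cumsum(all_instances, axis=1))
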
noise_test.py (project: Wall-EEG, author: neurotechuoft)
def __call__(self, sample):
        # Keep a running total of the absolute value of each channel's data
        self.diff = np.add(self.diff,
                           np.absolute(np.asarray(sample.channel_data)))
        self.sample_count += 1

        elapsed_time = timeit.default_timer() - self.last_report
        if elapsed_time > self.polling_interval:
            channel_noise_power = np.divide(self.diff, self.sample_count)

            print(channel_noise_power)
            self.diff = np.zeros(self.eeg_channels)
            self.last_report = timeit.default_timer()

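
The callback accumulates absolute sample values and reports the per-channel mean every polling interval. The core arithmetic, isolated with made-up data:

import numpy as np

diff = np.zeros(4)  # four EEG channels, illustrative
for sample in ([1, -2, 3, -4], [0, 2, -2, 4]):
    diff = np.add(diff, np.absolute(np.asarray(sample, dtype=float)))

print(np.divide(diff, 2))  # mean absolute amplitude: [0.5 2.  2.5 4. ]
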
ocr.py (project: OCR, author: OrangeGuo)
def predict(self, test):
        print('predict')
        y1 = np.dot(np.mat(self.theta1), np.mat(test).T)
        y1 = y1 + np.mat(self.input_layer_bias)  # Add the bias
        y1 = self.sigmoid(y1)

        y2 = np.dot(np.array(self.theta2), y1)
        y2 = np.add(y2, self.hidden_layer_bias)  # Add the bias
        y2 = self.sigmoid(y2)

        results = y2.T.tolist()[0]
        return results.index(max(results))
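
Both bias additions are plain affine steps; the same computation in ndarray form, with assumed layer sizes (not the project's):

import numpy as np

theta = np.random.rand(4, 3)  # weights: hidden x input
bias = np.random.rand(4, 1)   # column bias
x = np.random.rand(3)

y = np.add(np.dot(theta, x).reshape(-1, 1), bias)  # (4, 1) pre-activation
y = 1.0 / (1.0 + np.exp(-y))                       # sigmoid
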
proc.py (project: PyMDNet, author: HungWei-Andy)
def sample(gt, n, im_size, scale_factor, transfer_range, scale_range, valid, verbose=False):
  samp = np.array([gt[0]+gt[2]/2.0, gt[1]+gt[3]/2.0, gt[2], gt[3]])
  samples = np.repeat(np.reshape(samp, [1, -1]), n, axis=0)
  h, w = im_size

  if verbose:
    print(w, h)
    print(gt)
    print(samp)
    print(transfer_range)
    print(scale_range)

  samples[:, 0] = np.add(samples[:, 0], transfer_range*samp[2]*(np.random.rand(n)*2-1))
  samples[:, 1] = np.add(samples[:, 1], transfer_range*samp[3]*(np.random.rand(n)*2-1))
  samples[:, 2:]  = np.multiply(samples[:, 2:], np.power(scale_factor, scale_range*np.repeat(np.random.rand(n,1)*2-1,2,axis=1)))
  samples[:, 2] = np.maximum(0, np.minimum(w-5, samples[:,2]))
  samples[:, 3] = np.maximum(0, np.minimum(h-5, samples[:,3]))

  if verbose:
    print(samples[0])

  samples = np.c_[samples[:,0]-samples[:,2]/2, samples[:,1]-samples[:,3]/2, samples[:,2], samples[:,3]]

  if verbose:
    print(samples[0])

  if valid:
    samples[:,0] = np.maximum(0,np.minimum(w-samples[:,2],samples[:,0]))
    samples[:,1] = np.maximum(0,np.minimum(h-samples[:,3],samples[:,1]))
  else:
    samples[:,0] = np.maximum(0-samples[:,2]/2,np.minimum(w-samples[:,2]/2,samples[:,0]))
    samples[:,1] = np.maximum(0-samples[:,3]/2,np.minimum(h-samples[:,3]/2,samples[:,1]))

  if verbose:
    print(samples[0])
  return samples
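A hedged usage sketch for `sample` (the box and ranges are invented values, not from PyMDNet):

import numpy as np

gt = np.array([50., 60., 80., 100.])  # [x, y, w, h]
boxes = sample(gt, n=16, im_size=(240, 320), scale_factor=1.05,
               transfer_range=0.6, scale_range=1.0, valid=True)
print(boxes.shape)  # (16, 4); each row is an [x, y, w, h] candidate
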

###########################################################################
#                          overlap_ratio                                  #
###########################################################################
operator.py (project: MOSFiT, author: guillochon)
def set_attributes(self, task):
        """Set key replacement dictionary."""
        Utility.set_attributes(self, task)
        self._operands = task.get('operands', [])
        if not self._operands:
            raise ValueError('`Operator` must have at least one operand.')
        self._result = task.get('result', 'result')
        self._op = self.ops.get(task.get('operator', '+'), np.add)
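
The lookup falls back to np.add when no operator is named. A sketch of the kind of table `self.ops` presumably holds (the exact mapping is MOSFiT's, not shown here):

import numpy as np

ops = {'+': np.add, '-': np.subtract, '*': np.multiply, '/': np.divide}
op = ops.get('+', np.add)  # unknown keys fall back to addition
print(op(np.array([1., 2.]), np.array([3., 4.])))  # [4. 6.]
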
ltls.py (project: ltls, author: kjasinska)
def evaluate_model(self, x, w):
        if not self.regularization or self.lambd == 0:
            edge_weight = x.dot(w)
            edge_weight = np.multiply(edge_weight, self.skipped)
        else:
            edge_weight = np.zeros((1, self.num_edges))
            for idx, value in zip(x.indices, x.data):
                # Vectorized equivalent: soft-threshold w[idx, :] by lambd, scale by value:
                # edge_weight += value * np.multiply(
                #     np.maximum(np.abs(w[idx, :]) - self.lambd, 0), np.sign(w[idx, :]))
                for edge in range(self.num_edges):
                    if w[idx, edge] > self.lambd:
                        edge_weight[0, edge] += value * (w[idx, edge] - self.lambd)
                    elif w[idx, edge] < -self.lambd:
                        edge_weight[0, edge] += value * (w[idx, edge] + self.lambd)
        return edge_weight
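
The inner loop applies L1 soft-thresholding to one row of w; the commented-out line is its vectorized form. A standalone check of that equivalence (random values, lambd assumed positive):

import numpy as np

w_row, lambd, value = np.random.randn(6), 0.3, 1.7

loop = np.zeros(6)
for e in range(6):
    if w_row[e] > lambd:
        loop[e] = value * (w_row[e] - lambd)
    elif w_row[e] < -lambd:
        loop[e] = value * (w_row[e] + lambd)

vec = value * np.maximum(np.abs(w_row) - lambd, 0) * np.sign(w_row)
assert np.allclose(loop, vec)
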
ltls.py (project: ltls, author: kjasinska)
def get_unseen_labels(self, y):
        unseen = set(y).difference(self.classes_seen)
        for c in unseen:
            self.classes_seen.add(c)
        return unseen
COPparamsFs.py (project: wiicop, author: barnabuskev)
def pathl(cop_dat):
    """Calculate the COP (center of pressure) path length."""
    delt = np.diff(cop_dat[:, (0, 1)], axis=0)
    sqs = np.square(delt)
    sum_s = np.add(sqs[:, 0], sqs[:, 1])
    lgths = np.sqrt(sum_s)
    return np.sum(lgths)
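
The same path length falls out of np.linalg.norm applied to the stepwise differences; a cross-check with a synthetic trace (columns assumed to be x, y, then extras):

import numpy as np

cop_dat = np.random.rand(100, 3)
steps = np.diff(cop_dat[:, :2], axis=0)
assert np.isclose(pathl(cop_dat), np.linalg.norm(steps, axis=1).sum())
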
test_regression.py (project: radar, author: amoose136)
def test_reduce(self,level=rlevel):
        # Ticket #40
        assert_almost_equal(np.add.reduce([1., .5], dtype=None), 1.5)
test_regression.py (project: radar, author: amoose136)
def test_add_identity(self,level=rlevel):
        # Ticket #143
        assert_equal(0, np.add.identity)
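
The identity element is what np.add.reduce returns for an empty sequence, so the two regression tests above are linked:

import numpy as np

assert np.add.identity == 0
assert np.add.reduce([]) == 0.0  # reducing an empty array yields the identity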

