Python cumsum() usage examples

core.py (project: dask_gdf, author: gpuopenanalytics)
def reset_index(self):
        """Reset index to range based
        """
        dfs = self.to_delayed()
        sizes = np.asarray(compute(*map(delayed(len), dfs)))
        prefixes = np.zeros_like(sizes)
        prefixes[1:] = np.cumsum(sizes[:-1])

        @delayed
        def fix_index(df, startpos):
            return df.set_index(np.arange(start=startpos,
                                          stop=startpos + len(df),
                                          dtype=np.intp))

        outdfs = [fix_index(df, startpos)
                  for df, startpos in zip(dfs, prefixes)]
        return from_delayed(outdfs)
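The trick here is the exclusive prefix sum: shifting the cumulative sum of partition sizes by one slot yields each partition's global start offset. A minimal standalone sketch of the same pattern (plain NumPy, hypothetical sizes):

import numpy as np

sizes = np.array([4, 3, 3])            # rows per partition
prefixes = np.zeros_like(sizes)
prefixes[1:] = np.cumsum(sizes[:-1])   # exclusive prefix sum: [0, 4, 7]
# partition i owns the global index range [prefixes[i], prefixes[i] + sizes[i])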
efficient_pd_weight_updates.py (project: pdnn, author: petered)
def scalar_weight_updates(xc, ec, kp, kd):

    tx_last = 0
    te_last = 0
    x_last = 0
    e_last = 0
    n_samples = xc.shape[0]
    dw = np.zeros(n_samples)
    r = kd/float(kp+kd)

    for t in xrange(n_samples):

        t_last = max(tx_last, te_last)
        if xc[t]:
            dw[t] = x_last*e_last*r**(2*t_last-tx_last-te_last)*geosum(r**2, t_end=t-t_last+1, t_start=1)
            x_last = x_last * r**(t-tx_last) + xc[t]
            tx_last = t
        if ec[t]:
            dw[t] = x_last*e_last*r**(2*t_last-tx_last-te_last)*geosum(r**2, t_end=t-t_last+1, t_start=1)
            e_last = e_last * r**(t-te_last) + ec[t]
            te_last = t
    return np.cumsum(dw)/kd**2
efficient_pd_weight_updates.py (project: pdnn, author: petered)
def pd_weight_grads_future(xc, ec, kp, kd):
    r = kd/float(kp+kd)
    kd=float(kd)
    scale_factor = 1./float(kp**2 + 2*kp*kd)
    n_samples = xc.shape[0]
    assert n_samples == ec.shape[0]
    n_in = xc.shape[1]
    n_out = ec.shape[1]
    dws = np.zeros((n_samples, n_in, n_out))
    xr = np.zeros(n_in)
    er = np.zeros(n_out)
    for t in xrange(n_samples):
        xr *= r
        er = er*r + ec[t]
        dws[t] = (xc[t][:, None]*er[None, :] + xr[:, None]*ec[t][None, :]) * scale_factor
        xr += xc[t]
        # er += ec[t]
    return np.cumsum(dws, axis=0)
PLDA.py (project: bob.bio.base, author: bioidiap)
def _train_pca(self, training_set):
    """Trains and returns a LinearMachine that is trained using PCA"""
    data = numpy.vstack([feature for feature in training_set])

    logger.info("  -> Training LinearMachine using PCA ")
    trainer = bob.learn.linear.PCATrainer()
    machine, eigen_values = trainer.train(data)

    if isinstance(self.subspace_dimension_pca, float):
      cummulated = numpy.cumsum(eigen_values) / numpy.sum(eigen_values)
      for index in range(len(cummulated)):
        if cummulated[index] > self.subspace_dimension_pca:
          self.subspace_dimension_pca = index
          break
      self.subspace_dimension_pca = index

    # limit number of pcs
    logger.info("  -> limiting PCA subspace to %d dimensions", self.subspace_dimension_pca)
    machine.resize(machine.shape[0], self.subspace_dimension_pca)
    return machine
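The cumsum here implements the usual energy-threshold rule for choosing the PCA subspace size: normalize the cumulative sum of the eigenvalues and keep components until the ratio passes the target fraction. A minimal standalone sketch of that rule (made-up eigenvalues, threshold 0.9), using searchsorted instead of the explicit loop:

import numpy as np

eigen_values = np.array([5.0, 3.0, 1.5, 0.4, 0.1])
ratio = np.cumsum(eigen_values) / np.sum(eigen_values)  # [0.5, 0.8, 0.95, 0.99, 1.0]
n_components = int(np.searchsorted(ratio, 0.9)) + 1     # first index with ratio >= 0.9, so keep 3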
PCA.py (project: bob.bio.base, author: bioidiap)
def train_projector(self, training_features, projector_file):
    """Generates the PCA covariance matrix and writes it into the given projector_file.

    **Parameters:**

    training_features : [1D :py:class:`numpy.ndarray`]
      A list of 1D training arrays (vectors) to train the PCA projection matrix with.

    projector_file : str
      A writable file, into which the PCA projection matrix (as a :py:class:`bob.learn.linear.Machine`) and the eigenvalues will be written.
    """
    # Assure that all data are 1D
    [self._check_feature(feature) for feature in training_features]

    # Initializes the data
    data = numpy.vstack(training_features)
    logger.info("  -> Training LinearMachine using PCA")
    t = bob.learn.linear.PCATrainer()
    self.machine, self.variances = t.train(data)
    # For re-shaping, we need to copy...
    self.variances = self.variances.copy()

    # compute variance percentage, if desired
    if isinstance(self.subspace_dim, float):
      cummulated = numpy.cumsum(self.variances) / numpy.sum(self.variances)
      for index in range(len(cummulated)):
        if cummulated[index] > self.subspace_dim:
          self.subspace_dim = index
          break
      self.subspace_dim = index
    logger.info("    ... Keeping %d PCA dimensions", self.subspace_dim)
    # re-shape machine
    self.machine.resize(self.machine.shape[0], self.subspace_dim)
    self.variances.resize(self.subspace_dim)

    f = bob.io.base.HDF5File(projector_file, "w")
    f.set("Eigenvalues", self.variances)
    f.create_group("Machine")
    f.cd("/Machine")
    self.machine.save(f)
helpers.py (project: multimodal_varinf, author: tmoer)
def split(x, split_dim, split_sizes):
    n = len(list(x.get_shape()))
    dim_size = np.sum(split_sizes)
    assert int(x.get_shape()[split_dim]) == dim_size
    ids = np.cumsum([0] + split_sizes)
    ids[-1] = -1
    begin_ids = ids[:-1]

    ret = []
    for i in range(len(split_sizes)):
        cur_begin = np.zeros([n], dtype=np.int32)
        cur_begin[split_dim] = begin_ids[i]
        cur_end = np.zeros([n], dtype=np.int32) - 1
        cur_end[split_dim] = split_sizes[i]
        ret += [tf.slice(x, cur_begin, cur_end)]
    return ret
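Here np.cumsum([0] + split_sizes) turns chunk sizes into begin offsets for tf.slice. The same bookkeeping in plain NumPy, with hypothetical sizes:

import numpy as np

split_sizes = [2, 3, 4]
ids = np.cumsum([0] + split_sizes)                     # [0, 2, 5, 9]
x = np.arange(ids[-1])
chunks = [x[b:e] for b, e in zip(ids[:-1], ids[1:])]   # lengths 2, 3, 4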
mcmc_func.py (project: CRN_ProbabilisticInversion, author: elaloy)
def GenCR(MCMCPar,pCR):

    if type(pCR) is np.ndarray:
        p=np.ndarray.tolist(pCR)[0]
    else:
        p=pCR
    CR=np.zeros((MCMCPar.seq * MCMCPar.steps),dtype=np.float)
    L =  np.random.multinomial(MCMCPar.seq * MCMCPar.steps, p, size=1)
    L2 = np.concatenate((np.zeros((1),dtype=np.int), np.cumsum(L)),axis=0)

    r = np.random.permutation(MCMCPar.seq * MCMCPar.steps)

    for zz in range(0,MCMCPar.nCR):

        i_start = L2[zz]
        i_end = L2[zz+1]
        idx = r[i_start:i_end]
        CR[idx] = np.float(zz+1)/MCMCPar.nCR

    CR = np.reshape(CR,(MCMCPar.seq,MCMCPar.steps))
    return CR, L
mcmc_func.py (project: CRN_ProbabilisticInversion, author: elaloy)
def DEStrategy(DEpairs,seq):

    # Determine which sequences to evolve with what DE strategy

    # Determine probability of selecting a given number of pairs
    p_pair = (1.0/DEpairs) * np.ones((1,DEpairs))
    p_pair = np.cumsum(p_pair)
    p_pair = np.concatenate((np.zeros((1)),p_pair),axis=0)

    DEversion=np.zeros((seq),dtype=np.int32)
    Z = np.random.rand(seq)
    # Select number of pairs
    for qq in range(0,seq):
        z = np.where(p_pair<=Z[qq])
        DEversion[qq] = z[0][-1]

    return DEversion
randtest.py (project: prbg, author: Lakate)
def cumultativesumstest(binin):
    '''The focus of this test is the maximal excursion (from zero) of the random
    walk defined by the cumulative sum of adjusted (-1, +1) digits in the
    sequence. The purpose of the test is to determine whether the cumulative sum
    of the partial sequences occurring in the tested sequence is too large or
    too small relative to the expected behavior of that cumulative sum for
    random sequences. This cumulative sum may be considered as a random walk.
    For a random sequence, the random walk should be near zero. For non-random
    sequences, the excursions of this random walk away from zero will be too
    large.'''
    n = len(binin)
    ss = [int(el) for el in binin]
    sc = map(sumi, ss)
    cs = np.cumsum(sc)
    z = max(abs(cs))
    ra = 0
    start = int(np.floor(0.25 * np.floor(-n / z) + 1))
    stop = int(np.floor(0.25 * np.floor(n / z) - 1))
    pv1 = []
    for k in xrange(start, stop + 1):
        pv1.append(sst.norm.cdf((4 * k + 1) * z / np.sqrt(n)) - sst.norm.cdf((4 * k - 1) * z / np.sqrt(n)))
    start = int(np.floor(0.25 * np.floor(-n / z - 3)))
    stop = int(np.floor(0.25 * np.floor(n / z) - 1))
    pv2 = []
    for k in xrange(start, stop + 1):
        pv2.append(sst.norm.cdf((4 * k + 3) * z / np.sqrt(n)) - sst.norm.cdf((4 * k + 1) * z / np.sqrt(n)))
    pval = 1
    pval -= reduce(su, pv1)
    pval += reduce(su, pv2)

    return pval
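For intuition, the adjusted digits are just the bits mapped to ±1 (sumi, from the surrounding module, presumably maps 0 -> -1 and 1 -> +1), so the walk and its maximal excursion reduce to:

import numpy as np

bits = np.array([1, 0, 0, 1, 1, 1, 0, 1])
walk = np.cumsum(2 * bits - 1)    # random walk over adjusted (-1, +1) digits
z = np.max(np.abs(walk))          # maximal excursion from zero, here 2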
meshutils.py (project: discretize, author: simpeg)
def exampleLrmGrid(nC, exType):
    assert type(nC) == list, "nC must be a list containing the number of nodes"
    assert len(nC) == 2 or len(nC) == 3, "nC must have either two or three dimensions"
    exType = exType.lower()

    possibleTypes = ['rect', 'rotate']
    assert exType in possibleTypes, "Not a possible example type."

    if exType == 'rect':
        return list(ndgrid([np.cumsum(np.r_[0, np.ones(nx)/nx]) for nx in nC], vector=False))
    elif exType == 'rotate':
        if len(nC) == 2:
            X, Y = ndgrid([np.cumsum(np.r_[0, np.ones(nx)/nx]) for nx in nC], vector=False)
            amt = 0.5-np.sqrt((X - 0.5)**2 + (Y - 0.5)**2)
            amt[amt < 0] = 0
            return [X + (-(Y - 0.5))*amt, Y + (+(X - 0.5))*amt]
        elif len(nC) == 3:
            X, Y, Z = ndgrid([np.cumsum(np.r_[0, np.ones(nx)/nx]) for nx in nC], vector=False)
            amt = 0.5-np.sqrt((X - 0.5)**2 + (Y - 0.5)**2 + (Z - 0.5)**2)
            amt[amt < 0] = 0
            return [X + (-(Y - 0.5))*amt, Y + (-(Z - 0.5))*amt, Z + (-(X - 0.5))*amt]
test_function_base.py (project: radar, author: amoose136)
def test_basic(self):
        ba = [1, 2, 10, 11, 6, 5, 4]
        ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
        for ctype in [np.int8, np.uint8, np.int16, np.uint16, np.int32,
                      np.uint32, np.float32, np.float64, np.complex64, np.complex128]:
            a = np.array(ba, ctype)
            a2 = np.array(ba2, ctype)

            tgt = np.array([1, 3, 13, 24, 30, 35, 39], ctype)
            assert_array_equal(np.cumsum(a, axis=0), tgt)

            tgt = np.array(
                [[1, 2, 3, 4], [6, 8, 10, 13], [16, 11, 14, 18]], ctype)
            assert_array_equal(np.cumsum(a2, axis=0), tgt)

            tgt = np.array(
                [[1, 3, 6, 10], [5, 11, 18, 27], [10, 13, 17, 22]], ctype)
            assert_array_equal(np.cumsum(a2, axis=1), tgt)
_iotools.py (project: radar, author: amoose136)
def __init__(self, delimiter=None, comments=asbytes('#'), autostrip=True):
        self.comments = comments
        # Delimiter is a character
        if isinstance(delimiter, unicode):
            delimiter = delimiter.encode('ascii')
        if (delimiter is None) or _is_bytes_like(delimiter):
            delimiter = delimiter or None
            _handyman = self._delimited_splitter
        # Delimiter is a list of field widths
        elif hasattr(delimiter, '__iter__'):
            _handyman = self._variablewidth_splitter
            idx = np.cumsum([0] + list(delimiter))
            delimiter = [slice(i, j) for (i, j) in zip(idx[:-1], idx[1:])]
        # Delimiter is a single integer
        elif int(delimiter):
            (_handyman, delimiter) = (
                    self._fixedwidth_splitter, int(delimiter))
        else:
            (_handyman, delimiter) = (self._delimited_splitter, None)
        self.delimiter = delimiter
        if autostrip:
            self._handyman = self.autostrip(_handyman)
        else:
            self._handyman = _handyman
    #
concatenate.py (project: inferno, author: inferno-pytorch)
def map_index(self, index):
        # Get a list of lengths of all datasets. Say the answer is [4, 3, 3],
        # and we're looking for index = 5.
        len_list = list(map(len, self.datasets))
        # Cumulate to a numpy array. The answer is [4, 7, 10]
        cumulative_len_list = np.cumsum(len_list)
        # When the index is subtracted, we get [-1, 2, 5]. We're looking for the (index
        # of the) first cumulated len which is larger than the index (in this case,
        # 7 (index 1)).
        offset_cumulative_len_list = cumulative_len_list - index
        dataset_index = np.argmax(offset_cumulative_len_list > 0)
        # With the dataset index, we figure out the index in dataset
        if dataset_index == 0:
            # First dataset - index corresponds to index_in_dataset
            index_in_dataset = index
        else:
            # Get cumulated length up to the current dataset
            len_up_to_dataset = cumulative_len_list[dataset_index - 1]
            # Compute index_in_dataset as that what's left
            index_in_dataset = index - len_up_to_dataset
        return dataset_index, index_in_dataset
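Tracing the comments with concrete numbers (hypothetical dataset lengths [4, 3, 3] and global index 5):

import numpy as np

len_list = [4, 3, 3]
index = 5
cumulative = np.cumsum(len_list)                         # [4, 7, 10]
dataset_index = int(np.argmax(cumulative - index > 0))   # 1: first cumulated len larger than 5
index_in_dataset = index - cumulative[dataset_index - 1] if dataset_index else index
# (dataset_index, index_in_dataset) == (1, 1): element 5 is the 2nd item of the 2nd dataset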
sumprod.py (project: cupy, author: cupy)
def cumsum(a, axis=None, dtype=None, out=None):
    """Returns the cumulative sum of an array along a given axis.

    Args:
        a (cupy.ndarray): Input array.
        axis (int): Axis along which the cumulative sum is taken. If it is not
            specified, the input is flattened.
        dtype: Data type specifier.
        out (cupy.ndarray): Output array.

    Returns:
        cupy.ndarray: The result array.

    .. seealso:: :func:`numpy.cumsum`

    """
    return _cum_core(a, axis, dtype, out, _cumsum_kern, _cumsum_batch_kern)
TimeControl.py (project: astk, author: openalea-incubator)
def __call__(self, time_sequence, weather_data):
        """ Compute thermal time accumulation over time_sequence

        :Parameters:
        ----------
        - `time_sequence` (pandas DatetimeIndex)
            A sequence of timestamps indicating the dates of all elementary time steps of the simulation
        - `weather_data` (alinea.astk.Weather instance)
            A Weather database

        """    
        try:
            Tair = weather_data.temperature_air[time_sequence]
        except:
            #strange extract needed on visualea 1.0 (to test again with ipython in visualea)
            T_data = weather_data[['temperature_air']]
            Tair = numpy.array([float(T_data.loc[d]) for d in time_sequence])
        Tcut = numpy.maximum(numpy.zeros_like(Tair), Tair - self.Tbase)
        days = [0] + [((t - time_sequence[0]).total_seconds()+ 3600) / 3600 / 24 for t in time_sequence]
        dt = numpy.diff(days).tolist()
        return numpy.cumsum(Tcut * dt)

# functional call for nodes
reinforce.py (project: chainerrl, author: chainer)
def accumulate_grad(self):
        if self.n_backward == 0:
            self.model.zerograds()
        # Compute losses
        losses = []
        for r_seq, log_prob_seq, ent_seq in zip(self.reward_sequences,
                                                self.log_prob_sequences,
                                                self.entropy_sequences):
            assert len(r_seq) - 1 == len(log_prob_seq) == len(ent_seq)
            # Convert rewards into returns (=sum of future rewards)
            R_seq = np.cumsum(list(reversed(r_seq[1:])))[::-1]
            for R, log_prob, entropy in zip(R_seq, log_prob_seq, ent_seq):
                loss = -R * log_prob - self.beta * entropy
                losses.append(loss)
        total_loss = chainerrl.functions.sum_arrays(losses)
        # When self.batchsize is future.types.newint.newint, dividing a
        # Variable with it will raise an error, so it is manually converted to
        # float here.
        total_loss /= float(self.batchsize)
        total_loss.backward()
        self.reward_sequences = [[]]
        self.log_prob_sequences = [[]]
        self.entropy_sequences = [[]]
        self.n_backward += 1
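The line R_seq = np.cumsum(list(reversed(r_seq[1:])))[::-1] computes undiscounted returns: entry t is the sum of all rewards after step t. A quick check of that identity with hypothetical rewards:

import numpy as np

r_seq = [0.0, 1.0, 0.0, 2.0]     # r_seq[0] is never summed
R_seq = np.cumsum(list(reversed(r_seq[1:])))[::-1]
assert list(R_seq) == [3.0, 2.0, 2.0]   # R_seq[t] == sum(r_seq[t+1:])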
data.py (project: magenta, author: tensorflow)
def _to_notesequences(self, samples):
    output_sequences = []
    dim_ranges = np.cumsum(self._split_output_depths)
    for s in samples:
      mel_ns = self._melody_converter.to_notesequences(
          [s[:, :dim_ranges[0]]])[0]
      bass_ns = self._melody_converter.to_notesequences(
          [s[:, dim_ranges[0]:dim_ranges[1]]])[0]
      drums_ns = self._drums_converter.to_notesequences(
          [s[:, dim_ranges[1]:]])[0]

      for n in bass_ns.notes:
        n.instrument = 1
        n.program = ELECTRIC_BASS_PROGRAM
      for n in drums_ns.notes:
        n.instrument = 9

      ns = mel_ns
      ns.notes.extend(bass_ns.notes)
      ns.notes.extend(drums_ns.notes)
      ns.total_time = max(
          mel_ns.total_time, bass_ns.total_time, drums_ns.total_time)
      output_sequences.append(ns)
    return output_sequences
fastgen.py (project: magenta, author: tensorflow)
def sample_categorical(pmf):
  """Sample from a categorical distribution.

  Args:
    pmf: Probability mass function. Output of a softmax over categories.
      Array of shape [batch_size, number of categories]. Rows sum to 1.

  Returns:
    idxs: Array of size [batch_size, 1]. Integer of category sampled.
  """
  if pmf.ndim == 1:
    pmf = np.expand_dims(pmf, 0)
  batch_size = pmf.shape[0]
  cdf = np.cumsum(pmf, axis=1)
  rand_vals = np.random.rand(batch_size)
  idxs = np.zeros([batch_size, 1])
  for i in range(batch_size):
    idxs[i] = cdf[i].searchsorted(rand_vals[i])
  return idxs
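This is inverse-CDF sampling: cumsum turns each pmf row into a CDF, and searchsorted maps a uniform draw onto a category index. A quick usage sketch with a made-up pmf (drawing many samples should reproduce the pmf frequencies):

import numpy as np

pmf = np.array([0.1, 0.6, 0.3])
samples = [int(sample_categorical(pmf)[0, 0]) for _ in range(1000)]
# np.bincount(samples) / 1000. is approximately [0.1, 0.6, 0.3]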
ConsMarkovModel.py (project: HARK, author: econ-ark)
def simBirth(self,which_agents):
        '''
        Makes new Markov consumer by drawing initial normalized assets, permanent income levels, and
        discrete states. Calls IndShockConsumerType.simBirth, then draws from initial Markov distribution.

        Parameters
        ----------
        which_agents : np.array(Bool)
            Boolean array of size self.AgentCount indicating which agents should be "born".

        Returns
        -------
        None
        '''
        IndShockConsumerType.simBirth(self,which_agents) # Get initial assets and permanent income
        N = np.sum(which_agents)
        base_draws = drawUniform(N,seed=self.RNG.randint(0,2**31-1))
        Cutoffs = np.cumsum(np.array(self.MrkvPrbsInit))
        self.MrkvNow[which_agents] = np.searchsorted(Cutoffs,base_draws).astype(int)
lsimodel.py (project: paragraph2vec, author: thunlp)
def clip_spectrum(s, k, discard=0.001):
    """
    Given eigenvalues `s`, return how many factors should be kept to avoid
    storing spurious (tiny, numerically unstable) values.

    This will ignore the tail of the spectrum with relative combined mass < min(`discard`, 1/k).

    The returned value is clipped against `k` (= never return more than `k`).
    """
    # compute relative contribution of eigenvalues towards the energy spectrum
    rel_spectrum = numpy.abs(1.0 - numpy.cumsum(s / numpy.sum(s)))
    # ignore the last `discard` mass (or 1/k, whichever is smaller) of the spectrum
    small = 1 + len(numpy.where(rel_spectrum > min(discard, 1.0 / k))[0])
    k = min(k, small) # clip against k
    logger.info("keeping %i factors (discarding %.3f%% of energy spectrum)" %
                (k, 100 * rel_spectrum[k - 1]))
    return k
plotting.py (project: financial_life, author: MartinPyka)
def plot_stack_cum(semantic, *reports, color_theme = C_cold_colors, color_offset = 0):
    """ Creates a stacked plot with cumulated sums for a given semantic """ 
    X, Y, c = extract_data(semantic, *reports, color_theme = color_theme, color_offset = color_offset)
    Y = remove_nones(Y)
    # add zeros only at the beginning    
    for x in X:
        x.insert(0, x[0] - 1)

    for data in Y:
        for d in data:
            d.insert(0, 0.)

    # create the cumulated sum
    Y = [np.cumsum(np.array(yi), 1) if yi else [] for yi in Y]

    plot_stack_generic(X, Y, c, semantic, *reports, color_theme = color_theme, color_offset = color_offset)
    legend(loc = 'upper left', fancybox=True, framealpha=0.4, prop={'size':10})
vote.py (project: srep, author: Answeror)
def get_segment_vote_accuracy(segment_label, segment_predictions, window):
    def gen():
        count = {
            label: np.hstack([[0], np.cumsum(segment_predictions == label)])
            for label in set(segment_predictions)
        }
        tmp = window
        if tmp == -1:
            tmp = len(segment_predictions)
        tmp = min(tmp, len(segment_predictions))
        for begin in range(len(segment_predictions) - tmp + 1):
            yield segment_label == max(
                count,
                key=lambda label: count[label][begin + tmp] - count[label][begin]
            ), segment_label
    return list(gen())
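The np.hstack([[0], np.cumsum(...)]) arrays make every window's label count an O(1) lookup: the number of occurrences of a label in predictions[begin:end] is count[label][end] - count[label][begin]. The prefix-count trick in isolation:

import numpy as np

predictions = np.array([0, 1, 1, 0, 1])
count = np.hstack([[0], np.cumsum(predictions == 1)])  # [0, 0, 1, 2, 2, 3]
begin, end = 1, 4
assert count[end] - count[begin] == 2   # two 1s in predictions[1:4]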
reademc.py (project: Dragonfly, author: duaneloh)
def _parse_headers(self):
        self.num_data_list = []
        self.ones_accum_list = []
        self.multi_accum_list = []
        self.num_pix = []

        for i, photons_file in enumerate(self.photons_list):
            with open(photons_file, 'rb') as f:
                num_data = np.fromfile(f, dtype='i4', count=1)[0]
                self.num_pix.append(np.fromfile(f, dtype='i4', count=1)[0])
                if self.num_pix[i] != len(self.geom_list[i].x):
                    sys.stderr.write('Warning: num_pix for %s is different (%d vs %d)\n' % (photons_file, self.num_pix[i], len(self.geom_list[i].x)))
                f.seek(1024, 0)
                ones = np.fromfile(f, dtype='i4', count=num_data)
                multi = np.fromfile(f, dtype='i4', count=num_data)
            self.num_data_list.append(num_data)
            self.ones_accum_list.append(np.cumsum(ones))
            self.multi_accum_list.append(np.cumsum(multi))

        self.num_data_list = np.cumsum(self.num_data_list)
        self.num_frames = self.num_data_list[-1]
run_wei.py (project: nanopores, author: mitschabaude)
def fit_koff(nmax=523, NN=4e8, **params):
    tbind = params.pop("tbind")
    params["kd"] = 1e9/tbind
    dx = params.pop("dx")
    rw = randomwalk.get_rw(NAME, params, setup=setup_rw, calc=True)
    rw.domains[1].dx = dx
    times = draw_empirically(rw, N=NN, nmax=nmax, success=False)
    bins = np.logspace(np.log10(min(times)), np.log10(max(times)), 35)
    #bins = np.logspace(-3., 2., 35)
    hist, _ = np.histogram(times, bins=bins)
    cfd = np.cumsum(hist)/float(np.sum(hist))
    t = 0.5*(bins[:-1] + bins[1:])
    tmean = times.mean()
    toff = NLS(t, cfd, t0=tmean)
    koff = 1./toff
    return dict(t=t, cfd=cfd, toff=toff, tmean=tmean, koff=koff)
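cfd here is the empirical CDF of the binding times, built by cumulating histogram counts and normalizing by the total. The same construction in isolation, with made-up samples:

import numpy as np

times = np.array([0.5, 1.2, 0.8, 3.0, 2.2, 0.9])
bins = np.logspace(np.log10(times.min()), np.log10(times.max()), 10)
hist, _ = np.histogram(times, bins=bins)
cdf = np.cumsum(hist) / float(np.sum(hist))   # monotone, reaches 1.0 in the last bin
t = 0.5 * (bins[:-1] + bins[1:])              # bin midpoints to plot the cdf against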

##### run rw in collect mode and draw bindings from empirical distributions
Parallel_BiGRU.py (project: NANHM-for-GEC, author: shinochin)
def convert(batch, device):
    def to_device_batch(batch):
        if device is None:
            return batch
        elif device < 0:
            return [chainer.dataset.to_device(device, x) for x in batch]
        else:
            xp = cuda.cupy.get_array_module(*batch)
            concat = xp.concatenate(batch, axis=0)
            sections = np.cumsum([len(x) for x in batch[:-1]], dtype='i')
            concat_dev = chainer.dataset.to_device(device, concat)
            batch_dev = cuda.cupy.split(concat_dev, sections)
            return batch_dev

    return {'xs': to_device_batch([x for x, _ in batch]),
            'ys': to_device_batch([y for _, y in batch])}
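The sections array is the running length of all but the last sequence, i.e. the interior boundaries that let split undo the concatenation. The NumPy equivalent of the cupy.split call, with hypothetical sequences:

import numpy as np

batch = [np.arange(3), np.arange(2), np.arange(4)]
concat = np.concatenate(batch, axis=0)
sections = np.cumsum([len(x) for x in batch[:-1]], dtype='i')  # [3, 5]
pieces = np.split(concat, sections)
assert all(np.array_equal(a, b) for a, b in zip(pieces, batch))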
Att_BiGRU.py (project: NANHM-for-GEC, author: shinochin)
def convert(batch, device):
    def to_device_batch(batch):
        if device is None:
            return batch
        elif device < 0:
            return [chainer.dataset.to_device(device, x) for x in batch]
        else:
            xp = cuda.cupy.get_array_module(*batch)
            concat = xp.concatenate(batch, axis=0)
            sections = np.cumsum([len(x) for x in batch[:-1]], dtype='i')
            concat_dev = chainer.dataset.to_device(device, concat)
            batch_dev = cuda.cupy.split(concat_dev, sections)
            return batch_dev

    return {'xs': to_device_batch([x for x, _ in batch]),
            'ys': to_device_batch([y for _, y in batch])}
BiGRU.py (project: NANHM-for-GEC, author: shinochin)
def convert(batch, device):
    def to_device_batch(batch):
        if device is None:
            return batch
        elif device < 0:
            return [chainer.dataset.to_device(device, x) for x in batch]
        else:
            xp = cuda.cupy.get_array_module(*batch)
            concat = xp.concatenate(batch, axis=0)
            sections = np.cumsum([len(x) for x in batch[:-1]], dtype='i')
            concat_dev = chainer.dataset.to_device(device, concat)
            batch_dev = cuda.cupy.split(concat_dev, sections)
            return batch_dev

    return {'xs': to_device_batch([x for x, _ in batch]),
            'ys': to_device_batch([y for _, y in batch])}
JA_Parallel_BiGRU.py (project: NANHM-for-GEC, author: shinochin)
def convert(batch, device):
    def to_device_batch(batch):
        if device is None:
            return batch
        elif device < 0:
            return [chainer.dataset.to_device(device, x) for x in batch]
        else:
            xp = cuda.cupy.get_array_module(*batch)
            concat = xp.concatenate(batch, axis=0)
            sections = np.cumsum([len(x) for x in batch[:-1]], dtype='i')
            concat_dev = chainer.dataset.to_device(device, concat)
            batch_dev = cuda.cupy.split(concat_dev, sections)
            return batch_dev

    return {'xs': to_device_batch([x for x, _ in batch]),
            'ys': to_device_batch([y for _, y in batch])}
LSTM.py (project: NANHM-for-GEC, author: shinochin)
def convert(batch, device):
    def to_device_batch(batch):
        if device is None:
            return batch
        elif device < 0:
            return [chainer.dataset.to_device(device, x) for x in batch]
        else:
            xp = cuda.cupy.get_array_module(*batch)
            concat = xp.concatenate(batch, axis=0)
            sections = np.cumsum([len(x) for x in batch[:-1]], dtype='i')
            concat_dev = chainer.dataset.to_device(device, concat)
            batch_dev = cuda.cupy.split(concat_dev, sections)
            return batch_dev

    return {'xs': to_device_batch([x for x, _ in batch]),
            'ys': to_device_batch([y for _, y in batch])}
Hybrid_BiGRU.py (project: NANHM-for-GEC, author: shinochin)
def convert(batch, device):
    def to_device_batch(batch):
        if device is None:
            return batch
        elif device < 0:
            return [chainer.dataset.to_device(device, x) for x in batch]
        else:
            xp = cuda.cupy.get_array_module(*batch)
            concat = xp.concatenate(batch, axis=0)
            sections = np.cumsum([len(x) for x in batch[:-1]], dtype='i')
            concat_dev = chainer.dataset.to_device(device, concat)
            batch_dev = cuda.cupy.split(concat_dev, sections)
            return batch_dev

    return {'xs': to_device_batch([x for x, _ in batch]),
            'ys': to_device_batch([y for _, y in batch])}

