Python asscalar() example source code
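All of the snippets below are collected from open-source projects and use numpy.asscalar() to convert a size-1 array (or NumPy scalar) into a plain Python scalar. As a quick orientation, here is a minimal sketch of what the call does; note that asscalar() was deprecated in NumPy 1.16 in favor of ndarray.item():

import numpy as np

a = np.array([3.14])

x = np.asscalar(a)   # 3.14 as a built-in Python float (deprecated since NumPy 1.16)
y = a.item()         # preferred modern equivalent, same result

print(type(x), type(y))  # <class 'float'> <class 'float'>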

museProc_tenere.py (project: Interactivity, author: treeoftenere)
def _send_sparseOutput(self, output, timestamp, name):
        for out in self._sparseOutput_threads:
            if isinstance(out, str):  # LSL outlet
                raise NotImplementedError
            else:  # OSC output stream
                if USE_LIBLO:
                    if np.array(output).size == 1:
                        new_output = [('f', np.asscalar(output))]
                        message = Message('/{}'.format(name), *new_output)
                    else:
                        new_output = [('f', x) for x in output[:]]
                        message = Message('/{}'.format(name), *new_output)
                    #send(out, Bundle(timestamp, message))
                    send(out, message)
                else:
                    raise NotImplementedError
            if self.verbose:
                print('sparseOutput: {}'.format(output))
unscented.py (project: bayestsa, author: thalesians)
def predict(self):
        try:
            X, Wm, Wc = sigmaPoints(self.xa, self.Pa)
        except Exception:
            warnings.warn('Encountered a matrix that is not positive definite in the sigma points calculation at the predict step')
            self.Pa = nearpd(self.Pa)
            X, Wm, Wc = sigmaPoints(self.xa, self.Pa)
        fX, x, Pxx = unscentedTransform(X, Wm, Wc, self.fa)
        x = np.asscalar(x)
        Pxx = np.asscalar(Pxx)

        Pxv = 0.
        N = np.shape(X)[1]
        for j in range(0, N):
            Pxv += Wc[j] * fX[0,j] * X[3,j]

        self.xa = np.array( ((x,), (0.,), (0.,), (0.,)) )
        self.Pa = np.array( ((Pxx, Pxv   , 0.      , 0.      ),
                             (Pxv, self.R, 0.      , 0.      ),
                             (0. , 0.    , self.Q  , self.cor),
                             (0. , 0.    , self.cor, self.R  )) )
saliency.py (project: DeepLearning_PlantDiseases, author: MarkoArsenovic)
def Saliency_map(image,model,preprocess,ground_truth,use_gpu=False,method=util.GradType.GUIDED):
    vis_param_dict['method'] = method
    img_tensor = preprocess(image)
    img_tensor.unsqueeze_(0)
    if use_gpu:
        img_tensor=img_tensor.cuda()
    input = Variable(img_tensor,requires_grad=True)

    if  input.grad is not None:
        input.grad.data.zero_()

    model.zero_grad()
    output = model(input)
    ind=torch.LongTensor(1)
    if isinstance(ground_truth, np.int64):
        ground_truth=np.asscalar(ground_truth)
    ind[0]=ground_truth
    ind=Variable(ind)
    energy=output[0,ground_truth]
    energy.backward() 
    grad=input.grad
    if use_gpu:
        return np.abs(grad.data.cpu().numpy()[0]).max(axis=0)
    return np.abs(grad.data.numpy()[0]).max(axis=0)
_tuningcurve.py (project: nelpy, author: nelpy)
def mean(self,*,axis=None):
        """Returns the mean of firing rate (in Hz).
        Parameters
        ----------
        axis : int, optional
            When axis is None, the global mean firing rate is returned.
            When axis is 0, the mean firing rates across units, as a
            function of the external correlate (e.g. position) are
            returned.
            When axis is 1, the mean firing rate for each unit is
            returned.
        Returns
        -------
        mean :
        """
        means = np.mean(self.ratemap, axis=axis).squeeze()
        if means.size == 1:
            return np.asscalar(means)
        return means
_tuningcurve.py (project: nelpy, author: nelpy)
def max(self,*,axis=None):
        """Returns the mean of firing rate (in Hz).
        Parameters
        ----------
        axis : int, optional
            When axis is None, the global mean firing rate is returned.
            When axis is 0, the mean firing rates across units, as a
            function of the external correlate (e.g. position) are
            returned.
            When axis is 1, the mean firing rate for each unit is
            returned.
        Returns
        -------
        mean :
        """
        maxes = np.max(self.ratemap, axis=axis).squeeze()
        if maxes.size == 1:
            return np.asscalar(maxes)
        return maxes
_tuningcurve.py (project: nelpy, author: nelpy)
def min(self,*,axis=None):
        """Returns the mean of firing rate (in Hz).
        Parameters
        ----------
        axis : int, optional
            When axis is None, the global mean firing rate is returned.
            When axis is 0, the mean firing rates across units, as a
            function of the external correlate (e.g. position) are
            returned.
            When axis is 1, the mean firing rate for each unit is
            returned.
        Returns
        -------
        mean :
        """
        mins = np.min(self.ratemap, axis=axis).squeeze()
        if mins.size == 1:
            return np.asscalar(mins)
        return mins
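The axis semantics described in the three docstrings above can be illustrated with plain NumPy. The sketch below uses a hypothetical 2-unit by 3-bin ratemap and NumPy reductions directly, not nelpy's actual API:

import numpy as np

# hypothetical ratemap: 2 units (rows) x 3 external-correlate bins (columns)
ratemap = np.array([[1.0, 2.0, 3.0],
                    [4.0, 5.0, 6.0]])

print(np.mean(ratemap))              # axis=None -> global mean, 3.5
print(np.mean(ratemap, axis=0))      # across units, per bin -> [2.5 3.5 4.5]
print(np.mean(ratemap, axis=1))      # per unit -> [2. 5.]
print(np.max(ratemap, axis=1))       # maximum rate of each unit -> [3. 6.]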
qlearner.py (project: malmo-challenge, author: Kaixhin)
def inject_summaries(self, idx):
        if len(self._stats_mean_qvalues) > 0:
            self.visualize(idx, "%s/episode mean q" % self.name,
                           np.asscalar(np.mean(self._stats_mean_qvalues)))
            self.visualize(idx, "%s/episode mean stddev.q" % self.name,
                           np.asscalar(np.mean(self._stats_stddev_qvalues)))

        if len(self._stats_loss) > 0:
            self.visualize(idx, "%s/episode mean loss" % self.name,
                           np.asscalar(np.mean(self._stats_loss)))

        if len(self._stats_rewards) > 0:
            self.visualize(idx, "%s/episode mean reward" % self.name,
                           np.asscalar(np.mean(self._stats_rewards)))

            # Reset
            self._stats_mean_qvalues = []
            self._stats_stddev_qvalues = []
            self._stats_loss = []
            self._stats_rewards = []
qlearner.py (project: malmo-challenge, author: Microsoft)
def inject_summaries(self, idx):
        if len(self._stats_mean_qvalues) > 0:
            self.visualize(idx, "%s/episode mean q" % self.name,
                           np.asscalar(np.mean(self._stats_mean_qvalues)))
            self.visualize(idx, "%s/episode mean stddev.q" % self.name,
                           np.asscalar(np.mean(self._stats_stddev_qvalues)))

        if len(self._stats_loss) > 0:
            self.visualize(idx, "%s/episode mean loss" % self.name,
                           np.asscalar(np.mean(self._stats_loss)))

        if len(self._stats_rewards) > 0:
            self.visualize(idx, "%s/episode mean reward" % self.name,
                           np.asscalar(np.mean(self._stats_rewards)))

            # Reset
            self._stats_mean_qvalues = []
            self._stats_stddev_qvalues = []
            self._stats_loss = []
            self._stats_rewards = []
sp_facade.py (project: nupic-history-server, author: htm-community)
def _conjurePotentialPools(self, **kwargs):
    # These only need to be fetched from the SP once.
    if self._potentialPools:
      return self._potentialPools
    sp = self._sp
    out = []
    for colIndex in range(0, sp.getNumColumns()):
      columnPool = self._getZeroedInput()
      columnPoolIndices = []
      sp.getPotential(colIndex, columnPool)
      for i, pool in enumerate(columnPool):
        if np.asscalar(pool) == 1.0:
          columnPoolIndices.append(i)
      out.append(columnPoolIndices)
    self._potentialPools = out
    return out
color_diff.py (project: MagicWand, author: GianlucaSilvestri)
def delta_e_cie1994(color1, color2, K_L=1, K_C=1, K_H=1, K_1=0.045, K_2=0.015):
    """
    Calculates the Delta E (CIE1994) of two colors.

    K_1:
      0.045 graphic arts
      0.048 textiles
    K_2:
      0.015 graphic arts
      0.014 textiles
    K_L:
      1 default
      2 textiles
    """

    color1_vector = _get_lab_color1_vector(color1)
    color2_matrix = _get_lab_color2_matrix(color2)
    delta_e = color_diff_matrix.delta_e_cie1994(
        color1_vector, color2_matrix, K_L=K_L, K_C=K_C, K_H=K_H, K_1=K_1, K_2=K_2)[0]
    return numpy.asscalar(delta_e)
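A hedged usage sketch for the function above, assuming colormath-style LabColor inputs (the helper _get_lab_color1_vector expects Lab color objects); asscalar() is what turns the 1-element result array into a plain float:

from colormath.color_objects import LabColor

color1 = LabColor(lab_l=50.0, lab_a=2.5, lab_b=-10.0)
color2 = LabColor(lab_l=52.0, lab_a=3.0, lab_b=-9.0)

de = delta_e_cie1994(color1, color2)
print(de)  # a plain Python float, not a 1-element numpy array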


parameters.py (project: mushroom, author: carloderamo)
def get_value(self, *args, **kwargs):
        if len(args) == 2:
            gradient = args[0]
            nat_gradient = args[1]
            tmp = np.asscalar(gradient.dot(nat_gradient))
            lambda_v = np.sqrt(tmp / (4. * self._eps))
            # For numerical stability
            lambda_v = max(lambda_v, 1e-8)
            step_length = 1. / (2. * lambda_v)

            return step_length
        elif len(args) == 1:
            return self.get_value(args[0], args[0], **kwargs)
        else:
            raise ValueError('Adaptive parameters need a gradient, or a '
                             'gradient and natural gradient')
sequence_decoder_actor_learner.py (project: tensorflow-rl, author: steveKapturowski)
def choose_next_action(self, state):
        network_output_v, network_output_pi, action_repeat_probs = self.session.run(
            [
                self.local_network.output_layer_v,
                self.local_network.output_layer_pi,
                self.local_network.action_repeat_probs,
            ],
            feed_dict={
                self.local_network.input_ph: [state],
            })

        network_output_pi = network_output_pi.reshape(-1)
        network_output_v = np.asscalar(network_output_v)

        action_index = self.sample_policy_action(network_output_pi)
        new_action = np.zeros([self.num_actions])
        new_action[action_index] = 1

        action_repeat = 1 + self.sample_policy_action(action_repeat_probs[0])

        return new_action, network_output_v, network_output_pi, action_repeat
actual.py (project: AnswerClassify, author: kenluck2001)
def enspredict (Xval, indices):
    '''
        blend models using majority voting scheme
    '''
    totalLabelist = []
    for ind in range (len(Xval)):
        labelist = []
        for model in featureSelectModel:
            label = model.predict( Xval[:, indices ][ind].reshape(1, -1) )
            labelist.append (np.asscalar (label) )

        for model in wholeFeatureModel:
            label = model.predict( Xval[ind].reshape(1, -1) )
            labelist.append (np.asscalar (label) )


        votedLabel = max ( set (labelist), key=labelist.count  )
        totalLabelist.append (votedLabel)

    return totalLabelist
dataio.py (project: lexdecomp, author: mcrisc)
def question_batches(data_file):
    """Iterates over a dataset returning batches composed by a single question
    and its candidate answers.

    :data_file: a HDF5 file object holding the dataset
    :returns: a DataSet namedtuple of arrays (questions, sentences, labels).
    """
    n_questions = np.asscalar(data_file['metadata/questions/count'][...])
    questions_ds = data_file['data/questions']
    sentences_ds = data_file['data/sentences']

    for i in range(n_questions):
        row_labels = data_file['data/labels/q%d' % i][...]
        labels = row_labels[:, 1]
        rows = row_labels[:, 0]
        questions = questions_ds[rows, ...]
        sentences = sentences_ds[rows, ...]
        yield DataSet(questions, sentences, labels)
optimize.py (project: optimize-stencil, author: Ablinne)
def _optimize_single(self, x0):
        x0 = list(x0)

        if x0[0] is None:
            x0[0] = 0
            dt_ok = np.asscalar(self.dispersion.dt_ok(x0))
            if dt_ok < 0:
                # Initial conditions violate constraints, reject
                return x0, None, float('inf')

            x0[0] = dt_ok
            x0[0] = min(x0[0], self.dtmax)
            x0[0] = max(x0[0], self.dtmin)

        x0 = np.asfarray(x0)

        stencil_ok = self.dispersion.stencil_ok(x0)
        if stencil_ok < 0:
            # Initial conditions violate constraints, reject
            return x0, None, float('inf')

        res = scop.minimize(self.dispersion.norm, x0, method='SLSQP', constraints = self.constraints, options = dict(disp=False, iprint = 2))
        norm = self.dispersion_high.norm(res.x)

        return x0, res, norm
redsequence.py (project: redmapper, author: erykoff)
def zindex(self,z):
        """
        redshift index lookup

        parameters
        ----------
        z: array of floats

        returns
        -------
        indices: array of integers
            redshift indices

        """
        # return the z index/indices with rounding.

        zind = np.searchsorted(self.zinteger,np.round(np.atleast_1d(z)*self.zbinscale).astype(np.int64))
        if (zind.size == 1):
            return np.asscalar(zind)
        else:
            return zind

        # and check for top overflows.  Minimum is always 0
        #test,=np.where(zind == self.z.size)
        #if (test.size > 0): zind[test] = self.z.size-1
redsequence.py (project: redmapper, author: erykoff)
def refmagindex(self,refmag):
        """
        reference magnitude index lookup

        parameters
        ----------
        refmag: array of floats

        returns
        -------
        indices: array of integers
            refmag indices
        """
        # return the refmag index/indices with rounding

        refmagind = np.searchsorted(self.refmaginteger,np.round(np.atleast_1d(refmag)*self.refmagbinscale).astype(np.int64))
        if (refmagind.size == 1):
            return np.asscalar(refmagind)
        else:
            return refmagind
redsequence.py (project: redmapper, author: erykoff)
def lumrefmagindex(self,lumrefmag):
        """
        luminosity reference magnitude index lookup

        parameters
        ----------
        lumrefmag: array of floats

        returns
        -------
        indices: array of integers
            lumrefmag indices

        """

        lumrefmagind = np.searchsorted(self.lumrefmaginteger,np.round(np.atleast_1d(lumrefmag)*self.refmagbinscale).astype(np.int64))
        if (lumrefmagind.size == 1):
            return np.asscalar(lumrefmagind)
        else:
            return lumrefmagind
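The three lookup methods above share the same pattern: scale the values to integers, round, and searchsorted, with asscalar() used to return a plain int when the input is a scalar. A standalone sketch of that pattern, using a hypothetical bin grid and scale factor rather than redmapper's actual configuration:

import numpy as np

zbinscale = 10000                              # hypothetical scale factor
z_bins = np.arange(0.05, 0.60, 0.01)           # hypothetical redshift bin centers
zinteger = np.round(z_bins * zbinscale).astype(np.int64)

def zindex(z):
    zind = np.searchsorted(zinteger,
                           np.round(np.atleast_1d(z) * zbinscale).astype(np.int64))
    return np.asscalar(zind) if zind.size == 1 else zind

print(zindex(0.2349))          # plain Python int for scalar input
print(zindex([0.10, 0.30]))    # array of indices for array input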
utils.py (project: DistributedES, author: ShangtongZhang)
def __call__(self, o_):
        if np.isscalar(o_):
            o = torch.FloatTensor([o_])
        else:
            o = torch.FloatTensor(o_)
        self.online_stats.feed(o)
        if self.offline_stats.n[0] == 0:
            return o_
        std = (self.offline_stats.v + 1e-6) ** .5
        o = (o - self.offline_stats.m) / std
        o = o.numpy()
        if np.isscalar(o_):
            o = np.asscalar(o)
        else:
            o = o.reshape(o_.shape)
        return o
kalmanfilter.py (project: opentrack-prototyping, author: DaMichel)
def calculateQ(self, k):
      Q = M(self.Q(k))
      R = M(self.kf.R(k))
      H = M(self.kf.H(k))
      D = M(self.D[k-1])

      alpha = np.trace(D - R) / np.trace(H * M(self.kf.Pminus[k-1]) * H.T)
      alpha = np.asscalar(alpha)
      if np.isfinite(alpha) and alpha>0:
          alpha = np.power(alpha, self.exponent)
          alpha = max(0.0001, min(alpha, 1000.*mt.trace(R) / mt.trace(Q)))
      else:
          alpha = 0.0001
      Q = Q * alpha
      self.alpha[k] = alpha
      return Q
normalizer.py (project: DeepRL, author: ShangtongZhang)
def __call__(self, o_):
        if np.isscalar(o_):
            o = torch.FloatTensor([o_])
        else:
            o = torch.FloatTensor(o_)
        self.online_stats.feed(o)
        if self.offline_stats.n[0] == 0:
            return o_
        std = (self.offline_stats.v + 1e-6) ** .5
        o = (o - self.offline_stats.m) / std
        o = o.numpy()
        if np.isscalar(o_):
            o = np.asscalar(o)
        else:
            o = o.reshape(o_.shape)
        return o
CrosspropLearner.py (project: Crossprop, author: ShangtongZhang)
def learn(self, target, epoch=None):
        BasicLearner.learn(self, target, epoch)
        error = np.asscalar(target - self.y)

        self.beta += self.theta * error * self.h * np.asarray(self.phi).flatten()
        self.alpha = np.exp(self.beta)

        self.W += np.matrix(self.alpha * error * np.asarray(self.phi).flatten()).T
        self.U += self.stepSize * self.m

        phi_2 = np.asarray(np.power(self.phi, 2)).flatten()
        m_decay = 1 - self.theta * np.power(self.h, 2) * phi_2
        m_delta = error * self.h * np.asarray(self.X.T * self.gradientAct(self.phi, self.net))
        self.m = m_decay * self.m + self.theta * m_delta

        h_decay = 1 - self.alpha * phi_2
        h_delta = error * self.alpha * np.asarray(self.phi).flatten()
        self.h = h_decay * self.h + h_delta

        return 0.5 * error * error

# TODO: refactor classification learner
save_utils.py (project: regionmask, author: mathause)
def _dcoord(coord):
    """determine the spacing of a coordinate"""

    coord = np.array(coord)

    if coord.ndim > 1:
        msg = 'Only 1D coordinates are supported'
        raise AssertionError(msg)

    dcoord = np.unique(np.round(np.diff(coord), 4))

    # irregularly spaced
    if dcoord.size > 1:
        dcoord_str = 'irr'
    # regularly spaced
    else:
        dcoord_str = '{:0.2f}'.format(np.asscalar(dcoord))

    return dcoord_str
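A short usage sketch for _dcoord (assuming it is imported from this module); the asscalar() call unwraps the single unique spacing before formatting:

print(_dcoord([0.0, 0.5, 1.0, 1.5]))   # regularly spaced   -> '0.50'
print(_dcoord([0.0, 0.5, 1.1, 1.4]))   # irregularly spaced -> 'irr'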
qlearner.py (project: malmo-challenge, author: rhaps0dy)
def inject_summaries(self, idx):
        if len(self._stats_mean_qvalues) > 0:
            self.visualize(idx, "%s/episode mean q" % self.name,
                           np.asscalar(np.mean(self._stats_mean_qvalues)))
            self.visualize(idx, "%s/episode mean stddev.q" % self.name,
                           np.asscalar(np.mean(self._stats_stddev_qvalues)))

        if len(self._stats_loss) > 0:
            self.visualize(idx, "%s/episode mean loss" % self.name,
                           np.asscalar(np.mean(self._stats_loss)))

        if len(self._stats_rewards) > 0:
            self.visualize(idx, "%s/episode mean reward" % self.name,
                           np.asscalar(np.mean(self._stats_rewards)))

            # Reset
            self._stats_mean_qvalues = []
            self._stats_stddev_qvalues = []
            self._stats_loss = []
            self._stats_rewards = []
Irwin.py (project: irwin, author: clarkerubber)
def buildPlayerGameActivationsTable(self, model=None):
    if model is None:
      print("using default model")
      model = self.narrowGameModel.model()
    print("getting players")
    engines = self.env.playerDB.byEngine(True)
    legits = self.env.playerDB.byEngine(False)

    print("got " + str(len(engines + legits)) + " players")

    playerGameActivations = []

    for player in engines + legits:
      print("predicting " + player.id)
      gs = GameAnalysisStore.new()
      gs.addGameAnalyses(self.env.gameAnalysisDB.byUserId(player.id))
      predictions = self.predict(gs.gameAnalysisTensors(), model)
      playerGameActivations.append(PlayerGameActivations(player.id, player.engine, [int(100*np.asscalar(p[0][0][0])) for p in predictions]))

    print("writing to DB")
    self.env.playerGameActivationsDB.lazyWriteMany(playerGameActivations)
api.py (project: pyculib, author: numba)
def rotg(self, a, b):
        '''Compute the Givens rotation matrix for the column vector (a, b).
        Returns r, z, c, s.

        r: r = sqrt(a ** 2 + b ** 2).

        z: Use to recover c and s.

        if abs(z) < 1:
            c, s = sqrt(1 - z ** 2), z
        elif abs(z) == 1:
            c, s = 0, 1
        else:
            c, s = 1 / z, sqrt(1 - (1 / z) ** 2)

        c: Cosine element of the rotation matrix.

        s: Sine element of the rotation matrix.
        '''
        a, b = np.asarray(a), np.asarray(b)
        _sentry_same_dtype(a, b)
        fn = self._dispatch(self.rotg.vtable, a.dtype)
        return fn(np.asscalar(a), np.asscalar(b))
saver.py (project: rnnlab, author: phueb)
def calc_ba_data(self, probe_simmat, multi_probe_list):
        # make thr range
        probe_simmat_mean = np.asscalar(np.mean(probe_simmat))
        thr1 = max(0.0, round(min(0.9, round(probe_simmat_mean, 2)) - 0.1, 2))  # don't change
        thr2 = round(thr1 + 0.2, 2)
        # use bayes optimization to find best_thr
        if SaverConfigs.PRINT_BAYES_OPT:
            print('Finding best thresholds between {} and {} using bayesian-optimization...'.format(thr1, thr2))
        gp_params = {"alpha": 1e-5, "n_restarts_optimizer": 2}
        func_to_be_opt = partial(self.calc_probe_ba_list, probe_simmat, multi_probe_list, True)
        bo = BayesianOptimization(func_to_be_opt, {'thr': (thr1, thr2)}, verbose=SaverConfigs.PRINT_BAYES_OPT)
        bo.explore({'thr': [probe_simmat_mean]})
        bo.maximize(init_points=2, n_iter=SaverConfigs.NUM_BAYES_STEPS,
                    acq="poi", xi=0.001, **gp_params)  # smaller xi: exploitation
        best_thr = bo.res['max']['max_params']['thr']
        # calc probe_ba_list with best_thr
        probe_ba_list = self.calc_probe_ba_list(probe_simmat, multi_probe_list, False, best_thr)
        probe_ba_list = np.multiply(probe_ba_list, 100).tolist()
        # make avg_probe_ba_list
        avg_probe_ba_list = pd.DataFrame(
            data={'probe': multi_probe_list,
                  'probe_ba': probe_ba_list}).groupby('probe').mean()['probe_ba'].values.tolist()
        return probe_ba_list, avg_probe_ba_list
model.py (project: rnnlab, author: phueb)
def make_phrase_pps(self,
                        terms):
        print('Making phrase_pps...')
        terms = ['PERIOD'] + terms  # to get pp value for very first term
        num_terms = len(terms)
        task_id = 0
        pps = []
        for n in range(num_terms):
            term_ids = [self.terms.item_id_dict[term] for term in
                        terms[:n + 2]]  # add two to compensate for 0-index and y
            window_mat = np.asarray(term_ids)[:, np.newaxis][-self.bptt_steps:].T
            x, y = np.split(window_mat, [-1], axis=1)
            x2 = np.tile(np.eye(GlobalConfigs.NUM_TASKS)[task_id], [1, x.shape[1], 1])
            feed_dict = {self.graph.x: x, self.graph.x2: x2, self.graph.y: y}
            pp = np.asscalar(self.sess.run(self.graph.pps, feed_dict=feed_dict))
            pps.append(pp)
        pps = pps[:-1]  # compensate for PERIOD insertion
        return pps
lt_model.py (project: latenttrees, author: kaltwang)
def init_distrib_idx(self, distrib, idx=None):
        assert isinstance(distrib, DistribGauss)
        x = distrib.get_mu()
        if idx is None:
            # initialize prior and thus average over all cases
            mu = np.nanmean(x, axis=0, keepdims=True)
        else:
            # select cases idx
            mu = x[idx, :]
            idx_nan = np.isnan(mu)
            if np.any(idx_nan):
                # we need to randomly select new values for all NaNs
                idx_good = np.ones_like(idx, dtype=bool)
                idx_good[idx, :] = False
                idx_good[np.isnan(x)] = False
                x_good = x[idx_good, :]
                num_nan = np.count_nonzero(idx_nan)
                mu[idx_nan] = np.random.choice(x_good, num_nan, replace=False)
            mu = np.copy(mu)  # make sure to not overwrite data

        std = np.empty_like(mu)
        std.fill(np.asscalar(np.nanstd(x)))
        self.init_data(mu, std)
police_base.py (project: gym-sandbox, author: suqi)
def _police_move_by_continous_angle(self, police_list, police_actions):
        # Accept a continuous move action, which is more suitable for MADDPG
        # move angle (0~2pi)
        police_actions = np.clip(police_actions, 0, 2*np.pi)

        police_new_loc = police_list.copy()
        police_speed = self.teams['police']['speed']
        for _i, _a in enumerate(police_actions):
            _a = np.asscalar(_a)  # transform array to scalar
            action_dir = np.array([np.cos(_a), np.sin(_a)])
            police_dir = action_dir * police_speed
            _p = police_list[_i]
            _p = (_p[0] + police_dir[0], _p[1] + police_dir[1])
            _p = self.ensure_inside(_p)
            police_new_loc[_i] = _p

        return police_new_loc
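The core of the per-officer update above is turning a scalar heading into a displacement vector; a minimal standalone version of that step, with a hypothetical speed value rather than the environment's actual team config:

import numpy as np

angle = np.array([0.5 * np.pi])            # one action from the policy, shape (1,)
speed = 2.0                                # hypothetical police speed

a = np.asscalar(angle)                     # 1-element array -> plain Python float
step = np.array([np.cos(a), np.sin(a)]) * speed
print(step)                                # approximately [0., 2.]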

