Example source code for Python's asanyarray()

kshape.py (project: rca-evaluation, author: sieve-microservices)
def roll_zeropad(a, shift, axis=None):
    """Roll array elements along `axis` by `shift`, filling the vacated positions with zeros instead of wrapping."""
    a = np.asanyarray(a)
    if shift == 0: return a
    if axis is None:
        n = a.size
        reshape = True
    else:
        n = a.shape[axis]
        reshape = False
    if np.abs(shift) > n:
        res = np.zeros_like(a)
    elif shift < 0:
        shift += n
        zeros = np.zeros_like(a.take(np.arange(n-shift), axis))
        res = np.concatenate((a.take(np.arange(n-shift,n), axis), zeros), axis)
    else:
        zeros = np.zeros_like(a.take(np.arange(n-shift,n), axis))
        res = np.concatenate((zeros, a.take(np.arange(n-shift), axis)), axis)
    if reshape:
        return res.reshape(a.shape)
    else:
        return res
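For context, a small usage sketch (made-up values) contrasting this zero-padded roll with np.roll, which wraps elements around instead; it assumes the roll_zeropad definition above and numpy imported as np:

import numpy as np

a = np.array([1, 2, 3, 4, 5])
print(roll_zeropad(a, 2))   # [0 0 1 2 3] -- vacated positions are zero-filled
print(np.roll(a, 2))        # [4 5 1 2 3] -- np.roll wraps around instead
print(roll_zeropad(a, -2))  # [3 4 5 0 0]
print(roll_zeropad(a, 7))   # [0 0 0 0 0] -- |shift| > len(a) zeroes everything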
sklearnmape.py (project: Supply-demand-forecasting, author: LevinJ)
def mean_absolute_percentage_error(y_true, y_pred): 
    """
    Use of this metric is not recommended; for illustration only. 
    See other regression metrics on sklearn docs:
      http://scikit-learn.org/stable/modules/classes.html#regression-metrics
    Use like any other metric:
    >>> y_true = [3, -0.5, 2, 7]; y_pred = [2.5, -0.3, 2, 8]
    >>> mean_absolute_percentage_error(y_true, y_pred)
    Out[]: 0.17738095238095238  # returned as a fraction, not a percentage
    """
    y_true = np.asanyarray(y_true)
    y_pred = np.asanyarray(y_pred)
    assert_all_finite(y_true)
    assert_all_finite(y_pred)
    #Filter zero values in y_true
    sel = (y_true != 0)
    y_true = y_true[sel]
    y_pred = y_pred[sel]
    ## Note: does not handle mix 1d representation
    #if _is_1d(y_true): 
    #    y_true, y_pred = _check_1d_array(y_true, y_pred)
#     return np.abs((y_true - y_pred) / y_true.astype(np.float32)).sum()/float(district_num * dateslot_num)
    return np.mean(np.abs((y_true - y_pred) / y_true.astype(np.float32)))
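A quick hedged check of the zero-filtering behaviour (illustrative numbers only; assumes numpy as np and sklearn's assert_all_finite are imported as in the original module): entries where y_true == 0 are dropped before averaging, and the result is a fraction rather than a percentage.

y_true = [3.0, 0.0, 2.0, 7.0]   # the zero entry is filtered out
y_pred = [2.5, 1.0, 2.0, 8.0]
print(mean_absolute_percentage_error(y_true, y_pred))  # mean of 0.5/3, 0/2, 1/7 ~= 0.1032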
core.py (project: radar, author: amoose136)
def compressed(x):
    """
    Return all the non-masked data as a 1-D array.

    This function is equivalent to calling the "compressed" method of a
    `MaskedArray`, see `MaskedArray.compressed` for details.

    See Also
    --------
    MaskedArray.compressed
        Equivalent method.

    """
    if not isinstance(x, MaskedArray):
        x = asanyarray(x)
    return x.compressed()
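For reference, a minimal example (arbitrary data) of what this wrapper returns for a masked and a plain array, using the public numpy.ma.compressed that wraps the same logic:

import numpy.ma as ma

x = ma.array([1, 2, 3, 4], mask=[0, 1, 0, 1])
print(ma.compressed(x))                 # [1 3] -- masked entries are dropped
print(ma.compressed([[1, 2], [3, 4]]))  # [1 2 3 4] -- plain input is flattened, nothing masked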
classification.py (project: brainiak, author: brainiak)
def example_of_aggregating_sim_matrix(raw_data, labels, num_subjects, num_epochs_per_subj):
    # aggregate the kernel matrix to save memory
    svm_clf = svm.SVC(kernel='precomputed', shrinking=False, C=1)
    clf = Classifier(svm_clf, num_processed_voxels=1000, epochs_per_subj=num_epochs_per_subj)
    rearranged_data = raw_data[num_epochs_per_subj:] + raw_data[0:num_epochs_per_subj]
    rearranged_labels = labels[num_epochs_per_subj:] + labels[0:num_epochs_per_subj]
    clf.fit(list(zip(rearranged_data, rearranged_data)), rearranged_labels,
            num_training_samples=num_epochs_per_subj*(num_subjects-1))
    predict = clf.predict()
    print(predict)
    print(clf.decision_function())
    test_labels = labels[0:num_epochs_per_subj]
    incorrect_predict = hamming(predict, np.asanyarray(test_labels)) * num_epochs_per_subj
    logger.info(
        'when aggregating the similarity matrix to save memory, '
        'the accuracy is %d / %d = %.2f' %
        (num_epochs_per_subj-incorrect_predict, num_epochs_per_subj,
         (num_epochs_per_subj-incorrect_predict) * 1.0 / num_epochs_per_subj)
    )
    # when the kernel matrix is computed in portions, the test data is already included in the classifier, so pass None
    print(clf.score(None, test_labels))
classification.py (project: brainiak, author: brainiak)
def example_of_correlating_two_components(raw_data, raw_data2, labels, num_subjects, num_epochs_per_subj):
    # correlate two different components of the data (no kernel aggregation in this example)
    svm_clf = svm.SVC(kernel='precomputed', shrinking=False, C=1)
    clf = Classifier(svm_clf, epochs_per_subj=num_epochs_per_subj)
    num_training_samples=num_epochs_per_subj*(num_subjects-1)
    clf.fit(list(zip(raw_data[0:num_training_samples], raw_data2[0:num_training_samples])),
            labels[0:num_training_samples])
    X = list(zip(raw_data[num_training_samples:], raw_data2[num_training_samples:]))
    predict = clf.predict(X)
    print(predict)
    print(clf.decision_function(X))
    test_labels = labels[num_training_samples:]
    incorrect_predict = hamming(predict, np.asanyarray(test_labels)) * num_epochs_per_subj
    logger.info(
        'when correlating two components, '
        'the accuracy is %d / %d = %.2f' %
        (num_epochs_per_subj-incorrect_predict, num_epochs_per_subj,
         (num_epochs_per_subj-incorrect_predict) * 1.0 / num_epochs_per_subj)
    )
    # here the kernel matrix is not aggregated, so the test data X is passed explicitly
    print(clf.score(X, test_labels))
classification.py (project: brainiak, author: brainiak)
def example_of_correlating_two_components_aggregating_sim_matrix(raw_data, raw_data2, labels,
                                                                 num_subjects, num_epochs_per_subj):
    # aggregate the kernel matrix to save memory
    svm_clf = svm.SVC(kernel='precomputed', shrinking=False, C=1)
    clf = Classifier(svm_clf, num_processed_voxels=1000, epochs_per_subj=num_epochs_per_subj)
    num_training_samples=num_epochs_per_subj*(num_subjects-1)
    clf.fit(list(zip(raw_data, raw_data2)), labels,
            num_training_samples=num_training_samples)
    predict = clf.predict()
    print(predict)
    print(clf.decision_function())
    test_labels = labels[num_training_samples:]
    incorrect_predict = hamming(predict, np.asanyarray(test_labels)) * num_epochs_per_subj
    logger.info(
        'when correlating two components and aggregating the similarity matrix, '
        'the accuracy is %d / %d = %.2f' %
        (num_epochs_per_subj-incorrect_predict, num_epochs_per_subj,
         (num_epochs_per_subj-incorrect_predict) * 1.0 / num_epochs_per_subj)
    )
    # when the kernel matrix is computed in portions, the test data is already included in the classifier, so pass None
    print(clf.score(None, test_labels))

# python3 classification.py face_scene bet.nii.gz face_scene/prefrontal_top_mask.nii.gz face_scene/fs_epoch_labels.npy
proximal.py (project: l1l2py, author: slipguru)
def _fit(self, X, y, warm_start=None):
        if warm_start is None:
            self.coef_ = np.zeros(X.shape[1])
        else:
            self.coef_ = np.asanyarray(warm_start)

        l1l2_proximal = l1l2_regularization
        self.coef_, self.niter_ = l1l2_proximal(X, y,
                                                self.mu, self.tau,
                                                beta=self.coef_,
                                                kmax=self.max_iter,
                                                tolerance=self.tol,
                                                return_iterations=True,
                                                adaptive=self.adaptive_step_size)

        if self.niter_ == self.max_iter:
            warnings.warn('Objective did not converge, you might want'
                          ' to increase the number of iterations')

        return self
base.py (project: l1l2py, author: slipguru)
def fit(self, X, y, *args, **kwargs):
        X = np.asanyarray(X)
        y = np.asanyarray(y)

        # Centering Data
        X, y, X_offset, y_offset, X_scale, precompute, Xy = \
            _pre_fit(X, y, None, self.precompute, self.normalize,
                     self.fit_intercept, copy=False)

        # Calling the class-specific train method
        self._fit(X, y, *args, **kwargs)

        # Fitting the intercept if required
        self._set_intercept(X_offset, y_offset, X_scale)

        self._trained = True
        return self
double.py (project: l1l2py, author: slipguru)
def fit(self, X, y):
        X = np.asanyarray(X)
        y = np.asanyarray(y)

        # Selection        
        self.selector.fit(X, y)
        self.selected_ = (np.abs(self.selector.coef_) >= self.threshold)

        # Final Estimation
        self.estimator.fit(X[:, self.selected_], y)

        # Coefficients
        self.coef_ = np.zeros_like(self.selector.coef_)
        self.coef_[self.selected_] = self.estimator.coef_
        self.intercept_ = self.estimator.intercept_

        return self
decoding.py (project: nelpy, author: nelpy)
def rmse(predictions, targets):
    """Calculate the root mean squared error of an array of predictions.

    Parameters
    ----------
    predictions : array_like
        Array of predicted values.
    targets : array_like
        Array of target values.

    Returns
    -------
    rmse : float
        Root mean squared error of the predictions with respect to the targets.
    """
    predictions = np.asanyarray(predictions)
    targets = np.asanyarray(targets)
    rmse = np.sqrt(np.nanmean((predictions - targets) ** 2))
    return rmse
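A short usage sketch with made-up numbers; note that np.nanmean silently skips pairs containing NaN, which may or may not be what you want:

import numpy as np

predictions = [2.0, 3.0, np.nan, 5.0]
targets     = [2.5, 3.0, 4.0,    5.0]
print(rmse(predictions, targets))  # sqrt(mean([0.25, 0.0, 0.0])) ~= 0.2887; the NaN pair is ignored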
_tuningcurve.py (project: nelpy, author: nelpy)
def __getitem__(self, *idx):
        """TuningCurve1D index access.

        Accepts integers, slices, and lists"""

        idx = [ii for ii in idx]
        if len(idx) == 1 and not isinstance(idx[0], int):
            idx = idx[0]
        if isinstance(idx, tuple):
            idx = [ii for ii in idx]

        if self.isempty:
            return self
        try:
            out = copy.copy(self)
            out._ratemap = self.ratemap[idx,:]
            out._unit_ids = (np.asanyarray(out._unit_ids)[idx]).tolist()
            out._unit_labels = (np.asanyarray(out._unit_labels)[idx]).tolist()
            return out
        except Exception:
            raise TypeError(
                'unsupported subscripting type {}'.format(type(idx)))
single_File_For_ColorizationModel_For_Not_OOP_Fan.py (project: Deep-learning-Colorization-for-visual-media, author: OmarSayedMostafa)
def ReadNextBatch(): 
    '''Read the next (grey, color) batch and compute the chrominance (AB color-space values) for the color images.

    Return:
     GreyImages_Batch: list of all greyscale images [batch size, 224, 224, 1]
     ColorImages_Batch: list of all color images [batch size, color images]
    '''
    global GreyImages_Batch
    global ColorImages_Batch
    global CurrentBatch_indx
    global Batch_size
    GreyImages_Batch = []
    ColorImages_Batch = []
    for ind in range(Batch_size):
        Colored_img = Image.open(ColorImgsPath + str(CurrentBatch_indx) + '.png')
        ColorImages_Batch.append(Colored_img)
        Grey_img = Image.open(GreyImgsPath + str(CurrentBatch_indx) + '.png')        
        Grey_img = np.asanyarray(Grey_img) 
        img_shape = Grey_img.shape
        img_reshaped = Grey_img.reshape(img_shape[0],img_shape[1], GreyChannels)#[224,224,1]
        GreyImages_Batch.append(img_reshaped)#[#imgs,224,224,1]
        CurrentBatch_indx = CurrentBatch_indx + 1
    Get_Batch_Chrominance() 
    return GreyImages_Batch
owperiodogram.py (project: orange3-timeseries, author: biolab)
def periodogram(self, attr):
        is_equispaced = self.data.time_delta is not None
        if is_equispaced:
            x = np.ravel(self.data.interp(attr))
            periods, pgram = periodogram_equispaced(x)
            # TODO: convert periods into time_values-relative values, i.e.
            # periods *= self.data.time_delta; like lombscargle already does
            # periods *= self.data.time_delta
        else:
            times = np.asanyarray(self.data.time_values, dtype=float)
            x = np.ravel(self.data[:, attr])
            # Since lombscargle works with explicit times,
            # we can skip any nan values
            nonnan = ~np.isnan(x)
            if not nonnan.all():
                x, times = x[nonnan], times[nonnan]

            periods, pgram = periodogram_nonequispaced(times, x)
        return periods, pgram
gauss.py (project: elfi, author: elfi-dev)
def gauss(mu, sigma, n_obs=50, batch_size=1, random_state=None):
    """Sample the 1-D Gaussian distribution.

    Parameters
    ----------
    mu : float, array_like
    sigma : float, array_like
    n_obs : int, optional
    batch_size : int, optional
    random_state : np.random.RandomState, optional

    Returns
    -------
    array_like
        1-D observations.

    """
    # Transforming the arrays' shape to be compatible with batching.
    batches_mu = np.asanyarray(mu).reshape((-1, 1))
    batches_sigma = np.asanyarray(sigma).reshape((-1, 1))

    # Sampling observations.
    y_obs = ss.norm.rvs(loc=batches_mu, scale=batches_sigma,
                        size=(batch_size, n_obs), random_state=random_state)
    return y_obs
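A hedged usage sketch with arbitrary parameters: reshaping mu and sigma to (-1, 1) lets scalars (or per-batch arrays) broadcast against the (batch_size, n_obs) sample; it assumes scipy.stats is imported as ss, as in the original module.

import numpy as np

y = gauss(mu=0.0, sigma=1.0, n_obs=5, batch_size=3,
          random_state=np.random.RandomState(0))
print(y.shape)  # (3, 5): one row of n_obs observations per batch element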
posteriors.py (project: elfi, author: elfi-dev)
def _unnormalized_loglikelihood(self, x):
        x = np.asanyarray(x)
        ndim = x.ndim
        x = x.reshape((-1, self.dim))

        logpdf = -np.ones(len(x)) * np.inf

        logi = self._within_bounds(x)
        x = x[logi, :]
        if len(x) == 0:
            if ndim == 0 or (ndim == 1 and self.dim > 1):
                logpdf = logpdf[0]
            return logpdf

        mean, var = self.model.predict(x)
        logpdf[logi] = ss.norm.logcdf(self.threshold, mean, np.sqrt(var)).squeeze()

        if ndim == 0 or (ndim == 1 and self.dim > 1):
            logpdf = logpdf[0]

        return logpdf
acquisition.py (project: elfi, author: elfi-dev)
def _add_noise(self, x):
        # Add noise for more efficient fitting of GP
        if self.noise_var is not None:
            noise_var = np.asanyarray(self.noise_var)
            if noise_var.ndim == 0:
                noise_var = np.tile(noise_var, self.model.input_dim)

            for i in range(self.model.input_dim):
                std = np.sqrt(noise_var[i])
                if std == 0:
                    continue
                xi = x[:, i]
                a = (self.model.bounds[i][0] - xi) / std
                b = (self.model.bounds[i][1] - xi) / std
                x[:, i] = ss.truncnorm.rvs(
                    a, b, loc=xi, scale=std, size=len(x), random_state=self.random_state)

        return x
core.py (project: krpcScripts, author: jwvanderbeck)
def compressed(x):
    """
    Return all the non-masked data as a 1-D array.

    This function is equivalent to calling the "compressed" method of a
    `MaskedArray`, see `MaskedArray.compressed` for details.

    See Also
    --------
    MaskedArray.compressed
        Equivalent method.

    """
    if not isinstance(x, MaskedArray):
        x = asanyarray(x)
    return x.compressed()
cnn_tensorflow.py (project: SmartSlam, author: Oneiroe)
def evaluate(graph, mels, label, mapping):
    """ Check correctness of a file classification """
    logging.info('Evaluating audio classification')
    audio_feature = np.asanyarray(list(mels.flatten()), dtype=np.float32)

    true_result = mapping[label]

    x = graph.get_tensor_by_name('prefix/input:0')
    y = graph.get_tensor_by_name('prefix/softmax_tensor:0')

    with tf.Session(graph=graph) as sess:
        # Note: we didn't initialize/restore anything, everything is stored in the graph_def
        y_out = sess.run(y, feed_dict={
            x: [audio_feature]
        })

        logging.info('true value:' + str(true_result))
        logging.info('predicted value:' + str(y_out[0].argmax()))
        logging.info('predictions:' + str(y_out))
        # True when the predicted class matches the ground-truth label
        return bool(y_out[0].argmax() == true_result)
_ReaderBase.py (project: dataArtist, author: radjkarl)
def toFloat(self, arr, toFloat=True, forceFloat64=False):
        if hasattr(self, 'preferences'):
            p = self.preferences
            toFloat = p.pToFloat.value()
            forceFloat64 = p.pForceFloat64.value()

        if not toFloat:
            return arr
        arr = np.asanyarray(arr)
        try:
            if forceFloat64:
                dtype = np.float64
            else:
                dtype = {np.dtype('uint8'): np.float32,  # float16 is too coarse and causes NaNs and Infs
                         np.dtype('uint16'): np.float32,
                         np.dtype('uint32'): np.float64,
                         np.dtype('uint64'): np.float64}[arr.dtype]
            return arr.astype(dtype, copy=False)
        except KeyError:
            return arr
dqn_agent_without_ER.py (project: stock_dqn_f, author: wdy06)
def agent_start(self, observation):


        # Initialize State
        self.state = observation
        state_ = np.asanyarray(self.state, dtype=np.float32)

        # Generate an Action e-greedy
        action, Q_now = self.DQN.e_greedy(state_, self.epsilon)
        self.Q_recent = Q_now[0]
        # Update for next step
        self.lastAction = action
        self.last_state = self.state.copy()
        self.last_observation = observation.copy()
        self.max_Q_list.append(np.max(self.Q_recent))

        return action
dqn_agent_nature.py (project: stock_dqn_f, author: wdy06)
def agent_start(self, observation):


        # Initialize State
        self.state = observation
        state_ = cuda.to_gpu(np.asanyarray(self.state, dtype=np.float32),self.gpu_id)

        # Generate an Action e-greedy
        action, Q_now = self.DQN.e_greedy(state_, self.epsilon)
        self.Q_recent = Q_now.get()[0]
        # Update for next step
        self.lastAction = action
        self.last_state = self.state.copy()
        self.last_observation = observation.copy()
        self.max_Q_list.append(np.max(self.Q_recent))

        return action
core.py (project: Deep-Subspace-Clustering, author: tonyabracadabra)
def find(a,n=None,d=None,nargout=1):
    if d:
        raise NotImplementedError

    # nonzero/flatnonzero make no promise that they index the argument
    # without first converting it to an array, so we use asarray
    # instead of asanyarray
    if nargout == 1:
        i = np.flatnonzero(np.asarray(a)).reshape(1,-1)+1
        if n is not None:
            i = i.take(n)
        return matlabarray(i)
    if nargout == 2:
        i,j = np.nonzero(np.asarray(a))
        if n is not None:
            i = i.take(n)
            j = j.take(n)
        return (matlabarray((i+1).reshape(-1,1)),
                matlabarray((j+1).reshape(-1,1)))
    raise NotImplementedError
all_correlations.py (project: Building-Machine-Learning-Systems-With-Python-Second-Edition, author: PacktPublishing)
def all_correlations_fast_no_scipy(y, X):
    '''
    Cs = all_correlations(y, X)

    Cs[i] = np.corrcoef(y, X[i])[0,1]
    '''
    X = np.asanyarray(X, float)
    y = np.asanyarray(y, float)
    xy = np.dot(X, y)
    y_ = y.mean()
    ys_ = y.std()
    x_ = X.mean(1)
    xs_ = X.std(1)
    n = float(len(y))
    ys_ += 1e-5  # Handle zeros in ys
    xs_ += 1e-5  # Handle zeros in x

    return (xy - x_ * y_ * n) / n / xs_ / ys_
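A minimal sanity check against np.corrcoef on random data (the 1e-5 terms added to the standard deviations introduce only a negligible bias here):

import numpy as np

rng = np.random.RandomState(42)
X = rng.randn(4, 100)
y = rng.randn(100)
fast = all_correlations_fast_no_scipy(y, X)
ref = np.array([np.corrcoef(y, row)[0, 1] for row in X])
print(np.allclose(fast, ref, atol=1e-3))  # True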
datamodel.py (project: heliopy, author: heliopython)
def _dateToISO(indict):
    """
    Convert datetimes to ISO strings inside of datamodel attributes
    """
    retdict = dmcopy(indict)
    if isinstance(indict, dict):
        for key in indict:
            if isinstance(indict[key], datetime.datetime):
                retdict[key] = retdict[key].isoformat()
            elif hasattr(indict[key], '__iter__'):
                for idx, el in enumerate(indict[key]):
                    if isinstance(el, datetime.datetime):
                        retdict[key][idx] = el.isoformat()
    else:
        if isinstance(indict, datetime.datetime):
            retdict = retdict.isoformat()
        elif hasattr(indict, '__iter__'):
            retdict = numpy.asanyarray(indict)
            for idx, el in numpy.ndenumerate(indict):
                if isinstance(el, datetime.datetime):
                    retdict[idx] = el.isoformat()
    return retdict
__init__.py (project: heliopy, author: heliopython)
def interweave(a, b):
    """
    Given two array-like variables, interweave them together.
    Discussed here: http://stackoverflow.com/questions/5347065/interweaving-two-numpy-arrays

    Parameters
    ==========
    a : array-like
        first array

    b : array-like
        second array

    Returns
    =======
    out : numpy.ndarray
        interweaved array
    """
    a = np.asanyarray(a)
    b = np.asanyarray(b)
    ans = np.empty((a.size + b.size), dtype=a.dtype)
    ans[0::2] = a
    ans[1::2] = b
    return ans
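A small usage sketch; note that the output dtype follows a, so interweaving, say, ints with floats will cast b's values, and both inputs are expected to have the same length:

import numpy as np

a = np.array([1, 3, 5])
b = np.array([2, 4, 6])
print(interweave(a, b))  # [1 2 3 4 5 6]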
internals.py (project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda, author: SignalMedia)
def take(self, indexer, axis=1, verify=True, convert=True):
        """
        Take items along any axis.
        """
        self._consolidate_inplace()
        indexer = (np.arange(indexer.start, indexer.stop, indexer.step,
                             dtype='int64')
                   if isinstance(indexer, slice)
                   else np.asanyarray(indexer, dtype='int64'))

        n = self.shape[axis]
        if convert:
            indexer = maybe_convert_indices(indexer, n)

        if verify:
            if ((indexer == -1) | (indexer >= n)).any():
                raise Exception('Indices must be nonzero and less than '
                                'the axis length')

        new_labels = self.axes[axis].take(indexer)
        return self.reindex_indexer(new_axis=new_labels, indexer=indexer,
                                    axis=axis, allow_dups=True)
core.py (project: PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda, author: SignalMedia)
def compressed(x):
    """
    Return all the non-masked data as a 1-D array.

    This function is equivalent to calling the "compressed" method of a
    `MaskedArray`, see `MaskedArray.compressed` for details.

    See Also
    --------
    MaskedArray.compressed
        Equivalent method.

    """
    if not isinstance(x, MaskedArray):
        x = asanyarray(x)
    return x.compressed()
dqn_agent_nature.py (project: stock_dqn, author: wdy06)
def agent_start(self, observation):


        # Initialize State
        self.state = observation
        state_ = cuda.to_gpu(np.asanyarray(self.state, dtype=np.float32),self.gpu_id)

        # Generate an Action e-greedy
        action, Q_now = self.DQN.e_greedy(state_, self.epsilon)

        # Update for next step
        self.lastAction = action
        self.last_state = self.state.copy()
        self.last_observation = observation.copy()
        self.max_Q_list.append(np.max(Q_now.get()))

        return action
core.py (project: aws-lambda-numpy, author: vitolimandibhrata)
def compressed(x):
    """
    Return all the non-masked data as a 1-D array.

    This function is equivalent to calling the "compressed" method of a
    `MaskedArray`, see `MaskedArray.compressed` for details.

    See Also
    --------
    MaskedArray.compressed
        Equivalent method.

    """
    if not isinstance(x, MaskedArray):
        x = asanyarray(x)
    return x.compressed()
ddqn_agent.py (project: doubleDQN, author: masataka46)
def agent_start(self, observation):

        # Preprocess
        tmp = np.bitwise_and(np.asarray(observation.intArray[128:]).reshape([210, 160]), 0b0001111)  # Get Intensity from the observation
        obs_array = (spm.imresize(tmp, (110, 84)))[110-84-8:110-8, :]  # Scaling

        # Initialize State
        self.state = np.zeros((4, 84, 84), dtype=np.uint8)
        self.state[0] = obs_array
        state_ = cuda.to_gpu(np.asanyarray(self.state.reshape(1, 4, 84, 84), dtype=np.float32))

        # Generate an Action e-greedy
        returnAction = Action()
        action, Q_now = self.DDQN.e_greedy(state_, self.epsilon)
        returnAction.intArray = [action]

        # Update for next step
        self.lastAction = copy.deepcopy(returnAction)
        self.last_state = self.state.copy()
        self.last_observation = obs_array

        return returnAction

