Python numpy.newaxis usage examples (source code)
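For background: np.newaxis is just an alias for None; using it in an index inserts a new axis of length 1 at that position, which is the pattern every snippet below relies on. A minimal, self-contained sketch:

import numpy as np

a = np.arange(3)             # shape (3,)
row = a[np.newaxis, :]       # shape (1, 3): add a leading axis
col = a[:, np.newaxis]       # shape (3, 1): turn the vector into a column
assert np.newaxis is None    # newaxis is literally an alias for None
print(row.shape, col.shape)  # (1, 3) (3, 1)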

image_channel.py (project: FCN_train, author: 315386775)
def preprocess(image):
    """Takes an image and apply preprocess"""
    # ????????????
    image = cv2.resize(image, (data_shape, data_shape))
    # ?? BGR ? RGB
    image = image[:, :, (2, 1, 0)]
    # ?mean?????float
    image = image.astype(np.float32)
    # ? mean
    image -= np.array([123, 117, 104])
    # ??? [batch-channel-height-width]
    image = np.transpose(image, (2, 0, 1))
    image = image[np.newaxis, :]
    # ?? ndarray
    image = nd.array(image)
    return image
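A quick aside on the image[np.newaxis, :] step above: it prepends the batch axis, turning a (channel, height, width) array into (1, channel, height, width); np.expand_dims(image, 0) would do the same. A small sketch with an assumed 3x224x224 input:

import numpy as np

chw = np.zeros((3, 224, 224), dtype=np.float32)  # channel-height-width image (illustrative shape)
batched = chw[np.newaxis, :]                     # shape (1, 3, 224, 224)
assert batched.shape == np.expand_dims(chw, 0).shape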
doodle.py (project: neural-doodle, author: alexjc)
def prepare_style(self, scale=1.0):
        """Called each phase of the optimization, process the style image according to the scale, then run it
        through the model to extract intermediate outputs (e.g. sem4_1) and turn them into patches.
        """
        style_img = self.rescale_image(self.style_img_original, scale)
        self.style_img = self.model.prepare_image(style_img)

        style_map = self.rescale_image(self.style_map_original, scale)
        self.style_map = style_map.transpose((2, 0, 1))[np.newaxis].astype(np.float32)

        # Compile a function to run on the GPU to extract patches for all layers at once.
        layer_outputs = zip(self.style_layers, self.model.get_outputs('sem', self.style_layers))
        extractor = self.compile([self.model.tensor_img, self.model.tensor_map], self.do_extract_patches(layer_outputs))
        result = extractor(self.style_img, self.style_map)

        # Store all the style patches layer by layer, resized to match slice size and cast to 16-bit for size. 
        self.style_data = {}
        for layer, *data in zip(self.style_layers, result[0::3], result[1::3], result[2::3]):
            patches = data[0]
            l = self.model.network['nn'+layer]
            l.num_filters = patches.shape[0] // args.slices
            self.style_data[layer] = [d[:l.num_filters*args.slices].astype(np.float16) for d in data]\
                                   + [np.zeros((patches.shape[0],), dtype=np.float16)]
            print('  - Style layer {}: {} patches in {:,}kb.'.format(layer, patches.shape, patches.size//1000))
utils.py (project: spyking-circus, author: spyking-circus)
def get_covariance(self):
        """Compute data covariance with the generative model.
        ``cov = components_.T * S**2 * components_ + sigma2 * eye(n_features)``
        where  S**2 contains the explained variances.
        Returns
        -------
        cov : array, shape=(n_features, n_features)
            Estimated covariance of data.
        """
        components_ = self.components_
        exp_var = self.explained_variance_
        if self.whiten:
            components_ = components_ * np.sqrt(exp_var[:, np.newaxis])
        exp_var_diff = np.maximum(exp_var - self.noise_variance_, 0.)
        cov = np.dot(components_.T * exp_var_diff, components_)
        cov.flat[::len(cov) + 1] += self.noise_variance_  # modify diag inplace
        return cov
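In the whitening branch above, exp_var[:, np.newaxis] reshapes the 1-D explained-variance vector to a column so each row of components_ is scaled by its own variance. A toy illustration (the array values here are made up):

import numpy as np

components = np.ones((2, 4))               # 2 components, 4 features
exp_var = np.array([4.0, 9.0])             # one variance per component
scaled = components * np.sqrt(exp_var[:, np.newaxis])  # row i scaled by sqrt(exp_var[i])
print(scaled)                              # first row all 2.0, second row all 3.0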
utils.py (project: spyking-circus, author: spyking-circus)
def inverse_transform(self, X):
        """Transform data back to its original space, i.e.,
        return an input X_original whose transform would be X
        Parameters
        ----------
        X : array-like, shape (n_samples, n_components)
            New data, where n_samples is the number of samples
            and n_components is the number of components.
        Returns
        -------
        X_original array-like, shape (n_samples, n_features)
        """
        check_is_fitted(self, 'mean_')

        if self.whiten:
            return fast_dot(
                X,
                np.sqrt(self.explained_variance_[:, np.newaxis]) *
                self.components_) + self.mean_
        else:
            return fast_dot(X, self.components_) + self.mean_
gui.py (project: spyking-circus, author: spyking-circus)
def update_data_sort_order(self, new_sort_order=None):
        if new_sort_order is not None:
            self.current_order = new_sort_order
        self.update_sort_idcs()
        self.data_image.set_extent((self.raw_lags[0], self.raw_lags[-1],
                            0, len(self.sort_idcs)))
        self.data_ax.set_ylim(0, len(self.sort_idcs))
        all_raw_data  = self.raw_data
        all_raw_data /= (1 + self.raw_data.mean(1)[:, np.newaxis])
        if len(all_raw_data) > 0:
            cmax          = 0.5*all_raw_data.max()
            cmin          = 0.5*all_raw_data.min()
            all_raw_data  = all_raw_data[self.sort_idcs, :]
        else:
            cmin = 0
            cmax = 1
        self.data_image.set_data(all_raw_data)
        self.data_image.set_clim(cmin, cmax)
        self.data_selection.set_y(len(self.sort_idcs)-len(self.selected_points))
        self.data_selection.set_height(len(self.selected_points))
        self.update_data_plot()
gui.py (project: spyking-circus, author: spyking-circus)
def plot_electrodes(self):
        if not getattr(self, 'collections', None):
            # It is important to set one facecolor per point so that we can change
            # it later
            self.electrode_collection = self.electrode_ax.scatter(self.x_position,
                                                                  self.y_position,
                                                                  facecolor=['black' for _ in self.x_position],
                                                                  s=30)
            self.electrode_ax.set_xlabel('Space [um]')
            self.electrode_ax.set_xticklabels([])
            self.electrode_ax.set_ylabel('Space [um]')
            self.electrode_ax.set_yticklabels([])
        else:
            self.electrode_collection.set_offsets(np.hstack([self.x_position[np.newaxis, :].T,
                                                             self.y_position[np.newaxis, :].T]))
        ax, x, y = self.electrode_ax, self.y_position, self.x_position
        ymin, ymax = min(x), max(x)
        yrange = (ymax - ymin)*0.5 * 1.05  # stretch everything a bit
        ax.set_ylim((ymax + ymin)*0.5 - yrange, (ymax + ymin)*0.5 + yrange)
        xmin, xmax = min(y), max(y)
        xrange = (xmax - xmin)*0.5 * 1.05  # stretch everything a bit
        ax.set_xlim((xmax + xmin)*0.5 - xrange, (xmax + xmin)*0.5 + xrange)

        self.ui.raw_data.draw_idle()
classify.py (project: photo-manager-classifier, author: damianmoore)
def __init__(self):
        if not self.code_table:
            with open(CATEGORY_CODES) as codes:
                self.code_table = {int(k): v for k, v in json.loads(codes.read()).items()}

        caffe_models = os.path.expanduser(CAFFE_MODELS)
        model = 'squeezenet', 'init_net.pb', 'predict_net.pb', 'ilsvrc_2012_mean.npy', 227
        self.model = model

        mean_file = os.path.join(caffe_models, model[0], model[3])
        if not os.path.exists(mean_file):
            self.mean = 128
        else:
            mean = np.load(mean_file).mean(1).mean(1)
            self.mean = mean[:, np.newaxis, np.newaxis]

        init_net = os.path.join(caffe_models, model[0], model[1])
        predict_net = os.path.join(caffe_models, model[0], model[2])

        with open(init_net) as f:
            self.init_net = f.read()
        with open(predict_net) as f:
            self.predict_net = f.read()
agent.py (project: snake_game, author: wing3s)
def play(self, nb_rounds):
        img_saver = save_image()
        img_saver.next()

        game_cnt = it.count(1)
        for i in xrange(nb_rounds):
            game = self.game(width=self.width, height=self.height)
            screen, _ = game.next()
            img_saver.send(screen)
            frame_cnt = it.count()
            try:
                state = np.asarray([screen] * self.nb_frames)
                while True:
                    frame_cnt.next()
                    act_idx = np.argmax(
                        self.model.predict(state[np.newaxis]), axis=-1)[0]
                    screen, _ = game.send(self.actions[act_idx])
                    state = np.roll(state, 1, axis=0)
                    state[0] = screen
                    img_saver.send(screen)
            except StopIteration:
                print 'Saved %4i frames for game %3i' % (
                    frame_cnt.next(), game_cnt.next())
        img_saver.close()
__init__.py (project: wmd-relax, author: src-d)
def compute_similarity(self, doc1, doc2):
            """
            Calculates the similarity between two spaCy documents. Extracts the
            nBOW from them and evaluates the WMD.

            :return: The calculated similarity.
            :rtype: float.
            """
            doc1 = self._convert_document(doc1)
            doc2 = self._convert_document(doc2)
            vocabulary = {
                w: i for i, w in enumerate(sorted(set(doc1).union(doc2)))}
            w1 = self._generate_weights(doc1, vocabulary)
            w2 = self._generate_weights(doc2, vocabulary)
            evec = numpy.zeros((len(vocabulary), self.nlp.vocab.vectors_length),
                               dtype=numpy.float32)
            for w, i in vocabulary.items():
                evec[i] = self.nlp.vocab[w].vector
            evec_sqr = (evec * evec).sum(axis=1)
            dists = evec_sqr - 2 * evec.dot(evec.T) + evec_sqr[:, numpy.newaxis]
            dists[dists < 0] = 0
            dists = numpy.sqrt(dists)
            return libwmdrelax.emd(w1, w2, dists)
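The dists line above is the standard broadcasting identity for pairwise squared distances: with sq holding the squared norms, sq - 2 * evec.dot(evec.T) + sq[:, np.newaxis] broadcasts a row vector and a column vector against the Gram matrix, giving ||evec[i] - evec[j]||**2 at entry (i, j). A small check (random data, purely illustrative):

import numpy as np

vecs = np.random.rand(5, 3).astype(np.float32)
sq = (vecs * vecs).sum(axis=1)                       # squared norms, shape (5,)
d2 = sq - 2 * vecs.dot(vecs.T) + sq[:, np.newaxis]   # pairwise squared distances, shape (5, 5)
brute = ((vecs[:, np.newaxis, :] - vecs[np.newaxis, :, :]) ** 2).sum(axis=-1)
assert np.allclose(d2, brute, atol=1e-5)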
plot_utils.py (project: pybot, author: spillai)
def plot_confusion_matrix(cm, target_names, title='Confusion matrix', cmap=plt.cm.Greys, block=True):
    # Colormaps: jet, Greys
    cm_normalized = cm.astype(np.float32) / cm.sum(axis=1)[:, np.newaxis]
    plt.imshow(cm_normalized, interpolation='nearest', cmap=cmap)

    # Show confidences
    for i, cas in enumerate(cm): 
        for j, c in enumerate(cas): 
            if c > 0: 
                plt.text(j-0.1, i+0.2, c, fontsize=16, fontweight='bold', color='#b70000')

    f = plt.figure(1)
    f.clf()
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(target_names))
    plt.xticks(tick_marks, target_names, rotation=45)
    plt.yticks(tick_marks, target_names)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show(block=block)
camera_utils.py (project: pybot, author: spillai)
def ray(self, pts, undistort=True, rotate=False, normalize=False): 
        """
        Returns the ray corresponding to the points. 
        Optionally undistort (defaults to true), and 
        rotate ray to the camera's viewpoint 
        """
        upts = self.undistort_points(pts) if undistort else pts
        ret = unproject_points(
            np.hstack([ (colvec(upts[:,0])-self.cx) / self.fx, (colvec(upts[:,1])-self.cy) / self.fy ])
        )

        if rotate: 
            ret = self.extrinsics.rotate_vec(ret)

        if normalize: 
            ret = ret / np.linalg.norm(ret, axis=1)[:, np.newaxis]

        return ret
metrics.py (project: pybot, author: spillai)
def plot_confusion_matrix(cm, target_names, title='Confusion matrix', cmap=plt.cm.Greys):
    # Colormaps: jet, Greys
    cm_normalized = cm.astype(np.float32) / cm.sum(axis=1)[:, np.newaxis]
    plt.imshow(cm_normalized, interpolation='nearest', cmap=cmap)

    # Show confidences
    for i, cas in enumerate(cm): 
        for j, c in enumerate(cas): 
            if c > 0: 
                plt.text(j-0.1, i+0.2, c, fontsize=16, fontweight='bold', color='#b70000')

    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(target_names))
    plt.xticks(tick_marks, target_names, rotation=45)
    plt.yticks(tick_marks, target_names)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show(block=True)
cpm_utils.py (project: convolutional-pose-machines-tensorflow, author: timctho)
def make_gaussian(size, fwhm=3, center=None):
    """ Make a square gaussian kernel.
    size is the length of a side of the square
    fwhm is full-width-half-maximum, which
    can be thought of as an effective radius.
    """

    x = np.arange(0, size, 1, float)
    y = x[:, np.newaxis]

    if center is None:
        x0 = y0 = size // 2
    else:
        x0 = center[0]
        y0 = center[1]

    return np.exp(-((x - x0) ** 2 + (y - y0) ** 2) / 2.0 / fwhm / fwhm)
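Note how the kernel is built: x has shape (size,) and y = x[:, np.newaxis] has shape (size, 1), so the expression inside np.exp broadcasts to a full (size, size) grid without an explicit meshgrid. For example:

import numpy as np

size = 5
x = np.arange(size, dtype=float)    # shape (5,)
y = x[:, np.newaxis]                # shape (5, 1)
grid = (x - 2) ** 2 + (y - 2) ** 2  # broadcasts to shape (5, 5)
print(grid.shape)                   # (5, 5)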
ImageView.py (project: NeoAnalysis, author: neoanalysis)
def roiChanged(self):
        if self.image is None:
            return

        image = self.getProcessedImage()
        if image.ndim == 2:
            axes = (0, 1)
        elif image.ndim == 3:
            axes = (1, 2)
        else:
            return

        data, coords = self.roi.getArrayRegion(image.view(np.ndarray), self.imageItem, axes, returnMappedCoords=True)
        if data is not None:
            while data.ndim > 1:
                data = data.mean(axis=1)
            if image.ndim == 3:
                self.roiCurve.setData(y=data, x=self.tVals)
            else:
                while coords.ndim > 2:
                    coords = coords[:,:,0]
                coords = coords - coords[:,0,np.newaxis]
                xvals = (coords**2).sum(axis=0) ** 0.5
                self.roiCurve.setData(y=data, x=xvals)
PlotCurveItem.py (project: NeoAnalysis, author: neoanalysis)
def generatePath(self, x, y):
        if self.opts['stepMode']:
            ## each value in the x/y arrays generates 2 points.
            x2 = np.empty((len(x),2), dtype=x.dtype)
            x2[:] = x[:,np.newaxis]
            if self.opts['fillLevel'] is None:
                x = x2.reshape(x2.size)[1:-1]
                y2 = np.empty((len(y),2), dtype=y.dtype)
                y2[:] = y[:,np.newaxis]
                y = y2.reshape(y2.size)
            else:
                ## If we have a fill level, add two extra points at either end
                x = x2.reshape(x2.size)
                y2 = np.empty((len(y)+2,2), dtype=y.dtype)
                y2[1:-1] = y[:,np.newaxis]
                y = y2.reshape(y2.size)[1:-1]
                y[0] = self.opts['fillLevel']
                y[-1] = self.opts['fillLevel']

        path = fn.arrayToQPath(x, y, connect=self.opts['connect'])

        return path
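The step-mode branch duplicates every sample by broadcasting a column into an (N, 2) buffer and then flattening it; in isolation the trick looks like this:

import numpy as np

x = np.array([1, 2, 3])
x2 = np.empty((len(x), 2), dtype=x.dtype)
x2[:] = x[:, np.newaxis]        # each row becomes [value, value]
print(x2.reshape(x2.size))      # [1 1 2 2 3 3]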
MeshData.py (project: NeoAnalysis, author: neoanalysis)
def faceNormals(self, indexed=None):
        """
        Return an array (Nf, 3) of normal vectors for each face.
        If indexed='faces', then instead return an indexed array
        (Nf, 3, 3)  (this is just the same array with each vector
        copied three times).
        """
        if self._faceNormals is None:
            v = self.vertexes(indexed='faces')
            self._faceNormals = np.cross(v[:,1]-v[:,0], v[:,2]-v[:,0])

        if indexed is None:
            return self._faceNormals
        elif indexed == 'faces':
            if self._faceNormalsIndexedByFaces is None:
                norms = np.empty((self._faceNormals.shape[0], 3, 3))
                norms[:] = self._faceNormals[:,np.newaxis,:]
                self._faceNormalsIndexedByFaces = norms
            return self._faceNormalsIndexedByFaces
        else:
            raise Exception("Invalid indexing mode. Accepts: None, 'faces'")
nestio.py (project: NeoAnalysis, author: neoanalysis)
def __init__(self, filename):
        """
        filename: string, path to ASCII file to read.
        """

        self.filename = filename

        # read the first line to check the data type (int or float) of the data
        f = open(self.filename)
        line = f.readline()

        additional_parameters = {}
        if '.' not in line:
            additional_parameters['dtype'] = np.int32

        self.data = np.loadtxt(self.filename, **additional_parameters)

        if len(self.data.shape) == 1:
            self.data = self.data[:, np.newaxis]
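The final [:, np.newaxis] guards against one-column files: np.loadtxt returns a 1-D array in that case, and the reshape promotes it to (n, 1) so callers can always index rows and columns. For example:

import numpy as np

values = np.array([1.0, 2.0, 3.0])   # what loadtxt returns for a one-column file
table = values[:, np.newaxis]        # shape (3, 1): one row per record
print(table.shape)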
evalx.py (project: kaggle-review, author: daxiongshu)
def multiclass_log_loss(y_true, y_pred, eps=1e-15):
    """Multi class version of Logarithmic Loss metric.
    https://www.kaggle.com/wiki/MultiClassLogLoss
    Parameters
    ----------
    y_true : array, shape = [n_samples]
            true class, integers in [0, n_classes - 1)
    y_pred : array, shape = [n_samples, n_classes]
    Returns
    -------
    loss : float
    """
    predictions = np.clip(y_pred, eps, 1 - eps)

    # normalize row sums to 1
    predictions /= predictions.sum(axis=1)[:, np.newaxis]

    actual = np.zeros(y_pred.shape)
    n_samples = actual.shape[0]
    actual[np.arange(n_samples), y_true.astype(int)] = 1
    vectsum = np.sum(actual * np.log(predictions))
    loss = -1.0 / n_samples * vectsum
    return loss
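The normalization above divides each row of predictions by its own sum: predictions.sum(axis=1) is 1-D and [:, np.newaxis] makes it a column so the division broadcasts across classes. A quick check:

import numpy as np

p = np.array([[0.2, 0.2, 0.6],
              [1.0, 1.0, 2.0]])
p = p / p.sum(axis=1)[:, np.newaxis]   # each row now sums to 1
assert np.allclose(p.sum(axis=1), 1.0)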
rigid_transformations.py (project: autolab_core, author: BerkeleyAutomation)
def rotation_from_axes(x_axis, y_axis, z_axis):
        """Convert specification of axis in target frame to
        a rotation matrix from source to target frame.

        Parameters
        ----------
        x_axis : :obj:`numpy.ndarray` of float
            A normalized 3-vector for the target frame's x-axis.

        y_axis : :obj:`numpy.ndarray` of float
            A normalized 3-vector for the target frame's y-axis.

        z_axis : :obj:`numpy.ndarray` of float
            A normalized 3-vector for the target frame's z-axis.

        Returns
        -------
        :obj:`numpy.ndarray` of float
            A 3x3 rotation matrix that transforms from a source frame to the
            given target frame.
        """
        return np.hstack((x_axis[:,np.newaxis], y_axis[:,np.newaxis], z_axis[:,np.newaxis]))
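Each axis here is a 1-D 3-vector, so axis[:, np.newaxis] makes a (3, 1) column and np.hstack glues the three columns into the 3x3 rotation matrix; np.column_stack((x_axis, y_axis, z_axis)) is an equivalent one-liner. For example:

import numpy as np

x_axis = np.array([1.0, 0.0, 0.0])
y_axis = np.array([0.0, 1.0, 0.0])
z_axis = np.array([0.0, 0.0, 1.0])
R = np.hstack((x_axis[:, np.newaxis], y_axis[:, np.newaxis], z_axis[:, np.newaxis]))
assert np.allclose(R, np.column_stack((x_axis, y_axis, z_axis)))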
points.py (project: autolab_core, author: BerkeleyAutomation)
def _preprocess_data(self, data):
        """Converts the data array to the preferred dim x #points structure.

        Parameters
        ----------
        data : :obj:`numpy.ndarray` of float
            The data to process.

        Returns
        -------
        :obj:`numpy.ndarray` of float
            The same data array, but reshapes lists to be dim x 1.
        """
        if len(data.shape) == 1: 
            data = data[:,np.newaxis]
        return data
meter.py (project: python-utils, author: zhijian-liu)
def add(self, outputs, targets):
        outputs = to_numpy(outputs)
        targets = to_numpy(targets)

        if np.ndim(targets) == 2:
            targets = np.argmax(targets, 1)

        assert np.ndim(outputs) == 2, 'wrong output size (2D expected)'
        assert np.ndim(targets) == 1, 'wrong target size (1D or 2D expected)'
        assert targets.shape[0] == outputs.shape[0], 'number of outputs and targets do not match'

        top_k = self.top_k
        max_k = int(top_k[-1])

        predict = torch.from_numpy(outputs).topk(max_k, 1, True, True)[1].numpy()
        correct = (predict == targets[:, np.newaxis].repeat(predict.shape[1], 1))

        self.size += targets.shape[0]
        for k in top_k:
            self.corrects[k] += correct[:, :k].sum()
image_processing.py (project: mx-rfcn, author: giorking)
def transform(im, pixel_means, need_mean=False):
    """
    transform into mxnet tensor
    subtract the pixel means and transform to the correct format
    :param im: [height, width, channel] in BGR
    :param pixel_means: [[[R, G, B pixel means]]]
    :return: [batch, channel, height, width]
    """
    im = im.copy()
    im[:, :, (0, 1, 2)] = im[:, :, (2, 1, 0)]
    im = im.astype(float)
    if need_mean:
        im -= pixel_means
    im_tensor = im[np.newaxis, :]
    # put channel first
    channel_swap = (0, 3, 1, 2)
    im_tensor = im_tensor.transpose(channel_swap)
    return im_tensor
torch_image_transform_layer.py (project: faster-rcnn-resnet, author: Eniac-Xie)
def setup(self, bottom, top):
        # (1, 3, 1, 1) shaped arrays
        self.PIXEL_MEANS = \
            np.array([[[[0.48462227599918]],
                       [[0.45624044862054]],
                       [[0.40588363755159]]]])
        self.PIXEL_STDS = \
            np.array([[[[0.22889466674951]],
                       [[0.22446679341259]],
                       [[0.22495548344775]]]])
        # The default ("old") pixel means that were already subtracted
        channel_swap = (0, 3, 1, 2)
        self.OLD_PIXEL_MEANS = \
            cfg.PIXEL_MEANS[np.newaxis, :, :, :].transpose(channel_swap)

        top[0].reshape(*(bottom[0].shape))
ps2d.py (project: atoolbox, author: liweitianux)
def calc_ps3d(self):
        """
        Calculate the 3D power spectrum of the image cube.

        The power spectrum is properly normalized to have dimension
        of [K^2 Mpc^3].
        """
        if self.window is not None:
            logger.info("Applying window along frequency axis ...")
            self.cube *= self.window[:, np.newaxis, np.newaxis]

        logger.info("3D FFTing data cube ...")
        cubefft = fftpack.fftshift(fftpack.fftn(self.cube))

        logger.info("Calculating 3D power spectrum ...")
        ps3d = np.abs(cubefft) ** 2  # [K^2]
        # Normalization
        norm1 = 1 / (self.Nx * self.Ny * self.Nz)
        norm2 = 1 / (self.fs_xy**2 * self.fs_z)  # [Mpc^3]
        norm3 = 1 / (2*np.pi)**3
        self.ps3d = ps3d * norm1 * norm2 * norm3  # [K^2 Mpc^3]
        return self.ps3d
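The window step works because self.window[:, np.newaxis, np.newaxis] has shape (Nz, 1, 1) and therefore broadcasts along the two sky axes of the (Nz, Ny, Nx) cube; a minimal sketch with made-up shapes:

import numpy as np

cube = np.ones((4, 8, 8))             # (frequency, y, x)
window = np.hanning(4)                # one taper weight per frequency channel
tapered = cube * window[:, np.newaxis, np.newaxis]  # weight applied channel by channel
assert np.allclose(tapered[:, 0, 0], window)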
geometry.py (project: prysm, author: brandondube)
def gaussian(sigma=0.5, samples=128):
    ''' Generates a gaussian mask with a given sigma

    Args:
        sigma (`float`): width parameter of the gaussian, expressed in radii of
            the output array.

        samples (`int`): number of samples in square array.

    Returns:
        `numpy.ndarray`: mask with gaussian shape.

    '''
    s = sigma

    x = np.arange(0, samples, 1, float)
    y = x[:, np.newaxis]

    # // is floor division in python
    x0 = y0 = samples // 2
    return exp(-4 * log(2) * ((x - x0) ** 2 + (y - y0) ** 2) / (s * samples) ** 2)
qerbt.py (project: untwist, author: IoSR-Surrey)
def process(self, wave):
        wave.check_mono()
        if wave.sample_rate != self.sr:
            raise Exception("Wrong sample rate")                              
        n = int(np.ceil(2 * wave.num_frames / float(self.w_len)))
        m = (n + 1) * self.w_len / 2 
        swindow = self.make_signal_window(n)
        win_ratios = [self.window / swindow[t * self.w_len / 2 : 
            t * self.w_len / 2 + self.w_len] 
            for t in range(n)]
        wave = wave.zero_pad(0, int(m - wave.num_frames))
        wave = audio.Wave(signal.hilbert(wave), wave.sample_rate)        
        result = np.zeros((self.n_bins, n))

        for b in range(self.n_bins): 
            w = self.widths[b]
            wc = 1 / np.square(w + 1)
            filter = self.filters[b]
            band = fftfilt(filter, wave.zero_pad(0, int(2 * w))[:,0])
            band = band[int(w) : int(w + m), np.newaxis]    
            for t in range(n):
                frame = band[t * self.w_len / 2:
                             t * self.w_len / 2 + self.w_len,:] * win_ratios[t]
                result[b, t] =  wc * np.real(np.conj(np.dot(frame.conj().T, frame)))
        return audio.Spectrogram(result, self.sr, self.w_len, self.w_len / 2)

