Example source code for Python's flipud()
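
The snippets below, collected from open-source projects, show numpy.flipud() used to flip image masks, reverse convolution kernels, mirror model grids, and reverse time series or byte order. As a minimal reference sketch (the array values are illustrative only), np.flipud reverses the row order of an array; for a 1D input it simply reverses the elements:

import numpy as np

a = np.arange(6).reshape(3, 2)            # rows: [0 1], [2 3], [4 5]
print(np.flipud(a))                       # rows reversed: [4 5], [2 3], [0 1]
print(np.flipud(np.array([1, 2, 3])))     # 1D case: [3 2 1]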

qmf_realtime_class.py (project: ASP, author: TUIlmenauAMS)
def DST4(samples):
    """
        Method to create DST4 transformation using DST3

        Arguments   :
            samples : (1D Array) Input samples to be transformed

        Returns     :
            y       :  (1D Array) Transformed output samples

    """

    # Initialize
    samplesup=np.zeros(2*N, dtype = np.float32)

    # Upsample signal
    # Reverse order to obtain DST4 out of DCT4:
    #samplesup[1::2]=np.flipud(samples)
    samplesup[0::2] = samples
    y = spfft.dst(samplesup,type=3,norm='ortho')*np.sqrt(2)#/2

    # Flip sign of every 2nd subband to obtain DST4 out of DCT4
    #y=(y[0:N])*(((-1)*np.ones(N, dtype = np.float32))**range(N))

    return y[0: N]
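
A minimal usage sketch for the snippet above, assuming it sits in a module where numpy is imported as np, scipy.fftpack as spfft, and N is the module-level block length used inside DST4 (8 here purely for illustration):

import numpy as np
import scipy.fftpack as spfft

N = 8                                            # assumed module-level block length
samples = np.random.randn(N).astype(np.float32)  # one input block
y = DST4(samples)                                # DST4-transformed block
print(y.shape)                                   # (8,)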
_main.py (project: matplotlib_venn_wordcloud, author: paulbrodersen)
def _get_wordcloud(img, patch, words, word_to_frequency=None, **wordcloud_kwargs):

    # get the boolean mask corresponding to each patch
    path = patch.get_path()
    mask = path.contains_points(img.pixel_coordinates).reshape((img.y_resolution, img.x_resolution))

    # make mask matplotlib-venn compatible
    mask = (~mask * 255).astype(np.uint8) # black indicates mask position
    mask = np.flipud(mask) # origin is in upper left

    # create wordcloud
    wc = WordCloud(mask=mask,
                   background_color=None,
                   mode="RGBA",
                   **wordcloud_kwargs)

    if not word_to_frequency:
        text = " ".join(words)
        wc.generate(text)
    else:
        wc.generate_from_frequencies({word: word_to_frequency[word] for word in words})

    return wc
TimeNuclearWavefunction.py (project: spectroscopy, author: jgoodknight)
def autocorrelation(self):
        "Autocorrelation as a function of time"
        if self.__autocorrelation is not None:
            return self.__autocorrelationTimeSeries, self.__autocorrelation

        negT = -np.flipud(self.timeSeries[1:])
        autocorrelationTime = np.hstack((negT, self.timeSeries))
        self.__autocorrelationTimeSeries = autocorrelationTime

        initialWF = self[0]
        ACF = []
        for WF in self:
            ACF.append(WF.overlap(initialWF))
        ACF = np.array(ACF)
        negACF = np.conj(np.flipud(ACF[1:]))
        totalACF = np.hstack((negACF, ACF))
        self.__autocorrelation = totalACF
        return self.__autocorrelationTimeSeries, self.__autocorrelation
image_functions.py (project: tf-Faster-RCNN, author: kevinjliang)
def _applyImageFlips(image, flips):
    '''
    Apply left-right and up-down flips to an image

    Args:
        image (numpy array 2D/3D): image to be flipped
        flips (tuple):
            [0]: Boolean to flip horizontally
            [1]: Boolean to flip vertically

    Returns:
        Flipped image
    '''
    image = np.fliplr(image) if flips[0] else image
    image = np.flipud(image) if flips[1] else image

    return image
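
A short usage sketch for _applyImageFlips on a toy 2D image (values illustrative):

import numpy as np

img = np.arange(12).reshape(3, 4)                   # toy 2D "image"
flipped = _applyImageFlips(img, (True, False))      # horizontal flip only
assert np.array_equal(flipped, np.fliplr(img))      # vertical flip was skipped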
conv1d.py (project: DBQA-KBQA, author: Lucien-qiang)
def convolve1d_2D_numpy(a, b, mode='full'):
  nwords, ndim = a.shape
  filter_width, ndim = b.shape
  b = np.flipud(b)  # flip the kernel
  if mode == 'full':
    pad = np.zeros((filter_width-1, ndim))
    a = np.vstack([pad, a, pad])
    shape = (nwords+filter_width-1, filter_width, ndim)
  elif mode == 'valid':
    shape = (nwords-filter_width+1, filter_width, ndim)

  strides = (a.strides[0],) + a.strides
  view = np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)

  conv_out = np.einsum('kij,ij->kj', view, b)
  return conv_out
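
A hedged usage sketch for convolve1d_2D_numpy, checking the output against np.convolve applied column by column (random data, illustrative only):

import numpy as np

a = np.random.randn(5, 3)                 # 5 "words", 3 dimensions
b = np.random.randn(2, 3)                 # filter of width 2
out = convolve1d_2D_numpy(a, b, mode='full')
ref = np.stack([np.convolve(a[:, d], b[:, d], mode='full') for d in range(3)], axis=1)
assert np.allclose(out, ref)              # each column is a full 1D convolution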
conv1d.py (project: DEEP-CLICK-MODEL, author: THUIR)
def convolve1d_2D_numpy(a, b, mode='full'):
  nwords, ndim = a.shape
  filter_width, ndim = b.shape
  b = np.flipud(b)  # flip the kernel
  if mode == 'full':
    pad = np.zeros((filter_width-1, ndim))
    a = np.vstack([pad, a, pad])
    shape = (nwords+filter_width-1, filter_width, ndim)
  elif mode == 'valid':
    shape = (nwords-filter_width+1, filter_width, ndim)

  strides = (a.strides[0],) + a.strides
  view = np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)

  conv_out = np.einsum('kij,ij->kj', view, b)
  return conv_out
rendertrainingimages.py (project: LabelFusion, author: RobotLocomotion)
def captureLabelImage(self, filename):
        view = self.view
        self.disableLighting()
        im = sgp.saveScreenshot(view, filename, shouldRender=False, shouldWrite=False)

        if filename is not None:
            img = vnp.getNumpyFromVtk(im, 'ImageScalars')
            assert img.dtype == np.uint8

            img.shape = (im.GetDimensions()[1], im.GetDimensions()[0], 3)
            img = np.flipud(img)

            img = img[:,:,0]
            print 'writing:', filename
            scipy.misc.imsave(filename, img)

        return im
test_source_okada.py (project: kite, author: pyrocko)
def _plot_displacement(ms):
        if not plot:
            ms.down
            return

        import matplotlib.pyplot as plt
        from matplotlib.patches import Polygon
        fig = plt.figure()
        ax = fig.gca()
        ms.processSources()

        ax.imshow(num.flipud(ms.down), aspect='equal',
                  extent=[0, ms.frame.E.max(), 0, ms.frame.N.max()])
        for src in ms.sources:
            for seg in src.segments:
                p = Polygon(seg.outline(), alpha=.8, fill=False)
                ax.add_artist(p)
            if isinstance(src, OkadaPath):
                nodes = num.array(src.nodes)
                ax.scatter(nodes[:, 0], nodes[:, 1], color='r')
        plt.show()
        fig.clear()
test_source_pyrocko.py (project: kite, author: pyrocko)
def _plot_displacement(ms):
        if not plot:
            ms.down
            return

        import matplotlib.pyplot as plt
        from matplotlib.patches import Polygon  # noqa
        fig = plt.figure()
        ax = fig.gca()
        ms.processSources()

        ax.imshow(num.flipud(ms.north), aspect='equal',
                  extent=[0, ms.frame.E.max(), 0, ms.frame.N.max()])
        # for src in ms.sources:
        #     for seg in src.segments:
        #         p = Polygon(seg.outline(), alpha=.8, fill=False)
        #         ax.add_artist(p)
        plt.show()
        fig.clear()
dagger.py (project: Imitation-Learning-Dagger-Torcs, author: zsdonghao)
def img_reshape(input_img):
    """ (3, 64, 64) --> (64, 64, 3) """
    _img = np.transpose(input_img, (1, 2, 0))
    _img = np.flipud(_img)
    _img = np.reshape(_img, (1, img_dim[0], img_dim[1], img_dim[2]))
    return _img
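
A usage sketch, assuming the module-level img_dim that img_reshape relies on is (64, 64, 3), as the docstring suggests:

import numpy as np

img_dim = (64, 64, 3)                     # assumed global used by img_reshape
raw = np.random.rand(3, 64, 64)           # channel-first input frame
batch = img_reshape(raw)
print(batch.shape)                        # (1, 64, 64, 3)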
preprocessing.py (project: segmentation_DLMI, author: imatge-upc)
def flip_plane(array,plane=0):
    # Flip axial plane LR, i.e. change left/right hemispheres. 3D tensors-only, batch_size=1.
    # n_slices = array.shape[2]
    # for i in range(n_slices):
    #     array[:,:,i] = np.flipud(array[:,:,i])
    # return array
    n_x = array.shape[plane]
    for i in range(n_x):
        if plane == 0:
            array[i,:,:] = np.flipud(array[i,:,:])
        if plane == 1:
            array[:,i,:] = np.flipud(array[:,i,:])
        if plane == 2:
            array[:,:,i] = np.flipud(array[:,:,i])
    return array
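
A short sketch of the left/right flip on a toy 3D volume (values illustrative; note the function modifies its argument in place, hence the copy):

import numpy as np

vol = np.arange(24, dtype=float).reshape(2, 3, 4)
mirrored = flip_plane(vol.copy(), plane=0)                # flip each slice along plane 0
print(np.array_equal(mirrored[0], np.flipud(vol[0])))     # expected: True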
recording.py (project: speechless, author: JuliusKunze)
def _trim_silence(self, audio: ndarray) -> ndarray:
        def trim_start(sound: ndarray) -> ndarray:
            return numpy.array(list(dropwhile(lambda x: x < self.silence_threshold_for_not_normalized_sound, sound)))

        def trim_end(sound: ndarray) -> ndarray:
            return flipud(trim_start(flipud(sound)))

        return trim_start(trim_end(audio))
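
A standalone sketch of the same trimming idea; the threshold value is illustrative, whereas the original reads it from self.silence_threshold_for_not_normalized_sound:

from itertools import dropwhile
import numpy as np

def trim_silence(audio, threshold=0.01):
    def trim_start(sound):
        return np.array(list(dropwhile(lambda x: x < threshold, sound)))
    def trim_end(sound):
        return np.flipud(trim_start(np.flipud(sound)))
    return trim_start(trim_end(audio))

print(trim_silence(np.array([0.0, 0.0, 0.5, 0.2, 0.0])))  # [0.5 0.2]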
io.py (project: sound_field_analysis-py, author: QULab)
def write_SSR_IRs(filename, time_data_l, time_data_r, wavformat="float"):
    """Takes two time signals and writes out the horizontal plane as HRIRs for the SoundScapeRenderer.
    Ideally, both hold 360 IRs; smaller sets are scaled up using repeat if they divide 360 evenly.

    Parameters
    ----------
    filename : string
       filename to write to
    time_data_l, time_data_r : io.ArraySignal
       ArraySignals for left/right ear
    wavformat : string
       wav file format to write. Either "float" or "int16"
    """
    equator_IDX_left = utils.nearest_to_value_logical_IDX(time_data_l.grid.colatitude, _np.pi / 2)
    equator_IDX_right = utils.nearest_to_value_logical_IDX(time_data_r.grid.colatitude, _np.pi / 2)

    IRs_left = time_data_l.signal.signal[equator_IDX_left]
    IRs_right = time_data_r.signal.signal[equator_IDX_right]

    if _np.mod(360 / IRs_left.shape[0], 1) == 0:
        IRs_left = _np.repeat(IRs_left, 360 / IRs_left.shape[0], axis=0)
    else:
        raise ValueError('Number of channels for left ear cannot be fit into 360.')
    if _np.mod(360 / IRs_right.shape[0], 1) == 0:
        IRs_right = _np.repeat(IRs_right, 360 / IRs_right.shape[0], axis=0)
    else:
        raise ValueError('Number of channels for right ear cannot be fit into 360.')

    IRs_to_write = utils.interleave_channels(IRs_left, IRs_right, style="SSR")
    data_to_write = utils.simple_resample(IRs_to_write, original_fs=time_data_l.signal.fs, target_fs=44100)

    # Fix SSR IR alignment stuff: left<>right flipped and 90 degree rotation
    data_to_write = _np.flipud(data_to_write)
    data_to_write = _np.roll(data_to_write, -90, axis=0)

    if wavformat == "float":
        sio.wavfile.write(filename, 44100, data_to_write.astype(_np.float32).T)
    elif wavformat == "int16":
        sio.wavfile.write(filename, 44100, (data_to_write * 32767).astype(_np.int16).T)
    else:
        raise TypeError("Format " + wavformat + "not known. Should be either 'float' or 'int16'.")
mules.py (project: Wall-EEG, author: neurotechuoft)
def parsedata(self, package):
        """
            This function parses the data package sent by MuLES to obtain all the data
            available in MuLES as a matrix of size [n_samples, n_columns]; the total
            number of elements in the matrix is therefore n_samples * n_columns. Each
            column represents one channel.

            Argument:
            package: Data package sent by MuLES.
        """
        size_element = 4           # Size of each one of the elements is 4 bytes

        n_columns = len(self.params['data format'])
        n_bytes = len(package)
        n_samples = (n_bytes/size_element) / n_columns
        # mesData = np.uint8(mesData)  # Convert from binary to integers (not necessary in Python)

        bytes_per_element = np.flipud(np.reshape(list(bytearray(package)), [size_element,-1],order='F'))
        # Changes "package" to a list with size (n_bytes,1) 
        # Reshapes the list into a matrix bytes_per_element which has the size: (4,n_bytes/4)
        # Flips Up-Down the matrix of size (4,n_bytes/4) to correct the swap in bytes    

        package_correct_order = np.uint8(np.reshape(bytes_per_element,[n_bytes,-1],order='F' ))
        # Unrolls the matrix bytes_per_element into "package_correct_order",
        # which has size (n_bytes, 1)

        data_format_tags = self.params['data format']*n_samples
        # Tags used to map the elements into their corresponding representation
        package_correct_order_char = "".join(map(chr,package_correct_order))

        elements = struct.unpack(data_format_tags,package_correct_order_char)
        # Elements are cast in their corresponding representation
        data = np.reshape(np.array(elements),[n_samples,n_columns],order='C')
        # Elements are reshaped into data of shape [n_samples, n_columns]

        return data
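
A small sketch of the byte-swapping trick used above: pack two big-endian 32-bit integers, reshape the raw bytes column-wise, and use np.flipud to reverse the byte order within each element (values illustrative; the final unpack assumes a little-endian machine):

import struct
import numpy as np

package = struct.pack('>2i', 1, 2)                   # two big-endian int32 values
raw = np.reshape(list(bytearray(package)), [4, -1], order='F')
swapped = np.uint8(np.reshape(np.flipud(raw), [8, -1], order='F'))
print(struct.unpack('2i', swapped.tobytes()))        # (1, 2) on a little-endian machine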
visualizer.py (project: DistanceGAN, author: sagiebenaim)
def display_current_results(self, visuals, epoch):
        if self.display_id > 0: # show images in the browser
            idx = 1
            for label, image_numpy in visuals.items():
                #image_numpy = np.flipud(image_numpy)
                self.vis.image(image_numpy.transpose([2,0,1]), opts=dict(title=label),
                                   win=self.display_id + idx)
                idx += 1

        if self.use_html: # save images to a html file
            for label, image_numpy in visuals.items():
                img_path = os.path.join(self.img_dir, 'epoch%.3d_%s.png' % (epoch, label))
                util.save_image(image_numpy, img_path)
            # update website
            webpage = html.HTML(self.web_dir, 'Experiment name = %s' % self.name, reflesh=1)
            for n in range(epoch, 0, -1):
                webpage.add_header('epoch [%d]' % n)
                ims = []
                txts = []
                links = []

                for label, image_numpy in visuals.items():
                    img_path = 'epoch%.3d_%s.png' % (n, label)
                    ims.append(img_path)
                    txts.append(label)
                    links.append(img_path)
                webpage.add_images(ims, txts, links, width=self.win_size)
            webpage.save()

    # errors: dictionary of error labels and values
HarmonicVMDCylWidget.py (project: em_examples, author: geoscixyz)
def mirrorArray(self, x, direction="x"):
        X = x.reshape((self.nx_core, self.ny_core), order="F")
        if direction == "x" or direction == "y":
            X2 = np.vstack((-np.flipud(X), X))
        else:
            X2 = np.vstack((np.flipud(X), X))
        return X2
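
A brief sketch of the mirroring idea above, applied directly with np.flipud rather than through the class (the 3 x 2 core grid is purely illustrative):

import numpy as np

X = np.arange(6, dtype=float).reshape(3, 2, order="F")   # nx_core x ny_core grid
X2 = np.vstack((-np.flipud(X), X))                        # odd mirror across the axis
print(X2.shape)                                           # (6, 2)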
TransientVMDCylWidget.py (project: em_examples, author: geoscixyz)
def mirrorArray(self, x, direction="x"):
        X = x.reshape((self.nx_core, self.ny_core), order="F")
        if direction == "x" or direction == "y":
            X2 = np.vstack((-np.flipud(X), X))
        else:
            X2 = np.vstack((np.flipud(X), X))
        return X2

