Python roll() example source code
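
Every snippet below relies on numpy.roll, which shifts array elements circularly along a given axis: values pushed off one end wrap around to the other. A minimal sketch of the basic semantics on tiny made-up arrays:

import numpy as np

a = np.arange(5)                 # [0 1 2 3 4]
print(np.roll(a, 2))             # [3 4 0 1 2]  shifted right by two, wrapped around
print(np.roll(a, -1))            # [1 2 3 4 0]  shifted left by one

m = np.arange(6).reshape(2, 3)
print(np.roll(m, 1, axis=1))     # [[2 0 1] [5 3 4]]  each row shifted right by one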

dispycos_client6.py (project: pycos, author: pgiri)
def rtask_avg_proc(threshold, trend_task, window_size, task=None):
    import numpy as np
    data = np.empty(window_size, dtype=float)
    data.fill(0.0)
    cumsum = 0.0
    while True:
        i, n = yield task.receive()
        if n is None:
            break
        cumsum += (n - data[0])
        avg = cumsum / window_size
        if avg > threshold:
            trend_task.send((i, 'high', float(avg)))
        elif avg < -threshold:
            trend_task.send((i, 'low', float(avg)))
        data = np.roll(data, -1)
        data[-1] = n
    raise StopIteration(0)
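
A minimal, dependency-free sketch of the rolling-window idiom used above (the pycos task plumbing is omitted): np.roll(data, -1) shifts the window left so the oldest sample lands at the end, where it is overwritten by the newest one.

import numpy as np

def rolling_average(stream, window_size):
    data = np.zeros(window_size, dtype=float)
    cumsum = 0.0
    for n in stream:
        cumsum += n - data[0]       # retire the oldest sample from the running sum
        data = np.roll(data, -1)    # shift left; the oldest value wraps to the end...
        data[-1] = n                # ...and is overwritten by the newest sample
        yield cumsum / window_size

print(list(rolling_average([1, 2, 3, 4, 5], window_size=2)))   # [0.5, 1.5, 2.5, 3.5, 4.5]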


utils.py (project: cellranger, author: 10XGenomics)
def numpy_groupby(values, keys):
    """ Group a collection of numpy arrays by key arrays.
        Yields (key_tuple, view_tuple) where key_tuple is the key grouped on and view_tuple is a tuple of views into the value arrays.
          values: tuple of arrays to group
          keys: tuple of sorted, numeric arrays to group by """

    if len(values) == 0:
        return
    if len(values[0]) == 0:
        return

    for key_array in keys:
        assert len(key_array) == len(keys[0])
    for value_array in values:
        assert len(value_array) == len(keys[0])

    # The indices where any of the keys differ from the previous key become group boundaries
    key_change_indices = np.logical_or.reduce(tuple(np.concatenate(([1], np.diff(key))) != 0 for key in keys))
    group_starts = np.flatnonzero(key_change_indices)
    group_ends = np.roll(group_starts, -1)
    group_ends[-1] = len(keys[0])

    for group_start, group_end in itertools.izip(group_starts, group_ends):
        yield tuple(key[group_start] for key in keys), tuple(value[group_start:group_end] for value in values)
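
A minimal sketch of the boundary trick used in numpy_groupby, on a tiny made-up key array: np.roll(group_starts, -1) pairs every group start with the start of the next group, and the wrapped-around last entry is overwritten with the array length to close the final group.

import numpy as np

keys = np.array([1, 1, 2, 2, 2, 5])
changed = np.concatenate(([1], np.diff(keys))) != 0   # True where a new key begins
group_starts = np.flatnonzero(changed)                # [0 2 5]
group_ends = np.roll(group_starts, -1)                # [2 5 0]  (last entry wrapped around)
group_ends[-1] = len(keys)                            # [2 5 6]

for start, end in zip(group_starts, group_ends):
    print(keys[start], keys[start:end])
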
agent.py (project: snake_game, author: wing3s)
def play(self, nb_rounds):
        img_saver = save_image()
        img_saver.next()

        game_cnt = it.count(1)
        for i in xrange(nb_rounds):
            game = self.game(width=self.width, height=self.height)
            screen, _ = game.next()
            img_saver.send(screen)
            frame_cnt = it.count()
            try:
                state = np.asarray([screen] * self.nb_frames)
                while True:
                    frame_cnt.next()
                    act_idx = np.argmax(
                        self.model.predict(state[np.newaxis]), axis=-1)[0]
                    screen, _ = game.send(self.actions[act_idx])
                    state = np.roll(state, 1, axis=0)
                    state[0] = screen
                    img_saver.send(screen)
            except StopIteration:
                print 'Saved %4i frames for game %3i' % (
                    frame_cnt.next(), game_cnt.next())
        img_saver.close()
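
A minimal sketch of the frame-stacking idiom in play, with small made-up 'screens': np.roll(state, 1, axis=0) pushes every stored frame back one slot so the newest frame can overwrite slot 0.

import numpy as np

nb_frames = 3
state = np.zeros((nb_frames, 2, 2))       # stack of the last nb_frames screens

for t in range(1, 5):
    screen = np.full((2, 2), t)           # stand-in for a freshly rendered frame
    state = np.roll(state, 1, axis=0)     # the oldest frame wraps to slot 0...
    state[0] = screen                     # ...and is replaced by the newest one
    print(t, state[:, 0, 0])              # frames ordered newest to oldest
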
simulations_from_real_data.py (project: genomedisco, author: kundajelab)
def shift_dataset(m,boundarynoise):
    if boundarynoise==0:
        return m
    nonzero_rows=np.where(m.any(axis=1))[0]
    small_m=copy.deepcopy(m)
    small_m=small_m[nonzero_rows,:]
    small_m=small_m[:,nonzero_rows]
    print small_m
    print 'roll'
    small_m=np.roll(small_m,boundarynoise,axis=0)
    print small_m
    print 'roll2'
    small_m=np.roll(small_m,boundarynoise,axis=1)
    print small_m
    outm=np.zeros(m.shape)
    for i_idx in range(len(nonzero_rows)):
        i=nonzero_rows[i_idx]
        for j_idx in range(i_idx,len(nonzero_rows)):
            j=nonzero_rows[j_idx]
            outm[i,j]=small_m[i_idx,j_idx]
            outm[j,i]=outm[i,j]
    return outm
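
A minimal illustration of the two rolls in shift_dataset: rolling a matrix by the same offset along both axes slides the whole pattern diagonally, with wrap-around at the borders (which is why the function restricts the roll to the non-empty rows and columns).

import numpy as np

m = np.arange(16).reshape(4, 4)
shifted = np.roll(np.roll(m, 1, axis=0), 1, axis=1)   # rows down by one, columns right by one
print(shifted)                                        # m[0, 0] now sits at shifted[1, 1]
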
camera_utils.py (project: pybot, author: spillai)
def points_and_normals(self): 
        """
        Returns the point/normals parametrization for planes, 
        including clipped zmin and zmax frustums

        Note: points need to be in CCW
        """

        nv1, fv1 = self._front_back_vertices
        nv2 = np.roll(nv1, -1, axis=0)
        fv2 = np.roll(fv1, -1, axis=0)

        vx = np.vstack([fv1-nv1, nv2[0]-nv1[0], fv1[2]-fv1[1]])
        vy = np.vstack([fv2-fv1, nv2[1]-nv2[0], fv1[1]-fv1[0]])
        pts = np.vstack([fv1, nv1[0], fv1[1]])

        # vx += 1e-12
        # vy += 1e-12

        vx /= np.linalg.norm(vx, axis=1).reshape(-1,1)
        vy /= np.linalg.norm(vy, axis=1).reshape(-1,1)

        normals = np.cross(vx, vy)
        normals /= np.linalg.norm(normals, axis=1).reshape(-1,1)
        return pts, normals
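
The np.roll(..., -1, axis=0) calls above pair each frustum vertex with its cyclic successor; a minimal sketch of the same pairing on a toy CCW square:

import numpy as np

poly = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])   # CCW unit square
nxt = np.roll(poly, -1, axis=0)   # each vertex paired with the following one (wraps around)
edges = nxt - poly                # edge vectors, including the closing edge back to the start
print(edges)
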
ImageFFT_class.py (project: DenoiseAverage, author: Pella86)
def correlate(self, imgfft):
        #Very much related to the convolution theorem, the cross-correlation
        #theorem states that the Fourier transform of the cross-correlation of
        #two functions is equal to the product of the individual Fourier
        #transforms, where one of them has been complex conjugated:  


        if self.imgfft is not 0 or imgfft.imgfft is not 0:
            imgcj = np.conjugate(self.imgfft)
            imgft = imgfft.imgfft

            prod = deepcopy(imgcj)
            for x in range(imgcj.shape[0]):
                for y in range(imgcj.shape[0]):
                    prod[x][y] = imgcj[x][y] * imgft[x][y]

            cc = Corr( np.real(fft.ifft2(fft.fftshift(prod)))) # real image of the correlation

            # adjust to center
            cc.data = np.roll(cc.data, int(cc.data.shape[0] / 2), axis = 0)
            cc.data = np.roll(cc.data, int(cc.data.shape[1] / 2), axis = 1)
        else:
            raise FFTnotInit()
        return cc
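
Rolling each axis by half its length, as done above to re-centre the correlation peak, is exactly what np.fft.fftshift does; a minimal check on a made-up array:

import numpy as np

x = np.random.rand(6, 6)
rolled = np.roll(np.roll(x, x.shape[0] // 2, axis=0), x.shape[1] // 2, axis=1)
print(np.array_equal(rolled, np.fft.fftshift(x)))   # True: the same re-centring operation
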
helpers.py (project: inqbus.rainflow, author: Inqbus)
def get_extrema(data):
    # find extrema by finding indexes where diff changes sign
    data_diff = np.diff(data)
    asign = np.sign(data_diff)
    signchange = ((np.roll(asign, 1) - asign) != 0).astype(int)

    # the first and last values are always treated as local extrema
    signchange[0] = 1

    # the diff array is one element shorter than the input, so append a final
    # marker to restore the original length
    signchange = np.append(signchange, np.array([1]))

    calc_data = data[np.where(signchange != 0)]

    return calc_data
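
A minimal sketch of the sign-change trick in get_extrema: comparing the sign of the differences with a copy of itself rolled by one flags the positions where the slope flips, i.e. the local extrema.

import numpy as np

data = np.array([0.0, 1.0, 3.0, 2.0, 0.5, 1.5, 4.0])
asign = np.sign(np.diff(data))                    # [ 1  1 -1 -1  1  1]
signchange = (np.roll(asign, 1) - asign) != 0     # True where the slope flips
print(np.flatnonzero(signchange))                 # [2 4]: data[2] and data[4] are local extrema
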
pySLUtilities.py (project: pyshearlab, author: stefanloock)
def SLdshear(inputArray, k, axis):
    """
    Computes the discretized shearing operator for a given inputArray, shear
    number k and axis.

    This version is adapted such that the MATLAB indexing can be used here in the
    Python version.
    """
    axis = axis - 1
    if k==0:
        return inputArray
    rows = np.asarray(inputArray.shape)[0]
    cols = np.asarray(inputArray.shape)[1]

    shearedArray = np.zeros((rows, cols), dtype=inputArray.dtype)

    if axis == 0:
        for col in range(cols):
            shearedArray[:,col] = np.roll(inputArray[:,col], int(k * np.floor(cols/2-col)))
    else:
        for row in range(rows):
            shearedArray[row,:] = np.roll(inputArray[row,:], int(k * np.floor(rows/2-row)))
    return shearedArray
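
A minimal illustration of the shearing loop above, using a toy array: rolling each row by a different amount slants a vertical feature into a diagonal one.

import numpy as np

a = np.zeros((5, 5), dtype=int)
a[:, 2] = 1                                                        # a vertical line
sheared = np.vstack([np.roll(a[r, :], r - 2) for r in range(5)])   # per-row shift
print(sheared)                                                     # the line is now a diagonal
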
script.py (project: deepdream, author: martinkaptein)
def make_step(net, step_size=1.5, end='inception_4c/output', jitter=32, clip=True, objective=objective_L2):

#function BAK def make_step(net, step_size=1.5, end='inception_4c/output', jitter=32, clip=True, objective=objective_L2):
    '''Basic gradient ascent step.'''

    src = net.blobs['data'] # input image is stored in Net's 'data' blob
    dst = net.blobs[end]

    ox, oy = np.random.randint(-jitter, jitter+1, 2)
    src.data[0] = np.roll(np.roll(src.data[0], ox, -1), oy, -2) # apply jitter shift

    net.forward(end=end)
    objective(dst)  # specify the optimization objective
    net.backward(start=end)
    g = src.diff[0]
    # apply normalized ascent step to the input image
    src.data[:] += step_size/np.abs(g).mean() * g

    src.data[0] = np.roll(np.roll(src.data[0], -ox, -1), -oy, -2) # unshift image

    if clip:
        bias = net.transformer.mean['data']
        src.data[:] = np.clip(src.data, -bias, 255-bias)
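
A minimal sketch of the jitter trick in make_step: roll the image by a random offset, do the gradient work, then roll by the negated offset to restore the original alignment exactly (circular shifts are lossless and invertible).

import numpy as np

img = np.random.rand(3, 32, 32)                   # channels-first stand-in image
ox, oy = np.random.randint(-8, 9, 2)

shifted = np.roll(np.roll(img, ox, -1), oy, -2)   # apply the jitter shift
# ... gradient ascent step would happen here ...
restored = np.roll(np.roll(shifted, -ox, -1), -oy, -2)   # undo the shift

print(np.array_equal(img, restored))              # True
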
2models.py (project: deepdream, author: martinkaptein)
def make_step(net, step_size=1.5, end='inception_4c/output', jitter=32, clip=True, objective=objective_L2):

#function BAK def make_step(net, step_size=1.5, end='inception_4c/output', jitter=32, clip=True, objective=objective_L2):
    '''Basic gradient ascent step.'''

    src = net.blobs['data'] # input image is stored in Net's 'data' blob
    dst = net.blobs[end]

    ox, oy = np.random.randint(-jitter, jitter+1, 2)
    src.data[0] = np.roll(np.roll(src.data[0], ox, -1), oy, -2) # apply jitter shift

    net.forward(end=end)
    objective(dst)  # specify the optimization objective
    net.backward(start=end)
    g = src.diff[0]
    # apply normalized ascent step to the input image
    src.data[:] += step_size/np.abs(g).mean() * g

    src.data[0] = np.roll(np.roll(src.data[0], -ox, -1), -oy, -2) # unshift image

    if clip:
        bias = net.transformer.mean['data']
        src.data[:] = np.clip(src.data, -bias, 255-bias)
video.py (project: deepdream, author: martinkaptein)
def make_step(net, step_size=1.5, end='inception_4d/output', jitter=32, clip=True, objective=objective_L2):

#function BAK def make_step(net, step_size=1.5, end='inception_4c/output', jitter=32, clip=True, objective=objective_L2):
    '''Basic gradient ascent step.'''

    src = net.blobs['data'] # input image is stored in Net's 'data' blob
    dst = net.blobs[end]

    ox, oy = np.random.randint(-jitter, jitter+1, 2)
    src.data[0] = np.roll(np.roll(src.data[0], ox, -1), oy, -2) # apply jitter shift

    net.forward(end=end)
    objective(dst)  # specify the optimization objective
    net.backward(start=end)
    g = src.diff[0]
    # apply normalized ascent step to the input image
    src.data[:] += step_size/np.abs(g).mean() * g

    src.data[0] = np.roll(np.roll(src.data[0], -ox, -1), -oy, -2) # unshift image

    if clip:
        bias = net.transformer.mean['data']
        src.data[:] = np.clip(src.data, -bias, 255-bias)
experimental.py (project: deepdream, author: martinkaptein)
def make_step(net, step_size=1.5, end='inception_5a/output', jitter=32, clip=False, objective=objective_L2):

#function BAK def make_step(net, step_size=1.5, end='inception_4c/output', jitter=32, clip=True, objective=objective_L2):
    '''Basic gradient ascent step.'''

    src = net.blobs['data'] # input image is stored in Net's 'data' blob
    dst = net.blobs[end]

    ox, oy = np.random.randint(-jitter, jitter+1, 2)
    src.data[0] = np.roll(np.roll(src.data[0], ox, -1), oy, -2) # apply jitter shift

    net.forward(end=end)
    objective(dst)  # specify the optimization objective
    net.backward(start=end)
    g = src.diff[0]
    # apply normalized ascent step to the input image
    src.data[:] += step_size/np.abs(g).mean() * g

    src.data[0] = np.roll(np.roll(src.data[0], -ox, -1), -oy, -2) # unshift image

    if clip:
        bias = net.transformer.mean['data']
        src.data[:] = np.clip(src.data, -bias, 255-bias)
topology.py (project: Sverchok, author: Sverchok)
def torus_faces(x, y):
    faces = np.empty((x * y, 4), dtype=np.uint32)
    tmp = np.arange(0, x * y)
    faces[:, 0] = tmp
    faces[:, 1] = np.roll(tmp, -y)
    tmp += 1
    tmp.shape = (x, y)
    tmp[:, y - 1] -= y
    tmp.shape = -1
    faces[:, 3] = tmp
    faces[:, 2] = np.roll(tmp, -y)
    faces.shape = -1
    l_total = np.empty(x * y, dtype=np.uint32)
    l_total[:] = 4
    l_start = np.arange(0, (x * y) * 4, 4, dtype=np.uint32)
    return SvPolygon(l_start, l_total, faces)
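
A minimal sketch of the wrap-around indexing in torus_faces: rolling a flat index array by -y gives, for every vertex, the index of the vertex one ring further along, with the last ring wrapping back to the first.

import numpy as np

x, y = 3, 4                        # 3 rings of 4 vertices each
idx = np.arange(x * y)
ring_neighbour = np.roll(idx, -y)  # vertex i is paired with vertex (i + y) % (x * y)
print(idx)
print(ring_neighbour)
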
wave_transform.py (project: semantic-segmentation, author: albertbuchard)
def fft_convolve(X,Y, inv = 0):

    XF = np.fft.rfft2(X)
    YF = np.fft.rfft2(Y)
#    YF0 = np.copy(YF)
#    YF.imag = 0
#    XF.imag = 0
    if inv == 1:
 #       plt.imshow(np.real(YF)); plt.colorbar(); plt.show()
        YF = np.conj(YF)

    SF = XF*YF

    S = np.fft.irfft2(SF)
    n1,n2 = np.shape(S)

    S = np.roll(S, -n1 // 2 + 1, axis=0)   # integer shift (matches the Python 2 floor division)
    S = np.roll(S, -n2 // 2 + 1, axis=1)

    return np.real(S)
google_home_lights.py (project: mic_array, author: respeaker)
def _wakeup(self, direction=0):
        position = int((direction + 15) / 30) % 12

        basis = numpy.roll(self.basis, position * 4)
        for i in range(1, 25):
            pixels = basis * i
            self.write(pixels)
            time.sleep(0.005)

        pixels =  numpy.roll(pixels, 4)
        self.write(pixels)
        time.sleep(0.1)

        for i in range(2):
            new_pixels = numpy.roll(pixels, 4)
            self.write(new_pixels * 0.5 + pixels)
            pixels = new_pixels
            time.sleep(0.1)

        self.write(pixels)
        self.pixels = pixels
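
A minimal sketch of the rotation in _wakeup, assuming the same flat layout of 4 values per LED used in the code above: rolling the pixel buffer by 4 moves every colour to the next LED around the ring.

import numpy as np

pixels = np.zeros(12 * 4)         # 12 LEDs, 4 values per LED (as in the code above)
pixels[0:4] = [0, 0, 255, 0]      # light only the first LED
rotated = np.roll(pixels, 4)      # the lit LED moves one position around the ring
print(np.flatnonzero(rotated))    # [6]: the non-zero value now sits in LED 1's slot
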
google_home_lights.py (project: mic_array, author: respeaker)
def _think(self):
        pixels = self.pixels

        self.next.clear()
        while not self.next.is_set():
            pixels = numpy.roll(pixels, 4)
            self.write(pixels)
            time.sleep(0.2)

        t = 0.1
        for i in range(0, 5):
            pixels = numpy.roll(pixels, 4)
            self.write(pixels * (4 - i) / 4)
            time.sleep(t)
            t /= 2

        # time.sleep(0.5)

        self.pixels = pixels
test_arrayiterator.py (project: ngraph, author: NervanaSystems)
def test_rolling_window(input_seq, batch_size, seq_len, strides):
    # This test checks if the rolling window works
    # We check if the first two samples in each batch are strided by strides

    # Truncate input sequence such that last section that doesn't fit in a batch
    # is thrown away
    input_seq = input_seq[:seq_len * batch_size * (len(input_seq) // seq_len // batch_size)]
    data_array = {'X': input_seq,
                  'y': np.roll(input_seq, axis=0, shift=-1)}
    time_steps = seq_len
    it_array = SequentialArrayIterator(data_arrays=data_array, time_steps=time_steps,
                                       stride=strides, batch_size=batch_size, tgt_key='y',
                                       shuffle=False)
    for idx, iter_val in enumerate(it_array):
        # Start of the array needs to be time_steps * idx
        assert np.array_equal(iter_val['X'][0, strides:time_steps],
                              iter_val['X'][1, :time_steps - strides])
        assert np.array_equal(iter_val['y'][0, strides:time_steps],
                              iter_val['y'][1, :time_steps - strides])
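
The test above builds its targets with np.roll(input_seq, axis=0, shift=-1): every target is simply the next element of the input, with the last one wrapping around. A tiny illustration:

import numpy as np

x = np.array([10, 20, 30, 40])
y = np.roll(x, shift=-1, axis=0)   # [20 30 40 10]: next-step targets, last one wraps
print(list(zip(x, y)))
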
combinatorics.py (project: quadpy, author: nschloe)
def pm_roll(n, v):
    '''Returns `2**k * n` points of dimension `n` such that

    p[0] = [+-v[0], ..., +-v[k], 0, ..., 0]
    p[1] = [0, +-v[0], ..., +-v[k], 0, ..., 0]
    ...
    p[n-1] = [+-v[1], ..., +-v[k], 0, ..., 0, +-v[0]]

    with all +- configurations.
    '''
    k = len(v)
    assert k <= n

    pm_v = pm_array(v)

    r0 = numpy.zeros((len(pm_v), n), dtype=pm_v.dtype)
    r0[:, :k] = pm_v

    return numpy.concatenate([
        numpy.roll(r0, i, axis=1)
        for i in range(n)
        ])


surrogates.py (project: tensorpac, author: EtienneCmb)
def time_lag(pha, amp, axis):
    """Introduce a time lag on phase series..

    Parameters
    ----------
    pha : array_like
        Array of phases of shapes (npha, ..., npts)

    amp : array_like
        Array of amplitudes of shapes (namp, ..., npts)

    axis : int
        Location of the time axis.

    Returns
    -------
    pha : array_like
        Shiffted version of phases of shapes (npha, ..., npts)

    amp : array_like
        Original version of amplitudes of shapes (namp, ..., npts)
    """
    npts = pha.shape[-1]
    return np.roll(pha, np.random.randint(npts), axis=axis), amp
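
A minimal sketch of the surrogate idea behind time_lag: a random circular shift of the phase series breaks its temporal alignment with the amplitude series while leaving the distribution of phase values untouched.

import numpy as np

pha = np.sin(np.linspace(0, 4 * np.pi, 100))
lag = np.random.randint(pha.shape[-1])
pha_shifted = np.roll(pha, lag, axis=-1)                  # same values, different timing
print(np.allclose(np.sort(pha), np.sort(pha_shifted)))    # True: distribution is unchanged
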
google_home_led_pattern.py (project: 4mics_hat, author: respeaker)
def wakeup(self, direction=0):
        position = int((direction + 15) / 30) % 12

        basis = numpy.roll(self.basis, position * 4)
        for i in range(1, 25):
            pixels = basis * i
            self.show(pixels)
            time.sleep(0.005)

        pixels =  numpy.roll(pixels, 4)
        self.show(pixels)
        time.sleep(0.1)

        for i in range(2):
            new_pixels = numpy.roll(pixels, 4)
            self.show(new_pixels * 0.5 + pixels)
            pixels = new_pixels
            time.sleep(0.1)

        self.show(pixels)
        self.pixels = pixels
google_home_led_pattern.py (project: 4mics_hat, author: respeaker)
def think(self):
        pixels = self.pixels

        while not self.stop:
            pixels = numpy.roll(pixels, 4)
            self.show(pixels)
            time.sleep(0.2)

        t = 0.1
        for i in range(0, 5):
            pixels = numpy.roll(pixels, 4)
            self.show(pixels * (4 - i) / 4)
            time.sleep(t)
            t /= 2

        self.pixels = pixels
deepdream.py (project: TensorFlow-Machine-Learning-Cookbook, author: PacktPublishing)
def calc_grad_tiled(img, t_grad, tile_size=512):
    '''Compute the value of tensor t_grad over the image in a tiled way.
    Random shifts are applied to the image to blur tile boundaries over 
    multiple iterations.'''
    # Pick a subregion square size
    sz = tile_size
    # Get the image height and width
    h, w = img.shape[:2]
    # Get a random shift amount in the x and y direction
    sx, sy = np.random.randint(sz, size=2)
    # Randomly shift the image (roll image) in the x and y directions
    img_shift = np.roll(np.roll(img, sx, 1), sy, 0)
    # Initialize the whole-image gradient as zeros
    grad = np.zeros_like(img)
    # Now we loop through all the sub-tiles in the image
    for y in range(0, max(h-sz//2, sz),sz):
        for x in range(0, max(w-sz//2, sz),sz):
            # Select the sub image tile
            sub = img_shift[y:y+sz,x:x+sz]
            # Calculate the gradient for the tile
            g = sess.run(t_grad, {t_input:sub})
            # Apply the gradient of the tile to the whole image gradient
            grad[y:y+sz,x:x+sz] = g
    # Return the gradient, undoing the roll operation
    return np.roll(np.roll(grad, -sx, 1), -sy, 0)
replay.py (project: nelpy, author: nelpy)
def column_cycle_array(posterior, amt=None):
    """Also called 'position cycle' by Kloosterman et al.
    If amt is an array of the same length as posterior, then
    cycle each column by the corresponding amount in amt.
    Otherwise, cycle each column by a random amount."""
    out = copy.deepcopy(posterior)
    rows, cols = posterior.shape

    if amt is None:
        for col in range(cols):
            if np.isnan(np.sum(posterior[:,col])):
                continue
            else:
                out[:,col] = np.roll(posterior[:,col], np.random.randint(1, rows))
    else:
        if len(amt) == cols:
            for col in range(cols):
                if np.isnan(np.sum(posterior[:,col])):
                    continue
                else:
                    out[:,col] = np.roll(posterior[:,col], int(amt[col]))
        else:
            raise TypeError("amt does not seem to be the correct shape!")
    return out
hmm_sparsity.py (project: nelpy, author: nelpy)
def _within_event_incoherent_shuffle(self, kind='train'):
        """Time cycle on BinnedSpikeTrainArray, cycling only within each epoch.
        We cycle each unit independently, within each epoch.
        """
        if kind == 'train':
            bst = self.PBEs_train
        elif kind == 'test':
            bst = self.PBEs_test
        else:
            raise ValueError("kind '{}' not understood!".format(kind))

        out = copy.deepcopy(bst) # should this be deep?
        data = out._data
        edges = np.insert(np.cumsum(bst.lengths),0,0)

        for uu in range(bst.n_units):
            for ii in range(bst.n_epochs):
                segment = np.squeeze(data[uu, edges[ii]:edges[ii+1]])
                segment = np.roll(segment, np.random.randint(len(segment)))
                data[uu, edges[ii]:edges[ii+1]] = segment

        if kind == 'train':
            self.PBEs_train = out
        else:
            self.PBEs_test = out
data.py (project: Personal_AI_Assistant, author: PratylenClub)
def _augment_speech(mfcc):

    # random frequency shift ( == speed perturbation effect on MFCC )
    r = np.random.randint(-2, 2)

    # shifting mfcc
    mfcc = np.roll(mfcc, r, axis=0)

    # zero padding
    if r > 0:
        mfcc[:r, :] = 0
    elif r < 0:
        mfcc[r:, :] = 0

    return mfcc
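
A minimal sketch of the roll-then-zero idiom in _augment_speech: np.roll is circular, so the rows that wrap around are explicitly zeroed to turn it into a plain shift with zero padding.

import numpy as np

mfcc = np.arange(12, dtype=float).reshape(4, 3)
r = 1                                 # shift down by one frame
shifted = np.roll(mfcc, r, axis=0)    # the last row has wrapped around to the top...
shifted[:r, :] = 0                    # ...so zero it out to get a non-circular shift
print(shifted)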


quickshear.py (project: quickshear, author: nipy)
def edge_mask(mask):
    """ Find the edges of a mask or masked image

    Parameters
    ----------
    mask : 3D array
        Binary mask (or masked image) with axis orientation LPS or RPS, and the
        non-brain region set to 0

    Returns
    -------
    2D array
        Outline of sagittal profile (PS orientation) of mask
    """
    # Sagittal profile
    brain = mask.any(axis=0)

    # Simple edge detection
    edgemask = 4 * brain - np.roll(brain, 1, 0) - np.roll(brain, -1, 0) - \
                           np.roll(brain, 1, 1) - np.roll(brain, -1, 1) != 0
    return edgemask.astype('uint8')
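
A minimal sketch of the neighbour comparison in edge_mask: for a binary mask, subtracting the four rolled copies from 4 * mask is non-zero exactly where a pixel differs from at least one of its four neighbours, which marks the boundary (note that np.roll wraps at the array borders).

import numpy as np

mask = np.zeros((7, 7), dtype=int)
mask[2:5, 2:5] = 1                                     # a filled 3x3 square
edge = (4 * mask
        - np.roll(mask, 1, 0) - np.roll(mask, -1, 0)
        - np.roll(mask, 1, 1) - np.roll(mask, -1, 1)) != 0
print(edge.astype(int))                                # 1s hug both sides of the square's border
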
linalg.py (project: sporco, author: bwohlberg)
def roll(u, shift):
    """
    Apply :func:`numpy.roll` to multiple array axes.

    Parameters
    ----------
    u : array_like
      Input array
    shift : array_like of int
      Shifts to apply to axes of input `u`

    Returns
    -------
    v : ndarray
      Output array
    """

    v = u.copy()
    for k in range(len(shift)):
        v = np.roll(v, shift[k], axis=k)
    return v
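
The helper above applies np.roll once per axis. Since NumPy 1.12, np.roll also accepts sequences for shift and axis, so the loop can be collapsed into a single call; a minimal check that the two forms agree (the loop form remains useful for older NumPy versions):

import numpy as np

u = np.arange(24).reshape(2, 3, 4)
shift = (1, 2)

v_loop = u.copy()
for k in range(len(shift)):
    v_loop = np.roll(v_loop, shift[k], axis=k)        # the loop form used above

v_tuple = np.roll(u, shift, axis=tuple(range(len(shift))))   # NumPy >= 1.12
print(np.array_equal(v_loop, v_tuple))                # True
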
samplers.py (project: importance-sampling, author: idiap)
def update(self, idxs, x):
        # Fetch the classes for the regression
        _, y = self.dataset.train_data[idxs]

        # If we are doing the regression in logspace
        if self.log:
            x = np.log(x)

        # Train the lstm so that it can predict x given the history
        self.model.train_on_batch([self.history[idxs], self._to_ids(y)], x)

        # Update the history to include x
        full = idxs[self.cnts[idxs] == self.history.shape[1]]
        self.history[full] = np.roll(self.history[full], -1, axis=1)
        self.cnts[full] -= 1
        self.history[idxs, self.cnts[idxs], :1] = x
        self.cnts[idxs] += 1

