Example source code for Python's floor()

nddl.py (project: ndparse, author: neurodata)
def _downsample_mask(X, pct):
    """ Create a boolean mask indicating which subset of X should be
    evaluated.
    """
    if pct < 1.0:
        Mask = np.zeros(X.shape, dtype=bool)
        m = X.shape[-2]
        n = X.shape[-1]
        nToEval = np.round(pct*m*n).astype(np.int32)
        idx = sobol(2, nToEval, 0)
        idx[0] = np.floor(m*idx[0])
        idx[1] = np.floor(n*idx[1])
        idx = idx.astype(np.int32)
        Mask[:,:,idx[0], idx[1]] = True
    else:
        Mask = np.ones(X.shape, dtype=bool)

    return Mask
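
A minimal standalone sketch of the same idea; it swaps the Sobol quasi-random sequence for numpy's uniform sampler so it runs without the sobol dependency, and assumes the 4D (batch, channel, m, n) layout the indexing above implies:

import numpy as np

def downsample_mask_demo(X, pct, seed=0):
    """Flag roughly pct of each spatial plane for evaluation."""
    rng = np.random.default_rng(seed)
    mask = np.zeros(X.shape, dtype=bool)
    m, n = X.shape[-2], X.shape[-1]
    n_eval = int(np.round(pct * m * n))
    pts = rng.random((2, n_eval))                 # uniform points stand in for sobol(2, n_eval, 0)
    rows = np.floor(m * pts[0]).astype(np.int32)
    cols = np.floor(n * pts[1]).astype(np.int32)
    mask[:, :, rows, cols] = True
    return mask

X = np.zeros((1, 1, 100, 100))
print(downsample_mask_demo(X, 0.25).mean())       # close to 0.25 (duplicate points lower it a bit)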
spatial_average_pooling.py (project: PyFunt, author: dnlcrl)
def update_output(self, x):
        N, C, H, W = x.shape
        pool_height, pool_width = self.kH, self.kW  # kH = kernel height, kW = kernel width
        stride = self.dW

        assert (
            H - pool_height) % stride == 0 or H == pool_height, 'Invalid height'
        assert (
            W - pool_width) % stride == 0 or W == pool_width, 'Invalid width'

        out_height = int(np.floor((H - pool_height) / stride + 1))
        out_width = int(np.floor((W - pool_width) / stride + 1))

        x_split = x.reshape(N * C, 1, H, W)
        x_cols = im2col_cython(
            x_split, pool_height, pool_width, padding=0, stride=stride)
        x_cols_avg = np.mean(x_cols, axis=0)
        out = x_cols_avg.reshape(
            out_height, out_width, N, C).transpose(2, 3, 0, 1)

        self.x_shape = x.shape
        self.x_cols = x_cols
        self.output = out
        return self.output
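
For reference, a quick standalone check of the floor-based output-size formula used above, plus the same average pooling done with a plain reshape (valid when the window tiles the input exactly; no im2col needed):

import numpy as np

H, W = 8, 8           # input spatial size
pool, stride = 2, 2   # square 2x2 average pooling, stride 2

out_h = int(np.floor((H - pool) / stride + 1))
out_w = int(np.floor((W - pool) / stride + 1))
print(out_h, out_w)   # 4 4

# same pooling via a reshape, valid because pool == stride and it tiles H, W exactly
x = np.arange(H * W, dtype=float).reshape(1, 1, H, W)
out = x.reshape(1, 1, out_h, pool, out_w, pool).mean(axis=(3, 5))
print(out.shape)      # (1, 1, 4, 4)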
TopLoad.py (project: fem, author: mlp6)
def extract_top_plane_nodes(nodefile, top_face):
    """

    :param nodefile:
    :param top_face:
    :return: planeNodeIDs
    """
    import numpy as np
    import fem_mesh

    top_face = np.array(top_face)

    nodeIDcoords = fem_mesh.load_nodeIDs_coords(nodefile)
    [snic, axes] = fem_mesh.SortNodeIDs(nodeIDcoords)

    # extract spatially-sorted node IDs on the top plane
    axis = int(np.floor(np.divide(top_face.nonzero(), 2)))
    if np.mod(top_face.nonzero(), 2) == 1:
        plane = (axis, axes[axis].max())
    else:
        plane = (axis, axes[axis].min())

    planeNodeIDs = fem_mesh.extractPlane(snic, axes, plane)

    return planeNodeIDs
bench.py (project: radar, author: amoose136)
def timer(s, v='', nloop=500, nrep=3):
    units = ["s", "ms", "µs", "ns"]
    scaling = [1, 1e3, 1e6, 1e9]
    print("%s : %-50s : " % (v, s), end=' ')
    varnames = ["%ss,nm%ss,%sl,nm%sl" % tuple(x*4) for x in 'xyz']
    setup = 'from __main__ import numpy, ma, %s' % ','.join(varnames)
    Timer = timeit.Timer(stmt=s, setup=setup)
    best = min(Timer.repeat(nrep, nloop)) / nloop
    if best > 0.0:
        order = min(-int(numpy.floor(numpy.log10(best)) // 3), 3)
    else:
        order = 3
    print("%d loops, best of %d: %.*g %s per loop" % (nloop, nrep,
                                                      3,
                                                      best * scaling[order],
                                                      units[order]))
desert_mirage_lib.py (project: desert-mirage, author: valentour)
def dec_round(num, dprec=4, rnd='down', rto_zero=False):
    """
    Round up/down numeric ``num`` at specified decimal ``dprec``.

    Parameters
    ----------
    num: float
    dprec: int
        Decimal position for truncation.
    rnd: str (default: 'down')
        Set as 'up' or 'down' to return a rounded-up or rounded-down value.
    rto_zero: bool (default: False)
        Use a *round-towards-zero* method, e.g., rounding -3.45 'down' at
        ``dprec=1`` gives -3.4 rather than -3.5.

    Returns
    -------
    float
        ``num`` rounded at ``dprec`` decimals (rounded down by default).
    """
    scale = 10**dprec
    if rnd == 'up' or (rnd == 'down' and rto_zero and num < 0.):
        return np.ceil(num*scale)/scale
    elif rnd == 'down' or (rnd == 'up' and rto_zero and num < 0.):
        return np.floor(num*scale)/scale
    return np.round(num, dprec)
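
A few usage checks for dec_round (a sketch; it assumes dec_round as defined above is in scope):

import numpy as np

# assumes dec_round as defined above is in scope
print(dec_round(3.14159, dprec=2, rnd='down'))                   # 3.14
print(dec_round(3.14159, dprec=2, rnd='up'))                     # 3.15
print(dec_round(-3.14159, dprec=2, rnd='down', rto_zero=True))   # -3.14 (rounds towards zero)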
dataset.py (project: sceneReco, author: bear63)
def __call__(self, batch):
        images, labels = zip(*batch)

        imgH = self.imgH
        imgW = self.imgW
        if self.keep_ratio:
            ratios = []
            for image in images:
                w, h = image.size
                ratios.append(w / float(h))
            ratios.sort()
            max_ratio = ratios[-1]
            imgW = int(np.floor(max_ratio * imgH))
            imgW = max(imgH * self.min_ratio, imgW)  # ensure imgW >= imgH * min_ratio

        transform = resizeNormalize((imgW, imgH))
        images = [transform(image) for image in images]
        images = torch.cat([t.unsqueeze(0) for t in images], 0)

        return images, labels
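
A standalone illustration of the keep_ratio branch above: the widest image in the batch sets the target width, floored onto whole pixels and clamped from below by imgH * min_ratio (imgH = 32 and min_ratio = 1 are assumed values here):

import numpy as np

imgH, min_ratio = 32, 1                   # assumed collate settings
sizes = [(100, 32), (220, 40), (64, 64)]  # (w, h) of the images in a batch

ratios = sorted(w / float(h) for w, h in sizes)
imgW = int(np.floor(ratios[-1] * imgH))   # widest ratio decides the width: 5.5 * 32
imgW = max(imgH * min_ratio, imgW)
print(imgW)   # 176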
cyberpunk_trainer.py (project: third_person_im, author: bstadie)
def shuffle_to_training_data(self, expert_data, on_policy_data, expert_fail_data):
        data = np.vstack([expert_data['data'], on_policy_data['data'], expert_fail_data['data']])
        classes = np.vstack([expert_data['classes'], on_policy_data['classes'], expert_fail_data['classes']])
        domains = np.vstack([expert_data['domains'], on_policy_data['domains'], expert_fail_data['domains']])

        sample_range = data.shape[0]*data.shape[1]
        all_idxs = np.random.permutation(sample_range)

        t_steps = data.shape[1]

        data_matrix = np.zeros(shape=(sample_range, self.im_height, self.im_width, self.im_channels))
        data_matrix_two = np.zeros(shape=(sample_range, self.im_height, self.im_width, self.im_channels))
        class_matrix = np.zeros(shape=(sample_range, 2))
        dom_matrix = np.zeros(shape=(sample_range, 2))
        for one_idx, iter_step in zip(all_idxs, range(0, sample_range)):
            traj_key = int(np.floor(one_idx / t_steps))  # trajectory index of this flat sample
            time_key = one_idx % t_steps
            time_key_plus_one = min(time_key + 3, t_steps-1)
            data_matrix[iter_step, :, :, :] = data[traj_key, time_key, :, :, :]
            data_matrix_two[iter_step, :, :, :] = data[traj_key, time_key_plus_one, :, :, :]
            class_matrix[iter_step, :] = classes[traj_key, time_key, :]
            dom_matrix[iter_step, :] = domains[traj_key, time_key, :]
        return data_matrix, data_matrix_two, dom_matrix, class_matrix
voc_data_layer.py (project: pycaffe-yolo, author: Zehaos)
def transform_to_yolo_labels(self, labels):
        """
        Transform voc_label_parser' result to yolo label.
        :param labels: [is_obj, x, y, w, h, class_probs..], ...
        :return: yolo label
        """
        yolo_label = np.zeros([self.side, self.side, (1 + self.coords) + self.classes]).astype(np.float32)
        shuffle(labels)
        for label in labels:
            yolo_box = self.convert_to_yolo_box(self.ori_im_shape[::-1], list(label[2:]))
            assert np.max(yolo_box) < 1
            [loc_y, loc_x] = [int(np.floor(yolo_box[1] * self.side)), int(np.floor(yolo_box[0] * self.side))]
            yolo_label[loc_y][loc_x][0] = 1.0  # is obj
            yolo_label[loc_y][loc_x][1:5] = yolo_box  # bbox
            yolo_label[loc_y][loc_x][5:] = 0  # only one obj in one grid
            yolo_label[loc_y][loc_x][4+label[0]] = 1.0  # class
        return yolo_label
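
The floor calls above simply pick the grid cell responsible for a box center; a standalone check with side = 7 (the classic YOLO grid) and a made-up normalized box:

import numpy as np

side = 7
yolo_box = np.array([0.62, 0.31, 0.20, 0.45])   # hypothetical normalized (x, y, w, h)

loc_x = int(np.floor(yolo_box[0] * side))   # column of the responsible cell
loc_y = int(np.floor(yolo_box[1] * side))   # row of the responsible cell
print(loc_y, loc_x)   # 2 4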
sun_position.py (project: astk, author: openalea-incubator)
def julian_date(hUTC, dayofyear, year):
    """ Julian calendar date

    Args:
        hUTC: fractional hour (UTC time)
        dayofyear (int): day of year (1..366)
        year (int): calendar year

    Returns:
        the Julian date

    Details:
        World Meteorological Organization (2006). Guide to meteorological
        instruments and methods of observation. Geneva, Switzerland.
    """
    delta = year - 1949
    leap = numpy.floor(delta / 4.)
    return 2432916.5 + delta * 365 + leap + dayofyear + hUTC / 24.
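
A quick sanity check: 2000-01-01 00:00 UTC (day of year 1) should give the standard epoch JD 2451544.5, and the formula above does.

import numpy

# assumes julian_date as defined above is in scope
print(julian_date(0, 1, 2000))   # 2451544.5

# the same value spelled out: delta = 51 years, 12 leap days since 1949
print(2432916.5 + 51 * 365 + numpy.floor(51 / 4.) + 1 + 0 / 24.)   # 2451544.5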
feature_extractor.py (project: speech_feature_extractor, author: ZhihaoDU)
def unknown_feature_extractor(x, sr, win_len, shift_len, barks, inner_win, inner_shift, win_type, method_version):
    x_spectrum = stft_extractor(x, win_len, shift_len, win_type)
    coef = get_fft_bark_mat(sr, win_len, barks, 20, sr//2)
    bark_spect = np.matmul(coef, x_spectrum)
    ams = np.zeros((barks, inner_win//2+1, (bark_spect.shape[1] - inner_win)//inner_shift))
    for i in range(barks):
        channel_stft = stft_extractor(bark_spect[i, :], inner_win, inner_shift, 'hanning')
        if method_version == 'v1':
            ams[i, :, :] = 20 * np.log(np.abs(channel_stft[:inner_win//2+1, :(bark_spect.shape[1] - inner_win)//inner_shift]))
        elif method_version == 'v2':
            channel_amplitude = np.abs(channel_stft[:inner_win//2+1, :(bark_spect.shape[1] - inner_win)//inner_shift])
            channel_angle = np.angle(channel_stft[:inner_win//2+1, :(bark_spect.shape[1] - inner_win)//inner_shift])
            channel_angle = channel_angle - (np.floor(channel_angle / (2.*np.pi)) * (2.*np.pi))
            ams[i, :, :] = np.power(channel_amplitude, 1./3.) * channel_angle
        else:
            ams[i, :, :] = np.abs(channel_stft)
    return ams
ams_extractor.py (project: speech_feature_extractor, author: ZhihaoDU)
def ams_extractor(x, sr, win_len, shift_len, barks, inner_win, inner_shift, win_type, method_version):
    x_spectrum = stft_extractor(x, win_len, shift_len, win_type)
    coef = get_fft_bark_mat(sr, win_len, barks, 20, sr//2)
    bark_spect = np.matmul(coef, x_spectrum)
    ams = np.zeros((barks, inner_win//2+1, (bark_spect.shape[1] - inner_win)//inner_shift))
    for i in range(barks):
        channel_stft = stft_extractor(bark_spect[i, :], inner_win, inner_shift, 'hanning')
        if method_version == 'v1':
            ams[i, :, :] = 20 * np.log(np.abs(channel_stft[:inner_win//2+1, :(bark_spect.shape[1] - inner_win)//inner_shift]))
        elif method_version == 'v2':
            channel_amplitude = np.abs(channel_stft[:inner_win//2+1, :(bark_spect.shape[1] - inner_win)//inner_shift])
            channel_angle = np.angle(channel_stft[:inner_win//2+1, :(bark_spect.shape[1] - inner_win)//inner_shift])
            channel_angle = channel_angle - (np.floor(channel_angle / (2.*np.pi)) * (2.*np.pi))
            ams[i, :, :] = np.power(channel_amplitude, 1./3.) * channel_angle
        else:
            ams[i, :, :] = np.abs(channel_stft)
    return ams
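
The floor call in both extractors above is the phase-wrapping step of the 'v2' branch; a standalone check that it maps arbitrary angles into [0, 2*pi):

import numpy as np

angles = np.array([-np.pi, -0.5, 0.0, 3.5 * np.pi])
wrapped = angles - np.floor(angles / (2. * np.pi)) * (2. * np.pi)
print(wrapped)   # roughly [3.1416 5.7832 0.     4.7124]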
VideoTools.py (project: SlidingWindowVideoTDA, author: ctralie)
def getTimeDerivative(I, Win):
    dw = np.floor(Win/2)
    t = np.arange(-dw, dw+1)
    sigma = 0.4*dw
    xgaussf = t*np.exp(-t**2  / (2*sigma**2))
    #Normalize by L1 norm to control for length of window
    xgaussf = xgaussf/np.sum(np.abs(xgaussf))
    xgaussf = xgaussf[:, None]
    IRet = scipy.signal.convolve2d(I, xgaussf, 'valid')
    validIdx = np.arange(dw, I.shape[0]-dw, dtype='int64')
    return [IRet, validIdx]
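
For a concrete view of the smoothing kernel built above, this reproduces xgaussf for Win = 9; it is antisymmetric and L1-normalized, so convolving with it gives a Gaussian-smoothed time derivative:

import numpy as np

Win = 9
dw = np.floor(Win / 2)                        # 4.0
t = np.arange(-dw, dw + 1)
sigma = 0.4 * dw
xgaussf = t * np.exp(-t**2 / (2 * sigma**2))
xgaussf = xgaussf / np.sum(np.abs(xgaussf))   # L1-normalize to control for window length
print(xgaussf.round(3))                       # antisymmetric, sums to 0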


#############################################################
####            FAST TIME DELAY EMBEDDING, Tau = 1      #####
#############################################################
#Input: I: P x N Video with frames along the columns
#W: Windows
#Output: Mu: P x W video with mean frames along the columns
utils.py (project: magenta, author: tensorflow)
def mu_law(x, mu=255, int8=False):
  """A TF implementation of Mu-Law encoding.

  Args:
    x: The audio samples to encode.
    mu: The Mu to use in our Mu-Law.
    int8: Use int8 encoding.

  Returns:
    out: The Mu-Law encoded int8 data.
  """
  out = tf.sign(x) * tf.log(1 + mu * tf.abs(x)) / np.log(1 + mu)
  out = tf.floor(out * 128)
  if int8:
    out = tf.cast(out, tf.int8)
  return out
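
A numpy-only sketch of the same mu-law companding math for spot-checking values (this is not the magenta API, just the formula above, and it assumes inputs scaled to [-1, 1)):

import numpy as np

def mu_law_np(x, mu=255):
    """Mu-law compress x in [-1, 1) to integers in [-128, 127]."""
    out = np.sign(x) * np.log1p(mu * np.abs(x)) / np.log1p(mu)
    return np.floor(out * 128).astype(np.int8)

x = np.array([-1.0, -0.5, 0.0, 0.5, 0.999])
print(mu_law_np(x))   # [-128 -113    0  112  127]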
gputools.py (project: slitSpectrographBlind, author: aasensio)
def impad_gpu(y_gpu, sf):

  sf = np.array(sf)
  shape = (np.array(y_gpu.shape) + sf).astype(np.uint32)
  dtype = y_gpu.dtype
  block_size = (16,16,1)
  grid_size = (int(np.ceil(float(shape[1])/block_size[0])),
               int(np.ceil(float(shape[0])/block_size[1])))

  preproc = _generate_preproc(dtype, shape)
  mod = SourceModule(preproc + kernel_code, keep=True)

  padded_gpu = cua.empty((int(shape[0]), int(shape[1])), dtype)
  impad_fun = mod.get_function("impad")

  upper_left = np.uint32(np.floor(sf / 2.))
  original_size = np.uint32(np.array(y_gpu.shape))

  impad_fun(padded_gpu.gpudata, y_gpu.gpudata,
            upper_left[1], upper_left[0],
            original_size[0], original_size[1],
            block=block_size, grid=grid_size)

  return padded_gpu
gputools.py (project: slitSpectrographBlind, author: aasensio)
def laplace_stack_gpu(y_gpu, mode='valid'):
  """
  This function computes the Laplacian of each slice of a stack of images
  """
  shape = np.array(y_gpu.shape).astype(np.uint32)
  dtype = y_gpu.dtype
  block_size = (6,int(np.floor(512./6./float(shape[0]))),int(shape[0]))
  grid_size = (int(np.ceil(float(shape[1])/block_size[0])),
               int(np.ceil(float(shape[0])/block_size[1])))
  shared_size = int((2+block_size[0])*(2+block_size[1])*(2+block_size[2])
                    *dtype.itemsize)

  preproc = _generate_preproc(dtype, (shape[1],shape[2]))
  mod = SourceModule(preproc + kernel_code, keep=True)

  laplace_fun_gpu = mod.get_function("laplace_stack_same")
  laplace_gpu = cua.empty((y_gpu.shape[0], y_gpu.shape[1], y_gpu.shape[2]),
                          y_gpu.dtype)

  laplace_fun_gpu(laplace_gpu.gpudata, y_gpu.gpudata,
                  block=block_size, grid=grid_size, shared=shared_size)

  return laplace_gpu
gputools.py (project: slitSpectrographBlind, author: aasensio)
def laplace3d_gpu(y_gpu):

  shape = np.array(y_gpu.shape).astype(np.uint32)
  dtype = y_gpu.dtype
  block_size = (6,int(np.floor(512./6./float(shape[0]))),int(shape[0]))
  grid_size = (int(np.ceil(float(shape[1])/block_size[0])),
               int(np.ceil(float(shape[0])/block_size[1])))
  shared_size = int((2+block_size[0])*(2+block_size[1])*(2+block_size[2])
                    *dtype.itemsize)

  preproc = _generate_preproc(dtype, (shape[1],shape[2]))
  mod = SourceModule(preproc + kernel_code, keep=True)

  laplace_fun_gpu = mod.get_function("laplace3d_same")
  laplace_gpu = cua.empty((y_gpu.shape[0], y_gpu.shape[1], y_gpu.shape[2]),
                          y_gpu.dtype)

  laplace_fun_gpu(laplace_gpu.gpudata, y_gpu.gpudata,
                  block=block_size, grid=grid_size, shared=shared_size)

  return laplace_gpu
gputools.py (project: slitSpectrographBlind, author: aasensio)
def wsparsify(w_gpu, percentage):
  """
  Keeps only as many entries nonzero as specified by percentage.
  """

  w    = w_gpu.get()
  vals = np.sort(w.flatten())[::-1]
  idx  = int(np.floor(np.prod(w.shape) * percentage/100))
  zw_gpu = cua.zeros_like(w_gpu)   # gpu array filled with zeros
  tw_gpu = cua.empty_like(w_gpu)   # gpu array containing threshold
  tw_gpu.fill(vals[idx])
  w_gpu  = cua.if_positive(w_gpu > tw_gpu, w_gpu, zw_gpu)

  del zw_gpu
  del tw_gpu

  return w_gpu
imagetools.py (project: slitSpectrographBlind, author: aasensio)
def sparsify(x, percentage):
    """
    Keeps only as many entries nonzero as specified by percentage.
    Note that only the largest values are kept.

    --------------------------------------------------------------------------
    Usage:

    Call:  y = sparsify(x, percentage)

    Input: x            input ndarray x
           percentage   percentage of nonzero entries in y 

    Output: sparsified version of x            
    --------------------------------------------------------------------------

    Copyright (C) 2011 Michael Hirsch   
    """
    vals = np.sort(x.flatten())[::-1]
    idx  = int(np.floor(np.prod(x.shape) * percentage/100))
    x[x < vals[idx]] = 0

    return x
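
A short usage sketch for sparsify; note it thresholds in place, so pass a copy if the input array matters:

import numpy as np

# assumes sparsify as defined above is in scope
x = np.arange(20, dtype=float).reshape(4, 5)     # values 0..19
y = sparsify(x.copy(), percentage=25)
print(np.count_nonzero(y))   # 6, i.e. values 14..19 survive the threshold vals[5] = 14.0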
spectrogram.py (project: Multi-channel-speech-extraction-using-DNN, author: zhr1201)
def stft(sig, frameSize, overlapFac=0.75, window=np.hanning):
    """ short time fourier transform of audio signal """
    win = window(frameSize)
    hopSize = int(frameSize - np.floor(overlapFac * frameSize))
    # zeros at beginning (thus center of 1st window should be for sample nr. 0)
    # samples = np.append(np.zeros(np.floor(frameSize / 2.0)), sig)
    samples = np.array(sig, dtype='float64')
    # cols for windowing
    cols = int(np.floor((len(samples) - frameSize) / float(hopSize)))
    # zeros at end (thus samples can be fully covered by frames)
    # samples = np.append(samples, np.zeros(frameSize))
    frames = stride_tricks.as_strided(
        samples,
        shape=(cols, frameSize),
        strides=(samples.strides[0] * hopSize, samples.strides[0])).copy()
    frames *= win
    return np.fft.rfft(frames)
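
A quick check on a synthetic 1 kHz tone, assuming stft as defined above (and its module-level stride_tricks import); the dominant rfft bin should land at 1 kHz:

import numpy as np

# assumes stft as defined above is in scope
fs, f0 = 16000, 1000.0
t = np.arange(fs) / float(fs)           # one second of audio
sig = np.sin(2 * np.pi * f0 * t)

spec = stft(sig, frameSize=1024)        # hopSize = 1024 - 768 = 256 samples
print(spec.shape)                       # (58, 513)
peak_bin = np.abs(spec[0]).argmax()
print(peak_bin * fs / 1024.0)           # 1000.0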
audio_eval.py (project: Multi-channel-speech-extraction-using-DNN, author: zhr1201)
def stft(sig, frameSize, overlapFac=0.75, window=np.hanning):
    """ short time fourier transform of audio signal """
    win = window(frameSize)
    hopSize = int(frameSize - np.floor(overlapFac * frameSize))
    # zeros at beginning (thus center of 1st window should be for sample nr. 0)
    # samples = np.append(np.zeros(np.floor(frameSize / 2.0)), sig)
    samples = np.array(sig, dtype='float64')
    # cols for windowing
    cols = int(np.ceil((len(samples) - frameSize) / float(hopSize)) + 1)  # the +1 frame assumes the (commented-out) end padding
    # zeros at end (thus samples can be fully covered by frames)
    # samples = np.append(samples, np.zeros(frameSize))
    frames = stride_tricks.as_strided(
        samples,
        shape=(cols, frameSize),
        strides=(samples.strides[0] * hopSize, samples.strides[0])).copy()
    frames *= win
    return np.fft.rfft(frames)

