Example source code for Python's swapaxes()
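
np.swapaxes(a, axis1, axis2) returns a view of a with the two given axes interchanged. A minimal sketch (the shapes below are illustrative):

import numpy as np

a = np.arange(24).reshape(2, 3, 4)   # shape (2, 3, 4)
b = np.swapaxes(a, 0, 2)             # shape (4, 3, 2); a view, not a copy
assert b.shape == (4, 3, 2)
assert b[1, 2, 0] == a[0, 2, 1]      # element (i, j, k) maps to (k, j, i)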

GCForest.py (project: gcForest, author: pylablanche)
def _create_feat_arr(self, X, prf_crf_pred):
        """ Concatenate the original feature vector with the predicition probabilities
        of a cascade layer.

        :param X: np.array
            Array containing the input samples.
            Must be of shape [n_samples, n_features].

        :param prf_crf_pred: list
            Prediction probabilities by a cascade layer for X.

        :return: np.array
            Concatenation of X and the predicted probabilities.
            To be used for the next layer in a cascade forest.
        """
        swap_pred = np.swapaxes(prf_crf_pred, 0, 1)
        add_feat = swap_pred.reshape([np.shape(X)[0], -1])
        feat_arr = np.concatenate([add_feat, X], axis=1)

        return feat_arr
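
A shape sketch of the concatenation above, using illustrative sizes (5 samples, 4 features, two forests each predicting 3 classes):

import numpy as np

X = np.random.rand(5, 4)                            # 5 samples, 4 original features
prf_crf_pred = [np.random.rand(5, 3),               # class probabilities from forest 1
                np.random.rand(5, 3)]               # class probabilities from forest 2
swap_pred = np.swapaxes(prf_crf_pred, 0, 1)         # (2, 5, 3) -> (5, 2, 3)
add_feat = swap_pred.reshape([np.shape(X)[0], -1])  # (5, 6)
feat_arr = np.concatenate([add_feat, X], axis=1)    # (5, 10)
assert feat_arr.shape == (5, 10)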
app.py (project: flask-app-for-mxnet-img-classifier, author: XD-DENG)
def get_image(file_location, local=False):
    # users can either
    # [1] upload a picture (local = True)
    # or
    # [2] provide the image URL (local = False)
    if local:
        fname = file_location
    else:
        fname = mx.test_utils.download(file_location, dirname="static/img_pool")
    img = cv2.imread(fname)

    if img is None:
        return None

    # convert into format (batch, RGB, height, width)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = cv2.resize(img, (224, 224))
    img = np.swapaxes(img, 0, 2)
    img = np.swapaxes(img, 1, 2)
    img = img[np.newaxis, :]

    return img
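
The pair of swapaxes calls plus np.newaxis above is the usual HWC to NCHW conversion. An equivalent single-transpose form, shown only as a sketch with a hypothetical helper name:

import numpy as np

def to_nchw(img):
    """Convert an (H, W, C) image array to (1, C, H, W) for MXNet-style models."""
    chw = np.transpose(img, (2, 0, 1))   # (H, W, C) -> (C, H, W)
    return chw[np.newaxis, :]            # add batch dimension -> (1, C, H, W)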
test_colorconv.py (project: FCN_train, author: 315386775)
def test_rgb2lab_brucelindbloom(self):
        """
        Test the RGB->Lab conversion by comparing to the calculator on the
        authoritative Bruce Lindbloom
        [website](http://brucelindbloom.com/index.html?ColorCalculator.html).
        """
        # Obtained with D65 white point, sRGB model and gamma
        gt_for_colbars = np.array([
            [100,0,0],
            [97.1393, -21.5537, 94.4780],
            [91.1132, -48.0875, -14.1312],
            [87.7347, -86.1827, 83.1793],
            [60.3242, 98.2343, -60.8249],
            [53.2408, 80.0925, 67.2032],
            [32.2970, 79.1875, -107.8602],
            [0,0,0]]).T
        gt_array = np.swapaxes(gt_for_colbars.reshape(3, 4, 2), 0, 2)
        assert_array_almost_equal(rgb2lab(self.colbars_array), gt_array, decimal=2)
test_colorconv.py (project: FCN_train, author: 315386775)
def test_rgb2luv_brucelindbloom(self):
        """
        Test the RGB->Luv conversion by comparing to the calculator on the
        authoritative Bruce Lindbloom
        [website](http://brucelindbloom.com/index.html?ColorCalculator.html).
        """
        # Obtained with D65 white point, sRGB model and gamma
        gt_for_colbars = np.array([
            [100, 0, 0],
            [97.1393, 7.7056, 106.7866],
            [91.1132, -70.4773, -15.2042],
            [87.7347, -83.0776, 107.3985],
            [60.3242, 84.0714, -108.6834],
            [53.2408, 175.0151, 37.7564],
            [32.2970, -9.4054, -130.3423],
            [0, 0, 0]]).T
        gt_array = np.swapaxes(gt_for_colbars.reshape(3, 4, 2), 0, 2)
        assert_array_almost_equal(rgb2luv(self.colbars_array),
                                  gt_array, decimal=2)
colorconv.py (project: FCN_train, author: 315386775)
def _convert(matrix, arr):
    """Do the color space conversion.

    Parameters
    ----------
    matrix : array_like
        The 3x3 matrix to use.
    arr : array_like
        The input array.

    Returns
    -------
    out : ndarray, dtype=float
        The converted array.
    """
    arr = _prepare_colorarray(arr)
    arr = np.swapaxes(arr, 0, -1)
    oldshape = arr.shape
    arr = np.reshape(arr, (3, -1))
    out = np.dot(matrix, arr)
    out.shape = oldshape
    out = np.swapaxes(out, -1, 0)

    return np.ascontiguousarray(out)
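
The swapaxes/reshape/dot sequence above applies the 3x3 matrix to the trailing channel axis of every pixel. A compact equivalent, shown only as a sketch with a hypothetical function name:

import numpy as np

def convert_lastaxis(matrix, arr):
    """Apply a 3x3 color matrix to the last (channel) axis of arr."""
    return np.einsum('ij,...j->...i', matrix, arr)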
cgan.py (project: shenlan, author: vector-1127)
def get_data(datadir):
    #datadir = args.data
    # assume each image is 512x256, split into left and right halves
    imgs = glob.glob(os.path.join(datadir, '*.jpg'))
    data_X = np.zeros((len(imgs), 3, img_cols, img_rows))
    data_Y = np.zeros((len(imgs), 3, img_cols, img_rows))
    for i, fname in enumerate(imgs):
        img = cv2.imread(fname, cv2.IMREAD_COLOR)
        img = cv2.resize(img, (img_cols*2, img_rows))
        #print('{} {},{}'.format(i, np.shape(img)[0], np.shape(img)[1]))
        img = np.swapaxes(img, 0, 2)  # HWC -> CWH

        X, Y = split_input(img)

        data_X[i, :, :, :] = X
        data_Y[i, :, :, :] = Y
    return data_X, data_Y
cnn_visualization.py (project: NumpyDL, author: oujago)
def train():
    for i in range(20000):
        randomint = randint(0, 10000 - batchsize - 1)
        trainingData = batch["data"][randomint:batchsize + randomint]
        rawlabel = batch["labels"][randomint:batchsize + randomint]
        trainingLabel = np.zeros((batchsize, 10))
        trainingLabel[np.arange(batchsize), rawlabel] = 1
        trainingData = trainingData / 255.0
        trainingData = np.reshape(trainingData, [-1, 3, 32, 32])
        trainingData = np.swapaxes(trainingData, 1, 3)  # NCHW -> NHWC

        if i % 10 == 0:
            validation_accuracy = accuracy.eval(feed_dict={
                img: validationData, lbl: validationLabel, keepProb: 1.0})
            print("step %d, validation accuracy %g" % (i, validation_accuracy))

            if i % 50 == 0:
                saver.save(sess, os.getcwd() + "/training/train", global_step=i)

        optimizer.run(feed_dict={img: trainingData, lbl: trainingLabel, keepProb: 0.5})
        print(i)
convert_data_2_hdf5.py (project: mtcnn, author: daikankan)
def write_hdf5(file, data, label_class, label_bbox, label_landmarks):
  # transform to np array
  data_arr = np.array(data, dtype = np.float32)
  # print data_arr.shape
  # if the axes were not already swapped upstream, transpose here to (num, channel, width, height)
  # data_arr = data_arr.transpose(0, 3, 2, 1)
  label_class_arr = np.array(label_class, dtype = np.float32)
  label_bbox_arr = np.array(label_bbox, dtype = np.float32)
  label_landmarks_arr = np.array(label_landmarks, dtype = np.float32)
  with h5py.File(file, 'w') as f:
    f['data'] = data_arr
    f['label_class'] = label_class_arr
    f['label_bbox'] = label_bbox_arr
    f['label_landmarks'] = label_landmarks_arr

# list_file format:
# image_path | label_class | label_boundingbox(4) | label_landmarks(10)
test_numeric.py (project: radar, author: amoose136)
def setUp(self):
        self.data = [
                # Array scalars
                (np.array(3.), None),
                (np.array(3), 'f8'),
                # 1D arrays
                (np.arange(6, dtype='f4'), None),
                (np.arange(6), 'c16'),
                # 2D C-layout arrays
                (np.arange(6).reshape(2, 3), None),
                (np.arange(6).reshape(3, 2), 'i1'),
                # 2D F-layout arrays
                (np.arange(6).reshape((2, 3), order='F'), None),
                (np.arange(6).reshape((3, 2), order='F'), 'i1'),
                # 3D C-layout arrays
                (np.arange(24).reshape(2, 3, 4), None),
                (np.arange(24).reshape(4, 3, 2), 'f4'),
                # 3D F-layout arrays
                (np.arange(24).reshape((2, 3, 4), order='F'), None),
                (np.arange(24).reshape((4, 3, 2), order='F'), 'f4'),
                # 3D non-C/F-layout arrays
                (np.arange(24).reshape(2, 3, 4).swapaxes(0, 1), None),
                (np.arange(24).reshape(4, 3, 2).swapaxes(0, 1), '?'),
                     ]
geom.py (project: Sverchok, author: Sverchok)
def eval(self, t_in):
        """
        Evaluate the spline at the points in t_in, which must be an array
        with values in [0,1]
        Returns an np.array with the corresponding points.
        """
        t_in = t_in.clip(0.0, 1.0)
        splines = self.splines
        tknots = self.tknots
        index = tknots.searchsorted(t_in, side='left') - 1
        index = index.clip(0, len(splines) - 1)
        to_calc = splines[index]
        ax, bx, cx, dx, tx = np.swapaxes(to_calc, 0, 1)
        t_r = t_in[:, np.newaxis] - tx
        out = ax + t_r * (bx + t_r * (cx + t_r * dx))
        return out
tools.py (project: brainpipe, author: EtienneCmb)
def binArray(x, binList, axis=0):
    """Bin an array along one axis.

    x : array
        Array to bin

    binList : list of tuple/list
        Each element gives the (start, stop) indices of one bin window in x

    axis : int, optional, [def: 0]
        Bin along the axis "axis"

    -> Returns the binned x and the center of each window.
    """
    nbin = len(binList)
    x = np.swapaxes(x, 0, axis)

    xBin = np.zeros((nbin,)+x.shape[1::])
    for k, i in enumerate(binList):
        if i[1] - i[0] == 1:
            xBin[k, ...] = x[i[0], ...]
        else:
            xBin[k, ...] = np.mean(x[i[0]:i[1], ...], 0)

    return np.swapaxes(xBin, 0, axis), [(k[0]+k[1])/2 for k in binList]
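
A usage sketch for binArray with illustrative inputs (the window boundaries below are made up):

import numpy as np

x = np.random.rand(100, 8)            # e.g. 100 time points x 8 channels
bins = [(0, 10), (10, 20), (20, 40)]  # (start, stop) index windows along axis 0
xBin, centers = binArray(x, bins, axis=0)
# xBin has shape (3, 8); centers are the window midpoints [5, 15, 30]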
caffe_functions.py (project: RealtimeFacialEmotionRecognition, author: sushant3095)
def compute_mean(input_list, plot_mean=False):
    # If no data supplied, use mean supplied with pretrained model
    if len(input_list) == 0:
        net_root = '.'
        net_dir = 'VGG_S_rgb'
        mean_filename=os.path.join(net_root, net_dir, 'mean.binaryproto')
        proto_data = open(mean_filename, "rb").read()
        a = caffe.io.caffe_pb2.BlobProto.FromString(proto_data)
        mean  = caffe.io.blobproto_to_array(a)[0]
    else:
        x,y,c = 256,256,3
        mean = np.zeros((c, x, y))
        for img_file in input_list:
            img = caffe.io.load_image(img_file)
            img = mod_dim(img, x, y, c)
            mean += img
        mean /= len(input_list)

        # Plot the mean image if desired:
        if plot_mean:
            plt.imshow(np.swapaxes(np.swapaxes(mean, 0, 1), 1, 2))
            plt.show()
    return mean

# Return VGG_S_Net from mean image and optional network type
caffe_functions.py (project: RealtimeFacialEmotionRecognition, author: sushant3095)
def classify_video_frame(frame, faces, VGG_S_Net, categories=None):
    # Handle incorrect image dims for uncropped images
    # TODO: Get uncropped images to import correctly
    #if frame.shape[0] == 3:
    #    frame = np.swapaxes(np.swapaxes(frame, 0, 1), 1, 2)


    # Convert to float format:
    frame = frame.astype(np.float32)
    frame /= 255.0

    labels = []

    for x,y,w,h in faces:
        img = frame[y:y+h,x:x+w,:]

        # Input image should be WxHxK, e.g. 490x640x3
        prediction = VGG_S_Net.predict([img], oversample=False)

        labels.append(prediction.argmax())

    return labels
app2.py (project: ecs-mxnet-example, author: awslabs)
def predict(url, mod, synsets):
    req = urllib2.urlopen(url)
    arr = np.asarray(bytearray(req.read()), dtype=np.uint8)
    cv2_img = cv2.imdecode(arr, -1)
    if cv2_img is None:
        return None
    img = cv2.cvtColor(cv2_img, cv2.COLOR_BGR2RGB)
    img = cv2.resize(img, (224, 224))
    img = np.swapaxes(img, 0, 2)
    img = np.swapaxes(img, 1, 2)
    img = img[np.newaxis, :]

    mod.forward(Batch([mx.nd.array(img)]))
    prob = mod.get_outputs()[0].asnumpy()
    prob = np.squeeze(prob)

    a = np.argsort(prob)[::-1]
    out = ''
    for i in a[0:5]:
        out += 'probability=%f, class=%s' % (prob[i], synsets[i])
    out += "\n"
    return out
ccmodmd.py (project: sporco, author: bwohlberg)
def block_sep0(self, Y):
        r"""Separate variable into component corresponding to
        :math:`\mathbf{y}_0` in :math:`\mathbf{y}\;\;`. The method from
        parent class :class:`.ADMMTwoBlockCnstrnt` is overridden here to
        allow swapping of K (multi-image) and M (filter) axes in block 0
        so that it can be concatenated on axis M with block 1. This is
        necessary because block 0 has the dimensions of S while block 1
        has the dimensions of D. Handling of multi-channel signals
        substantially complicates this issue. There are two multi-channel
        cases: multi-channel dictionary and signal (Cd = C > 1), and
        single-channel dictionary with multi-channel signal (Cd = 1, C >
        1). In the former case, S and D shapes are (N x C x K x 1) and
        (N x C x 1 x M) respectively. In the latter case,
        :meth:`.__init__` has already taken care of combining C
        (multi-channel) and K (multi-image) axes in S, so the S and D
        shapes are (N x 1 x C K x 1) and (N x 1 x 1 x M) respectively.
        """

        return np.swapaxes(Y[(slice(None),)*self.blkaxis +
            (slice(0, self.blkidx),)], self.cri.axisK, self.cri.axisM)
ccmodmd.py (project: sporco, author: bwohlberg)
def relax_AX(self):
        """The parent class method that this method overrides only
        implements the relaxation step for the variables of the baseline
        consensus algorithm. This method calls the overridden method and
        then implements the relaxation step for the additional variables
        required for the mask decoupling modification to the baseline
        algorithm.
        """

        super(ConvCnstrMODMaskDcpl_Consensus, self).relax_AX()
        self.AX1nr = sl.irfftn(sl.inner(self.Zf, self.swapaxes(self.Xf),
                                        axis=self.cri.axisM),
                               self.cri.Nv, self.cri.axisN)
        if self.rlx == 1.0:
            self.AX1 = self.AX1nr
        else:
            alpha = self.rlx
            self.AX1 = alpha*self.AX1nr + (1-alpha)*(self.Y1 + self.S)
cbpdntv.py (project: sporco, author: bwohlberg)
def block_sep1(self, Y):
        """Separate variable into component corresponding to Y1 in Y."""

        Y1 = Y[..., self.cri.M:]

        # If cri.Cd > 1 (multi-channel dictionary), we need to undo the
        # reshape performed in block_cat
        if self.cri.Cd > 1:
            shp = list(Y1.shape)
            shp[self.cri.axisM] = self.cri.dimN
            shp[self.cri.axisC] = self.cri.Cd
            Y1 = Y1.reshape(shp)

        # Axes are swapped here for similar reasons to those
        # motivating swapping in cbpdn.ConvTwoBlockCnstrnt.block_sep0
        Y1 =  np.swapaxes(Y1[..., np.newaxis], self.cri.axisM, -1)

        return Y1
cbpdntv.py (project: sporco, author: bwohlberg)
def block_cat(self, Y0, Y1):
        """Concatenate components corresponding to Y0 and Y1 blocks
        into Y.
        """

        # Axes are swapped here for similar reasons to those
        # motivating swapping in cbpdn.ConvTwoBlockCnstrnt.block_cat
        Y1sa = np.swapaxes(Y1, self.cri.axisM, -1)[..., 0]

        # If cri.Cd > 1 (multi-channel dictionary) Y0 has a singleton
        # channel axis but Y1 has a non-singleton channel axis. To make
        # it possible to concatenate Y0 and Y1, we reshape Y1 by a
        # partial ravel of axisM and axisC onto axisM.
        if self.cri.Cd > 1:
            shp = list(Y1sa.shape)
            shp[self.cri.axisM] *= shp[self.cri.axisC]
            shp[self.cri.axisC] = 1
            Y1sa = Y1sa.reshape(shp)

        return np.concatenate((Y0, Y1sa), axis=self.cri.axisM)
cbpdn.py (project: sporco, author: bwohlberg)
def block_sep0(self, Y):
        r"""Separate variable into component corresponding to
        :math:`\mathbf{y}_0` in :math:`\mathbf{y}\;\;`. The method
        from parent class :class:`.ADMMTwoBlockCnstrnt` is overridden
        here to allow swapping of C (channel) and M (filter) axes in
        block 0 so that it can be concatenated on axis M with block
        1. This is necessary because block 0 has the dimensions of S
        (N x C x K x 1) while block 1 has the dimensions of X (N x 1 x
        K x M).
        """
        if self.y0swapaxes:
            return np.swapaxes(Y[(slice(None),)*self.blkaxis +
                                 (slice(0, self.blkidx),)],
                               self.cri.axisC, self.cri.axisM)
        else:
            return super(ConvTwoBlockCnstrnt, self).block_sep0(Y)
parcnsdl.py (project: sporco, author: bwohlberg)
def swap_axis_to_0(x, axis):
    """Insert a new singleton axis at position 0 and swap it with the
    specified axis. The resulting array has an additional dimension,
    with ``axis`` + 1 (which was ``axis`` before the insertion of the
    new axis) of ``x`` at position 0, and a singleton axis at position
    ``axis`` + 1.

    Parameters
    ----------
    x : ndarray
      Input array
    axis : int
      Index of axis in ``x`` to swap to axis index 0.

    Returns
    -------
    arr : ndarray
      Output array
    """

    return np.ascontiguousarray(np.swapaxes(x[np.newaxis, ...], 0, axis+1))
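
A quick shape check for swap_axis_to_0 (the input size is illustrative):

import numpy as np

x = np.zeros((4, 5, 6))
y = swap_axis_to_0(x, axis=1)
# x[np.newaxis, ...] has shape (1, 4, 5, 6); swapping axes 0 and 2 (= axis + 1)
# gives shape (5, 4, 1, 6)
assert y.shape == (5, 4, 1, 6)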
spectral_analysis.py (project: resin, author: kylerbrown)
def multi_taper_psd(psd_generator):
    """
  Calculates an MTM PSD from the signal.

  Parameters:
    psd_generator  : see iter_mt()

  Returns:
    pxx   : NxMxT matrix of power values at each frequency,
             where T is the number of tapers
    freqs : vector of size N containing frequency at each index
            N
    times : vector of size M containing times corresponding to
            each index M
  """
    pxx = []
    t = []
    for spectrum, time in psd_generator:
        pxx.append(spectrum)
        t.append(time)
    pxx = np.swapaxes(np.array(pxx), 0, 1)  # freq needs to be first dim
    return pxx, np.array(t)

# --- Multi-taper machinery ---
test_numeric.py (project: krpcScripts, author: jwvanderbeck)
def setUp(self):
        self.data = [
                # Array scalars
                (np.array(3.), None),
                (np.array(3), 'f8'),
                # 1D arrays
                (np.arange(6, dtype='f4'), None),
                (np.arange(6), 'c16'),
                # 2D C-layout arrays
                (np.arange(6).reshape(2, 3), None),
                (np.arange(6).reshape(3, 2), 'i1'),
                # 2D F-layout arrays
                (np.arange(6).reshape((2, 3), order='F'), None),
                (np.arange(6).reshape((3, 2), order='F'), 'i1'),
                # 3D C-layout arrays
                (np.arange(24).reshape(2, 3, 4), None),
                (np.arange(24).reshape(4, 3, 2), 'f4'),
                # 3D F-layout arrays
                (np.arange(24).reshape((2, 3, 4), order='F'), None),
                (np.arange(24).reshape((4, 3, 2), order='F'), 'f4'),
                # 3D non-C/F-layout arrays
                (np.arange(24).reshape(2, 3, 4).swapaxes(0, 1), None),
                (np.arange(24).reshape(4, 3, 2).swapaxes(0, 1), '?'),
                     ]
__init__.py (project: ome-files-py, author: ome)
def open_bytes_simple(self, plane):
        """\
        Obtain the image plane for the given index as a numpy array, or a
        list of numpy arrays in the RGB case.
        """

        # Fetch 9D array
        a = self.open_bytes(plane)
        # Drop all unused dimensions
        s = np.squeeze(a, axis=(2, 3, 4, 6, 7, 8))
        # Swap x,y to y,x
        s = np.swapaxes(s, 0, 1)
        # Split RGB samples into separate arrays
        if s.shape[2] == 1:
            return np.squeeze(s, axis=2)
        else:
            return [s[:, :, i] for i in range(0, s.shape[2])]
descriptor.py (project: c3d_ucf101_siamese_yilin, author: fxing328)
def cropImg(img, oriRio=0.24/0.36):
    #oriImg = cv2.imread(img)
    #img = np.swapaxes(img,0,2)
    h = img.shape[0]
    w = img.shape[1]
    # Center-crop along the longer side, with the crop extent set by the
    # shorter side and the UCF101 aspect ratio
    if h <= w * oriRio:
        crop_ws = w//2 - 1 - int(h/(oriRio*2))
        crop_we = w//2 + int(h/(oriRio*2))
        subImg = img[:, crop_ws:crop_we, :]
    else:
        crop_hs = h//2 - 1 - int(w*(oriRio/2))
        crop_he = h//2 + int(w*(oriRio/2))
        subImg = img[crop_hs:crop_he, :, :]

    return subImg
descriptor.py (project: c3d_ucf101_siamese_yilin, author: fxing328)
def create_tensor(file1, mean_array):
    video_1 = cv2.VideoCapture(file1)
    # note: the frame count reported by OpenCV is not always correct
    len_1 = int(video_1.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))

    tensor_1 = np.zeros([3, len_1, 112, 112])
    count = 0
    while True:
        ret, frame_1 = video_1.read()
        if frame_1 is not None:
            tensor_1[:, count, :, :] = np.swapaxes(cv2.resize(cropImg(frame_1), (112, 112)), 0, 2) - mean_array
            count = count + 1
            print count
        else:
            break
    pdb.set_trace()
    tensor = tensor_1[:, :count, :, :]
    return tensor
mul_decriptor.py (project: c3d_ucf101_siamese_yilin, author: fxing328)
def cropImg(img, oriRio=0.24/0.36):
    #oriImg = cv2.imread(img)
    #img = np.swapaxes(img,0,2)
    h = img.shape[0]
    w = img.shape[1]
    # Center-crop along the longer side, with the crop extent set by the
    # shorter side and the UCF101 aspect ratio
    if h <= w * oriRio:
        crop_ws = w//2 - 1 - int(h/(oriRio*2))
        crop_we = w//2 + int(h/(oriRio*2))
        subImg = img[:, crop_ws:crop_we, :]
    else:
        crop_hs = h//2 - 1 - int(w*(oriRio/2))
        crop_he = h//2 + int(w*(oriRio/2))
        subImg = img[crop_hs:crop_he, :, :]

    return subImg
mul_decriptor.py (project: c3d_ucf101_siamese_yilin, author: fxing328)
def create_tensor(file1, mean_array):
    video_1 = cv2.VideoCapture(file1)
    len_1 = int(video_1.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))

    tensor_1 = np.zeros([3, len_1, 112, 112])
    count = 0
    while True:
        ret, frame_1 = video_1.read()
        if frame_1 is not None:
            tensor_1[:, count, :, :] = np.swapaxes(cv2.resize(cropImg(frame_1), (112, 112)), 0, 2) - mean_array
            count = count + 1
            print count
        else:
            break
    return tensor_1
pair_evaluation_ucf.py (project: c3d_ucf101_siamese_yilin, author: fxing328)
def cropImg(img, oriRio=0.24/0.36):
    #oriImg = cv2.imread(img)
    #img = np.swapaxes(img,0,2)
    h = img.shape[0]
    w = img.shape[1]
    # Center-crop along the longer side, with the crop extent set by the
    # shorter side and the UCF101 aspect ratio
    if h <= w * oriRio:
        crop_ws = w//2 - 1 - int(h/(oriRio*2))
        crop_we = w//2 + int(h/(oriRio*2))
        subImg = img[:, crop_ws:crop_we, :]
    else:
        crop_hs = h//2 - 1 - int(w*(oriRio/2))
        crop_he = h//2 + int(w*(oriRio/2))
        subImg = img[crop_hs:crop_he, :, :]

    return subImg
mul_decriptor1.py (project: c3d_ucf101_siamese_yilin, author: fxing328)
def cropImg(img, oriRio=0.24/0.36):
    #oriImg = cv2.imread(img)
    #img = np.swapaxes(img,0,2)
    h = img.shape[0]
    w = img.shape[1]
    # Center-crop along the longer side, with the crop extent set by the
    # shorter side and the UCF101 aspect ratio
    if h <= w * oriRio:
        crop_ws = w//2 - 1 - int(h/(oriRio*2))
        crop_we = w//2 + int(h/(oriRio*2))
        subImg = img[:, crop_ws:crop_we, :]
    else:
        crop_hs = h//2 - 1 - int(w*(oriRio/2))
        crop_he = h//2 + int(w*(oriRio/2))
        subImg = img[crop_hs:crop_he, :, :]

    return subImg
mul_decriptor1.py (project: c3d_ucf101_siamese_yilin, author: fxing328)
def create_tensor(file1, mean_array):
    video_1 = cv2.VideoCapture(file1)
    len_1 = int(video_1.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))

    tensor_1 = np.zeros([3, len_1, 112, 112])
    count = 0
    while True:
        ret, frame_1 = video_1.read()
        if frame_1 is not None:
            tensor_1[:, count, :, :] = np.swapaxes(cv2.resize(cropImg(frame_1), (112, 112)), 0, 2) - mean_array
            count = count + 1
            print count
        else:
            break
    return tensor_1

