Python norm() usage examples

skipthoughts.py source (project: TAC-GAN, author: dashayushman)
def nn(model, text, vectors, query, k=5):
    """
    Return the nearest neighbour sentences to query
    text: list of sentences
    vectors: the corresponding representations for text
    query: a string to search
    """
    qf = encode(model, [query])
    qf /= norm(qf)
    scores = numpy.dot(qf, vectors.T).flatten()
    sorted_args = numpy.argsort(scores)[::-1]
    sentences = [text[a] for a in sorted_args[:k]]
    print('QUERY: ' + query)
    print('NEAREST: ')
    for i, s in enumerate(sentences):
        print(s, sorted_args[i])
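
A minimal, self-contained sketch of the same scoring step (unit-normalise, dot product, argsort) using plain NumPy only; the random arrays below are illustrative stand-ins for the skip-thoughts encodings, not the real model output.

import numpy as np
from numpy.linalg import norm

vectors = np.random.randn(5, 4)                    # stand-ins for encoded sentences
vectors /= norm(vectors, axis=1, keepdims=True)    # unit-normalise each row
query_vec = np.random.randn(4)                     # stand-in for encode(model, [query])
query_vec /= norm(query_vec)

scores = vectors.dot(query_vec)                    # cosine similarities
nearest = np.argsort(scores)[::-1][:3]             # indices of the top-3 matches
print(nearest, scores[nearest])
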
skipthoughts.py source (project: how_to_convert_text_to_images, author: llSourcell)
def nn(model, text, vectors, query, k=5):
    """
    Return the nearest neighbour sentences to query
    text: list of sentences
    vectors: the corresponding representations for text
    query: a string to search
    """
    qf = encode(model, [query])
    qf /= norm(qf)
    scores = numpy.dot(qf, vectors.T).flatten()
    sorted_args = numpy.argsort(scores)[::-1]
    sentences = [text[a] for a in sorted_args[:k]]
    print('QUERY: ' + query)
    print('NEAREST: ')
    for i, s in enumerate(sentences):
        print(s, sorted_args[i])
skipthoughts.py source (project: StackGAN, author: hanzhanggit)
def nn(model, text, vectors, query, k=5):
    """
    Return the nearest neighbour sentences to query
    text: list of sentences
    vectors: the corresponding representations for text
    query: a string to search
    """
    qf = encode(model, [query])
    qf /= norm(qf)
    scores = numpy.dot(qf, vectors.T).flatten()
    sorted_args = numpy.argsort(scores)[::-1]
    sentences = [text[a] for a in sorted_args[:k]]
    print('QUERY: ' + query)
    print('NEAREST: ')
    for i, s in enumerate(sentences):
        print(s, sorted_args[i])
regression.py source (project: l1l2py, author: slipguru)
def get_lipschitz(data):
    """Get the Lipschitz constant for a specific loss function.

    Only square loss implemented.

    Parameters
    ----------
    data : (n, d) float ndarray
        data matrix

    Returns
    -------
    L : float
        the Lipschitz constant
    """
    n, p = data.shape

    if p > n:
        tmp = np.dot(data, data.T)
    else:
        tmp = np.dot(data.T, data)
    return la.norm(tmp, 2)
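
For the square loss, the value returned above is the spectral norm of the Gram matrix, i.e. the squared largest singular value of the data matrix. A quick sanity check of that identity (a sketch, assuming plain NumPy):

import numpy as np
import numpy.linalg as la

X = np.random.randn(50, 20)
L = la.norm(X.T.dot(X), 2)            # what get_lipschitz computes when n >= p
sigma_max = la.svd(X, compute_uv=False)[0]
print(np.isclose(L, sigma_max**2))    # True: L equals the largest singular value squared
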
regression.py source (project: l1l2py, author: slipguru)
def prox_l1(w, alpha):
    r"""Proximity operator for l1 norm.

    :math:`\hat{\alpha}_{l,m} = sign(u_{l,m})\left||u_{l,m}| - \alpha\right|_+`

    Parameters
    ----------
    u : ndarray
        The vector (of the n-dimensional space) on which we want
        to compute the proximal operator
    alpha : float
        regularisation parameter
    Returns
    -------
    ndarray : the vector corresponding to the application of the
             proximity operator to u
    """
    return np.sign(w) * np.maximum(np.abs(w) - alpha, 0.)
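
prox_l1 is elementwise soft-thresholding: entries with magnitude below alpha are zeroed and the rest shrink toward zero by alpha. A small worked example:

import numpy as np

w = np.array([-2.0, -0.3, 0.0, 0.5, 1.5])
alpha = 0.7
print(np.sign(w) * np.maximum(np.abs(w) - alpha, 0.0))   # [-1.3 -0.  0.  0.  0.8]
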
linalg.py source (project: sporco, author: bwohlberg)
def dctii(x, axes=None):
    """
    Compute a multi-dimensional DCT-II over specified array axes. This
    function is implemented by calling the one-dimensional DCT-II
    :func:`scipy.fftpack.dct` with normalization mode 'ortho' for each
    of the specified axes.

    Parameters
    ----------
    x : array_like
      Input array
    axes : sequence of ints, optional (default None)
      Axes over which to compute the DCT-II.

    Returns
    -------
    y : ndarray
      DCT-II of input array
    """

    if axes is None:
        axes = list(range(x.ndim))
    for ax in axes:
        x = fftpack.dct(x, type=2, axis=ax, norm='ortho')
    return x
linalg.py source (project: sporco, author: bwohlberg)
def idctii(x, axes=None):
    """
    Compute a multi-dimensional inverse DCT-II over specified array axes.
    This function is implemented by calling the one-dimensional inverse
    DCT-II :func:`scipy.fftpack.idct` with normalization mode 'ortho'
    for each of the specified axes.

    Parameters
    ----------
    x : array_like
      Input array
    axes : sequence of ints, optional (default None)
      Axes over which to compute the inverse DCT-II.

    Returns
    -------
    y : ndarray
      Inverse DCT-II of input array
    """

    if axes is None:
        axes = list(range(x.ndim))
    for ax in axes[::-1]:
        x = fftpack.idct(x, type=2, axis=ax, norm='ortho')
    return x
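
Because both directions use norm='ortho' on every axis, applying the forward and then the inverse transform over the same axes should reproduce the input to floating-point precision. A self-contained round-trip check that calls scipy.fftpack directly (so it runs without importing the two helpers above):

import numpy as np
from scipy import fftpack

x = np.random.randn(8, 8, 3)
y = x.copy()
for ax in (0, 1):                                    # forward DCT-II on axes 0 and 1
    y = fftpack.dct(y, type=2, axis=ax, norm='ortho')
z = y.copy()
for ax in (1, 0):                                    # inverse in reverse axis order
    z = fftpack.idct(z, type=2, axis=ax, norm='ortho')
print(np.allclose(x, z))                             # True
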
linalg.py source (project: sporco, author: bwohlberg)
def fl2norm2(xf, axis=(0, 1)):
    r"""
    Compute the squared :math:`\ell_2` norm in the DFT domain, taking
    into account the unnormalised DFT scaling, i.e. given the DFT of a
    multi-dimensional array computed via :func:`fftn`, return the
    squared :math:`\ell_2` norm of the original array.

    Parameters
    ----------
    xf : array_like
      Input array
    axis : sequence of ints, optional (default (0,1))
      Axes on which the input is in the frequency domain

    Returns
    -------
    x : float
      :math:`\|\mathbf{x}\|_2^2` where the input array is the result of
      applying :func:`fftn` to the specified axes of multi-dimensional
      array :math:`\mathbf{x}`
    """

    xfs = xf.shape
    return (linalg.norm(xf)**2)/np.prod(np.array([xfs[k] for k in axis]))
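
The division by the product of the transformed dimensions is Parseval's relation for an unnormalised FFT: ||fftn(x)||^2 = N * ||x||^2, with N the number of points along the transformed axes. A minimal NumPy check of that identity:

import numpy as np

x = np.random.randn(16, 16, 3)
xf = np.fft.fftn(x, axes=(0, 1))
lhs = np.linalg.norm(xf)**2 / (16 * 16)    # what fl2norm2 computes
rhs = np.linalg.norm(x)**2
print(np.isclose(lhs, rhs))                # True by Parseval's theorem
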
linalg.py source (project: sporco, author: bwohlberg)
def rrs(ax, b):
    r"""
    Compute relative residual :math:`\|\mathbf{b} - A \mathbf{x}\|_2 /
    \|\mathbf{b}\|_2` of the solution to a linear equation :math:`A \mathbf{x}
    = \mathbf{b}`. Returns 1.0 if :math:`\mathbf{b} = 0`.

    Parameters
    ----------
    ax : array_like
      Linear component :math:`A \mathbf{x}` of equation
    b : array_like
      Constant component :math:`\mathbf{b}` of equation

    Returns
    -------
    x : float
      Relative residual
    """

    nrm = linalg.norm(b.ravel())
    if nrm == 0.0:
        return 1.0
    else:
        return linalg.norm((ax - b).ravel()) / nrm
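
A short usage sketch (assuming numpy.linalg imported as linalg, as in the snippet): the relative residual is near machine precision for an exact solution and grows with the error in x.

import numpy as np
from numpy import linalg

A = np.random.randn(10, 10)
x = np.random.randn(10)
b = A.dot(x)
print(linalg.norm((A.dot(x) - b).ravel()) / linalg.norm(b.ravel()))        # ~1e-16
x_bad = x + 1e-3 * np.random.randn(10)
print(linalg.norm((A.dot(x_bad) - b).ravel()) / linalg.norm(b.ravel()))    # small but nonzero
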
admm.py source (project: sporco, author: bwohlberg)
def compute_residuals(self):
        """Compute residuals and stopping thresholds."""

        if self.opt['AutoRho', 'StdResiduals']:
            r = linalg.norm(self.rsdl_r(self.AXnr, self.Y))
            s = linalg.norm(self.rsdl_s(self.Yprev, self.Y))
            epri = scipy.sqrt(self.Nc)*self.opt['AbsStopTol'] + \
                self.rsdl_rn(self.AXnr, self.Y)*self.opt['RelStopTol']
            edua = scipy.sqrt(self.Nx)*self.opt['AbsStopTol'] + \
                self.rsdl_sn(self.U)*self.opt['RelStopTol']
        else:
            rn = self.rsdl_rn(self.AXnr, self.Y)
            if rn == 0.0:
                rn = 1.0
            sn = self.rsdl_sn(self.U)
            if sn == 0.0:
                sn = 1.0
            r = linalg.norm(self.rsdl_r(self.AXnr, self.Y)) / rn
            s = linalg.norm(self.rsdl_s(self.Yprev, self.Y)) / sn
            epri = scipy.sqrt(self.Nc)*self.opt['AbsStopTol']/rn + \
                self.opt['RelStopTol']
            edua = scipy.sqrt(self.Nx)*self.opt['AbsStopTol']/sn + \
                self.opt['RelStopTol']

        return r, s, epri, edua
test_bpdn.py source (project: sporco, author: bwohlberg)
def test_08(self):
        N = 64
        M = 2*N
        L = 4
        np.random.seed(12345)
        D = np.random.randn(N, M)
        x0 = np.zeros((M, 1))
        si = np.random.permutation(list(range(0, M-1)))
        x0[si[0:L]] = np.random.randn(L, 1)
        s0 = D.dot(x0)
        lmbda = 5e-3
        opt = bpdn.BPDN.Options({'Verbose': False, 'MaxMainIter': 500,
                                 'RelStopTol': 5e-4})
        b = bpdn.BPDN(D, s0, lmbda, opt)
        b.solve()
        x1 = b.Y
        assert(np.abs(b.itstat[-1].ObjFun - 0.012009) < 1e-5)
        assert(np.abs(b.itstat[-1].DFid - 1.9636082e-06) < 1e-5)
        assert(np.abs(b.itstat[-1].RegL1 - 2.401446) < 1e-5)
        assert(linalg.norm(x1-x0) < 1e-3)
main.py source (project: classical-machine-learning-algorithm, author: xwzhong)
def costFunc(alpha, *args):
    i = args[2]
    original_thetai = args[0]
    delta_thetai = args[1]
    x = args[3]
    y = args[4]
    _lambda = args[5]
    labels = set(y)
    thetai = original_thetai
    thetai[i, :] = thetai[i, :] - alpha * delta_thetai
    k = 0
    sum_log_p = 0.0
    for label in labels:
        index = y == label
        xi = x[index]
        p = condProb(original_thetai,thetai[k, :], xi)
        log_p = np.log10(p)
        sum_log_p = sum_log_p + log_p.sum()
        k = k + 1
    r = -sum_log_p / x.shape[0] + (_lambda / 2.0) * pow(norm(thetai), 2)
    # print(r, alpha)
    return r
pf_rank_process.py source (project: cellstar, author: Fafa87)
def distance_smooth_norm(expected, result):
    """
    Calculates 2-norm from difference in fitness between expected and given snakes
    @param expected: array of expected fitness
    @param result: array of given fitness
    @return:
    """
    global best_so_far, calculations
    n = result.size
    differences = abs(expected - result) ** 4 * np.arange(n * 2, 0, -2)
    distance = norm(differences) / np.sqrt(n)

    best_so_far = min(best_so_far, distance)
    calculations += 1

    show_progress(distance, calculations)
    return distance
_sparse_tools_test.py source (project: FermiLib, author: ProjectQ-Framework)
def test_jw_restrict_operator(self):
        """Test the scheme for restricting JW encoded operators to number"""
        # Make a Hamiltonian that cares mostly about number of electrons
        n_qubits = 6
        target_electrons = 3
        penalty_const = 100.
        number_sparse = jordan_wigner_sparse(number_operator(n_qubits))
        bias_sparse = jordan_wigner_sparse(
            sum([FermionOperator(((i, 1), (i, 0)), 1.0) for i
                 in range(n_qubits)], FermionOperator()))
        hamiltonian_sparse = penalty_const * (
            number_sparse - target_electrons *
            scipy.sparse.identity(2**n_qubits)).dot(
            number_sparse - target_electrons *
            scipy.sparse.identity(2**n_qubits)) + bias_sparse

        restricted_hamiltonian = jw_number_restrict_operator(
            hamiltonian_sparse, target_electrons, n_qubits)
        true_eigvals, _ = eigh(hamiltonian_sparse.A)
        test_eigvals, _ = eigh(restricted_hamiltonian.A)

        self.assertAlmostEqual(norm(true_eigvals[:20] - test_eigvals[:20]),
                               0.0)
dca.py source (project: dca, author: BenjoCowley)
def get_Gt(u, gradf, t):
    # vector used for backtracking check with projected gradient descent

    u_n = u - t * gradf
    norm_u_n = norm(u_n)

    if norm_u_n > 1.0:  # project to L2 unit ball
        u_norm = u_n / norm_u_n
    else:
        u_norm = u_n

    Gt = 1.0 / t * (u - u_norm)

    return Gt
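
The branch above is the Euclidean projection onto the unit L2 ball: points inside are left untouched, points outside are rescaled to unit norm. A standalone sketch of just that projection:

import numpy as np
from numpy.linalg import norm

def project_l2_ball(u):
    """Project u onto the closed unit L2 ball."""
    n = norm(u)
    return u / n if n > 1.0 else u

print(norm(project_l2_ball(np.array([3.0, 4.0]))))   # 1.0, rescaled from norm 5
print(project_l2_ball(np.array([0.3, 0.4])))         # unchanged, already inside the ball
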






###################################################
##  DCA_ONE STOCH  - STOCHASTIC GRADIENT DESCENT ##
###################################################
skipthoughts.py source (project: StackGAN_pytorch, author: qizhex)
def nn(model, text, vectors, query, k=5):
    """
    Return the nearest neighbour sentences to query
    text: list of sentences
    vectors: the corresponding representations for text
    query: a string to search
    """
    qf = encode(model, [query])
    qf /= norm(qf)
    scores = numpy.dot(qf, vectors.T).flatten()
    sorted_args = numpy.argsort(scores)[::-1]
    sentences = [text[a] for a in sorted_args[:k]]
    print('QUERY: ' + query)
    print('NEAREST: ')
    for i, s in enumerate(sentences):
        print(s, sorted_args[i])
logistic_regression.py source (project: prml, author: Yevgnen)
def logistic_regression(x, t, w, eps=1e-2, max_iter=int(1e3)):
    N = x.shape[1]
    Phi = np.vstack([np.ones(N), phi(x)]).T

    for k in range(max_iter):
        y = expit(Phi.dot(w))
        R = np.diag(np.ones(N) * (y * (1 - y)))
        H = Phi.T.dot(R).dot(Phi)
        g = Phi.T.dot(y - t)

        w_new = w - linalg.solve(H, g)

        diff = linalg.norm(w_new - w) / linalg.norm(w)
        if (diff < eps):
            break

        w = w_new
        print('{0:5d} {1:10.6f}'.format(k, diff))

    return w
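
Each pass of the loop is one Newton (IRLS) step, w_new = w - solve(H, g) with H = Phi.T R Phi and g = Phi.T (y - t). A self-contained toy run on synthetic 1-D data; the feature map here is simply the raw input (an assumption for illustration, since phi is defined elsewhere in the project):

import numpy as np
from numpy import linalg
from scipy.special import expit

rng = np.random.RandomState(0)
x = rng.randn(100)
t = (x + rng.randn(100) > 0).astype(float)    # noisy binary targets
Phi = np.vstack([np.ones(100), x]).T          # design matrix with a bias column
w = np.zeros(2)
for k in range(100):
    y = expit(Phi.dot(w))
    R = np.diag(y * (1 - y))
    H = Phi.T.dot(R).dot(Phi)
    g = Phi.T.dot(y - t)
    w_new = w - linalg.solve(H, g)
    if linalg.norm(w_new - w) / max(linalg.norm(w), 1e-12) < 1e-2:
        break
    w = w_new
print(w)                                      # learned bias and slope
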
skipthoughts.py source (project: ConversationalQA, author: btjhjeon)
def nn(model, text, vectors, query, k=5):
    """
    Return the nearest neighbour sentences to query
    text: list of sentences
    vectors: the corresponding representations for text
    query: a string to search
    """
    qf = encode(model, [query])
    qf /= norm(qf)
    scores = numpy.dot(qf, vectors.T).flatten()
    sorted_args = numpy.argsort(scores)[::-1]
    sentences = [text[a] for a in sorted_args[:k]]
    print('QUERY: ' + query)
    print('NEAREST: ')
    for i, s in enumerate(sentences):
        print(s, sorted_args[i])
eval_rank.py source (project: ConversationalQA, author: btjhjeon)
def build_encoder(tparams, options):
    """
    Construct encoder
    """
    # inputs (image, sentence)
    im = tensor.matrix('im', dtype='float32')
    s = tensor.matrix('s', dtype='float32')

    # embeddings
    eim = get_layer('ff')[1](tparams, im, options, prefix='ff_im', activ='linear')
    es = get_layer('ff')[1](tparams, s, options, prefix='ff_s', activ='linear')

    # L2 norm of rows
    lim = l2norm(eim)
    ls = l2norm(es)

    return [im, s], lim, ls

# optimizers
# name(hyperp, tparams, grads, inputs (list), cost) = f_grad_shared, f_update
point_cloud.py source (project: FRIDA, author: LCAV)
def doa(self, receiver, source):
        ''' Computes the direction of arrival wrt a source and receiver '''

        s_ind = self.key2ind(source)
        r_ind = self.key2ind(receiver)

        # vector from receiver to source
        v = self.X[:,s_ind] - self.X[:,r_ind]

        azimuth = np.arctan2(v[1], v[0])
        elevation = np.arctan2(v[2], la.norm(v[:2]))

        azimuth = azimuth + 2*np.pi if azimuth < 0. else azimuth
        elevation = elevation + 2*np.pi if elevation < 0. else elevation

        return np.array([azimuth, elevation])
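
A small numeric example of the same geometry: a source one unit along x and one unit up relative to the receiver has azimuth 0 and elevation 45 degrees (angles computed in radians, numpy.linalg imported as la):

import numpy as np
import numpy.linalg as la

v = np.array([1.0, 0.0, 1.0])                   # receiver-to-source vector (x, y, z)
azimuth = np.arctan2(v[1], v[0])                # angle in the horizontal plane
elevation = np.arctan2(v[2], la.norm(v[:2]))    # angle above the horizontal plane
print(np.degrees([azimuth, elevation]))         # [ 0. 45.]
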
classifiers.py source (project: parametrix, author: vincentchoqueuse)
def compute_criterion(self,y):

        self.N=len(y)

        #construct matrices
        A=np.matrix(self.A)
        b=np.matrix(self.b).T
        H=np.matrix(self.H)

        x=lg.inv(H.T*H)*H.T*y                           #estimation of x

        if self.estimate_sigma2 is True:
            r,p=self.A.shape
            coef=(self.N-p)/r
            den=lg.norm(y-H*x)**2
        else:
            den=self.sigma2
            coef=1

        term1=A*x-b
        num=term1.T*lg.inv(A*lg.inv(H.T*H)*A.T)*term1
        self.criterion=coef*num/den  ## See page 274 / 345
snapshot.py source (project: eTraGo, author: openego)
def linkage(df, n_groups):
    # create the distance matrix based on the Frobenius norm: |A-B|_F where A is
    # a 24 x N matrix with N the number of timeseries inside the dataframe df
    # TODO: We can save time as we only need the upper triangle once as the
    # distance matrix is symmetric
    if True:
        Y = np.empty((n_groups, n_groups,))
        Y[:] = np.nan
        for i in range(len(Y)):
            for j in range(len(Y[i,:])):
                A = df.loc[i+1].values
                B = df.loc[j+1].values
                #print('Computing distance of:{},{}'.format(i,j))
                Y[i,j] = norm(A-B, ord='fro')

    # condensed distance matrix as vector for linkage (upper triangle as a vector)
    y = Y[np.triu_indices(n_groups, 1)]
    # create linkage matrix with wards algorithm an euclidean norm
    Z = hac.linkage(y, method='ward', metric='euclidean')
    # R = hac.inconsistent(Z, d=10)
    return Z
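
A minimal end-to-end sketch of the same pipeline on random data: pairwise Frobenius distances between a few matrices, condensed to the upper triangle, then fed to Ward linkage (assuming scipy.cluster.hierarchy as hac; the random matrices stand in for the 24 x N frames of the dataframe):

import numpy as np
from numpy.linalg import norm
import scipy.cluster.hierarchy as hac

n_groups = 4
mats = [np.random.randn(24, 3) for _ in range(n_groups)]
Y = np.zeros((n_groups, n_groups))
for i in range(n_groups):
    for j in range(n_groups):
        Y[i, j] = norm(mats[i] - mats[j], ord='fro')
y = Y[np.triu_indices(n_groups, 1)]             # condensed upper triangle as a vector
Z = hac.linkage(y, method='ward', metric='euclidean')
print(Z.shape)                                  # (n_groups - 1, 4)
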
qr.py source (project: Matrix-Analysis, author: kingofspace0wzz)
def qr_ls(A, b):
    '''
    least square using QR (A must be full column rank)
    '''
    m = A.shape[0]
    n = A.shape[1]
    if rank(A) < n:
        raise Exception('Rank deficient')

    A = qr_householder(A)
    for j in range(n):
        v = np.hstack((1, A[j+1:, j]))
        A[j+1:, j] = 0
        b[j:] = (np.eye(m - j) - 2 * np.outer(v, v) / la.norm(v, 2)**2).dot(b[j:])

    x_ls = la.solve(A[:n, :n], b[:n])

    return x_ls
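
A hedged, self-contained version of the same least-squares-via-QR idea using NumPy's own factorisation instead of qr_householder, checked against lstsq:

import numpy as np
import numpy.linalg as la

A = np.random.randn(30, 5)
b = np.random.randn(30)
Q, R = la.qr(A)                       # reduced QR: A = QR with R (5 x 5) upper triangular
x_qr = la.solve(R, Q.T.dot(b))        # solve R x = Q^T b
x_ref = la.lstsq(A, b, rcond=None)[0]
print(np.allclose(x_qr, x_ref))       # True
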
ls.py source (project: Matrix-Analysis, author: kingofspace0wzz)
def ls_qr(A, b):
    '''
    least square using QR (A must be full column rank)
    '''
    m = A.shape[0]
    n = A.shape[1]
    if rank(A) < n:
        raise Exception('Rank deficient')

    A = qr.qr_householder(A)
    for j in range(n):
        v = np.hstack((1, A[j+1:, j]))
        A[j+1:, j] = 0
        b[j:] = (np.eye(m - j) - 2 * np.outer(v, v) / la.norm(v, 2)**2).dot(b[j:])

    x_ls = la.solve(A[:n, :n], b[:n])

    return x_ls
SpeicalMatrix.py source (project: Matrix-Analysis, author: kingofspace0wzz)
def householder_vector(x):

    dimensionX  = len(x)
    sigma = x[1:].conjugate().T.dot(x[1:])
    v = np.vstack((1, x[1:]))

    if sigma == 0:
        beta = 0
        return v, beta
    else:
        miu = np.sqrt(x[0]**2 + sigma)
        if x[0] <= 0:
            v[0] = x[0] - miu
        else:
            v[0] = - sigma / (x[0] + miu)
        beta = 2 * v[0]**2 / (sigma + v[0]**2)
        v = v / la.norm(v, 2)

        return v, beta
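
A self-contained check of the underlying idea, using the textbook reflector I - 2 v v^T / (v^T v) built directly from a vector (independent of the helper above): it maps x onto a multiple of the first basis vector.

import numpy as np
import numpy.linalg as la

x = np.array([3.0, 1.0, 2.0])
v = x.copy()
v[0] += np.sign(x[0]) * la.norm(x)                 # v = x + sign(x0) * ||x|| * e1
P = np.eye(3) - 2.0 * np.outer(v, v) / v.dot(v)    # Householder reflector
print(P.dot(x))                                    # ~[-3.742, 0, 0]: tail entries annihilated
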

# a test function that asks whether a particular matrix is one of the special matrices above
skprocrustes.py source (project: skprocrustes, author: melissawm)
def debug_bidiag(i, s, inds, A, B, U, V, T):
    print("\n       ********* DEBUGGING BLOCKBIDIAG: ************\n")
    # We will check the recurrence relations listed in Karimi, Toutounian 
    print("\n        Iteration i = {}, inds = {}\n".format(i, inds))
    E1 = np.zeros((inds+s, s))
    E1[0:s, :] = np.eye(s,s)
    errorRecurrence1 = sp.norm(B-np.dot(U[:,0:inds+s], np.dot(E1, B1)))
    print("\n        B - UU(i+1)*E1*B1 = {}\n".format(errorRecurrence1))
    #
    # AVk = Ukp1Tk
    errorRecurrence2 = sp.norm(np.dot(A, V[:, 0:inds]) - np.dot(U[:, 0:inds+s], T[0:inds+s, 0:inds]))
    print("\n        A*VV(i) - UU(i+1)T(i) = {}\n".format(errorRecurrence2))
    #
    # ATUkp1 = VkTkT + Vkp1Akp1Ekp1T
    Eip1 = np.zeros((inds+s, s))
    Eip1[inds:inds+s, :] = np.eye(s,s)
    errorRecurrence3 = sp.norm(np.dot(A.T, U[:, 0:inds+s]) - np.dot(V[:, 0:inds], T[0:inds+s, 0:inds].T) - np.dot(V[:, inds:inds+s], np.dot(Aip1, Eip1.T)))
    print("\n        A.T*UU(i+1) - VV(i)*T(i).T - V(i+1)*A(i+1)*E(i+1).T = {}\n".format(errorRecurrence3))
mxne_optim.py source (project: decoding_challenge_cortana_2016_3rd, author: kingjr)
def tf_lipschitz_constant(M, G, phi, phiT, tol=1e-3, verbose=None):
    """Compute lipschitz constant for FISTA

    It uses a power iteration method.
    """
    n_times = M.shape[1]
    n_points = G.shape[1]
    iv = np.ones((n_points, n_times), dtype=float)
    v = phi(iv)
    L = 1e100
    for it in range(100):
        L_old = L
        logger.info('Lipschitz estimation: iteration = %d' % it)
        iv = np.real(phiT(v))
        Gv = np.dot(G, iv)
        GtGv = np.dot(G.T, Gv)
        w = phi(GtGv)
        L = np.max(np.abs(w))  # l_inf norm
        v = w / L
        if abs((L - L_old) / L_old) < tol:
            break
    return L
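
The same power-iteration idea in plain NumPy, without the time-frequency transforms phi/phiT: estimate the Lipschitz constant ||G^T G||_2 of the squared-error data term and compare it with the exact value from the SVD (a sketch, not the project's API):

import numpy as np

G = np.random.randn(40, 60)
v = np.ones(G.shape[1])
L = 1e100
for it in range(100):
    L_old = L
    w = G.T.dot(G.dot(v))                 # apply G^T G
    L = np.max(np.abs(w))                 # l_inf norm as the eigenvalue estimate
    v = w / L
    if abs(L - L_old) / L_old < 1e-6:
        break
print(L, np.linalg.svd(G, compute_uv=False)[0]**2)   # both approximate ||G^T G||_2
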
SGD.py source (project: MorphForest, author: j-luo93)
def run(self, params, loss):
        m = theano.shared(np.zeros(params.shape.eval()), borrow=True, name='m')
        v = theano.shared(np.zeros(params.shape.eval()), borrow=True, name='v')
        grad = T.grad(loss, params)
        norm_grad = grad.norm(2)
        m_t = self.beta1 * m + (1 - self.beta1) * grad
        v_t = self.beta2 * v + (1 - self.beta2) * T.pow(grad, 2)
        step = T.iscalar(name='step')
        update_rules = [(params, params - self.lr * (m_t / (1.0 - T.pow(self.beta1, step)) / (T.sqrt(v_t / (1.0 - T.pow(self.beta2, step))) + self.stable))), (m, m_t), (v, v_t)]
        train_epoch = theano.function([step], [loss, norm_grad], updates=update_rules)

        for epoch in range(self.max_epoch):
            loss, grad = train_epoch(epoch + 1)
            norm_l2 = norm(grad)
            print("epoch = %d\t loss = %f\t norm = %f" %(epoch + 1, loss, norm_l2), end='')
            print()
            if norm_l2 < self.eps: break
feature.py source (project: QScode, author: PierreHao)
def multi_im_run(self, image_name):
        """detection and extraction with many boxes"""
        #caffe.set_mode_gpu()
        multi_im = self.detect(image_name, multi_box=True)
        features = []
        for im in multi_im: 
            image = pad(im,size=224)
            feature = extraction.forward(self.net_e, image, self.transformer)
            r = np.squeeze(feature['pool5/7x7_s1'].data[0])
            #feature2 = extraction.forward(self.net_e2, image, self.transformer2)
            #r2 = np.squeeze(feature2['pool5/7x7_s1'].data[0])
            #r = np.hstack((r, r2)).copy()
            #r = r2
            if self.pca is not None:
                r = self.pca.transform(r)[0,:]
            r = r/norm(r)
            #print r.shape
            features.append(r)
        return features
skipthoughts.py source (project: text-to-image, author: paarthneekhara)
def nn(model, text, vectors, query, k=5):
    """
    Return the nearest neighbour sentences to query
    text: list of sentences
    vectors: the corresponding representations for text
    query: a string to search
    """
    qf = encode(model, [query])
    qf /= norm(qf)
    scores = numpy.dot(qf, vectors.T).flatten()
    sorted_args = numpy.argsort(scores)[::-1]
    sentences = [text[a] for a in sorted_args[:k]]
    print('QUERY: ' + query)
    print('NEAREST: ')
    for i, s in enumerate(sentences):
        print(s, sorted_args[i])

