Python atleast_2d() example source code
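
numpy.atleast_2d() promotes its input to an array with at least two dimensions: a scalar becomes a 1x1 array, a 1-D array of length n becomes a 1xn row vector, and anything that is already 2-D or higher is returned unchanged. The snippets below, collected from several open-source projects, use it to normalize user-supplied data, to keep single-row data indexable like a matrix, and to turn 1-D vectors into explicit row or column vectors for matrix products. A minimal sketch of the basic behavior (the variable names here are illustrative only):

import numpy as np

v = np.array([1.0, 2.0, 3.0])
row = np.atleast_2d(v)               # shape (1, 3): a row vector
col = np.atleast_2d(v).T             # shape (3, 1): a column vector
m = np.atleast_2d(np.ones((2, 3)))   # already 2-D, returned as-is
print(row.shape, col.shape, m.shape)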

inputchecks.py (project: casiopeia, author: adbuerger)
def check_states_data(xdata, nx, number_of_intervals):

    if not nx == 0:

        if xdata is None:
            xdata = np.zeros((nx, number_of_intervals + 1))

        xdata = np.atleast_2d(xdata)

        if xdata.shape == (number_of_intervals + 1, nx):
            xdata = xdata.T

        if not xdata.shape == (nx, number_of_intervals + 1):

            raise ValueError( \
                "State values provided by user have wrong dimension.")

        return xdata

    else:

        return ci.dmatrix(0,0)
inputchecks.py (project: casiopeia, author: adbuerger)
def check_measurement_data(ydata, nphi, number_of_measurements):

    if ydata is None:
        ydata = np.zeros((nphi, number_of_measurements))

    ydata = np.atleast_2d(ydata)

    if ydata.shape == (number_of_measurements, nphi):
        ydata = ydata.T

    if not ydata.shape == (nphi, number_of_measurements):

        raise ValueError( \
            "Measurement data provided by user has wrong dimension.")

    return ydata
inputchecks.py (project: casiopeia, author: adbuerger)
def check_measurement_weightings(wv, nphi, number_of_measurements):

    if wv is None:
        wv = np.ones((nphi, number_of_measurements))

    wv = np.atleast_2d(wv)

    if wv.shape == (number_of_measurements, nphi):
        wv = wv.T

    if not wv.shape == (nphi, number_of_measurements):

        raise ValueError( \
            "Measurement weightings provided by user have wrong dimension.")

    return wv
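
All three casiopeia helpers above follow the same pattern: fill in a default when nothing was provided, promote the input to 2-D with np.atleast_2d, transpose if the user passed the data in (samples, variables) order, and finally verify the (variables, samples) shape. A minimal sketch of that pattern in isolation (the function and argument names here are my own, not part of casiopeia):

import numpy as np

def as_vars_by_samples(data, n_vars, n_samples):
    data = np.atleast_2d(data)            # a 1-D input becomes a single row
    if data.shape == (n_samples, n_vars):
        data = data.T                     # also accept the transposed layout
    if data.shape != (n_vars, n_samples):
        raise ValueError("Data has wrong dimension.")
    return data

# A flat array of 5 samples for one variable is accepted and returned as (1, 5):
as_vars_by_samples(np.arange(5.0), 1, 5)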
numpy_filereader.py (project: coordinates, author: markovmodel)
def _reshape(self, array):
        """
        checks shapes, eg convert them (2d), raise if not possible
        after checks passed, set self._array and return it.
        """

        if array.ndim == 1:
            array = np.atleast_2d(array).T
        elif array.ndim == 2:
            pass
        else:
            shape = array.shape
            # hold first dimension, multiply the rest
            shape_2d = (shape[0],
                        functools.reduce(lambda x, y: x * y, shape[1:]))
            array = np.reshape(array, shape_2d)
        return array
data_in_memory.py (project: coordinates, author: markovmodel)
def _add_array_to_storage(self, array):
        """
        checks shapes, eg convert them (2d), raise if not possible
        after checks passed, add array to self._data
        """
        if array.ndim == 1:
            array = np.atleast_2d(array).T
        elif array.ndim == 2:
            pass
        else:
            shape = array.shape
            # hold first dimension, multiply the rest
            shape_2d = (shape[0], functools.reduce(lambda x, y: x * y, shape[1:]))
            array = np.reshape(array, shape_2d)

        self.data.append(array)
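
Both coordinates snippets flatten everything beyond the first axis into a single column dimension. The functools.reduce product over shape[1:] is equivalent to passing -1 as the second argument of reshape; a minimal sketch of the same conversion (my own helper, not the project's API):

import numpy as np

def to_2d(array):
    if array.ndim == 1:
        return np.atleast_2d(array).T     # column vector, shape (n, 1)
    # keep the first (frame) axis, collapse the remaining axes
    return array.reshape(array.shape[0], -1)

to_2d(np.zeros((10, 3, 4))).shape   # (10, 12)
to_2d(np.zeros(7)).shape            # (7, 1)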
NeuralNetwork.py (project: machineLearning, author: zhangtianle)
def fit(self, x, y, learningRate=0.2, epochs=10000):
        x = np.atleast_2d(x)
        temp = np.ones([x.shape[0], x.shape[1]+1])
        temp[:, 0:-1] = x
        x = temp

        for k in range(epochs):
            i = np.random.randint(x.shape[0])
            result = [x[i]]
            for l in range(len(self._weights)):
                result.append(self._activation(np.dot(result[l], self._weights[l])))
            error = y[i] - result[-1]
            deltas = [error * self._activationDeriv(result[-1])]

            for l in range(len(self._weights)-1, 0, -1):
                deltas.append(np.dot(self._weights[l], deltas[-1]) * self._activationDeriv(result[l]))
                # deltas.append(deltas[-1].dot(self._weights[l].T) * self._activationDeriv(result[l]))
            deltas.reverse()

            for i in range(len(self._weights)):
                layer = np.atleast_2d(result[i])
                delta = np.atleast_2d(deltas[i])
                self._weights[i] += learningRate * layer.T.dot(delta)
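
The atleast_2d calls in the weight update above exist so that layer.T.dot(delta) is an outer product rather than a scalar: result[i] and deltas[i] are 1-D, and promoting them to 1xn row vectors makes the update an (n_in, n_out) matrix with the same shape as self._weights[i]. A standalone illustration of that step (not tied to the class above):

import numpy as np

layer = np.atleast_2d(np.array([0.2, 0.5, 1.0]))   # shape (1, 3)
delta = np.atleast_2d(np.array([0.1, -0.3]))       # shape (1, 2)
update = layer.T.dot(delta)                        # outer product, shape (3, 2)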
rigid_registration.py (project: pycpd, author: siavashk)
def register(self, callback):
    self.initialize()

    while self.iteration < self.maxIterations and self.err > self.tolerance:
      self.iterate()
      if callback:
        callback(iteration=self.iteration, error=self.err, X=self.X, Y=self.TY)

    return self.TY, self.R, np.atleast_2d(self.t), self.s
affine_registration.py (project: pycpd, author: siavashk)
def register(self, callback):
    self.initialize()

    while self.iteration < self.maxIterations and self.err > self.tolerance:
      self.iterate()
      if callback:
        callback(iteration=self.iteration, error=self.err, X=self.X, Y=self.TY)

    return self.TY, self.B, np.atleast_2d(self.t)
jrmpc_rigid.py (project: pycpd, author: siavashk)
def __init__(self, Y, R=None, t=None, maxIterations=100, gamma=0.1):
    if Y is None:
      raise ValueError('Empty list of point clouds!')

    dimensions = [cloud.shape[1] for cloud in Y]

    if not all(dimension == dimensions[0] for dimension in dimensions):
      raise ValueError('All point clouds must have the same number of dimensions!')

    self.Y = Y
    self.M = [cloud.shape[0] for cloud in self.Y]
    self.D = dimensions[0]

    if R:
      rotations = [rotation.shape for rotation in R]
      if not all(rotation[0] == self.D and rotation[1] == self.D for rotation in rotations):
        raise ValueError('All rotation matrices need to be %d x %d matrices!' % (self.D, self.D))
      self.R = R
    else:
      self.R = [np.eye(self.D) for cloud in Y]

    if t:
      translations = [translation.shape for translation in t]
      if not all(translation[0] == 1 and translation[1] == self.D for translation in translations):
        raise ValueError('All translation vectors need to be 1 x %d matrices!' % (self.D))
      self.t = t
    else:
      self.t = [np.atleast_2d(np.zeros((1, self.D))) for cloud in self.Y]
util.py (project: lung-cancer-detector, author: YichenGong)
def plot_prediction(x_test, y_test, prediction, save=False):
    import matplotlib
    import matplotlib.pyplot as plt

    test_size = x_test.shape[0]
    fig, ax = plt.subplots(test_size, 3, figsize=(12,12), sharey=True, sharex=True)

    x_test = crop_to_shape(x_test, prediction.shape)
    y_test = crop_to_shape(y_test, prediction.shape)

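    # When test_size == 1, plt.subplots returns a 1-D array of Axes;
    # np.atleast_2d restores the (row, column) indexing used below.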
    ax = np.atleast_2d(ax)
    for i in range(test_size):
        cax = ax[i, 0].imshow(x_test[i])
        plt.colorbar(cax, ax=ax[i,0])
        cax = ax[i, 1].imshow(y_test[i, ..., 1])
        plt.colorbar(cax, ax=ax[i,1])
        pred = prediction[i, ..., 1]
        pred -= np.amin(pred)
        pred /= np.amax(pred)
        cax = ax[i, 2].imshow(pred)
        plt.colorbar(cax, ax=ax[i,2])
        if i==0:
            ax[i, 0].set_title("x")
            ax[i, 1].set_title("y")
            ax[i, 2].set_title("pred")
    fig.tight_layout()

    if save:
        fig.savefig(save)
    else:
        fig.show()
        plt.show()
motifs.py (project: mbin, author: fanglab)
def process_contig_chunk( args ):
    chunk_id      = args[0]
    control_pkl   = args[1]
    cut_CMDs      = args[2]
    kmers         = args[3]
    cols_chunk    = args[4]
    contig_id     = args[5]
    n_chunks      = args[6]
    n_contigs     = args[7]
    opts          = args[8]
    logging.info("  - Contig %s/%s: chunk %s/%s" % ((contig_id+1), n_contigs, (chunk_id+1), (n_chunks+1)))
    control_means = pickle.load(open(control_pkl, "rb"))
    contig_motifs = {}
    case_motif_Ns = {}

    for cut_CMD in cut_CMDs:
        sts,stdOutErr = mbin.run_OS_command( cut_CMD )

    fns                = list(map(lambda x: x.split("> ")[-1], cut_CMDs))
    contig_ipds_sub    = np.loadtxt(fns[0], dtype="float")
    contig_ipds_N_sub  = np.loadtxt(fns[1], dtype="int")
    # If there is only one row (read) for this contig, still treat it as
    # a 2-D matrix of many reads
    contig_ipds_sub    = np.atleast_2d(contig_ipds_sub)
    contig_ipds_N_sub  = np.atleast_2d(contig_ipds_N_sub)
    for j in range(len(cols_chunk)):
        motif = kmers[cols_chunk[j]]
        case_contig_Ns    = contig_ipds_N_sub[:,j]
        if control_means.get(motif):
            case_contig_means = contig_ipds_sub[:,j]
            if np.sum(case_contig_Ns)>0:
                case_mean = np.dot(case_contig_means, case_contig_Ns) / np.sum(case_contig_Ns)
            else:
                case_mean = 0
            score                = case_mean - control_means[motif]
            contig_motifs[motif] = score
            case_motif_Ns[motif] = np.sum(case_contig_Ns)
    return contig_motifs,case_motif_Ns
controls.py (project: mbin, author: fanglab)
def process_contig_chunk( args ):
    chunk_id        = args[0]
    cut_CMDs        = args[1]
    kmers           = args[2]
    cols_chunk      = args[3]
    n_chunks        = args[4]
    min_motif_count = args[5]
    logging.info("  - Control data: chunk %s/%s" % ((chunk_id+1), (n_chunks+1)))
    control_means = {}

    for cut_CMD in cut_CMDs:
        sts,stdOutErr = mbin.run_OS_command( cut_CMD )

    fns                = list(map(lambda x: x.split("> ")[-1], cut_CMDs))
    control_ipds_sub   = np.loadtxt(fns[0], dtype="float")
    control_ipds_N_sub = np.loadtxt(fns[1], dtype="int")
    # If there is only one row (read) for this contig, still treat it as
    # a 2-D matrix of many reads
    control_ipds_sub   = np.atleast_2d(control_ipds_sub)
    control_ipds_N_sub = np.atleast_2d(control_ipds_N_sub)

    not_found     = 0
    for j in range(len(cols_chunk)):
        motif = kmers[cols_chunk[j]]
        if np.sum(control_ipds_N_sub[:,j])>=min_motif_count:
            if np.sum(control_ipds_N_sub[:,j])>0:
                control_mean = np.dot(control_ipds_sub[:,j], control_ipds_N_sub[:,j]) / np.sum(control_ipds_N_sub[:,j])
            else:
                control_mean = 0
            control_means[motif] = control_mean
        else:
            not_found += 1

    return control_means,not_found
HBLR.py (project: PersonalizedMultitaskLearning, author: mitmedialab)
def computeSMatrix(self):
        for m in range(self.n_tasks):
            task_X = self.task_dict[m]['X']
            task_Y = self.task_dict[m]['Y']
            task_xi = np.array(self.xi[m])

            for k in range(self.K):
                # Note: the transposes differ from the paper because we use row vectors where the paper uses column vectors.

                # This does all data points (n) at once 
                inner = np.dot(np.atleast_2d(self.theta[k,:]).T, np.atleast_2d(self.theta[k,:])) + self.gamma[k]
                diag_entries = np.einsum('ij,ij->i', np.dot(task_X, inner), task_X)
                s_sum = -rhoFunction(task_xi)*diag_entries

                s_sum += ((task_Y.T - 0.5)* np.dot(np.atleast_2d(self.theta[k,:]), task_X.T))[0,:]
                s_sum += np.log(sigmoid(task_xi))
                s_sum += (-0.5)*task_xi
                s_sum += rhoFunction(task_xi)*(task_xi**2)

                s_sum = np.sum(s_sum)

                if k < self.K-1:
                    s_sum = s_sum + scipy.special.psi(self.small_phi1[k]) \
                                    - scipy.special.psi(self.small_phi1[k] + self.small_phi2[k])
                if k > 0:
                    for i in range(k):
                        s_sum = s_sum + scipy.special.psi(self.small_phi2[i]) \
                                    - scipy.special.psi(self.small_phi1[i] + self.small_phi2[i])


                self.s[m,k] = s_sum
        if self.debug: print("s:", self.s)
HBLR.py (project: PersonalizedMultitaskLearning, author: mitmedialab)
def updatePhi(self):
        a = np.array([np.max(self.s, axis=1)]).T  # as used in the log-sum-exp trick: https://hips.seas.harvard.edu/blog/2013/01/09/computing-log-sum-exp/
        self.phi = np.exp(self.s - (a + np.log(np.atleast_2d(np.sum(np.exp(self.s - a),axis=1)).T)))
        if self.debug:
            print("phi:", self.phi)
HBLR.py (project: PersonalizedMultitaskLearning, author: mitmedialab)
def updateTheta(self):
        for k in range(self.K):
            inner_sum = np.zeros((1,self.num_feats))
            for m in range(self.n_tasks):
                inner_sum = inner_sum + self.phi[m,k] * np.atleast_2d(self.task_vectors[m,:])
            self.theta[k,:] = (np.dot(self.gamma[k],(np.dot(la.inv(self.sigma),self.mu.T) + inner_sum.T)  )).T
HBLR.py (project: PersonalizedMultitaskLearning, author: mitmedialab)
def computeXi(self):
        for m in range(self.n_tasks):
            task_X = self.task_dict[m]['X']
            for n in range(len(task_X)):
                inner_sum = 0
                for k in range(self.K):
                    # Note: the transposes differ from the paper because we use row vectors where the paper uses column vectors.
                    inner_sum += self.phi[m,k]*np.dot((np.dot(np.atleast_2d(task_X[n,:]), 
                                                        (np.dot(np.atleast_2d(self.theta[k,:]).T, np.atleast_2d(self.theta[k,:])) + self.gamma[k]))),
                                                        np.atleast_2d(task_X[n,:]).T)
                assert inner_sum >= 0           # This number can't be negative since we are taking the square root

                self.xi[m][n] = np.sqrt(inner_sum[0,0])
                if self.xi[m][n]==0:
                    print(m, n)
HBLR.py (project: PersonalizedMultitaskLearning, author: mitmedialab)
def predictProbability(self, task, X):
        prob = 0
        for k in range(self.K):
            numerator = np.dot(np.atleast_2d(self.theta[k,:]),X.T)
            diag_entries = np.einsum('ij,ij->i', np.dot(X, self.gamma[k]), X)
            denom = np.sqrt(1.0 + np.pi/8 * diag_entries)
            prob = prob + self.phi[task,k] * sigmoid(numerator / denom)
        return prob
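
The denominator above appears to implement the standard approximation of a sigmoid averaged over a Gaussian, sigma(m / sqrt(1 + pi*s^2/8)) with m = theta_k.x and s^2 = x^T Gamma_k x, and the einsum line computes that per-row quadratic form without building the full X Gamma_k X^T matrix. A small standalone check of the einsum identity (independent of this class):

import numpy as np

X = np.random.randn(5, 3)
G = 0.5 * np.eye(3)
diag_fast = np.einsum('ij,ij->i', np.dot(X, G), X)   # row-wise x^T G x
diag_slow = np.diag(X.dot(G).dot(X.T))               # same values, more memory
assert np.allclose(diag_fast, diag_slow)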


    # Code for Predicting for a new task
HBLR.py (project: PersonalizedMultitaskLearning, author: mitmedialab)
def dataProb(self,new_task_X,new_task_y,weights):
        prod = 1
        for i in range(len(new_task_X)):
            sig = sigmoid(np.dot(weights,np.atleast_2d(new_task_X[i,:]).T ))
            prod = prod*(sig**new_task_y[i]) * (1.0-sig)**(1-new_task_y[i])

        return prod
HBLR.py (project: PersonalizedMultitaskLearning, author: mitmedialab)
def predictNewTask(self,new_task_X,new_task_y,pred_X,N_sam=1000):
        w_dot_array = self.metropolisHastingsAlgorithm(new_task_X,new_task_y,N_sam)

        predictions = []
        for x_star in pred_X:
            predictions.append(sum([sigmoid(np.dot(w,np.atleast_2d(x_star).T))[0,0] for w in w_dot_array])/float(N_sam))
        predictions = [1.0 if p>=0.5 else 0.0 for p in predictions]
        return predictions


# Helper function
nn.py (project: Machine-Learning, author: grasses)
def fit(self, X, y, learning_rate = 0.2, epochs = 10000):
        X = np.atleast_2d(X)
        # temp has shape (X.shape[0], X.shape[1] + 1); the extra column of ones is the bias input, so each row ends with 1 and np.dot(x, weights) effectively includes a bias term
        temp = np.ones([X.shape[0], X.shape[1] + 1])
        temp[:, 0:-1] = X
        X = temp
        y = np.array(y)

        '''
        Train for `epochs` iterations.
        '''
        for k in range(epochs):
            # pick a random training sample (row) from X
            i = np.random.randint(X.shape[0])
            x = [X[i]]

            # forward pass: propagate through each layer
            for l in range(len(self.weights)):
                # compute the node values (O_i) for this layer using the activation function
                x.append(self.activation(np.dot(x[l], self.weights[l])))

            # compute the error at the output layer
            error = y[i] - x[-1]
            deltas = [error * self.activation_deriv(x[-1])]  # delta (error term) for the output layer

            # start backpropagation
            for l in range(len(x) - 2, 0, -1):  # we need to begin at the second to last layer
                # compute the error terms (i.e., deltas) for each layer, going from the output layer back to the input layer
                deltas.append(deltas[-1].dot(self.weights[l].T) * self.activation_deriv(x[l]))
            deltas.reverse()

            for i in range(len(self.weights)):
                layer = np.atleast_2d(x[i])
                delta = np.atleast_2d(deltas[i])
                self.weights[i] += learning_rate * layer.T.dot(delta)

