Python squeeze() usage examples
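
Before the examples, a quick reminder of what numpy.squeeze does: it removes axes of length one, either all of them at once or only a given axis. A minimal sketch:

import numpy as np

x = np.zeros((1, 3, 1, 2))
print(np.squeeze(x).shape)          # (3, 2): all size-1 axes removed
print(np.squeeze(x, axis=0).shape)  # (3, 1, 2): only axis 0 removed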

posenet_utils.py (project: pybot, author: spillai)
def __init__(self, model_file, weights_file, mean_file): 
        if not os.path.exists(model_file) or \
           not os.path.exists(weights_file) or \
           not os.path.exists(mean_file): 
            raise ValueError('Invalid model: {}, \nweights file: {}, \nmean file: {}'
                             .format(model_file, weights_file, mean_file))

        # Init caffe with model
        self.net_ = caffe.Net(model_file, weights_file, caffe.TEST)
        self.mean_file_ = mean_file
        self.input_shape_ = self.net_.blobs['data'].data.shape    

        # Initialize mean file
        blob_meanfile = caffe.proto.caffe_pb2.BlobProto()
        data_meanfile = open(mean_file, 'rb').read()
        blob_meanfile.ParseFromString(data_meanfile)
        meanfile = np.squeeze(np.array(caffe.io.blobproto_to_array(blob_meanfile)))
        self.meanfile_ = meanfile.transpose((1,2,0))
        self.meanfile_image_ = None
lidc.py (project: lung-cancer-detector, author: YichenGong)
def _get_mask(self, scan, slide, series):
        img, s, o, origShape = scan
        mask = np.zeros((origShape[1], origShape[2]))
        nodules = self._nodule_info[series]
        for nodule in nodules:
            iid, z, edges = nodule
            z = int((z - o[2])/s[2])
            if z == slide:
                if edges.shape[0] > 1:
                    cv.fillPoly(mask, [edges], 255)
                else:
                    #It's a small nodule. Make a circle of radius 3mm
                    edges = np.squeeze(edges)
                    center = tuple(edges)
                    radius = max(3.0/s[0], 3.0/s[1])
                    cv.circle(mask, center, int(radius+1), 255, -1)

        if img.shape[1] != origShape[1] or img.shape[2] != origShape[2]:
            mask = imu.resize_2d(mask, (img.shape[1], img.shape[2]))
        return mask
utils.py (project: CausalGAN, author: mkocaoglu)
def did_succeed( output_dict, cond_dict ):
    '''
    Used in rejection sampling:
    for each row, determine if cond is satisfied
    for every cond in cond_dict

    success is hardcoded as round(label) being exactly equal
    to the integer in cond_dict
    '''

    # definition of success:
    def is_win(key):
        cond = np.squeeze(cond_dict[key])
        val = np.squeeze(output_dict[key])
        condition = np.round(val) == cond
        return condition

    scoreboard=[is_win(key) for key in cond_dict]
    #print('scoreboard', scoreboard)
    all_victories_bool=np.logical_and.reduce(scoreboard)
    return all_victories_bool.flatten()
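
A minimal usage sketch for did_succeed (the dictionaries and keys here are hypothetical, for illustration only):

import numpy as np

output_dict = {'smile': np.array([[0.9], [0.1], [0.8]]),
               'male':  np.array([[0.2], [0.7], [0.9]])}
cond_dict = {'smile': np.array([1]), 'male': np.array([0])}

print(did_succeed(output_dict, cond_dict))  # [ True False False]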
post_sub.py (project: kaggle-review, author: daxiongshu)
def post_sub_one(inx):
    w,h = 1918,1280
    path,out,threshold = inx
    data = np.load(path).item()
    imgs,pred = data['name'], data['pred']
    #print(pred.shape)
    fo = open(out,'w')
    #masks = pred>threshold
    for name,mask in zip(imgs,np.squeeze(pred)):
        mask = imresize(mask,[h,w])
        mask = mask>threshold
        code = rle_encode(mask)
        code = [str(i) for i in code]
        code = " ".join(code)
        fo.write("%s,%s\n"%(name,code))
    fo.close()
    return 0
poke.py (project: kaggle-review, author: daxiongshu)
def show_one_img_mask(data):
    w,h = 1918,1280
    a = randint(0,31)
    path = "../input/test"
    data = np.load(data).item()
    name,masks = data['name'][a],data['pred']
    img = Image.open("%s/%s"%(path,name))
    #img.show()
    plt.imshow(img)
    plt.show()
    mask = np.squeeze(masks[a])
    mask = imresize(mask,[h,w]).astype(np.float32)
    print(mask.shape,mask[0])
    img = Image.fromarray(mask*256)#.resize([w,h])
    plt.imshow(img)
    plt.show()
main.py (project: SGAN, author: YuhangSong)
def log_img(x,name,iteration=0,nrow=8):

    def log_img_final(x,name,iteration=0,nrow=8):
        vutils.save_image(
            x,
            LOGDIR+name+'_'+str(iteration)+'.png',
            nrow=nrow,
        )
        vis.images( 
            x.cpu().numpy(),
            win=str(MULTI_RUN)+'-'+name,
            opts=dict(caption=str(MULTI_RUN)+'-'+name+'_'+str(iteration)),
            nrow=nrow,
        )

    if params['REPRESENTATION']==chris_domain.VECTOR:
        x = vector2image(x)
    x = x.squeeze(1)
    if params['DOMAIN']=='2Dgrid':
        if x.size()[1]==2:
            log_img_final(x[:,0:1,:,:],name+'_b',iteration,nrow)
            log_img_final(x[:,1:2,:,:],name+'_a',iteration,nrow)
            x = torch.cat([x,x[:,0:1,:,:]],1)
    log_img_final(x,name,iteration,nrow)
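
For reference, Tensor.squeeze(dim) in PyTorch (used as x.squeeze(1) above) removes the given dimension only when its size is 1 and leaves the tensor unchanged otherwise. A minimal sketch:

import torch

x = torch.zeros(16, 1, 32, 32)  # e.g. a batch of single-channel images
print(x.squeeze(1).shape)       # torch.Size([16, 32, 32])
print(x.squeeze(0).shape)       # torch.Size([16, 1, 32, 32]): dim 0 has size 16, so nothing happens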
main.py (project: SGAN, author: YuhangSong)
def plot_convergence(images,name):
    '''
        evaluate domain
    '''

    dis, accept_rate = get_transition_prob_distribution(images)
    if np.sum(dis) != 0.0:
        kl = scipy.stats.entropy(
            dis,
            qk=params['GRID_ACTION_DISTRIBUTION'],
            base=None
        )
        logger.plot(
            name+'-KL',
            np.asarray([kl])
        )
    l1 = np.squeeze(np.sum(np.abs(dis - np.asarray(params['GRID_ACTION_DISTRIBUTION']))))
    logger.plot(
        name+'-L1',
        np.asarray([l1])
    )
    logger.plot(
        name+'-AR',
        np.asarray([accept_rate])
    )
train.py (project: TAC-GAN, author: dashayushman)
def save_for_viz_val(data_dir, generated_images, image_files, image_caps,
                     image_ids, image_size, id):

    generated_images = np.squeeze(np.array(generated_images))
    for i in range(generated_images.shape[0]):
        image_dir = join(data_dir, str(image_ids[i]))
        if not os.path.exists(image_dir):
            os.makedirs(image_dir)

        real_image_path = join(image_dir,
                               '{}.jpg'.format(image_ids[i]))
        if os.path.exists(image_dir):
            real_images_255 = image_processing.load_image_array(image_files[i],
                                        image_size, image_ids[i], mode='val')
            scipy.misc.imsave(real_image_path, real_images_255)

        caps_dir = join(image_dir, "caps.txt")
        if not os.path.exists(caps_dir):
            with open(caps_dir, "w") as text_file:
                text_file.write(image_caps[i]+"\n")

        fake_images_255 = generated_images[i]
        scipy.misc.imsave(join(image_dir, 'fake_image_{}.jpg'.format(id)),
                          fake_images_255)
t_interpolation.py (project: TAC-GAN, author: dashayushman)
def save_distributed_image_batch(data_dir, generated_images, sel_i, sel_2, z_i,
                                 t_i, sel_img, sel_cap, sel_img_2, sel_cap_2,
                                 batch_size):

    generated_images = np.squeeze(generated_images)
    folder_name = str(sel_i) + '_' + str(sel_2)

    image_dir = join(data_dir, 't_interpolation', folder_name, str(z_i))
    if not os.path.exists(image_dir):
        os.makedirs(image_dir)

    meta_path = os.path.join(image_dir, "meta.txt")
    with open(meta_path, "w") as text_file:
        text_file.write(str(sel_img) + "\t" + str(sel_cap) +
                        str(sel_img_2) + "\t" + str(sel_cap_2))
    fake_image_255 = generated_images[batch_size - 1]
    scipy.misc.imsave(join(image_dir, '{}.jpg'.format(t_i)),
                      fake_image_255)
net.py (project: speechless, author: JuliusKunze)
def _decode_lambda(self, args):
        """
        Decoding within tensorflow graph.
        In case kenlm_directory is specified, a modified version of tensorflow 
        (available at https://github.com/timediv/tensorflow-with-kenlm) 
        is needed to run that extends ctc_decode to use a kenlm decoder.
        :return: 
            Most probable decoded sequence.  Important: blank labels are returned as `-1`. 
        """
        import tensorflow as tf

        prediction_batch, prediction_lengths = args

        log_prediction_batch = tf.log(tf.transpose(prediction_batch, perm=[1, 0, 2]) + 1e-8)
        prediction_length_batch = tf.to_int32(tf.squeeze(prediction_lengths, axis=[1]))

        (decoded, log_prob) = self.ctc_get_decoded_and_log_probability_batch(log_prediction_batch,
                                                                             prediction_length_batch)

        return single([tf.sparse_to_dense(st.indices, st.dense_shape, st.values, default_value=-1) for st in decoded])
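
Unlike a bare squeeze, passing an explicit axis (tf.squeeze(..., axis=[1]) above, np.squeeze(..., axis=1) in numpy) removes only that dimension and raises if it does not have size 1. A small numpy sketch of that contract:

import numpy as np

lengths = np.array([[7], [5], [9]])  # shape (3, 1): one length per batch element
print(np.squeeze(lengths, axis=1))   # [7 5 9], shape (3,)

try:
    np.squeeze(np.zeros((3, 2)), axis=1)  # axis 1 has size 2, not 1
except ValueError as err:
    print('ValueError:', err)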
net.py (project: speechless, author: JuliusKunze)
def test_and_predict_batch(self, labeled_spectrogram_batch: List[LabeledSpectrogram]) -> ExpectationsVsPredictions:
        input_by_name, dummy_labels = self._inputs_for_loss_net(labeled_spectrogram_batch)

        predicted_graphemes, loss_batch = self.get_predicted_graphemes_and_loss_batch(
            [input_by_name[input.name.split(":")[0]] for input in self.loss_net.inputs] + [self.prediction_phase_flag])

        # blank labels are returned as -1 by tensorflow:
        predicted_graphemes[predicted_graphemes < 0] = self.grapheme_encoding.ctc_blank

        prediction_lengths = list(numpy.squeeze(input_by_name[Wav2Letter.InputNames.prediction_lengths], axis=1))
        losses = list(numpy.squeeze(loss_batch, axis=1))

        # merge was already done by tensorflow, so we disable it here:
        predictions = self.grapheme_encoding.decode_grapheme_batch(predicted_graphemes, prediction_lengths,
                                                                   merge_repeated=False)

        return ExpectationsVsPredictions(
            [ExpectationVsPrediction(predicted=predicted, expected=expected, loss=loss) for predicted, expected, loss in
             zip(predictions, (e.label for e in labeled_spectrogram_batch), losses)])
datasets.py (project: how_to_convert_text_to_images, author: llSourcell)
def sample_embeddings(self, embeddings, filenames, class_id, sample_num):
        if len(embeddings.shape) == 2 or embeddings.shape[1] == 1:
            return np.squeeze(embeddings)
        else:
            batch_size, embedding_num, _ = embeddings.shape
            # Take every sample_num captions to compute the mean vector
            sampled_embeddings = []
            sampled_captions = []
            for i in range(batch_size):
                randix = np.random.choice(embedding_num,
                                          sample_num, replace=False)
                if sample_num == 1:
                    randix = int(randix)
                    captions = self.readCaptions(filenames[i],
                                                 class_id[i])
                    sampled_captions.append(captions[randix])
                    sampled_embeddings.append(embeddings[i, randix, :])
                else:
                    e_sample = embeddings[i, randix, :]
                    e_mean = np.mean(e_sample, axis=0)
                    sampled_embeddings.append(e_mean)
            sampled_embeddings_array = np.array(sampled_embeddings)
            return np.squeeze(sampled_embeddings_array), sampled_captions
FindCats.py (project: RaspberryPi-Robot, author: timestocome)
def is_cat(self):

        #now = datetime.datetime.now()

        # take photo
        t = self.capture_image()

        # see if Min, Merlin or no cat in photo
        input_operation = self.graph.get_operation_by_name(self.input_layer_name)
        output_operation = self.graph.get_operation_by_name(self.output_layer_name)

        results = self.sess.run(output_operation.outputs[0], {input_operation.outputs[0]: t})
        results = np.squeeze(results)

        found = []
        for i in range(3):
            found.append((self.labels[i], results[i]))

        #print(datetime.datetime.now() - now)
        return found
FindCats.py (project: RaspberryPi-Robot, author: timestocome)
def is_cat(self):

        now = datetime.datetime.now()

        # take photo
        t = self.capture_image()

        # see if Min, Merlin or no cat in photo
        input_operation = self.graph.get_operation_by_name(self.input_layer_name)
        output_operation = self.graph.get_operation_by_name(self.output_layer_name)

        results = self.sess.run(output_operation.outputs[0], {input_operation.outputs[0]: t})
        results = np.squeeze(results)

        found = []
        for i in range(3):
            found.append((self.labels[i], results[i]))

        print(datetime.datetime.now() - now)
        return found
findCats.py (project: RaspberryPi-Robot, author: timestocome)
def run_graph(self):

        # take photo
        t = self.capture_image()

        # see if Min, Merlin or no cat in photo
        input_operation = self.graph.get_operation_by_name(self.input_layer_name)
        output_operation = self.graph.get_operation_by_name(self.output_layer_name)

        results = self.sess.run(output_operation.outputs[0], {input_operation.outputs[0]: t})
        results = np.squeeze(results)

        top_k = results.argsort()[-3:][::-1]

        # print results
        #for i in top_k:
        #    print(self.labels[i], results[i])

        found = []
        for i in top_k:
            found.append((self.labels[i], results[i]))
        return found
helpers.py (project: iterative_inference_segm, author: adri-romsor)
def my_label2rgboverlay(labels, colors, image, alpha=0.2):
    """
    Generates image with segmentation labels on top

    Parameters
    ----------
    labels:  labels of one image (0, 1)
    colors:  colormap
    image:   image (0, 1, c), where c=3 (rgb)
    alpha: transparency
    """
    image_float = gray2rgb(img_as_float(rgb2gray(image) if
                                        image.shape[2] == 3 else
                                        np.squeeze(image)))
    label_image = my_label2rgb(labels, colors)
    output = image_float * alpha + label_image * (1 - alpha)
    return output
load_model.py (project: mx-rfcn, author: giorking)
def do_checkpoint(prefix):
    """Callback to checkpoint the model to prefix every epoch.

    Parameters
    ----------
    prefix : str
        The file prefix to checkpoint to

    Returns
    -------
    callback : function
        The callback function that can be passed as iter_end_callback to fit.
    """
    def _callback(iter_no, sym, arg, aux):
        #if config.TRAIN.BBOX_NORMALIZATION_PRECOMPUTED:
        #    print "save model with mean/std"
        #    num_classes = len(arg['bbox_pred_bias'].asnumpy()) / 4
        #    means = np.tile(np.array(config.TRAIN.BBOX_MEANS), (1, num_classes))
        #    stds = np.tile(np.array(config.TRAIN.BBOX_STDS), (1, num_classes))
        #    arg['bbox_pred_weight'] = (arg['bbox_pred_weight'].T * mx.nd.array(stds)).T
        #    arg['bbox_pred_bias'] = arg['bbox_pred_bias'] * mx.nd.array(np.squeeze(stds)) + \
        #                                   mx.nd.array(np.squeeze(means))
        """The checkpoint function."""
        save_checkpoint(prefix, iter_no + 1, sym, arg, aux)
    return _callback
utils.py (project: SRGAN-tensorflow, author: zoharli)
def batch_ssim(dbatch):
    # Split the stacked batch into the two image sets being compared.
    im1, im2 = np.split(dbatch, 2)
    imgsize = im1.shape[1] * im1.shape[2]
    # Per-image means and sample standard deviations over the spatial axes.
    avg1 = im1.mean((1, 2), keepdims=True)
    avg2 = im2.mean((1, 2), keepdims=True)
    std1 = im1.std((1, 2), ddof=1)
    std2 = im2.std((1, 2), ddof=1)
    # Sample covariance between corresponding images.
    cov = ((im1 - avg1) * (im2 - avg2)).mean((1, 2)) * imgsize / (imgsize - 1)
    avg1 = np.squeeze(avg1)
    avg2 = np.squeeze(avg2)
    # Standard SSIM stabilizing constants for 8-bit dynamic range.
    k1 = 0.01
    k2 = 0.03
    c1 = (k1 * 255) ** 2
    c2 = (k2 * 255) ** 2
    c3 = c2 / 2
    return np.mean((2 * avg1 * avg2 + c1) * 2 * (cov + c3) / (avg1 ** 2 + avg2 ** 2 + c1) / (std1 ** 2 + std2 ** 2 + c2))
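
A minimal usage sketch (assuming grayscale images in the 0-255 range, with the two batches stacked along axis 0 as np.split expects):

import numpy as np

rng = np.random.default_rng(0)
batch_a = rng.uniform(0, 255, size=(4, 32, 32))
batch_b = batch_a + rng.normal(0, 5, size=(4, 32, 32))  # slightly perturbed copies

dbatch = np.concatenate([batch_a, batch_b])  # shape (8, 32, 32)
print(batch_ssim(dbatch))                    # close to 1.0 for near-identical batches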
sph.py (project: sound_field_analysis-py, author: QULab)
def dspbessel(n, kr):
    """Derivative of spherical Bessel (first kind) of order n at kr

    Parameters
    ----------
    n : array_like
       Order
    kr: array_like
       Argument

    Returns
    -------
    J' : complex float
       Derivative of spherical Bessel
    """
    return _np.squeeze((n * spbessel(n - 1, kr) - (n + 1) * spbessel(n + 1, kr)) / (2 * n + 1))
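
The recurrence used here, j_n'(x) = (n*j_{n-1}(x) - (n+1)*j_{n+1}(x)) / (2n+1), can be cross-checked against SciPy's spherical Bessel derivative; a quick sketch:

import numpy as np
from scipy.special import spherical_jn

n, kr = 2, 1.5
lhs = (n * spherical_jn(n - 1, kr) - (n + 1) * spherical_jn(n + 1, kr)) / (2 * n + 1)
rhs = spherical_jn(n, kr, derivative=True)
print(np.isclose(lhs, rhs))  # True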
evaluations.py (project: MatchZoo, author: faneshion)
def map(y_true, y_pred, rel_threshold=0):
    s = 0.
    y_true = _to_list(np.squeeze(y_true).tolist())
    y_pred = _to_list(np.squeeze(y_pred).tolist())
    c = list(zip(y_true, y_pred))
    random.shuffle(c)
    c = sorted(c, key=lambda x:x[1], reverse=True)
    ipos = 0
    for j, (g, p) in enumerate(c):
        if g > rel_threshold:
            ipos += 1.
            s += ipos / ( j + 1.)
    if ipos == 0:
        s = 0.
    else:
        s /= ipos
    return s
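
A small worked example with toy labels and scores (note that this map shadows Python's built-in map, and the module's _to_list helper must be in scope):

y_true = [1, 0, 1]
y_pred = [0.9, 0.8, 0.7]   # ranked order: relevant, non-relevant, relevant
print(map(y_true, y_pred)) # (1/1 + 2/3) / 2 = 0.8333...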
evaluations.py (project: MatchZoo, author: faneshion)
def recall(k=10):
    def top_k(y_true, y_pred, rel_threshold=0.):
        if k <= 0:
            return 0.
        y_true = _to_list(np.squeeze(y_true).tolist()) # y_true: the ground truth scores for documents under a query
        y_pred = _to_list(np.squeeze(y_pred).tolist()) # y_pred: the predicted scores for documents under a query
        pos_count = sum(i > rel_threshold for i in y_true) # total number of positive documents under this query
        c = list(zip(y_true, y_pred))
        random.shuffle(c)
        c = sorted(c, key=lambda x: x[1], reverse=True)
        ipos = 0
        recall = 0.
        for i, (g, p) in enumerate(c):
            if i >= k:
                break
            if g > rel_threshold:
                recall += 1
        if pos_count > 0:
            recall /= pos_count
        return recall
    return top_k
rank_evaluations.py (project: MatchZoo, author: faneshion)
def eval_map(y_true, y_pred, rel_threshold=0):
    s = 0.
    y_true = np.squeeze(y_true)
    y_pred = np.squeeze(y_pred)
    c = list(zip(y_true, y_pred))
    random.shuffle(c)
    c = sorted(c, key=lambda x:x[1], reverse=True)
    ipos = 0
    for j, (g, p) in enumerate(c):
        if g > rel_threshold:
            ipos += 1.
            s += ipos / ( j + 1.)
    if ipos == 0:
        s = 0.
    else:
        s /= ipos
    return s
rank_evaluations.py (project: MatchZoo, author: faneshion)
def eval_precision(y_true, y_pred, k = 10, rel_threshold=0.):
    if k <= 0:
        return 0.
    y_true = np.squeeze(y_true)
    y_pred = np.squeeze(y_pred)
    c = list(zip(y_true, y_pred))
    random.shuffle(c)
    c = sorted(c, key=lambda x: x[1], reverse=True)
    precision = 0.
    for i, (g,p) in enumerate(c):
        if i >= k:
            break
        if g > rel_threshold:
            precision += 1
    precision /=  k
    return precision
lm_remote.py (project: DL4MT, author: thompsonb)
def score(self, x_or_y):
        if len(x_or_y.shape) > 2:  # x shape: (1, N, M). y shape: (N, M)  todo: work with factors
            x_or_y = numpy.squeeze(x_or_y, axis=0)
        """
        Nematus is generally called on 1)Tokenized, 2)Truecased, 3)BPE data.
        So we will train KenLM on Tokenized, Truecase data.
        Therefore all we need to do is convert to a string and deBPE.
        """
        sentences = [deBPE(seqs2words(seq, self.id_to_word)) for seq in x_or_y.T]
        scores = self.model.score(sentences)
        #try:
        #    print 'remote LM sentences/scores:'
        #    for sent, score in zip(sentences, scores):
        #        print '"'+sent+'":', score
        #except Exception, e:
        #    print 'failed to print LM sentences/scores', e
        return scores
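
The shape guard above strips a leading singleton factor axis only when it is present; a minimal numpy sketch:

import numpy as np

x = np.arange(6).reshape(1, 2, 3)  # e.g. x with shape (1, N, M)
if len(x.shape) > 2:
    x = np.squeeze(x, axis=0)
print(x.shape)  # (2, 3)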
calibrate.py (project: esys-pbi, author: fsxfreak)
def calculate_residual_3D_Points(ref_points, gaze_points, eye_to_world_matrix):

    average_distance = 0.0
    distance_variance = 0.0
    transformed_gaze_points = []

    for p in gaze_points:
        point = np.zeros(4)
        point[:3] = p
        point[3] = 1.0
        point = eye_to_world_matrix.dot(point)
        point = np.squeeze(np.asarray(point))
        transformed_gaze_points.append( point[:3] )

    for a, b in zip(ref_points, transformed_gaze_points):
        average_distance += np.linalg.norm(a - b)

    average_distance /= len(ref_points)

    for a, b in zip(ref_points, transformed_gaze_points):
        distance_variance += (np.linalg.norm(a - b) - average_distance)**2

    distance_variance /= len(ref_points)

    return average_distance, distance_variance
matrix_factorization.py (project: probabilistic-matrix-factorization, author: aki-nishimura)
def update_per_row(self, y_i, phi_i, J, mu0, c, v, r_prev_i, u_prev_i, phi_r_i, phi_u):
        # Params:
        #   J - column indices

        nnz_i = len(J)
        residual_i = y_i - mu0 - c[J]
        prior_Phi = np.diag(np.concatenate(([phi_r_i], phi_u)))
        v_T = np.hstack((np.ones((nnz_i, 1)), v[J, :]))
        post_Phi_i = prior_Phi + \
                     np.dot(v_T.T,
                            np.tile(phi_i[:, np.newaxis], (1, 1 + self.num_factor)) * v_T)  # Weighted sum of v_j * v_j.T
        post_mean_i = np.squeeze(np.dot(phi_i * residual_i, v_T))
        C, lower = scipy.linalg.cho_factor(post_Phi_i)
        post_mean_i = scipy.linalg.cho_solve((C, lower), post_mean_i)
        # Generate Gaussian, recycling the Cholesky factorization from the posterior mean computation.
        ru_i = math.sqrt(1 - self.relaxation ** 2) * scipy.linalg.solve_triangular(C, np.random.randn(len(post_mean_i)),
                                                                                   lower=lower)
        ru_i += post_mean_i + self.relaxation * (post_mean_i - np.concatenate(([r_prev_i], u_prev_i)))
        r_i = ru_i[0]
        u_i = ru_i[1:]

        return r_i, u_i
matrix_factorization.py (project: probabilistic-matrix-factorization, author: aki-nishimura)
def update_per_col(self, y_j, phi_j, I, mu0, r, u, c_prev_j, v_prev_j, phi_c_j, phi_v):

        prior_Phi = np.diag(np.concatenate(([phi_c_j], phi_v)))
        nnz_j = len(I)
        residual_j = y_j - mu0 - r[I]
        u_T = np.hstack((np.ones((nnz_j, 1)), u[I, :]))
        post_Phi_j = prior_Phi + \
                     np.dot(u_T.T,
                            np.tile(phi_j[:, np.newaxis], (1, 1 + self.num_factor)) * u_T)  # Weighted sum of u_i * u_i.T
        post_mean_j = np.squeeze(np.dot(phi_j * residual_j, u_T))
        C, lower = scipy.linalg.cho_factor(post_Phi_j)
        post_mean_j = scipy.linalg.cho_solve((C, lower), post_mean_j)
        # Generate Gaussian, recycling the Cholesky factorization from the posterior mean computation.
        cv_j = math.sqrt(1 - self.relaxation ** 2) * scipy.linalg.solve_triangular(C, np.random.randn(len(post_mean_j)),
                                                                              lower=lower)
        cv_j += post_mean_j + self.relaxation * (post_mean_j - np.concatenate(([c_prev_j], v_prev_j)))
        c_j = cv_j[0]
        v_j = cv_j[1:]

        return c_j, v_j
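
Both updates sample from a Gaussian N(mu, Phi^{-1}) by reusing the Cholesky factor of the posterior precision: cho_solve gives the mean, and solving the triangular factor against white noise gives a draw with covariance Phi^{-1}. A standalone sketch with a hypothetical 3x3 precision matrix:

import numpy as np
import scipy.linalg

rng = np.random.default_rng(0)
Phi = np.array([[4., 1., 0.],
                [1., 3., 1.],
                [0., 1., 2.]])  # posterior precision matrix
b = rng.normal(size=3)          # right-hand side, so that Phi @ mu = b

C, lower = scipy.linalg.cho_factor(Phi)
mu = scipy.linalg.cho_solve((C, lower), b)  # posterior mean

eps = rng.normal(size=3)
z = scipy.linalg.solve_triangular(C, eps, lower=lower)  # Cov(z) = inv(Phi)
sample = mu + z
print(sample)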
processing.py (project: deep-lossy-fun, author: PetarV-)
def deprocess_and_save(x, img_path):
    # Remove the batch dimension
    x = np.squeeze(x)

    # Restore the mean values on each channel
    x[:, :, 0] += 103.939
    x[:, :, 1] += 116.779
    x[:, :, 2] += 123.68

    # BGR --> RGB
    x = x[:, :, ::-1]

    # Clip unprintable colours
    x = np.clip(x, 0, 255).astype('uint8')

    # Save the image
    imsave(img_path, x)
utils.py (project: cleverhans, author: tensorflow)
def cleverhans_attack_wrapper(cleverhans_attack_fn, reset=True):
    def attack(a):
        session = tf.Session()
        with session.as_default():
            model = RVBCleverhansModel(a)
            adversarial_image = cleverhans_attack_fn(model, session, a)
            adversarial_image = np.squeeze(adversarial_image, axis=0)
            if reset:
                # optionally, reset to ignore other adversarials
                # found during the search
                a._reset()
            # run predictions to make sure the returned adversarial
            # is taken into account
            min_, max_ = a.bounds()
            adversarial_image = np.clip(adversarial_image, min_, max_)
            a.predictions(adversarial_image)
    return attack

