Example source code for Python's subtract()

model.py (project: single-image-depth-estimation, author: liuhyCV)
def loss(logits, depths, invalid_depths):
    #304*228    55*74
    #576*172    27*142
    out_put_size = 55*74
    logits_flat = tf.reshape(logits, [-1, out_put_size])
    depths_flat = tf.reshape(depths, [-1, out_put_size])
    invalid_depths_flat = tf.reshape(invalid_depths, [-1, out_put_size])

    predict = tf.multiply(logits_flat, invalid_depths_flat)
    target = tf.multiply(depths_flat, invalid_depths_flat)

    d = tf.subtract(predict, target)
    square_d = tf.square(d)
    sum_square_d = tf.reduce_sum(square_d, 1)
    sum_d = tf.reduce_sum(d, 1)
    square_sum_d = tf.square(sum_d)

    # Scale-invariant depth error (Eigen et al.): per-image sum(d^2)/n - 0.5*(sum d)^2/n^2,
    # averaged over the batch, with n = out_put_size.
    cost = tf.reduce_mean(sum_square_d / out_put_size - 0.5 * square_sum_d / math.pow(out_put_size, 2))

    tf.add_to_collection('losses', cost)

    return tf.add_n(tf.get_collection('losses'), name='total_loss')
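The snippet assumes `import math` and `import tensorflow as tf` at module level. A minimal sketch of exercising the loss on dummy tensors, assuming TensorFlow 1.x; the shapes and the all-ones validity mask below are hypothetical:

import numpy as np
import tensorflow as tf

# Hypothetical batch of 2 predictions and ground-truth depth maps of size 55x74.
logits = tf.constant(np.random.rand(2, 55, 74), dtype=tf.float32)
depths = tf.constant(np.random.rand(2, 55, 74), dtype=tf.float32)
valid_mask = tf.ones_like(depths)  # 1.0 where the ground-truth depth is usable

total_loss = loss(logits, depths, valid_mask)
with tf.Session() as sess:
    print(sess.run(total_loss))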
mobilenetdet.py (project: MobileNet, author: Zehaos)
def iou(bbox_1, bbox_2):
  """Compute iou of a box with another box. Box format '[y_min, x_min, y_max, x_max]'.
  Args:
    bbox_1: 1-D with shape `[4]`.
    bbox_2: 1-D with shape `[4]`.

  Returns:
    The IoU (intersection over union) as a scalar tensor.
  """
  lr = tf.minimum(bbox_1[3], bbox_2[3]) - tf.maximum(bbox_1[1], bbox_2[1])
  tb = tf.minimum(bbox_1[2], bbox_2[2]) - tf.maximum(bbox_1[0], bbox_2[0])
  lr = tf.maximum(lr, lr * 0)
  tb = tf.maximum(tb, tb * 0)
  intersection = tf.multiply(tb, lr)
  union = tf.subtract(
    tf.multiply((bbox_1[3] - bbox_1[1]), (bbox_1[2] - bbox_1[0])) +
    tf.multiply((bbox_2[3] - bbox_2[1]), (bbox_2[2] - bbox_2[0])),
    intersection
  )
  iou = tf.div(intersection, union)
  return iou
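A quick sanity check of the function above, assuming TensorFlow 1.x. The two hypothetical boxes overlap in a 0.5 x 0.5 region, so the intersection is 0.25, the union is 1.75, and the expected IoU is 1/7:

import tensorflow as tf

bbox_a = tf.constant([0.0, 0.0, 1.0, 1.0])   # [y_min, x_min, y_max, x_max]
bbox_b = tf.constant([0.5, 0.5, 1.5, 1.5])

with tf.Session() as sess:
    print(sess.run(iou(bbox_a, bbox_b)))     # ~0.142857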
det_utils.py (project: MobileNet, author: Zehaos)
def iou(bbox_1, bbox_2):
  """Compute iou of a box with another box. Box format '[y_min, x_min, y_max, x_max]'.
  Args:
    bbox_1: 1-D with shape `[4]`.
    bbox_2: 1-D with shape `[4]`.

  Returns:
    The IoU (intersection over union) as a scalar tensor.
  """
  lr = tf.minimum(bbox_1[3], bbox_2[3]) - tf.maximum(bbox_1[1], bbox_2[1])
  tb = tf.minimum(bbox_1[2], bbox_2[2]) - tf.maximum(bbox_1[0], bbox_2[0])
  lr = tf.maximum(lr, lr * 0)
  tb = tf.maximum(tb, tb * 0)
  intersection = tf.multiply(tb, lr)
  union = tf.subtract(
    tf.multiply((bbox_1[3] - bbox_1[1]), (bbox_1[2] - bbox_1[0])) +
    tf.multiply((bbox_2[3] - bbox_2[1]), (bbox_2[2] - bbox_2[0])),
    intersection
  )
  iou = tf.div(intersection, union)
  return iou
lenet_preprocessing.py (project: MobileNet, author: Zehaos)
def preprocess_image(image, output_height, output_width, is_training):
  """Preprocesses the given image.

  Args:
    image: A `Tensor` representing an image of arbitrary size.
    output_height: The height of the image after preprocessing.
    output_width: The width of the image after preprocessing.
    is_training: `True` if we're preprocessing the image for training and
      `False` otherwise.

  Returns:
    A preprocessed image.
  """
  image = tf.to_float(image)
  image = tf.image.resize_image_with_crop_or_pad(
      image, output_height, output_width)  # signature is (image, target_height, target_width)
  image = tf.subtract(image, 128.0)
  image = tf.div(image, 128.0)
  return image
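The final subtract/divide pair maps uint8 pixel values from [0, 255] into roughly [-1, 1]. A short usage sketch, assuming TensorFlow 1.x and a hypothetical 32x32 input cropped to 28x28:

import numpy as np
import tensorflow as tf

raw = tf.constant(np.random.randint(0, 256, size=(32, 32, 3)).astype(np.uint8))
processed = preprocess_image(raw, output_height=28, output_width=28, is_training=False)

with tf.Session() as sess:
    out = sess.run(processed)
    print(out.shape, out.min(), out.max())  # (28, 28, 3), values within [-1, 1]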
tensorflow_state.py (project: quantum-optimal-control, author: SchusterLab)
def get_inner_product(self,psi1,psi2):
        # Take two states psi1 and psi2 and compute their overlap (single-vector case).
        state_num=self.sys_para.state_num

        psi_1_real = (psi1[0:state_num])
        psi_1_imag = (psi1[state_num:2*state_num])
        psi_2_real = (psi2[0:state_num])
        psi_2_imag = (psi2[state_num:2*state_num])
        # psi1 stores a+ib and psi2 stores c+id; we want the squared magnitude of Sum((ac+bd) + i(bc-ad))
        with tf.name_scope('inner_product'):
            ac = tf.multiply(psi_1_real,psi_2_real)
            bd = tf.multiply(psi_1_imag,psi_2_imag)
            bc = tf.multiply(psi_1_imag,psi_2_real)
            ad = tf.multiply(psi_1_real,psi_2_imag)
            reals = tf.square(tf.add(tf.reduce_sum(ac),tf.reduce_sum(bd)))
            imags = tf.square(tf.subtract(tf.reduce_sum(bc),tf.reduce_sum(ad)))
            norm = tf.add(reals,imags)
        return norm
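The returned value is the squared magnitude of the inner product <psi1|psi2> for states stored as stacked real and imaginary parts. A small NumPy cross-check of the same quantity using complex arithmetic (the 3-level state below is hypothetical, not part of the original project):

import numpy as np

state_num = 3
psi1 = np.random.rand(state_num) + 1j * np.random.rand(state_num)
psi2 = np.random.rand(state_num) + 1j * np.random.rand(state_num)

# The TF function above should agree with this value when psi1 and psi2
# are passed as [real; imag] stacked vectors of length 2*state_num.
print(np.abs(np.vdot(psi1, psi2)) ** 2)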
tensorflow_state.py (project: quantum-optimal-control, author: SchusterLab)
def get_inner_product_2D(self,psi1,psi2):
        # Take two states psi1 and psi2 and compute their overlap (arbitrary number of vectors).
        # psi1 and psi2 are shaped as (2*state_num, number of vectors)
        state_num=self.sys_para.state_num

        psi_1_real = (psi1[0:state_num,:])
        psi_1_imag = (psi1[state_num:2*state_num,:])
        psi_2_real = (psi2[0:state_num,:])
        psi_2_imag = (psi2[state_num:2*state_num,:])
        # psi1 stores a+ib and psi2 stores c+id; we want the squared magnitude of Sum((ac+bd) + i(bc-ad))
        with tf.name_scope('inner_product'):
            ac = tf.reduce_sum(tf.multiply(psi_1_real,psi_2_real),0)
            bd = tf.reduce_sum(tf.multiply(psi_1_imag,psi_2_imag),0)
            bc = tf.reduce_sum(tf.multiply(psi_1_imag,psi_2_real),0)
            ad = tf.reduce_sum(tf.multiply(psi_1_real,psi_2_imag),0)
            reals = tf.square(tf.reduce_sum(tf.add(ac,bd))) # first trace inner product of all vectors, then squared
            imags = tf.square(tf.reduce_sum(tf.subtract(bc,ad)))
            norm = (tf.add(reals,imags))/(len(self.sys_para.states_concerned_list)**2)
        return norm
tensorflow_state.py (project: quantum-optimal-control, author: SchusterLab)
def get_inner_product_3D(self,psi1,psi2):
        # Take two states psi1 and psi2 and compute their overlap (arbitrary number of vectors and time steps).
        # psi1 and psi2 are shaped as (2*state_num, time_steps, number of vectors)
        state_num=self.sys_para.state_num

        psi_1_real = (psi1[0:state_num,:])
        psi_1_imag = (psi1[state_num:2*state_num,:])
        psi_2_real = (psi2[0:state_num,:])
        psi_2_imag = (psi2[state_num:2*state_num,:])
        # psi1 stores a+ib and psi2 stores c+id; we want the squared magnitude of Sum((ac+bd) + i(bc-ad))
        with tf.name_scope('inner_product'):
            ac = tf.reduce_sum(tf.multiply(psi_1_real,psi_2_real),0)
            bd = tf.reduce_sum(tf.multiply(psi_1_imag,psi_2_imag),0)
            bc = tf.reduce_sum(tf.multiply(psi_1_imag,psi_2_real),0)
            ad = tf.reduce_sum(tf.multiply(psi_1_real,psi_2_imag),0)
            reals = tf.reduce_sum(tf.square(tf.reduce_sum(tf.add(ac,bd),1)))
            # first trace inner product of all vectors, then squared, then sum contribution of all time steps
            imags = tf.reduce_sum(tf.square(tf.reduce_sum(tf.subtract(bc,ad),1)))
            norm = (tf.add(reals,imags))/(len(self.sys_para.states_concerned_list)**2)
        return norm
osvos.py (project: OSVOS-TensorFlow, author: scaelles)
def preprocess_img(image):
    """Preprocess the image to adapt it to network requirements
    Args:
    Image we want to feed to the network: an (H, W, 3) numpy array, or a path to an image file
    Returns:
    Image ready to be fed to the network, shaped (1, H, W, 3)
    """
    if type(image) is not np.ndarray:
        image = np.array(Image.open(image), dtype=np.uint8)
    in_ = image[:, :, ::-1]
    in_ = np.subtract(in_, np.array((104.00699, 116.66877, 122.67892), dtype=np.float32))
    # in_ = tf.subtract(tf.cast(in_, tf.float32), np.array((104.00699, 116.66877, 122.67892), dtype=np.float32))
    in_ = np.expand_dims(in_, axis=0)
    # in_ = tf.expand_dims(in_, 0)
    return in_


# TODO: Move preprocessing into TensorFlow
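The snippet assumes `import numpy as np` and `from PIL import Image` at module level. A hedged usage sketch; the blank 480x854 RGB frame below is hypothetical:

import numpy as np

frame = np.zeros((480, 854, 3), dtype=np.uint8)   # H x W x 3, RGB
batch = preprocess_img(frame)
print(batch.shape, batch.dtype)                   # (1, 480, 854, 3) float32, BGR, mean-subtracted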
facenet.py (project: facenet, author: davidsandberg)
def triplet_loss(anchor, positive, negative, alpha):
    """Calculate the triplet loss according to the FaceNet paper

    Args:
      anchor: the embeddings for the anchor images.
      positive: the embeddings for the positive images.
      negative: the embeddings for the negative images.
      alpha: the margin enforced between positive and negative pair distances.

    Returns:
      the triplet loss according to the FaceNet paper as a float tensor.
    """
    with tf.variable_scope('triplet_loss'):
        pos_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, positive)), 1)
        neg_dist = tf.reduce_sum(tf.square(tf.subtract(anchor, negative)), 1)

        basic_loss = tf.add(tf.subtract(pos_dist,neg_dist), alpha)
        loss = tf.reduce_mean(tf.maximum(basic_loss, 0.0), 0)

    return loss
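The loss is mean(max(||a - p||^2 - ||a - n||^2 + alpha, 0)) over the batch. A minimal sketch with hypothetical 128-dimensional embeddings, assuming TensorFlow 1.x:

import numpy as np
import tensorflow as tf

anchor   = tf.constant(np.random.rand(8, 128), dtype=tf.float32)
positive = tf.constant(np.random.rand(8, 128), dtype=tf.float32)
negative = tf.constant(np.random.rand(8, 128), dtype=tf.float32)

loss_op = triplet_loss(anchor, positive, negative, alpha=0.2)
with tf.Session() as sess:
    print(sess.run(loss_op))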
utils.py (project: DeepCellSeg, author: arbellea)
def my_clustering_loss(net_out,feature_map):

    net_out_vec = tf.reshape(net_out,[-1,1])
    pix_num = net_out_vec.get_shape().as_list()[0]
    feature_vec = tf.reshape(feature_map,[pix_num,-1])
    net_out_vec = tf.div(net_out_vec, tf.reduce_sum(net_out_vec,keep_dims=True))
    not_net_out_vec = tf.subtract(tf.constant(1.),net_out_vec)
    mean_fg_var = tf.get_variable('mean_fg', shape=[feature_vec.get_shape().as_list()[1], 1], trainable=False)
    mean_bg_var = tf.get_variable('mean_bg', shape=[feature_vec.get_shape().as_list()[1], 1], trainable=False)

    mean_bg = tf.matmul(not_net_out_vec,feature_vec,True)
    mean_fg = tf.matmul(net_out_vec,feature_vec,True)

    feature_square = tf.square(feature_vec)

    loss = tf.add(tf.matmul(net_out_vec, tf.reduce_sum(tf.square(tf.subtract(feature_vec, mean_fg_var)), 1, True), True),
                  tf.matmul(not_net_out_vec, tf.reduce_sum(tf.square(tf.subtract(feature_vec,mean_bg_var)), 1, True), True))
    with tf.control_dependencies([loss]):
        update_mean = tf.group(tf.assign(mean_fg_var,mean_fg),tf.assign(mean_bg_var,mean_bg))
        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_mean)

    return loss
tf_utils.py (project: thesis_scripts, author: PhilippKopp)
def subtract_mean_multi(image_tensors, mean_image_path, channels=NUM_CHANNELS, image_size=512):

    mean_image = tf.convert_to_tensor(mean_image_path, dtype=tf.string)
    mean_file_contents = tf.read_file(mean_image)
    mean_uint8 = tf.image.decode_png(mean_file_contents, channels=channels)
    mean_uint8.set_shape([image_size, image_size, channels])


    images_mean_free = []
    for image_tensor in image_tensors:
        image_tensor.set_shape([image_size, image_size, channels])
        image = tf.cast(image_tensor, tf.float32)

        #subtract mean image
        image_mean_free = tf.subtract(image, tf.cast(mean_uint8, tf.float32))
        images_mean_free.append(image_mean_free)

    return images_mean_free
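A hedged usage sketch for the function above; 'mean_image.png' is a hypothetical path to a 512x512 mean image, and NUM_CHANNELS is assumed to be defined as 3 at module level in the original tf_utils.py (TensorFlow 1.x):

import tensorflow as tf

img_a = tf.placeholder(tf.uint8, [512, 512, 3])
img_b = tf.placeholder(tf.uint8, [512, 512, 3])
centered = subtract_mean_multi([img_a, img_b], 'mean_image.png', channels=3)
print(centered[0].shape)  # TensorShape([512, 512, 3]); values are float32 and mean-subtracted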
tf_utils.py (project: thesis_scripts, author: PhilippKopp)
def single_input_image(image_str, mean_image_path, png_with_alpha=False, image_size=512):

    mean_image_str = tf.convert_to_tensor(mean_image_path, dtype=tf.string)

    file_contents = tf.read_file(image_str)
    if png_with_alpha:
        uint8image = tf.image.decode_png(file_contents, channels=4)
        uint8image.set_shape([image_size, image_size, 4])
    else:
        uint8image = tf.image.decode_image(file_contents, channels=NUM_CHANNELS)
        uint8image.set_shape([image_size, image_size, NUM_CHANNELS])
    image = tf.cast(uint8image, tf.float32)

    #subtract mean image
    mean_file_contents = tf.read_file(mean_image_str)
    if png_with_alpha:
        mean_uint8 = tf.image.decode_png(mean_file_contents, channels=4)
        mean_uint8.set_shape([image_size, image_size, 4])
    else:
        mean_uint8 = tf.image.decode_image(mean_file_contents, channels=NUM_CHANNELS)
        mean_uint8.set_shape([image_size, image_size, NUM_CHANNELS])
    image_mean_free = tf.subtract(image, tf.cast(mean_uint8, tf.float32))

    return image_mean_free
inception_preprocessing.py (project: X-ray-classification, author: bendidi)
def preprocess_for_eval(image, height, width,
                        central_fraction=0.875, scope=None):
  """Prepare one image for evaluation.
  If height and width are specified, the image is resized to that size with
  bilinear interpolation.
  If central_fraction is specified, the central fraction of the input image is
  cropped first.
  Args:
    image: 3-D Tensor of image. If dtype is tf.float32 then the range should be
      [0, 1]; otherwise it is converted to tf.float32 assuming that the range
      is [0, MAX], where MAX is the largest positive representable number for
      the int(8/16/32) data type (see `tf.image.convert_image_dtype` for details).
    height: integer
    width: integer
    central_fraction: Optional Float, fraction of the image to crop.
    scope: Optional scope for name_scope.
  Returns:
    3-D float Tensor of prepared image.
  """
  with tf.name_scope(scope, 'eval_image', [image, height, width]):
    if image.dtype != tf.float32:
      image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    # Crop the central region of the image with an area containing 87.5% of
    # the original image.
    if central_fraction:
      image = tf.image.central_crop(image, central_fraction=central_fraction)

    if height and width:
      # Resize the image to the specified height and width.
      image = tf.expand_dims(image, 0)
      image = tf.image.resize_bilinear(image, [height, width],
                                       align_corners=False)
      image = tf.squeeze(image, [0])
    image = tf.subtract(image, 0.5)
    image = tf.multiply(image, 2.0)
    return image
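The final subtract/multiply pair rescales the [0, 1] float image to [-1, 1], the input range expected by Inception-style networks. A short usage sketch, assuming TensorFlow 1.x and a hypothetical 300x400 input:

import numpy as np
import tensorflow as tf

raw = tf.constant(np.random.randint(0, 256, size=(300, 400, 3)).astype(np.uint8))
img = preprocess_for_eval(raw, height=224, width=224)

with tf.Session() as sess:
    out = sess.run(img)
    print(out.shape, out.min(), out.max())  # (224, 224, 3), values within [-1, 1]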
a3C.py (project: A3C, author: go2sea)
def TD_loss(self):
        return tf.subtract(self.v_input, self.v)
facenet.py (project: facerecognition, author: guoxiaolu)
def prewhiten(x):
    mean = np.mean(x)
    std = np.std(x)
    std_adj = np.maximum(std, 1.0/np.sqrt(x.size))
    y = np.multiply(np.subtract(x, mean), 1/std_adj)
    return y
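prewhiten standardizes an image to roughly zero mean and unit standard deviation (with a floor on the std for very small inputs). A quick NumPy check on a hypothetical 160x160 uint8 image:

import numpy as np

img = np.random.randint(0, 256, size=(160, 160, 3)).astype(np.uint8)
white = prewhiten(img)
print(white.mean(), white.std())  # approximately 0.0 and 1.0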
facenet.py (project: facerecognition, author: guoxiaolu)
def calculate_roc(thresholds, embeddings1, embeddings2, actual_issame, nrof_folds=10):
    assert(embeddings1.shape[0] == embeddings2.shape[0])
    assert(embeddings1.shape[1] == embeddings2.shape[1])
    nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
    nrof_thresholds = len(thresholds)
    k_fold = KFold(n_splits=nrof_folds, shuffle=False)

    tprs = np.zeros((nrof_folds,nrof_thresholds))
    fprs = np.zeros((nrof_folds,nrof_thresholds))
    accuracy = np.zeros((nrof_folds))

    diff = np.subtract(embeddings1, embeddings2)
    dist = np.sum(np.square(diff),1)
    indices = np.arange(nrof_pairs)

    for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):

        # Find the best threshold for the fold
        acc_train = np.zeros((nrof_thresholds))
        for threshold_idx, threshold in enumerate(thresholds):
            _, _, acc_train[threshold_idx] = calculate_accuracy(threshold, dist[train_set], actual_issame[train_set])
        best_threshold_index = np.argmax(acc_train)
        for threshold_idx, threshold in enumerate(thresholds):
            tprs[fold_idx,threshold_idx], fprs[fold_idx,threshold_idx], _ = calculate_accuracy(threshold, dist[test_set], actual_issame[test_set])
        _, _, accuracy[fold_idx] = calculate_accuracy(thresholds[best_threshold_index], dist[test_set], actual_issame[test_set])

    tpr = np.mean(tprs,0)
    fpr = np.mean(fprs,0)
    return tpr, fpr, accuracy
facenet.py (project: facerecognition, author: guoxiaolu)
def calculate_val(thresholds, embeddings1, embeddings2, actual_issame, far_target, nrof_folds=10):
    assert(embeddings1.shape[0] == embeddings2.shape[0])
    assert(embeddings1.shape[1] == embeddings2.shape[1])
    nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
    nrof_thresholds = len(thresholds)
    k_fold = KFold(n_splits=nrof_folds, shuffle=False)

    val = np.zeros(nrof_folds)
    far = np.zeros(nrof_folds)

    diff = np.subtract(embeddings1, embeddings2)
    dist = np.sum(np.square(diff),1)
    indices = np.arange(nrof_pairs)

    for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):

        # Find the threshold that gives FAR = far_target
        far_train = np.zeros(nrof_thresholds)
        for threshold_idx, threshold in enumerate(thresholds):
            _, far_train[threshold_idx] = calculate_val_far(threshold, dist[train_set], actual_issame[train_set])
        if np.max(far_train)>=far_target:
            f = interpolate.interp1d(far_train, thresholds, kind='slinear')
            threshold = f(far_target)
        else:
            threshold = 0.0

        val[fold_idx], far[fold_idx] = calculate_val_far(threshold, dist[test_set], actual_issame[test_set])

    val_mean = np.mean(val)
    far_mean = np.mean(far)
    val_std = np.std(val)
    return val_mean, val_std, far_mean
losses.py (project: youtube-8m, author: wangheda)
def calculate_loss(self, predictions, labels, b=1.0, **unused_params):
    with tf.name_scope("loss_hinge"):
      float_labels = tf.cast(labels, tf.float32)
      all_zeros = tf.zeros(tf.shape(float_labels), dtype=tf.float32)
      all_ones = tf.ones(tf.shape(float_labels), dtype=tf.float32)
      sign_labels = tf.subtract(tf.scalar_mul(2, float_labels), all_ones)
      hinge_loss = tf.maximum(
          all_zeros, tf.scalar_mul(b, all_ones) - sign_labels * predictions)
      return tf.reduce_mean(tf.reduce_sum(hinge_loss, 1))
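This is a one-vs-all hinge loss: labels in {0, 1} are mapped to {-1, +1}, the per-class terms max(0, b - y*prediction) are summed per example, and the result is averaged over the batch. A small NumPy check of the same formula with hypothetical values (b = 1.0):

import numpy as np

labels = np.array([[1, 0], [0, 1]], dtype=np.float32)
preds  = np.array([[0.8, -0.3], [-0.2, 0.4]], dtype=np.float32)

sign_labels = 2.0 * labels - 1.0
hinge = np.maximum(0.0, 1.0 - sign_labels * preds)
print(hinge.sum(axis=1).mean())  # ~1.15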
losses.py (project: youtube-8m, author: wangheda)
def calculate_loss(self, predictions, labels, b=1.0, **unused_params):
    with tf.name_scope("loss_hinge"):
      float_labels = tf.cast(labels, tf.float32)
      all_zeros = tf.zeros(tf.shape(float_labels), dtype=tf.float32)
      all_ones = tf.ones(tf.shape(float_labels), dtype=tf.float32)
      sign_labels = tf.subtract(tf.scalar_mul(2, float_labels), all_ones)
      hinge_loss = tf.maximum(
          all_zeros, tf.scalar_mul(b, all_ones) - sign_labels * predictions)
      return tf.reduce_mean(tf.reduce_sum(hinge_loss, 1))
linear_svm.py (project: TensorFlow-World, author: astorfi)
def loss_fn(W,b,x_data,y_target):
    logits = tf.subtract(tf.matmul(x_data, W),b)
    norm_term = tf.divide(tf.reduce_sum(tf.multiply(W, W)), 2)  # L2 regularizer: ||W||^2 / 2
    classification_loss = tf.reduce_mean(tf.maximum(0., tf.subtract(FLAGS.delta, tf.multiply(logits, y_target))))
    total_loss = tf.add(tf.multiply(FLAGS.C_param,classification_loss), tf.multiply(FLAGS.Reg_param,norm_term))
    return total_loss
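This is the primal soft-margin SVM objective: a hinge loss on y*(xW - b) plus an L2 penalty on W, weighted by the C_param and Reg_param flags. A hedged sketch that stubs out the command-line FLAGS with hypothetical values (TensorFlow 1.x):

import collections
import numpy as np
import tensorflow as tf

# Hypothetical stand-in for the module-level FLAGS object used by the snippet above.
FLAGS = collections.namedtuple('Flags', ['delta', 'C_param', 'Reg_param'])(1.0, 1.0, 0.1)

x_data   = tf.constant(np.random.rand(16, 2), dtype=tf.float32)
y_target = tf.constant(np.random.choice([-1.0, 1.0], size=(16, 1)), dtype=tf.float32)
W = tf.Variable(tf.random_normal([2, 1]))
b = tf.Variable(tf.random_normal([1, 1]))

total = loss_fn(W, b, x_data, y_target)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(total))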

