Python less_equal() example source code
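tf.less_equal(x, y) computes the element-wise truth value of x <= y, broadcasting its arguments NumPy-style and returning a bool tensor. The snippets below are collected from open-source TensorFlow 1.x projects. As a quick orientation, here is a minimal sketch of the basic usage, written against the same TF 1.x graph API; the tensor values are illustrative:

import tensorflow as tf

x = tf.constant([1, 3, 5])
y = tf.constant(3)

# Element-wise x <= y with broadcasting; the result dtype is bool.
mask = tf.less_equal(x, y)           # [True, True, False]

# A common follow-up pattern: turn the boolean mask into 0/1 weights.
weights = tf.cast(mask, tf.float32)  # [1., 1., 0.]

with tf.Session() as sess:
    print(sess.run([mask, weights]))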

preprocess.py (project: reslearn, author: mackcmillion)
def _resize_aux(image, new_shorter_edge_tensor):
    shape = tf.shape(image)
    height = shape[0]
    width = shape[1]

    height_smaller_than_width = tf.less_equal(height, width)
    new_height_and_width = tf.cond(
            height_smaller_than_width,
            lambda: (new_shorter_edge_tensor, _compute_longer_edge(height, width, new_shorter_edge_tensor)),
            lambda: (_compute_longer_edge(width, height, new_shorter_edge_tensor), new_shorter_edge_tensor)
    )

    # workaround since tf.image.resize_images() does not work
    image = tf.expand_dims(image, 0)
    image = tf.image.resize_bilinear(image, tf.pack(new_height_and_width))
    return tf.squeeze(image, [0])
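The helper above rescales an image so that its shorter edge equals `new_shorter_edge_tensor` while preserving the aspect ratio. `_compute_longer_edge` is not included in this excerpt; a plausible implementation, offered purely as an assumption rather than the project's actual code, scales the longer edge by the same factor applied to the shorter one:

def _compute_longer_edge(shorter, longer, new_shorter):
    # Hypothetical helper: scale the longer edge by the ratio new_shorter / shorter.
    return (longer * new_shorter) // shorter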
feedback.py (project: sequencing, author: SwordYork)
def next_inputs(self, time, sample_ids=None, prev_finished=None):
        if sample_ids is None or self.teacher_rate > 0.:
            finished = tf.greater_equal(time + 1, self.sequence_length)
        else:
            finished = math_ops.logical_or(
                tf.greater_equal(time + 1, self.max_step),
                tf.equal(self.eos_id, sample_ids))

        if self.teacher_rate == 1. or (sample_ids is None):
            next_input_ids = self._input_tas.read(time)
            return finished, self.lookup(next_input_ids)

        if self.teacher_rate > 0.:
            # scheduled
            teacher_rates = tf.less_equal(
                tf.random_uniform(tf.shape(sample_ids), minval=0., maxval=1.),
                self.teacher_rate)
            teacher_rates = tf.to_int32(teacher_rates)

            next_input_ids = (teacher_rates * self._input_tas.read(time)
                              + (1 - teacher_rates) * sample_ids)
        else:
            next_input_ids = sample_ids

        return finished, self.lookup(next_input_ids)
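In the scheduled-sampling branch above, tf.less_equal compares a uniform random draw against the teacher-forcing rate, yielding a per-position Bernoulli mask that chooses between the ground-truth token and the model's own sample. A standalone sketch of that pattern (the function and argument names are illustrative, not from the project):

import tensorflow as tf

def mix_teacher_and_sampled(teacher_ids, sampled_ids, teacher_rate):
    # 1 where the teacher token is kept, 0 where the sampled token is kept.
    keep_teacher = tf.to_int32(tf.less_equal(
        tf.random_uniform(tf.shape(sampled_ids), minval=0., maxval=1.),
        teacher_rate))
    return keep_teacher * teacher_ids + (1 - keep_teacher) * sampled_ids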
model.py (project: attention-ocr, author: emedvedev)
def _prepare_image(self, image):
        """Resize the image to a maximum height of `self.height` and maximum
        width of `self.width` while maintaining the aspect ratio. Pad the
        resized image to a fixed size of ``[self.height, self.width]``."""
        img = tf.image.decode_png(image, channels=self.channels)
        dims = tf.shape(img)
        self.width = self.max_width

        max_width = tf.to_int32(tf.ceil(tf.truediv(dims[1], dims[0]) * self.height_float))
        max_height = tf.to_int32(tf.ceil(tf.truediv(self.width, max_width) * self.height_float))

        resized = tf.cond(
            tf.greater_equal(self.width, max_width),
            lambda: tf.cond(
                tf.less_equal(dims[0], self.height),
                lambda: tf.to_float(img),
                lambda: tf.image.resize_images(img, [self.height, max_width],
                                               method=tf.image.ResizeMethod.BICUBIC),
            ),
            lambda: tf.image.resize_images(img, [max_height, self.width],
                                           method=tf.image.ResizeMethod.BICUBIC)
        )

        padded = tf.image.pad_to_bounding_box(resized, 0, 0, self.height, self.width)
        return padded
trueshadow.py (project: pytruenorth, author: vmonaco)
def normal_ccdf(x, mu, sigma2):
    """Normal CCDF"""
    # Check for degenerate distributions when sigma2 == 0
    # if x >= mu, n = 0
    # if x < mu, n = 1
    # sigma2_le_0 = tf.less_equal(sigma2, 0.)
    # x_gte_mu = tf.greater_equal(x, mu)
    # x_lt_mu = tf.less(x, mu)

    # Never divide by zero, instead the logic below handles degenerate distribution cases
    # sigma2 = tf.cond(sigma2_le_0, lambda: tf.ones_like(sigma2), lambda: sigma2)

    p = (1. - 0.5 * (1. + tf.erf((x - mu) / tf.sqrt(2. * sigma2))))
    # p = tf.cond(tf.logical_and(sigma2_le_0, x_gte_mu), lambda: tf.zeros_like(p), lambda: p)
    # p = tf.cond(tf.logical_and(sigma2_le_0, x_lt_mu), lambda: tf.ones_like(p), lambda: p)
    return p
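For reference, the returned value is the complementary CDF of a normal distribution with mean $\mu$ and variance $\sigma^2$:

$$ P(X > x) = 1 - \Phi\!\left(\frac{x - \mu}{\sigma}\right) = 1 - \frac{1}{2}\left[1 + \operatorname{erf}\!\left(\frac{x - \mu}{\sqrt{2\sigma^2}}\right)\right] $$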
preprocessor_test.py (project: tensorflow, author: luyishisi)
def testRandomPixelValueScale(self):
    preprocessing_options = []
    preprocessing_options.append((preprocessor.normalize_image, {
        'original_minval': 0,
        'original_maxval': 255,
        'target_minval': 0,
        'target_maxval': 1
    }))
    preprocessing_options.append((preprocessor.random_pixel_value_scale, {}))
    images = self.createTestImages()
    tensor_dict = {fields.InputDataFields.image: images}
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocessing_options)
    images_min = tf.to_float(images) * 0.9 / 255.0
    images_max = tf.to_float(images) * 1.1 / 255.0
    images = tensor_dict[fields.InputDataFields.image]
    values_greater = tf.greater_equal(images, images_min)
    values_less = tf.less_equal(images, images_max)
    values_true = tf.fill([1, 4, 4, 3], True)
    with self.test_session() as sess:
      (values_greater_, values_less_, values_true_) = sess.run(
          [values_greater, values_less, values_true])
      self.assertAllClose(values_greater_, values_true_)
      self.assertAllClose(values_less_, values_true_)
loss_graphs.py (project: tensorrec, author: jfkirk)
def separation_loss(tf_prediction_serial, tf_interactions_serial, **kwargs):
    """
    This loss function models the explicit positive and negative interaction predictions as normal distributions and
    returns the probability of overlap between the two distributions.
    :param tf_prediction_serial:
    :param tf_interactions_serial:
    :return:
    """

    tf_positive_mask = tf.greater(tf_interactions_serial, 0.0)
    tf_negative_mask = tf.less_equal(tf_interactions_serial, 0.0)

    tf_positive_predictions = tf.boolean_mask(tf_prediction_serial, tf_positive_mask)
    tf_negative_predictions = tf.boolean_mask(tf_prediction_serial, tf_negative_mask)

    tf_pos_mean, tf_pos_var = tf.nn.moments(tf_positive_predictions, axes=[0])
    tf_neg_mean, tf_neg_var = tf.nn.moments(tf_negative_predictions, axes=[0])

    tf_overlap_distribution = tf.contrib.distributions.Normal(loc=(tf_neg_mean - tf_pos_mean),
                                                              scale=tf.sqrt(tf_neg_var + tf_pos_var))

    loss = 1.0 - tf_overlap_distribution.cdf(0.0)
    return loss
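Treating the positive and negative predictions as independent normals, their difference $D = \text{neg} - \text{pos}$ is normal with mean $\mu_{neg} - \mu_{pos}$ and variance $\sigma^2_{pos} + \sigma^2_{neg}$, so

$$ \text{loss} = 1 - P(D \le 0) = P(\text{neg} > \text{pos}), $$

i.e. the probability that a randomly drawn negative prediction exceeds a randomly drawn positive one.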
mjsynth.py (project: cnn_lstm_ctc_ocr, author: weinman)
def _get_input_filter(width, width_threshold, length, length_threshold):
    """Boolean op for discarding input data based on string or image size
    Input:
      width            : Tensor representing the image width
      width_threshold  : Python numerical value (or None) representing the 
                         maximum allowable input image width 
      length           : Tensor representing the ground truth string length
      length_threshold : Python numerical value (or None) representing the 
                         maximum allowable input string length
   Returns:
      keep_input : Boolean Tensor indicating whether to keep a given input 
                  with the specified image width and string length
"""

    keep_input = None

    if width_threshold is not None:
        keep_input = tf.less_equal(width, width_threshold)

    if length_threshold is not None:
        length_filter = tf.less_equal(length, length_threshold)
        if keep_input is None:
            keep_input = length_filter
        else:
            keep_input = tf.logical_and(keep_input, length_filter)

    if keep_input is None:
        keep_input = True
    else:
        keep_input = tf.reshape(keep_input, [])  # explicitly make a scalar

    return keep_input
cwt.py (project: cwt-tensorflow, author: nickgeoca)
def cwt(wav, widthCwt, wavelet):
    length = wav.shape[0]
    wav = tf.to_float(wav)
    wav = tf.reshape(wav, [1,length,1,1])

    # While loop functions
    def body(i, m): 
        v = conv1DWavelet(wav, i, wavelet)
        v = tf.reshape(v, [length, 1])

        m = tf.concat([m,v], 1)

        return [1 + i, m]

    def cond_(i, m):
        return tf.less_equal(i, widthCwt)

    # Initialize and run while loop
    emptyCwtMatrix = tf.zeros([length, 0], dtype='float32') 
    i = tf.constant(1)
    _, result = tf.while_loop(
            cond_,
            body,
            [i, emptyCwtMatrix],
            shape_invariants=[i.get_shape(), tf.TensorShape([length, None])],
            back_prop=False,
            parallel_iterations=1024,
            )
    result = tf.transpose(result)

    return result

# ------------------------------------------------------
#                 wavelets
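The cond_ function above uses tf.less_equal as the termination test of tf.while_loop, iterating over scale indices while i <= widthCwt. A minimal, self-contained version of that control-flow pattern, accumulating a running sum instead of wavelet convolutions:

import tensorflow as tf

i0 = tf.constant(1)
acc0 = tf.constant(0)

def cond(i, acc):
    return tf.less_equal(i, 10)   # keep looping while i <= 10

def body(i, acc):
    return i + 1, acc + i         # accumulate 1 + 2 + ... + 10

_, total = tf.while_loop(cond, body, [i0, acc0])

with tf.Session() as sess:
    print(sess.run(total))        # 55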
tensorflow_backend.py (project: keras, author: GeekLiB)
def lesser_equal(x, y):
    '''Element-wise truth value of (x <= y).
    Returns a bool tensor.
    '''
    return tf.less_equal(x, y)
utils.py (project: zhusuan, author: thu-ml)
def __le__(self, other):
        return tf.less_equal(self, other)
univariate.py (project: zhusuan, author: thu-ml)
def _prob(self, given):
        mask = tf.cast(tf.logical_and(tf.less_equal(self.minval, given),
                                      tf.less(given, self.maxval)),
                       self.dtype)
        p = 1. / (self.maxval - self.minval)
        if self._check_numerics:
            p = tf.check_numerics(p, "p")
        return p * mask
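This is the density of a continuous uniform distribution restricted to its support:

$$ p(x) = \begin{cases} \dfrac{1}{\text{maxval} - \text{minval}} & \text{minval} \le x < \text{maxval} \\ 0 & \text{otherwise} \end{cases} $$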
model.py (project: densecap-tensorflow, author: rampage644)
def huber_loss(x, delta=1):
    coef = 0.5
    l2_mask = tf.less_equal(tf.abs(x), delta)
    l1_mask = tf.greater(tf.abs(x), delta)

    term_1 = tf.reduce_sum(coef * tf.square(tf.boolean_mask(x, l2_mask)))
    term_2 = tf.reduce_sum(delta * (tf.abs(tf.boolean_mask(x, l1_mask)) - coef * delta))

    return term_1 + term_2
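Written out, this is the Huber loss with threshold $\delta$, summed over the elements of x; l2_mask selects the quadratic region and l1_mask the linear region:

$$ L_\delta(x) = \begin{cases} \tfrac{1}{2}x^2 & |x| \le \delta \\ \delta\left(|x| - \tfrac{1}{2}\delta\right) & |x| > \delta \end{cases} $$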
metrics.py (project: antgo, author: jianzfb)
def precision_recall_values(xvals, precision, recall, name=None):
    """Compute values on the precision/recall curve.

    Args:
      x: Python list of floats;
      precision: 1D Tensor decreasing.
      recall: 1D Tensor increasing.
    Return:
      list of precision values.
    """
    with ops.name_scope(name, "precision_recall_values",
                        [precision, recall]) as name:
        # Add bounds values to precision and recall.
        precision = tf.concat([[0.], precision, [0.]], axis=0)
        recall = tf.concat([[0.], recall, [1.]], axis=0)
        precision = tfe_math.cummax(precision, reverse=True)

        prec_values = []
        for x in xvals:
            mask = tf.less_equal(recall, x)
            val = tf.reduce_min(tf.boolean_mask(precision, mask))
            prec_values.append(val)
        return tf.tuple(prec_values)


# =========================================================================== #
# TF Extended metrics: old stuff!
# =========================================================================== #
train.py (project: ssd_tensorflow, author: railsnoob)
def _smooth_l1(self,x):
        return tf.where( tf.less_equal(tf.abs(x),1.0), 0.5*x*x,  tf.abs(x) - 0.5)
tensorflow_backend.py (project: deep-learning-keras-projects, author: jasmeetsb)
def lesser_equal(x, y):
    """Element-wise truth value of (x <= y).

    # Returns
        A bool tensor.
    """
    return tf.less_equal(x, y)
model_seg+pos.py (project: tensorflow-CWS-LSTM, author: elvinpoon)
def certainty(self):
        certainty = self.seg_prediction * tf.log(self.seg_prediction)
        certainty = -tf.reduce_sum(certainty,reduction_indices=2)
        s1 = tf.ones(tf.shape(certainty))
        csum = tf.cumsum(s1,axis=1)
        mask = tf.less_equal(
            csum,
            tf.cast(tf.tile(tf.expand_dims(self._length, 1),
                            [1, tf.shape(certainty)[1]]),
                    tf.float32))
        mask = tf.select(mask, tf.ones(tf.shape(certainty)),
                  tf.zeros(tf.shape(certainty)))
        certainty *= mask
        certainty = tf.reduce_sum(certainty, reduction_indices=1)
        return certainty
metrics.py (project: SSD_tensorflow_VOC, author: LevinJ)
def precision_recall_values(xvals, precision, recall, name=None):
    """Compute values on the precision/recall curve.

    Args:
      x: Python list of floats;
      precision: 1D Tensor decreasing.
      recall: 1D Tensor increasing.
    Return:
      list of precision values.
    """
    with ops.name_scope(name, "precision_recall_values",
                        [precision, recall]) as name:
        # Add bounds values to precision and recall.
        precision = tf.concat([[0.], precision, [0.]], axis=0)
        recall = tf.concat([[0.], recall, [1.]], axis=0)
        precision = tfe_math.cummax(precision, reverse=True)

        prec_values = []
        for x in xvals:
            mask = tf.less_equal(recall, x)
            val = tf.reduce_min(tf.boolean_mask(precision, mask))
            prec_values.append(val)
        return tf.tuple(prec_values)


# =========================================================================== #
# TF Extended metrics: old stuff!
# =========================================================================== #
textdataflow.py (project: tefla, author: openAGI)
def _example_too_big(self, example, max_length):
        return tf.less_equal(self._example_length(example), max_length)
core_test.py (project: lsdc, author: febert)
def setUp(self):
    super(CoreBinaryOpsTest, self).setUp()

    self.x_probs_broadcast_tensor = tf.reshape(
        self.x_probs_lt.tensor, [self.x_size, 1, self.probs_size])

    self.channel_probs_broadcast_tensor = tf.reshape(
        self.channel_probs_lt.tensor, [1, self.channel_size, self.probs_size])

    # == and != are not element-wise for tf.Tensor, so they shouldn't be
    # elementwise for LabeledTensor, either.
    self.ops = [
        ('add', operator.add, tf.add, core.add),
        ('sub', operator.sub, tf.sub, core.sub),
        ('mul', operator.mul, tf.mul, core.mul),
        ('div', operator.truediv, tf.div, core.div),
        ('mod', operator.mod, tf.mod, core.mod),
        ('pow', operator.pow, tf.pow, core.pow_function),
        ('equal', None, tf.equal, core.equal),
        ('less', operator.lt, tf.less, core.less),
        ('less_equal', operator.le, tf.less_equal, core.less_equal),
        ('not_equal', None, tf.not_equal, core.not_equal),
        ('greater', operator.gt, tf.greater, core.greater),
        ('greater_equal', operator.ge, tf.greater_equal, core.greater_equal),
    ]
    self.test_lt_1 = self.x_probs_lt
    self.test_lt_2 = self.channel_probs_lt
    self.test_lt_1_broadcast = self.x_probs_broadcast_tensor
    self.test_lt_2_broadcast = self.channel_probs_broadcast_tensor
    self.broadcast_axes = [self.a0, self.a1, self.a3]
tensorflow_backend.py (project: keras-customized, author: ambrite)
def lesser_equal(x, y):
    '''Element-wise truth value of (x <= y).
    Returns a bool tensor.
    '''
    return tf.less_equal(x, y)
metrics.py (project: Deep-Fashion, author: TomPyonsuke)
def precision_recall_values(xvals, precision, recall, name=None):
    """Compute values on the precision/recall curve.

    Args:
      x: Python list of floats;
      precision: 1D Tensor decreasing.
      recall: 1D Tensor increasing.
    Return:
      list of precision values.
    """
    with ops.name_scope(name, "precision_recall_values",
                        [precision, recall]) as name:
        # Add bounds values to precision and recall.
        precision = tf.concat([[0.], precision, [0.]], axis=0)
        recall = tf.concat([[0.], recall, [1.]], axis=0)
        precision = tfe_math.cummax(precision, reverse=True)

        prec_values = []
        for x in xvals:
            mask = tf.less_equal(recall, x)
            val = tf.reduce_min(tf.boolean_mask(precision, mask))
            prec_values.append(val)
        return tf.tuple(prec_values)


# =========================================================================== #
# TF Extended metrics: old stuff!
# =========================================================================== #
ops.py (project: ethnicity-tensorflow, author: jhyuklee)
def mask_by_index(batch_size, input_len, max_time_step):
    with tf.variable_scope('Masking') as scope:
        input_index = tf.range(0, batch_size) * max_time_step + (input_len - 1)
        lengths_transposed = tf.expand_dims(input_index, 1)
        lengths_tiled = tf.tile(lengths_transposed, [1, max_time_step])
        mask_range = tf.range(0, max_time_step)
        range_row = tf.expand_dims(mask_range, 0)
        range_tiled = tf.tile(range_row, [batch_size, 1])
        mask = tf.less_equal(range_tiled, lengths_tiled)
        weight = tf.select(mask, tf.ones([batch_size, max_time_step]),
                           tf.zeros([batch_size, max_time_step]))
        weight = tf.reshape(weight, [-1])
        return weight
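The mask built above keeps positions up to each example's length and zeros the padded positions. Note that input_index folds a batch offset (tf.range(0, batch_size) * max_time_step) into the compared values, so it relies on the flattened indexing used by the surrounding code. For plain per-example length masking, tf.sequence_mask expresses the same idea in one call; a sketch under that assumption (the function name is illustrative):

import tensorflow as tf

def mask_by_length(input_len, max_time_step):
    # True for positions < input_len, False for padded positions.
    mask = tf.sequence_mask(input_len, maxlen=max_time_step)
    weight = tf.cast(mask, tf.float32)
    return tf.reshape(weight, [-1])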
tensorflow_backend.py (project: keras, author: NVIDIA)
def lesser_equal(x, y):
    """Element-wise truth value of (x <= y).

    # Returns
        A bool tensor.
    """
    return tf.less_equal(x, y)
tensorflow_backend.py (project: keras_superpixel_pooling, author: parag2489)
def less_equal(x, y):
    """Element-wise truth value of (x <= y).

    # Arguments
        x: Tensor or variable.
        y: Tensor or variable.

    # Returns
        A bool tensor.
    """
    return tf.less_equal(x, y)
seq2seq_helpers.py (project: DeepDeepParser, author: janmbuys)
def mask_decoder_only_shift(logit, thin_stack_head_next, transition_state_map,
                          logit_size, batch_size):
  """Ensures that if the stack is empty, has to GEN_STATE (shift transition)

  For each batch entry k:
    If thin_stack_head_next == 0, #alternatively, or 1.
      let logit[k][reduce_index] = -np.inf, 
    else don't change.
  """
  stack_is_empty_bool = tf.less_equal(thin_stack_head_next, 1) 
  stack_is_empty = tf.select(stack_is_empty_bool, 
                            tf.ones(tf.pack([batch_size]), dtype=tf.int32),
                            tf.zeros(tf.pack([batch_size]), dtype=tf.int32))
  stack_is_empty = tf.reshape(stack_is_empty, [-1, 1])

  # Sh and Re states are disallowed (but not root).
  state_is_disallowed_updates = tf.sparse_to_dense(
      tf.pack([data_utils.RE_STATE, data_utils.ARC_STATE]),
      tf.pack([data_utils.NUM_TR_STATES]), 1)
  logit_states = tf.gather(transition_state_map, tf.range(logit_size))
  state_is_disallowed = tf.gather(state_is_disallowed_updates, logit_states)
  state_is_disallowed = tf.reshape(state_is_disallowed, [1, -1])

  index_delta = tf.matmul(stack_is_empty, state_is_disallowed) # 1 if disallowed
  values = tf.pack([0, -np.inf])
  delta = tf.gather(values, index_delta)
  new_logit = logit + delta
  return new_logit
tensorflow_backend.py (project: InnerOuterRNN, author: Chemoinformatics)
def lesser_equal(x, y):
    '''Element-wise truth value of (x <= y).
    Returns a bool tensor.
    '''
    return tf.less_equal(x, y)
metrics.py (project: DAVIS-2016-Chanllege-Solution, author: tangyuhao)
def precision_recall_values(xvals, precision, recall, name=None):
    """Compute values on the precision/recall curve.

    Args:
      x: Python list of floats;
      precision: 1D Tensor decreasing.
      recall: 1D Tensor increasing.
    Return:
      list of precision values.
    """
    with ops.name_scope(name, "precision_recall_values",
                        [precision, recall]) as name:
        # Add bounds values to precision and recall.
        precision = tf.concat([[0.], precision, [0.]], axis=0)
        recall = tf.concat([[0.], recall, [1.]], axis=0)
        precision = tfe_math.cummax(precision, reverse=True)

        prec_values = []
        for x in xvals:
            mask = tf.less_equal(recall, x)
            val = tf.reduce_min(tf.boolean_mask(precision, mask))
            prec_values.append(val)
        return tf.tuple(prec_values)


# =========================================================================== #
# TF Extended metrics: old stuff!
# =========================================================================== #
box_list_ops.py (project: tensorflow, author: luyishisi)
def prune_completely_outside_window(boxlist, window, scope=None):
  """Prunes bounding boxes that fall completely outside of the given window.

  The function clip_to_window prunes bounding boxes that fall
  completely outside the window, but also clips any bounding boxes that
  partially overflow. This function does not clip partially overflowing boxes.

  Args:
    boxlist: a BoxList holding M_in boxes.
    window: a float tensor of shape [4] representing [ymin, xmin, ymax, xmax]
      of the window
    scope: name scope.

  Returns:
    pruned_corners: a tensor with shape [M_out, 4] where M_out <= M_in
    valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes
     in the input tensor.
  """
  with tf.name_scope(scope, 'PruneCompleteleyOutsideWindow'):
    y_min, x_min, y_max, x_max = tf.split(
        value=boxlist.get(), num_or_size_splits=4, axis=1)
    win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
    coordinate_violations = tf.concat([
        tf.greater_equal(y_min, win_y_max), tf.greater_equal(x_min, win_x_max),
        tf.less_equal(y_max, win_y_min), tf.less_equal(x_max, win_x_min)
    ], 1)
    valid_indices = tf.reshape(
        tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))), [-1])
    return gather(boxlist, valid_indices), valid_indices
tf_backend.py (project: odin_old, author: trungnt13)
def le(a, b):
    """a <= b"""
    return tf.less_equal(a, b)
loss_graphs.py (project: tensorrec, author: jfkirk)
def warp_loss(tf_prediction, tf_y, **kwargs):
    # TODO JK: implement WARP loss

    tf_positive_mask = tf.greater(tf_y, 0.0)
    tf_negative_mask = tf.less_equal(tf_y, 0.0)

    tf_positive_predictions = tf.boolean_mask(tf_prediction, tf_positive_mask) # noqa
    tf_negative_predictions = tf.boolean_mask(tf_prediction, tf_negative_mask) # noqa

