Python to_int32(): example source code from open-source projects

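tf.to_int32(x) is TensorFlow 1.x shorthand for tf.cast(x, tf.int32); later releases deprecate it in favor of the explicit cast. It truncates floats toward zero and maps booleans to 0/1, which is the behavior most of the snippets below rely on. A minimal sketch:

import tensorflow as tf  # TensorFlow 1.x

x = tf.constant([1.7, -2.9])
flags = tf.constant([True, False])
with tf.Session() as sess:
    print(sess.run(tf.to_int32(x)))      # [ 1 -2]: truncation toward zero
    print(sess.run(tf.to_int32(flags)))  # [1 0]: booleans become 0/1
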
network.py (project: tf-faster-rcnn, author: endernewton)
def _crop_pool_layer(self, bottom, rois, name):
    with tf.variable_scope(name) as scope:
      batch_ids = tf.squeeze(tf.slice(rois, [0, 0], [-1, 1], name="batch_id"), [1])
      # Get the normalized coordinates of bounding boxes
      bottom_shape = tf.shape(bottom)
      height = (tf.to_float(bottom_shape[1]) - 1.) * np.float32(self._feat_stride[0])
      width = (tf.to_float(bottom_shape[2]) - 1.) * np.float32(self._feat_stride[0])
      x1 = tf.slice(rois, [0, 1], [-1, 1], name="x1") / width
      y1 = tf.slice(rois, [0, 2], [-1, 1], name="y1") / height
      x2 = tf.slice(rois, [0, 3], [-1, 1], name="x2") / width
      y2 = tf.slice(rois, [0, 4], [-1, 1], name="y2") / height
      # Gradients won't reach rois anyway; stop_gradient just saves compute
      bboxes = tf.stop_gradient(tf.concat([y1, x1, y2, x2], axis=1))
      pre_pool_size = cfg.POOLING_SIZE * 2
      crops = tf.image.crop_and_resize(bottom, bboxes, tf.to_int32(batch_ids), [pre_pool_size, pre_pool_size], name="crops")

    return slim.max_pool2d(crops, [2, 2], padding='SAME')
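
Note the convention at work: tf.image.crop_and_resize expects boxes as normalized [y1, x1, y2, x2] coordinates plus an int32 box_ind vector mapping each box to an image in the batch, hence the tf.to_int32(batch_ids) cast above. A self-contained sketch of the same call, with illustrative shapes:

import tensorflow as tf

images = tf.random_uniform([2, 8, 8, 3])        # [batch, H, W, C]
boxes = tf.constant([[0.0, 0.0, 0.5, 0.5],      # normalized [y1, x1, y2, x2]
                     [0.25, 0.25, 1.0, 1.0]])
box_ind = tf.to_int32(tf.constant([0., 1.]))    # which batch image each box crops
crops = tf.image.crop_and_resize(images, boxes, box_ind, crop_size=[4, 4])
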
network.py (project: tf-faster-rcnn, author: endernewton)
def _anchor_target_layer(self, rpn_cls_score, name):
    with tf.variable_scope(name) as scope:
      rpn_labels, rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights = tf.py_func(
        anchor_target_layer,
        [rpn_cls_score, self._gt_boxes, self._im_info, self._feat_stride, self._anchors, self._num_anchors],
        [tf.float32, tf.float32, tf.float32, tf.float32],
        name="anchor_target")

      rpn_labels.set_shape([1, 1, None, None])
      rpn_bbox_targets.set_shape([1, None, None, self._num_anchors * 4])
      rpn_bbox_inside_weights.set_shape([1, None, None, self._num_anchors * 4])
      rpn_bbox_outside_weights.set_shape([1, None, None, self._num_anchors * 4])

      rpn_labels = tf.to_int32(rpn_labels, name="to_int32")
      self._anchor_targets['rpn_labels'] = rpn_labels
      self._anchor_targets['rpn_bbox_targets'] = rpn_bbox_targets
      self._anchor_targets['rpn_bbox_inside_weights'] = rpn_bbox_inside_weights
      self._anchor_targets['rpn_bbox_outside_weights'] = rpn_bbox_outside_weights

      self._score_summaries.update(self._anchor_targets)

    return rpn_labels
network.py (project: tf-faster-rcnn, author: endernewton)
def _proposal_target_layer(self, rois, roi_scores, name):
    with tf.variable_scope(name) as scope:
      rois, roi_scores, labels, bbox_targets, bbox_inside_weights, bbox_outside_weights = tf.py_func(
        proposal_target_layer,
        [rois, roi_scores, self._gt_boxes, self._num_classes],
        [tf.float32, tf.float32, tf.float32, tf.float32, tf.float32, tf.float32],
        name="proposal_target")

      rois.set_shape([cfg.TRAIN.BATCH_SIZE, 5])
      roi_scores.set_shape([cfg.TRAIN.BATCH_SIZE])
      labels.set_shape([cfg.TRAIN.BATCH_SIZE, 1])
      bbox_targets.set_shape([cfg.TRAIN.BATCH_SIZE, self._num_classes * 4])
      bbox_inside_weights.set_shape([cfg.TRAIN.BATCH_SIZE, self._num_classes * 4])
      bbox_outside_weights.set_shape([cfg.TRAIN.BATCH_SIZE, self._num_classes * 4])

      self._proposal_targets['rois'] = rois
      self._proposal_targets['labels'] = tf.to_int32(labels, name="to_int32")
      self._proposal_targets['bbox_targets'] = bbox_targets
      self._proposal_targets['bbox_inside_weights'] = bbox_inside_weights
      self._proposal_targets['bbox_outside_weights'] = bbox_outside_weights

      self._score_summaries.update(self._proposal_targets)

      return rois, roi_scores
memory.py (project: tefla, author: openAGI)
def get_hash_slots(self, query):
        """Gets hashed-to buckets for batch of queries.

        Args:
          query: 2-d Tensor of query vectors.

        Returns:
          A list of hashed-to buckets for each hash function.
        """

        binary_hash = [
            tf.less(tf.matmul(query, self.hash_vecs[i], transpose_b=True), 0)
            for i in xrange(self.num_libraries)]
        hash_slot_idxs = [
            tf.reduce_sum(
                tf.to_int32(binary_hash[i]) *
                tf.constant([[2 ** j for j in xrange(self.num_hashes)]],
                            dtype=tf.int32), 1)
            for i in xrange(self.num_libraries)]
        return hash_slot_idxs
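
Each boolean sign pattern becomes an integer bucket index by casting to 0/1 and dotting with powers of two. A sketch for a single hash library with three hash bits (values illustrative):

import tensorflow as tf

bits = tf.constant([[True, False, True]])               # sign bits for one query
powers = tf.constant([[1, 2, 4]], dtype=tf.int32)       # 2**0, 2**1, 2**2
bucket = tf.reduce_sum(tf.to_int32(bits) * powers, 1)   # -> [5]
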
losses.py (project: tefla, author: openAGI)
def cross_entropy_sequence_loss(logits, targets, sequence_length):
    """Calculates the per-example cross-entropy loss for a sequence of logits and
        masks out all losses passed the sequence length.

    Args:
        logits: Logits of shape `[T, B, vocab_size]`
        targets: Target classes of shape `[T, B]`
        sequence_length: An int32 tensor of shape `[B]` corresponding
           to the length of each input

    Returns:
        A tensor of shape [T, B] that contains the loss per example, per time step.
    """
    with tf.name_scope("cross_entropy_sequence_loss"):
        losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits, labels=targets)
        loss_mask = tf.sequence_mask(tf.to_int32(
            sequence_length), tf.to_int32(tf.shape(targets)[0]))
        losses = losses * tf.transpose(tf.to_float(loss_mask), [1, 0])

    return losses
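
tf.sequence_mask turns the int32 lengths into a [B, T] boolean mask; since the losses are time-major [T, B], the mask is cast to float and transposed before the multiply. In isolation:

import tensorflow as tf

lengths = tf.constant([2, 3])                       # B = 2 sequences
mask = tf.sequence_mask(tf.to_int32(lengths), 4)    # [B, T] booleans
mask_tb = tf.transpose(tf.to_float(mask), [1, 0])   # [T, B]: 1.0 inside, 0.0 past the end
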
data_augmentation.py (project: tefla, author: openAGI)
def random_image_scaling(image, label):
    """Randomly scales the images between 0.5 to 1.5 times the original size.

    Args:
      image: Training image to scale.
      label: Segmentation mask to scale.
    """
    scale = tf.random_uniform(
        [1], minval=0.5, maxval=1.5, dtype=tf.float32, seed=None)
    h_new = tf.to_int32(tf.multiply(tf.to_float(tf.shape(image)[0]), scale))
    w_new = tf.to_int32(tf.multiply(tf.to_float(tf.shape(image)[1]), scale))
    new_shape = tf.squeeze(tf.stack([h_new, w_new]), axis=1)
    image = tf.image.resize_images(image, new_shape)
    label = tf.image.resize_nearest_neighbor(
        tf.expand_dims(label, 0), new_shape)
    label = tf.squeeze(label, axis=0)

    return image, label
preprocessor.py (project: tefla, author: openAGI)
def random_image_scaling(self, image, label):
        """Randomly scales the images between 0.5 to 1.5 times the original size.

        Args:
          image: Training image to scale.
          label: Segmentation mask to scale.
        """
        scale = tf.random_uniform(
            [1], minval=0.5, maxval=1.5, dtype=tf.float32, seed=None)
        h_new = tf.to_int32(tf.multiply(tf.to_float(tf.shape(image)[0]), scale))
        w_new = tf.to_int32(tf.multiply(tf.to_float(tf.shape(image)[1]), scale))
        new_shape = tf.squeeze(tf.stack([h_new, w_new]), axis=1)
        image = tf.image.resize_images(image, new_shape)
        label = tf.image.resize_nearest_neighbor(
            tf.expand_dims(label, 0), new_shape)
        label = tf.squeeze(label, axis=0)

        return image, label
network.py (project: fcn, author: ilovin)
def build_loss(self):
        upsampled_batch = self.get_output('output_logits')
        annotation_batch = self.get_output('label')
        class_labels = list(range(cfg.NCLASSES))
        class_labels.append(255)  # 255 is the usual VOC-style "ignore" label
        print("class_labels:", class_labels)

        annotation_batch = tf.squeeze(annotation_batch, axis=3)
        annotation_batch = tf.to_int32(annotation_batch)
        valid_annotation_batch, valid_logits_batch = get_valid_logits_and_labels(
            logits_batch_tensor=upsampled_batch,
            annotation_batch_tensor=annotation_batch,
            class_labels=class_labels)

        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=valid_logits_batch,
                                                                      labels=valid_annotation_batch))

        # add regularizer
        if cfg.TRAIN.WEIGHT_DECAY > 0:
            regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
            loss = tf.add_n(regularization_losses) + loss

        return loss
nn.py (project: mean-teacher, author: CuriousAI)
def flip_randomly(inputs, horizontally, vertically, is_training, name=None):
    """Flip images randomly. Make separate flipping decision for each image.

    Args:
        inputs (4-D tensor): Input images (batch size, height, width, channels).
        horizontally (bool): If True, flip horizontally with 50% probability. Otherwise, don't.
        vertically (bool): If True, flip vertically with 50% probability. Otherwise, don't.
        is_training (bool): If False, no flip is performed.
        name: A name for the operation.
    """
    with tf.name_scope(name, "flip_randomly") as scope:
        batch_size, height, width, _ = tf.unstack(tf.shape(inputs))
        vertical_choices = (tf.random_uniform([batch_size], 0, 2, tf.int32) *
                            tf.to_int32(vertically) *
                            tf.to_int32(is_training))
        horizontal_choices = (tf.random_uniform([batch_size], 0, 2, tf.int32) *
                              tf.to_int32(horizontally) *
                              tf.to_int32(is_training))
        vertically_flipped = tf.reverse_sequence(inputs, vertical_choices * height, 1)
        both_flipped = tf.reverse_sequence(vertically_flipped, horizontal_choices * width, 2)
        return tf.identity(both_flipped, name=scope)
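
The per-image flips work because tf.reverse_sequence with a length of 0 leaves a row untouched while a length equal to the full extent reverses it; casting the bool flags with tf.to_int32 lets a False zero out every choice. A toy sketch along one axis:

import tensorflow as tf

x = tf.reshape(tf.range(6), [2, 3])  # rows [0 1 2] and [3 4 5]
# length 0 keeps a row as-is; length 3 (the row width) reverses it
flipped = tf.reverse_sequence(x, [0, 3], 1, 0)  # -> [[0 1 2], [5 4 3]]
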
cfr_net.py (project: cfrnet, author: clinicalml)
def _build_output_graph(self, rep, t, dim_in, dim_out, do_out, FLAGS):
        ''' Construct output/regression layers '''

        if FLAGS.split_output:

            i0 = tf.to_int32(tf.where(t < 1)[:,0])
            i1 = tf.to_int32(tf.where(t > 0)[:,0])

            rep0 = tf.gather(rep, i0)
            rep1 = tf.gather(rep, i1)

            y0, weights_out0, weights_pred0 = self._build_output(rep0, dim_in, dim_out, do_out, FLAGS)
            y1, weights_out1, weights_pred1 = self._build_output(rep1, dim_in, dim_out, do_out, FLAGS)

            y = tf.dynamic_stitch([i0, i1], [y0, y1])
            weights_out = weights_out0 + weights_out1
            weights_pred = weights_pred0 + weights_pred1
        else:
            h_input = tf.concat([rep, t], 1)  # TF1 argument order: values first, then axis
            y, weights_out, weights_pred = self._build_output(h_input, dim_in+1, dim_out, do_out, FLAGS)

        return y, weights_out, weights_pred
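
tf.where returns int64 row indices, cast here with tf.to_int32 because tf.dynamic_stitch requires int32; the stitch then inverts the two gathers, restoring the original batch order. A minimal sketch:

import tensorflow as tf

t = tf.constant([0., 1., 0., 1.])           # treatment indicator
x = tf.constant([10., 11., 12., 13.])
i0 = tf.to_int32(tf.where(t < 1)[:, 0])     # control rows -> [0, 2]
i1 = tf.to_int32(tf.where(t > 0)[:, 0])     # treated rows -> [1, 3]
merged = tf.dynamic_stitch([i0, i1], [tf.gather(x, i0), tf.gather(x, i1)])
# -> [10. 11. 12. 13.], original order restored
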
async_loader.py (project: shuttleNet, author: shiyemin)
def get_fix_offset(h, w, crop_height, crop_width):
    crop_offsets = []
    height_off = (h - crop_height) / 4
    width_off = (w - crop_width) / 4
    crop_offsets.append(tf.stack([0, 0]))
    crop_offsets.append(tf.stack([0, tf.to_int32(4 * width_off)]))
    crop_offsets.append(tf.stack([tf.to_int32(4 * height_off), 0]))
    crop_offsets.append(tf.stack([tf.to_int32(4 * height_off), tf.to_int32(4 * width_off)]))
    crop_offsets.append(tf.stack([tf.to_int32(2 * height_off), tf.to_int32(2 * width_off)]))
    # more fix crop
    crop_offsets.append(tf.stack([0, tf.to_int32(2 * width_off)]))
    crop_offsets.append(tf.stack([tf.to_int32(4 * height_off), tf.to_int32(2 * width_off)]))
    crop_offsets.append(tf.stack([tf.to_int32(2 * height_off), 0]))
    crop_offsets.append(tf.stack([tf.to_int32(2 * height_off), tf.to_int32(4 * width_off)]))
    crop_offsets.append(tf.stack([tf.to_int32(height_off), tf.to_int32(width_off)]))
    crop_offsets.append(tf.stack([tf.to_int32(height_off), tf.to_int32(3 * width_off)]))
    crop_offsets.append(tf.stack([tf.to_int32(3 * height_off), tf.to_int32(width_off)]))
    crop_offsets.append(tf.stack([tf.to_int32(3 * height_off), tf.to_int32(3 * width_off)]))

    crop_offsets = tf.stack(crop_offsets)
    return crop_offsets
data_pipeline.py (project: hdrnet, author: google)
def _parse_example(self, serialized):
    """Unpack a serialized example to Tensor."""
    feats = self._get_data_features()
    sz_feats = self._get_sz_features()
    for s in sz_feats:
      feats[s] = sz_feats[s]
    sample = tf.parse_single_example(serialized, features=feats)

    data = {}
    for f in self.FEATURES:
      s = tf.to_int32(sample[f+'_sz'])

      data[f] = tf.decode_raw(sample[f], self.dtypes[f], name='decode_{}'.format(f))
      data[f] = tf.reshape(data[f], s)

    return data
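
The stored *_sz feature carries each tensor's shape; tf.decode_raw only yields a flat buffer, so the shape is cast to int32 and fed to tf.reshape. A standalone sketch of the decode-and-reshape step (data is illustrative):

import numpy as np
import tensorflow as tf

raw = tf.constant(np.arange(6, dtype=np.float32).tobytes())
sz = tf.constant([2, 3], dtype=tf.int64)    # as it would come out of the example proto
flat = tf.decode_raw(raw, tf.float32)       # flat float32 buffer
shaped = tf.reshape(flat, tf.to_int32(sz))  # [2, 3] tensor
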
reader.py (project: neural_style_tensorflow, author: burness)
def preprocess(image, size, max_length):
    shape = tf.shape(image)
    size_t = tf.constant(size, tf.float64)
    height = tf.cast(shape[0], tf.float64)
    width = tf.cast(shape[1], tf.float64)

    cond_op = tf.less(width, height) if max_length else tf.less(height, width)

    new_height, new_width = tf.cond(
        cond_op, lambda: (size_t, (width * size_t) / height),
        lambda: ((height * size_t) / width, size_t))
    new_size = [tf.to_int32(new_height), tf.to_int32(new_width)]
    resized_image = tf.image.resize_images(image, new_size)
    normalised_image = resized_image - mean_pixel
    return normalised_image


# max_length: whether size dictates the longest or the shortest side (default: longest)
build_model.py (project: sequencing, author: SwordYork)
def cross_entropy_sequence_loss(logits, targets, sequence_length):
    with tf.name_scope('cross_entropy_sequence_loss'):
        total_length = tf.to_float(tf.reduce_sum(sequence_length))

        entropy_losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits, labels=targets)

        # Mask out the losses we don't care about
        loss_mask = tf.sequence_mask(
            tf.to_int32(sequence_length), tf.to_int32(tf.shape(targets)[0]))
        loss_mask = tf.transpose(tf.to_float(loss_mask), [1, 0])

        losses = entropy_losses * loss_mask
        # losses.shape: T * B
        # sequence_length: B
        total_loss_avg = tf.reduce_sum(losses) / total_length

        return total_loss_avg
feedback.py (project: sequencing, author: SwordYork)
def next_inputs(self, time, sample_ids=None, prev_finished=None):
        if sample_ids is None or self.teacher_rate > 0.:
            finished = tf.greater_equal(time + 1, self.sequence_length)
        else:
            finished = math_ops.logical_or(
                tf.greater_equal(time + 1, self.max_step),
                tf.equal(self.eos_id, sample_ids))

        if self.teacher_rate == 1. or (sample_ids is None):
            next_input_ids = self._input_tas.read(time)
            return finished, self.lookup(next_input_ids)

        if self.teacher_rate > 0.:
            # scheduled
            teacher_rates = tf.less_equal(
                tf.random_uniform(tf.shape(sample_ids), minval=0., maxval=1.),
                self.teacher_rate)
            teacher_rates = tf.to_int32(teacher_rates)

            next_input_ids = (teacher_rates * self._input_tas.read(time)
                              + (1 - teacher_rates) * sample_ids)
        else:
            next_input_ids = sample_ids

        return finished, self.lookup(next_input_ids)
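
Casting the Bernoulli draw to int32 reduces scheduled sampling to integer arithmetic: flag * teacher_id + (1 - flag) * sampled_id. A tiny sketch:

import tensorflow as tf

flag = tf.to_int32(tf.constant([True, False]))    # 1 = keep the teacher input
teacher = tf.constant([7, 8])
sampled = tf.constant([3, 4])
next_ids = flag * teacher + (1 - flag) * sampled  # -> [7, 4]
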
feedback.py (project: sequencing, author: SwordYork)
def sample(self, logits, time):
        rl_time_steps = tf.floordiv(tf.maximum(self.global_step_tensor -
                                               self.burn_in_step, 0),
                                    self.increment_step)
        start_rl_step = self.sequence_length - rl_time_steps

        next_input_ids = tf.cond(
            tf.greater_equal(time, self.max_sequence_length),
            lambda: tf.tile([self.eos_id], [self.batch_size]),
            lambda: self._input_tas.read(time))

        next_predicted_ids = tf.squeeze(tf.multinomial(logits, 1), axis=[-1])
        mask = tf.to_int32(time >= start_rl_step)

        return (1 - mask) * tf.to_int32(next_input_ids) + mask * tf.to_int32(
            next_predicted_ids)
attention.py (project: sequencing, author: SwordYork)
def __init__(self, query_size, keys, values, values_length,
                 name='attention'):
        self.attention_size = keys.get_shape().as_list()[-1]
        self.keys = keys
        self.values = values
        self.values_length = values_length
        self.query_trans = LinearOp(query_size, self.attention_size, name=name)

        with tf.variable_scope(name):
            self.v_att = tf.get_variable('v_att', shape=[self.attention_size],
                                         dtype=DTYPE)

        self.time_axis = 0 if TIME_MAJOR else 1

        # Replace all scores for padded inputs with tf.float32.min
        num_scores = tf.shape(self.keys)[self.time_axis]
        scores_mask = tf.sequence_mask(
            lengths=tf.to_int32(self.values_length),
            maxlen=tf.to_int32(num_scores),
            dtype=DTYPE)

        if TIME_MAJOR:
            scores_mask = tf.transpose(scores_mask)

        self.scores_mask = scores_mask
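
The class only precomputes the mask; downstream, padded positions are typically pushed to tf.float32.min before the softmax so they receive near-zero attention weight. One common way to apply such a mask (illustrative, not this project's exact code):

import tensorflow as tf

scores = tf.constant([[0.3, 0.9, 0.5]])                   # [B, T] attention scores
scores_mask = tf.sequence_mask([2], 3, dtype=tf.float32)  # [[1., 1., 0.]]
masked = scores * scores_mask + (1.0 - scores_mask) * tf.float32.min
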
test_nn_utils.py (project: neuralmonkey, author: ufal)
def test_keep_prob(self):
        """Counts dropped items and compare with the expectation"""

        var = tf.ones([10000])
        s = tf.Session()

        for kprob in [0.1, 0.7]:
            dropped_var = dropout(var, kprob, tf.constant(True))
            dropped_size = tf.reduce_sum(
                tf.to_int32(tf.equal(dropped_var, 0.0)))

            dsize = s.run(dropped_size)

            expected_dropped_size = 10000 * (1 - kprob)

            self.assertTrue(np.isclose(expected_dropped_size, dsize, atol=500))
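
Casting an elementwise comparison with tf.to_int32 and summing is the counting idiom the test relies on. In isolation:

import tensorflow as tf

v = tf.constant([0.0, 1.5, 0.0, 2.0])
num_zeros = tf.reduce_sum(tf.to_int32(tf.equal(v, 0.0)))  # -> 2
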

