Python examples of tf.scatter_nd() from open-source projects

prior.py (project: attend_infer_repeat, author: akosiorek)
def masked_apply(tensor, op, mask):
    """Applies `op` to tensor only at locations indicated by `mask` and sets the rest to zero.

    Similar to `tensor = tf.where(mask, op(tensor), tf.zeros_like(tensor))`, but it behaves correctly
    when `op(tensor)` contains NaN or inf at the masked-out locations, whereas `tf.where` does not.

    :param tensor: tf.Tensor
    :param op: tf.Op
    :param mask: tf.Tensor with dtype == bool
    :return: tf.Tensor
    """
    chosen = tf.boolean_mask(tensor, mask)
    applied = op(chosen)
    idx = tf.to_int32(tf.where(mask))
    result = tf.scatter_nd(idx, applied, tf.shape(tensor))
    return result
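A minimal usage sketch (hypothetical values, assuming TF 1.x): applying tf.log only where the input is positive, so the zero entries stay zero instead of becoming -inf.

import tensorflow as tf

x = tf.constant([0.0, 1.0, 4.0])
mask = x > 0
y = masked_apply(x, tf.log, mask)  # tf.log(0.) would be -inf; those entries stay 0

with tf.Session() as sess:
    print(sess.run(y))  # [0.  0.  1.3862944]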
impute.py (project: aboleth, author: data61)
def _impute2D(self, X_2D):
        r"""Mean impute a rank 2 tensor."""
        # Fill zeros in for missing data initially
        data_zeroed_missing_tf = X_2D * self.real_val_mask

        # Sum the real values in each column
        col_tot = tf.reduce_sum(data_zeroed_missing_tf, 0)

        # Divide column totals by the number of non-nan values
        num_values_col = tf.reduce_sum(self.real_val_mask, 0)
        num_values_col = tf.maximum(num_values_col,
                                    tf.ones(tf.shape(num_values_col)))
        col_nan_means = tf.div(col_tot, num_values_col)

        # Make a vector of the impute values for each missing point
        imputed_vals = tf.gather(col_nan_means, self.missing_ind[:, 1])

        # Fill the imputed values into the data tensor of zeros
        shape = tf.cast(tf.shape(data_zeroed_missing_tf), dtype=tf.int64)
        missing_imputed = tf.scatter_nd(self.missing_ind, imputed_vals, shape)

        X_with_impute = data_zeroed_missing_tf + missing_imputed

        return X_with_impute
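The fill pattern above can be reproduced standalone. A hedged sketch with hypothetical toy stand-ins for the class attributes (real_val_mask, missing_ind):

import tensorflow as tf

X = tf.constant([[1.0, 4.0], [3.0, 0.0]])                  # element (1, 1) is missing
real_val_mask = tf.constant([[1.0, 1.0], [1.0, 0.0]])      # 1 = observed, 0 = missing
missing_ind = tf.constant([[1, 1]], dtype=tf.int64)

col_means = (tf.reduce_sum(X * real_val_mask, 0) /
             tf.maximum(tf.reduce_sum(real_val_mask, 0), 1.0))
imputed_vals = tf.gather(col_means, missing_ind[:, 1])     # column mean per missing cell
shape = tf.shape(X, out_type=tf.int64)
X_filled = X * real_val_mask + tf.scatter_nd(missing_ind, imputed_vals, shape)
# X_filled == [[1., 4.], [3., 4.]]: column 1's only observed value is 4.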
impute.py (project: aboleth, author: data61)
def _impute2D(self, X_2D):
        r"""Randomly impute a rank 2 tensor."""
        # Fill zeros in for missing data initially
        data_zeroed_missing_tf = X_2D * self.real_val_mask

        # Draw a random value for each column from its fitted normal
        col_draws = [n.sample(seed=next(seedgen)) for n in self.normal_array]
        # Make a vector of the impute values for each missing point
        imputed_vals = tf.gather(col_draws, self.missing_ind[:, 1])

        # Fill the imputed values into the data tensor of zeros
        shape = tf.cast(tf.shape(data_zeroed_missing_tf), dtype=tf.int64)
        missing_imputed = tf.scatter_nd(self.missing_ind, imputed_vals, shape)

        X_with_impute = data_zeroed_missing_tf + missing_imputed

        return X_with_impute
matting.py (project: Deep-Image-Matting, author: Joker316701882)
def unpool(pool, ind, ksize=[1, 2, 2, 1], scope='unpool'):

    with tf.variable_scope(scope):
        input_shape = pool.get_shape().as_list()
        output_shape = (input_shape[0], input_shape[1] * ksize[1], input_shape[2] * ksize[2], input_shape[3])

        flat_input_size = np.prod(input_shape)
        flat_output_shape = [output_shape[0], output_shape[1] * output_shape[2] * output_shape[3]]

        pool_ = tf.reshape(pool, [flat_input_size])
        batch_range = tf.reshape(tf.range(output_shape[0], dtype=ind.dtype), shape=[input_shape[0], 1, 1, 1])
        b = tf.ones_like(ind) * batch_range
        b = tf.reshape(b, [flat_input_size, 1])
        ind_ = tf.reshape(ind, [flat_input_size, 1])
        ind_ = tf.concat([b, ind_], 1)

        ret = tf.scatter_nd(ind_, pool_, shape=flat_output_shape)
        ret = tf.reshape(ret, output_shape)
        return ret
utilities.py (project: bgsCNN, author: SaoYan)
def unpool(pool, ind, shape, ksize=[1, 2, 2, 1], scope=None):
    with tf.name_scope(scope):
        input_shape =  tf.shape(pool)
        output_shape = [input_shape[0], input_shape[1] * ksize[1], input_shape[2] * ksize[2], input_shape[3]]
        flat_input_size = tf.cumprod(input_shape)[-1]
        flat_output_shape = tf.stack([output_shape[0], output_shape[1] * output_shape[2] * output_shape[3]])
        pool_ = tf.reshape(pool, tf.stack([flat_input_size]))
        batch_range = tf.reshape(tf.range(tf.cast(output_shape[0], tf.int64), dtype=ind.dtype),
                                shape=tf.stack([input_shape[0], 1, 1, 1]))
        b = tf.ones_like(ind) * batch_range
        b = tf.reshape(b, tf.stack([flat_input_size, 1]))
        ind_ = tf.reshape(ind, tf.stack([flat_input_size, 1]))
        ind_ = tf.concat([b, ind_], 1)
        ret = tf.scatter_nd(ind_, pool_, shape=tf.cast(flat_output_shape, tf.int64))
        ret = tf.reshape(ret, tf.stack(output_shape))
        ret = tf.reshape(ret, shape=shape)
        return ret
misc.py (project: GPflow, author: GPflow)
def vec_to_tri(vectors, N):
    """
    Takes a D x M tensor `vectors` and maps it to a D x N x N tensor, where
    the lower triangle of each N x N matrix is constructed by unpacking each
    M-vector.

    Native TensorFlow version of a custom op by Mark van der Wilk.

    def int_shape(x):
        return list(map(int, x.get_shape()))

    D, M = int_shape(vectors)
    N = int(np.floor(0.5 * np.sqrt(M * 8. + 1.) - 0.5))
    # Check M is a valid triangle number
    assert((N * (N + 1)) == (2 * M))
    """
    indices = list(zip(*np.tril_indices(N)))
    indices = tf.constant([list(i) for i in indices], dtype=tf.int64)

    def vec_to_tri_vector(vector):
        return tf.scatter_nd(indices=indices, shape=[N, N], updates=vector)

    return tf.map_fn(vec_to_tri_vector, vectors)
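For instance (a small sketch), packing two 3-vectors into 2 x 2 lower-triangular matrices:

vecs = tf.constant([[1.0, 2.0, 3.0],
                    [4.0, 5.0, 6.0]])   # D = 2, M = 3, so N = 2
tris = vec_to_tri(vecs, 2)
# tris[0] == [[1., 0.],
#             [2., 3.]]  (np.tril_indices fills (0,0), (1,0), (1,1) in order)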
op_builders.py (project: nengo_dl, author: nengo)
def build_step(self, signals):
        if self.len_match:
            super(SparseDotIncBuilder, self).build_step(signals)
            return

        A = signals.gather(self.A_data)
        X = signals.gather(self.X_data)

        assert A.get_shape()[0] == self.sparse_indices.get_shape()[0]

        # approach 1: using sparse_tensor_dense_matmul
        dot = gen_sparse_ops._sparse_tensor_dense_mat_mul(
            self.sparse_indices, A, self.A_shape, X)

        # approach 2: matmul(a_is_sparse)
        # sparse_A = tf.scatter_nd(self.sparse_indices, A, self.A_shape)
        # dot = tf.matmul(sparse_A, X, a_is_sparse=self.is_sparse)

        dot.set_shape(self.Y_data.shape + (signals.minibatch_size,))

        signals.scatter(self.Y_data, dot, mode=self.mode)
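A hedged standalone sketch of the commented-out "approach 2": densify the sparse matrix with tf.scatter_nd, then give tf.matmul the a_is_sparse hint (toy values, not the builder's actual signals):

indices = tf.constant([[0, 0], [1, 2]], dtype=tf.int64)    # nonzero positions of A
vals = tf.constant([1.0, 2.0])
dense_A = tf.scatter_nd(indices, vals, tf.constant([2, 3], dtype=tf.int64))
X = tf.ones([3, 4])
dot = tf.matmul(dense_A, X, a_is_sparse=True)              # shape [2, 4]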
expert_utils.py (project: tensor2tensor, author: tensorflow)
def restore(self, x):
    """Add padding back to the given tensor.

    Args:
      x (tf.Tensor): of shape [dim_compressed,...]

    Returns:
      a tensor of shape [dim_origin,...] with dim_origin >= dim_compressed. The
      dim is restored from the original reference tensor.
    """
    with tf.name_scope("pad_reduce/restore"):
      x = tf.scatter_nd(
          indices=self.nonpad_ids,
          updates=x,
          shape=tf.concat([self.dim_origin, tf.shape(x)[1:]], axis=0),
      )
    return x
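restore is the inverse of the remover's gather step. A hedged sketch of the gather/scatter round trip with hypothetical names (pad_mask, nonpad_ids):

x = tf.constant([[1.0], [0.0], [2.0], [0.0]])   # all-zero rows are padding
pad_mask = tf.reduce_sum(tf.abs(x), axis=1)
nonpad_ids = tf.to_int32(tf.where(pad_mask > 0))
compressed = tf.gather_nd(x, nonpad_ids)        # [[1.], [2.]]
restored = tf.scatter_nd(
    indices=nonpad_ids,
    updates=compressed,
    shape=tf.concat([tf.shape(x)[:1], tf.shape(compressed)[1:]], axis=0))
# restored == x: the padded rows come back as zeros.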
WhatWhereAutoencoder.py (project: Tensorflow_WhatWhereAutoencoder, author: yselivonchyk)
def unpool(net, mask, stride):
  assert mask is not None
  with tf.name_scope('UnPool2D'):
    ksize = [1, stride, stride, 1]
    input_shape = net.get_shape().as_list()
    # calculate the new shape
    output_shape = (input_shape[0], input_shape[1] * ksize[1], input_shape[2] * ksize[2], input_shape[3])
    # calculate indices for batch, height, width and feature maps
    one_like_mask = tf.ones_like(mask)
    batch_range = tf.reshape(tf.range(output_shape[0], dtype=tf.int64), shape=[input_shape[0], 1, 1, 1])
    b = one_like_mask * batch_range
    y = mask // (output_shape[2] * output_shape[3])
    x = mask % (output_shape[2] * output_shape[3]) // output_shape[3]
    feature_range = tf.range(output_shape[3], dtype=tf.int64)
    f = one_like_mask * feature_range
    # transpose indices & reshape update values to one dimension
    updates_size = tf.size(net)
    indices = tf.transpose(tf.reshape(tf.stack([b, y, x, f]), [4, updates_size]))
    values = tf.reshape(net, [updates_size])
    ret = tf.scatter_nd(indices, values, output_shape)
    return ret
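A hedged usage sketch pairing this with tf.nn.max_pool_with_argmax (which, in many TF 1.x builds, is only registered on GPU):

net = tf.random_normal([1, 4, 4, 3])
pooled, mask = tf.nn.max_pool_with_argmax(
    net, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
restored = unpool(pooled, mask, stride=2)   # back to [1, 4, 4, 3]; positions
                                            # that were not the argmax are zero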
network_utils.py (project: TensorFlow_DCIGN, author: yselivonchyk)
def unpool(net, mask, stride=2):
  assert mask is not None
  with tf.name_scope('UnPool2D'):
    ksize = [1, stride, stride, 1]
    input_shape = net.get_shape().as_list()
    # calculate the new shape
    output_shape = (input_shape[0], input_shape[1] * ksize[1], input_shape[2] * ksize[2], input_shape[3])
    # calculate indices for batch, height, width and feature maps
    one_like_mask = tf.ones_like(mask)
    batch_range = tf.reshape(tf.range(output_shape[0], dtype=tf.int64), shape=[input_shape[0], 1, 1, 1])
    b = one_like_mask * batch_range
    y = mask // (output_shape[2] * output_shape[3])
    x = mask % (output_shape[2] * output_shape[3]) // output_shape[3]
    feature_range = tf.range(output_shape[3], dtype=tf.int64)
    f = one_like_mask * feature_range
    # transpose indices & reshape update values to one dimension
    updates_size = tf.size(net)
    indices = tf.transpose(tf.reshape(tf.stack([b, y, x, f]), [4, updates_size]))
    values = tf.reshape(net, [updates_size])
    ret = tf.scatter_nd(indices, values, output_shape)
    return ret
effects.py (project: py-noisemaker, author: aayars)
def wormhole(tensor, shape, kink, input_stride, alpha=1.0):
    """
    Apply per-pixel field flow. Non-iterative.

    :param Tensor tensor:
    :param list[int] shape:
    :param float kink: Path twistiness
    :param float input_stride: Maximum pixel offset
    :param float alpha: Blend strength of the effect over the input tensor
    :return: Tensor
    """

    height, width, channels = shape

    values = value_map(tensor, shape)

    degrees = values * 360.0 * math.radians(1) * kink
    # stride = values * height * input_stride
    stride = height * input_stride

    x_index = tf.cast(row_index(shape), tf.float32)
    y_index = tf.cast(column_index(shape), tf.float32)

    x_offset = (tf.cos(degrees) + 1) * stride
    y_offset = (tf.sin(degrees) + 1) * stride

    x = tf.cast(x_index + x_offset, tf.int32) % width
    y = tf.cast(y_index + y_offset, tf.int32) % height

    luminosity = tf.square(tf.reshape(values, [height, width, 1]))

    out = normalize(tf.scatter_nd(offset_index(y, height, x, width), tensor * luminosity, tf.shape(tensor)))

    return blend(tensor, tf.sqrt(out), alpha)
impute.py (project: aboleth, author: data61)
def _impute2D(self, X_2D):
        r"""Randomly impute a rank 2 tensor.

        Parameters
        ----------
        X_2D : Tensor
            a rank 2 Tensor with missing data
        scalars : Tensor 1 x D
            these values are filled into the missing elements (per column)

        Returns
        -------
        X_imputed : Tensor
            a rank 2 Tensor with imputed data

        """
        # Fill zeros in for missing data initially
        data_zeroed_missing = X_2D * self.real_val_mask

        # Make a vector of the impute values for each missing point
        imputed_vals = tf.gather(self.impute_scalars[0, :],
                                 self.missing_ind[:, 1])

        # Fill the imputed values into the data tensor of zeros
        shape = tf.cast(tf.shape(data_zeroed_missing), dtype=tf.int64)
        missing_imputed = tf.scatter_nd(self.missing_ind, imputed_vals, shape)

        X_with_impute = data_zeroed_missing + missing_imputed

        return X_with_impute
impute.py (project: aboleth, author: data61)
def _impute2D(self, X_2D):
        r"""Impute a rank 2 tensor with draws from normal distributions.

        Parameters
        ----------
        X_2D : Tensor
            a rank 2 Tensor with missing data

        Returns
        -------
        X_imputed : Tensor
            a rank 2 Tensor with imputed data

        """
        # Fill zeros in for missing data initially
        data_zeroed_missing = X_2D * self.real_val_mask

        # Draw a random value for each column from its normal distribution
        col_draws = tf.transpose(self.normal.sample(seed=next(seedgen)))
        # Make a vector of the impute values for each missing point
        imputed_vals = tf.gather(col_draws, self.missing_ind[:, 1])[:, 0]

        # Fill the imputed values into the data tensor of zeros
        shape = tf.cast(tf.shape(data_zeroed_missing), dtype=tf.int64)
        missing_imputed = tf.scatter_nd(self.missing_ind, imputed_vals, shape)

        X_with_impute = data_zeroed_missing + missing_imputed

        return X_with_impute
tensors.py (project: antgo, author: jianzfb)
def pad_axis(x, offset, size, axis=0, name=None):
    """Pad a tensor on an axis, with a given offset and output size.
    The tensor is padded with zeros (i.e. CONSTANT mode). Note that if
    `size` is smaller than the existing size + `offset`, the output tensor's
    dimension on `axis` will be the latter.

    Args:
      x: Tensor to pad;
      offset: Offset to add on the dimension chosen;
      size: Final size of the dimension.
    Return:
      Padded tensor whose dimension on `axis` is `size`, or greater if
      the input vector was larger.
    """
    with tf.name_scope(name, 'pad_axis'):
        shape = get_shape(x)
        rank = len(shape)
        # Padding description.
        new_size = tf.maximum(size-offset-shape[axis], 0)
        pad1 = tf.stack([0]*axis + [offset] + [0]*(rank-axis-1))
        pad2 = tf.stack([0]*axis + [new_size] + [0]*(rank-axis-1))
        paddings = tf.stack([pad1, pad2], axis=1)
        x = tf.pad(x, paddings, mode='CONSTANT')
        # Reshape, to get fully defined shape if possible.
        # TODO: fix with tf.slice
        shape[axis] = size
        x = tf.reshape(x, tf.stack(shape))
        return x


# def select_at_index(idx, val, t):
#     """Return a tensor.
#     """
#     idx = tf.expand_dims(tf.expand_dims(idx, 0), 0)
#     val = tf.expand_dims(val, 0)
#     t = t + tf.scatter_nd(idx, val, tf.shape(t))
#     return t
builder.py (project: Super_TF, author: Dhruv-Mohan)
def Unpool_layer(self, pool, ind, k_size=[1, 2, 2, 1]):
        # https://github.com/tensorflow/tensorflow/issues/2169
        """
           Unpooling layer after max_pool_with_argmax.
           Args:
               pool:   max pooled output tensor
               ind:      argmax indices
               k_size:     k_size is the same as for the pool
           Return:
               unpool:    unpooling tensor
        """
        with tf.name_scope('Unpool'):
            input_shape =  tf.shape(pool)
            input_shape_aslist = pool.get_shape().as_list()
            output_shape = tf.stack([input_shape[0], input_shape[1] * k_size[1], input_shape[2] * k_size[2], input_shape[3]])
            output_shapeaslist = [-1,  input_shape_aslist[1]* k_size[1] ,  input_shape_aslist[2]  * k_size[2], input_shape_aslist[3]]

            pool_ = tf.reshape(pool, [input_shape_aslist[1] * input_shape_aslist[2] * input_shape_aslist[3]])
            batch_range = tf.reshape(tf.range(tf.cast(input_shape[0], tf.int64), dtype=ind.dtype), 
                                              shape=tf.stack([input_shape[0], 1, 1, 1]))
            b = tf.ones_like(ind) * batch_range
            b = tf.reshape(b, tf.stack([ input_shape_aslist[1] * input_shape_aslist[2] * input_shape_aslist[3], 1]))
            ind_ = tf.reshape(ind, tf.stack( [input_shape_aslist[1] * input_shape_aslist[2] * input_shape_aslist[3], 1]))
            ind_ = tf.concat([b, ind_], 1)
            ret = tf.scatter_nd(ind_, pool_, shape=tf.cast([input_shape[0], output_shapeaslist[1] * output_shapeaslist[2] * output_shapeaslist[3] ], tf.int64))
            ret = tf.reshape(ret, [-1, output_shapeaslist[1], output_shapeaslist[2], output_shapeaslist[3]])
            return ret
tensorlfowapi.py (project: SSD_tensorflow_VOC, author: LevinJ)
def test_scatter_nd():
    indices = tf.constant([[3]])
    updates = tf.constant([[9,10,11,12]])
    shape = tf.constant([8,4])
    scatter = tf.scatter_nd(indices, updates, shape)

    return scatter
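Evaluating this places the update row at index 3 of an 8 x 4 zero tensor:

with tf.Session() as sess:
    print(sess.run(test_scatter_nd()))
# [[ 0  0  0  0]
#  [ 0  0  0  0]
#  [ 0  0  0  0]
#  [ 9 10 11 12]
#  [ 0  0  0  0]
#  ... (remaining rows are zero)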
tensors.py (project: SSD_tensorflow_VOC, author: LevinJ)
def pad_axis(x, offset, size, axis=0, name=None):
    """Pad a tensor on an axis, with a given offset and output size.
    The tensor is padded with zeros (i.e. CONSTANT mode). Note that if
    `size` is smaller than the existing size + `offset`, the output tensor's
    dimension on `axis` will be the latter.

    Args:
      x: Tensor to pad;
      offset: Offset to add on the dimension chosen;
      size: Final size of the dimension.
    Return:
      Padded tensor whose dimension on `axis` is `size`, or greater if
      the input vector was larger.
    """
    with tf.name_scope(name, 'pad_axis'):
        shape = get_shape(x)
        rank = len(shape)
        # Padding description.
        new_size = tf.maximum(size-offset-shape[axis], 0)
        pad1 = tf.stack([0]*axis + [offset] + [0]*(rank-axis-1))
        pad2 = tf.stack([0]*axis + [new_size] + [0]*(rank-axis-1))
        paddings = tf.stack([pad1, pad2], axis=1)
        x = tf.pad(x, paddings, mode='CONSTANT')
        # Reshape, to get fully defined shape if possible.
        # TODO: fix with tf.slice
        shape[axis] = size
        x = tf.reshape(x, tf.stack(shape))
        return x


# def select_at_index(idx, val, t):
#     """Return a tensor.
#     """
#     idx = tf.expand_dims(tf.expand_dims(idx, 0), 0)
#     val = tf.expand_dims(val, 0)
#     t = t + tf.scatter_nd(idx, val, tf.shape(t))
#     return t
ssd.py (project: SSD_tensorflow_VOC, author: LevinJ)
def __match_no_miss(self,gt_anchor_labels,gt_anchor_bboxes,gt_anchor_scores,jaccard,gt_labels,gt_bboxes, num_anchors):
        #make sure every ground truth box can be matched to at least one anchor box
        max_inds = tf.cast(tf.argmax(jaccard, axis=1),tf.int32)
        def cond(i,gt_anchors_labels,gt_anchors_bboxes,gt_anchors_scores):
            r = tf.less(i, tf.shape(gt_labels)[0])
            return r
        def body(i,gt_anchors_labels,gt_anchors_bboxes,gt_anchors_scores):

            # update gt_anchors_labels
            updates = tf.reshape(gt_labels[i], [-1])
            indices = tf.reshape(max_inds[i],[1,-1])
            shape = tf.reshape(num_anchors,[-1])


            new_labels = tf.scatter_nd(indices, updates, shape)
            new_mask = tf.cast(new_labels, tf.bool)
            gt_anchors_labels = tf.where(new_mask, new_labels, gt_anchors_labels)

            #update gt_anchors_bboxes
            updates = tf.reshape(gt_bboxes[i], [1,-1])
            indices = tf.reshape(max_inds[i],[1,-1])
            shape = tf.shape(gt_anchors_bboxes)
            new_bboxes = tf.scatter_nd(indices, updates, shape)
            gt_anchors_bboxes = tf.where(new_mask, new_bboxes, gt_anchors_bboxes)

            #update gt_anchors_scores
            updates = tf.reshape(jaccard[i, max_inds[i]], [-1])
            indices = tf.reshape(max_inds[i],[1,-1])
            shape = tf.reshape(num_anchors,[-1])
            new_scores = tf.scatter_nd(indices, updates, shape)
            gt_anchors_scores = tf.where(new_mask, new_scores, gt_anchors_scores)
            return [i+1,gt_anchors_labels,gt_anchors_bboxes,gt_anchors_scores]


        i = 0
        [i,gt_anchor_labels,gt_anchor_bboxes,gt_anchor_scores] = tf.while_loop(cond, body,[i,gt_anchor_labels,gt_anchor_bboxes,gt_anchor_scores])

        return gt_anchor_labels,gt_anchor_bboxes,gt_anchor_scores
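The loop body relies on a scatter-then-where pattern to update a single row of each tensor. A hedged sketch of that pattern in isolation (toy values):

labels = tf.constant([100, 100, 100, 100])
new_labels = tf.scatter_nd(tf.constant([[2]]), tf.constant([7]), tf.constant([4]))
update_mask = tf.cast(new_labels, tf.bool)            # True only at index 2
labels = tf.where(update_mask, new_labels, labels)    # [100, 100, 7, 100]
# Note: casting the scattered tensor to bool means the pattern silently
# skips the update whenever the new value is 0.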
seq2seq_utils.py (project: tefla, author: openAGI)
def next_inputs(self, time, outputs, state, sample_ids, name=None):
        with tf.name_scope(name, "ScheduledEmbeddingTrainingHelperSample",
                           [time, outputs, state, sample_ids]):
            (finished, base_next_inputs, state) = (
                super(ScheduledEmbeddingTrainingHelper, self).next_inputs(
                    time=time,
                    outputs=outputs,
                    state=state,
                    sample_ids=sample_ids,
                    name=name))

            def maybe_sample():
                """Perform scheduled sampling."""
                where_sampling = tf.cast(
                    tf.where(sample_ids > -1), tf.int32)
                where_not_sampling = tf.cast(
                    tf.where(sample_ids <= -1), tf.int32)
                where_sampling_flat = tf.reshape(where_sampling, [-1])
                where_not_sampling_flat = tf.reshape(
                    where_not_sampling, [-1])
                sample_ids_sampling = tf.gather(
                    sample_ids, where_sampling_flat)
                inputs_not_sampling = tf.gather(
                    base_next_inputs, where_not_sampling_flat)
                sampled_next_inputs = self._embedding_fn(sample_ids_sampling)
                base_shape = tf.shape(base_next_inputs)
                return (tf.scatter_nd(indices=where_sampling,
                                      updates=sampled_next_inputs,
                                      shape=base_shape)
                        + tf.scatter_nd(indices=where_not_sampling,
                                        updates=inputs_not_sampling,
                                        shape=base_shape))

            all_finished = tf.reduce_all(finished)
            next_inputs = tf.cond(
                all_finished, lambda: base_next_inputs, maybe_sample)
            return (finished, next_inputs, state)
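The two scatter_nd calls merge two disjoint row partitions back into one batch. A hedged sketch of that merge with toy values:

base = tf.constant([[1.0], [2.0], [3.0], [4.0]])
sampled_rows = tf.constant([[0], [2]])           # rows replaced by sampled inputs
kept_rows = tf.constant([[1], [3]])              # rows kept from base_next_inputs
sampled_vals = tf.constant([[10.0], [30.0]])
kept_vals = tf.gather_nd(base, kept_rows)
merged = (tf.scatter_nd(sampled_rows, sampled_vals, tf.shape(base)) +
          tf.scatter_nd(kept_rows, kept_vals, tf.shape(base)))
# merged == [[10.], [2.], [30.], [4.]]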
process_builders.py (project: nengo_dl, author: nengo)
def build_step(self, signals):
        input = signals.gather(self.input_data)
        input = tf.reshape(input, (self.n_ops, -1))

        state = signals.gather(self.state_sig)

        # compute output
        if self.C is None:
            output = tf.zeros_like(input)
        else:
            output = state * self.C
            output = tf.reshape(
                output,
                (self.n_ops, -1, signals.minibatch_size * self.signal_d))
            output = tf.reduce_sum(output, axis=1)

        if self.D is not None:
            output += self.D * input

        signals.scatter(self.output_data, output)

        # update state
        r = gen_sparse_ops._sparse_tensor_dense_mat_mul(
            self.A_indices, self.A, self.A_shape, state)

        with tf.control_dependencies([output]):
            state = r + tf.scatter_nd(self.offsets, input,
                                      self.state_sig.shape)
            # TODO: tensorflow does not yet support sparse_tensor_dense_add
            # on the GPU
            # state = gen_sparse_ops._sparse_tensor_dense_add(
            #     self.offsets, input, self.state_sig.shape, r)
        state.set_shape(self.state_sig.shape)

        signals.mark_gather(self.input_data)
        signals.mark_gather(self.state_sig)
        signals.scatter(self.state_sig, state)
tensors.py (project: Deep-Fashion, author: TomPyonsuke)
def pad_axis(x, offset, size, axis=0, name=None):
    """Pad a tensor on an axis, with a given offset and output size.
    The tensor is padded with zeros (i.e. CONSTANT mode). Note that if
    `size` is smaller than the existing size + `offset`, the output tensor's
    dimension on `axis` will be the latter.

    Args:
      x: Tensor to pad;
      offset: Offset to add on the dimension chosen;
      size: Final size of the dimension.
    Return:
      Padded tensor whose dimension on `axis` is `size`, or greater if
      the input vector was larger.
    """
    with tf.name_scope(name, 'pad_axis'):
        shape = get_shape(x)
        rank = len(shape)
        # Padding description.
        new_size = tf.maximum(size-offset-shape[axis], 0)
        pad1 = tf.stack([0]*axis + [offset] + [0]*(rank-axis-1))
        pad2 = tf.stack([0]*axis + [new_size] + [0]*(rank-axis-1))
        paddings = tf.stack([pad1, pad2], axis=1)
        x = tf.pad(x, paddings, mode='CONSTANT')
        # Reshape, to get fully defined shape if possible.
        # TODO: fix with tf.slice
        shape[axis] = size
        x = tf.reshape(x, tf.stack(shape))
        return x


# def select_at_index(idx, val, t):
#     """Return a tensor.
#     """
#     idx = tf.expand_dims(tf.expand_dims(idx, 0), 0)
#     val = tf.expand_dims(val, 0)
#     t = t + tf.scatter_nd(idx, val, tf.shape(t))
#     return t
layers.py (project: TF-SegNet, author: mathildor)
def unpool_with_argmax(pool, ind, name = None, ksize=[1, 2, 2, 1]):

    """
       Unpooling layer after max_pool_with_argmax.
       Args:
           pool:   max pooled output tensor
           ind:      argmax indices
           ksize:     ksize is the same as for the pool
       Return:
           unpool:    unpooling tensor
    """
    with tf.variable_scope(name):
        input_shape = pool.get_shape().as_list()
        output_shape = (input_shape[0], input_shape[1] * ksize[1], input_shape[2] * ksize[2], input_shape[3])

        flat_input_size = np.prod(input_shape)
        flat_output_shape = [output_shape[0], output_shape[1] * output_shape[2] * output_shape[3]]

        pool_ = tf.reshape(pool, [flat_input_size])
        batch_range = tf.reshape(tf.range(output_shape[0], dtype=ind.dtype), shape=[input_shape[0], 1, 1, 1])
        b = tf.ones_like(ind) * batch_range
        b = tf.reshape(b, [flat_input_size, 1])
        ind_ = tf.reshape(ind, [flat_input_size, 1])
        ind_ = tf.concat([b, ind_], 1)

        ret = tf.scatter_nd(ind_, pool_, shape=flat_output_shape)
        ret = tf.reshape(ret, output_shape)
        return ret
common_attention.py (project: tensor2tensor, author: tensorflow)
def scatter_blocks_2d(x, indices, shape):
  """scatters blocks from x into shape with indices."""
  x_shape = common_layers.shape_list(x)
  # [length, batch, heads, dim]
  x_t = tf.transpose(
      tf.reshape(x, [x_shape[0], x_shape[1], -1, x_shape[-1]]), [2, 0, 1, 3])
  x_t_shape = common_layers.shape_list(x_t)
  indices = tf.reshape(indices, [-1, 1])
  scattered_x = tf.scatter_nd(indices, x_t, x_t_shape)
  scattered_x = tf.transpose(scattered_x, [1, 2, 0, 3])
  return tf.reshape(scattered_x, shape)
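The scatter here inverts the gather that produced the blocks. The core idea, sketched on a 1-D permutation (toy example):

perm = tf.constant([[2], [0], [1]])              # order the blocks were gathered in
gathered = tf.constant([10.0, 20.0, 30.0])       # values in gathered order
original = tf.scatter_nd(perm, gathered, shape=[3])
# original == [20., 30., 10.]: each value returns to its source position.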
segnet_model.py (project: woipv, author: Panaetius)
def __unpool(self, updates, mask, ksize=[1, 2, 2, 1], output_shape=None, feature_count=None, name=''):
        with tf.variable_scope(name):
            mask = tf.cast(mask, tf.int32)
            input_shape = tf.shape(updates, out_type=tf.int32)
            # calculate the new shape

            if feature_count is None:
                feature_count = input_shape[3]

            if output_shape is None:
                output_shape = (1, input_shape[1] * ksize[1], input_shape[2] * ksize[2], feature_count)

            output_shape = tf.cast(output_shape, tf.int32)

            # calculate indices for batch, height, width and feature maps
            one_like_mask = tf.cast(tf.ones_like(mask, dtype=tf.int16), tf.int32)
            batch_shape = tf.concat([[input_shape[0]], [1], [1], [1]], 0)
            batch_range = tf.reshape(tf.range(output_shape[0], dtype=tf.int32), shape=batch_shape)
            b = one_like_mask * batch_range
            y = tf.floordiv(mask, output_shape[2] * output_shape[3])
            x = tf.mod(tf.floordiv(mask, output_shape[3]), output_shape[2]) #mask % (output_shape[2] * output_shape[3]) // output_shape[3]
            feature_range = tf.range(output_shape[3], dtype=tf.int32)
            f = one_like_mask * feature_range
            # transpose indices & reshape update values to one dimension
            updates_size = tf.size(updates)
            indices = tf.transpose(tf.reshape(tf.stack([b, y, x, f]), [4, updates_size]))
            values = tf.reshape(updates, [updates_size])
            ret = tf.scatter_nd(indices, values, output_shape)
            return ret
tensors.py (project: DAVIS-2016-Chanllege-Solution, author: tangyuhao)
def pad_axis(x, offset, size, axis=0, name=None):
    """Pad a tensor on an axis, with a given offset and output size.
    The tensor is padded with zeros (i.e. CONSTANT mode). Note that if
    `size` is smaller than the existing size + `offset`, the output tensor's
    dimension on `axis` will be the latter.

    Args:
      x: Tensor to pad;
      offset: Offset to add on the dimension chosen;
      size: Final size of the dimension.
    Return:
      Padded tensor whose dimension on `axis` is `size`, or greater if
      the input vector was larger.
    """
    with tf.name_scope(name, 'pad_axis'):
        shape = get_shape(x)
        rank = len(shape)
        # Padding description.
        new_size = tf.maximum(size-offset-shape[axis], 0)
        pad1 = tf.stack([0]*axis + [offset] + [0]*(rank-axis-1))
        pad2 = tf.stack([0]*axis + [new_size] + [0]*(rank-axis-1))
        paddings = tf.stack([pad1, pad2], axis=1)
        x = tf.pad(x, paddings, mode='CONSTANT')
        # Reshape, to get fully defined shape if possible.
        # TODO: fix with tf.slice
        shape[axis] = size
        x = tf.reshape(x, tf.stack(shape))
        return x


# def select_at_index(idx, val, t):
#     """Return a tensor.
#     """
#     idx = tf.expand_dims(tf.expand_dims(idx, 0), 0)
#     val = tf.expand_dims(val, 0)
#     t = t + tf.scatter_nd(idx, val, tf.shape(t))
#     return t
utilities.py (project: segmentation, author: fregu856)
def max_unpool(inputs, pooling_indices, output_shape=None, k_size=[1, 2, 2, 1]):
    # NOTE! this function is based on the implementation by kwotsin in
    # https://github.com/kwotsin/TensorFlow-ENet

    # inputs has shape [batch_size, height, width, channels]

    # pooling_indices: pooling indices of the previously max_pooled layer

    # output_shape: what shape the returned tensor should have

    pooling_indices = tf.cast(pooling_indices, tf.int32)
    input_shape = tf.shape(inputs, out_type=tf.int32)

    one_like_pooling_indices = tf.ones_like(pooling_indices, dtype=tf.int32)
    batch_shape = tf.concat([[input_shape[0]], [1], [1], [1]], 0)
    batch_range = tf.reshape(tf.range(input_shape[0], dtype=tf.int32), shape=batch_shape)
    b = one_like_pooling_indices*batch_range
    y = pooling_indices//(output_shape[2]*output_shape[3])
    x = (pooling_indices//output_shape[3]) % output_shape[2]
    feature_range = tf.range(output_shape[3], dtype=tf.int32)
    f = one_like_pooling_indices*feature_range

    inputs_size = tf.size(inputs)
    indices = tf.transpose(tf.reshape(tf.stack([b, y, x, f]), [4, inputs_size]))
    values = tf.reshape(inputs, [inputs_size])

    ret = tf.scatter_nd(indices, values, output_shape)

    return ret

Dense_Transformer_Network.py (project: 3D_Dense_Transformer_Networks, author: JohnYC1995)
def _bilinear_interpolate(self,im, im_org, x, y):
        with tf.variable_scope('_interpolate'):
            # constants
            x = tf.cast(x, 'float32')
            y = tf.cast(y, 'float32')
            height_f = tf.cast(self.height, 'float32')
            width_f = tf.cast(self.width, 'float32')
            zero = tf.zeros([], dtype='int32')
            max_y = tf.cast(tf.shape(im)[1] - 1, 'int32')
            max_x = tf.cast(tf.shape(im)[2] - 1, 'int32')
            # scale indices from [-1, 1] to [0, width/height]
            x = (x + 1.0)*(width_f) / 2.0
            y = (y + 1.0)*(height_f) / 2.0
            # do sampling
            x0 = tf.cast(tf.floor(x), 'int32')
            x1 = x0 + 1
            y0 = tf.cast(tf.floor(y), 'int32')
            y1 = y0 + 1
            x0 = tf.clip_by_value(x0, zero, max_x)
            x1 = tf.clip_by_value(x1, zero, max_x)
            y0 = tf.clip_by_value(y0, zero, max_y)
            y1 = tf.clip_by_value(y1, zero, max_y)
            dim2 = self.width
            dim1 = self.width*self.height
            base = self._repeat(tf.range(self.num_batch)*dim1, self.out_height*self.out_width, 'int32')
            base_y0 = base + y0*dim2
            base_y1 = base + y1*dim2
            idx_a = tf.expand_dims(base_y0 + x0, 1)
            idx_b = tf.expand_dims(base_y1 + x0, 1)
            idx_c = tf.expand_dims(base_y0 + x1, 1)
            idx_d = tf.expand_dims(base_y1 + x1, 1)
            # use indices to lookup pixels in the flat image and restore
            # channels dim
            im_flat = tf.reshape(im, tf.stack([-1, self.num_channels]))
            im_flat = tf.cast(im_flat, 'float32')
            Ia = tf.scatter_nd(idx_a, im_flat, [self.num_batch*self.out_height*self.out_width, self.num_channels])
            Ib = tf.scatter_nd(idx_b, im_flat, [self.num_batch*self.out_height*self.out_width, self.num_channels])
            Ic = tf.scatter_nd(idx_c, im_flat, [self.num_batch*self.out_height*self.out_width, self.num_channels])
            Id = tf.scatter_nd(idx_d, im_flat, [self.num_batch*self.out_height*self.out_width, self.num_channels])

            x0_f = tf.cast(x0, 'float32')
            x1_f = tf.cast(x1, 'float32')
            y0_f = tf.cast(y0, 'float32')
            y1_f = tf.cast(y1, 'float32')
            wa = tf.scatter_nd(idx_a, tf.expand_dims(((x1_f-x) * (y1_f-y)), 1), [self.num_batch*self.out_height*self.out_width, 1])
            wb = tf.scatter_nd(idx_b, tf.expand_dims(((x1_f-x) * (y-y0_f)), 1), [self.num_batch*self.out_height*self.out_width, 1])
            wc = tf.scatter_nd(idx_c, tf.expand_dims(((x-x0_f) * (y1_f-y)), 1), [self.num_batch*self.out_height*self.out_width, 1])
            wd = tf.scatter_nd(idx_d, tf.expand_dims(((x-x0_f) * (y-y0_f)), 1), [self.num_batch*self.out_height*self.out_width, 1])

            value_all = tf.add_n([wa*Ia, wb*Ib, wc*Ic, wd*Id])
            weight_all = tf.clip_by_value(tf.add_n([wa, wb, wc, wd]),1e-5,1e+10)
            flag = tf.less_equal(weight_all, 1e-5* tf.ones_like(weight_all))
            flag = tf.cast(flag, tf.float32)
            im_org = tf.reshape(im_org, [-1,self.num_channels])
            output = tf.add(tf.div(value_all, weight_all), tf.multiply(im_org, flag))
            return output
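The weight accumulation above depends on tf.scatter_nd summing updates that land on duplicate indices. A minimal demonstration:

idx = tf.constant([[0], [0], [1]])
vals = tf.constant([1.0, 2.0, 5.0])
out = tf.scatter_nd(idx, vals, tf.constant([2]))
# out == [3., 5.]: the two updates at index 0 are summed, not overwritten.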
tensorlfowapi.py (project: SSD_tensorflow_VOC, author: LevinJ)
def test_scatter_nd_3():
    gt_bboxes = tf.constant([[0,0,1,2],[1,0,3,4],[100,100,105,102.5]])
    gt_labels = tf.constant([1,2,6])

    jaccard = tf.constant( [[ 0. ,     0.  ,    0.02,    0.15  ],[ 0. ,     0.3125 , 0.08,    0.    ],[ 0.5 ,    0. ,     0.  ,    0.    ]])
    gt_anchors_scores = tf.constant([0.0,0.,0.,0.])
    gt_anchors_labels = tf.constant([100,100,100,100])
    gt_anchors_bboxes=tf.constant([[100,100,105,105],[2,1,3,3.5],[0,0,10,10],[0.5,0.5,0.8,1.5]])

    max_inds = tf.cast(tf.argmax(jaccard, axis=1),tf.int32)

    def cond(i,gt_anchors_labels,gt_anchors_bboxes,gt_anchors_scores):
        r = tf.less(i, tf.shape(gt_labels)[0])
        return r
    def body(i,gt_anchors_labels,gt_anchors_bboxes,gt_anchors_scores):

        # update gt_anchors_labels
        updates = tf.reshape(gt_labels[i], [-1])
        indices = tf.reshape(max_inds[i],[1,-1])
        shape = tf.reshape(tf.shape(gt_anchors_bboxes)[0],[-1])


        new_labels = tf.scatter_nd(indices, updates, shape)
        new_mask = tf.cast(new_labels, tf.bool)
        gt_anchors_labels = tf.where(new_mask, new_labels, gt_anchors_labels)

        #update gt_anchors_bboxes
        updates = tf.reshape(gt_bboxes[i], [1,-1])
        indices = tf.reshape(max_inds[i],[1,-1])
        shape = tf.shape(gt_anchors_bboxes)
        new_bboxes = tf.scatter_nd(indices, updates, shape)
        gt_anchors_bboxes = tf.where(new_mask, new_bboxes, gt_anchors_bboxes)

        #update gt_anchors_scores
        updates = tf.reshape(jaccard[i, max_inds[i]], [-1])
        indices = tf.reshape(max_inds[i],[1,-1])
        shape = tf.reshape(tf.shape(gt_anchors_bboxes)[0],[-1])
        new_scores = tf.scatter_nd(indices, updates, shape)
        gt_anchors_scores = tf.where(new_mask, new_scores, gt_anchors_scores)
        return [i+1,gt_anchors_labels,gt_anchors_bboxes,gt_anchors_scores]


    i = 0
    [i,gt_anchors_labels,gt_anchors_bboxes,gt_anchors_scores] = tf.while_loop(cond, body,[i,gt_anchors_labels,gt_anchors_bboxes,gt_anchors_scores])
    return gt_anchors_labels,gt_anchors_bboxes,gt_anchors_scores
seq2seq_utils.py (project: tefla, author: openAGI)
def next_inputs(self, time, outputs, state, sample_ids, name=None):
        with tf.name_scope(name, "ScheduledOutputTrainingHelperNextInputs",
                           [time, outputs, state, sample_ids]):
            (finished, base_next_inputs, state) = (
                super(ScheduledOutputTrainingHelper, self).next_inputs(
                    time=time,
                    outputs=outputs,
                    state=state,
                    sample_ids=sample_ids,
                    name=name))

            def maybe_sample():
                """Perform scheduled sampling."""

                def maybe_concatenate_auxiliary_inputs(outputs_, indices=None):
                    """Concatenate outputs with auxiliary inputs, if they exist."""
                    if self._auxiliary_input_tas is None:
                        return outputs_

                    next_time = time + 1
                    auxiliary_inputs = nest.map_structure(
                        lambda ta: ta.read(next_time), self._auxiliary_input_tas)
                    if indices is not None:
                        auxiliary_inputs = tf.gather_nd(
                            auxiliary_inputs, indices)
                    return nest.map_structure(
                        lambda x, y: tf.concat((x, y), -1),
                        outputs_, auxiliary_inputs)

                if self._next_input_layer is None:
                    return tf.where(
                        sample_ids, maybe_concatenate_auxiliary_inputs(outputs),
                        base_next_inputs)

                where_sampling = tf.cast(
                    tf.where(sample_ids), tf.int32)
                where_not_sampling = tf.cast(
                    tf.where(tf.logical_not(sample_ids)), tf.int32)
                outputs_sampling = tf.gather_nd(outputs, where_sampling)
                inputs_not_sampling = tf.gather_nd(base_next_inputs,
                                                   where_not_sampling)
                sampled_next_inputs = maybe_concatenate_auxiliary_inputs(
                    self._next_input_layer(outputs_sampling), where_sampling)

                base_shape = tf.shape(base_next_inputs)
                return (tf.scatter_nd(indices=where_sampling,
                                      updates=sampled_next_inputs,
                                      shape=base_shape)
                        + tf.scatter_nd(indices=where_not_sampling,
                                        updates=inputs_not_sampling,
                                        shape=base_shape))

            all_finished = tf.reduce_all(finished)
            next_inputs = tf.cond(
                all_finished, lambda: base_next_inputs, maybe_sample)
            return (finished, next_inputs, state)
models.py (project: tensorflow-fcwta, author: guoguo12)
def _initialize_vars(self):
        """Sets up the training graph."""
        with tf.variable_scope(self.name) as scope:
            self.global_step = tf.get_variable(
                'global_step',
                shape=[],
                initializer=tf.zeros_initializer())
            self.input = tf.placeholder(tf.float32, shape=[None, self.input_dim])

        current = self.input
        for i in range(self.encode_layers - 1):
            current = self._relu_layer(current, self.input_dim, self.input_dim, i)
        self.encoded = self._relu_layer(current, self.input_dim, self.hidden_units, self.encode_layers - 1)

        # Make batch size the last dimension (for use with tf.nn.top_k)
        encoded_t = tf.transpose(self.encoded)

        # Compute the indices corresponding to the top k activations for each
        # neuron in the final encoder layer
        k = int(self.sparsity * self.batch_size)
        _, top_indices = tf.nn.top_k(encoded_t, k=k, sorted=False)

        # Transform top_indices, which contains rows of column indices, into
        # indices, a list of [row, column] pairs (for use with tf.scatter_nd)
        top_k_unstacked = tf.unstack(top_indices, axis=1)
        row_indices = [tf.range(self.hidden_units) for _ in range(k)]
        combined_columns = tf.transpose(tf.stack(_interleave(row_indices, top_k_unstacked)))
        indices = tf.reshape(combined_columns, [-1, 2])

        # Apply sparsity constraint
        updates = tf.ones(self.hidden_units * k)
        shape = tf.constant([self.hidden_units, self.batch_size])
        mask = tf.scatter_nd(indices, updates, shape)
        sparse_encoded = self.encoded * tf.transpose(mask)

        self.decoded = self._decode_layer(sparse_encoded)

        self.loss = tf.reduce_sum(tf.square(self.decoded - self.input))
        self.optimizer_op = self.optimizer(self.learning_rate).minimize(
            self.loss, self.global_step)

        self.saver = tf.train.Saver(tf.global_variables())
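A hedged sketch of the same sparsity mask on toy activations (hypothetical sizes: hidden_units = 2, batch = 3, k = 1):

acts = tf.constant([[0.1, 0.9, 0.5],
                    [0.8, 0.2, 0.4]])               # [hidden_units, batch]
k = 1
_, top_cols = tf.nn.top_k(acts, k=k, sorted=False)  # [2, k] column indices
rows = tf.tile(tf.reshape(tf.range(2), [-1, 1]), [1, k])
idx = tf.reshape(tf.stack([rows, top_cols], axis=2), [-1, 2])
mask = tf.scatter_nd(idx, tf.ones([2 * k]), tf.constant([2, 3]))
sparse_acts = acts * mask    # keeps only 0.9 and 0.8; the rest become zero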

