Python TensorShape() class: example source code
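The examples collected below lean on a handful of tf.TensorShape operations: counting elements, converting to a Python list, concatenating static shapes, and pinning a tensor's static shape with set_shape. A minimal sketch of those calls, assuming the TF 1.x API that all the snippets on this page target:

import tensorflow as tf

shape = tf.TensorShape([4, 8])
print(shape.num_elements())    # 32
print(shape.as_list())         # [4, 8]

# Prepend an unknown sample dimension, as the _sample() methods below do.
full = tf.TensorShape([None]).concatenate(shape)   # (?, 4, 8)

# Refine the static shape of a tensor TF knows nothing about.
x = tf.placeholder(tf.float32)
x.set_shape(full)
print(x.get_shape())           # (?, 4, 8)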

variable_mgr_util.py (project: benchmarks, author: tensorflow)
def __call__(self, getter, *args, **kwargs):
    size = tf.TensorShape(kwargs['shape']).num_elements()
    if size < self.small_variable_size_threshold:
      device_name = self.device_for_small_variables
    else:
      device_index, _ = min(enumerate(self.sizes), key=operator.itemgetter(1))
      device_name = self.devices[device_index]
      self.sizes[device_index] += size

    kwargs['caching_device'] = device_name
    var = getter(*args, **kwargs)
    return var


# To be used with custom_getter on tf.get_variable. Ensures the created variable
# is in LOCAL_VARIABLES and not GLOBAL_VARIABLES collection.
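The comment above refers to the custom_getter hook on tf.get_variable. A minimal, self-contained sketch of that mechanism (the caching_getter below is a simplified stand-in with a fixed threshold, not the class from the snippet):

import tensorflow as tf

def caching_getter(getter, *args, **kwargs):
    # Mirror the size-based branching in __call__ above: cache small
    # variables on the CPU and leave everything else untouched.
    size = tf.TensorShape(kwargs['shape']).num_elements()
    if size < 1024:
        kwargs['caching_device'] = '/cpu:0'
    return getter(*args, **kwargs)

with tf.variable_scope('tower_0', custom_getter=caching_getter):
    w = tf.get_variable('w', shape=[1024, 1024], dtype=tf.float32)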
rnn.py (project: seq2seq, author: eske)
def __init__(self, num_units, activation=None, reuse=None, kernel_initializer=None, bias_initializer=None,
                 layer_norm=False, state_keep_prob=None, input_keep_prob=None, input_size=None, final=False):
        super(DropoutGRUCell, self).__init__(_reuse=reuse)
        self._num_units = num_units
        self._activation = activation or tf.nn.tanh
        self._kernel_initializer = kernel_initializer
        self._bias_initializer = bias_initializer
        self._layer_norm = layer_norm
        self._state_keep_prob = state_keep_prob
        self._input_keep_prob = input_keep_prob
        self._final = final

        def batch_noise(s):
            s = tf.concat(([1], tf.TensorShape(s).as_list()), 0)
            return tf.random_uniform(s)

        if input_keep_prob is not None:
            self._input_noise = DropoutGRUCell._enumerated_map_structure(lambda i, s: batch_noise(s), input_size)
        if state_keep_prob is not None:
            self._state_noise = DropoutGRUCell._enumerated_map_structure(lambda i, s: batch_noise(s), num_units)
univariate.py (project: zhusuan, author: thu-ml)
def _sample(self, n_samples):
        if self.logits.get_shape().ndims == 2:
            logits_flat = self.logits
        else:
            logits_flat = tf.reshape(self.logits, [-1, self.n_categories])
        samples_flat = tf.transpose(tf.multinomial(logits_flat, n_samples))
        samples_flat = tf.cast(samples_flat, self.dtype)
        if self.logits.get_shape().ndims == 2:
            return samples_flat
        shape = tf.concat([[n_samples], self.batch_shape], 0)
        samples = tf.reshape(samples_flat, shape)
        static_n_samples = n_samples if isinstance(n_samples, int) else None
        samples.set_shape(
            tf.TensorShape([static_n_samples]).concatenate(
                self.get_batch_shape()))
        return samples
univariate.py (project: zhusuan, author: thu-ml)
def _sample(self, n_samples):
        n = self.n_experiments
        if self.logits.get_shape().ndims == 1:
            logits_flat = self.logits
        else:
            logits_flat = tf.reshape(self.logits, [-1])
        log_1_minus_p = -tf.nn.softplus(logits_flat)
        log_p = logits_flat + log_1_minus_p
        stacked_logits_flat = tf.stack([log_1_minus_p, log_p], axis=-1)
        samples_flat = tf.transpose(
            tf.multinomial(stacked_logits_flat, n_samples * n))

        shape = tf.concat([[n, n_samples], self.batch_shape], 0)
        samples = tf.reduce_sum(tf.reshape(samples_flat, shape), axis=0)

        static_n_samples = n_samples if isinstance(n_samples, int) else None
        static_shape = tf.TensorShape([static_n_samples]).concatenate(
            self.get_batch_shape())
        samples.set_shape(static_shape)

        return tf.cast(samples, self.dtype)
univariate.py (project: zhusuan, author: thu-ml)
def _sample(self, n_samples):
        # samples must be sampled from (-1, 1) rather than [-1, 1)
        loc, scale = self.loc, self.scale
        if not self.is_reparameterized:
            loc = tf.stop_gradient(loc)
            scale = tf.stop_gradient(scale)
        shape = tf.concat([[n_samples], self.batch_shape], 0)
        uniform_samples = tf.random_uniform(
            shape=shape,
            minval=np.nextafter(self.dtype.as_numpy_dtype(-1.),
                                self.dtype.as_numpy_dtype(0.)),
            maxval=1.,
            dtype=self.dtype)
        samples = loc - scale * tf.sign(uniform_samples) * \
            tf.log1p(-tf.abs(uniform_samples))
        static_n_samples = n_samples if isinstance(n_samples, int) else None
        samples.set_shape(
            tf.TensorShape([static_n_samples]).concatenate(
                self.get_batch_shape()))
        return samples
multivariate.py (project: zhusuan, author: thu-ml)
def _sample(self, n_samples):
        mean, cov_tril = self.mean, self.cov_tril
        if not self.is_reparameterized:
            mean = tf.stop_gradient(mean)
            cov_tril = tf.stop_gradient(cov_tril)

        def tile(t):
            new_shape = tf.concat([[n_samples], tf.ones_like(tf.shape(t))], 0)
            return tf.tile(tf.expand_dims(t, 0), new_shape)

        batch_mean = tile(mean)
        batch_cov = tile(cov_tril)
        # n_dim -> n_dim x 1 for matmul
        batch_mean = tf.expand_dims(batch_mean, -1)
        noise = tf.random_normal(tf.shape(batch_mean), dtype=self.dtype)
        samples = tf.matmul(batch_cov, noise) + batch_mean
        samples = tf.squeeze(samples, -1)
        # Update static shape
        static_n_samples = n_samples if isinstance(n_samples, int) else None
        samples.set_shape(tf.TensorShape([static_n_samples])
                          .concatenate(self.get_batch_shape())
                          .concatenate(self.get_value_shape()))
        return samples
multivariate.py (project: zhusuan, author: thu-ml)
def _sample(self, n_samples):
        if self.logits.get_shape().ndims == 2:
            logits_flat = self.logits
        else:
            logits_flat = tf.reshape(self.logits, [-1, self.n_categories])
        samples_flat = tf.transpose(tf.multinomial(logits_flat, n_samples))
        if self.logits.get_shape().ndims == 2:
            samples = samples_flat
        else:
            shape = tf.concat([[n_samples], self.batch_shape], 0)
            samples = tf.reshape(samples_flat, shape)
            static_n_samples = n_samples if isinstance(n_samples,
                                                       int) else None
            samples.set_shape(
                tf.TensorShape([static_n_samples]).
                concatenate(self.get_batch_shape()))
        samples = tf.one_hot(samples, self.n_categories, dtype=self.dtype)
        return samples
multivariate.py (project: zhusuan, author: thu-ml)
def _sample(self, n_samples):
        logits, temperature = self.logits, self.temperature
        if not self.is_reparameterized:
            logits = tf.stop_gradient(logits)
            temperature = tf.stop_gradient(temperature)
        shape = tf.concat([[n_samples], tf.shape(self.logits)], 0)

        uniform = open_interval_standard_uniform(shape, self.dtype)
        # TODO: Add Gumbel distribution
        gumbel = -tf.log(-tf.log(uniform))
        samples = tf.nn.softmax((logits + gumbel) / temperature)

        static_n_samples = n_samples if isinstance(n_samples, int) else None
        samples.set_shape(
            tf.TensorShape([static_n_samples]).concatenate(logits.get_shape()))
        return samples
base.py (project: zhusuan, author: thu-ml)
def _check_input_shape(self, given):
        given = tf.convert_to_tensor(given, dtype=self.dtype)

        err_msg = "The given argument should be able to broadcast to " \
                  "match batch_shape + value_shape of the distribution."
        if (given.get_shape() and self.get_batch_shape() and
                self.get_value_shape()):
            static_sample_shape = tf.TensorShape(
                self.get_batch_shape().as_list() +
                self.get_value_shape().as_list())
            try:
                tf.broadcast_static_shape(given.get_shape(),
                                          static_sample_shape)
            except ValueError:
                raise ValueError(
                    err_msg + " ({} vs. {} + {})".format(
                        given.get_shape(), self.get_batch_shape(),
                        self.get_value_shape()))
        return given
utils.py (project: ngraph, author: NervanaSystems)
def tf_obj_shape(input):
    """
    Convert tf objects to shape tuple.

    Arguments:
        input: tf.TensorShape, tf.Tensor, tf.AttrValue or tf.NodeDef
               the corresponding tensorflow object

    Returns:
        tuple: shape of the tensorflow object
    """
    if isinstance(input, tf.TensorShape):
        return tuple([int(i.value) for i in input])
    elif isinstance(input, tf.Tensor):
        return tf_obj_shape(input.get_shape())
    elif isinstance(input, tf.AttrValue):
        return tuple([int(d.size) for d in input.shape.dim])
    elif isinstance(input, tf.NodeDef):
        return tf_obj_shape(input.attr['shape'])
    else:
        raise TypeError("Input to `tf_obj_shape` has the wrong type.")
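A brief usage sketch for tf_obj_shape, assuming TF 1.x graph mode; it exercises only the TensorShape and Tensor branches, and the dimensions must be fully defined (int(i.value) fails on unknown dims):

print(tf_obj_shape(tf.TensorShape([2, 3])))              # (2, 3)
print(tf_obj_shape(tf.placeholder(tf.float32, [4, 5])))  # (4, 5)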
utils.py (project: tflearn, author: tflearn)
def autoformat_kernel_2d(strides):
    if isinstance(strides, int):
        return [1, strides, strides, 1]
    elif isinstance(strides, (tuple, list, tf.TensorShape)):
        if len(strides) == 2:
            return [1, strides[0], strides[1], 1]
        elif len(strides) == 4:
            return [strides[0], strides[1], strides[2], strides[3]]
        else:
            raise Exception("strides length error: " + str(len(strides))
                            + ", only a length of 2 or 4 is supported.")
    else:
        raise Exception("strides format error: " + str(type(strides)))


# Auto format filter size
# Output shape: (rows, cols, input_depth, out_depth)
utils.py (project: tflearn, author: tflearn)
def autoformat_stride_3d(strides):
    if isinstance(strides, int):
        return [1, strides, strides, strides, 1]
    elif isinstance(strides, (tuple, list, tf.TensorShape)):
        if len(strides) == 3:
            return [1, strides[0], strides[1], strides[2], 1]
        elif len(strides) == 5:
            assert strides[0] == strides[4] == 1, "Must have strides[0] = strides[4] = 1"
            return [strides[0], strides[1], strides[2], strides[3], strides[4]]
        else:
            raise Exception("strides length error: " + str(len(strides))
                            + ", only a length of 3 or 5 is supported.")
    else:
        raise Exception("strides format error: " + str(type(strides)))


# Auto format kernel for 3d convolution
rnn_core.py (project: sonnet, author: deepmind)
def zero_state(self, batch_size, dtype):
    """Return zero-filled state tensor(s).

    Args:
      batch_size: int, float, or unit Tensor representing the batch size.
      dtype: the data type to use for the state.

    Returns:
      If `state_size` is an int or TensorShape, then the return value is an
      `N-D` tensor of shape `[batch_size x state_size]` filled with zeros.

      If `state_size` is a nested list or tuple, then the return value is
      a nested list or tuple (of the same structure) of `2-D` tensors with
      the shapes `[batch_size x s]` for each s in `state_size`.
    """
    # Keep scope for backwards compatibility.
    with tf.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
      return rnn_cell_impl._zero_state_tensors(  # pylint: disable=protected-access
          self.state_size, batch_size, dtype)
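A hedged illustration of the two return structures described in the docstring, using TF's own rnn_cell classes, which follow the same zero_state contract:

import tensorflow as tf

gru = tf.nn.rnn_cell.GRUCell(num_units=8)    # state_size is an int
print(gru.zero_state(batch_size=4, dtype=tf.float32).get_shape())  # (4, 8)

lstm = tf.nn.rnn_cell.LSTMCell(num_units=8)  # state_size is a nested tuple
state = lstm.zero_state(batch_size=4, dtype=tf.float32)
# state is an LSTMStateTuple of two [4, 8] zero tensors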
basic_rnn_test.py (project: sonnet, author: deepmind)
def testMLPFinalCore(self):
    batch_size = 2
    sequence_length = 3
    input_size = 4
    mlp_last_layer_size = 17
    cores = [
        snt.LSTM(hidden_size=10),
        snt.nets.MLP(output_sizes=[6, 7, mlp_last_layer_size]),
    ]
    deep_rnn = snt.DeepRNN(cores, skip_connections=False)
    input_sequence = tf.constant(
        np.random.randn(sequence_length, batch_size, input_size),
        dtype=tf.float32)
    initial_state = deep_rnn.initial_state(batch_size=batch_size)
    output, unused_final_state = tf.nn.dynamic_rnn(
        deep_rnn, input_sequence,
        initial_state=initial_state,
        time_major=True)
    self.assertEqual(
        output.get_shape(),
        tf.TensorShape([sequence_length, batch_size, mlp_last_layer_size]))
tensor_ops.py (project: hart, author: akosiorek)
def select_present(x, presence, batch_size=1, name='select_present'):
    with tf.variable_scope(name):
        presence = 1 - tf.to_int32(presence)  # invert mask

        bs = x.get_shape()[0].value  # static batch size; None if unknown
        if bs is not None:
            batch_size = int(bs)

        num_partitions = 2 * batch_size
        r = tf.range(0, num_partitions,  2)
        r.set_shape(tf.TensorShape(batch_size))
        r = broadcast_against(r, presence)

        presence += r

        selected = tf.dynamic_partition(x, presence, num_partitions)
        selected = tf.concat(axis=0, values=selected)
        selected = tf.reshape(selected, tf.shape(x))

    return selected
tensor_ops.py (project: hart, author: akosiorek)
def broadcast_against(tensor, against_expr):
    """Adds trailing dimensions to mask to enable broadcasting against data

    :param tensor: tensor to be broadcasted
    :param against_expr: tensor will be broadcasted against it
    :return: mask expr with tf.rank(mask) == tf.rank(data)
    """

    def cond(data, tensor):
        return tf.less(tf.rank(tensor), tf.rank(data))

    def body(data, tensor):
        return data, tf.expand_dims(tensor, -1)

    shape_invariants = [against_expr.get_shape(), tf.TensorShape(None)]
    _, tensor = tf.while_loop(cond, body, [against_expr, tensor], shape_invariants)
    return tensor
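A hypothetical usage of the helper above: give a per-example mask trailing singleton dimensions so it broadcasts against rank-3 data.

mask = tf.constant([1.0, 0.0])            # shape (2,)
data = tf.zeros([2, 3, 4])
expanded = broadcast_against(mask, data)  # runtime shape (2, 1, 1)
masked = data * expanded                  # zeros out the second example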
variable_mgr.py (project: stuff, author: yaroslavvb)
def __call__(self, getter, *args, **kwargs):
    size = tf.TensorShape(kwargs['shape']).num_elements()
    if size < self.small_variable_size_threshold:
      device_name = self.device_for_small_variables
    else:
      device_index, _ = min(enumerate(
          self.sizes), key=operator.itemgetter(1))
      device_name = self.devices[device_index]
      self.sizes[device_index] += size

    kwargs['caching_device'] = device_name
    var = getter(*args, **kwargs)
    return var


# To be used with custom_getter on tf.get_variable. Ensures the created variable
# is in LOCAL_VARIABLES and not GLOBAL_VARIABLES collection.
mgp-rnn-fit.py (project: MGP-RNN, author: jfutoma)
def get_probs_and_accuracy(preds,O):
    """
    Helper function. We have a prediction for each MC sample of each observation
    in this batch; distill the multiple predictions per observation into a single
    one and compute accuracy. The returned probabilities can be fed to sklearn for ROC and PR curves.
    """
    all_probs = tf.exp(preds[:, 1] - tf.reduce_logsumexp(preds, axis=1))  # normalize; drop a dim so only the prob of the positive case remains
    N = tf.cast(tf.shape(preds)[0] / n_mc_smps, tf.int32)  # actual number of observations in preds, collapsing MC samples

    # predicted probability per observation; collapse the MC samples
    probs = tf.zeros([0])  # accumulate per-observation probs, then concat into a tensor
    # set up a tf while loop (needed because the loop size is variable)
    def cond(i, probs):
        return i < N

    def body(i, probs):
        probs = tf.concat(
            [probs, [tf.reduce_mean(tf.slice(all_probs, [i * n_mc_smps], [n_mc_smps]))]], 0)
        return i + 1, probs

    i = tf.constant(0)
    i, probs = tf.while_loop(cond, body, loop_vars=[i, probs],
                             shape_invariants=[i.get_shape(), tf.TensorShape([None])])

    # compare to the truth; use a cutoff of 0.5 for now to get accuracy
    correct_pred = tf.equal(tf.cast(tf.greater(probs, 0.5), tf.int32), O)
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
    return probs, accuracy
batch_repeat_unpack.py (project: master-thesis, author: AndreasMadsen)
def batch_repeat_unpack(x, repeats=1, name=None):
    with tf.name_scope(name, "batch-repeat-unpack", values=[x]):
        # x.shape = (batches * repeats, ...)

        # reshape to (batches, repeats, ...)
        shape = tf.concat([[-1], [repeats], tf.shape(x)[1:]], axis=0)
        t = tf.reshape(x, shape=shape)

        repeats_dim = tf.Dimension(repeats)
        t.set_shape(
            tf.TensorShape([
                x.get_shape()[0] // repeats_dim, repeats_dim
            ]).concatenate(x.get_shape()[1:])
        )

        return t
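A hypothetical usage, assuming the leading dimension of x is batches times repeats:

x = tf.zeros([6, 5])                   # 6 = 3 examples * 2 repeats
y = batch_repeat_unpack(x, repeats=2)
print(y.get_shape())                   # (3, 2, 5)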
beam_aligner.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def output_size(self):
        # Return the cell output and the id
        return BeamSearchOptimizationDecoderOutput(
            scores=tf.TensorShape([self._beam_width]),
            predicted_ids=tf.TensorShape([self._beam_width]),
            parent_ids=tf.TensorShape([self._beam_width]),
            gold_score=tf.TensorShape(()),
            loss=tf.TensorShape(()))
beam_aligner.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def _merge_batch_beams(self, t, s):
        """Merges the tensor from a batch of beams into a batch by beams.
        More exactly, t is a tensor of dimension [batch_size, beam_width, s]. We
        reshape this into [batch_size*beam_width, s]
        Args:
          t: Tensor of dimension [batch_size, beam_width, s]
        Returns:
          A reshaped version of t with dimension [batch_size * beam_width, s].
        """
        t_shape = tf.shape(t)
        reshaped = tf.reshape(t, tf.concat(([self._batch_size * self._beam_width], t_shape[2:]), axis=0))
        reshaped.set_shape(tf.TensorShape([None]).concatenate(s))
        return reshaped
beam_aligner.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def _split_batch_beams(self, t, s):
        """Splits the tensor from a batch by beams into a batch of beams.
        More exactly, t is a tensor of dimension [batch_size*beam_width, s]. We
        reshape this into [batch_size, beam_width, s]
        Args:
          t: Tensor of dimension [batch_size*beam_width, s].
          s: (Possibly known) depth shape.
        Returns:
          A reshaped version of t with dimension [batch_size, beam_width, s].
        Raises:
          ValueError: If, after reshaping, the new tensor is not shaped
            `[batch_size, beam_width, s]` (assuming batch_size and beam_width
            are known statically).
        """
        t_shape = tf.shape(t)
        reshaped = tf.reshape(t, tf.concat(([self._batch_size, self._beam_width], t_shape[1:]), axis=0))
        reshaped.set_shape(tf.TensorShape([None, self._beam_width]).concatenate(t.shape[1:]))
        expected_reshaped_shape = tf.TensorShape([None, self._beam_width]).concatenate(s)
        if not reshaped.shape.is_compatible_with(expected_reshaped_shape):
            raise ValueError("Unexpected behavior when reshaping between beam width "
                             "and batch size.  The reshaped tensor has shape: %s.  "
                             "We expected it to have shape "
                             "(batch_size, beam_width, depth) == %s.  Perhaps you "
                             "forgot to create a zero_state with "
                             "batch_size=encoder_batch_size * beam_width?"
                             % (reshaped.shape, expected_reshaped_shape))
        return reshaped
beam_aligner.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def _maybe_split_batch_beams(self, t, s):
        """Maybe splits the tensor from a batch by beams into a batch of beams.
        We do this so that we can use nest and not run into problems with shapes.
        Args:
          t: Tensor of dimension [batch_size*beam_width, s]
          s: Tensor, Python int, or TensorShape.
        Returns:
          Either a reshaped version of t with dimension
          [batch_size, beam_width, s] if t's first dimension is of size
          batch_size*beam_width or t if not.
        Raises:
          TypeError: If t is an instance of TensorArray.
          ValueError: If the rank of t is not statically known.
        """
        return self._split_batch_beams(t, s) if t.shape.ndims >= 1 else t
beam_aligner.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def _maybe_merge_batch_beams(self, t, s):
        """Splits the tensor from a batch by beams into a batch of beams.
        More exactly, t is a tensor of dimension [batch_size*beam_width, s]. We
        reshape this into [batch_size, beam_width, s]
        Args:
          t: Tensor of dimension [batch_size*beam_width, s]
          s: Tensor, Python int, or TensorShape.
        Returns:
          A reshaped version of t with dimension [batch_size, beam_width, s].
        Raises:
          TypeError: If t is an instance of TensorArray.
          ValueError:  If the rank of t is not statically known.
        """
        return self._merge_batch_beams(t, s) if t.shape.ndims >= 2 else t
seq2seq_helpers.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def build(self, input_shape):
        input_shape = tf.TensorShape(input_shape)
        if input_shape[-1].value is None:
            raise ValueError("Input to DotProductLayer must have the last dimension defined")
        if input_shape[-1].value != self._depth_size:
            self._space_transform = self.add_variable('kernel',
                                                      shape=(input_shape[-1].value, self._depth_size),
                                                      dtype=self.dtype,
                                                      trainable=True)
        else:
            self._space_transform = None
seq2seq_helpers.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def _compute_output_shape(self, input_shape):
        input_shape = tf.TensorShape(input_shape)
        input_shape = input_shape.with_rank_at_least(2)
        return input_shape[:-1].concatenate(self._output_size)
beam_search_decoder.py (project: seq2seq, author: google)
def output_size(self):
    return BeamDecoderOutput(
        logits=self.decoder.vocab_size,
        predicted_ids=tf.TensorShape([]),
        log_probs=tf.TensorShape([]),
        scores=tf.TensorShape([]),
        beam_parent_ids=tf.TensorShape([]),
        original_outputs=self.decoder.output_size)
basic_decoder.py (project: seq2seq, author: google)
def output_size(self):
    return DecoderOutput(
        logits=self.vocab_size,
        predicted_ids=tf.TensorShape([]),
        cell_output=self.cell.output_size)
attention_decoder.py (project: seq2seq, author: google)
def output_size(self):
    return AttentionDecoderOutput(
        logits=self.vocab_size,
        predicted_ids=tf.TensorShape([]),
        cell_output=self.cell.output_size,
        attention_scores=tf.shape(self.attention_values)[1:-1],
        attention_context=self.attention_values.get_shape()[-1])

