Example source code for the Python class Tensor()

activations.py (project: tflearn, author: tflearn)
def selu(x):
    """ SELU.

    Scaled Exponential Linear Unit.

    Arguments:
        x : A `Tensor` with type `float`, `double`, `int32`, `int64`, `uint8`,
            `int16`, or `int8`

    References:
        Self-Normalizing Neural Networks, Klambauer et al., 2017.

    Links:
        [https://arxiv.org/abs/1706.02515](https://arxiv.org/abs/1706.02515)

    """
    alpha = 1.6732632423543772848170429916717
    scale = 1.0507009873554804934193349852946
    return scale * tf.where(x >= 0.0, x, alpha * tf.nn.elu(x))
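
A minimal usage sketch (not part of the tflearn source; TensorFlow 1.x graph mode and the constant values below are assumptions for illustration):

# Hypothetical usage sketch: evaluate selu on a small constant tensor (TF 1.x graph mode assumed).
import tensorflow as tf

x = tf.constant([-1.0, 0.0, 1.0])
y = selu(x)                      # scale * x for x >= 0, scale * alpha * (exp(x) - 1) for x < 0
with tf.Session() as sess:
    print(sess.run(y))           # approximately [-1.1113, 0.0, 1.0507]
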
conv.py (project: tflearn, author: tflearn)
def global_avg_pool(incoming, name="GlobalAvgPool"):
    """ Global Average Pooling.

    Input:
        4-D Tensor [batch, height, width, in_channels].

    Output:
        2-D Tensor [batch, pooled dim]

    Arguments:
        incoming: `Tensor`. Incoming 4-D Tensor.
        name: A name for this layer (optional). Default: 'GlobalAvgPool'.

    """
    input_shape = utils.get_incoming_shape(incoming)
    assert len(input_shape) == 4, "Incoming Tensor shape must be 4-D"

    with tf.name_scope(name):
        inference = tf.reduce_mean(incoming, [1, 2])

    # Track output tensor.
    tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, inference)

    return inference
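
For intuition, a hedged sketch of the underlying computation using plain tf.reduce_mean (the input shape [None, 8, 8, 16] is illustrative):

# Equivalent computation sketch: average over the height and width axes of a 4-D tensor.
import tensorflow as tf

images = tf.placeholder(tf.float32, shape=[None, 8, 8, 16])
pooled = tf.reduce_mean(images, [1, 2])
print(pooled.get_shape().as_list())   # [None, 16]
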
core.py (project: tflearn, author: tflearn)
def custom_layer(incoming, custom_fn, **kwargs):
    """ Custom Layer.

    A custom layer that can apply any operation to the incoming `Tensor` or
    list of `Tensor`s. The custom function is passed as a parameter, along
    with its own parameters.

    Arguments:
        incoming : A `Tensor` or list of `Tensor`. Incoming tensor.
        custom_fn : A custom `function`, to apply some ops on incoming tensor.
        **kwargs: Some custom parameters that custom function might need.

    """
    name = "CustomLayer"
    if 'name' in kwargs:
        name = kwargs['name']
    with tf.name_scope(name):
        inference = custom_fn(incoming, **kwargs)

    return inference
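
A hedged usage sketch (the helper my_scale and its factor argument are made up for illustration); keyword arguments are forwarded to the custom function:

# Hypothetical usage sketch: wrap an arbitrary function as a layer.
import tensorflow as tf

def my_scale(x, factor=1.0):
    return x * factor

net = tf.placeholder(tf.float32, shape=[None, 10])
net = custom_layer(net, my_scale, factor=0.5)   # applies my_scale inside the "CustomLayer" name scope
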
core.py (project: tflearn, author: tflearn)
def reshape(incoming, new_shape, name="Reshape"):
    """ Reshape.

    A layer that reshapes the incoming layer's tensor output to the desired shape.

    Arguments:
        incoming: A `Tensor`. The incoming tensor.
        new_shape: A list of `int`. The desired shape.
        name: A name for this layer (optional).

    """

    with tf.name_scope(name) as scope:
        inference = incoming
        if isinstance(inference, list):
            inference = tf.concat(inference, 0)
            inference = tf.cast(inference, tf.float32)
        inference = tf.reshape(inference, shape=new_shape)

    inference.scope = scope

    # Track output tensor.
    tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, inference)

    return inference
core.py (project: tflearn, author: tflearn)
def flatten(incoming, name="Flatten"):
    """ Flatten.

    Flatten the incoming Tensor.

    Input:
        (2+)-D `Tensor`.

    Output:
        2-D `Tensor` [batch, flatten_dims].

    Arguments:
        incoming: `Tensor`. The incoming tensor.

    """
    input_shape = utils.get_incoming_shape(incoming)
    assert len(input_shape) > 1, "Incoming Tensor shape must be at least 2-D"
    dims = int(np.prod(input_shape[1:]))
    x = reshape(incoming, [-1, dims], name)

    # Track output tensor.
    tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, x)

    return x
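
A hedged usage sketch, assuming the tflearn helpers above are importable and a 4-D activation map (the shape [None, 7, 7, 64] is illustrative):

# Hypothetical usage sketch: flatten a convolutional feature map before a fully connected layer.
import tensorflow as tf

net = tf.placeholder(tf.float32, shape=[None, 7, 7, 64])
net = flatten(net)   # shape becomes [None, 7 * 7 * 64] = [None, 3136]
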
core.py (project: tflearn, author: tflearn)
def multi_target_data(name_list, shape, dtype=tf.float32):
    """ Multi Target Data.

    Create and concatenate multiple placeholders. To be used when a regression
    layer uses targets from different sources.

    Arguments:
        name_list: list of `str`. The names of the target placeholders.
        shape: list of `int`. The shape of the placeholders.
        dtype: `tf.type`, Placeholder data type (optional). Default: float32.

    Returns:
        A `Tensor` of the concatenated placeholders.

    """
    placeholders = []
    for i in range(len(name_list)):
        with tf.name_scope(name_list[i]):
            p = tf.placeholder(shape=shape, dtype=dtype, name='Y')
        if p not in tf.get_collection(tf.GraphKeys.TARGETS):
            tf.add_to_collection(tf.GraphKeys.TARGETS, p)
        placeholders.append(p)

    return tf.concat(placeholders, axis=0)
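
A hedged usage sketch (the placeholder names are illustrative): two target placeholders of the same shape, concatenated along the batch axis:

# Hypothetical usage sketch: build targets coming from two sources.
Y = multi_target_data(['target_a', 'target_b'], shape=[None, 10])   # a single concatenated Tensor
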
utils.py (project: tflearn, author: tflearn)
def get_layer_by_name(name_or_scope):
    """ get_layer.

    Retrieve the output tensor of a layer with the given name or scope.

    Arguments:
        name_or_scope: `str`. The name (or scope) given to the layer to
            retrieve.

    Returns:
        A Tensor.

    """
    # Track output tensor.
    c = tf.get_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name_or_scope)
    if len(c) == 0:
        raise Exception("No layer found for this name.")
    if len(c) > 1:
        return c
    return c[0]
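
A hedged usage sketch, assuming a layer was previously created with the default name used by global_avg_pool above:

# Hypothetical usage sketch: look up a layer's output tensor by its name.
pooled = get_layer_by_name('GlobalAvgPool')
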
gradient_moment.py (project: probabilistic_line_search, author: ProbabilisticNumerics)
def _GradMom(op, v, out_grad, batch_size, mom=2):
  """Wrapper function for the operation type-specific GradMom functions below.

  Inputs:
      :op: A tensorflow operation of type in VALID_TYPES.
      :v: The read-tensor of the trainable variable consumed by this operation.
      :out_grad: The tensor containing the gradient w.r.t. to the output of
          the op (as computed by ``tf.gradients``).
      :batch_size: Batch size ``m`` (constant integer or scalar int tf.Tensor)
      :mom: Integer moment desired (defaults to 2)."""

  with tf.name_scope(op.name+"_grad_mom"):
    if op.type == "MatMul":
      return _MatMulGradMom(op, v, out_grad, batch_size, mom)
    elif op.type == "Conv2D":
      return _Conv2DGradMom(op, v, out_grad, batch_size, mom)
    elif op.type == "Add":
      return _AddGradMom(op, v, out_grad, batch_size, mom)
    else:
      raise ValueError("Don't know how to compute gradient moment for "
          "variable {}, consumed by operation of type {}".format(v.name,
          op.type))
gradient_moment.py (project: probabilistic_line_search, author: ProbabilisticNumerics)
def _MatMulGradMom(op, W, out_grad, batch_size, mom=2):
  """Computes gradient moment for a weight matrix through a MatMul operation.

  Assumes ``Z=tf.matmul(A, W)``, where ``W`` is a d1xd2 weight matrix, ``A``
  are the nxd1 activations of the previous layer (n being the batch size).
  ``out_grad`` is the gradient w.r.t. ``Z``, as computed by ``tf.gradients()``.
  No transposes in the MatMul operation allowed.

  Inputs:
      :op: The MatMul operation
      :W: The weight matrix (the tensor, not the variable)
      :out_grad: The tensor of gradient w.r.t. to the output of the op
      :batch_size: Batch size n (constant integer or scalar int tf.Tensor)
      :mom: Integer moment desired (defaults to 2)"""

  assert op.type == "MatMul"
  t_a, t_b = op.get_attr("transpose_a"), op.get_attr("transpose_b")
  assert W is op.inputs[1] and not t_a and not t_b

  A = op.inputs[0]
  out_grad_pow = tf.pow(out_grad, mom)
  A_pow = tf.pow(A, mom)
  return tf.multiply(batch_size, tf.matmul(A_pow, out_grad_pow, transpose_a=True))
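
As a sanity check on the formula (my own sketch, not part of the project), the NumPy snippet below compares the batch mean of element-wise squared per-example gradients against batch_size * (A**2)^T (out_grad**2), under the assumption that the loss is a mean over the batch, so each per-example gradient is outer(A[i], n * out_grad[i]):

# Hypothetical NumPy check of the mom=2 case.
import numpy as np

n, d1, d2 = 4, 3, 2
A = np.random.randn(n, d1)
out_grad = np.random.randn(n, d2)   # gradient of the batch-mean loss w.r.t. Z = A.dot(W)

per_example = np.stack([np.outer(A[i], n * out_grad[i]) for i in range(n)])
direct = (per_example ** 2).mean(axis=0)
formula = n * (A ** 2).T.dot(out_grad ** 2)
print(np.allclose(direct, formula))   # True
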
rnn_core.py (project: sonnet, author: deepmind)
def zero_state(self, batch_size, dtype):
    """Return zero-filled state tensor(s).

    Args:
      batch_size: int, float, or unit Tensor representing the batch size.
      dtype: the data type to use for the state.

    Returns:
      If `state_size` is an int or TensorShape, then the return value is a
      `N-D` tensor of shape `[batch_size x state_size]` filled with zeros.

      If `state_size` is a nested list or tuple, then the return value is
      a nested list or tuple (of the same structure) of `2-D` tensors with
      the shapes `[batch_size x s]` for each s in `state_size`.
    """
    # Keep scope for backwards compatibility.
    with tf.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
      return rnn_cell_impl._zero_state_tensors(  # pylint: disable=protected-access
          self.state_size, batch_size, dtype)
basic.py (project: sonnet, author: deepmind)
def __init__(self, preserve_dims=1, name="batch_flatten"):
    """Constructs a BatchFlatten module.

    Args:
      preserve_dims: Number of leading dimensions that will not be reshaped.
          For example, given an input Tensor with shape `[B, H, W, C]`:
            * `preserve_dims=1` will return a Tensor with shape `[B, H*W*C]`.
            * `preserve_dims=2` will return a Tensor with
                shape `[B, H, W*C]`.
            * `preserve_dims=3` will return the input itself,
                shape `[B, H, W, C]`.
            * `preserve_dims=4` will return a Tensor with
                shape `[B, H, W, C, 1]`.
            * `preserve_dims>=5` will throw an error on build.
          The preserved dimensions can be unknown at building time.
      name: Name of the module.
    """
    super(BatchFlatten, self).__init__(
        shape=(-1,), preserve_dims=preserve_dims, name=name)
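
A hedged usage sketch (the input shape [2, 3, 4, 5] is illustrative), assuming Sonnet's BatchFlatten module:

# Hypothetical usage sketch of preserve_dims on a 4-D input.
import sonnet as snt
import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[2, 3, 4, 5])
print(snt.BatchFlatten(preserve_dims=1)(x).get_shape().as_list())   # [2, 60]
print(snt.BatchFlatten(preserve_dims=2)(x).get_shape().as_list())   # [2, 3, 20]
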
basic.py (project: sonnet, author: deepmind)
def _build(self):
    """Connects the TrainableTensor module into the graph.

    Returns:
      A Tensor of shape as determined in the constructor.
    """
    if "w" not in self._initializers:
      stddev = 1 / math.sqrt(np.prod(self._shape))
      self._initializers["w"] = tf.truncated_normal_initializer(stddev=stddev)

    self._w = tf.get_variable("w",
                              shape=self._shape,
                              dtype=self._dtype,
                              initializer=self._initializers["w"],
                              partitioner=self._partitioners.get("w", None),
                              regularizer=self._regularizers.get("w", None))
    return self._w
basic.py (project: sonnet, author: deepmind)
def _build(self, inputs):
    """Connects the `TileByDim` module into the graph.

    Args:
      inputs: `Tensor` to tile.

    Returns:
      The tiled tensor.
    """
    shape_inputs = inputs.get_shape().as_list()
    rank = len(shape_inputs)

    # Builds default lists for multiples to pass to `tf.tile`.
    full_multiples = [1] * rank

    # Updates lists with what the user provided.
    for dim, multiple in zip(self._dims, self._multiples):
      full_multiples[dim] = multiple

    return tf.tile(inputs, multiples=full_multiples)
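
For reference, a hedged sketch of the tf.tile call the module reduces to, with dims=[1] and multiples=[3] on a rank-2 input (values made up for illustration):

# Equivalent computation sketch: full_multiples would be [1, 3] here.
import tensorflow as tf

x = tf.constant([[1, 2]])
tiled = tf.tile(x, multiples=[1, 3])   # [[1, 2, 1, 2, 1, 2]], shape [1, 6]
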
basic.py (project: sonnet, author: deepmind)
def _build(self, inputs):
    """Connects the MergeDims module into the graph.

    Args:
      inputs: Tensor or a nested list of Tensors to merge. Its rank must be
          greater than or equal to `start` + `size`.

    Returns:
      The merged Tensor or a nested list of merged Tensors.

    Raises:
      ValueError: If any of the `inputs` tensors has insufficient rank.
    """
    if nest.is_sequence(inputs):
      merged_tensors = [self._merge(tensor) for tensor in nest.flatten(inputs)]
      return nest.pack_sequence_as(inputs, merged_tensors)

    # inputs is a single tf.Tensor
    return self._merge(inputs)
base_info_test.py (project: sonnet, author: deepmind)
def testModuleInfo_recursion(self):
    # pylint: disable=not-callable
    tf.reset_default_graph()
    dumb = DumbModule(name="dumb_a", no_nest=True)
    ph_0 = tf.placeholder(dtype=tf.float32, shape=(1, 10,))
    val = {"one": ph_0, "self": None}
    val["self"] = val
    dumb(val)
    def check(check_type):
      sonnet_collection = tf.get_default_graph().get_collection(
          base_info.SONNET_COLLECTION_NAME)
      connected_subgraph = sonnet_collection[0].connected_subgraphs[0]
      self.assertIsInstance(connected_subgraph.inputs["inputs"]["one"],
                            tf.Tensor)
      self.assertIsInstance(
          connected_subgraph.inputs["inputs"]["self"], check_type)
      self.assertIsInstance(connected_subgraph.outputs["one"], tf.Tensor)
      self.assertIsInstance(connected_subgraph.outputs["self"], check_type)
    check(dict)
    _copy_default_graph()
    check(base_info._UnserializableObject)
analyzers.py (project: transform, author: tensorflow)
def __init__(self, inputs, output_dtype_shape_and_is_asset, spec, name):
    for tensor in inputs:
      if not isinstance(tensor, tf.Tensor):
        raise ValueError('Analyzers can only accept `Tensor`s as inputs')
    self._inputs = inputs
    self._outputs = []
    self._output_is_asset_map = {}
    with tf.name_scope(name) as scope:
      self._name = scope
      for dtype, shape, is_asset in output_dtype_shape_and_is_asset:
        output_tensor = tf.placeholder(dtype, shape)
        if is_asset and output_tensor.dtype != tf.string:
          raise ValueError(('Tensor {} cannot represent an asset, because it '
                            'is not a string.').format(output_tensor.name))
        self._outputs.append(output_tensor)
        self._output_is_asset_map[output_tensor] = is_asset
    self._spec = spec
    tf.add_to_collection(ANALYZER_COLLECTION, self)
analyzers.py (project: transform, author: tensorflow)
def combine_analyzer(x, output_dtype, output_shape, combiner_spec, name):
  """Applies the combiner over the whole dataset.

  Args:
    x: An input `Tensor` or `SparseTensor`.
    output_dtype: The dtype of the output of the analyzer.
    output_shape: The shape of the output of the analyzer.
    combiner_spec: A subclass of CombinerSpec.
    name: Similar to a TF op name.  Used to define a unique scope for this
      analyzer, which can be used for debugging info.

  Returns:
    The combined values, which is a `Tensor` with type output_dtype and shape
    `output_shape`.  These must be compatible with the combiner_spec.
  """
  return Analyzer([x], [(output_dtype, output_shape, False)], combiner_spec,
                  name).outputs[0]
analyzers.py (project: transform, author: tensorflow)
def _numeric_combine(x, fn, reduce_instance_dims=True, name=None):
  """Apply an analyzer with _NumericCombineSpec to given input."""
  if not isinstance(x, tf.Tensor):
    raise TypeError('Expected a Tensor, but got %r' % x)

  if reduce_instance_dims:
    # If reducing over all dimensions, result is scalar.
    shape = ()
  elif x.shape.dims is not None:
    # If reducing over batch dimensions, with known shape, the result will be
    # the same shape as the input, but without the batch.
    shape = x.shape.as_list()[1:]
  else:
    # If reducing over batch dimensions, with unknown shape, the result will
    # also have unknown shape.
    shape = None
  return combine_analyzer(
      x, x.dtype, shape, _NumPyCombinerSpec(fn, reduce_instance_dims),
      name if name is not None else fn.__name__)
analyzers.py (project: transform, author: tensorflow)
def size(x, reduce_instance_dims=True, name=None):
  """Computes the total size of instances in a `Tensor` over the whole dataset.

  Args:
    x: A `Tensor`.
    reduce_instance_dims: By default collapses the batch and instance dimensions
        to arrive at a single scalar output. If False, only collapses the batch
        dimension and outputs a vector of the same shape as the input.
    name: (Optional) A name for this operation.

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  with tf.name_scope(name, 'size'):
    # Note: Calling `sum` defined in this module, not the builtin.
    return sum(tf.ones_like(x), reduce_instance_dims)
analyzers.py (project: transform, author: tensorflow)
def mean(x, reduce_instance_dims=True, name=None):
  """Computes the mean of the values of a `Tensor` over the whole dataset.

  Args:
    x: A `Tensor`.
    reduce_instance_dims: By default collapses the batch and instance dimensions
        to arrive at a single scalar output. If False, only collapses the batch
        dimension and outputs a vector of the same shape as the input.
    name: (Optional) A name for this operation.

  Returns:
    A `Tensor` containing the mean. If `x` is floating point, the mean will
    have the same type as `x`. If `x` is integral, the output is cast to float32
    for int8 and int16 and float64 for int32 and int64 (similar to the behavior
    of tf.truediv).
  """
  with tf.name_scope(name, 'mean'):
    # Note: Calling `sum` defined in this module, not the builtin.
    return tf.divide(
        sum(x, reduce_instance_dims), size(x, reduce_instance_dims))
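
A hedged sketch of how such an analyzer is typically used inside a tf.Transform preprocessing_fn (the feature name 'x' is illustrative):

# Hypothetical usage sketch: center a numeric feature with the dataset-wide mean.
def preprocessing_fn(inputs):
    x = inputs['x']
    return {'x_centered': x - mean(x)}
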

