Python Operation() class — example source code

tensor_forest_test.py (project: lsdc, author: febert)
def testTrainingConstructionRegression(self):
    input_data = [[-1., 0.], [-1., 2.],  # node 1
                  [1., 0.], [1., -2.]]  # node 2
    input_labels = [0, 1, 2, 3]

    params = tensor_forest.ForestHParams(
        num_classes=4, num_features=2, num_trees=10, max_nodes=1000,
        split_after_samples=25, regression=True).fill()

    graph_builder = tensor_forest.RandomForestGraphs(params)
    graph = graph_builder.training_graph(input_data, input_labels)
    self.assertTrue(isinstance(graph, tf.Operation))
summaries.py (project: lsdc, author: febert)
def tf_structure(x, include_shapes=False, finished=None):
  """A postfix expression summarizing the TF graph.

  This is intended to be used as part of test cases to
  check for gross differences in the structure of the graph.
  The resulting string is not invertible or unambiguous
  and cannot be used to reconstruct the graph accurately.

  Args:
      x: a tf.Tensor or tf.Operation
      include_shapes: include shapes in the output string
      finished: a set of ops that have already been output

  Returns:
      A string representing the structure as a string of
      postfix operations.
  """
  if finished is None:
    finished = set()
  if isinstance(x, tf.Tensor):
    shape = x.get_shape().as_list()
    x = x.op
  else:
    shape = []
  if x in finished:
    return " <>"
  finished |= {x}
  result = ""
  if not _truncate_structure(x):
    for y in x.inputs:
      result += tf_structure(y, include_shapes, finished)
  if include_shapes:
    result += " %s" % (shape,)
  if x.type != "Identity":
    name = SHORT_NAMES.get(x.type, x.type.lower())
    result += " " + name
  return result
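
A minimal usage sketch (assuming a TensorFlow 1.x graph and that the module-level SHORT_NAMES and _truncate_structure helpers are importable alongside tf_structure):

import tensorflow as tf

with tf.Graph().as_default():
    a = tf.placeholder(tf.float32, [None, 3], name="a")
    b = tf.layers.dense(a, 2)
    # Postfix summary of everything feeding `b`; the exact tokens depend on
    # SHORT_NAMES and on the ops that tf.layers.dense creates.
    print(tf_structure(b, include_shapes=True))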
summaries.py (project: lsdc, author: febert)
def tf_print(x, depth=0, finished=None, printer=print):
  """A simple print function for a TensorFlow graph.

  Args:
      x: a tf.Tensor or tf.Operation
      depth: current printing depth
      finished: set of nodes already output
      printer: print function to use

  Returns:
      None. The structure is only written via `printer`.
  """

  if finished is None:
    finished = set()
  if isinstance(x, tf.Tensor):
    shape = x.get_shape().as_list()
    x = x.op
  else:
    shape = ""
  if x.type == "Identity":
    x = x.inputs[0].op
  if x in finished:
    printer("%s<%s> %s %s" % ("  "*depth, x.name, x.type, shape))
    return
  finished |= {x}
  printer("%s%s %s %s" % ("  "*depth, x.name, x.type, shape))
  if not _truncate_structure(x):
    for y in x.inputs:
      tf_print(y, depth+1, finished, printer=printer)
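
Under the same assumptions, a quick tf_print check; it writes one indented line per op feeding the given tensor, collapsing Identity nodes:

with tf.Graph().as_default():
    x = tf.placeholder(tf.float32, [None, 3], name="x")
    loss = tf.reduce_mean(tf.layers.dense(x, 2))
    tf_print(loss)  # prints the op tree, indented by depth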
summaries.py (project: lsdc, author: febert)
def tf_parameter_summary(x, printer=print, combine=True):
  """Summarize parameters by depth.

  Args:
      x: root of the subgraph (Tensor, Operation)
      printer: print function for output
      combine: combine layers by top-level scope
  """
  seq = tf_parameter_iter(x)
  if combine: seq = _combine_filter(seq)
  seq = reversed(list(seq))
  for name, total, shape in seq:
    printer("%10d %-20s %s" % (total, name, shape))
lstm_util.py (project: tensorlm, author: batzner)
def get_state_update_op(state_variables, new_states):
    """Returns an operation to update an LSTM's state variables.

    See get_state_variables() for more info.

    Args:
        state_variables (tuple[tf.contrib.rnn.LSTMStateTuple]): The LSTM's state variables.
        new_states (tuple[tf.contrib.rnn.LSTMStateTuple]): The new values for the state variables.
            new_states may contain state tuples whose batch size is smaller than
            max_batch_size. In that case, only the first rows of the corresponding
            state variables are updated.

    Returns:
        tf.Operation: An operation that updates the LSTM's state variables.
    """

    # Add an operation to update the train states with the last state tensors.
    update_ops = []
    for state_variable, new_state in zip(state_variables, new_states):
        # new_state[0] might be smaller than state_variable[0], because state_variable[0]
        # contains max_batch_size entries.

        # Get the update indices for both states in the tuple
        update_indices = (tf.range(0, tf.shape(new_state[0])[0]),
                          tf.range(0, tf.shape(new_state[1])[0]))
        update_ops.extend([
            tf.scatter_update(state_variable[0], update_indices[0], new_state[0]),
            tf.scatter_update(state_variable[1], update_indices[1], new_state[1])
        ])
    return tf.tuple(update_ops)
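
The get_state_variables() referenced above is not part of this snippet; a plausible sketch of it (a hypothetical reconstruction following the usual persistent-state pattern) looks like this:

def get_state_variables(max_batch_size, cell):
    """Creates non-trainable variables that persist the LSTM state across runs."""
    state_variables = []
    for state_c, state_h in cell.zero_state(max_batch_size, tf.float32):
        # One persistent (c, h) pair per LSTM layer.
        state_variables.append(tf.contrib.rnn.LSTMStateTuple(
            tf.Variable(state_c, trainable=False),
            tf.Variable(state_h, trainable=False)))
    return tuple(state_variables)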
lstm_util.py (project: tensorlm, author: batzner)
def get_state_reset_op(state_variables, cell, max_batch_size):
    """Returns an operation to set each variable in a list of LSTMStateTuples to zero.

    See get_state_variables() for more info.

    Args:
        state_variables (tuple[tf.contrib.rnn.LSTMStateTuple]): The LSTM's state variables.
        cell (tf.contrib.rnn.MultiRNNCell): A MultiRNNCell consisting of multiple LSTMCells.
        max_batch_size (int): The maximum size of batches that will be fed to the LSTMCell.

    Returns:
        tf.Operation: An operation that sets the LSTM's state to zero.
    """
    zero_states = cell.zero_state(max_batch_size, tf.float32)
    return get_state_update_op(state_variables, zero_states)
mlp.py (project: auDeep, author: auDeep)
def optimize(self,
                 loss: tf.Tensor,
                 learning_rate: float) -> tf.Operation:
        with tf.variable_scope("optimize"):
            optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)

            return optimizer.minimize(loss, name="train_op")
base.py (project: auDeep, author: auDeep)
def train_op(self) -> tf.Operation:
        """
        Returns an operation for performing a single training step.

        Returns
        -------
        tf.Operation
            An operation for performing a single training step
        """
        return self.graph.get_collection("train_op")[0]
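
This assumes the op was registered in the "train_op" graph collection at construction time; the registering side would be a one-liner such as the following sketch (optimizer and loss being whatever the model built earlier):

tf.add_to_collection("train_op", optimizer.minimize(loss))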
helpers.py (project: odin, author: imito)
def get_operation_footprint(op):
  """ Trace back the inputs of given Op and record all:
  * placholders
  * variables
  * ops
  Those are related to given op.

  The final footprint is concatenated string of all variables,
  placeholders, constants, and Ops

  Note
  ----
  This is just a fair attempt to create short identification of a
  tenorflow Op
  """
  if not isinstance(op, tf.Operation) and hasattr(op, 'op'):
    op = op.op
  var = []
  placeholder = []
  const = []
  ops = [op.type]
  inputs = list(op._inputs)
  while len(inputs) > 0:
    i = inputs.pop()
    o = i.op
    ops.append(o.type)
    if o.type == "VariableV2":
      var.append(i)
    elif o.type == "Placeholder":
      placeholder.append(i)
    elif o.type == "Const":
      const.append(i)
    inputs = list(o._inputs) + inputs
  return ':'.join([get_normalized_name(v) for v in var]) + '|' +\
         ':'.join([get_normalized_name(p) for p in placeholder]) + '|' +\
         ':'.join([get_normalized_name(c) for c in const]) + '|' +\
         ':'.join([j.split(':')[0] for j in ops])
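
A usage sketch (TF 1.x; get_normalized_name is a module-internal helper, so the returned string is illustrative only):

x = tf.placeholder(tf.float32, [None, 4], name="X")
w = tf.Variable(tf.zeros([4, 2]), name="W")
y = tf.matmul(x, w)
# Four '|'-separated sections: variables, placeholders, constants, op types.
print(get_operation_footprint(y))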
helpers.py (project: odin, author: imito)
def __init__(self, inputs, outputs, updates=[], defaults={},
               training=None):
    self.training = training
    # ====== validate input ====== #
    if isinstance(inputs, Mapping):
      self.inputs_name = inputs.keys()
      inputs = inputs.values()
    elif not isinstance(inputs, (tuple, list)):
      inputs = [inputs]
    self.inputs = flatten_list(inputs, level=None)
    if not hasattr(self, 'inputs_name'):
      self.inputs_name = [i.name.split(':')[0] for i in self.inputs]
    # ====== defaults ====== #
    defaults = dict(defaults)
    self.defaults = defaults
    # ====== validate outputs ====== #
    return_list = True
    if not isinstance(outputs, (tuple, list)):
      outputs = (outputs,)
      return_list = False
    self.outputs = flatten_list(list(outputs), level=None)
    self.return_list = return_list
    # ====== validate updates ====== #
    if isinstance(updates, Mapping):
      updates = updates.items()
    with tf.control_dependencies(self.outputs):
      # create updates ops
      if not isinstance(updates, tf.Operation):
        updates_ops = []
        for update in updates:
          if isinstance(update, (tuple, list)):
            p, new_p = update
            updates_ops.append(tf.assign(p, new_p))
          else: # assumed already an assign op
            updates_ops.append(update)
        self.updates_ops = tf.group(*updates_ops)
      else: # already a TensorFlow Op
        self.updates_ops = updates
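
A construction sketch (assuming the rest of odin's Function wrapper, which runs the outputs together with updates_ops in a session when the instance is called):

X = tf.placeholder(tf.float32, [None, 8], name="X")
W = tf.Variable(tf.zeros([8, 1]), name="W")
y = tf.matmul(X, W)
# Each (variable, new_value) pair becomes a tf.assign op inside __init__.
f = Function(inputs=X, outputs=y, updates=[(W, W + 0.1)])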
nnblocks.py (project: ADD-GAN, author: zblasingame)
def reset_weights(self):
        """Returns a TensorFlow operation to resets TensorFlow weights
        so the model can be used again.

        Returns:
            list [tf.Operation]: List of operations to reassign weights.
        """

        weights = [entry['weights'] for entry in self.network]
        weights.extend([entry['biases'] for entry in self.network])

        return [weight.assign(tf.random_normal(weight.get_shape(), stddev=0.1))
                for weight in weights]
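
Typical use is to re-randomize the network between experiments, e.g. (a sketch, assuming an active session):

sess.run(model.reset_weights())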
main.py (project: dvae, author: dojoteef)
def device_fn(device):
    """ Returns a function that given a tf.Operation it returns what device to put it on """
    def function(operation):
        """ Given a tf.Operation returns what device to put it on """
        if operation.type in _CPU_OPERATIONS:
            return '/cpu:0'
        else:
            return device

    return function
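
A usage sketch (_CPU_OPERATIONS is a module-level set of op type names; TF 1.x accepts a function as the argument to tf.device):

with tf.Graph().as_default():
    with tf.device(device_fn('/gpu:0')):
        a = tf.random_normal([2, 2])
        b = tf.matmul(a, a)  # placed on /gpu:0 unless its type is in _CPU_OPERATIONS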
loop.py (project: SSD-Keras_Tensorflow, author: jedol)
import timeit as ti  # assumed import; ti.default_timer is used for timing below

def fit_generator(sess, num_iter, operations=[], batch_generator=None, inputs=[], outputs={},
                  static_inputs={}, print_interval=100):
    if not isinstance(operations, list):
        if isinstance(operations, tuple):
            operations = list(operations)
        else:
            assert isinstance(operations, tf.Operation)
            operations = [operations]
    if not isinstance(inputs, list) and not isinstance(inputs, tuple):
        assert isinstance(inputs, tf.Tensor)
        inputs = [inputs]
    if not isinstance(outputs, dict):
        assert isinstance(outputs, tf.Tensor)
        outputs = {'loss': outputs}

    # Materialize the dict views so they can be concatenated and zipped below.
    output_names = list(outputs.keys())
    output_tensors = list(outputs.values())

    tic = ti.default_timer()
    for step in range(1, num_iter+1):
        feed_dict = dict()
        if batch_generator is not None:
            feed_dict.update(dict(zip(inputs, next(batch_generator))))
        feed_dict.update(static_inputs)

        if step % print_interval == 0:
            output_values = sess.run(operations+output_tensors, feed_dict=feed_dict)[len(operations):]

            toc = ti.default_timer()
            eta = (toc-tic)/step*(num_iter-step)
            log = '[Step: {}/{} ETA: {:.0f}s]'.format(step, num_iter, eta)

            for output_name, output in zip(output_names, output_values):
                log += ' {}: {:.4f}'.format(output_name, output)

            print(log)
        else:
            _ = sess.run(operations, feed_dict=feed_dict)
    toc = ti.default_timer()
    log = '[Step: {}/{} Total: {:.0f}s]'.format(step, num_iter, toc-tic)
    print(log)
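
A toy end-to-end sketch (hypothetical names; assumes TF 1.x and NumPy):

import numpy as np

x_ph = tf.placeholder(tf.float32, [None, 1])
y_ph = tf.placeholder(tf.float32, [None, 1])
loss = tf.reduce_mean(tf.square(tf.layers.dense(x_ph, 1) - y_ph))
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

def batches():
    while True:
        x = np.random.rand(32, 1).astype(np.float32)
        yield x, 3. * x  # learn y = 3x

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    fit_generator(sess, 500, operations=[train_op], batch_generator=batches(),
                  inputs=[x_ph, y_ph], outputs={'loss': loss})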
control.py (project: tensorflow-extenteten, author: raviqqe)
def with_dependencies(dependencies, tensor):
    """
    This function is partially documented on tensorflow.org,
    but it cannot be found in the library itself.
    """
    with tf.control_dependencies(dependencies):
        if isinstance(tensor, tf.Tensor):
            return tf.identity(tensor)
        elif isinstance(tensor, tf.Operation):
            return tf.group(tensor)

        raise ValueError("{} must be tf.Tensor or tf.Operation."
                         .format(tensor))
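
A usage sketch: force a side effect to run before a value is produced.

counter = tf.Variable(0, name="counter")
increment = tf.assign_add(counter, 1)
loss = tf.constant(3.14)
# Evaluating loss_logged also runs the counter increment first.
loss_logged = with_dependencies([increment], loss)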
model.py (project: cxflow-tensorflow, author: Cognexa)
def _create_train_ops(self, dependencies: List[List[tf.Operation]], optimizer_config: Optional[dict]) -> None:
        """
        Create the train ops for training. In order to handle incomplete batches, there must be one train op for
        each possible number of non-empty towers. E.g. for 2 GPU training, one must define 2 train ops for 1 and 2
        towers respectively. The train ops must be named ``train_op_1``, ``train_op_2`` etc.,
        wherein the suffixed number stands for the number of towers.

        By default the train ops are constructed in the following way:
            - optimizer is created from the ``model.optimizer`` configuration dict
            - REGULARIZATION_LOSSES collection is summed to ``regularization_loss``
            - gradients minimizing the respective tower losses and ``regularization_loss`` are computed
            - for each number of non-empty towers
                - gradients of the respective towers are averaged and applied

        To implement a custom behavior, override this method and create your own op named as :py:attr:`TRAIN_OP_NAME`.

        .. code-block:: yaml
            :caption: example optimizer config

            model:
                optimizer:
                    class: RMSPropOptimizer
                    learning_rate: 0.001

        :param dependencies: a list of dependent operations (e.g. batch normalization updates) for each number of towers
        :param optimizer_config: optimizer configuration dict
        """
        if optimizer_config is None:
            raise ValueError('Optimizer config was not specified although it is required for creating the train op. '
                             'Please specify the configuration in `model.optimizer`.')
        grads_and_vars = []
        optimizer = create_optimizer(optimizer_config)
        regularization_losses = self.graph.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        regularization_loss = tf.reduce_sum(tf.stack(regularization_losses))
        if regularization_losses:
            logging.info('\tAdding regularization losses')
            logging.debug('\tRegularization losses: %s', [var.name for var in regularization_losses])
        for tower in self._towers:
            with tower:
                grads_and_vars.append(optimizer.compute_gradients(tf.reduce_mean(tower.loss) + regularization_loss))
        for i in range(len(self._towers)):
            with tf.control_dependencies(dependencies[i]):
                optimizer.apply_gradients(average_gradients(grads_and_vars[:(i + 1)]),
                                          name=BaseModel.TRAIN_OP_NAME + '_{}'.format(i + 1))
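
At training time, the caller then picks the op matching the number of non-empty towers, e.g. (a sketch; the exact lookup depends on how the graph scopes names):

n_nonempty = 2
train_op = graph.get_operation_by_name(BaseModel.TRAIN_OP_NAME + '_{}'.format(n_nonempty))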
base.py (project: dataset, author: analysiscenter)
def train(self, fetches=None, feed_dict=None, use_lock=False):   # pylint: disable=arguments-differ
        """ Train the model with the data provided

        Parameters
        ----------
        fetches : tuple, list
            a sequence of `tf.Operation` and/or `tf.Tensor` to calculate
        feed_dict : dict
            input data, where key is a placeholder name and value is a numpy value
        use_lock : bool
            if True, the whole train step is locked so that it can safely be called from multiple threads.

        Returns
        -------
        Calculated values of tensors in `fetches` in the same structure

        See also
        --------
        `Tensorflow Session run <https://www.tensorflow.org/api_docs/python/tf/Session#run>`_
        """
        with self.graph.as_default():
            _feed_dict = self._fill_feed_dict(feed_dict, is_training=True)
            if fetches is None:
                _fetches = tuple()
            else:
                _fetches = self._fill_fetches(fetches, default=None)

            if use_lock:
                self._train_lock.acquire()

            _all_fetches = []
            if self.train_step:
                _all_fetches += [self.train_step]
            if _fetches is not None:
                _all_fetches += [_fetches]
            if len(_all_fetches) > 0:
                _, output = self.session.run(_all_fetches, feed_dict=_feed_dict)
            else:
                output = None

            if use_lock:
                self._train_lock.release()

            return self._fill_output(output, _fetches)
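
A call sketch (hypothetical placeholder names; `loss` is a tf.Tensor defined by the model, and the batches are numpy arrays):

loss_value = model.train(fetches=[loss],
                         feed_dict={'images': images_batch, 'labels': labels_batch},
                         use_lock=True)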

