Python examples: tf.get_collection()

BaseModel.py (project: kaggle-review, author: daxiongshu)
def _get_summary(self):
        # build the self.summ_op for tensorboard
        # This function could be overwritten
        if not self.flags.visualize or self.flags.visualize == 'none':
            return
        summ_collection = "{} {} {} summaries".format(self.flags.comp, self.flags.sol, self.flags.run_name)
        if len(tf.get_collection(tf.GraphKeys.SCALARS)):
            self.scaler_op = tf.summary.merge(tf.get_collection(tf.GraphKeys.SCALARS))
        if len(tf.get_collection(tf.GraphKeys.IMAGES)):
            self.image_op = tf.summary.merge(tf.get_collection(tf.GraphKeys.IMAGES))

        for i in tf.get_collection(tf.GraphKeys.SCALARS):
            tf.add_to_collection(summ_collection, i)
        for i in tf.get_collection(tf.GraphKeys.WEIGHTS):
            tf.add_to_collection(summ_collection, i)
        for i in tf.get_collection(tf.GraphKeys.FEATURE_MAPS):
            tf.add_to_collection(summ_collection, i)
        for i in tf.get_collection(tf.GraphKeys.IMAGES):
            tf.add_to_collection(summ_collection, i)
        for i in tf.get_collection(tf.GraphKeys.GRADIENTS):
            tf.add_to_collection(summ_collection, i)
        for i in tf.get_collection(tf.GraphKeys.EMBEDDINGS):
            tf.add_to_collection(summ_collection, i)
        self.summ_op = tf.summary.merge(tf.get_collection(summ_collection))
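
Note that SCALARS, IMAGES, WEIGHTS, FEATURE_MAPS, GRADIENTS and EMBEDDINGS are keys this project adds to tf.GraphKeys; they are not standard TensorFlow collection keys. A minimal sketch of the same custom-collection pattern using a plain string key (all names here are illustrative):

import tensorflow as tf

loss = tf.constant(1.0, name='loss')
summ = tf.summary.scalar('loss', loss)

# Any string can serve as a collection key; add_to_collection appends.
tf.add_to_collection('my_summaries', summ)

# Merge everything registered under that key into a single summary op.
summ_op = tf.summary.merge(tf.get_collection('my_summaries'))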
facenet.py (project: facerecognition, author: guoxiaolu)
def _add_loss_summaries(total_loss):
    """Add summaries for losses.

    Generates moving average for all losses and associated summaries for
    visualizing the performance of the network.

    Args:
      total_loss: Total loss from loss().
    Returns:
      loss_averages_op: op for generating moving averages of losses.
    """
    # Compute the moving average of all individual losses and the total loss.
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    losses = tf.get_collection('losses')
    loss_averages_op = loss_averages.apply(losses + [total_loss])

    # Attach a scalar summary to all individual losses and the total loss; do the
    # same for the averaged version of the losses.
    for l in losses + [total_loss]:
        # Name each loss as '(raw)' and name the moving average version of the loss
        # as the original loss name.
        tf.summary.scalar(l.op.name + ' (raw)', l)
        tf.summary.scalar(l.op.name, loss_averages.average(l))

    return loss_averages_op
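
A sketch of how the returned loss_averages_op is typically wired into the train step, as in the CIFAR-10 tutorial this function derives from (total_loss, lr and global_step are assumed to exist):

loss_averages_op = _add_loss_summaries(total_loss)

# The moving-average update must run before each gradient step.
with tf.control_dependencies([loss_averages_op]):
    opt = tf.train.GradientDescentOptimizer(lr)
    grads = opt.compute_gradients(total_loss)
train_op = opt.apply_gradients(grads, global_step=global_step)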
model.py (project: human-rl, author: gsastry)
def __init__(self, ob_space, ac_space, layers=[256], **kwargs):
        self.x = x = tf.placeholder(tf.float32, [None] + list(ob_space))

        rank = len(ob_space)

        if rank == 3: # pixel input
            for i in range(4):
                x = tf.nn.elu(conv2d(x, 32, "c{}".format(i + 1), [3, 3], [2, 2]))
        elif rank == 1: # plain features
            #x = tf.nn.elu(linear(x, 256, "l1", normalized_columns_initializer(0.01)))
            pass
        else:
            raise TypeError("observation space must have rank 1 or 3, got %d" % rank)

        x = flatten(x)

        for i, layer in enumerate(layers):
            x = tf.nn.elu(linear(x, layer, "l{}".format(i + 1), tf.contrib.layers.xavier_initializer()))

        self.logits = linear(x, ac_space, "action", tf.contrib.layers.xavier_initializer())
        self.vf = tf.reshape(linear(x, 1, "value", tf.contrib.layers.xavier_initializer()), [-1])
        self.sample = categorical_sample(self.logits, ac_space)[0, :]
        self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, tf.get_variable_scope().name)
        self.state_in = []
model.py (project: human-rl, author: gsastry)
def __init__(self, ob_space, ac_space, size=256, **kwargs):
        self.x = x = tf.placeholder(tf.float32, [None] + list(ob_space))

        for i in range(4):
            x = tf.nn.elu(conv2d(x, 32, "l{}".format(i + 1), [3, 3], [2, 2]))
        # introduce a "fake" batch dimension of 1 after flatten so that we can do GRU over time dim
        x = tf.expand_dims(flatten(x), 1)

        gru = rnn.GRUCell(size)

        h_init = np.zeros((1, size), np.float32)
        self.state_init = [h_init]
        h_in = tf.placeholder(tf.float32, [1, size])
        self.state_in = [h_in]

        gru_outputs, gru_state = tf.nn.dynamic_rnn(
            gru, x, initial_state=h_in, sequence_length=[size], time_major=True)
        x = tf.reshape(gru_outputs, [-1, size])
        self.logits = linear(x, ac_space, "action", normalized_columns_initializer(0.01))
        self.vf = tf.reshape(linear(x, 1, "value", normalized_columns_initializer(1.0)), [-1])
        self.state_out = [gru_state[:1]]
        self.sample = categorical_sample(self.logits, ac_space)[0, :]
        self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, tf.get_variable_scope().name)
model.py (project: human-rl, author: gsastry)
def __init__(self, ob_space, ac_space, size=256, **kwargs):
        self.x = x = tf.placeholder(tf.float32, [None] + list(ob_space))

        for i in range(4):
            x = tf.nn.elu(conv2d(x, 32, "l{}".format(i + 1), [3, 3], [2, 2]))
        # introduce a "fake" batch dimension of 1 after flatten so that we can do GRU over time dim
        x = tf.expand_dims(flatten(x), 1)

        gru = rnn.GRUCell(size)

        h_init = np.zeros((1, size), np.float32)
        self.state_init = [h_init]
        h_in = tf.placeholder(tf.float32, [1, size])
        self.state_in = [h_in]

        gru_outputs, gru_state = tf.nn.dynamic_rnn(
            gru, x, initial_state=h_in, sequence_length=[size], time_major=True)
        x = tf.reshape(gru_outputs, [-1, size])
        self.logits = linear(x, ac_space, "action", normalized_columns_initializer(0.01))
        self.vf = tf.reshape(linear(x, 1, "value", normalized_columns_initializer(1.0)), [-1])
        self.state_out = [gru_state[:1]]
        self.sample = categorical_sample(self.logits, ac_space)[0, :]
        self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, tf.get_variable_scope().name)
tf_util.py (project: distributional_perspective_on_RL, author: Kiwoo)
def scope_vars(scope, trainable_only=False):
    """
    Get variables inside a scope
    The scope can be specified as a string

    Parameters
    ----------
    scope: str or VariableScope
        scope in which the variables reside.
    trainable_only: bool
        whether or not to return only the variables that were marked as trainable.

    Returns
    -------
    vars: [tf.Variable]
        list of variables in `scope`.
    """
    return tf.get_collection(
        tf.GraphKeys.TRAINABLE_VARIABLES if trainable_only else tf.GraphKeys.GLOBAL_VARIABLES,
        scope=scope if isinstance(scope, str) else scope.name
    )
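
Illustrative usage of scope_vars (the scope and variable names are made up):

with tf.variable_scope('q_func'):
    w = tf.get_variable('w', [4, 2])
    b = tf.get_variable('b', [2], trainable=False)

all_vars = scope_vars('q_func')                         # [w, b]
train_vars = scope_vars('q_func', trainable_only=True)  # [w]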
cifar10.py (project: ml, author: hohoins)
def loss(logits, labels):
  """Add L2Loss to all the trainable variables.

  Add summary for "Loss" and "Loss/avg".
  Args:
    logits: Logits from inference().
    labels: Labels from distorted_inputs or inputs(). 1-D tensor
            of shape [batch_size]

  Returns:
    Loss tensor of type float.
  """
  # Calculate the average cross entropy loss across the batch.
  labels = tf.cast(labels, tf.int64)
  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits, name='cross_entropy_per_example')
  cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
  tf.add_to_collection('losses', cross_entropy_mean)

  # The total loss is defined as the cross entropy loss plus all of the weight
  # decay terms (L2 loss).
  return tf.add_n(tf.get_collection('losses'), name='total_loss')
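
For the tf.add_n over the 'losses' collection to include weight decay, each weighted layer must have pushed its L2 term into the same collection when its variables were created. A hedged sketch in the spirit of the tutorial's _variable_with_weight_decay helper (not shown in this snippet):

def _variable_with_weight_decay(name, shape, stddev, wd):
  var = tf.get_variable(name, shape,
                        initializer=tf.truncated_normal_initializer(stddev=stddev))
  if wd is not None:
    # Accumulate this variable's L2 penalty into the shared collection.
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var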
cifar10.py (project: ml, author: hohoins)
def _add_loss_summaries(total_loss):
  """Add summaries for losses in CIFAR-10 model.

  Generates moving average for all losses and associated summaries for
  visualizing the performance of the network.

  Args:
    total_loss: Total loss from loss().
  Returns:
    loss_averages_op: op for generating moving averages of losses.
  """
  # Compute the moving average of all individual losses and the total loss.
  loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
  losses = tf.get_collection('losses')
  loss_averages_op = loss_averages.apply(losses + [total_loss])

  # Attach a scalar summary to all individual losses and the total loss; do the
  # same for the averaged version of the losses.
  for l in losses + [total_loss]:
    # Name each loss as '(raw)' and name the moving average version of the loss
    # as the original loss name.
    tf.summary.scalar(l.op.name + ' (raw)', l)
    tf.summary.scalar(l.op.name, loss_averages.average(l))

  return loss_averages_op
cifar10.py (project: ml, author: hohoins)
def loss(logits, labels):
  """Add L2Loss to all the trainable variables.

  Add summary for "Loss" and "Loss/avg".
  Args:
    logits: Logits from inference().
    labels: Labels from distorted_inputs or inputs(). 1-D tensor
            of shape [batch_size]

  Returns:
    Loss tensor of type float.
  """
  # Calculate the average cross entropy loss across the batch.
  labels = tf.cast(labels, tf.int64)
  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits, name='cross_entropy_per_example')
  cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
  tf.add_to_collection('losses', cross_entropy_mean)

  # The total loss is defined as the cross entropy loss plus all of the weight
  # decay terms (L2 loss).
  return tf.add_n(tf.get_collection('losses'), name='total_loss')
cifar10.py (project: ml, author: hohoins)
def _add_loss_summaries(total_loss):
  """Add summaries for losses in CIFAR-10 model.

  Generates moving average for all losses and associated summaries for
  visualizing the performance of the network.

  Args:
    total_loss: Total loss from loss().
  Returns:
    loss_averages_op: op for generating moving averages of losses.
  """
  # Compute the moving average of all individual losses and the total loss.
  loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
  losses = tf.get_collection('losses')
  loss_averages_op = loss_averages.apply(losses + [total_loss])

  # Attach a scalar summary to all individual losses and the total loss; do the
  # same for the averaged version of the losses.
  for l in losses + [total_loss]:
    # Name each loss as '(raw)' and name the moving average version of the loss
    # as the original loss name.
    tf.summary.scalar(l.op.name + ' (raw)', l)
    tf.summary.scalar(l.op.name, loss_averages.average(l))

  return loss_averages_op
bbbc006.py (project: dcan-tensorflow, author: lisjin)
def loss(c_fuse, s_fuse, labels):
    """Add L2Loss to all the trainable variables.
    Add summary for "Loss" and "Loss/avg".
    Args:
        c_fuse: Contours output map from inference().
        s_fuse: Segments output map from inference().
        labels: Labels from distorted_inputs or inputs().

    Returns:
      Loss tensor of type float.
    """
    # Calculate the average cross entropy loss across the batch.

    # Split the labels tensor into contours and segments image tensors
    # Each has shape [FLAGS.batch_size, 696, 520, 1]
    contours_labels, segments_labels = tf.split(labels, 2, 3)

    _add_cross_entropy(contours_labels, c_fuse, 'c')
    _add_cross_entropy(segments_labels, s_fuse, 's')

    return tf.add_n(tf.get_collection('losses'), name='total_loss')
bbbc006.py (project: dcan-tensorflow, author: lisjin)
def _add_loss_summaries(total_loss):
    """Add summaries for losses in BBBC006 model.
    Generates moving average for all losses and associated summaries for
    visualizing the performance of the network.

    Args:
        total_loss: Total loss from loss().
    Returns:
        loss_averages_op: op for generating moving averages of losses.
    """
    # Compute the moving average of all individual losses and the total loss.
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    losses = tf.get_collection('losses')
    loss_averages_op = loss_averages.apply(losses + [total_loss])

    # Attach a scalar summary to all individual losses and the total loss; do the
    # same for the averaged version of the losses.
    for l in losses + [total_loss]:
        # Name each loss as '(raw)' and name the moving average version of the loss
        # as the original loss name.
        tf.summary.scalar(l.op.name + '_raw', l)
        tf.summary.scalar(l.op.name, loss_averages.average(l))

    return loss_averages_op
variables.py (project: Tensormodels, author: asheshjain399)
def add_variable(var, restore=True):
  """Adds a variable to the MODEL_VARIABLES collection.

    Optionally it will add the variable to  the VARIABLES_TO_RESTORE collection.
  Args:
    var: a variable.
    restore: whether the variable should be added to the
      VARIABLES_TO_RESTORE collection.

  """
  collections = [MODEL_VARIABLES]
  if restore:
    collections.append(VARIABLES_TO_RESTORE)
  for collection in collections:
    if var not in tf.get_collection(collection):
      tf.add_to_collection(collection, var)
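
Illustrative calls (MODEL_VARIABLES and VARIABLES_TO_RESTORE are string constants defined elsewhere in variables.py; the variable names below are made up):

weights = tf.get_variable('weights', [3, 3, 64, 64])
add_variable(weights)                      # tracked and restored

moving_mean = tf.get_variable('moving_mean', [64], trainable=False)
add_variable(moving_mean, restore=False)   # tracked, but excluded from restore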
variables.py (project: Tensormodels, author: asheshjain399)
def get_unique_variable(name):
  """Gets the variable uniquely identified by that name.

  Args:
    name: a name that uniquely identifies the variable.

  Returns:
    a tensorflow variable.

  Raises:
    ValueError: if no variable uniquely identified by the name exists.
  """
  candidates = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, name)
  if not candidates:
    raise ValueError('Couldn\'t find variable %s' % name)

  for candidate in candidates:
    if candidate.op.name == name:
      return candidate
  raise ValueError('Variable %s does not uniquely identify a variable' % name)
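
For example (the scope name is made up):

with tf.variable_scope('conv1'):
  weights = tf.get_variable('weights', [3, 3, 3, 64])

get_unique_variable('conv1/weights')  # returns `weights`
get_unique_variable('conv1')          # raises ValueError: the prefix matches,
                                      # but no variable is named exactly 'conv1'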
optimizer.py (project: tfutils, author: neuroailab)
def compute_gradients(self, loss, *args, **kwargs):
        train_vars = None
        if self.trainable_names is not None:
            log.info('All trainable vars:\n'+str([var.name for var in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)]))
            train_vars = []
            for scope_name in self.trainable_names:
                new_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope_name)
                if len(new_vars) == 0:
                    raise ValueError('The scope name, {}, you specified does not contain any trainable variables.'.format(scope_name))
                train_vars.extend(new_vars)
            log.info('Variables to be trained:\n'+str([var.name for var in train_vars]))
        if train_vars is not None:
            self.var_list = train_vars

        gvs = self._optimizer.compute_gradients(loss,
                                                var_list=train_vars,
                                                *args, **kwargs)
        if self.clip:
            # gradient clipping. Some gradients returned are 'None' because
            # no relation between the variable and loss; so we skip those.
            gvs = [(tf.clip_by_value(grad, -1., 1.), var)
                   for grad, var in gvs if grad is not None]
        return gvs
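
The clip-and-skip pattern in isolation, with an assumed optimizer and loss:

opt = tf.train.AdamOptimizer(1e-4)
gvs = opt.compute_gradients(loss)
# Variables unrelated to the loss come back with grad=None; skip them.
clipped_gvs = [(tf.clip_by_value(g, -1., 1.), v) for g, v in gvs if g is not None]
train_op = opt.apply_gradients(clipped_gvs)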
cnnpredictor.py (project: DmsMsgRcg, author: bshao001)
def __init__(self, session, model_scope, result_dir, result_file, k=1):
        """
        Args:
            model_scope: The variable_scope used for the trained model to be restored.
            session: The TensorFlow session used to run the prediction.
            result_dir: The full path to the folder in which the result file locates.
            result_file: The file that saves the training results.
            k: Optional. Number of elements to be predicted.
        """
        tf.train.import_meta_graph(os.path.join(result_dir, result_file + ".meta"))
        all_vars = tf.global_variables()
        model_vars = [var for var in all_vars if var.name.startswith(model_scope)]
        saver = tf.train.Saver(model_vars)
        saver.restore(session, os.path.join(result_dir, result_file))

        # Retrieve the Ops we 'remembered'.
        logits = tf.get_collection(model_scope+"logits")[0]
        self.images_placeholder = tf.get_collection(model_scope+"images")[0]
        self.keep_prob_placeholder = tf.get_collection(model_scope+"keep_prob")[0]

        # Add an Op that chooses the top k predictions. Apply softmax so that
        # we can have the probabilities (percentage) in the output.
        self.eval_op = tf.nn.top_k(tf.nn.softmax(logits), k=k)
        self.session = session
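
For these get_collection() lookups to succeed, the training script must have registered the ops under the same keys before saving. A hypothetical save-side counterpart (names assumed to mirror the prediction side):

tf.add_to_collection(model_scope + "logits", logits)
tf.add_to_collection(model_scope + "images", images_placeholder)
tf.add_to_collection(model_scope + "keep_prob", keep_prob_placeholder)
saver.save(session, os.path.join(result_dir, result_file))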
test_feature.py (project: tf-image-interpreter, author: ThoughtWorksInc)
def test_vgg():
  vgg = Vgg16()
  image_tensor = tf.placeholder(tf.float32)
  with tf.Session() as sess:
    vgg.build(image_tensor)
    init = tf.global_variables_initializer()
    sess.run(init)

    load_feature_layer_params('/Users/dtong/code/data/tf-image-interpreter/pretrain/vgg16_weights.npz', sess)

    for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES):
      print_op = tf.Print(v, [v], message=v.name, first_n=10)
      sess.run(print_op)

    roidb = RoiDb('val.txt', 2007)
    batch_gen = BatchGenerator(roidb)

    for i in range(10):
      image, scale, bboxes = batch_gen.next_batch()

      print(sess.run(vgg.conv5_3, feed_dict={image_tensor: image}))
net_model.py (project: 3D_CNN_jonas, author: 2015ZxEE)
def loss(logits, label_batch):
    """
    Add L2Loss to all the trainable variables.
    Add summary for "Loss" and "Loss/avg".
    Args:
        logits      -> logits from inference()
        label_batch -> 1D tensor of [batch_size]
    Rtns:
        total_loss  -> float tensor
    """
    # Calculate the average cross entropy loss across the batch.
    label_batch        = tf.cast(label_batch,tf.int64)
    cross_entropy      = tf.nn.sparse_softmax_cross_entropy_with_logits(
                                labels=label_batch, logits=logits,
                                name='cross_entropy_per_example')
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
    tf.add_to_collection('losses',cross_entropy_mean)

    # The total loss is defined as the cross entropy loss plus all of the weight
    # decay terms (L2 loss).
    return tf.add_n(tf.get_collection('losses'), name='total_loss')
net_model.py (project: 3D_CNN_jonas, author: 2015ZxEE)
def add_loss_summaries(total_loss):
    """
    Generates moving average for all losses and associated summaries for
    visualizing the performance of the network.
    Args:
        total_loss       -> total loss from loss()
    Rtns:
        loss_averages_op -> op for generating moving averages of losses
    """
    # Compute the moving average of all individual losses and the total loss.
    loss_averages    = tf.train.ExponentialMovingAverage(0.9, name='avg')
    losses           = tf.get_collection('losses')
    loss_averages_op = loss_averages.apply(losses + [total_loss])

    # Attach a scalar summary to all individual losses and the total loss; do the
    # same for the averaged version of the losses.
    for l in losses + [total_loss]:
        # Name each loss as '(raw)' and name the moving average version of the loss
        # as the original loss name.
        tf.summary.scalar(l.op.name + ' (raw)', l)
        tf.summary.scalar(l.op.name, loss_averages.average(l))
    return loss_averages_op
model.py (project: acdc_segmenter, author: baumgach)
def training_step(loss, optimizer_handle, learning_rate, **kwargs):
    '''
    Creates the optimisation operation which is executed in each training iteration of the network
    :param loss: The loss to be minimised
    :param optimizer_handle: A handle to one of the tf optimisers 
    :param learning_rate: Learning rate
    :param momentum: Optionally, you can also pass a momentum term to the optimiser. 
    :return: The training operation
    '''


    if 'momentum' in kwargs:
        momentum = kwargs.get('momentum')
        optimizer = optimizer_handle(learning_rate=learning_rate, momentum=momentum)
    else:
        optimizer = optimizer_handle(learning_rate=learning_rate)

    # The with statement is needed to make sure the tf contrib version of batch norm properly performs its updates
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_op = optimizer.minimize(loss)

    return train_op
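
A sketch of calling training_step with a momentum optimizer (loss assumed to exist):

# Batch-norm moving statistics live in tf.GraphKeys.UPDATE_OPS, so the
# control_dependencies block above refreshes them on every train step.
train_op = training_step(loss, tf.train.MomentumOptimizer,
                         learning_rate=0.01, momentum=0.9)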
DDPG.py (project: RL_NFSP, author: Richard-An)
def __init__(self, sess, action_dim, action_bound, learning_rate, t_replace_iter):
        self.sess = sess
        self.a_dim = action_dim
        self.action_bound = action_bound
        self.lr = learning_rate
        self.t_replace_iter = t_replace_iter
        self.t_replace_counter = 0

        with tf.variable_scope('Actor'):
            # input s, output a
            self.a = self._build_net(S, scope='eval_net', trainable=True)

            # input s_, output a, get a_ for critic
            self.a_ = self._build_net(S_, scope='target_net', trainable=False)

        self.e_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/eval_net')
        self.t_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/target_net')
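
A hedged sketch of the hard update these two collections enable (elsewhere in DDPG.py this runs every t_replace_iter steps; sess is assumed):

t_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/target_net')
e_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/eval_net')
hard_update = [tf.assign(t, e) for t, e in zip(t_params, e_params)]
sess.run(hard_update)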
autoencoder_net.py (project: unsupervised-2017-cvprw, author: imatge-upc)
def tower_loss(name_scope, autoencoder, clips):
    # calculate reconstruction loss
    rec_loss = tf.reduce_mean(tf.abs(clips-autoencoder.rec_vid))

    weight_decay_loss_list = tf.get_collection('losses', name_scope)
    weight_decay_loss = 0.0
    if len(weight_decay_loss_list) > 0:
        weight_decay_loss = tf.add_n(weight_decay_loss_list)

    tf.add_to_collection('losses', rec_loss)
    losses = tf.get_collection('losses', name_scope)

    # Calculate the total loss for the current tower.
    total_loss = tf.add_n(losses, name='total_loss')

    return total_loss, rec_loss, weight_decay_loss
mfb_dis_net.py (project: unsupervised-2017-cvprw, author: imatge-upc)
def tower_loss(name_scope, mfb, use_pretrained_encoder, encoder_gradient_ratio=1.0):
    # get reconstruction and ground truth
    ac_loss = mfb.ac_loss

    weight_decay_loss_list = tf.get_collection('losses', name_scope)
    if use_pretrained_encoder:
        if encoder_gradient_ratio == 0.0:
            weight_decay_loss_list = [var for var in weight_decay_loss_list \
                                      if 'c3d' not in var.name and 'mapping' not in var.name]

    weight_decay_loss = 0.0
    if len(weight_decay_loss_list) > 0:
        weight_decay_loss = tf.add_n(weight_decay_loss_list)

    total_loss = weight_decay_loss * 100 + ac_loss

    return total_loss, ac_loss, weight_decay_loss
train.py (project: tf_classification, author: visipedia)
def get_trainable_variables(trainable_scopes):
    """Returns a list of variables to train.
    Returns:
        A list of variables to train by the optimizer.
    """

    if trainable_scopes is None:
        return tf.trainable_variables()

    trainable_scopes = [scope.strip() for scope in trainable_scopes]

    variables_to_train = []
    for scope in trainable_scopes:
        variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
        variables_to_train.extend(variables)
    return variables_to_train
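
For example, to fine-tune only the classification head of a pretrained network (the scope names and total_loss are hypothetical):

head_vars = get_trainable_variables(['InceptionV3/Logits', 'InceptionV3/AuxLogits'])
train_op = tf.train.AdamOptimizer(1e-4).minimize(total_loss, var_list=head_vars)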
logistic_regression.py (project: TFExperiments, author: gnperdue)
def _create_summaries(self):
        base_summaries = tf.get_collection(tf.GraphKeys.SUMMARIES)
        with tf.name_scope('summaries/train'):
            train_loss = tf.summary.scalar('loss', self.loss)
            train_histo_loss = tf.summary.histogram(
                'histogram_loss', self.loss
            )
            train_summaries = [train_loss, train_histo_loss]
            train_summaries.extend(base_summaries)
            self.train_summary_op = tf.summary.merge(train_summaries)
        with tf.name_scope('summaries/valid'):
            valid_loss = tf.summary.scalar('loss', self.loss)
            valid_histo_loss = tf.summary.histogram(
                'histogram_loss', self.loss
            )
            valid_summaries = [valid_loss, valid_histo_loss]
            valid_summaries.extend(base_summaries)
            self.valid_summary_op = tf.summary.merge(valid_summaries)
model_mnist.py (project: TFExperiments, author: gnperdue)
def _create_summaries(self):
        base_summaries = tf.get_collection(tf.GraphKeys.SUMMARIES)
        with tf.name_scope('summaries/train'):
            train_loss = tf.summary.scalar('loss', self.loss)
            train_histo_loss = tf.summary.histogram(
                'histogram_loss', self.loss
            )
            train_summaries = [train_loss, train_histo_loss]
            train_summaries.extend(base_summaries)
            self.train_summary_op = tf.summary.merge(train_summaries)
        with tf.name_scope('summaries/valid'):
            valid_loss = tf.summary.scalar('loss', self.loss)
            valid_histo_loss = tf.summary.histogram(
                'histogram_loss', self.loss
            )
            valid_summaries = [valid_loss, valid_histo_loss]
            valid_summaries.extend(base_summaries)
            self.valid_summary_op = tf.summary.merge(valid_summaries)
architecture.py (project: squeezenet, author: mtreml)
def _add_loss_summaries(total_loss):
  """Add summaries for losses in CIFAR-10 model.

  Generates moving average for all losses and associated summaries for
  visualizing the performance of the network.

  Args:
    total_loss: Total loss from loss().
  Returns:
    loss_averages_op: op for generating moving averages of losses.
  """
  # Compute the moving average of all individual losses and the total loss.
  loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
  losses = tf.get_collection('losses')
  loss_averages_op = loss_averages.apply(losses + [total_loss])

  # Attach a scalar summary to all individual losses and the total loss; do the
  # same for the averaged version of the losses.
  for l in losses + [total_loss]:
    # Name each loss as '(raw)' and name the moving average version of the loss
    # as the original loss name.
    tf.summary.scalar(l.op.name + ' (raw)', l)
    tf.summary.scalar(l.op.name, loss_averages.average(l))

  return loss_averages_op
utils.py (project: DRLModule, author: halleanwoo)
def update_prmt_dqn(scope_main):
    q_prmts = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope_main + "/q_network")
    target_prmts = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope_main + "/target_network")
    # Copy q-network parameters into the target network.
    sess.run([tf.assign(t, q) for t, q in zip(target_prmts, q_prmts)])
    print("updating target-network parameters...")

#
# def local2global():


# def global2local():




# ========= Error Raise =========
clock_model.py (project: deep-time-reading, author: felixduvallet)
def _loss_shared(logits, labels):
    """Add L2Loss to all the trainable variables.

    Add summary for "Loss" and "Loss/avg".
    Args:
      logits: Logits from inference().
      labels: Labels from distorted_inputs or inputs(). 1-D tensor
              of shape [batch_size]

    Returns:
      Loss tensor of type float.
    """
    # Calculate the average cross entropy loss across the batch.
    labels = tf.cast(labels, tf.int64)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits, name='cross_entropy_per_example')
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
    tf.add_to_collection('losses', cross_entropy_mean)

    # The total loss is defined as the cross entropy loss plus all of the weight
    # decay terms (L2 loss).
    return tf.add_n(tf.get_collection('losses'), name='total_loss')
clock_model.py (project: deep-time-reading, author: felixduvallet)
def _add_loss_summaries(total_loss):
    """Add summaries for losses.

    Generates moving average for all losses and associated summaries for
    visualizing the performance of the network.

    Args:
      total_loss: Total loss from loss().
    Returns:
      loss_averages_op: op for generating moving averages of losses.
    """
    # Compute the moving average of all individual losses and the total loss.
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    losses = tf.get_collection('losses')
    loss_averages_op = loss_averages.apply(losses + [total_loss])

    # Attach a scalar summary to all individual losses and the total loss; do the
    # same for the averaged version of the losses.
    for l in losses + [total_loss]:
        # Name each loss as '(raw)' and name the moving average version of the loss
        # as the original loss name.
        tf.summary.scalar(l.op.name + ' (raw)', l)
        tf.summary.scalar(l.op.name, loss_averages.average(l))

    return loss_averages_op

