deeplab_model.py source code

python

Project: TF-deeplab · Author: chenxi116
  def _build_train_op(self):
    """Build training specific ops for the graph."""
    # Downsample the ground-truth labels to the spatial size of the predictions
    # (nearest neighbor, so class ids are not interpolated).
    labels_coarse = tf.image.resize_nearest_neighbor(self.labels,
      [tf.shape(self.pred)[1], tf.shape(self.pred)[2]])
    labels_coarse = tf.squeeze(labels_coarse, squeeze_dims=[3])
    self.labels_coarse = tf.to_int32(labels_coarse)

    # Ignore illegal labels: keep only pixels whose label is a valid class id (< num_classes).
    raw_pred = tf.reshape(self.logits, [-1, self.num_classes])
    raw_gt = tf.reshape(self.labels_coarse, [-1,])
    indices = tf.squeeze(tf.where(tf.less_equal(raw_gt, self.num_classes - 1)), 1)
    remain_pred = tf.gather(raw_pred, indices)
    remain_gt = tf.gather(raw_gt, indices)

    xent = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=remain_pred, 
      labels=remain_gt)
    self.cls_loss = tf.reduce_mean(xent, name='xent')
    self.cost = self.cls_loss + self._decay()
    # tf.summary.scalar('cost', self.cost)

    self.global_step = tf.Variable(0, name='global_step', trainable=False)
    # Polynomial ("poly") learning-rate decay with power 0.9, as in the DeepLab papers.
    self.learning_rate = tf.train.polynomial_decay(self.lrn_rate,
      self.global_step, self.lr_decay_step, power=0.9)
    # tf.summary.scalar('learning rate', self.learning_rate)

    tvars = tf.trainable_variables()

    if self.optimizer == 'sgd':
      optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)
    elif self.optimizer == 'mom':
      optimizer = tf.train.MomentumOptimizer(self.learning_rate, 0.9)
    else:
      raise NameError("Unknown optimizer type %s!" % self.optimizer)

    grads_and_vars = optimizer.compute_gradients(self.cost, var_list=tvars)
    # Per-variable learning-rate multipliers: the fc1_voc12 classifier layer is
    # trained at 10x the base rate (20x for its biases), everything else at 1x.
    var_lr_mult = {}
    for var in tvars:
      if var.op.name.find(r'fc1_voc12') > 0 and var.op.name.find(r'biases') > 0:
        var_lr_mult[var] = 20.
      elif var.op.name.find(r'fc1_voc12') > 0:
        var_lr_mult[var] = 10.
      else:
        var_lr_mult[var] = 1.
    # Scale each gradient by its variable's multiplier before applying the update.
    grads_and_vars = [((g if var_lr_mult[v] == 1 else tf.multiply(var_lr_mult[v], g)), v)
        for g, v in grads_and_vars]

    apply_op = optimizer.apply_gradients(grads_and_vars,
        global_step=self.global_step, name='train_step')

    # Run the parameter update together with any extra training ops collected by the model.
    train_ops = [apply_op] + self._extra_train_ops
    self.train_step = tf.group(*train_ops)

  # TODO(xpan): Consider batch_norm in contrib/layers/python/layers/layers.py
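
The "ignore illegal labels" step above is the piece most often reused outside this model: pixels whose label id falls outside [0, num_classes), such as the void label in PASCAL VOC annotations, are filtered out before the cross-entropy is computed, so they contribute no gradient. Below is a minimal, self-contained sketch of that pattern in the same TensorFlow 1.x style; the function name masked_softmax_xent and its arguments are hypothetical and not part of the project source.

import tensorflow as tf

def masked_softmax_xent(logits, labels, num_classes):
  """Mean softmax cross-entropy over valid pixels only (labels >= num_classes are ignored)."""
  raw_pred = tf.reshape(logits, [-1, num_classes])    # flatten to (num_pixels, num_classes)
  raw_gt = tf.reshape(tf.to_int32(labels), [-1])      # flatten to (num_pixels,)
  # Indices of pixels whose label is a real class id.
  valid = tf.squeeze(tf.where(tf.less_equal(raw_gt, num_classes - 1)), 1)
  pred = tf.gather(raw_pred, valid)
  gt = tf.gather(raw_gt, valid)
  xent = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=gt)
  return tf.reduce_mean(xent, name='xent')

The 10x/20x learning-rate multipliers on the fc1_voc12 variables mirror the original Caffe DeepLab solver settings, where the randomly initialized classifier layer is trained faster than the pretrained backbone.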