optimizer.py source code

Language: Python

Project: tfutils | Author: neuroailab

The method below accumulates minibatch gradients into per-variable, non-trainable accumulator variables so that a single optimizer step can cover `num_minibatches` minibatches. A one-element flag variable, `mini_flag`, marks whether the current minibatch is the first of a cycle (overwrite the accumulators) or a later one (add to them).
import tensorflow as tf


# In the original file this is a method of an optimizer wrapper class:
# it expects the instance to carry `var_list`, `grads_and_vars`, and a
# one-element float variable `mini_flag`.
def accumulate_gradients(self, minibatch_grads, num_minibatches=1):
    """Accumulate gradients for `num_minibatches` minibatches."""
    if self.var_list is None:
        self.var_list = tf.trainable_variables()

    # One non-trainable accumulator per trainable variable, initialized
    # to zeros of the same shape.
    if self.grads_and_vars is None:
        self.grads_and_vars = [
            (tf.Variable(tf.zeros_like(var.initialized_value()),
                         dtype=tf.float32,
                         trainable=False),
             var) for var in self.var_list]

    # Add 1/num_minibatches * minibatch_grads to the accumulators.
    def _add_op(gv_tmp, mgv_tmp):
        # assign_add (rather than a bare tf.add) writes the running sum
        # back into the accumulator so it persists across session runs.
        return tf.assign_add(gv_tmp, tf.divide(mgv_tmp, num_minibatches))

    def _set_op(gv_tmp, mgv_tmp):
        return tf.assign(gv_tmp, tf.divide(mgv_tmp, num_minibatches))

    # mini_flag < 0.5 marks the first minibatch of a cycle: overwrite the
    # accumulator; on later minibatches, add to it. (true_fn/false_fn
    # replace the deprecated fn1/fn2 keyword arguments.)
    grads = [tf.cond(tf.less(self.mini_flag[0], 0.5),
                     true_fn=lambda: _set_op(gv[0], mgv[0]),
                     false_fn=lambda: _add_op(gv[0], mgv[0]))
             for (gv, mgv) in zip(self.grads_and_vars, minibatch_grads)]

    # Once every accumulator has been updated, flip the flag so the next
    # call takes the add branch.
    with tf.control_dependencies(grads):
        self.mini_flag = tf.assign(self.mini_flag,
                                   tf.constant([1], dtype=tf.float32))

    grads = [(only_grad, gv[1])
             for (gv, only_grad) in zip(self.grads_and_vars, grads)]
    return self.mini_flag, grads
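
As a usage illustration, here is a minimal sketch of a TF1 (graph-mode) training loop that could drive this method. The `GradAccumulator` holder class, the toy model (`x`, `w`, `loss`), and the `reset_flag` op are hypothetical stand-ins invented for the sketch, not part of tfutils; the original optimizer class presumably initializes `var_list`, `grads_and_vars`, and `mini_flag` itself.

# Hypothetical driver for the method above. Not part of tfutils.
import numpy as np
import tensorflow as tf

class GradAccumulator(object):
    """Minimal holder for the state used by accumulate_gradients."""
    def __init__(self):
        self.var_list = None
        self.grads_and_vars = None
        # 0 -> first minibatch of a cycle (assign); 1 -> accumulate (add).
        self.mini_flag = tf.Variable([0.], trainable=False)
    accumulate_gradients = accumulate_gradients  # bind the function above

num_minibatches = 4
x = tf.placeholder(tf.float32, [None, 10])
w = tf.Variable(tf.zeros([10, 1]))
loss = tf.reduce_mean(tf.square(tf.matmul(x, w)))

opt = tf.train.GradientDescentOptimizer(learning_rate=0.1)
acc = GradAccumulator()
flag_var = acc.mini_flag  # keep a handle to the variable for resetting
accum_op, _ = acc.accumulate_gradients(opt.compute_gradients(loss),
                                       num_minibatches=num_minibatches)
# Read the averaged gradients straight from the accumulator variables so
# the conditional accumulation ops are not re-run by the update itself.
apply_op = opt.apply_gradients(acc.grads_and_vars)
reset_flag = tf.assign(flag_var, [0.])

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(num_minibatches):
        batch = np.random.randn(8, 10).astype(np.float32)
        sess.run(accum_op, feed_dict={x: batch})  # control deps run the adds
    sess.run(apply_op)     # one optimizer step with the averaged gradients
    sess.run(reset_flag)   # next cycle starts by overwriting accumulators

Note that the update is applied from acc.grads_and_vars, the accumulator variables themselves, rather than from the cond outputs returned by the method; this keeps the optimizer step from re-triggering the conditional accumulation ops.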