Example source code for the Python class Summary()

__main__.py (project: Neural-Architecture-Search-with-RL, author: dhruvramani)
def add_summaries(self, sess):
        if self.config.load:
            path_ = "../results/tensorboard"
        else:
            path_ = "../bin/results/tensorboard"
        summary_writer_train = tf.summary.FileWriter(path_ + "/train", sess.graph)
        summary_writer_val = tf.summary.FileWriter(path_ + "/val", sess.graph)
        summary_writer_test = tf.summary.FileWriter(path_ + "/test", sess.graph)
        summary_writers = {'train': summary_writer_train, 'val': summary_writer_val, 'test': summary_writer_test}
        return summary_writers
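The dict of FileWriter objects returned above is typically consumed later in the training loop. A minimal usage sketch, assuming TensorFlow 1.x and hypothetical names loss_tensor, feed and step that are not part of the source:

# Hypothetical consumer of the writers returned by add_summaries (TF 1.x API).
loss_summary = tf.summary.scalar("loss", loss_tensor)  # loss_tensor is an assumed scalar tensor
merged = tf.summary.merge_all()

summary_str = sess.run(merged, feed_dict=feed)          # feed is an assumed feed dict
summary_writers['train'].add_summary(summary_str, global_step=step)
summary_writers['train'].flush()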
callbacks.py (project: deep-learning-keras-projects, author: jasmeetsb)
def set_model(self, model):
        self.model = model
        self.sess = K.get_session()
        if self.histogram_freq and self.merged is None:
            for layer in self.model.layers:

                for weight in layer.weights:
                    if hasattr(tf, 'histogram_summary'):
                        tf.histogram_summary(weight.name, weight)
                    else:
                        tf.summary.histogram(weight.name, weight)

                    if self.write_images:
                        w_img = tf.squeeze(weight)

                        shape = w_img.get_shape()
                        if len(shape) > 1 and shape[0] > shape[1]:
                            w_img = tf.transpose(w_img)

                        if len(shape) == 1:
                            w_img = tf.expand_dims(w_img, 0)

                        w_img = tf.expand_dims(tf.expand_dims(w_img, 0), -1)

                        if hasattr(tf, 'image_summary'):
                            tf.image_summary(weight.name, w_img)
                        else:
                            tf.summary.image(weight.name, w_img)

                if hasattr(layer, 'output'):
                    if hasattr(tf, 'histogram_summary'):
                        tf.histogram_summary('{}_out'.format(layer.name),
                                             layer.output)
                    else:
                        tf.summary.histogram('{}_out'.format(layer.name),
                                             layer.output)

        if hasattr(tf, 'merge_all_summaries'):
            self.merged = tf.merge_all_summaries()
        else:
            self.merged = tf.summary.merge_all()

        if self.write_graph:
            if hasattr(tf, 'summary') and hasattr(tf.summary, 'FileWriter'):
                self.writer = tf.summary.FileWriter(self.log_dir,
                                                    self.sess.graph)
            elif parse_version(tf.__version__) >= parse_version('0.8.0'):
                self.writer = tf.train.SummaryWriter(self.log_dir,
                                                     self.sess.graph)
            else:
                self.writer = tf.train.SummaryWriter(self.log_dir,
                                                     self.sess.graph_def)
        else:
            if hasattr(tf, 'summary') and hasattr(tf.summary, 'FileWriter'):
                self.writer = tf.summary.FileWriter(self.log_dir)
            else:
                self.writer = tf.train.SummaryWriter(self.log_dir)
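The set_model method above belongs to a TensorBoard-style Keras callback; Keras invokes it before training starts rather than it being called by user code. A hedged sketch of how such a callback is usually attached, where the model, the data and the log directory are assumptions rather than part of the source:

# Hypothetical wiring of the callback (Keras with the TensorFlow backend assumed).
from keras.callbacks import TensorBoard

tb = TensorBoard(log_dir='./logs', histogram_freq=1,
                 write_graph=True, write_images=False)
model.fit(x_train, y_train,
          validation_data=(x_val, y_val),  # needed when histogram_freq > 0
          epochs=10,
          callbacks=[tb])                  # Keras calls tb.set_model(model) before the first epoch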
MADDPG_Morvan_vector.py (project: gym-sandbox, author: suqi)
def __init__(self, sess, state_dim, action_dim, learning_rate, gamma, t_replace_iter,
                 a_ma, a2_ma, agent_id, agent_num):
        self.agent_id = agent_id
        self.agent_num = agent_num

        self.sess = sess
        self.s_dim = state_dim
        self.a_dim = action_dim
        self.lr = learning_rate
        self.gamma = gamma
        self.t_replace_iter = t_replace_iter
        self.t_replace_counter = 0

        with tf.variable_scope('Critic{}'.format(self.agent_id)):
            # Input (s, a), output q
            local_a = a_ma[agent_id]
            self.a_ma = tf.concat(a_ma, axis=1)
            self.q = self._build_critic_net(X_MA, self.a_ma, 'eval_net', trainable=True)

            # Input (s_, a_), output q_ for q_target
            a2_ma = tf.concat(a2_ma, axis=1)
            self.q_ = self._build_critic_net(X2_MA, a2_ma, 'target_net', trainable=False)    # target_q is based on a_ from Actor's target_net

            self.e_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic{}/eval_net'.format(agent_id))
            self.t_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic{}/target_net'.format(agent_id))

        with tf.variable_scope('target_q{}'.format(self.agent_id)):
            self.target_q = R + self.gamma * self.q_

        with tf.variable_scope('TD_error{}'.format(self.agent_id)):
            self.loss = tf.reduce_mean(tf.squared_difference(self.target_q, self.q))  # MSE

        with tf.variable_scope('C_train{}'.format(self.agent_id)):
            self.train_ops = []
            self.train_ops.append(tf.train.AdamOptimizer(self.lr).minimize(
                self.loss, var_list=self.e_params))  # C train only update c network, don't update a
            self.train_ops.append(self.loss)  # for tf.summary

        with tf.variable_scope('a_grad{}'.format(self.agent_id)):
            # tensor of gradients of each sample (None, a_dim)
            self.a_grads = tf.gradients(self.q, local_a)[0]  # only get dq/da, throw dq/dw
            self.train_ops.append(self.a_grads)
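The __init__ above only builds the critic graph; the excerpt does not show the update step. Below is a hedged sketch of the training step that commonly accompanies such a critic: the module-level tensors X_MA, X2_MA and R follow the references in the code above, while the method name learn, its signature and the feed layout are assumptions.

# Hypothetical per-step update for the critic (TF 1.x assumed; not part of the source).
def learn(self, s_ma, act_ma, r, s2_ma):
    # Hard replacement: copy eval-net weights into the target net every t_replace_iter updates.
    if self.t_replace_counter % self.t_replace_iter == 0:
        self.sess.run([tf.assign(t, e) for t, e in zip(self.t_params, self.e_params)])
    self.t_replace_counter += 1

    # act_ma: joint actions concatenated along axis 1, matching self.a_ma.
    # r: rewards shaped (batch, 1) so they broadcast against self.q_.
    # a2_ma is assumed to come from the actors' target nets driven by X2_MA, so it needs no feed.
    feed = {X_MA: s_ma, self.a_ma: act_ma, R: r, X2_MA: s2_ma}
    _, loss, a_grads = self.sess.run(self.train_ops, feed_dict=feed)
    return loss, a_grads

In practice the assign ops for the hard replacement would usually be built once in __init__ rather than on every call; they are built inline here only to keep the sketch short.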