Python xavier_initializer() usage examples

policies.py (project: rl_algorithms, author: DanielTakeshi)
def __init__(self, sess, ob_dim, ac_dim):
        super().__init__(sess, ob_dim, ac_dim)

        # Placeholders for our inputs. Note that actions are floats.
        self.ob_no = tf.placeholder(shape=[None, ob_dim], name="obs", dtype=tf.float32)
        self.ac_na = tf.placeholder(shape=[None, ac_dim], name="act", dtype=tf.float32)
        self.adv_n = tf.placeholder(shape=[None], name="adv", dtype=tf.float32)
        self.n     = tf.shape(self.ob_no)[0]

        # Specific to the continuous case: the log std vector is a learned parameter.
        # Also make batch versions so we get shape (n,a) (or (1,a)), not (a,).
        self.logstd_a     = tf.get_variable("logstd", [ac_dim], initializer=tf.zeros_initializer())
        self.oldlogstd_a  = tf.placeholder(name="oldlogstd", shape=[ac_dim], dtype=tf.float32)
        self.logstd_na    = tf.ones(shape=(self.n,ac_dim), dtype=tf.float32) * self.logstd_a
        self.oldlogstd_na = tf.ones(shape=(self.n,ac_dim), dtype=tf.float32) * self.oldlogstd_a

        # The policy network; its output (the "logits") is the mean of a Gaussian.
        # An "old" version is kept as a placeholder for KL divergences.
        self.hidden1 = layers.fully_connected(self.ob_no, 
                num_outputs=32,
                weights_initializer=layers.xavier_initializer(uniform=True),
                activation_fn=tf.nn.relu)
        self.hidden2 = layers.fully_connected(self.hidden1, 
                num_outputs=32,
                weights_initializer=layers.xavier_initializer(uniform=True),
                activation_fn=tf.nn.relu)
        self.mean_na = layers.fully_connected(self.hidden2, 
                num_outputs=ac_dim,
                weights_initializer=layers.xavier_initializer(uniform=True),
                activation_fn=None)
        self.oldmean_na = tf.placeholder(shape=[None, ac_dim], name='oldmean', dtype=tf.float32)

        # Diagonal Gaussian distribution for sampling actions and log probabilities.
        self.logprob_n  = utils.gauss_log_prob(mu=self.mean_na, logstd=self.logstd_na, x=self.ac_na)
        self.sampled_ac = (tf.random_normal(tf.shape(self.mean_na)) * tf.exp(self.logstd_na) + self.mean_na)[0]

        # Loss function that we'll differentiate to get the policy gradient.
        self.surr_loss = - tf.reduce_mean(self.logprob_n * self.adv_n)
        self.stepsize  = tf.placeholder(shape=[], dtype=tf.float32) 
        self.update_op = tf.train.AdamOptimizer(self.stepsize).minimize(self.surr_loss)

        # KL divergence and entropy among Gaussian(s).
        self.kl  = tf.reduce_mean(utils.gauss_KL(self.mean_na, self.logstd_na, self.oldmean_na, self.oldlogstd_na))
        # Diagonal Gaussian entropy: 0.5*k*log(2*pi*e) + sum(logstd),
        # since 0.5*log|Sigma| = sum(logstd).
        self.ent = 0.5 * ac_dim * tf.log(2.*np.pi*np.e) + tf.reduce_sum(self.logstd_a)
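
The utils.gauss_log_prob and utils.gauss_KL helpers are referenced above but not shown. Below is a minimal sketch of what they presumably compute for diagonal Gaussians, matching the shapes at the call sites (the real utils module may differ):

import numpy as np
import tensorflow as tf

def gauss_log_prob(mu, logstd, x):
    # Log-density of x under N(mu, diag(exp(logstd))^2), summed over actions.
    var = tf.exp(2.0 * logstd)
    logp = -0.5 * tf.square(x - mu) / var - 0.5 * tf.log(2.0 * np.pi) - logstd
    return tf.reduce_sum(logp, axis=1)

def gauss_KL(mu1, logstd1, mu2, logstd2):
    # KL(N1 || N2) for diagonal Gaussians, summed over dimensions.
    var1, var2 = tf.exp(2.0 * logstd1), tf.exp(2.0 * logstd2)
    kl = logstd2 - logstd1 + (var1 + tf.square(mu1 - mu2)) / (2.0 * var2) - 0.5
    return tf.reduce_sum(kl, axis=1)
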
doc2vec_train_eval_word_embeds.py (project: kaggle_redefining_cancer_treatment, author: jorgemf)
def model(self,
              input_doc, input_words, output_label, batch_size,
              vocabulary_size=VOCABULARY_SIZE,
              embedding_size=EMBEDDINGS_SIZE,
              context_size=D2V_CONTEXT_SIZE,
              num_negative_samples=D2V_NEGATIVE_NUM_SAMPLES,
              learning_rate_initial=D2V_LEARNING_RATE_INITIAL,
              learning_rate_decay=D2V_LEARNING_RATE_DECAY,
              learning_rate_decay_steps=D2V_LEARNING_RATE_DECAY_STEPS):
        self.global_step = training_util.get_or_create_global_step()

        # inputs/outputs
        input_doc = tf.reshape(input_doc, [batch_size])
        input_words = tf.reshape(input_words, [batch_size, context_size])
        output_label = tf.reshape(output_label, [batch_size, 1])

        # embeddings
        word_embeddings = _load_embeddings(vocabulary_size, embedding_size,
                                           filename_prefix='word_embeddings',
                                           from_dir=DIR_DATA_DOC2VEC)
        self.word_embeddings = tf.constant(value=word_embeddings,
                                           shape=[vocabulary_size, embedding_size],
                                           dtype=tf.float32, name='word_embeddings')
        self.doc_embeddings = tf.get_variable(shape=[self.dataset.num_docs, embedding_size],
                                              initializer=layers.xavier_initializer(),
                                              dtype=tf.float32, name='doc_embeddings')
        words_embed = tf.nn.embedding_lookup(self.word_embeddings, input_words)
        # document vectors come from the trainable doc_embeddings table
        doc_embed = tf.nn.embedding_lookup(self.doc_embeddings, input_doc)
        # average the context word embeddings and append the doc vector
        words_embed_average = tf.reduce_mean(words_embed, axis=1)
        embed = tf.concat([words_embed_average, doc_embed], axis=1)

        # NCE loss
        nce_weights = tf.get_variable(shape=[vocabulary_size, embedding_size * 2],
                                      initializer=layers.xavier_initializer(),
                                      dtype=tf.float32, name='nce_weights')
        nce_biases = tf.get_variable(shape=[vocabulary_size],
                                     initializer=layers.xavier_initializer(),
                                     dtype=tf.float32, name='nce_biases')
        nce_loss = tf.nn.nce_loss(weights=nce_weights, biases=nce_biases,
                                  labels=output_label,
                                  inputs=embed, num_sampled=num_negative_samples,
                                  num_classes=vocabulary_size)
        self.loss = tf.reduce_mean(nce_loss)
        tf.summary.scalar('loss', self.loss)

        # learning rate & optimizer
        self.learning_rate = tf.train.exponential_decay(learning_rate_initial, self.global_step,
                                                        learning_rate_decay_steps,
                                                        learning_rate_decay,
                                                        staircase=True, name='learning_rate')
        tf.summary.scalar('learning_rate', self.learning_rate)
        sgd = tf.train.GradientDescentOptimizer(self.learning_rate)
        self.optimizer = sgd.minimize(self.loss, global_step=self.global_step)
        return None
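
_load_embeddings is project-specific and not shown. A hypothetical sketch, assuming the pretrained word vectors were saved as a .npy matrix (the file naming here is illustrative, not the project's actual format):

import os
import numpy as np

def _load_embeddings(vocabulary_size, embedding_size,
                     filename_prefix='word_embeddings', from_dir='.'):
    # Hypothetical loader: expects a [vocabulary_size, embedding_size] matrix
    # saved with np.save; the real project may use a different layout.
    path = os.path.join(from_dir, '{}.npy'.format(filename_prefix))
    embeddings = np.load(path).astype(np.float32)
    assert embeddings.shape == (vocabulary_size, embedding_size)
    return embeddings
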
word2vec_train.py (project: kaggle_redefining_cancer_treatment, author: jorgemf)
def model(self, input_label, output_word, batch_size, vocabulary_size=VOCABULARY_SIZE,
              embedding_size=EMBEDDINGS_SIZE, num_negative_samples=W2V_NEGATIVE_NUM_SAMPLES,
              learning_rate_initial=W2V_LEARNING_RATE_INITIAL,
              learning_rate_decay=W2V_LEARNING_RATE_DECAY,
              learning_rate_decay_steps=W2V_LEARNING_RATE_DECAY_STEPS):
        self.global_step = training_util.get_or_create_global_step()

        # inputs/outputs
        input_label_reshaped = tf.reshape(input_label, [batch_size])
        output_word_reshaped = tf.reshape(output_word, [batch_size, 1])

        # embeddings
        matrix_dimension = [vocabulary_size, embedding_size]
        self.embeddings = tf.get_variable(shape=matrix_dimension,
                                          initializer=layers.xavier_initializer(), dtype=tf.float32,
                                          name='embeddings')
        embed = tf.nn.embedding_lookup(self.embeddings, input_label_reshaped)

        # NCE loss
        stddev = 1.0 / math.sqrt(embedding_size)
        nce_weights = tf.Variable(tf.truncated_normal(matrix_dimension, stddev=stddev))
        nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
        nce_loss = tf.nn.nce_loss(weights=nce_weights, biases=nce_biases,
                                  labels=output_word_reshaped, inputs=embed,
                                  num_sampled=num_negative_samples, num_classes=vocabulary_size)
        self.loss = tf.reduce_mean(nce_loss)
        tf.summary.scalar('loss', self.loss)

        # learning rate & optimizer
        self.learning_rate = tf.train.exponential_decay(learning_rate_initial, self.global_step,
                                                        learning_rate_decay_steps,
                                                        learning_rate_decay, staircase=True,
                                                        name='learning_rate')
        tf.summary.scalar('learning_rate', self.learning_rate)
        sgd = tf.train.GradientDescentOptimizer(self.learning_rate)
        self.optimizer = sgd.minimize(self.loss, global_step=self.global_step)

        # saver to save the model
        self.saver = tf.train.Saver()
        # fail fast if the loss ever becomes NaN
        self.loss = tf.check_numerics(self.loss, 'loss is nan')

        # embeddings
        config = projector.ProjectorConfig()
        embedding = config.embeddings.add()
        embedding.tensor_name = self.embeddings.name
        filename_tsv = '{}_{}.tsv'.format('word2vec_dataset', vocabulary_size)
        if not os.path.exists(self.log_dir):
            os.makedirs(self.log_dir)
        shutil.copy(os.path.join(DIR_DATA_WORD2VEC, filename_tsv), self.log_dir)
        embedding.metadata_path = filename_tsv
        summary_writer = tf.summary.FileWriter(self.log_dir)
        projector.visualize_embeddings(summary_writer, config)

        # normalize the embeddings to save them
        norm = tf.sqrt(tf.reduce_sum(tf.square(self.embeddings), 1, keep_dims=True))
        self.normalized_embeddings = self.embeddings / norm

        return None
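
With the rows of normalized_embeddings at unit length, cosine similarity reduces to a plain matrix product, which makes a cheap nearest-neighbour sanity check possible. A sketch of such an evaluation (nearest_neighbors is an illustrative helper, not part of the project):

import tensorflow as tf

def nearest_neighbors(normalized_embeddings, query_ids, k=8):
    # Rows are unit-length, so cosine similarity is a matrix product.
    query_vecs = tf.nn.embedding_lookup(normalized_embeddings, query_ids)
    similarity = tf.matmul(query_vecs, normalized_embeddings, transpose_b=True)
    # k + 1 because the most similar row is the query word itself.
    _, indices = tf.nn.top_k(similarity, k=k + 1)
    return indices[:, 1:]
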
speech_model.py (project: speechT, author: timediv)
def _convolution(self, value, filter_width, stride, input_channels, out_channels, apply_non_linearity=True):
    """
    Apply a convolutional layer

    Args:
      value: the input tensor to apply the convolution on
      filter_width: the width of the filter (kernel)
      stride: the stride of the filter (kernel)
      input_channels: the number of input channels
      out_channels: the number of output channels
      apply_non_linearity: whether to apply a non-linearity

    Returns:
      the output after convolution, with bias added and an optional
      non-linearity applied
    """

    layer_id = self.convolution_count
    self.convolution_count += 1

    with tf.variable_scope('convolution_layer_{}'.format(layer_id)) as layer:
      # Create the filter and bias variables
      filters = tf.get_variable('filters', shape=[filter_width, input_channels, out_channels],
                                dtype=tf.float32, initializer=xavier_initializer())
      bias = tf.Variable(tf.constant(0.0, shape=[out_channels]), name='bias')

      # Apply convolution
      convolution_out = tf.nn.conv1d(value, filters, stride, 'SAME', use_cudnn_on_gpu=True, name='convolution')

      # Create summary
      with tf.name_scope('summaries'):
        # add depth of 1 (=grayscale) leading to shape [filter_width, input_channels, 1, out_channels]
        kernel_with_depth = tf.expand_dims(filters, 2)

        # to tf.summary.image format [batch_size=out_channels, height=filter_width, width=input_channels, channels=1]
        kernel_transposed = tf.transpose(kernel_with_depth, [3, 0, 1, 2])

        # this displays the first 3 filters of the output channels
        tf.summary.image(layer.name + 'filters', kernel_transposed, max_outputs=3)
        tf.summary.histogram(layer.name + 'filters', filters)

        tf.summary.image(layer.name + 'bias', tf.reshape(bias, [1, 1, out_channels, 1]))
        tf.summary.histogram(layer.name + 'bias', bias)

      # Add bias
      convolution_out = tf.nn.bias_add(convolution_out, bias)

      if apply_non_linearity:
        # Add non-linearity
        activations = tf.nn.relu(convolution_out, name='activation')
        tf.summary.histogram(layer.name + 'activation', activations)
        return activations, out_channels
      else:
        return convolution_out, out_channels
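
For reference, xavier_initializer() (Glorot initialization) in its default uniform form draws weights from U(-limit, limit) with limit = sqrt(6 / (fan_in + fan_out)), chosen to keep activation variance roughly constant across layers. A quick NumPy check of that formula (the fan sizes are arbitrary example values):

import numpy as np

fan_in, fan_out = 512, 256  # arbitrary example fan sizes
limit = np.sqrt(6.0 / (fan_in + fan_out))
w = np.random.uniform(-limit, limit, size=(fan_in, fan_out))
# For U(-limit, limit), std = limit / sqrt(3) = sqrt(2 / (fan_in + fan_out)).
print(limit, w.std())
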
pspnet_model.py (project: woipv, author: Panaetius)
def __reslayer(self, inputs, in_filters, out_filters, stride=1):
        """ A regular resnet block """
        with tf.variable_scope('sub1'):
            kernel = tf.get_variable('weights', [3, 3, in_filters, out_filters],
                                     initializer=xavier_initializer(
                                         dtype=tf.float32),
                                     dtype=tf.float32)
            conv = tf.nn.conv2d(inputs, kernel, [1, stride, stride, 1],
                                padding='SAME',
                                name='conv')
            batch_norm = self.__batch_norm_wrapper(conv, decay=0.9999)
            conv = tf.nn.elu(batch_norm, 'elu')

        with tf.variable_scope('sub2'):
            kernel = tf.get_variable('weights',
                                     [3, 3, out_filters, out_filters],
                                     initializer=xavier_initializer(
                                         dtype=tf.float32),
                                     dtype=tf.float32)
            conv = tf.nn.conv2d(conv, kernel, [1, 1, 1, 1], padding='SAME',
                                name='conv1')
            bias = self.__batch_norm_wrapper(conv, decay=0.9999)

        with tf.variable_scope('subadd'):
            if in_filters != out_filters:
                kernel = tf.get_variable('weights', [1, 1, in_filters, out_filters],
                                         initializer=xavier_initializer(
                                         dtype=tf.float32),
                                         dtype=tf.float32)
                inputs = tf.nn.conv2d(
                    inputs, kernel, [1, stride, stride, 1], padding='SAME')
            bias += inputs
            conv = tf.nn.elu(bias, 'elu')

            num = np.power(2, np.floor(np.log2(out_filters) / 2))

            grid = self.__put_activations_on_grid(conv, (int(num),
                                                         int(out_filters /
                                                             num)))
            tf.summary.image('sub2/activations', grid, max_outputs=1)

        return conv
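
__put_activations_on_grid is not shown; judging by the call sites it tiles the feature maps into a (rows, cols) grid image for tf.summary.image. A minimal sketch of that assumed behaviour (the real helper may also pad or normalize):

import tensorflow as tf

def put_activations_on_grid(activations, grid):
    # Tile the feature maps of the first batch element into one grid image.
    rows, cols = grid  # rows * cols must equal the channel count
    maps = tf.transpose(activations[0], [2, 0, 1])  # [C, H, W]
    h = tf.shape(maps)[1]
    w = tf.shape(maps)[2]
    x = tf.reshape(maps, tf.stack([rows, cols, h, w]))
    x = tf.transpose(x, [0, 2, 1, 3])  # [rows, H, cols, W]
    return tf.reshape(x, tf.stack([1, rows * h, cols * w, 1]))
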
pspnet_model.py (project: woipv, author: Panaetius)
def __reslayer_bottleneck(self, inputs, in_filters, out_filters, stride=1):
        """ A regular resnet block """
        with tf.variable_scope('sub1'):
            kernel = tf.get_variable('weights', [1, 1, in_filters, out_filters // 4],
                                     initializer=xavier_initializer(
                                         dtype=tf.float32),
                                     dtype=tf.float32)
            conv = tf.nn.conv2d(inputs, kernel, [1, stride, stride, 1],
                                padding='SAME',
                                name='conv')
            batch_norm = self.__batch_norm_wrapper(conv, decay=0.9999)
            conv = tf.nn.elu(batch_norm, 'elu')

        with tf.variable_scope('sub2'):
            kernel = tf.get_variable('weights',
                                     [3, 3, out_filters // 4, out_filters // 4],
                                     initializer=xavier_initializer(
                                         dtype=tf.float32),
                                     dtype=tf.float32)
            conv = tf.nn.conv2d(conv, kernel, [1, 1, 1, 1], padding='SAME',
                                name='conv1')
            batch_norm = self.__batch_norm_wrapper(conv, decay=0.9999)
            conv = tf.nn.elu(batch_norm, 'elu')

        with tf.variable_scope('sub3'):
            kernel = tf.get_variable('weights', [1, 1, out_filters // 4, out_filters],
                                     initializer=xavier_initializer(
                                         dtype=tf.float32),
                                     dtype=tf.float32)
            conv = tf.nn.conv2d(conv, kernel, [1, 1, 1, 1],
                                padding='SAME',
                                name='conv')
            batch_norm = self.__batch_norm_wrapper(conv, decay=0.9999)

        with tf.variable_scope('subadd'):
            if in_filters != out_filters:
                kernel = tf.get_variable('weights', [1, 1, in_filters, out_filters],
                                         initializer=xavier_initializer(
                                         dtype=tf.float32),
                                         dtype=tf.float32)
                inputs = tf.nn.conv2d(
                    inputs, kernel, [1, stride, stride, 1], padding='SAME')
            batch_norm += inputs
            conv = tf.nn.elu(batch_norm, 'elu')

            num = np.power(2, np.floor(np.log2(out_filters) / 2))

            grid = self.__put_activations_on_grid(conv, (int(num),
                                                         int(out_filters /
                                                             num)))
            tf.summary.image('sub3/activations', grid, max_outputs=1)

        return conv
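
__batch_norm_wrapper is also project-specific. Below is a minimal sketch of a batch-norm wrapper with manually maintained moving averages, matching the decay argument used above (the axes handling and variable names are assumptions; the real wrapper may differ):

import tensorflow as tf

def batch_norm_wrapper(inputs, decay=0.9999, axes=(0, 1, 2),
                       epsilon=1e-3, is_training=True):
    # Per-channel batch norm for NHWC inputs; moving averages are kept in
    # non-trainable variables and `axes` are the axes to average over.
    channels = inputs.get_shape().as_list()[-1]
    scale = tf.get_variable('bn_scale', [channels],
                            initializer=tf.ones_initializer())
    beta = tf.get_variable('bn_beta', [channels],
                           initializer=tf.zeros_initializer())
    pop_mean = tf.get_variable('bn_pop_mean', [channels], trainable=False,
                               initializer=tf.zeros_initializer())
    pop_var = tf.get_variable('bn_pop_var', [channels], trainable=False,
                              initializer=tf.ones_initializer())
    if is_training:
        batch_mean, batch_var = tf.nn.moments(inputs, list(axes))
        update_mean = tf.assign(pop_mean,
                                pop_mean * decay + batch_mean * (1 - decay))
        update_var = tf.assign(pop_var,
                               pop_var * decay + batch_var * (1 - decay))
        with tf.control_dependencies([update_mean, update_var]):
            return tf.nn.batch_normalization(inputs, batch_mean, batch_var,
                                             beta, scale, epsilon)
    return tf.nn.batch_normalization(inputs, pop_mean, pop_var,
                                     beta, scale, epsilon)
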
model.py (project: woipv, author: Panaetius)
def __reslayer(self, inputs, in_filters, out_filters, stride=1):
        """ A regular resnet block """
        with tf.variable_scope('sub1'):
            kernel = tf.get_variable('weights', [3, 3, in_filters, out_filters],
                                     initializer=xavier_initializer(
                                         dtype=tf.float32),
                                     dtype=tf.float32)
            conv = tf.nn.conv2d(inputs, kernel, [1, stride, stride, 1],
                                padding='SAME',
                                name='conv')
            batch_norm = self.__batch_norm_wrapper(conv, decay=0.9999, shape=[0, 1, 2])
            conv = tf.nn.elu(batch_norm, 'elu')

        with tf.variable_scope('sub2'):
            kernel = tf.get_variable('weights',
                                     [3, 3, out_filters, out_filters],
                                     initializer=xavier_initializer(
                                         dtype=tf.float32),
                                     dtype=tf.float32)
            conv = tf.nn.conv2d(conv, kernel, [1, 1, 1, 1], padding='SAME',
                                name='conv1')
            bias = self.__batch_norm_wrapper(conv, decay=0.9999, shape=[0, 1, 2])

        with tf.variable_scope('subadd'):
            if in_filters != out_filters:
                kernel = tf.get_variable('weights', [1, 1, in_filters, out_filters],
                                         initializer=xavier_initializer(
                                         dtype=tf.float32),
                                         dtype=tf.float32)
                inputs = tf.nn.conv2d(
                    inputs, kernel, [1, stride, stride, 1], padding='SAME')
            bias += inputs
            conv = tf.nn.elu(bias, 'elu')

            num = np.power(2, np.floor(np.log2(out_filters) / 2))

            grid = self.__put_activations_on_grid(conv, (int(num),
                                                         int(out_filters /
                                                             num)))
            tf.summary.image('sub2/activations', grid, max_outputs=1)

        return conv
model.py (project: woipv, author: Panaetius)
def __reslayer_bottleneck(self, inputs, in_filters, out_filters, stride=1):
        """ A regular resnet block """
        with tf.variable_scope('sub1'):
            kernel = tf.get_variable('weights', [1, 1, in_filters, out_filters // 4],
                                     initializer=xavier_initializer(
                                         dtype=tf.float32),
                                     dtype=tf.float32)
            conv = tf.nn.conv2d(inputs, kernel, [1, stride, stride, 1],
                                padding='SAME',
                                name='conv')
            batch_norm = self.__batch_norm_wrapper(conv, decay=0.9999, shape=[0, 1, 2])
            conv = tf.nn.elu(batch_norm, 'elu')

        with tf.variable_scope('sub2'):
            kernel = tf.get_variable('weights',
                                     [3, 3, out_filters // 4, out_filters // 4],
                                     initializer=xavier_initializer(
                                         dtype=tf.float32),
                                     dtype=tf.float32)
            conv = tf.nn.conv2d(conv, kernel, [1, 1, 1, 1], padding='SAME',
                                name='conv1')
            batch_norm = self.__batch_norm_wrapper(conv, decay=0.9999, shape=[0, 1, 2])
            conv = tf.nn.elu(batch_norm, 'elu')

        with tf.variable_scope('sub3'):
            kernel = tf.get_variable('weights', [1, 1, out_filters // 4, out_filters],
                                     initializer=xavier_initializer(
                                         dtype=tf.float32),
                                     dtype=tf.float32)
            conv = tf.nn.conv2d(conv, kernel, [1, 1, 1, 1],
                                padding='SAME',
                                name='conv')
            batch_norm = self.__batch_norm_wrapper(conv, decay=0.9999, shape=[0, 1, 2])

        with tf.variable_scope('subadd'):
            if in_filters != out_filters:
                kernel = tf.get_variable('weights', [1, 1, in_filters, out_filters],
                                         initializer=xavier_initializer(
                                         dtype=tf.float32),
                                         dtype=tf.float32)
                inputs = tf.nn.conv2d(
                    inputs, kernel, [1, stride, stride, 1], padding='SAME')
            batch_norm += inputs
            conv = tf.nn.elu(batch_norm, 'elu')

            num = np.power(2, np.floor(np.log2(out_filters) / 2))

            grid = self.__put_activations_on_grid(conv, (int(num),
                                                         int(out_filters /
                                                             num)))
            tf.summary.image('sub3/activations', grid, max_outputs=1)

        return conv
model.py (project: woipv, author: Panaetius)
def __reslayer(self, inputs, in_filters, out_filters, stride=1):
        """ A regular resnet block """
        with tf.variable_scope('sub1'):
            kernel = tf.get_variable('weights', [3, 3, in_filters, out_filters],
                                     initializer=xavier_initializer(
                                         dtype=tf.float32),
                                     dtype=tf.float32)
            conv = tf.nn.conv2d(inputs, kernel, [1, stride, stride, 1],
                                padding='SAME',
                                name='conv')
            batch_norm = self.__batch_norm_wrapper(conv, shape=[0, 1, 2, 3])
            conv = tf.nn.elu(batch_norm, 'elu')

        with tf.variable_scope('sub2'):
            kernel = tf.get_variable('weights',
                                     [3, 3, out_filters, out_filters],
                                     initializer=xavier_initializer(
                                         dtype=tf.float32),
                                     dtype=tf.float32)
            conv = tf.nn.conv2d(conv, kernel, [1, 1, 1, 1], padding='SAME',
                                name='conv1')
            bias = self.__batch_norm_wrapper(conv, shape=[0, 1, 2, 3])

        with tf.variable_scope('subadd'):
            if in_filters != out_filters:
                # inputs = tf.nn.avg_pool(inputs, (1, stride, stride, 1),
                #                         (1, stride, stride, 1), 'SAME')
                # inputs = tf.pad(inputs, [[0, 0], [0, 0], [0, 0],
                #                          [(out_filters - in_filters) // 2,
                #                           (out_filters - in_filters) // 2]])
                kernel = tf.get_variable('weights', [1, 1, in_filters, out_filters],
                                     initializer=xavier_initializer(
                                         dtype=tf.float32),
                                     dtype=tf.float32)
                inputs = tf.nn.conv2d(inputs, kernel, [1, stride, stride, 1], padding='SAME')
            bias += inputs
            conv = tf.nn.elu(bias, 'elu')

            num = np.power(2, np.floor(np.log2(out_filters) / 2))

            grid = self.__put_activations_on_grid(conv, (int(num),
                                                         int(out_filters /
                                                             num)))
            tf.summary.image('sub2/activations', grid, max_outputs=1)

        return conv
model.py (project: woipv, author: Panaetius)
def __reslayer_bottleneck(self, inputs, in_filters, out_filters, stride=1):
        """ A regular resnet block """
        with tf.variable_scope('sub1'):
            kernel = tf.get_variable('weights', [1, 1, in_filters, out_filters // 4],
                                     initializer=xavier_initializer(
                                         dtype=tf.float32),
                                     dtype=tf.float32)
            conv = tf.nn.conv2d(inputs, kernel, [1, stride, stride, 1],
                                padding='SAME',
                                name='conv')
            batch_norm = self.__batch_norm_wrapper(conv, shape=[0, 1, 2, 3])
            conv = tf.nn.elu(batch_norm, 'elu')

        with tf.variable_scope('sub2'):
            kernel = tf.get_variable('weights',
                                     [3, 3, out_filters // 4, out_filters // 4],
                                     initializer=xavier_initializer(
                                         dtype=tf.float32),
                                     dtype=tf.float32)
            conv = tf.nn.conv2d(conv, kernel, [1, 1, 1, 1], padding='SAME',
                                name='conv1')
            batch_norm = self.__batch_norm_wrapper(conv, shape=[0, 1, 2, 3])
            conv = tf.nn.elu(batch_norm, 'elu')

        with tf.variable_scope('sub3'):
            kernel = tf.get_variable('weights', [1, 1, out_filters // 4, out_filters],
                                     initializer=xavier_initializer(
                                         dtype=tf.float32),
                                     dtype=tf.float32)
            conv = tf.nn.conv2d(conv, kernel, [1, 1, 1, 1],
                                padding='SAME',
                                name='conv')
            batch_norm = self.__batch_norm_wrapper(conv, shape=[0, 1, 2, 3])

        with tf.variable_scope('subadd'):
            if in_filters != out_filters:
                # inputs = tf.nn.avg_pool(inputs, (1, stride, stride, 1),
                #                         (1, stride, stride, 1), 'SAME')
                # inputs = tf.pad(inputs, [[0, 0], [0, 0], [0, 0],
                #                          [(out_filters - in_filters) // 2,
                #                           (out_filters - in_filters) // 2]])
                kernel = tf.get_variable('weights', [1, 1, in_filters, out_filters],
                                     initializer=xavier_initializer(
                                         dtype=tf.float32),
                                     dtype=tf.float32)
                inputs = tf.nn.conv2d(inputs, kernel, [1, stride, stride, 1], padding='SAME')
            batch_norm += inputs
            conv = tf.nn.elu(batch_norm, 'elu')

            num = np.power(2, np.floor(np.log2(out_filters) / 2))

            grid = self.__put_activations_on_grid(conv, (int(num),
                                                         int(out_filters /
                                                             num)))
            tf.summary.image('sub3/activations', grid, max_outputs=1)

        return conv
model.py (project: woipv, author: Panaetius)
def inference(self, inputs):
        # resnet
        with tf.variable_scope('first_layer'):
            kernel = tf.get_variable('weights', [7, 7, 3, 64],
                                     initializer=xavier_initializer(
                                         dtype=tf.float32),
                                     dtype=tf.float32)
            conv = tf.nn.conv2d(inputs, kernel, [1, 2, 2, 1],
                                padding='SAME',
                                name='conv')
            batch_norm = self.__batch_norm_wrapper(conv, shape=[0, 1, 2, 3])
            conv = tf.nn.elu(batch_norm, 'elu')

            grid = self.__put_kernels_on_grid(kernel, (8, 8))
            tf.summary.image('conv1/features', grid, max_outputs=1)
            grid = self.__put_activations_on_grid(conv, (8, 8))
            tf.summary.image('conv1/activations', grid, max_outputs=1)

            inputs = tf.nn.max_pool(conv, ksize=[1, 2, 2, 1],
                                    strides=[1, 2, 2, 1], padding='SAME',
                                    name='pool')

        if self.net == NetworkType.RESNET34:
            inputs = self.__resnet34(inputs)
        elif self.net == NetworkType.RESNET50:
            inputs = self.__resnet50(inputs)

        # classify regions and add final region adjustments
        with tf.variable_scope('fully_connected'):
            fc = tf.reduce_mean(inputs, [1, 2])
            class_weights = tf.get_variable('class_weights',
                                            [self.conv_feature_count,
                                             self.num_classes],
                                            initializer=xavier_initializer(
                                                dtype=tf.float32),
                                            dtype=tf.float32)
            class_bias = tf.get_variable("class_bias", [
                self.num_classes],
                initializer=tf.constant_initializer(
                0.1),
                dtype=tf.float32)

            class_score = tf.matmul(fc, class_weights)
            class_score = tf.nn.bias_add(class_score, class_bias)


        return class_score
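
inference() returns raw logits, so a training graph would typically pair it with sparse softmax cross-entropy. A hypothetical training head (classification_loss and its labels argument are illustrative, not from the project):

import tensorflow as tf

def classification_loss(class_score, labels):
    # class_score: raw logits from inference(); labels: integer class ids.
    return tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=class_score, labels=labels))
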

