Example source code for Python variable_scope()
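
All of the snippets below target the TensorFlow 1.x graph API. As a minimal sketch of what tf.variable_scope() does (the scope and variable names here are illustrative, not taken from the projects below):

import tensorflow as tf

with tf.variable_scope('layer1'):
    w = tf.get_variable('w', shape=[3, 3])        # created as 'layer1/w'

with tf.variable_scope('layer1', reuse=True):
    w_again = tf.get_variable('w', shape=[3, 3])  # returns the existing 'layer1/w'

assert w is w_again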

Source file: tf_util.py, Project: pointnet, Author: charlesq34
def avg_pool2d(inputs,
               kernel_size,
               scope,
               stride=[2, 2],
               padding='VALID'):
  """ 2D avg pooling.

  Args:
    inputs: 4-D tensor BxHxWxC
    kernel_size: a list of 2 ints
    stride: a list of 2 ints
    scope: string, name for the variable scope
    padding: 'VALID' or 'SAME'

  Returns:
    Variable tensor
  """
  with tf.variable_scope(scope) as sc:
    kernel_h, kernel_w = kernel_size
    stride_h, stride_w = stride
    outputs = tf.nn.avg_pool(inputs,
                             ksize=[1, kernel_h, kernel_w, 1],
                             strides=[1, stride_h, stride_w, 1],
                             padding=padding,
                             name=sc.name)
    return outputs
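
A hypothetical call of the wrapper above; the placeholder shape is an assumption for illustration:

inputs = tf.placeholder(tf.float32, [None, 32, 32, 64])          # B x H x W x C
net = avg_pool2d(inputs, kernel_size=[2, 2], scope='avgpool1')   # -> B x 16 x 16 x 64
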
Source file: tf_util.py, Project: pointnet, Author: charlesq34
def max_pool3d(inputs,
               kernel_size,
               scope,
               stride=[2, 2, 2],
               padding='VALID'):
  """ 3D max pooling.

  Args:
    inputs: 5-D tensor BxDxHxWxC
    kernel_size: a list of 3 ints
    stride: a list of 3 ints
    scope: string, name for the variable scope
    padding: 'VALID' or 'SAME'

  Returns:
    Variable tensor
  """
  with tf.variable_scope(scope) as sc:
    kernel_d, kernel_h, kernel_w = kernel_size
    stride_d, stride_h, stride_w = stride
    outputs = tf.nn.max_pool3d(inputs,
                               ksize=[1, kernel_d, kernel_h, kernel_w, 1],
                               strides=[1, stride_d, stride_h, stride_w, 1],
                               padding=padding,
                               name=sc.name)
    return outputs
Source file: tf_util.py, Project: pointnet, Author: charlesq34
def avg_pool3d(inputs,
               kernel_size,
               scope,
               stride=[2, 2, 2],
               padding='VALID'):
  """ 3D avg pooling.

  Args:
    inputs: 5-D tensor BxDxHxWxC
    kernel_size: a list of 3 ints
    stride: a list of 3 ints
    scope: string, name for the variable scope
    padding: 'VALID' or 'SAME'

  Returns:
    Variable tensor
  """
  with tf.variable_scope(scope) as sc:
    kernel_d, kernel_h, kernel_w = kernel_size
    stride_d, stride_h, stride_w = stride
    outputs = tf.nn.avg_pool3d(inputs,
                               ksize=[1, kernel_d, kernel_h, kernel_w, 1],
                               strides=[1, stride_d, stride_h, stride_w, 1],
                               padding=padding,
                               name=sc.name)
    return outputs
Source file: tf_util.py, Project: pointnet, Author: charlesq34
def dropout(inputs,
            is_training,
            scope,
            keep_prob=0.5,
            noise_shape=None):
  """ Dropout layer.

  Args:
    inputs: tensor
    is_training: boolean tf.Variable
    scope: string
    keep_prob: float in [0,1]
    noise_shape: list of ints

  Returns:
    tensor variable
  """
  with tf.variable_scope(scope) as sc:
    outputs = tf.cond(is_training,
                      lambda: tf.nn.dropout(inputs, keep_prob, noise_shape),
                      lambda: inputs)
    return outputs
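
A hypothetical usage sketch: because the switch is a boolean tensor fed at run time, one graph applies dropout during training and passes activations through unchanged during evaluation (`net` and the feed lines are assumptions):

is_training_pl = tf.placeholder(tf.bool, shape=())
net = dropout(net, is_training_pl, scope='dp1', keep_prob=0.5)
# sess.run(net, feed_dict={is_training_pl: True})   # training: dropout active
# sess.run(net, feed_dict={is_training_pl: False})  # evaluation: identity
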
Source file: dc_gan.py, Project: deep-learning, Author: ljanyst
def __discriminator(self, x, scope, reuse):
        with tf.variable_scope(scope, reuse=reuse):
            x1 = tf.layers.conv2d(x, 64, 5, strides=2, padding='same')
            x1 = LeakyReLU(x1, self.alpha)
            # 16x16x64

            x2 = tf.layers.conv2d(x1, 128, 5, strides=2, padding='same')
            x2 = tf.layers.batch_normalization(x2, training=self.training)
            x2 = LeakyReLU(x2, self.alpha)
            # 8x8x128

            x3 = tf.layers.conv2d(x2, 256, 5, strides=2, padding='same')
            x3 = tf.layers.batch_normalization(x3, training=self.training)
            x3 = LeakyReLU(x3, self.alpha)
            # 4x4x256

            # Flatten it
            flat = tf.reshape(x3, (-1, 4*4*256))
            logits = tf.layers.dense(flat, 1)
            out = tf.sigmoid(logits)

        return out, logits

    #---------------------------------------------------------------------------
Source file: dc_gan.py, Project: deep-learning, Author: ljanyst
def build_discriminator(self, image_size):
        self.inputs_real = tf.placeholder(tf.float32, [None, *image_size],
                                          name='inputs_real')

        #-----------------------------------------------------------------------
        # Process input so that it matches what the generator produces
        #-----------------------------------------------------------------------
        with tf.variable_scope('process_real'):
            processed = self.inputs_real / 128 - 1

        #-----------------------------------------------------------------------
        # Real discriminator
        #-----------------------------------------------------------------------
        ret = self.__discriminator(processed, 'discriminator', False)
        self.dsc_real_out    = ret[0]
        self.dsc_real_logits = ret[1]

        #-----------------------------------------------------------------------
        # Fake discriminator
        #-----------------------------------------------------------------------
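        # reuse=True shares the variables created by the 'real' call above,
        # so both paths evaluate the same discriminator weights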
        ret = self.__discriminator(self.gen_out, 'discriminator', True)
        self.dsc_fake_out    = ret[0]
        self.dsc_fake_logits = ret[1]

    #---------------------------------------------------------------------------
Source file: sentiment_rnn.py, Project: deep-learning, Author: ljanyst
def get_optimizer(self, learning_rate=0.001, grad_clip=5):
        #-----------------------------------------------------------------------
        # Build a loss function
        #-----------------------------------------------------------------------
        with tf.variable_scope('loss'):
            loss = tf.losses.mean_squared_error(self.target, self.output)

        #-----------------------------------------------------------------------
        # Build the optimizer
        #-----------------------------------------------------------------------
        with tf.variable_scope('optimizer'):
            tvars     = tf.trainable_variables()
            grads, _  = tf.clip_by_global_norm(tf.gradients(loss, tvars),
                                               grad_clip)
            train_op  = tf.train.AdamOptimizer(learning_rate)
            optimizer = train_op.apply_gradients(zip(grads, tvars))

        return optimizer, loss
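
A hypothetical training step built on the method above; `model` and `feed` are assumptions standing in for an instance of this class and a feed_dict of batch data:

optimizer, loss = model.get_optimizer(learning_rate=0.001, grad_clip=5)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    _, batch_loss = sess.run([optimizer, loss], feed_dict=feed)
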
Source file: mnist_gan.py, Project: deep-learning, Author: ljanyst
def __init__(self, rnd_vec_dim, hidden_units, output_dim, alpha):
        #-----------------------------------------------------------------------
        # Inputs
        #-----------------------------------------------------------------------
        self.inputs_rnd  = tf.placeholder(tf.float32, (None, rnd_vec_dim),
                                          name='inputs_rnd')

        #-----------------------------------------------------------------------
        # The generator
        #-----------------------------------------------------------------------
        self.alpha = alpha
        with tf.variable_scope('generator'):
            h1 = tf.layers.dense(self.inputs_rnd, hidden_units, activation=None)
            h1 = LeakyReLU(h1, self.alpha)

            self.gen_logits = tf.layers.dense(h1, output_dim, activation=None)
            self.gen_out    = tf.tanh(self.gen_logits)

    #---------------------------------------------------------------------------
Source file: bidirectional.py, Project: deep-summarization, Author: harpribot
def _load_optimizer(self):
        """
        Load the SGD optimizer

        :return: None
        """
        # loss function
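        # NOTE: tf.nn.seq2seq was moved to tf.contrib.legacy_seq2seq in TF 1.0;
        # this snippet targets a pre-1.0 TensorFlow release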
        with tf.variable_scope("forward"):
            self.loss_fwd = tf.nn.seq2seq.sequence_loss(self.dec_outputs_fwd,
                                                        self.labels, self.weights, self.vocab_size)

            # optimizer
            # self.optimizer_fwd = tf.train.MomentumOptimizer(self.learning_rate,
            #                                        self.momentum)
            self.optimizer_fwd = tf.train.GradientDescentOptimizer(self.learning_rate)
            self.train_op_fwd = self.optimizer_fwd.minimize(self.loss_fwd)

        with tf.variable_scope("backward"):
            self.loss_bwd = tf.nn.seq2seq.sequence_loss(self.dec_outputs_bwd,
                                                        self.labels, self.weights, self.vocab_size)

            # optimizer
            # self.optimizer_bwd = tf.train.MomentumOptimizer(self.learning_rate,
            #                                        self.momentum)
            self.optimizer_bwd = tf.train.GradientDescentOptimizer(self.learning_rate)
            self.train_op_bwd = self.optimizer_bwd.minimize(self.loss_bwd)
Source file: stacked_simple.py, Project: deep-summarization, Author: harpribot
def _load_model(self):
        """
        Creates the encoder decoder model

        :return: None
        """
        # Initial memory value for recurrence.
        self.prev_mem = tf.zeros((self.train_batch_size, self.memory_dim))

        # choose RNN/GRU/LSTM cell
        with tf.variable_scope("train_test", reuse=True):
            cell = self.get_cell()
            # Stacks layers of RNN's to form a stacked decoder
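            # Note: [cell] * num_layers repeats one cell object for every layer;
            # newer TF 1.x releases require a distinct cell instance per layer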
            self.cell = tf.nn.rnn_cell.MultiRNNCell([cell] * self.num_layers)

        # embedding model
        if not self.attention:
            with tf.variable_scope("train_test"):
                self.dec_outputs, self.dec_memory = tf.nn.seq2seq.embedding_rnn_seq2seq(
                                self.enc_inp, self.dec_inp, self.cell,
                                self.vocab_size, self.vocab_size, self.seq_length)
            with tf.variable_scope("train_test", reuse=True):
                self.dec_outputs_tst, _ = tf.nn.seq2seq.embedding_rnn_seq2seq(
                                self.enc_inp, self.dec_inp, self.cell,
                                self.vocab_size, self.vocab_size, self.seq_length, feed_previous=True)

        else:
            with tf.variable_scope("train_test"):
                self.dec_outputs, self.dec_memory = tf.nn.seq2seq.embedding_attention_seq2seq(
                                self.enc_inp, self.dec_inp, self.cell,
                                self.vocab_size, self.vocab_size, self.seq_length)
            with tf.variable_scope("train_test", reuse=True):
                self.dec_outputs_tst, _ = tf.nn.seq2seq.embedding_attention_seq2seq(
                                self.enc_inp, self.dec_inp, self.cell,
                                self.vocab_size, self.vocab_size, self.seq_length, feed_previous=True)
Source file: simple.py, Project: deep-summarization, Author: harpribot
def _load_model(self):
        """
        Creates the encoder decoder model

        :return: None
        """
        # Initial memory value for recurrence.
        self.prev_mem = tf.zeros((self.train_batch_size, self.memory_dim))

        # choose RNN/GRU/LSTM cell
        with tf.variable_scope("train_test", reuse=True):
            self.cell = self.get_cell()

        # embedding model
        if not self.attention:
            with tf.variable_scope("train_test"):
                self.dec_outputs, self.dec_memory = tf.nn.seq2seq.embedding_rnn_seq2seq(
                                self.enc_inp, self.dec_inp, self.cell,
                                self.vocab_size, self.vocab_size, self.seq_length)
            with tf.variable_scope("train_test", reuse=True):
                self.dec_outputs_tst, _ = tf.nn.seq2seq.embedding_rnn_seq2seq(
                                self.enc_inp, self.dec_inp, self.cell,
                                self.vocab_size, self.vocab_size, self.seq_length, feed_previous=True)

        else:
            with tf.variable_scope("train_test"):
                self.dec_outputs, self.dec_memory = tf.nn.seq2seq.embedding_attention_seq2seq(
                                self.enc_inp, self.dec_inp, self.cell,
                                self.vocab_size, self.vocab_size, self.seq_length)
            with tf.variable_scope("train_test", reuse=True):
                self.dec_outputs_tst, _ = tf.nn.seq2seq.embedding_attention_seq2seq(
                                self.enc_inp, self.dec_inp, self.cell,
                                self.vocab_size, self.vocab_size, self.seq_length, feed_previous=True)
Source file: TensorFlowInterface.py, Project: IntroToDeepLearning, Author: robb-brown
def initialize(self,std=1.0,bias=0.1):
        with tf.variable_scope(self.name):
            self.inputShape = np.product([i.value for i in self.input.get_shape()[1:] if i.value is not None])
            self.W = weightVariable([self.inputShape,self.units],std=std)
            self.b = biasVariable([self.units],bias=bias)
Source file: TensorFlowInterface.py, Project: IntroToDeepLearning, Author: robb-brown
def initialize(self,std=1.0,bias=0.1):
        with tf.variable_scope(self.name):
            self.W = weightVariable(self.shape,std=std)     # YxX patch, Z contrast, outputs to N neurons
            self.b = biasVariable([self.shape[-1]],bias=bias)   # N bias variables to go with the N neurons
Source file: TensorFlowInterface.py, Project: IntroToDeepLearning, Author: robb-brown
def setupOutput(self):
        with tf.variable_scope(self.name):
            self.output = max_pool_2x2(self.input)
Source file: TensorFlowInterface.py, Project: IntroToDeepLearning, Author: robb-brown
def setupOutput(self):
        with tf.variable_scope(self.name):
            self.output = max_pool(self.input,shape=self.shape)
Source file: TensorFlowInterface.py, Project: IntroToDeepLearning, Author: robb-brown
def setupOutput(self):
        with tf.variable_scope(self.name):
            self.output = tf.nn.l2_normalize(self.input,-1)
Source file: TensorFlowInterface.py, Project: IntroToDeepLearning, Author: robb-brown
def setupOutput(self):
        with tf.variable_scope(self.name):
            try:
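                # newer resize_images API: size given as one [height, width] argument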
                self.output = tf.image.resize_images(self.input,self.outputShape,method=self.method)#,align_corners=self.alignCorners)
            except:
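                # older resize_images API: height and width passed as separate arguments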
                self.output = tf.image.resize_images(self.input,self.outputShape[0],self.outputShape[1],method=self.method)#,align_corners=self.alignCorners)
Source file: TensorFlowInterface.py, Project: IntroToDeepLearning, Author: robb-brown
def initialize(self):
        with tf.variable_scope(self.name):
            self.keepProb = tf.placeholder('float')         # Variable to hold the dropout probability
Source file: TensorFlowInterface.py, Project: IntroToDeepLearning, Author: robb-brown
def setupOutput(self):
        with tf.variable_scope(self.name):
            self.output = tf.nn.dropout(self.input,self.keepProb)
            self.output.get_shape = self.input.get_shape        # DEBUG: remove this whenever TensorFlow fixes this bug


