Python nn() example source code

_ff.py (project: tensorfx, author: TensorLab)
def _init_parser(parser):
  """Initializes the parser for feed-forward models.
  """
  optimization = parser.add_argument_group(title='Optimization',
    description='Arguments determining the optimizer behavior.')
  optimization.add_argument('--learning-rate', metavar='rate', type=float, default=0.01,
                            help='The magnitude of learning to perform at each step.')

  nn = parser.add_argument_group(title='Neural Network',
    description='Arguments controlling the structure of the neural network.')
  nn.add_argument('--hidden-layers', metavar='units', type=int, required=False,
                  action=parser.var_args_action,
                  help='The size of each hidden layer to add.')
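
A quick way to exercise this parser. `var_args_action` is a custom attribute on the tensorfx parser; the `VarArgsAction` stand-in below is hypothetical and only approximates its behavior (collecting repeated --hidden-layers values):

import argparse

class VarArgsAction(argparse.Action):
    """Hypothetical stand-in: appends each occurrence of the flag to a list."""
    def __call__(self, parser, namespace, values, option_string=None):
        items = list(getattr(namespace, self.dest, None) or [])
        items.append(values)
        setattr(namespace, self.dest, items)

parser = argparse.ArgumentParser()
parser.var_args_action = VarArgsAction
_init_parser(parser)
args = parser.parse_args(['--learning-rate', '0.05',
                          '--hidden-layers', '128', '--hidden-layers', '64'])
print(args.learning_rate, args.hidden_layers)  # 0.05 [128, 64]
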
_ff.py (project: tensorfx, author: TensorLab)
def build_training(self, global_steps, inputs, inferences):
    with tf.name_scope('target'):
      label_indices = self.classification.target_label_indices(inputs)

    with tf.name_scope('error'):
      cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=inferences,
                                                              labels=label_indices,
                                                              name='softmax_cross_entropy')
      loss = tf.reduce_mean(cross_entropy, name='loss')

      averager = tf.train.ExponentialMovingAverage(0.99, name='loss_averager')
      averaging = averager.apply([loss])

    with tf.name_scope(''):
      tf.summary.scalar('metrics/loss', loss)
      tf.summary.scalar('metrics/loss.average', averager.average(loss))

    with tf.control_dependencies([averaging]):
      with tf.name_scope(self.args.optimizer.get_name()):
        gradients = self.args.optimizer.compute_gradients(loss, var_list=tf.trainable_variables())
        train = self.args.optimizer.apply_gradients(gradients, global_steps, name='optimize')

      with tf.name_scope(''):
        for gradient, t in gradients:
          if gradient is not None:
            tf.summary.histogram(t.op.name + '.gradients', gradient)

    return loss, train
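
For context, a minimal TF 1.x driver loop for the two returned ops might look like the following sketch (not part of the tensorfx source; `loss` and `train` are the tensors returned above):

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(1000):
        # train applies one optimizer update; loss is the averaged cross-entropy
        loss_value, _ = sess.run([loss, train])
        if step % 100 == 0:
            print('step %d: loss %.4f' % (step, loss_value))
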
tune_mnist_ray.py (project: ray, author: ray-project)
def conv2d(x, W):
    """conv2d returns a 2d convolution layer with full stride."""
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
tune_mnist_ray.py (project: ray, author: ray-project)
def max_pool_2x2(x):
    """max_pool_2x2 downsamples a feature map by 2X."""
    return tf.nn.max_pool(
        x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
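
The two helpers compose into a standard conv+pool stage; a short sketch (shapes are illustrative, not taken from the Ray example):

x = tf.placeholder(tf.float32, [None, 28, 28, 1])
W_conv1 = tf.Variable(tf.truncated_normal([5, 5, 1, 32], stddev=0.1))
h_conv1 = tf.nn.relu(conv2d(x, W_conv1))  # SAME padding keeps 28x28 -> [None, 28, 28, 32]
h_pool1 = max_pool_2x2(h_conv1)           # downsample by 2 -> [None, 14, 14, 32]
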
tune_mnist_ray.py (project: ray, author: ray-project)
def train(config={'activation': 'relu'}, reporter=None):
    global FLAGS, status_reporter, activation_fn
    status_reporter = reporter
    activation_fn = getattr(tf.nn, config['activation'])
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--data_dir', type=str, default='/tmp/tensorflow/mnist/input_data',
        help='Directory for storing input data')
    FLAGS, unparsed = parser.parse_known_args()
    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)


# !!! Example of using the ray.tune Python API !!!
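
A hedged sketch of wiring `train` into the early (2017-era) ray.tune API; exact names changed across releases, so treat this as illustrative only:

import ray
from ray.tune import register_trainable, grid_search, run_experiments

ray.init()
register_trainable('train_mnist', train)
run_experiments({
    'tune_mnist': {
        'run': 'train_mnist',
        # tune passes each sampled config (and a reporter) into train()
        'config': {'activation': grid_search(['relu', 'elu', 'tanh'])},
    },
})
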
layers.py (project: TensorBase, author: dancsalo)
def conv2d(self, filter_size, output_channels, stride=1, padding='SAME', activation_fn=tf.nn.relu, b_value=0.0, s_value=1.0, bn=True, stoch=False):
        """
        :param filter_size: int. assumes square filter
        :param output_channels: int
        :param stride: int
        :param padding: 'VALID' or 'SAME'
        :param activation_fn: tf.nn function
        :param b_value: float
        :param s_value: float
        """
        self.count['conv'] += 1
        self._layer_count += 1
        scope = 'conv_' + str(self.count['conv'])
        clean = not stoch  # a stochastic pass is the "noisy" (non-clean) pass
        with tf.variable_scope(scope):
            input_channels = self.input.get_shape()[3]
            output_shape = [filter_size, filter_size, input_channels, output_channels]
            w = self.weight_variable(name='weights', shape=output_shape)
            self.input = tf.nn.conv2d(self.input, w, strides=[1, stride, stride, 1], padding=padding)
            if bn is True:
                self.input = self.conv_batch_norm(self.input, clean=clean, count=self._layer_count)
            if stoch is True:
                self.input = tf.random_normal(tf.shape(self.input)) + self.input
                self._noisy_z_dict[self._layer_count] = self.input
            if b_value is not None:
                b = self.const_variable(name='bias', shape=[output_channels], value=b_value)
                self.input = tf.add(self.input, b)
            if s_value is not None:
                s = self.const_variable(name='scale', shape=[output_channels], value=s_value)
                self.input = tf.multiply(self.input, s)
            if activation_fn is not None:
                self.input = activation_fn(self.input)
        self.print_log(scope + ' output: ' + str(self.input.get_shape()))
layers.py (project: TensorBase, author: dancsalo)
def fc(self, output_nodes, keep_prob=1, activation_fn=tf.nn.relu, b_value=0.0, s_value=None, bn=False, stoch=False, ladder=False, clean=False):
        self.count['fc'] += 1
        self._layer_count += 1
        scope = 'fc_' + str(self.count['fc'])
        with tf.variable_scope(scope):
            input_nodes = self.input.get_shape()[1]
            output_shape = [input_nodes, output_nodes]
            w = self.weight_variable(name='weights', shape=output_shape)
            self.input = tf.matmul(self.input, w)
            if bn is True:
                self.input = self.batch_norm(self.input, clean=clean, count=self._layer_count)
                if ladder is True:
                    b_value = s_value = None
                    noisy_z_ind = self.layer_num - self.count['deconv'] - self.count['fc']
                    noisy_z = self._noisy_z_dict[noisy_z_ind]
                    z_hat = self.ladder_g_function(noisy_z, self.input)
                    self._z_hat_bn[noisy_z_ind] = (z_hat - self.clean_batch_dict[noisy_z_ind][0]) / self.clean_batch_dict[noisy_z_ind][1]
            if stoch is True:
                self.input = tf.random_normal(tf.shape(self.input)) + self.input
                self._noisy_z_dict[self._layer_count] = self.input
            if b_value is not None:
                b = self.const_variable(name='bias', shape=[output_nodes], value=b_value)
                self.input = tf.add(self.input, b)
            if s_value is not None:
                s = self.const_variable(name='scale', shape=[output_nodes], value=s_value)
                self.input = tf.multiply(self.input, s)
            if activation_fn is not None:
                self.input = activation_fn(self.input)
            if keep_prob != 1:
                self.input = tf.nn.dropout(self.input, keep_prob=keep_prob)
        self.print_log(scope + ' output: ' + str(self.input.get_shape()))
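
Assuming the surrounding TensorBase Layers class holds the running tensor in self.input and is used builder-style, a chained network definition might look like this (hypothetical usage, not from the repo):

net = Layers(x)             # x: [batch, height, width, channels]
net.conv2d(3, 64)           # 3x3 conv, 64 output channels
net.conv2d(3, 64, stride=2) # strided conv for downsampling
net.fc(256, keep_prob=0.5)  # fully connected with dropout
net.fc(10, activation_fn=None)
logits = net.input
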
layers.py (project: TensorBase, author: dancsalo)
def batch_norm(self, x, epsilon=1e-3, clean=False, count=1):
        # Calculate batch mean and variance
        batch_mean1, batch_var1 = tf.nn.moments(x, [0], keep_dims=True)

        # Apply the initial batch normalizing transform
        z1_hat = (x - batch_mean1) / tf.sqrt(batch_var1 + epsilon)
        if clean is True:
            self.clean_batch_dict[count] = (tf.squeeze(batch_mean1), tf.squeeze(batch_var1))
            self._clean_z[count] = z1_hat
        return z1_hat
layers.py (project: TensorBase, author: dancsalo)
def conv_batch_norm(self, x, epsilon=1e-3, clean=False, count=1):
        # Calculate batch mean and variance
        batch_mean1, batch_var1 = tf.nn.moments(x, [0, 1, 2], keep_dims=True)

        # Apply the initial batch normalizing transform
        z1_hat = (x - batch_mean1) / tf.sqrt(batch_var1 + epsilon)
        if clean is True:
            self.clean_batch_dict[count] = (tf.squeeze(batch_mean1), tf.squeeze(batch_var1))
            self._clean_z[count] = z1_hat
        return z1_hat
base.py (project: TensorBase, author: dancsalo)
def conv2d(self, filter_size, output_channels, stride=1, padding='SAME', bn=True, activation_fn=tf.nn.relu,
               b_value=0.0, s_value=1.0, trainable=True):
        """
        2D Convolutional Layer.
        :param filter_size: int. assumes square filter
        :param output_channels: int
        :param stride: int
        :param padding: 'VALID' or 'SAME'
        :param activation_fn: tf.nn function
        :param b_value: float
        :param s_value: float
        """
        self.count['conv'] += 1
        scope = 'conv_' + str(self.count['conv'])
        with tf.variable_scope(scope):

            # Conv function
            input_channels = self.input.get_shape()[3]
            if filter_size == 0:  # outputs a 1x1 feature map; used for FCN
                filter_size = self.input.get_shape()[2]
                padding = 'VALID'
            output_shape = [filter_size, filter_size, input_channels, output_channels]
            w = self.weight_variable(name='weights', shape=output_shape, trainable=trainable)
            self.input = tf.nn.conv2d(self.input, w, strides=[1, stride, stride, 1], padding=padding)

            if bn is True:  # batch normalization
                self.input = self.batch_norm(self.input)
            if b_value is not None:  # bias value
                b = self.const_variable(name='bias', shape=[output_channels], value=b_value, trainable=trainable)
                self.input = tf.add(self.input, b)
            if s_value is not None:  # scale value
                s = self.const_variable(name='scale', shape=[output_channels], value=s_value, trainable=trainable)
                self.input = tf.multiply(self.input, s)
            if activation_fn is not None:  # activation function
                self.input = activation_fn(self.input)
        print(scope + ' output: ' + str(self.input.get_shape()))
base.py (project: TensorBase, author: dancsalo)
def fc(self, output_nodes, keep_prob=1, activation_fn=tf.nn.relu, b_value=0.0, s_value=1.0, bn=True,
           trainable=True):
        """
        Fully Connected Layer
        :param output_nodes: int
        :param keep_prob: float. set to 1 for no dropout
        :param activation_fn: tf.nn function
        :param b_value: float or None
        :param s_value: float or None
        :param bn: bool
        :param trainable: bool
        """
        self.count['fc'] += 1
        scope = 'fc_' + str(self.count['fc'])
        with tf.variable_scope(scope):

            # Flatten if necessary
            if len(self.input.get_shape()) == 4:
                input_nodes = tf.Dimension(
                    self.input.get_shape()[1] * self.input.get_shape()[2] * self.input.get_shape()[3])
                output_shape = tf.stack([-1, input_nodes])
                self.input = tf.reshape(self.input, output_shape)

            # Matrix Multiplication Function
            input_nodes = self.input.get_shape()[1]
            output_shape = [input_nodes, output_nodes]
            w = self.weight_variable(name='weights', shape=output_shape, trainable=trainable)
            self.input = tf.matmul(self.input, w)

            if bn is True:  # batch normalization
                self.input = self.batch_norm(self.input, 'fc')
            if b_value is not None:  # bias value
                b = self.const_variable(name='bias', shape=[output_nodes], value=b_value, trainable=trainable)
                self.input = tf.add(self.input, b)
            if s_value is not None:  # scale value
                s = self.const_variable(name='scale', shape=[output_nodes], value=s_value, trainable=trainable)
                self.input = tf.multiply(self.input, s)
            if activation_fn is not None:  # activation function
                self.input = activation_fn(self.input)
            if keep_prob != 1:  # dropout function
                self.input = tf.nn.dropout(self.input, keep_prob=keep_prob)
        print(scope + ' output: ' + str(self.input.get_shape()))
base.py (project: TensorBase, author: dancsalo)
def maxpool(self, k=2, s=None, globe=False):
        """
        Takes max value over a k x k area in each input map, or over the entire map (global = True)
        :param k: int
        :param globe:  int, whether to pool over each feature map in its entirety
        """
        self.count['mp'] += 1
        scope = 'maxpool_' + str(self.count['mp'])
        with tf.variable_scope(scope):
            if globe is True:  # Global Pool Parameters
                k1 = self.input.get_shape()[1]
                k2 = self.input.get_shape()[2]
                s1 = 1
                s2 = 1
                padding = 'VALID'
            else:
                k1 = k
                k2 = k
                if s is None:
                    s1 = k
                    s2 = k
                else:
                    s1 = s
                    s2 = s
                padding = 'SAME'
            # Max Pool Function
            self.input = tf.nn.max_pool(self.input, ksize=[1, k1, k2, 1], strides=[1, s1, s2, 1], padding=padding)
        print(scope + ' output: ' + str(self.input.get_shape()))
base.py (project: TensorBase, author: dancsalo)
def avgpool(self, k=2, s=None, globe=False):
        """
        Averages the values over a k x k area in each input map, or over the entire map (global = True)
        :param k: int
        :param globe: int, whether to pool over each feature map in its entirety
        """
        self.count['ap'] += 1
        scope = 'avgpool_' + str(self.count['mp'])
        with tf.variable_scope(scope):
            if globe is True:  # Global Pool Parameters
                k1 = self.input.get_shape()[1]
                k2 = self.input.get_shape()[2]
                s1 = 1
                s2 = 1
                padding = 'VALID'
            else:
                k1 = k
                k2 = k
                if s is None:
                    s1 = k
                    s2 = k
                else:
                    s1 = s
                    s2 = s
                padding = 'SAME'
            # Average Pool Function
            self.input = tf.nn.avg_pool(self.input, ksize=[1, k1, k2, 1], strides=[1, s1, s2, 1], padding=padding)
        print(scope + ' output: ' + str(self.input.get_shape()))
base.py (project: TensorBase, author: dancsalo)
def noisy_and(self, num_classes, trainable=True):
        """ Multiple Instance Learning (MIL), flexible pooling function
        :param num_classes: int, determine number of output maps
        """
        assert self.input.get_shape()[3] == num_classes  # input tensor should have map depth equal to # of classes
        scope = 'noisyAND'
        with tf.variable_scope(scope):
            a = self.const_variable(name='a', shape=[1], value=1.0, trainable=trainable)
            b = self.const_variable(name='b', shape=[1, num_classes], value=0.0, trainable=trainable)
            mean = tf.reduce_mean(self.input, axis=[1, 2])
            self.input = (tf.nn.sigmoid(a * (mean - b)) - tf.nn.sigmoid(-a * b)) / (
                tf.nn.sigmoid(a * (1 - b)) - tf.nn.sigmoid(-a * b))
        print(scope + ' output: ' + str(self.input.get_shape()))
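
Written out, the expression above mirrors the noisy-AND pooling function from the deep multiple-instance learning literature, with learnable slope a and per-class threshold b_j:

% Noisy-AND pooling, as implemented above.
% \bar{p}_j is the spatial mean of class map j; \sigma is the sigmoid.
P_j = \frac{\sigma\left(a(\bar{p}_j - b_j)\right) - \sigma(-a b_j)}
           {\sigma\left(a(1 - b_j)\right) - \sigma(-a b_j)}
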
base.py (project: TensorBase, author: dancsalo)
def weight_variable(name, shape, trainable):
        """
        :param name: string
        :param shape: 4D array
        :return: tf variable
        """
        w = tf.get_variable(name=name, shape=shape, initializer=tf.contrib.layers.variance_scaling_initializer(),
                            trainable=trainable)
        weights_norm = tf.reduce_sum(tf.nn.l2_loss(w),
                                     name=name + '_norm')  # collected in case the user wants to add weight decay
        tf.add_to_collection('weight_losses', weights_norm)
        return w
pretrained_word_embedding_TF_nn.py (project: Text-Classification-with-Tensorflow, author: jrzaurin)
def conv1d_layer(inp, filter_shape):
    """This is a 1d conv, so filter_shape = [dim, input_channels, out_channels]"""
    W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.01))
    b = tf.Variable(tf.random_normal(shape=[filter_shape[2]]))
    # or you could initialize it as a constant
    # b = tf.Variable(tf.constant(0.1, shape=[filter_shape[2]]))
    x = tf.nn.conv1d(inp, W, stride=1, padding="VALID")
    x = tf.nn.bias_add(x, b)
    x = tf.nn.relu(x)
    return x
pretrained_word_embedding_TF_nn.py (project: Text-Classification-with-Tensorflow, author: jrzaurin)
def max_pool1d_layer(inp, ksize, strides):
    """tf.nn does not have max_pool_1d, so we have to expand the incoming layer
    as if we were dealing with a 2D convolution and then squeeze it again.
    Again, since this is a 1D conv, the size of the window (ksize) and the stride
    of the sliding window must have only one dimension (height) != 1
    """
    x = tf.expand_dims(inp, 3)
    x = tf.nn.max_pool(x, ksize=ksize, strides=strides, padding="VALID")
    x = tf.squeeze(x, [3])
    return x
pretrained_word_embedding_TF_nn.py (project: Text-Classification-with-Tensorflow, author: jrzaurin)
def dense_layer(inp, n_neurons):
    # the input to a fully connected layer must be 2D: [batch_size, n_inputs]
    n_inputs = int(inp.shape[1])
    W = tf.Variable(tf.truncated_normal((n_inputs, n_neurons), stddev=0.1))
    b = tf.Variable(tf.random_normal(shape=[n_neurons]))
    # or, if you prefer:
    # b = tf.Variable(tf.zeros([n_neurons]))
    x = tf.matmul(inp, W) + b
    x = tf.nn.relu(x)
    return x
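
Taken together, the three helpers form a minimal text-CNN head; an illustrative composition (sequence length, embedding size, and filter counts are made up for the example):

inp = tf.placeholder(tf.float32, [None, 100, 300])  # [batch, seq_len, emb_dim]
x = conv1d_layer(inp, filter_shape=[5, 300, 64])    # VALID conv -> [batch, 96, 64]
x = max_pool1d_layer(x, ksize=[1, 96, 1, 1],
                     strides=[1, 1, 1, 1])          # pool over time -> [batch, 1, 64]
x = tf.reshape(x, [-1, 64])                         # flatten -> [batch, 64]
out = dense_layer(x, n_neurons=10)
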
model.py (project: liveqa2017, author: codekansas)
def get_discriminator_op(self, r_preds, g_preds, d_weights):
        """Returns an op that updates the discriminator weights correctly.

        Args:
            r_preds: Tensor with shape (batch_size, num_timesteps, 1), the
                discriminator predictions for real data.
            g_preds: Tensor with shape (batch_size, num_timesteps, 1), the
                discriminator predictions for generated data.
            d_weights: a list of trainable tensors representing the weights
                associated with the discriminator model.

        Returns:
            dis_op, the op to run to train the discriminator.
        """

        with tf.variable_scope('loss/discriminator'):
            discriminator_opt = tf.train.AdamOptimizer(1e-3)

            eps = 1e-12
            r_loss = -tf.reduce_mean(tf.log(r_preds + eps))
            f_loss = -tf.reduce_mean(tf.log(1 + eps - g_preds))
            dis_loss = r_loss + f_loss
            # dis_loss = tf.reduce_mean(g_preds) - tf.reduce_mean(r_preds)

            # tf.summary.scalar('real', r_loss)
            # tf.summary.scalar('generated', f_loss)

            with tf.variable_scope('regularization'):
                dis_reg_loss = sum([tf.nn.l2_loss(w) for w in d_weights]) * 1e-6
            tf.summary.scalar('regularization', dis_reg_loss)

            total_loss = dis_loss + dis_reg_loss
            with tf.variable_scope('discriminator_update'):
                dis_op = self.get_updates(total_loss, discriminator_opt,
                                          d_weights)
            tf.summary.scalar('total', total_loss)

        return dis_op
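
The r_loss/f_loss pair above is the standard GAN discriminator objective (eps only adds numerical stability inside the logs):

% Standard GAN discriminator loss, as implemented above.
L_D = -\mathbb{E}_{x \sim p_{\text{data}}}\left[\log D(x)\right]
      -\mathbb{E}_{z}\left[\log\left(1 - D(G(z))\right)\right]
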
autoencoders.py (project: tensorflow-playground, author: wangz10)
def __init__(self, n_input, n_hidden, activation_func='softplus', 
        optimizer_name='AdamOptimizer',
        learning_rate=0.001,
        logdir='/tmp',
        log_every_n=100, 
        session_kwargs={},
        seed=42,
        tied_weights=False,
        linear_decoder=True,
        ):
        '''
        params:

        activation_func (string): the name of an activation function in tf.nn
        optimizer_name (string): the name of an optimizer class in tf.train
        '''
        self.n_input = n_input
        self.n_hidden = n_hidden
        self.activation_func = activation_func
        self.optimizer_name = optimizer_name
        self.learning_rate = learning_rate
        self.logdir = logdir
        self.log_every_n = log_every_n
        self.session_kwargs = session_kwargs
        self.seed = seed
        self.tied_weights = tied_weights
        self.linear_decoder = linear_decoder

        self._init_all()
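
A hedged sketch of how _init_all presumably resolves the two string parameters via getattr (the actual tensorflow-playground code may differ):

transfer = getattr(tf.nn, self.activation_func)          # e.g. tf.nn.softplus
optimizer_cls = getattr(tf.train, self.optimizer_name)   # e.g. tf.train.AdamOptimizer
optimizer = optimizer_cls(learning_rate=self.learning_rate)
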

