Python tf.nn usage examples (source code)

model.py (project: tfutils, author: neuroailab)
def global_pool(inp, kind='avg', keep_dims=False, name=None):
    if kind not in ['max', 'avg']:
        raise ValueError('Only global avg or max pool is allowed, but '
                         'you requested {}.'.format(kind))
    if name is None:
        name = 'global_{}_pool'.format(kind)
    h, w = inp.get_shape().as_list()[1:3]
    out = getattr(tf.nn, kind + '_pool')(inp,
                                         ksize=[1, h, w, 1],
                                         strides=[1, 1, 1, 1],
                                         padding='VALID')
    if keep_dims:
        output = tf.identity(out, name=name)
    else:
        output = tf.reshape(out, [out.get_shape().as_list()[0], -1], name=name)

    return output
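
A minimal usage sketch, assuming TF 1.x and an NHWC input (shapes are illustrative):

import tensorflow as tf

x = tf.placeholder(tf.float32, [8, 32, 32, 64])   # NHWC feature map
pooled = global_pool(x, kind='avg')               # shape [8, 64] after the reshape
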
autoencoder.py (project: website-fingerprinting, author: AxelGoetz)
def __init__(self, layers, batch_size, activation_func=tf.nn.sigmoid, saved_graph=None, sess=None, learning_rate=0.0001, batch_norm=False):
        """
        @param layers is a list of integers, determining the amount of layers and their size
            starting with the input size
        """
        if len(layers) < 2:
            print("Amount of layers must be greater than 1")
            exit(0)

        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.activation_func = activation_func
        self.batch_norm = batch_norm

        self.is_training = True

        # Use this in data preprocessing
        self.layers = layers

        self._make_graph(layers)

        if saved_graph is not None and sess is not None:
            self.import_from_file(sess, saved_graph)
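
A hypothetical instantiation; the class name is not visible in this excerpt, so AutoEncoder is assumed:

import tensorflow as tf

# layers: input size 784, then hidden sizes 256 and 64
ae = AutoEncoder(layers=[784, 256, 64], batch_size=128,
                 activation_func=tf.nn.relu, learning_rate=1e-4)
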
vars.py (project: luminoth, author: tryolabs)
def variable_summaries(var, name, collections=None):
    """Attach a lot of summaries to a Tensor (for TensorBoard visualization).

    Args:
        - var: Tensor of the variable to summarize.
        - name: Variable name.
        - collections: List of collections to save the summary to.
    """
    with tf.name_scope(name):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean, collections)
        num_params = tf.reduce_prod(tf.shape(var))
        tf.summary.scalar('num_params', num_params, collections)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev, collections)
        tf.summary.scalar('max', tf.reduce_max(var), collections)
        tf.summary.scalar('min', tf.reduce_min(var), collections)
        tf.summary.histogram('histogram', var, collections)
        tf.summary.scalar('sparsity', tf.nn.zero_fraction(var), collections)
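
A short usage sketch (TF 1.x; the FileWriter wiring is assumed to exist elsewhere):

import tensorflow as tf

w = tf.get_variable('fc/weights', shape=[128, 10])
variable_summaries(w, 'fc_weights')
merged = tf.summary.merge_all()  # fetch this in sess.run() and pass to a FileWriter
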
network_builder.py (project: GENNN, author: JarnoRFB)
def _build_loss(self, readout, labels):
        """Build the layer including the loss and the accuracy.

        Args:
            readout (tensor): The readout layer, i.e. unnormalized logits over the classes (softmax is applied inside the loss op).
            labels (tensor): Labels as integers.

        Returns:
            tensor: The loss tensor (cross entropy).
        """

        with tf.name_scope('loss'):
            self.loss = tf.reduce_mean(
                tf.nn.sparse_softmax_cross_entropy_with_logits(
                    logits=readout, labels=labels))
            tf.summary.scalar('cross_entropy', self.loss)
            correct_prediction = tf.nn.in_top_k(readout, labels, 1)
            self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
            tf.summary.scalar('accuracy', self.accuracy)
        return self.loss
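
For reference, the sparse op consumes integer class ids directly, while tf.nn.softmax_cross_entropy_with_logits expects one-hot targets; a minimal comparison sketch (TF 1.x, shapes illustrative):

import tensorflow as tf

logits = tf.random_normal([4, 10])   # batch of 4, 10 classes
labels = tf.constant([3, 1, 0, 7])   # integer class ids
sparse_xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=logits, labels=labels)
dense_xent = tf.nn.softmax_cross_entropy_with_logits(
    logits=logits, labels=tf.one_hot(labels, 10))
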
_ff.py (project: tensorfx, author: TensorLab)
def build_output(self, inputs, inferences):
    scores = tf.nn.softmax(inferences, name='scores')
    tf.add_to_collection('outputs', scores)

    with tf.name_scope('labels'):
      label_indices = tf.argmax(inferences, 1, name='arg_max')
      labels = self.classification.output_labels(label_indices)
      tf.add_to_collection('outputs', labels)

    keys = self.classification.keys(inputs)
    if keys:
      # Key feature, if it exists, is a passthrough to the output.
      # The use of identity is to name the tensor and correspondingly the output field.
      keys = tf.identity(keys, name='key')
      tf.add_to_collection('outputs', keys)

    return {
      'label': labels,
      'score': scores
    }
layers.py (project: TensorBase, author: dancsalo)
def _latent(self, x):
        if x is None:
            mean = None
            stddev = None
            logits = None
            class_predictions = None
            z = self.epsilon
        else:
            enc_output = tf.reshape(x, [-1, self.flags['hidden_size'] * 2])
            mean, stddev = tf.split(enc_output, 2, axis=1)  # Compute latent variables (z) by calculating mean, stddev
            stddev = tf.nn.softplus(stddev)
            with tf.variable_scope("y_network"):
                mlp = Layers(mean)
                mlp.fc(self.flags['num_classes'])
                logits = mlp.get_output()
                class_predictions = tf.nn.softmax(logits)
            z = (mean + self.epsilon * stddev) #* tf.cast(y_hat, tf.float32)
        return mean, stddev, class_predictions, logits, z
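
The line computing z is the VAE reparameterization trick: the sample is rewritten as a deterministic function of mean and stddev so gradients can flow through both. A standalone sketch with illustrative shapes:

import tensorflow as tf

mean = tf.zeros([32, 64])             # encoder mean
stddev = tf.ones([32, 64])            # encoder stddev (softplus-activated above)
epsilon = tf.random_normal([32, 64])  # N(0, 1) noise, as self.epsilon
z = mean + epsilon * stddev           # differentiable sample from N(mean, stddev^2)
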
base.py (project: TensorBase, author: dancsalo)
def flatten(self, keep_prob=1):
        """
        Flattens 4D Tensor (from Conv Layer) into 2D Tensor (to FC Layer)
        :param keep_prob: float in (0, 1]; set to 1 for no dropout
        """
        self.count['flat'] += 1
        scope = 'flat_' + str(self.count['flat'])
        with tf.variable_scope(scope):
            # Reshape function
            input_nodes = tf.Dimension(
                self.input.get_shape()[1] * self.input.get_shape()[2] * self.input.get_shape()[3])
            output_shape = tf.stack([-1, input_nodes])
            self.input = tf.reshape(self.input, output_shape)

            # Dropout function
            if keep_prob != 1:
                self.input = tf.nn.dropout(self.input, keep_prob=keep_prob)
        print(scope + ' output: ' + str(self.input.get_shape()))
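
A hypothetical call site, assuming Layers is the TensorBase builder class that keeps the running tensor in self.input:

import tensorflow as tf

conv_output = tf.placeholder(tf.float32, [None, 7, 7, 64])  # output of a conv stack
net = Layers(conv_output)
net.flatten(keep_prob=0.5)  # reshape to [-1, 7*7*64], then apply dropout
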
pretrained_word_embedding_TF_nn.py (project: Text-Classification-with-Tensorflow, author: jrzaurin)
def batch_norm_layer(inp):
    """As explained in A. Gerón's book, in the default batch_normalization
    there is no scaling, i.e. gamma is set to 1. This makes sense for layers
    with no activation function or ReLU (like ours), since the next layers
    weights can take care of the scaling. In other circumstances, include
    scaling
    """
    # get the size from input tensor (remember, 1D convolution -> input tensor 3D)
    size = int(inp.shape[2])

    batch_mean, batch_var = tf.nn.moments(inp, [0])
    scale = tf.Variable(tf.ones([size]))
    beta = tf.Variable(tf.zeros([size]))
    x = tf.nn.batch_normalization(inp, batch_mean, batch_var, beta, scale,
                                  variance_epsilon=1e-3)
    return x
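
A minimal usage sketch, assuming the 3-D (batch, steps, channels) input produced by a 1-D convolution:

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 100, 128])  # (batch, steps, channels)
x_bn = batch_norm_layer(x)  # normalized over the batch axis, per (step, channel)
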
model.py (project: liveqa2017, author: codekansas)
def __init__(self, cell, function, reuse=None):
        if not isinstance(cell, tf.contrib.rnn.RNNCell):
            raise TypeError('The parameter cell is not an RNNCell.')

        if isinstance(function, six.string_types):
            try:
                function = getattr(tf, function)
            except AttributeError:
                try:
                    function = getattr(tf.nn, function)
                except AttributeError:
                    raise ValueError('The desired function "%s" was '
                                     'not found.' % function)

        self._cell = cell
        self._function = function
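
A hypothetical usage sketch; the wrapper's class name is not shown in this excerpt, so ActivationWrapper is assumed:

import tensorflow as tf

cell = tf.contrib.rnn.GRUCell(128)
wrapped = ActivationWrapper(cell, 'tanh')  # string resolved via tf, then tf.nn
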
model_utils.py (project: sidenet, author: shashiongithub)
def multilayer_perceptron(final_output, weights, biases):
  """MLP over output with attention over enc outputs
  Args:
     final_output: [batch_size x 2*size]
  Returns:
     logit:  [batch_size x target_label_size]
  """

  # Layer 1
  layer_1 = tf.add(tf.matmul(final_output, weights["h1"]), biases["b1"])
  layer_1 = tf.nn.relu(layer_1)

  # Layer 2
  layer_2 = tf.add(tf.matmul(layer_1, weights["h2"]), biases["b2"])
  layer_2 = tf.nn.relu(layer_2)

  # output layer
  layer_out = tf.add(tf.matmul(layer_2, weights["out"]), biases["out"])

  return layer_out
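
A hypothetical parameter setup for this MLP (all sizes are illustrative):

import tensorflow as tf

size, h1, h2, target_label_size = 128, 256, 256, 2
final_output = tf.placeholder(tf.float32, [None, 2 * size])
weights = {
    'h1': tf.Variable(tf.random_normal([2 * size, h1])),
    'h2': tf.Variable(tf.random_normal([h1, h2])),
    'out': tf.Variable(tf.random_normal([h2, target_label_size])),
}
biases = {
    'b1': tf.Variable(tf.zeros([h1])),
    'b2': tf.Variable(tf.zeros([h2])),
    'out': tf.Variable(tf.zeros([target_label_size])),
}
logits = multilayer_perceptron(final_output, weights, biases)
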
model_utils.py (project: sidenet, author: shashiongithub)
def simple_rnn(rnn_input, initial_state=None):
  """Implements Simple RNN
  Args:
    rnn_input: List of tensors of sizes [-1, sentembed_size]
  Returns:
    encoder_outputs, encoder_state
  """
  # Setup cell
  cell_enc = get_lstm_cell()

  # Setup RNNs
  dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
  rnn_outputs, rnn_state = tf.nn.rnn(cell_enc, rnn_input, dtype=dtype, initial_state=initial_state)
  # print(rnn_outputs)
  # print(rnn_state)

  return rnn_outputs, rnn_state
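
Note that tf.nn.rnn is the pre-1.0 API; in TF 1.x the equivalent call is tf.contrib.rnn.static_rnn with the same arguments:

rnn_outputs, rnn_state = tf.contrib.rnn.static_rnn(
    cell_enc, rnn_input, dtype=dtype, initial_state=initial_state)
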
tensorbuilder_patch.py (project: tensorbuilder, author: cgarciae)
def sigmoid_layer(builder, size):
    x = builder.tensor()
    m = int(x.get_shape()[1])
    n = size

    w = tf.Variable(tf.random_uniform([m, n], -1.0, 1.0))
    b = tf.Variable(tf.random_uniform([n], -1.0, 1.0))

    y = tf.nn.sigmoid(tf.matmul(x, w) + b)

    return y.builder()
layers_patch.py (project: tensorbuilder, author: cgarciae)
def register_layer_functions(name, f):
    explanation = """and the keyword argument `activation_fn` is set to `tf.nn.{0}`.""".format(name)

    @TensorBuilder.Register1("tf.contrib.layers", name + "_layer", wrapped=fully_connected, explanation=explanation) #, _return_type=TensorBuilder)
    def layer_function(*args, **kwargs):
        kwargs['activation_fn'] = f
        return fully_connected(*args, **kwargs)
layers_patch.py (project: tensorbuilder, author: cgarciae)
def register_conv_layer_functions(name, f):
    explanation = """and the keyword argument `activation_fn` is set to `tf.nn.{0}`.""".format(name)

    @TensorBuilder.Register1("tf.contrib.layers", name + "_conv2d_layer", wrapped=convolution2d, explanation=explanation) #, _return_type=TensorBuilder)
    def layer_function(*args, **kwargs):
        kwargs['activation_fn'] = f
        return convolution2d(*args, **kwargs)
model.py (project: tfutils, author: neuroailab)
def _get_func(self, attr):
        custom_func = object.__getattribute__(self, 'CUSTOM_FUNC')
        custom_func_names = [f.__name__ for f in custom_func]
        if attr in custom_func_names:  # is it one of the custom functions?
            func = custom_func[custom_func_names.index(attr)]
        else:
            func = getattr(tf.nn, attr)  # ok, so it is a tf.nn function
        return func
activations.py (project: jack, author: uclmr)
def parametric_relu(x, name=None):
    alphas = tf.get_variable('{}/alpha'.format(name) if name else 'alpha',
                             x.get_shape()[-1],
                             initializer=tf.constant_initializer(0.0),
                             dtype=tf.float32)
    # (x - abs(x)) * 0.5 equals min(0, x), so this is max(0, x) + alpha * min(0, x)
    return tf.nn.relu(x) + alphas * (x - abs(x)) * 0.5
activations.py (project: jack, author: uclmr)
def selu(x, name=None):
    with tf.name_scope('{}/selu'.format(name) if name else 'selu') as _:
        alpha = 1.6732632423543772848170429916717
        scale = 1.0507009873554804934193349852946
        return scale*tf.where(x >= 0.0, x, alpha*tf.nn.elu(x))


# Aliases
activations.py (project: jack, author: uclmr)
def activation_from_string(activation_str):
    if activation_str is None:
        return tf.identity
    return getattr(tf.nn, activation_str)
vars.py (project: luminoth, author: tryolabs)
def get_activation_function(activation_function):
    if not activation_function:
        return lambda a: a

    try:
        return getattr(tf.nn, activation_function)
    except AttributeError:
        raise ValueError(
            'Invalid activation function "{}"'.format(activation_function))
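
A short usage sketch:

relu_fn = get_activation_function('relu')    # resolves to tf.nn.relu
noop_fn = get_activation_function(None)      # identity passthrough
# get_activation_function('nope') raises ValueError
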
frame_level_models.py (project: Y8M, author: mpekalski)
def create_model(self, model_input, vocab_size, num_frames, **unused_params):
    """Creates a model which uses a logistic classifier over the average of the
    frame-level features.

    This class is intended to be an example for implementors of frame level
    models. If you want to train a model over averaged features it is more
    efficient to average them beforehand rather than on the fly.

    Args:
      model_input: A 'batch_size' x 'max_frames' x 'num_features' tensor of
                   input features.
      vocab_size: The number of classes in the dataset.
      num_frames: A vector of length 'batch' which indicates the number of
           frames for each video (before padding).

    Returns:
      A dictionary with a tensor containing the probability predictions of the
      model in the 'predictions' key. The dimensions of the tensor are
      'batch_size' x 'num_classes'.
    """
    num_frames = tf.cast(tf.expand_dims(num_frames, 1), tf.float32)
    feature_size = model_input.get_shape().as_list()[2]

    denominators = tf.reshape(
        tf.tile(num_frames, [1, feature_size]), [-1, feature_size])
    avg_pooled = tf.reduce_sum(model_input,
                               axis=[1]) / denominators

    output = slim.fully_connected(
        avg_pooled, vocab_size, activation_fn=tf.nn.sigmoid,
        weights_regularizer=slim.l2_regularizer(1e-8))
    return {"predictions": output}
frame_level_models.py (project: Y8M, author: mpekalski)
def create_model(self, model_input, vocab_size, num_frames, **unused_params):
    """Creates a model which uses a stack of LSTMs to represent the video.

    Args:
      model_input: A 'batch_size' x 'max_frames' x 'num_features' tensor of
                   input features.
      vocab_size: The number of classes in the dataset.
      num_frames: A vector of length 'batch' which indicates the number of
           frames for each video (before padding).

    Returns:
      A dictionary with a tensor containing the probability predictions of the
      model in the 'predictions' key. The dimensions of the tensor are
      'batch_size' x 'num_classes'.
    """
    lstm_size = FLAGS.lstm_cells
    number_of_layers = FLAGS.lstm_layers

    ## Stack LSTM cells to represent the video sequence
    stacked_lstm = tf.contrib.rnn.MultiRNNCell(
            [
                tf.contrib.rnn.BasicLSTMCell(
                    lstm_size, forget_bias=1.0, state_is_tuple=False)
                for _ in range(number_of_layers)
                ],
            state_is_tuple=False)

    with tf.variable_scope("RNN"):
      outputs, state = tf.nn.dynamic_rnn(stacked_lstm, model_input,
                                         sequence_length=num_frames,
                                         dtype=tf.float32)

    aggregated_model = getattr(video_level_models,
                               FLAGS.video_level_classifier_model)
    return aggregated_model().create_model(
        model_input=state,
        vocab_size=vocab_size,
        **unused_params)
frame_level_models.py (project: Y8M, author: mpekalski)
def create_model(self, model_input, vocab_size, num_frames, **unused_params):
    """
    Args:
      model_input: A 'batch_size' x 'max_frames' x 'num_features' tensor of
                   input features.
      vocab_size: The number of classes in the dataset.
      num_frames: A vector of length 'batch' which indicates the number of
           frames for each video (before padding).

    Returns:
      A dictionary with a tensor containing the probability predictions of the
      model in the 'predictions' key. The dimensions of the tensor are
      'batch_size' x 'num_classes'.
    """
    num_frames = tf.cast(tf.expand_dims(num_frames, 1), tf.float32)
    feature_size = model_input.get_shape().as_list()[2]

    denominators = tf.reshape(
        tf.tile(num_frames, [1, feature_size]), [-1, feature_size])
    avg_pooled = tf.reduce_sum(model_input,
                               axis=[1]) / denominators
    # Alternative poolings left over from experimentation:
    # top 5 values for each feature coordinate across frames
    #top_5_val = tf.nn.top_k(model_input, 5).values
    # geometric mean
    #geom_mean = tf.sqrt(tf.reduce_prod(model_input, axis=[1]))
    output = slim.fully_connected(
        avg_pooled, vocab_size, activation_fn=tf.nn.sigmoid,
        weights_regularizer=slim.l2_regularizer(1e-8))
    # The debug dump below would fail as written: avg_pooled is a symbolic
    # tensor here, and np.savetxt needs a concrete array (e.g. sess.run(avg_pooled)).
    # with open('frame_level.data', 'a') as f_handle:
    #     np.savetxt(f_handle, avg_pooled)
    return {"predictions": output}
network_builder.py (project: GENNN, author: JarnoRFB)
def feedforward_layer(self, input_tensor, layer_number):
        """Build a feedforward layer ended with an activation function.

        Args:
            input_tensor: The output from the layer before.
            layer_number (int): The number of the layer in the network.

        Returns:
            tensor: The activated output.
        """
        layer_spec = self.network_spec['layers'][layer_number]
        with tf.name_scope('feedforward' + str(layer_number)):
            weighted = self._feedforward_step(input_tensor, layer_spec['size'])
            activation = getattr(tf.nn, layer_spec['activation_function'])(weighted)
        return activation
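
A hypothetical network_spec fragment this method would read (only the fields accessed above are shown; values are illustrative):

network_spec = {
    'layers': [
        {'size': 128, 'activation_function': 'relu'},
        {'size': 10, 'activation_function': 'softmax'},
    ]
}
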
network_builder.py (project: GENNN, author: JarnoRFB)
def conv_layer(self, input_tensor, layer_number):
        """Build a convolution layer ended with an activation function.

        Args:
            input_tensor: The output from the layer before.
            layer_number (int): The number of the layer in the network.

        Returns:
            tensor: The activated output.
        """
        inchannels, input_tensor = self._ensure_2d(input_tensor)

        layer_spec = self.network_spec['layers'][layer_number]
        filter_shape = (layer_spec['filter']['height'],
                        layer_spec['filter']['width'],
                        inchannels,
                        layer_spec['filter']['outchannels'])
        filter_strides = (layer_spec['strides']['inchannels'],
                          layer_spec['strides']['x'],
                          layer_spec['strides']['y'],
                          layer_spec['strides']['batch'])
        with tf.name_scope('conv' + str(layer_number)):
            w = self._weight_variable(filter_shape, name='W')
            b = self._bias_variable([layer_spec['filter']['outchannels']], name='b')
            conv = tf.nn.conv2d(input_tensor, w, strides=filter_strides, padding='SAME')
            activation = getattr(tf.nn, layer_spec['activation_function'])(conv + b, name='activation')
        return activation
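
A hypothetical layer_spec for this convolution layer (keys follow the accessors above; values are illustrative):

layer_spec = {
    'filter': {'height': 3, 'width': 3, 'outchannels': 32},
    'strides': {'inchannels': 1, 'x': 1, 'y': 1, 'batch': 1},
    'activation_function': 'relu',
}
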
network_builder.py (project: GENNN, author: JarnoRFB)
def maxpool_layer(self, input_tensor, layer_number):
        """Build a maxpooling layer.

               Args:
                   input_tensor: The output from the layer before.
                   layer_number (int): The number of the layer in the network.

               Returns:
                   tensor: The max pooled output.
               """

        _, input_tensor = self._ensure_2d(input_tensor)
        layer_spec = self.network_spec['layers'][layer_number]
        kernel_shape = (1,  # First number has to be one for ksize of maxpool layer.
                        layer_spec['kernel']['height'],
                        layer_spec['kernel']['width'],
                        layer_spec['kernel']['outchannels'])
        kernel_strides = (layer_spec['strides']['inchannels'],
                          layer_spec['strides']['x'],
                          layer_spec['strides']['y'],
                          layer_spec['strides']['batch'])

        with tf.name_scope('maxpool' + str(layer_number)):
            pool = tf.nn.max_pool(input_tensor, ksize=kernel_shape,
                                  strides=kernel_strides, padding='SAME', name='maxpool')
        return pool
configurable.py (project: Sing_Par, author: wanghm92)
def recur_func(self):
    func = self._config.get('Functions', 'recur_func')
    if func == 'identity':
      return tf.identity
    else:
      return getattr(tf.nn, func)
configurable.py (project: Sing_Par, author: wanghm92)
def mlp_func(self):
    func = self._config.get('Functions', 'mlp_func')
    if func == 'identity':
      return tf.identity
    else:
      return getattr(tf.nn, func)
configurable.py (project: Parser-v1, author: tdozat)
def recur_func(self):
    func = self._config.get('Functions', 'recur_func')
    if func == 'identity':
      return tf.identity
    elif func == 'leaky_relu':
      return lambda x: tf.maximum(.1*x, x)
    else:
      return getattr(tf.nn, func)
configurable.py (project: Parser-v1, author: tdozat)
def info_func(self):
    func = self._config.get('Functions', 'info_func')
    if func == 'identity':
      return tf.identity
    elif func == 'leaky_relu':
      return lambda x: tf.maximum(.1*x, x)
    else:
      return getattr(tf.nn, func)
configurable.py (project: Parser-v1, author: tdozat)
def mlp_func(self):
    func = self._config.get('Functions', 'mlp_func')
    if func == 'identity':
      return tf.identity
    elif func == 'leaky_relu':
      return lambda x: tf.maximum(.1*x, x)
    else:
      return getattr(tf.nn, func)
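
A hypothetical [Functions] section of the configuration file these properties read; any value other than the special cases handled above must name a tf.nn attribute:

[Functions]
recur_func = leaky_relu
info_func = identity
mlp_func = relu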

