Python dropout() example source code
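
The snippets below come from TensorFlow 1.x era projects (some still on Python 2) and use slim.dropout() from tf.contrib.slim rather than tf.nn.dropout(). Unlike tf.nn.dropout's keep_prob-only interface, slim.dropout also takes an is_training flag that turns the layer into a no-op at inference time. A minimal sketch of the setup every snippet assumes (the shapes and keep probability here are illustrative, not taken from any project below):

# Common setup assumed by the snippets on this page (TensorFlow 1.x).
import tensorflow as tf
slim = tf.contrib.slim

# slim.dropout: keep_prob is the probability of *keeping* a unit, and
# is_training=False disables dropout entirely at inference time.
x = tf.placeholder(tf.float32, shape=[None, 128])
is_training = tf.placeholder(tf.bool, name='is_training')
net = slim.fully_connected(x, 64, scope='fc1')
net = slim.dropout(net, keep_prob=0.5, is_training=is_training, scope='drop1')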

model_cifar10.py (project: easy-tensorflow, author: khanhptnk)
def compute(self, inputs):
    """Compute a batch of outputs of the neural network from a batch of inputs.
      Args:
        inputs: a tensorflow tensor, a batch of input images. Each image is of
          size InputReaderCifar10.IMAGE_SIZE x InputReaderCifar10.IMAGE_SIZE x
          InputReaderCifar10.NUM_CHANNELS.
      Returns:
        net: a tensorflow op, output of the network.
        embedding: a tensorflow op, output of the embedding layer (the second
          last fully connected layer).
    """
    hparams = self._hparams
    net = None
    num_pool_conv_layers = len(hparams.nums_conv_filters)
    for i in xrange(num_pool_conv_layers):
      net = slim.conv2d(inputs if i == 0 else net,
                        hparams.nums_conv_filters[i],
                        [hparams.conv_filter_sizes[i], hparams.conv_filter_sizes[i]],
                        padding="SAME",
                        biases_initializer=tf.constant_initializer(0.1 * i),
                        scope="conv_{0}".format(i))
      net = slim.max_pool2d(net,
                            [hparams.pooling_size, hparams.pooling_size],
                            hparams.pooling_stride,
                            scope="pool_{0}".format(i))

    net = slim.flatten(net, scope="flatten")
    net = slim.fully_connected(net,
                               384,
                               biases_initializer=tf.constant_initializer(0.1),
                               scope="fc_{0}".format(num_pool_conv_layers))

    net = slim.dropout(net,
                       hparams.dropout_prob,
                       scope="dropout_{0}".format(num_pool_conv_layers))

    embedding = slim.fully_connected(net,
                                     192,
                                     biases_initializer=tf.constant_initializer(0.1),
                                     scope="fc_{0}".format(num_pool_conv_layers + 1))

    net = slim.fully_connected(embedding,
                               InputReaderCifar10.NUM_CLASSES,
                               activation_fn=None,
                               biases_initializer=tf.constant_initializer(0.0),
                               scope="fc_{0}".format(num_pool_conv_layers + 2))

    return net, embedding
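
Note that compute() above passes only hparams.dropout_prob to slim.dropout; the layer's is_training argument defaults to True, so dropout would stay active at evaluation time unless the caller gates it. One common pattern (a sketch, not part of this project) sets the flag for every dropout layer at once with an arg_scope:

# Hypothetical wrapper: gate all slim.dropout calls inside compute().
# `model` and `images` stand in for this project's objects.
def compute_with_mode(model, images, is_training):
    with slim.arg_scope([slim.dropout], is_training=is_training):
        return model.compute(images)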
inception_model.py (project: tensorflow_face, author: ZhihengCV)
def inference(images, num_classes, is_training=True, scope='inception_v3'):
    """Build Inception v3 model architecture.

     See here for reference: http://arxiv.org/abs/1512.00567

    Args:
        images: Images returned from inputs() or distorted_inputs().
        num_classes: number of classes
        is_training: If set to `True`, build the inference model for training.
            Kernels that operate differently between training and inference,
            e.g. dropout, are configured accordingly.
        scope: optional prefix string identifying the ImageNet tower.

    Returns:
        Logits. 2-D float Tensor.
        Auxiliary Logits. 2-D float Tensor of side-head. Used for training only.
    """
    # Parameters for BatchNorm.
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': BATCHNORM_MOVING_AVERAGE_DECAY,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
        # Whether to update the moving averages (training) or use the stored ones (inference).
        'is_training': is_training
    }
    # Set weight_decay for weights in Conv and FC layers.
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_regularizer=slim.l2_regularizer(FLAGS.weight_decay)):
        with slim.arg_scope([slim.conv2d],
                            weights_initializer=slim.variance_scaling_initializer(),
                            activation_fn=tf.nn.relu,
                            normalizer_fn=slim.batch_norm,
                            normalizer_params=batch_norm_params):
            logits, endpoints = inception_v3(
                images,
                num_classes=num_classes,
                dropout_keep_prob=0.8,
                is_training=is_training,
                scope=scope
            )

    # Add summaries for viewing model statistics on TensorBoard.
    _activation_summaries(endpoints)

    # Grab the logits associated with the side head. Employed during training.
    auxiliary_logits = endpoints['aux_logits']

    return logits, auxiliary_logits
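
This function leans on several module-level names the excerpt does not show: BATCHNORM_MOVING_AVERAGE_DECAY, FLAGS, inception_v3 (from the TF-Slim model zoo), and _activation_summaries. A sketch of plausible definitions (the decay and weight-decay values are illustrative defaults, not this project's settings):

# Assumed module-level setup for the excerpt above.
BATCHNORM_MOVING_AVERAGE_DECAY = 0.9997

tf.app.flags.DEFINE_float('weight_decay', 0.00004,
                          'L2 regularization strength for conv/FC weights.')
FLAGS = tf.app.flags.FLAGS

def _activation_summaries(endpoints):
    # One histogram summary per named endpoint, for TensorBoard.
    for name, activation in endpoints.items():
        tf.summary.histogram('activations/' + name, activation)

When is_training is True, slim.batch_norm collects its moving-average update ops in tf.GraphKeys.UPDATE_OPS, so the training loop must run those ops alongside the train op.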
squeezenet_model.py (project: tensorflow_face, author: ZhihengCV)
def inference(images, num_classes, is_training=True, scope='squeeze'):
    """
    Args:
        images: Images returned from inputs() or distorted_inputs().
        num_classes: number of classes
        is_training: If set to `True`, build the inference model for training.
            Kernels that operate differently between training and inference,
            e.g. dropout, are configured accordingly.
        scope: optional prefix string identifying the ImageNet tower.
    Returns:
        Logits. 2-D float Tensor.
    """
    # Parameters for BatchNorm.
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': BATCHNORM_MOVING_AVERAGE_DECAY,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
        # Whether to update the moving averages (training) or use the stored ones (inference).
        'is_training': is_training
    }
    # Set weight_decay for weights in Conv and FC layers.
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_regularizer=slim.l2_regularizer(FLAGS.weight_decay)):
        with slim.arg_scope([slim.conv2d],
                            weights_initializer=slim.variance_scaling_initializer(),
                            activation_fn=tf.nn.relu,
                            normalizer_fn=slim.batch_norm,
                            normalizer_params=batch_norm_params):
            logits, endpoints = squeezenet(
                images,
                num_classes=num_classes,
                keep_prob=0.5,
                is_training=is_training,
                scope=scope
            )
    # Add summaries for viewing model statistics on TensorBoard.
    _activation_summaries(endpoints)

    return logits
vgg_model.py (project: tensorflow_face, author: ZhihengCV)
def vgg_16(inputs,
           num_classes=1000,
           is_training=True,
           dropout_keep_prob=0.5,
           spatial_squeeze=True,
           scope='vgg_16'):
  """Oxford Net VGG 16-Layers version D Example.

  Note: All the fully_connected layers have been transformed to conv2d layers.
        To use in classification mode, resize input to 224x224.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: the probability that activations are kept in the dropout
      layers during training.
    spatial_squeeze: whether or not to squeeze the spatial dimensions of the
      outputs. Useful to remove unnecessary dimensions for classification.
    scope: Optional scope for the variables.

  Returns:
    the last op containing the log predictions and end_points dict.
  """
  with tf.name_scope(scope, 'vgg_16', [inputs]) as sc:
    end_points_collection = sc + '_end_points'
    # Collect outputs for conv2d, fully_connected and max_pool2d.
    with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
                        outputs_collections=end_points_collection):
      net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')
      net = slim.max_pool2d(net, [2, 2], scope='pool1')
      net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
      net = slim.max_pool2d(net, [2, 2], scope='pool2')
      net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')
      net = slim.max_pool2d(net, [2, 2], scope='pool3')
      net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4')
      net = slim.max_pool2d(net, [2, 2], scope='pool4')
      net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5')
      net = slim.max_pool2d(net, [2, 2], scope='pool5')
      # Use conv2d instead of fully_connected layers.
      net = slim.conv2d(net, 4096, [3, 3], padding='VALID', scope='fc6')
      net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                         scope='dropout6')
      net = slim.conv2d(net, 4096, [1, 1], scope='fc7')
      net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                         scope='dropout7')
      net = slim.conv2d(net, num_classes, [1, 1],
                        activation_fn=None,
                        normalizer_fn=None,
                        scope='fc8')
      # Convert end_points_collection into an end_points dict.
      end_points = slim.utils.convert_collection_to_dict(end_points_collection)
      if spatial_squeeze:
        net = tf.squeeze(net, [1, 2], name='fc8/squeezed')
        end_points[sc + '/fc8'] = net
      return net, end_points
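
One detail worth flagging: the stock slim vgg_16 uses a [7, 7] kernel for fc6, matching the 7x7 feature map a 224x224 input leaves after five 2x2 poolings. The [3, 3] VALID kernel here instead expects a 3x3 map at pool5, i.e. inputs around 96x96 to 112x112, which is plausible for this face project. A quick standalone check:

# Sanity-check the pool5 spatial size for candidate input resolutions.
# Each of the five VALID 2x2/stride-2 pools halves the size (floored).
for side in (96, 112, 224):
    size = side
    for _ in range(5):
        size //= 2
    print('input %dx%d -> pool5 %dx%d' % (side, side, size, size))
# fc6's VALID [3, 3] kernel collapses pool5 to 1x1 only when pool5 is 3x3,
# so 96x96 and 112x112 inputs work here while 224x224 would leave 5x5.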
vgg_model.py (project: tensorflow_face, author: ZhihengCV)
def inference(images, num_classes, is_training=True, scope='vgg_16'):
    """Build Inception v3 model architecture.

     See here for reference: http://arxiv.org/abs/1512.00567

    Args:
        images: Images returned from inputs() or distorted_inputs().
        num_classes: number of classes
        is_training: If set to `True`, build the inference model for training.
            Kernels that operate differently between training and inference,
            e.g. dropout, are configured accordingly.
        scope: optional prefix string identifying the ImageNet tower.

    Returns:
        Logits. 2-D float Tensor.
    """
    # Parameters for BatchNorm.
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': BATCHNORM_MOVING_AVERAGE_DECAY,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
        # Whether to update the moving averages (training) or use the stored ones (inference).
        'is_training': is_training
    }
    # Set weight_decay for weights in Conv and FC layers.
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_regularizer=slim.l2_regularizer(FLAGS.weight_decay)):
        with slim.arg_scope([slim.conv2d],
                            weights_initializer=slim.variance_scaling_initializer(),
                            activation_fn=tf.nn.relu,
                            normalizer_fn=slim.batch_norm,
                            normalizer_params=batch_norm_params):
            logits, endpoints = vgg_16(
                images,
                num_classes=num_classes,
                dropout_keep_prob=0.8,
                is_training=is_training,
                scope=scope
            )

    # Add summaries for viewing model statistics on TensorBoard.
    _activation_summaries(endpoints)

    return logits
ucf101wrapFlow.py (project: deepOF, author: bryanyzhu)
def VGG16(inputs, outputs, loss_weight, labels):
    """
    Spatial stream based on VGG16

    """

    with slim.arg_scope([slim.conv2d, slim.fully_connected], 
                        activation_fn=tf.nn.relu,
                        weights_initializer=tf.truncated_normal_initializer(0.0, 0.01),
                        weights_regularizer=slim.l2_regularizer(0.0005)):

        # conv1_1 = slim.conv2d(tf.concat(3, [inputs, outputs]), 64, [3, 3], scope='conv1_1')
        conv1_1 = slim.conv2d(inputs, 64, [3, 3], scope='conv1_1')
        conv1_2 = slim.conv2d(conv1_1, 64, [3, 3], scope='conv1_2')
        pool1 = slim.max_pool2d(conv1_2, [2, 2], scope='pool1')

        conv2_1 = slim.conv2d(pool1, 128, [3, 3], scope='conv2_1')
        conv2_2 = slim.conv2d(conv2_1, 128, [3, 3], scope='conv2_2')
        pool2 = slim.max_pool2d(conv2_2, [2, 2], scope='pool2')

        conv3_1 = slim.conv2d(pool2, 256, [3, 3], scope='conv3_1')
        conv3_2 = slim.conv2d(conv3_1, 256, [3, 3], scope='conv3_2')
        conv3_3 = slim.conv2d(conv3_2, 256, [3, 3], scope='conv3_3')
        pool3 = slim.max_pool2d(conv3_3, [2, 2], scope='pool3')

        conv4_1 = slim.conv2d(pool3, 512, [3, 3], scope='conv4_1')
        conv4_2 = slim.conv2d(conv4_1, 512, [3, 3], scope='conv4_2')
        conv4_3 = slim.conv2d(conv4_2, 512, [3, 3], scope='conv4_3')
        pool4 = slim.max_pool2d(conv4_3, [2, 2], scope='pool4')

        conv5_1 = slim.conv2d(pool4, 512, [3, 3], scope='conv5_1')
        conv5_2 = slim.conv2d(conv5_1, 512, [3, 3], scope='conv5_2')
        conv5_3 = slim.conv2d(conv5_2, 512, [3, 3], scope='conv5_3')
        pool5 = slim.max_pool2d(conv5_3, [2, 2], scope='pool5')

        flatten5 = slim.flatten(pool5, scope='flatten5')
        fc6 = slim.fully_connected(flatten5, 4096, scope='fc6')
        dropout6 = slim.dropout(fc6, 0.9, scope='dropout6')
        fc7 = slim.fully_connected(dropout6, 4096, scope='fc7')
        dropout7 = slim.dropout(fc7, 0.9, scope='dropout7')
        fc8 = slim.fully_connected(dropout7, 101, activation_fn=None, scope='fc8')
        prob = tf.nn.softmax(fc8)
        predictions = tf.argmax(prob, 1)

        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=fc8, labels=labels)
        actionLoss = tf.reduce_mean(cross_entropy)

        zeroCon = tf.constant(0)
        losses = [zeroCon, zeroCon, zeroCon, zeroCon, zeroCon, zeroCon, actionLoss]
        flows_all = [zeroCon, zeroCon, zeroCon, zeroCon, zeroCon, zeroCon, prob]

        slim.losses.add_loss(actionLoss)

        return losses, flows_all, predictions
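
VGG16() registers actionLoss via slim.losses.add_loss, which lets the L2 terms from the weights_regularizer above be folded into a single objective. A hedged sketch of how a training step might consume it (the optimizer and learning rate are illustrative, not this project's choices):

# Illustrative training step: total loss = registered losses + regularizers.
total_loss = slim.losses.get_total_loss(add_regularization_losses=True)
optimizer = tf.train.MomentumOptimizer(learning_rate=1e-3, momentum=0.9)
train_op = optimizer.minimize(total_loss)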
base_network.py (project: malmomo, author: matpalm)
def conv_net_on(self, input_layer, opts):
    # TODO: reinclude batch_norm config, hasn't been helping at all...

    # convert input_layer from uint8 (0, 255) to float32 (0.0, 1.0)
    input_layer = tf.to_float(input_layer) / 255

    # whiten image, per channel, using batch_normalisation layer with
    # params calculated directly from batch.
    axis = list(range(input_layer.get_shape().ndims - 1))
    batch_mean, batch_var = tf.nn.moments(input_layer, axis)  # calcs moments per channel
    whitened_input_layer = tf.nn.batch_normalization(input_layer, batch_mean, batch_var,
                                                     scale=None, offset=None,
                                                     variance_epsilon=1e-6)

    model = slim.conv2d(whitened_input_layer, num_outputs=8, kernel_size=[5, 5], scope='conv1a')
#    model = slim.conv2d(whitened_input_layer, num_outputs=8, kernel_size=[5, 5], scope='conv1b')
    model = slim.max_pool2d(model, kernel_size=[2, 2], scope='pool1')
    self.pool1 = model
    print >>sys.stderr, "pool1", util.shape_and_product_of(model)

    model = slim.conv2d(model, num_outputs=16, kernel_size=[5, 5], scope='conv2a')
#    model = slim.conv2d(model, num_outputs=16, kernel_size=[5, 5], scope='conv2b')
    model = slim.max_pool2d(model, kernel_size=[2, 2], scope='pool2')
    self.pool2 = model
    print >>sys.stderr, "pool2", util.shape_and_product_of(model)

    model = slim.conv2d(model, num_outputs=32, kernel_size=[3, 3], scope='conv3a')
#    model = slim.conv2d(model, num_outputs=32, kernel_size=[3, 3], scope='conv3b')
    model = slim.max_pool2d(model, kernel_size=[2, 2], scope='pool3')
    self.pool3 = model
    print >>sys.stderr, "pool3", util.shape_and_product_of(model)

    # a final unpooled conv net just to drop params down. maybe pool here too actually?
#    model = slim.conv2d(model, num_outputs=32, kernel_size=[3, 3], scope='conv4a')
#    model = slim.conv2d(model, num_outputs=32, kernel_size=[3, 3], scope='conv3b')
#    model = slim.max_pool2d(model, kernel_size=[2, 2], scope='pool4')
#    self.pool3 = model
#    print >>sys.stderr, "pool4", util.shape_and_product_of(model)

    # do simple maxout on output to reduce dimensionality down for the upcoming
    # fully connected layers. see  https://arxiv.org/abs/1302.4389
#    model = tf.reshape(model, (-1, 15, 20, 8, 4))      # (?, 15, 20, 32) -> (?, 15, 20, 8, 4)
#    model = tf.reduce_max(model, reduction_indices=4)  # (?, 15, 20, 8)
#    print >>sys.stderr, "maxout", util.shape_and_product_of(model)

    model = slim.flatten(model, scope='flat')
    if opts.use_dropout:
      model = slim.dropout(model, is_training=IS_TRAINING, scope="drop" % i)
    return model
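
The commented-out maxout block above (see the arXiv link) reduces channel count by taking an element-wise max over groups of feature maps. A standalone sketch of the same idea, with the group size k as an assumed parameter:

# Maxout over channels: split C feature maps into C // k groups of k and
# keep the per-group maximum. Static H/W/C shapes are assumed known.
def channel_maxout(x, k):
    _, h, w, c = x.get_shape().as_list()
    assert c % k == 0, "channels must divide evenly into groups"
    x = tf.reshape(x, (-1, h, w, c // k, k))
    return tf.reduce_max(x, reduction_indices=4)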
network_in_network.py (project: Deep_Learning_In_Action, author: SunnyMarkLiu)
def build_nin_model(self):
        # input features
        self.x = tf.placeholder(tf.float32, shape=[None, self.input_height, self.input_width, self.input_channels],
                                name='input_layer')
        self.y = tf.placeholder(tf.float32, [None, self.num_classes], name='output_layer')

        # learning_rate placeholder
        self.learning_rate = tf.placeholder(tf.float32, name='learning_rate')
        # dropout layer: keep probability (VGG default: 0.5)
        self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')
        print('x:' + str(self.x.get_shape().as_list()))

        self.nin_lay_1 = self.mlp_conv(self.x, kernel_size=11, stride=2, num_filters=96,
                                       micro_layer_size=[96, 96], name='nin_lay_1')
        # add dropout
        dropout = slim.dropout(self.nin_lay_1, keep_prob=self.keep_prob)
        self.maxpooling_1 = slim.max_pool2d(dropout, kernel_size=3, stride=2, padding='SAME')
        print('maxpooling_1:' + str(self.maxpooling_1.get_shape().as_list()))

        self.nin_lay_2 = self.mlp_conv(self.maxpooling_1, kernel_size=5, stride=1, num_filters=256,
                                       micro_layer_size=[256, 256], name='nin_lay_2')
        # add dropout
        dropout = slim.dropout(self.nin_lay_2, keep_prob=self.keep_prob)
        self.maxpooling_2 = slim.max_pool2d(dropout, kernel_size=3, stride=2, padding='SAME')
        print('maxpooling_2:' + str(self.maxpooling_2.get_shape().as_list()))

        self.nin_lay_3 = self.mlp_conv(self.maxpooling_2, kernel_size=3, stride=1, num_filters=384,
                                       micro_layer_size=[384, 384], name='nin_lay_3')
        # NO dropout
        self.maxpooling_3 = slim.max_pool2d(self.nin_lay_3, kernel_size=3, stride=2, padding='SAME')
        print('maxpooling_3:' + str(self.maxpooling_3.get_shape().as_list()))

        self.nin_lay_4 = self.mlp_conv(self.maxpooling_3, kernel_size=3, stride=1, num_filters=1024,
                                       micro_layer_size=[1024, self.num_classes], name='nin_lay_4')
        self.maxpooling_4 = slim.max_pool2d(self.nin_lay_4, kernel_size=3, stride=2, padding='SAME')
        print('maxpooling_4:' + str(self.maxpooling_4.get_shape().as_list()))
        # global average pooling
        self.digits = self.golbal_average_pooling(self.nin_lay_4)
        self.digits = self.digits[:, 0, 0, :]
        print('golbal_average_pooling:' + str(self.digits.get_shape().as_list()))
        # softmax
        self.read_out_logits = tf.nn.softmax(self.digits)
        print('read_out_logits:' + str(self.read_out_logits.get_shape().as_list()))
model_baseline.py 文件源码 项目:Relation-Network-Tensorflow 作者: gitlimlab 项目源码 文件源码 阅读 17 收藏 0 点赞 0 评论 0
def build(self, is_train=True):

        n = self.a_dim
        conv_info = self.conv_info

        # build loss and accuracy {{{
        def build_loss(logits, labels):
            # Cross-entropy loss
            loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels)

            # Classification accuracy
            correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(labels, 1))
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
            return tf.reduce_mean(loss), accuracy
        # }}}

        # Classifier: takes images as input and outputs class label [B, m]
        def C(img, q, scope='Classifier'):
            with tf.variable_scope(scope) as scope:
                log.warn(scope.name)
                conv_1 = conv2d(img, conv_info[0], is_train, s_h=3, s_w=3, name='conv_1')
                conv_2 = conv2d(conv_1, conv_info[1], is_train, s_h=3, s_w=3, name='conv_2')
                conv_3 = conv2d(conv_2, conv_info[2], is_train, name='conv_3')
                conv_4 = conv2d(conv_3, conv_info[3], is_train, name='conv_4')
                conv_q = tf.concat([tf.reshape(conv_4, [self.batch_size, -1]), q], axis=1)
                fc_1 = fc(conv_q, 256, name='fc_1')
                fc_2 = fc(fc_1, 256, name='fc_2')
                fc_2 = slim.dropout(fc_2, keep_prob=0.5, is_training=is_train, scope='fc_3/')
                fc_3 = fc(fc_2, n, activation_fn=None, name='fc_3')
                return fc_3

        logits = C(self.img, self.q, scope='Classifier')
        self.all_preds = tf.nn.softmax(logits)
        self.loss, self.accuracy = build_loss(logits, self.a)

        # Add summaries
        def draw_iqa(img, q, target_a, pred_a):
            fig, ax = tfplot.subplots(figsize=(6, 6))
            ax.imshow(img)
            ax.set_title(question2str(q))
            ax.set_xlabel(answer2str(target_a)+answer2str(pred_a, 'Predicted'))
            return fig

        try:
            tfplot.summary.plot_many('IQA/',
                                     draw_iqa, [self.img, self.q, self.a, self.all_preds],
                                     max_outputs=3,
                                     collections=["plot_summaries"])
        except:
            pass

        tf.summary.scalar("loss/accuracy", self.accuracy)
        tf.summary.scalar("loss/cross_entropy", self.loss)
        log.warn('Successfully loaded the model.')
layers.py (project: vessel-classification, author: GlobalFishingWatch)
def misconception_model(input,
                        window_size,
                        depths,
                        strides,
                        objective_functions,
                        is_training,
                        sub_count=128,
                        sub_layers=2,
                        keep_prob=0.5):
    """ A misconception tower.

  Args:
    input: a tensor of size [batch_size, 1, width, depth].
    window_size: the width of the conv and pooling filters to apply.
    depths: the depth of the output tensor at each level of the tower.
    strides: the stride (downsampling) to apply at each level of the tower.
    objective_functions: a list of objective functions to add to the top of
                         the network.
    is_training: whether the network is training.
    sub_count: the filter count in each per-objective head layer.
    sub_layers: the number of layers in each per-objective head.
    keep_prob: dropout keep probability applied before each objective head.

  Returns:
    outputs: a list with one output tensor per objective function.
    layers: a list of the intermediate tower layers.
  """
    layers = []
    with slim.arg_scope([slim.batch_norm], decay=0.999):
        with slim.arg_scope([slim.fully_connected], activation_fn=tf.nn.relu):
            net = input
            layers.append(net)
            for depth, stride in zip(depths, strides):
                net = misconception_with_bypass(net, window_size, stride,
                                                depth, is_training)
                layers.append(net)
            outputs = []
            for ofunc in objective_functions:
                onet = net
                for _ in range(sub_layers - 1):
                    onet = slim.conv2d(
                        onet,
                        sub_count, [1, 1],
                        activation_fn=tf.nn.relu,
                        normalizer_fn=slim.batch_norm,
                        normalizer_params={'is_training': is_training})

                # Don't use batch norm on last layer, just use dropout.
                onet = slim.conv2d(onet, sub_count, [1, 1], normalizer_fn=None)
                # Global average pool
                n = int(onet.get_shape().dims[1])
                onet = slim.avg_pool2d(onet, [1, n], stride=[1, n])
                onet = slim.flatten(onet)
                #
                onet = slim.dropout(onet, keep_prob, is_training=is_training)
                outputs.append(ofunc.build(onet))

    return outputs, layers
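
misconception_with_bypass is the tower's building block, defined elsewhere in this file. Based on the shapes used here ([batch, 1, width, depth] with [1, w] filters), a plausible sketch of such a layer (an assumption, not the project's exact code) runs a conv and a max-pool in parallel, concatenates them, mixes with a 1x1 conv, and adds a downsampled bypass:

# Hypothetical misconception layer with bypass, for [batch, 1, width, depth].
def misconception_with_bypass(input, window_size, stride, depth, is_training):
    with tf.variable_scope(None, default_name='misconception'):
        conv = slim.conv2d(input, depth, [1, window_size],
                           stride=[1, stride], padding='SAME')
        pool = slim.max_pool2d(input, [1, window_size],
                               stride=[1, stride], padding='SAME')
        joint = slim.conv2d(tf.concat([conv, pool], 3), depth, [1, 1])
        # Strided 1x1 conv so the bypass matches the main branch's shape.
        bypass = slim.conv2d(input, depth, [1, 1], stride=[1, stride],
                             activation_fn=None)
        return joint + bypass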
layers.py (project: vessel-classification, author: GlobalFishingWatch)
def misconception_fishing(input,
                          window_size,
                          depths,
                          strides,
                          objective_function,
                          is_training,
                          pre_count=128,
                          post_count=128,
                          post_layers=1,
                          keep_prob=0.5,
                          internal_keep_prob=0.5,
                          other_objectives=()):

    _, layers = misconception_model(
        input,
        window_size,
        depths,
        strides,
        other_objectives,
        is_training,
        sub_count=post_count,
        sub_layers=2)

    expanded_layers = []
    for i, lyr in enumerate(layers):
        lyr = slim.conv2d(
            lyr,
            pre_count, [1, 1],
            activation_fn=tf.nn.relu,
            normalizer_fn=slim.batch_norm,
            normalizer_params={'is_training': is_training})
        expanded_layers.append(utility.repeat_tensor(lyr, 2**i))

    embedding = tf.add_n(expanded_layers)

    for _ in range(post_layers - 1):
        embedding = slim.conv2d(
            embedding,
            post_count, [1, 1],
            activation_fn=tf.nn.relu,
            normalizer_fn=slim.batch_norm,
            normalizer_params={'is_training': is_training})
    embedding = slim.conv2d(
        embedding,
        post_count, [1, 1],
        activation_fn=tf.nn.relu,
        normalizer_fn=None)
    embedding = slim.dropout(embedding, keep_prob, is_training=is_training)

    fishing_outputs = tf.squeeze(
        slim.conv2d(
            embedding, 1, [1, 1], activation_fn=None, normalizer_fn=None),
        squeeze_dims=[1, 3])

    return objective_function.build(fishing_outputs)
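
utility.repeat_tensor(lyr, 2**i) upsamples the deeper, more heavily strided layers back to a common width so all levels can be summed into one embedding. A minimal sketch of what such a helper might do (assumed, not the project's code), repeating each position along the width axis:

# Hypothetical repeat_tensor: repeat each step n times along the width axis
# of a [batch, 1, width, depth] tensor (static h/w/d shapes assumed known).
def repeat_tensor(tensor, n):
    _, h, w, d = tensor.get_shape().as_list()
    expanded = tf.expand_dims(tensor, 3)        # [b, h, w, 1, d]
    tiled = tf.tile(expanded, [1, 1, 1, n, 1])  # [b, h, w, n, d]
    return tf.reshape(tiled, [-1, h, w * n, d])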
layers.py (project: vessel-classification, author: GlobalFishingWatch)
def misconception_fishing_2(input,
                          window_size,
                          depths,
                          strides,
                          objective_function,
                          is_training,
                          pre_count=128,
                          post_count=128,
                          post_layers=1,
                          keep_prob=0.5,
                          internal_keep_prob=0.5,
                          other_objectives=()):


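    # The first feature channel is assumed to hold log(1 + dt), with dt the
    # gap in seconds between adjacent points: decode it, floor each gap at
    # 12 hours (tf.maximum), then average adjacent gaps so each output
    # position carries the time span it covers.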
    dt = tf.exp(input[:, 0, :, 0]) - 1
    dt = tf.maximum(dt, 12 * 60 * 60)
    dt = 0.5 * (dt[:, 1:] + dt[:, :-1])


    _, layers = misconception_model(
        input,
        window_size,
        depths,
        strides,
        other_objectives,
        is_training,
        sub_count=post_count,
        sub_layers=2)

    expanded_layers = []
    for i, lyr in enumerate(layers):
        lyr = slim.conv2d(
            lyr,
            pre_count, [1, 1],
            activation_fn=tf.nn.relu,
            normalizer_fn=slim.batch_norm,
            normalizer_params={'is_training': is_training})
        expanded_layers.append(utility.repeat_tensor(lyr, 2**i))

    embedding = tf.add_n(expanded_layers)

    for _ in range(post_layers - 1):
        embedding = slim.conv2d(
            embedding,
            post_count, [1, 1],
            activation_fn=tf.nn.relu,
            normalizer_fn=slim.batch_norm,
            normalizer_params={'is_training': is_training})
    embedding = slim.conv2d(
        embedding,
        post_count, [1, 1],
        activation_fn=tf.nn.relu,
        normalizer_fn=None)
    embedding = slim.dropout(embedding, keep_prob, is_training=is_training)

    fishing_outputs = tf.squeeze(
        slim.conv2d(
            embedding, 1, [1, 1], activation_fn=None, normalizer_fn=None),
        squeeze_dims=[1, 3])

    return objective_function.build(fishing_outputs, dt)
abstract_models.py (project: vessel-classification, author: GlobalFishingWatch)
def misconception_with_fishing_ranges(self, input, mmsis, is_training):
        """ A misconception tower with additional fishing range classification.

        Args:
            input: a tensor of size [batch_size, 1, width, depth].
            mmsis: the batch of vessel MMSI identifiers (unused here).
            is_training: whether the network is training.

        Builds the classification objectives and the fishing localisation
        objective as side effects; the window size, stride, feature depth and
        tower height come from the corresponding attributes on self.
        """
        with slim.arg_scope([slim.fully_connected], activation_fn=tf.nn.elu):
            net = input

            # Then a tower for classification.
            multiscale_layers = []
            for i in range(self.levels):
                with tf.variable_scope("layer_%d" % i):
                    multiscale_layers.append(utility.repeat_tensor(net, 2**i))

                    net = layers.misconception_with_bypass(
                        net, self.window_size, self.stride, self.feature_depth,
                        is_training)

            # TODO: We currently don't use the last year for fishing classification
            # Since we don't use this for vessel classification currently, perhaps
            # we should remedy that...

            net = slim.flatten(net)
            net = slim.dropout(net, 0.5, is_training=is_training)
            net = slim.fully_connected(net, 100)
            net = slim.dropout(net, 0.5, is_training=is_training)

            concatenated_multiscale_embedding = tf.concat(multiscale_layers, 3)

            fishing_outputs = tf.squeeze(
                slim.conv2d(
                    concatenated_multiscale_embedding,
                    1, [1, 1],
                    activation_fn=None),
                squeeze_dims=[1, 3])

            for of in self.classification_training_objectives:
                of.build(net)

            self.fishing_localisation_objective.build(fishing_outputs)

