Python BatchNormalization() usage examples (source code)

improved_wgan.py (project: keras-contrib, author: farizrahman4u)
def make_generator():
    """Creates a generator model that takes a 100-dimensional noise vector as a "seed", and outputs images
    of size 28x28x1."""
    model = Sequential()
    model.add(Dense(1024, input_dim=100))
    model.add(LeakyReLU())
    model.add(Dense(128 * 7 * 7))
    model.add(BatchNormalization())
    model.add(LeakyReLU())
    if K.image_data_format() == 'channels_first':
        model.add(Reshape((128, 7, 7), input_shape=(128 * 7 * 7,)))
        bn_axis = 1
    else:
        model.add(Reshape((7, 7, 128), input_shape=(128 * 7 * 7,)))
        bn_axis = -1
    model.add(Conv2DTranspose(128, (5, 5), strides=2, padding='same'))
    model.add(BatchNormalization(axis=bn_axis))
    model.add(LeakyReLU())
    model.add(Convolution2D(64, (5, 5), padding='same'))
    model.add(BatchNormalization(axis=bn_axis))
    model.add(LeakyReLU())
    model.add(Conv2DTranspose(64, (5, 5), strides=2, padding='same'))
    model.add(BatchNormalization(axis=bn_axis))
    model.add(LeakyReLU())
    # Because we normalized training inputs to lie in the range [-1, 1],
    # the tanh function should be used for the output of the generator to ensure its output
    # also lies in this range.
    model.add(Convolution2D(1, (5, 5), padding='same', activation='tanh'))
    return model
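A quick smoke test can confirm the advertised output shape (a minimal sketch, assuming numpy and the same Keras imports as the snippet; the batch size of 16 is arbitrary):

import numpy as np

generator = make_generator()
noise = np.random.normal(size=(16, 100)).astype('float32')
fake_images = generator.predict(noise)
print(fake_images.shape)  # (16, 28, 28, 1) under 'channels_last'; values in [-1, 1] from the tanh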
model.py (project: deeppavlov, author: deepmipt)
def cnn_word_model(self):
        embed_input = Input(shape=(self.opt['max_sequence_length'], self.opt['embedding_dim'],))

        outputs = []
        for i in range(len(self.kernel_sizes)):
            output_i = Conv1D(self.opt['filters_cnn'], kernel_size=self.kernel_sizes[i], activation=None,
                              kernel_regularizer=l2(self.opt['regul_coef_conv']), padding='same')(embed_input)
            output_i = BatchNormalization()(output_i)
            output_i = Activation('relu')(output_i)
            output_i = GlobalMaxPooling1D()(output_i)
            outputs.append(output_i)

        output = concatenate(outputs, axis=1)
        output = Dropout(rate=self.opt['dropout_rate'])(output)
        output = Dense(self.opt['dense_dim'], activation=None,
                       kernel_regularizer=l2(self.opt['regul_coef_dense']))(output)
        output = BatchNormalization()(output)
        output = Activation('relu')(output)
        output = Dropout(rate=self.opt['dropout_rate'])(output)
        output = Dense(1, activation=None, kernel_regularizer=l2(self.opt['regul_coef_dense']))(output)
        output = BatchNormalization()(output)
        act_output = Activation('sigmoid')(output)
        model = Model(inputs=embed_input, outputs=act_output)
        return model
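Since the head ends in a single sigmoid unit, the model is wired for binary classification. A minimal compile/fit sketch (`clf` is a hypothetical instance exposing `opt` and `kernel_sizes`; the data is random placeholder input, not deeppavlov's):

import numpy as np

model = clf.cnn_word_model()
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
X = np.random.rand(256, clf.opt['max_sequence_length'], clf.opt['embedding_dim'])
y = np.random.randint(0, 2, size=(256, 1))
model.fit(X, y, batch_size=64, epochs=2, validation_split=0.1)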
model.py (project: latplan, author: guicho271828)
def build_encoder(self,input_shape):
        return [Reshape((*input_shape,1)),
                GaussianNoise(self.parameters['noise']),
                BN(),
                *[Convolution2D(self.parameters['clayer'],(3,3),
                                activation=self.parameters['activation'],padding='same', use_bias=False),
                  Dropout(self.parameters['dropout']),
                  BN(),
                  MaxPooling2D((2,2)),],
                *[Convolution2D(self.parameters['clayer'],(3,3),
                                activation=self.parameters['activation'],padding='same', use_bias=False),
                  Dropout(self.parameters['dropout']),
                  BN(),
                  MaxPooling2D((2,2)),],
                flatten,
                Sequential([
                    Dense(self.parameters['layer'], activation=self.parameters['activation'], use_bias=False),
                    BN(),
                    Dropout(self.parameters['dropout']),
                    Dense(self.parameters['N']*self.parameters['M']),
                ])]
model.py (project: latplan, author: guicho271828)
def build_encoder(self,input_shape):
        return [Reshape((*input_shape,1)),
                GaussianNoise(0.1),
                BN(),
                Convolution2D(self.parameters['clayer'],(3,3),
                              activation=self.parameters['activation'],padding='same', use_bias=False),
                Dropout(self.parameters['dropout']),
                BN(),
                MaxPooling2D((2,2)),
                Convolution2D(self.parameters['clayer'],(3,3),
                              activation=self.parameters['activation'],padding='same', use_bias=False),
                Dropout(self.parameters['dropout']),
                BN(),
                MaxPooling2D((2,2)),
                Convolution2D(self.parameters['clayer'],(3,3),
                              activation=self.parameters['activation'],padding='same', use_bias=False),
                Dropout(self.parameters['dropout']),
                BN(),
                MaxPooling2D((2,2)),
                flatten,]
dcgan.py (project: GlottGAN, author: bajibabu)
def generator_model(noise_dim=100, aux_dim=47, model_name="generator"):
    # Merge noise and auxilary inputs
    gen_input = Input(shape=(noise_dim,), name="noise_input")
    aux_input = Input(shape=(aux_dim,), name="auxilary_input")
    x = concatenate([gen_input, aux_input], axis=-1)

    # Dense Layer 1
    x = Dense(10 * 100)(x) 
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x) # output shape is 10*100

    # Reshape the tensors to support CNNs
    x = Reshape((100, 10))(x) # shape is 100 x 10

    # Conv Layer 1
    x = Conv1D(filters=250, kernel_size=13, padding='same')(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x) # output shape is 100 x 250
    x = UpSampling1D(size=2)(x) # output shape is 200 x 250

    # Conv Layer 2
    x = Conv1D(filters=100, kernel_size=13, padding='same')(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x) # output shape is 200 x 100
    x = UpSampling1D(size=2)(x) # output shape is 400 x 100

    # Conv Layer 3
    x = Conv1D(filters=1, kernel_size=13, padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('tanh')(x) # final output shape is 400 x 1

    generator_model = Model(
        outputs=[x], inputs=[gen_input, aux_input], name=model_name)

    return generator_model
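The in-line shape comments can be checked end to end (a minimal sketch assuming numpy; the defaults noise_dim=100 and aux_dim=47 are used):

import numpy as np

gen = generator_model()
noise = np.random.normal(size=(8, 100)).astype('float32')
aux = np.random.normal(size=(8, 47)).astype('float32')
out = gen.predict([noise, aux])
print(out.shape)  # (8, 400, 1): the two UpSampling1D(size=2) stages stretch 100 timesteps to 400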
classify_models.py (project: nli_generation, author: jstarc)
def attention_bnorm_model(hidden_size, glove):

    prem_input = Input(shape=(None,), dtype='int32')
    hypo_input = Input(shape=(None,), dtype='int32')

    prem_embeddings = make_fixed_embeddings(glove, None)(prem_input)
    hypo_embeddings = make_fixed_embeddings(glove, None)(hypo_input)
    premise_layer = LSTM(output_dim=hidden_size, return_sequences=True,
                            inner_activation='sigmoid')(prem_embeddings)
    premise_bn = BatchNormalization()(premise_layer)
    hypo_layer = LSTM(output_dim=hidden_size, return_sequences=True,
                            inner_activation='sigmoid')(hypo_embeddings)
    hypo_bn = BatchNormalization()(hypo_layer)
    attention = LstmAttentionLayer(output_dim=hidden_size)([hypo_bn, premise_bn])
    att_bn = BatchNormalization()(attention)
    final_dense = Dense(3, activation='softmax')(att_bn)

    model = Model(input=[prem_input, hypo_input], output=final_dense)
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
cgan.py (project: shenlan, author: vector-1127)
def discriminator_model():
    """ return a (b, 1) logits"""
    model = Sequential()
    model.add(Convolution2D(64, 4, 4,border_mode='same',input_shape=(IN_CH*2, img_cols, img_rows)))
    model.add(BatchNormalization(mode=2))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(128, 4, 4,border_mode='same'))
    model.add(BatchNormalization(mode=2))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(512, 4, 4,border_mode='same'))
    model.add(BatchNormalization(mode=2))
    model.add(Activation('tanh'))
    model.add(Convolution2D(1, 4, 4,border_mode='same'))
    model.add(BatchNormalization(mode=2))
    model.add(Activation('sigmoid'))  # the extra tanh right before the sigmoid only compressed the scores, so it is dropped
    return model
unet.py (project: lsun_2017, author: ternaus)
def get_unet0(num_start_filters=32):
    inputs = Input((img_rows, img_cols, num_channels))
    conv1 = ConvBN2(inputs, num_start_filters)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = ConvBN2(pool1, 2 * num_start_filters)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = ConvBN2(pool2, 4 * num_start_filters)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = ConvBN2(pool3, 8 * num_start_filters)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    conv5 = ConvBN2(pool4, 16 * num_start_filters)

    up6 = concatenate([UpSampling2D(size=(2, 2))(conv5), conv4])
    conv6 = ConvBN2(up6, 8 * num_start_filters)

    up7 = concatenate([UpSampling2D(size=(2, 2))(conv6), conv3])
    conv7 = ConvBN2(up7, 4 * num_start_filters)

    up8 = concatenate([UpSampling2D(size=(2, 2))(conv7), conv2])
    conv8 = ConvBN2(up8, 2 * num_start_filters)

    up9 = concatenate([UpSampling2D(size=(2, 2))(conv8), conv1])
    conv9 = Conv2D(num_start_filters, (3, 3), padding="same", kernel_initializer="he_uniform")(up9)
    conv9 = BatchNormalization()(conv9)
    conv9 = Activation('selu')(conv9)
    conv9 = Conv2D(num_start_filters, (3, 3), padding="same", kernel_initializer="he_uniform")(conv9)
    crop9 = Cropping2D(cropping=((16, 16), (16, 16)))(conv9)
    conv9 = BatchNormalization()(crop9)
    conv9 = Activation('selu')(conv9)

    conv10 = Conv2D(num_mask_channels, (1, 1))(conv9)

    model = Model(inputs=inputs, outputs=conv10)

    return model
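Note that Cropping2D(((16, 16), (16, 16))) trims 16 pixels from every border, so the mask is 32 pixels smaller than the input on each spatial axis. A sketch assuming the module globals, e.g. img_rows = img_cols = 128 and num_mask_channels = 1 (hypothetical values, not the project's):

model = get_unet0()
print(model.output_shape)  # (None, 96, 96, 1): 128 - 16 - 16 = 96 on each axis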
main.py (project: DeepWorks, author: daigo0927)
def build_simpleCNN(input_shape = (32, 32, 3), num_output = 10):

    h, w, nch = input_shape
    assert h == w, 'expect input shape (h, w, nch), h == w'

    images = Input(shape = (h, h, nch))
    x = Conv2D(64, (4, 4), strides = (1, 1),
               kernel_initializer = init, padding = 'same')(images)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size = (2, 2))(x)
    x = Conv2D(128, (4, 4), strides = (1, 1),
               kernel_initializer = init, padding = 'same')(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size = (2, 2))(x)
    x = Flatten()(x)
    outputs = Dense(num_output, kernel_initializer = init,
                    activation = 'softmax')(x)

    model = Model(inputs = images, outputs = outputs)
    return model
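`init` is a module-level initializer defined elsewhere in main.py; a minimal forward-pass sketch that stands in 'he_normal' for it (an assumption, not the project's actual value, and it must be set in the same module that defines build_simpleCNN):

import numpy as np

init = 'he_normal'  # assumption: stand-in for the module-level initializer
model = build_simpleCNN()
x = np.random.rand(4, 32, 32, 3).astype('float32')
print(model.predict(x).shape)  # (4, 10): softmax over num_output classes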
fcn_resunet_blocks.py (project: iterative_inference_segm, author: adri-romsor)
def _bn_relu_conv(nb_filter, nb_row, nb_col, subsample=False, upsample=False,
                  batch_norm=True, weight_decay=None):

    def f(input):
        processed = input
        if batch_norm:
            processed = BatchNormalization(mode=0, axis=1)(processed)
        processed = Activation('relu')(processed)
        stride = (1, 1)
        if subsample:
            stride = (2, 2)
        if upsample:
            processed = UpSampling2D(size=(2, 2))(processed)
        return Convolution2D(nb_filter=nb_filter, nb_row=nb_row, nb_col=nb_col,
                             subsample=stride, init='he_normal',
                             border_mode='same',
                             W_regularizer=_l2(weight_decay))(processed)

    return f


# Adds a shortcut between input and residual block and merges them with 'sum'
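_bn_relu_conv returns a closure, so it composes like a layer in the pre-activation (BN, ReLU, then conv) ordering. A minimal sketch under the snippet's own Keras 1, channels-first conventions (and assuming the module's _l2 helper tolerates weight_decay=None):

inp = Input(shape=(3, 32, 32))                  # channels first, matching axis=1 in the BN
x = _bn_relu_conv(64, 3, 3)(inp)                # BN -> ReLU -> 3x3 conv, stride (1, 1)
x = _bn_relu_conv(64, 3, 3, subsample=True)(x)  # stride (2, 2): halves the spatial dims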
model.py (project: deeppavlov, author: deepmipt)
def lstm_word_model(self):
        embed_input = Input(shape=(self.opt['max_sequence_length'], self.opt['embedding_dim'],))

        output = Bidirectional(LSTM(self.opt['units_lstm'], activation='tanh',
                                      kernel_regularizer=l2(self.opt['regul_coef_lstm']),
                                      dropout=self.opt['dropout_rate']))(embed_input)

        output = Dropout(rate=self.opt['dropout_rate'])(output)
        output = Dense(self.opt['dense_dim'], activation=None,
                       kernel_regularizer=l2(self.opt['regul_coef_dense']))(output)
        output = BatchNormalization()(output)
        output = Activation('relu')(output)
        output = Dropout(rate=self.opt['dropout_rate'])(output)
        output = Dense(1, activation=None,
                       kernel_regularizer=l2(self.opt['regul_coef_dense']))(output)
        output = BatchNormalization()(output)
        act_output = Activation('sigmoid')(output)
        model = Model(inputs=embed_input, outputs=act_output)
        return model
models.py (project: keras-image-captioning, author: danieljl)
def _build_sequence_model(self, sequence_input):
        RNN = GRU if self._rnn_type == 'gru' else LSTM

        def rnn():
            rnn = RNN(units=self._rnn_output_size,
                      return_sequences=True,
                      dropout=self._dropout_rate,
                      recurrent_dropout=self._dropout_rate,
                      kernel_regularizer=self._regularizer,
                      kernel_initializer=self._initializer,
                      implementation=2)
            rnn = Bidirectional(rnn) if self._bidirectional_rnn else rnn
            return rnn

        input_ = sequence_input
        for _ in range(self._rnn_layers):
            input_ = BatchNormalization(axis=-1)(input_)
            rnn_out = rnn()(input_)
            input_ = rnn_out
        time_dist_dense = TimeDistributed(Dense(units=self._vocab_size))(rnn_out)

        return time_dist_dense
model.py (project: latplan, author: guicho271828)
def build_encoder(self,input_shape):
        return [GaussianNoise(self.parameters['noise']),
                BN(),
                Dense(self.parameters['layer'], activation='relu', use_bias=False),
                BN(),
                Dropout(self.parameters['dropout']),
                Dense(self.parameters['layer'], activation='relu', use_bias=False),
                BN(),
                Dropout(self.parameters['dropout']),
                Dense(self.parameters['layer'], activation='relu', use_bias=False),
                BN(),
                Dropout(self.parameters['dropout']),
                Dense(self.parameters['N']*self.parameters['M']),]
model.py (project: latplan, author: guicho271828)
def _build(self,input_shape):
        x = Input(shape=input_shape)
        N = input_shape[0] // 2

        y = Sequential([
            flatten,
            *[Sequential([BN(),
                          Dense(self.parameters['layer'],activation=self.parameters['activation']),
                          Dropout(self.parameters['dropout']),])
              for i in range(self.parameters['num_layers']) ],
            Dense(1,activation="sigmoid")
        ])(x)

        self.loss = bce
        self.net = Model(x, y)
        # self.callbacks.append(self.linear_schedule([0.2,0.5], 0.1))
        self.callbacks.append(GradientEarlyStopping(verbose=1,epoch=50,min_grad=self.parameters['min_grad']))
        # self.custom_log_functions['lr'] = lambda: K.get_value(self.net.optimizer.lr)
encoder.py (project: enet-keras, author: PavlosMelissinos)
def build(inp, dropout_rate=0.01):
    enet = initial_block(inp)
    enet = BatchNormalization(momentum=0.1)(enet)  # enet_unpooling uses momentum of 0.1, keras default is 0.99
    enet = PReLU(shared_axes=[1, 2])(enet)
    enet = bottleneck(enet, 64, downsample=True, dropout_rate=dropout_rate)  # bottleneck 1.0
    for _ in range(4):
        enet = bottleneck(enet, 64, dropout_rate=dropout_rate)  # bottleneck 1.i

    enet = bottleneck(enet, 128, downsample=True)  # bottleneck 2.0
    # bottleneck 2.x and 3.x
    for _ in range(2):
        enet = bottleneck(enet, 128)  # bottleneck 2.1
        enet = bottleneck(enet, 128, dilated=2)  # bottleneck 2.2
        enet = bottleneck(enet, 128, asymmetric=5)  # bottleneck 2.3
        enet = bottleneck(enet, 128, dilated=4)  # bottleneck 2.4
        enet = bottleneck(enet, 128)  # bottleneck 2.5
        enet = bottleneck(enet, 128, dilated=8)  # bottleneck 2.6
        enet = bottleneck(enet, 128, asymmetric=5)  # bottleneck 2.7
        enet = bottleneck(enet, 128, dilated=16)  # bottleneck 2.8
    return enet
core.py (project: enet-keras, author: PavlosMelissinos)
def call(self, inputs, **kwargs):
        outputs = K.conv2d(
            inputs,
            self.kernel,
            strides=self.strides,
            padding=self.padding,
            data_format=self.data_format,
            dilation_rate=self.dilation_rate)
        if self.use_bias:
            outputs = K.bias_add(
                outputs,
                self.bias,
                data_format=self.data_format)
        outputs = BatchNormalization(momentum=self.momentum)(outputs)
        if self.activation is not None:
            return self.activation(outputs)
        return outputs
encoder.py (project: enet-keras, author: PavlosMelissinos)
def build(inp, dropout_rate=0.01):
    pooling_indices = []
    enet, indices_single = initial_block(inp)
    enet = BatchNormalization(momentum=0.1)(enet)  # enet_unpooling uses momentum of 0.1, keras default is 0.99
    enet = PReLU(shared_axes=[1, 2])(enet)
    pooling_indices.append(indices_single)
    enet, indices_single = bottleneck(enet, 64, downsample=True, dropout_rate=dropout_rate)  # bottleneck 1.0
    pooling_indices.append(indices_single)
    for _ in range(4):
        enet = bottleneck(enet, 64, dropout_rate=dropout_rate)  # bottleneck 1.i

    enet, indices_single = bottleneck(enet, 128, downsample=True)  # bottleneck 2.0
    pooling_indices.append(indices_single)
    # bottleneck 2.x and 3.x
    for _ in range(2):
        enet = bottleneck(enet, 128)  # bottleneck 2.1
        enet = bottleneck(enet, 128, dilated=2)  # bottleneck 2.2
        enet = bottleneck(enet, 128, asymmetric=5)  # bottleneck 2.3
        enet = bottleneck(enet, 128, dilated=4)  # bottleneck 2.4
        enet = bottleneck(enet, 128)  # bottleneck 2.5
        enet = bottleneck(enet, 128, dilated=8)  # bottleneck 2.6
        enet = bottleneck(enet, 128, asymmetric=5)  # bottleneck 2.7
        enet = bottleneck(enet, 128, dilated=16)  # bottleneck 2.8
    return enet, pooling_indices
test_normalization.py (project: keras, author: GeekLiB)
def test_batchnorm_mode_0_or_2():
    for mode in [0, 2]:
        model = Sequential()
        norm_m0 = normalization.BatchNormalization(mode=mode, input_shape=(10,), momentum=0.8)
        model.add(norm_m0)
        model.compile(loss='mse', optimizer='sgd')

        # centered on 5.0, variance 10.0
        X = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10))
        model.fit(X, X, nb_epoch=4, verbose=0)
        out = model.predict(X)
        out -= K.eval(norm_m0.beta)
        out /= K.eval(norm_m0.gamma)

        assert_allclose(out.mean(), 0.0, atol=1e-1)
        assert_allclose(out.std(), 1.0, atol=1e-1)
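What the test inverts is the affine half of batch normalization: BN computes gamma * (x - mean) / sqrt(var + eps) + beta, so subtracting beta and dividing by gamma should leave roughly zero-mean, unit-std data. The same identity in plain numpy (a standalone sketch with arbitrary gamma and beta):

import numpy as np

x = np.random.normal(loc=5.0, scale=10.0, size=(1000, 10))
gamma, beta, eps = 1.5, 0.3, 1e-3                             # arbitrary affine parameters
y = gamma * (x - x.mean(axis=0)) / np.sqrt(x.var(axis=0) + eps) + beta
recovered = (y - beta) / gamma                                # mirrors the K.eval(beta)/K.eval(gamma) step
print(recovered.mean(), recovered.std())                      # ~0.0, ~1.0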
test_normalization.py (project: keras, author: GeekLiB)
def test_shared_batchnorm():
    '''Test that a BN layer can be shared
    across different data streams.
    '''
    # Test single layer reuse
    bn = normalization.BatchNormalization(input_shape=(10,), mode=0)
    x1 = Input(shape=(10,))
    bn(x1)

    x2 = Input(shape=(10,))
    y2 = bn(x2)

    x = np.random.normal(loc=5.0, scale=10.0, size=(2, 10))
    model = Model(x2, y2)
    assert len(model.updates) == 2
    model.compile('sgd', 'mse')
    model.train_on_batch(x, x)

    # Test model-level reuse
    x3 = Input(shape=(10,))
    y3 = model(x3)
    new_model = Model(x3, y3)
    assert len(model.updates) == 2
    new_model.compile('sgd', 'mse')
    new_model.train_on_batch(x, x)
improved_wgan.py (project: keras-contrib, author: farizrahman4u)
def make_discriminator():
    """Creates a discriminator model that takes an image as input and outputs a single value, representing whether
    the input is real or generated. Unlike normal GANs, the output is not sigmoid and does not represent a probability!
    Instead, the output should be as large and negative as possible for generated inputs and as large and positive
    as possible for real inputs.

    Note that the improved WGAN paper suggests that BatchNormalization should not be used in the discriminator."""
    model = Sequential()
    if K.image_data_format() == 'channels_first':
        model.add(Convolution2D(64, (5, 5), padding='same', input_shape=(1, 28, 28)))
    else:
        model.add(Convolution2D(64, (5, 5), padding='same', input_shape=(28, 28, 1)))
    model.add(LeakyReLU())
    model.add(Convolution2D(128, (5, 5), kernel_initializer='he_normal', strides=[2, 2]))
    model.add(LeakyReLU())
    model.add(Convolution2D(128, (5, 5), kernel_initializer='he_normal', padding='same', strides=[2, 2]))
    model.add(LeakyReLU())
    model.add(Flatten())
    model.add(Dense(1024, kernel_initializer='he_normal'))
    model.add(LeakyReLU())
    model.add(Dense(1, kernel_initializer='he_normal'))
    return model
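Because the critic's output is an unbounded score rather than a probability, it feeds the Wasserstein loss directly. A minimal pairing sketch with the make_generator above (numpy assumed; 'channels_last' data format):

import numpy as np

critic = make_discriminator()
gen = make_generator()
noise = np.random.normal(size=(16, 100)).astype('float32')
scores = critic.predict(gen.predict(noise))
print(scores.shape)  # (16, 1): raw scores, trained to be low for fakes and high for real images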
ror.py (project: keras-contrib, author: farizrahman4u)
def __initial_conv_block(input, k=1, dropout=0.0, initial=False):
    init = input

    channel_axis = 1 if K.image_dim_ordering() == 'th' else -1

    # Check if input number of filters is same as 16 * k, else create convolution2d for this input
    if initial:
        # both dim orderings take the same 1x1 projection, so no dim-ordering branch is needed
        init = Conv2D(16 * k, (1, 1), kernel_initializer='he_normal', padding='same')(init)

    x = BatchNormalization(axis=channel_axis)(input)
    x = Activation('relu')(x)
    x = Conv2D(16 * k, (3, 3), padding='same', kernel_initializer='he_normal')(x)

    if dropout > 0.0:
        x = Dropout(dropout)(x)

    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)
    x = Conv2D(16 * k, (3, 3), padding='same', kernel_initializer='he_normal')(x)

    m = add([init, x])
    return m
inception_resnet_v2.py (project: keras-inception-resnetV2, author: kentsommer)
def conv2d_bn(x, nb_filter, nb_row, nb_col,
              border_mode='same', subsample=(1, 1), 
              bias=False, activ_fn='relu', normalize=True):
    """
    Utility function to apply conv + BN. 
    (Slightly modified from https://github.com/fchollet/keras/blob/master/keras/applications/inception_v3.py)
    """
    if K.image_dim_ordering() == "th":
        channel_axis = 1
    else:
        channel_axis = -1

    if not normalize:
        bias = True
    x = Convolution2D(nb_filter, nb_row, nb_col,
                      subsample=subsample,
                      border_mode=border_mode,
                      bias=bias)(x)
    if normalize:
        x = BatchNormalization(axis=channel_axis)(x)
    if activ_fn:
        x = Activation(activ_fn)(x)
    return x
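A minimal usage sketch for the helper, under the same Keras 1 API the snippet uses (filter counts and input size below are arbitrary):

inp = Input(shape=(299, 299, 3))
x = conv2d_bn(inp, 32, 3, 3, subsample=(2, 2))   # conv + BN + ReLU, spatially downsampled
x = conv2d_bn(x, 32, 3, 3)
x = conv2d_bn(x, 64, 3, 3, normalize=False)      # skips the BN and re-enables the conv bias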
co_lstm_predict_day.py (project: copper_price_forecast, author: liyinwei)
def build_model():
    """
    ????
    """
    model = Sequential()

    model.add(LSTM(units=Conf.LAYERS[1], input_shape=(Conf.LAYERS[1], Conf.LAYERS[0]), return_sequences=True))
    model.add(Dropout(0.2))

    model.add(LSTM(Conf.LAYERS[2], return_sequences=False))
    model.add(Dropout(0.2))

    model.add(Dense(units=Conf.LAYERS[3]))
    # model.add(BatchNormalization(weights=None, epsilon=1e-06, momentum=0.9))
    model.add(Activation("tanh"))
    # act = PReLU(alpha_initializer='zeros', weights=None)
    # act = LeakyReLU(alpha=0.3)
    # model.add(act)

    start = time.time()
    model.compile(loss="mse", optimizer="rmsprop")
    print("> Compilation Time : ", time.time() - start)
    return model
co_lstm_predict_sequence.py (project: copper_price_forecast, author: liyinwei)
def build_model():
    """
    ????
    """
    model = Sequential()

    model.add(LSTM(units=Conf.LAYERS[1], input_shape=(Conf.LAYERS[1], Conf.LAYERS[0]), return_sequences=True))
    model.add(Dropout(0.2))

    model.add(LSTM(Conf.LAYERS[2], return_sequences=False))
    model.add(Dropout(0.2))

    model.add(Dense(units=Conf.LAYERS[3]))
    # model.add(BatchNormalization(weights=None, epsilon=1e-06, momentum=0.9))
    model.add(Activation("tanh"))
    # act = PReLU(alpha_initializer='zeros', weights=None)
    # act = LeakyReLU(alpha=0.3)
    # model.add(act)

    start = time.time()
    model.compile(loss="mse", optimizer="rmsprop")
    print("> Compilation Time : ", time.time() - start)
    return model
inceptionv4.py (project: dogsVScats, author: prajwalkr)
def conv2d_bn(x, nb_filter, nb_row, nb_col,
              border_mode='same', subsample=(1, 1), bias=False):
    """
    Utility function to apply conv + BN. 
    (Slightly modified from https://github.com/fchollet/keras/blob/master/keras/applications/inception_v3.py)
    """
    if K.image_dim_ordering() == "th":
        channel_axis = 1
    else:
        channel_axis = -1
    x = Convolution2D(nb_filter, nb_row, nb_col,
                      subsample=subsample,
                      border_mode=border_mode,
                      bias=bias)(x)
    x = BatchNormalization(axis=channel_axis)(x)
    x = Activation('relu')(x)
    return x
residual_model.py (project: keras_detect_tool_wear, author: kidozh)
def first_block(tensor_input,filters,kernel_size=3,pooling_size=1,dropout=0.5):
    k1,k2 = filters

    out = Conv1D(k1,1,padding='same')(tensor_input)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv1D(k2,kernel_size,padding='same')(out)


    pooling = MaxPooling1D(pooling_size,padding='same')(tensor_input)


    # out = merge([out,pooling],mode='sum')
    out = add([out,pooling])
    return out
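One constraint worth noting: the final add merges a k2-channel conv branch with the untouched pooling branch, so k2 must equal the channel count of tensor_input. A shape-compatible sketch (the dimensions are arbitrary):

inp = Input(shape=(128, 16))                 # 128 timesteps, 16 channels
out = first_block(inp, filters=(32, 16))     # k2 = 16 matches the input channels, so add() works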
residual_model.py (project: keras_detect_tool_wear, author: kidozh)
def repeated_block(x,filters,kernel_size=3,pooling_size=1,dropout=0.5):

    k1,k2 = filters


    out = BatchNormalization()(x)
    out = Activation('relu')(out)
    out = Conv1D(k1,kernel_size,padding='same')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv1D(k2,kernel_size,padding='same')(out)


    pooling = MaxPooling1D(pooling_size,padding='same')(x)

    out = add([out, pooling])

    #out = merge([out,pooling])
    return out
residual_model.py (project: keras_detect_tool_wear, author: kidozh)
def first_2d_block(tensor_input,filters,kernel_size=3,pooling_size=1,dropout=0.5):
    k1,k2 = filters

    out = Conv2D(k1,1,padding='same',data_format='channels_last')(tensor_input)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv2D(k2,kernel_size,padding='same',data_format='channels_last')(out)


    pooling = MaxPooling2D(pooling_size,padding='same',data_format='channels_last')(tensor_input)


    # out = merge([out,pooling],mode='sum')
    out = add([out,pooling])
    return out
residual_model.py (project: keras_detect_tool_wear, author: kidozh)
def repeated_2d_block(x,filters,kernel_size=3,pooling_size=1,dropout=0.5):

    k1,k2 = filters


    out = BatchNormalization()(x)
    out = Activation('relu')(out)
    out = Conv2D(k1,kernel_size,padding='same',data_format='channels_last')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv2D(k2,kernel_size,padding='same',data_format='channels_last')(out)


    pooling = MaxPooling2D(pooling_size,padding='same',data_format='channels_last')(x)

    out = add([out, pooling])

    #out = merge([out,pooling])
    return out
residual_model.py (project: keras_detect_tool_wear, author: kidozh)
def repeated_block(x,filters,kernel_size=3,pooling_size=1,dropout=0.5):

    k1,k2 = filters


    out = BatchNormalization()(x)
    out = Activation('relu')(out)
    out = Conv1D(k1,kernel_size,padding='same')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv1D(k2,kernel_size,padding='same')(out)


    pooling = MaxPooling1D(pooling_size,padding='same')(x)

    out = add([out, pooling])

    #out = merge([out,pooling])
    return out

