Python Conv2D() example source code

model.py (project: DeepWorks, author: daigo0927)
def _bn_relu_conv(filters, kernel_size = (3, 3), strides = (1, 1)):
    def f(inputs):
        x = BatchNormalization()(inputs)
        x = Activation('relu')(x)
        x = Conv2D(filters, kernel_size, strides = strides,
                   kernel_initializer = init, padding = 'same')(x)
        return x
    return f
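A minimal usage sketch for this pre-activation block (BatchNorm, then ReLU, then convolution, the ordering used in pre-activation ResNets). The snippet reads a module-level initializer named init; 'he_normal' is assumed here as a stand-in:

from keras.layers import Input, BatchNormalization, Activation, Conv2D
from keras.models import Model

init = 'he_normal'  # assumption: stands in for the project's module-level initializer

inputs = Input(shape=(32, 32, 3))
x = _bn_relu_conv(filters=64)(inputs)  # BN -> ReLU -> 3x3 conv, stride 1
model = Model(inputs=inputs, outputs=x)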
model.py (project: DeepWorks, author: daigo0927)
def build(input_shape, num_outputs,
          block_fn, repetitions):

        inputs = Input(shape = input_shape)
        conv1 = Conv2D(64, (7, 7), strides = (2, 2),
                       padding = 'same')(inputs)
        conv1 = BatchNormalization()(conv1)
        conv1 = Activation('relu')(conv1)
        pool1 = MaxPooling2D(pool_size = (3, 3), strides = (2, 2),
                            padding = 'same')(conv1)

        x = pool1
        filters = 64
        first_layer = True
        for i, r in enumerate(repetitions):
            x = _residual_block(block_fn, filters = filters,
                                repetitions = r, is_first_layer = first_layer)(x)
            filters *= 2
            if first_layer:
                first_layer = False

        # final BN + ReLU, left disabled as possibly unnecessary
        # x = BatchNormalization()(x)
        # x = Activation('relu')(x)

        _, w, h, ch = K.int_shape(x)
        pool2 = AveragePooling2D(pool_size = (w, h), strides = (1, 1))(x)
        flat1 = Flatten()(pool2)
        outputs = Dense(num_outputs, kernel_initializer = init,
                        activation = 'softmax')(flat1)

        model = Model(inputs = inputs, outputs = outputs)
        return model
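A hedged call sketch: with repetitions=[2, 2, 2, 2] this yields a ResNet-18-style layout. _residual_block, the block function, and init are assumed to be defined elsewhere in the project, as in the snippets above:

# Hypothetical invocation; the helper names come from the surrounding project.
model = build(input_shape=(224, 224, 3), num_outputs=10,
              block_fn=_bn_relu_conv, repetitions=[2, 2, 2, 2])
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])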
model.py (project: WGAN_GP, author: daigo0927)
def Discriminator(image_size = 64):

    L = int(image_size)

    images = Input(shape = (L, L, 3))
    x = Conv2D(64, (4, 4), strides = (2, 2),
               kernel_initializer = init, padding = 'same')(images) # shape (L/2, L/2, 64)
    x = LeakyReLU(0.2)(x)
    x = Conv2D(128, (4, 4), strides = (2, 2),
               kernel_initializer = init, padding = 'same')(x) # shape (L/4, L/4, 128)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x)
    x = Conv2D(256, (4, 4), strides = (2, 2),
               kernel_initializer = init, padding = 'same')(x) # shape (L/8, L/8, 256)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x)
    x = Conv2D(512, (4, 4), strides = (2, 2),
               kernel_initializer = init, padding = 'same')(x) # shape (L/16, L/16, 512)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x)
    x = Flatten()(x)
    outputs = Dense(1)(x)

    model = Model(inputs = images, outputs = outputs)
    model.summary()
    return model
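Note that the final Dense(1) has no sigmoid: a WGAN critic emits an unbounded realness score rather than a probability. A usage sketch, assuming init stands in for the project's module-level initializer:

init = 'glorot_uniform'  # assumption: stands in for the project's initializer

critic = Discriminator(image_size=64)
# A typical WGAN setup would pair this critic with a Wasserstein-style loss,
# e.g. loss=lambda y_true, y_pred: K.mean(y_true * y_pred), plus weight clipping.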
decoder.py (project: enet-keras, author: PavlosMelissinos)
def bottleneck(encoder, output, upsample=False, reverse_module=False):
    internal = output // 4

    x = Conv2D(internal, (1, 1), use_bias=False)(encoder)
    x = BatchNormalization(momentum=0.1)(x)
    x = Activation('relu')(x)
    if not upsample:
        x = Conv2D(internal, (3, 3), padding='same', use_bias=True)(x)
    else:
        x = Conv2DTranspose(filters=internal, kernel_size=(3, 3), strides=(2, 2), padding='same')(x)
    x = BatchNormalization(momentum=0.1)(x)
    x = Activation('relu')(x)

    x = Conv2D(output, (1, 1), padding='same', use_bias=False)(x)

    other = encoder
    if encoder.get_shape()[-1] != output or upsample:
        other = Conv2D(output, (1, 1), padding='same', use_bias=False)(other)
        other = BatchNormalization(momentum=0.1)(other)
        if upsample and reverse_module is not False:
            other = UpSampling2D(size=(2, 2))(other)

    if upsample and reverse_module is False:
        decoder = x
    else:
        x = BatchNormalization(momentum=0.1)(x)
        decoder = add([x, other])
        decoder = Activation('relu')(decoder)

    return decoder
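This is the ENet-style bottleneck: a 1x1 reduction to output // 4 channels, a 3x3 convolution (or a strided transposed convolution when upsampling), and a 1x1 expansion, with a projected shortcut whenever the channel count changes. A usage sketch:

from keras.layers import Input

feat = Input(shape=(64, 64, 128))
same = bottleneck(feat, output=128)                  # identity shortcut
up = bottleneck(feat, output=64, upsample=True,
                reverse_module=True)                 # 2x spatial upsampling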
encoder.py (project: enet-keras, author: PavlosMelissinos)
def initial_block(inp, nb_filter=13, nb_row=3, nb_col=3, strides=(2, 2)):
    conv = Conv2D(nb_filter, (nb_row, nb_col), padding='same', strides=strides)(inp)
    max_pool = MaxPooling2D()(inp)
    merged = concatenate([conv, max_pool], axis=3)
    return merged
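With the default arguments on a 3-channel input, this reproduces the canonical ENet stem: 13 convolutional maps concatenated with the 3 max-pooled input channels gives 16 channels at half resolution. For example:

from keras.layers import Input

rgb = Input(shape=(512, 512, 3))
stem = initial_block(rgb)   # tensor of shape (None, 256, 256, 16)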
decoder.py (project: enet-keras, author: PavlosMelissinos)
def bottleneck(encoder, output, upsample=False, reverse_module=False):
    internal = output // 4

    x = Conv2D(internal, (1, 1), use_bias=False)(encoder)
    x = BatchNormalization(momentum=0.1)(x)
    # x = Activation('relu')(x)
    x = PReLU(shared_axes=[1, 2])(x)
    if not upsample:
        x = Conv2D(internal, (3, 3), padding='same', use_bias=True)(x)
    else:
        x = Conv2DTranspose(filters=internal, kernel_size=(3, 3), strides=(2, 2), padding='same')(x)
    x = BatchNormalization(momentum=0.1)(x)
    # x = Activation('relu')(x)
    x = PReLU(shared_axes=[1, 2])(x)

    x = Conv2D(output, (1, 1), padding='same', use_bias=False)(x)

    other = encoder
    if encoder.get_shape()[-1] != output or upsample:
        other = Conv2D(output, (1, 1), padding='same', use_bias=False)(other)
        other = BatchNormalization(momentum=0.1)(other)
        if upsample and reverse_module is not False:
            other = MaxUnpooling2D()([other, reverse_module])

    if upsample and reverse_module is False:
        decoder = x
    else:
        x = BatchNormalization(momentum=0.1)(x)
        decoder = add([x, other])
        # decoder = Activation('relu')(decoder)
        decoder = PReLU(shared_axes=[1, 2])(decoder)

    return decoder
encoder.py (project: enet-keras, author: PavlosMelissinos)
def initial_block(inp, nb_filter=13, nb_row=3, nb_col=3, strides=(2, 2)):
    conv = Conv2D(nb_filter, (nb_row, nb_col), padding='same', strides=strides)(inp)
    max_pool, indices = MaxPoolingWithArgmax2D()(inp)
    merged = concatenate([conv, max_pool], axis=3)
    return merged, indices
decoder.py (project: enet-keras, author: PavlosMelissinos)
def build(inp, encoder, nc, valid_shapes):
    side = conv_block_side(inp)

    x = Lambda(
        interp,
        arguments={'shape': valid_shapes[3]},
        name='sub24_sum_interp')(encoder)

    main = ConvBN(
        filters=128,
        kernel_size=3,
        dilation_rate=2,
        padding='same',
        name='conv_sub2')(x)

    x = Add(name='sub12_sum')([main, side])
    x = Activation('relu')(x)

    x = Lambda(
        interp,
        arguments={'shape': valid_shapes[2]},
        name='sub12_sum_interp')(x)

    x = Conv2D(
        filters=nc,
        kernel_size=1,
        name='conv6_cls')(x)

    out = Lambda(
        interp,
        arguments={'shape': valid_shapes[0]},
        name='conv6_interp')(x)

    return out
mnist_acgan.py (project: Kerasimo, author: s-macke)
def build_discriminator():
    # build a relatively standard conv net, with LeakyReLUs as suggested in
    # the reference paper
    cnn = Sequential()

    cnn.add(Conv2D(32, 3, padding='same', strides=2,
                   input_shape=(1, 28, 28)))
    cnn.add(LeakyReLU())
    cnn.add(Dropout(0.3))

    cnn.add(Conv2D(64, 3, padding='same', strides=1))
    cnn.add(LeakyReLU())
    cnn.add(Dropout(0.3))

    cnn.add(Conv2D(128, 3, padding='same', strides=2))
    cnn.add(LeakyReLU())
    cnn.add(Dropout(0.3))

    cnn.add(Conv2D(256, 3, padding='same', strides=1))
    cnn.add(LeakyReLU())
    cnn.add(Dropout(0.3))

    cnn.add(Flatten())

    image = Input(shape=(1, 28, 28))

    features = cnn(image)

    # first output (name=generation) is whether or not the discriminator
    # thinks the image that is being shown is fake, and the second output
    # (name=auxiliary) is the class that the discriminator thinks the image
    # belongs to.
    fake = Dense(1, activation='sigmoid', name='generation')(features)
    aux = Dense(10, activation='softmax', name='auxiliary')(features)

    return Model(image, [fake, aux])
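Since the model has two heads, it is trained with two losses; a hedged compile sketch following the upstream Keras mnist_acgan example:

discriminator = build_discriminator()
discriminator.compile(
    optimizer='adam',  # the upstream example uses Adam with tuned hyperparameters
    loss=['binary_crossentropy', 'sparse_categorical_crossentropy'])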
model.py (project: nesgym, author: codescv)
def q_function(input_shape, num_actions):
    image_input = Input(shape=input_shape)
    out = Conv2D(filters=32, kernel_size=8, strides=(4, 4), padding='valid', activation='relu')(image_input)
    out = Conv2D(filters=64, kernel_size=4, strides=(2, 2), padding='valid', activation='relu')(out)
    out = Conv2D(filters=64, kernel_size=3, strides=(1, 1), padding='valid', activation='relu')(out)
    out = Flatten()(out)
    out = Dense(512, activation='relu')(out)
    q_value = Dense(num_actions)(out)

    return image_input, q_value
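The function returns raw tensors rather than a model; a usage sketch wrapping them in a Model for DQN-style training (the Atari-style input shape and action count are assumptions):

from keras.models import Model

image_input, q_value = q_function(input_shape=(84, 84, 4), num_actions=6)
q_network = Model(inputs=image_input, outputs=q_value)
q_network.compile(optimizer='adam', loss='mse')  # squared TD error on Q-values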
deepq.py (project: ai-bs-summer17, author: uchibe)
def createModel(self):

        model = Sequential()
        model.add(Conv2D(16, (3, 3), strides=(2, 2), input_shape=(self.img_rows, self.img_cols, self.img_channels)))
        model.add(Activation('relu'))
        model.add(ZeroPadding2D((1, 1)))
        model.add(Conv2D(16, (3, 3), strides=(2, 2)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2),strides=(2, 2)))
        model.add(Flatten())
        model.add(Dense(256))
        model.add(Activation('relu'))
        # model.add(Dropout(0.5))
        model.add(Dense(self.output_size))
        # model.add(Activation('softmax'))
        # model.compile(RMSprop(lr=self.learningRate), 'MSE')
        # sgd = SGD(lr=self.learningRate)
        adam = Adam(lr=self.learningRate)
        model.compile(loss='mse', optimizer=adam)
        model.summary()

        return model
learning-from-scratch.py (project: image-classification-cervical-cancer, author: fblupi)
def create_model(opt_='adamax'):
    model = Sequential()
    model.add(Conv2D(4, (3, 3), activation='relu', input_shape=(SIZE, SIZE, 3)))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(3, 3)))
    model.add(Conv2D(8, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(3, 3)))
    model.add(Dropout(0.2))
    model.add(Flatten())
    model.add(Dense(12, activation='tanh'))
    model.add(Dropout(0.1))
    model.add(Dense(3, activation='softmax'))
    model.compile(optimizer=opt_, loss='sparse_categorical_crossentropy', metrics=['accuracy'])
    return model
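Because the model compiles with sparse_categorical_crossentropy, the labels are integer class ids (0, 1, 2) rather than one-hot vectors. A usage sketch, with SIZE (defined elsewhere in the project) assumed to be 64:

SIZE = 64  # assumption: the project defines its own input resolution

model = create_model(opt_='adamax')
# model.fit(x_train, y_train, epochs=10, batch_size=32)
# where x_train has shape (n, 64, 64, 3) and y_train holds integer labels in {0, 1, 2}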
residual_model.py (project: keras_detect_tool_wear, author: kidozh)
def build_2d_main_residual_network(batch_size,
                                width,
                                height,
                                channel_size,
                                output_dim,
                                loop_depth=15,
                                dropout=0.3):
    inp = Input(shape=(width,height,channel_size))

    # add a mask to filter out invalid data
    out = TimeDistributed(Masking(mask_value=0))(inp)


    out = Conv2D(128,5,data_format='channels_last')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)

    out = first_2d_block(out,(64,128),dropout=dropout)

    for _ in range(loop_depth):
        out = repeated_2d_block(out,(64,128),dropout=dropout)

    # add flatten
    out = Flatten()(out)

    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dense(output_dim)(out)

    model = Model(inp,out)

    model.compile(loss='mse',optimizer='adam',metrics=['mse','mae'])
    return model
model_gan.py (project: WGAN-in-Keras, author: tonyabracadabra)
def __call__(self):
        model = Sequential()
        model.add(Reshape((28, 28, 1), input_shape=(784,)))
        # Convolution Layer 1
        model.add(Conv2D(64, kernel_size=(4, 4), strides=(2, 2), \
            kernel_initializer=self.initializer))
        model.add(LeakyReLU())

        # Convolution Layer 2
        model.add(Conv2D(128, kernel_size=(4, 4), strides=(2, 2), \
            kernel_initializer=self.initializer))
        model.add(LeakyReLU())

        # Batch Normalization
        model.add(BatchNormalization())

        # Flatten the feature maps
        model.add(Flatten())

        # Dense Layer
        model.add(Dense(1024, kernel_initializer=self.initializer))
        model.add(LeakyReLU())

        # Batch Normalization
        model.add(BatchNormalization())

        # Output layer over the two classes (real / fake)
        model.add(Dense(2, activation='softmax'))

        return model
keras-CNN-mnist.py (project: keras-mnist-workshop, author: drschilling)
def cnn_model():
    model = Sequential()

    # Conv2D is our input layer: 32 feature maps of size 5 x 5,
    # with 'relu' as the activation function.
    model.add(Conv2D(32, (5, 5), input_shape=(1, 28, 28), activation='relu'))

    # MaxPooling2D is the second layer, downsampling the feature maps
    # with a 2 x 2 pool.
    model.add(MaxPooling2D(pool_size=(2, 2)))

    # For regularization we use Dropout, dropping 30% of the neurons
    # in the layer to reduce the chance of overfitting.
    model.add(Dropout(0.3))

    # Flatten converts the 2D feature maps into a vector that the
    # fully connected layers can process.
    model.add(Flatten())

    # Fully connected layer with 128 neurons and 'relu' activation.
    model.add(Dense(128, activation='relu'))

    # The output layer has as many neurons as there are classes to
    # classify, with a 'softmax' activation.
    model.add(Dense(num_classes, activation='softmax', name='preds'))

    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
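The input_shape=(1, 28, 28) expects channels-first tensors, so the MNIST arrays must be reshaped accordingly (and Keras configured with image_data_format 'channels_first'). A hedged data-preparation sketch:

from keras.datasets import mnist
from keras.utils import np_utils

(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(X_train.shape[0], 1, 28, 28).astype('float32') / 255
X_test = X_test.reshape(X_test.shape[0], 1, 28, 28).astype('float32') / 255
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
num_classes = y_test.shape[1]  # the num_classes referenced in cnn_model()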
keras-CNN-mnist-2.0.py (project: keras-mnist-workshop, author: drschilling)
def deeper_cnn_model():
    model = Sequential()

    # Conv2D is our input layer: 30 feature maps of size 5 x 5,
    # with 'relu' as the activation function.
    model.add(Conv2D(30, (5, 5), input_shape=(1, 28, 28), activation='relu'))

    # MaxPooling2D is the second layer, downsampling the feature maps
    # with a 2 x 2 pool.
    model.add(MaxPooling2D(pool_size=(2, 2)))

    # Another convolutional layer, with 15 feature maps of size 3 x 3
    # and 'relu' as the activation function.
    model.add(Conv2D(15, (3, 3), activation='relu'))

    # Another subsampling step, with a 2 x 2 pooling.
    model.add(MaxPooling2D(pool_size=(2, 2)))

    # Dropout at a 20% rate.
    model.add(Dropout(0.2))

    # Flatten prepares the data for the fully connected layers.
    model.add(Flatten())

    # Fully connected layer with 128 neurons.
    model.add(Dense(128, activation='relu'))

    # Followed by another fully connected layer with 64 neurons.
    model.add(Dense(64, activation='relu'))

    # The output layer has as many neurons as there are classes to
    # classify, with a 'softmax' activation.
    model.add(Dense(num_classes, activation='softmax', name='preds'))

    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

    return model
wgan.py (project: Keras-GAN, author: eriklindernoren)
def build_generator(self):

        noise_shape = (100,)

        model = Sequential()

        model.add(Dense(128 * 7 * 7, activation="relu", input_shape=noise_shape))
        model.add(Reshape((7, 7, 128)))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling2D())
        model.add(Conv2D(128, kernel_size=4, padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling2D())
        model.add(Conv2D(64, kernel_size=4, padding="same"))
        model.add(Activation("relu"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(1, kernel_size=4, padding="same"))
        model.add(Activation("tanh"))

        model.summary()

        noise = Input(shape=noise_shape)
        img = model(noise)

        return Model(noise, img)
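A usage sketch (the method lives on a GAN class, hence self): sample 100-dimensional Gaussian noise and map it to 28 x 28 images in [-1, 1], matching the tanh output:

import numpy as np

generator = self.build_generator()          # called on the surrounding GAN instance
noise = np.random.normal(0, 1, (16, 100))   # 16 latent vectors
fake_imgs = generator.predict(noise)        # shape (16, 28, 28, 1)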

