Python MaxPooling2D() usage examples (source code)
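For orientation before the project snippets below, here is a minimal, self-contained sketch of a typical MaxPooling2D call written against the Keras 2 API; the input shape, filter counts, and class count are illustrative assumptions, not taken from any project listed here.

from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense

def minimal_maxpool_example():
    # Assumed 28x28 single-channel input, channels_last ordering.
    model = Sequential()
    model.add(Conv2D(32, (3, 3), padding='same', activation='relu',
                     input_shape=(28, 28, 1)))
    # Halve the spatial resolution: (28, 28, 32) -> (14, 14, 32).
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding='valid'))
    model.add(Flatten())
    model.add(Dense(10, activation='softmax'))
    return model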

Source: dcgan.py (project: dcgan, author: kyloon)
def discriminator_model():
    model = Sequential()
    model.add(Convolution2D(64,5,5,
                            border_mode='same',
                            input_shape=(1,28,28),
                            dim_ordering="th"))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2,2), dim_ordering="th"))
    model.add(Convolution2D(128,5,5, border_mode='same', dim_ordering="th"))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2,2), dim_ordering="th"))
    model.add(Flatten())
    model.add(Dense(1024))
    model.add(Activation('tanh'))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    return model
Source: cgan.py (project: shenlan, author: vector-1127)
def discriminator_model():
    """ return a (b, 1) logits"""
    model = Sequential()
    model.add(Convolution2D(64, 4, 4,border_mode='same',input_shape=(IN_CH*2, img_cols, img_rows)))
    model.add(BatchNormalization(mode=2))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(128, 4, 4,border_mode='same'))
    model.add(BatchNormalization(mode=2))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(512, 4, 4,border_mode='same'))
    model.add(BatchNormalization(mode=2))
    model.add(Activation('tanh'))
    model.add(Convolution2D(1, 4, 4,border_mode='same'))
    model.add(BatchNormalization(mode=2))
    model.add(Activation('tanh'))

    model.add(Activation('sigmoid'))
    return model
Source: unet.py (project: lsun_2017, author: ternaus)
def get_unet0(num_start_filters=32):
    inputs = Input((img_rows, img_cols, num_channels))
    conv1 = ConvBN2(inputs, num_start_filters)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = ConvBN2(pool1, 2 * num_start_filters)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = ConvBN2(pool2, 4 * num_start_filters)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = ConvBN2(pool3, 8 * num_start_filters)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    conv5 = ConvBN2(pool4, 16 * num_start_filters)

    up6 = concatenate([UpSampling2D(size=(2, 2))(conv5), conv4])
    conv6 = ConvBN2(up6, 8 * num_start_filters)

    up7 = concatenate([UpSampling2D(size=(2, 2))(conv6), conv3])
    conv7 = ConvBN2(up7, 4 * num_start_filters)

    up8 = concatenate([UpSampling2D(size=(2, 2))(conv7), conv2])
    conv8 = ConvBN2(up8, 2 * num_start_filters)

    up9 = concatenate([UpSampling2D(size=(2, 2))(conv8), conv1])
    conv9 = Conv2D(num_start_filters, (3, 3), padding="same", kernel_initializer="he_uniform")(up9)
    conv9 = BatchNormalization()(conv9)
    conv9 = Activation('selu')(conv9)
    conv9 = Conv2D(num_start_filters, (3, 3), padding="same", kernel_initializer="he_uniform")(conv9)
    crop9 = Cropping2D(cropping=((16, 16), (16, 16)))(conv9)
    conv9 = BatchNormalization()(crop9)
    conv9 = Activation('selu')(conv9)

    conv10 = Conv2D(num_mask_channels, (1, 1))(conv9)

    model = Model(inputs=inputs, outputs=conv10)

    return model
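The ConvBN2 helper used in get_unet0 is not part of this excerpt. Judging from the conv9 block at the end of the function, it presumably stacks Conv2D, BatchNormalization, and a SELU activation; the reconstruction below is a hypothetical sketch under that assumption, not the project's actual code.

from keras.layers import Conv2D, BatchNormalization, Activation

def ConvBN2(x, num_filters):
    # Hypothetical helper: two 3x3 conv -> BN -> SELU stages,
    # mirroring the conv9 block in get_unet0 above.
    for _ in range(2):
        x = Conv2D(num_filters, (3, 3), padding='same',
                   kernel_initializer='he_uniform')(x)
        x = BatchNormalization()(x)
        x = Activation('selu')(x)
    return x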
Source: dcgan.py (project: Deep-Learning-with-Keras, author: PacktPublishing)
def discriminator_model():
    model = Sequential()
    model.add(Convolution2D(
                        64, 5, 5,
                        border_mode='same',
                        input_shape=(1, 28, 28)))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(128, 5, 5))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(1024))
    model.add(Activation('tanh'))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    return model
Source: keras_LeNet.py (project: Deep-Learning-with-Keras, author: PacktPublishing)
def build(input_shape, classes):
        model = Sequential()
        # CONV => RELU => POOL
        model.add(Conv2D(20, kernel_size=5, padding="same",
            input_shape=input_shape))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
        # CONV => RELU => POOL
        model.add(Conv2D(50, kernel_size=5, padding="same"))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
        # Flatten => RELU layers
        model.add(Flatten())
        model.add(Dense(500))
        model.add(Activation("relu"))

        # a softmax classifier
        model.add(Dense(classes))
        model.add(Activation("softmax"))

        return model

# network and training
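The excerpt stops where the network and training section begins. As a hypothetical usage sketch for the build function above, assuming channels_last MNIST-sized input and an Adam optimizer (neither is specified in the excerpt):

model = build(input_shape=(28, 28, 1), classes=10)
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])
model.summary()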
Source: cnn_mnist.py (project: deep_learning_ex, author: zatonovo)
def init_model():
    """
    """
    start_time = time.time()
    print('Compiling model...')
    model = Sequential()

    model.add(Convolution2D(64, 3,3, border_mode='valid', input_shape=INPUT_SHAPE))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2,2)))
    model.add(Dropout(.25))

    model.add(Flatten())

    model.add(Dense(10))
    model.add(Activation('softmax'))

    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms,
      metrics=['accuracy'])
    print('Model compiled in {0} seconds'.format(time.time() - start_time))

    model.summary()
    return model
Source: inceptionv4.py (project: dogsVScats, author: prajwalkr)
def block_reduction_a(input):
    if K.image_dim_ordering() == "th":
        channel_axis = 1
    else:
        channel_axis = -1

    branch_0 = conv2d_bn(input, 384, 3, 3, subsample=(2,2), border_mode='valid')

    branch_1 = conv2d_bn(input, 192, 1, 1)
    branch_1 = conv2d_bn(branch_1, 224, 3, 3)
    branch_1 = conv2d_bn(branch_1, 256, 3, 3, subsample=(2,2), border_mode='valid')

    branch_2 = MaxPooling2D((3,3), strides=(2,2), border_mode='valid')(input)

    x = merge([branch_0, branch_1, branch_2], mode='concat', concat_axis=channel_axis)
    return x
Source: inceptionv4.py (project: dogsVScats, author: prajwalkr)
def block_reduction_b(input):
    if K.image_dim_ordering() == "th":
        channel_axis = 1
    else:
        channel_axis = -1

    branch_0 = conv2d_bn(input, 192, 1, 1)
    branch_0 = conv2d_bn(branch_0, 192, 3, 3, subsample=(2, 2), border_mode='valid')

    branch_1 = conv2d_bn(input, 256, 1, 1)
    branch_1 = conv2d_bn(branch_1, 256, 1, 7)
    branch_1 = conv2d_bn(branch_1, 320, 7, 1)
    branch_1 = conv2d_bn(branch_1, 320, 3, 3, subsample=(2,2), border_mode='valid')

    branch_2 = MaxPooling2D((3, 3), strides=(2, 2), border_mode='valid')(input)

    x = merge([branch_0, branch_1, branch_2], mode='concat', concat_axis=channel_axis)
    return x
Source: residual_model.py (project: keras_detect_tool_wear, author: kidozh)
def first_2d_block(tensor_input,filters,kernel_size=3,pooling_size=1,dropout=0.5):
    k1,k2 = filters

    out = Conv2D(k1,1,padding='same',data_format='channels_last')(tensor_input)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv2D(k2,kernel_size,padding='same',data_format='channels_last')(out)


    pooling = MaxPooling2D(pooling_size,padding='same',data_format='channels_last')(tensor_input)


    # out = merge([out,pooling],mode='sum')
    out = add([out,pooling])
    return out
Source: residual_model.py (project: keras_detect_tool_wear, author: kidozh)
def repeated_2d_block(x,filters,kernel_size=3,pooling_size=1,dropout=0.5):

    k1,k2 = filters


    out = BatchNormalization()(x)
    out = Activation('relu')(out)
    out = Conv2D(k1,kernel_size,padding='same',data_format='channels_last')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv2D(k2,kernel_size,padding='same',data_format='channels_last')(out)


    pooling = MaxPooling2D(pooling_size,padding='same',data_format='channels_last')(x)

    out = add([out, pooling])

    #out = merge([out,pooling])
    return out
Source: model.py (project: keras_detect_tool_wear, author: kidozh)
def first_2d_block(tensor_input,filters,kernel_size=3,pooling_size=2,dropout=0.5):
    k1,k2 = filters

    out = Conv2D(k1,1,padding='same',data_format='channels_last')(tensor_input)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv2D(k2,kernel_size,2,padding='same',data_format='channels_last')(out)


    pooling = MaxPooling2D(pooling_size,padding='same',data_format='channels_last')(tensor_input)


    # out = merge([out,pooling],mode='sum')
    out = add([out,pooling])
    return out
Source: nn_arch.py (project: MixtureOfExperts, author: krishnakalyan3)
def lenet5(self):
        model = Sequential()
        model.add(Conv2D(64, (5, 5), name='conv1',
                         padding='same',
                         activation='relu',
                         input_shape=self.ip_shape[1:]))

        model.add(MaxPooling2D(pool_size=(2, 2), name='pool1'))
        # Local Normalization
        model.add(Conv2D(64, (5, 5,), padding='same', activation='relu', name='conv2'))
        # Local Normalization
        model.add(MaxPooling2D(pool_size=(2, 2), name='pool2'))

        model.add(Flatten())
        model.add(Dense(128, activation='relu', name='dense1'))
        model.add(Dropout(0.5))
        model.add(Dense(64, activation='relu', name='dense2'))
        model.add(Dropout(0.5))
        model.add(Dense(10, activation='softmax', name='dense3'))

        adam = keras.optimizers.Adam(lr=self.learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
        model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=["accuracy"])
        return model
Source: nn_arch.py (project: MixtureOfExperts, author: krishnakalyan3)
def simple_nn(self):
        model = Sequential()
        model.add(Conv2D(64, (self.stride, self.stride,), name='conv1',
                         padding='same',
                         activation='relu',
                         input_shape=self.ip_shape[1:]))

        model.add(MaxPooling2D(pool_size=(2, 2), name='pool1'))

        model.add(Flatten())
        model.add(Dense(64, activation='relu', name='dense2'))
        model.add(Dropout(0.5))
        model.add(Dense(10, activation='softmax', name='dense3'))
        adam = keras.optimizers.Adam(lr=self.learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

        model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=["accuracy"])
        return model
Source: nn_arch.py (project: MixtureOfExperts, author: krishnakalyan3)
def cuda_cnn(self):
        model = Sequential()
        model.add(Conv2D(32, (5, 5),
                         border_mode='same',
                         activation='relu',
                         input_shape=self.ip_shape[1:]))

        model.add(MaxPooling2D(pool_size=(2, 2)))
        # model.add(contrast normalization)
        model.add(Conv2D(32, (5, 5), border_mode='valid', activation='relu'))
        model.add(AveragePooling2D(border_mode='same'))
        # model.add(contrast normalization)
        model.add(Conv2D(64, (5, 5), border_mode='valid', activation='relu'))
        model.add(AveragePooling2D(border_mode='same'))
        model.add(Flatten())
        model.add(Dense(16, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(10, activation='softmax'))
        adam = keras.optimizers.Adam(lr=self.learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

        model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=["accuracy"])
        return model
Source: nn_arch.py (project: MixtureOfExperts, author: krishnakalyan3)
def small_nn(self):
        model = Sequential()
        model.add(Conv2D(64, (self.stride, self.stride,), name='conv1',
                         padding='same',
                         activation='relu',
                         input_shape=self.ip_shape[1:]))
        model.add(MaxPooling2D(pool_size=(2, 2), name='pool1'))
        model.add(BatchNormalization())

        model.add(Flatten())
        model.add(Dense(32, activation='relu', name='dense1'))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))
        model.add(Dense(10, activation='softmax', name='dense2'))
        adam = keras.optimizers.Adam(lr=self.learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)

        model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=["accuracy"])
        return model
Source: nn_arch.py (project: MixtureOfExperts, author: krishnakalyan3)
def small_nn_soft(self, temp):
        model = Sequential()
        model.add(Conv2D(64, (self.stride, self.stride,), name='conv1',
                         padding='same',
                         activation='relu',
                         input_shape=self.ip_shape[1:]))
        model.add(MaxPooling2D(pool_size=(2, 2), name='pool1'))
        model.add(BatchNormalization())

        model.add(Flatten())
        model.add(Dense(32, activation='relu', name='dense1'))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))
        model.add(Dense(10, name='dense2'))
        model.add(Lambda(lambda x: x / temp))
        model.add(Activation('softmax'))

        adam = keras.optimizers.Adam(lr=self.learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
        model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=["accuracy"])
        return model
Source: cnn.py (project: Nature-Conservancy-Fish-Image-Prediction, author: Brok-Bucholtz)
def train(img_shape):
    classes = ['ALB', 'BET', 'DOL', 'LAG', 'NoF', 'OTHER', 'SHARK', 'YFT']

    # Model
    model = Sequential()
    model.add(Convolution2D(
        32, 3, 3, input_shape=img_shape, activation='relu', W_constraint=maxnorm(3)))
    model.add(Dropout(0.2))
    model.add(Convolution2D(32, 3, 3, activation='relu', W_constraint=maxnorm(3)))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(512, activation='relu', W_constraint=maxnorm(3)))
    model.add(Dropout(0.5))
    model.add(Dense(len(classes), activation='softmax'))

    features, labels = get_featurs_labels(img_shape)

    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    model.fit(features, labels, nb_epoch=10, batch_size=32, validation_split=0.2, verbose=1)
    return model
Source: all_models.py (project: DeepRL-FlappyBird, author: hashbangCoder)
def model_default(input_shape):
    model = Sequential()
    model.add(Convolution2D(32,8,8,subsample=(4,4), border_mode='same',init='he_uniform',input_shape=input_shape))

    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Convolution2D(64,4,4, subsample=(2,2),border_mode='same' , init='he_uniform'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Convolution2D(64,3,3, subsample=(1,1),border_mode='same' , init='he_uniform'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(512, init='he_uniform'))
    model.add(Activation('relu'))
    model.add(Dense(2, init='he_uniform'))

    return model


# Model WITH BATCHNORM NO MAXPOOL NO Dropout
Source: keras_functional_api.py (project: dsde-deep-learning, author: broadinstitute)
def mnist_cnn(args, input_image):
    shape = (args.channels, args.height, args.width)
    x = Convolution2D(32, 5, 5, 
        activation='relu', 
        border_mode='valid', 
        input_shape=shape)(input_image)
    x = MaxPooling2D((2,2))(x)          
    x = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(x)
    x = Dropout(0.2)(x)
    x = MaxPooling2D((2,2))(x)  
    x = Flatten()(x)
    x = Dense(128, activation='relu')(x)
    x = Dense(64, activation='relu')(x)

    predictions = Dense(args.num_labels, activation='softmax')(x)

    # this creates a model that includes
    # the Input layer and three Dense layers
    model = Model(input=input_image, output=predictions)
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    model.summary()
    return model
Source: cnn.py (project: HSICNN, author: jamesbing)
def Net_model(lr=0.005,decay=1e-6,momentum=0.9):
    model = Sequential()
    model.add(Convolution2D(nb_filters1, nb_conv, nb_conv,
                            border_mode='valid',
                            input_shape=(1, img_rows, img_cols)))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))

    model.add(Convolution2D(nb_filters2, nb_conv, nb_conv))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
    #model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(1000)) #Full connection
    model.add(Activation('tanh'))
    #model.add(Dropout(0.5))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))

    sgd = SGD(lr=lr, decay=decay, momentum=momentum, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=sgd)

    return model
Source: milCNN.py (project: rna_protein_binding, author: wentaozhu)
def set_cnn_model(ninstance=4, input_dim = 4, input_length = 107):
    nbfilter = 16
    model = Sequential() # #seqs * seqlen * 4
    #model.add(brnn)
    model.add(Conv2D(input_shape=(ninstance, input_length, input_dim),
                            filters=nbfilter,
                            kernel_size=(1,10),
                            padding="valid",
                            #activation="relu",
                            strides=1))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(1,3))) # 32 16
    # model.add(Dropout(0.25)) # will be better
    model.add(Conv2D(filters=nbfilter*2, kernel_size=(1,32), padding='valid', activation='relu', strides=1))
    # model.add(Flatten())
    #model.add(Softmax4D(axis=1))

    #model.add(MaxPooling1D(pool_length=3))
    #model.add(Flatten())
    #model.add(Recalc(axis=1))
    # model.add(Flatten())
    # model.add(Dense(nbfilter*2, activation='relu'))
    model.add(Dropout(0.25))
    model.add(Conv2D(filters=1, kernel_size=(1,1), padding='valid', activation='sigmoid', strides=1))
    return model
Source: dcgan.py (project: keras-dcgan, author: jacobgil)
def discriminator_model():
    model = Sequential()
    model.add(
            Conv2D(64, (5, 5),
            padding='same',
            input_shape=(28, 28, 1))
            )
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Conv2D(128, (5, 5)))
    model.add(Activation('tanh'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())
    model.add(Dense(1024))
    model.add(Activation('tanh'))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    return model
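A possible way to compile and inspect this discriminator is sketched below; the loss matches the sigmoid output, but the optimizer and its settings are illustrative assumptions rather than values taken from the project.

from keras.optimizers import SGD

d = discriminator_model()
d.compile(loss='binary_crossentropy',
          optimizer=SGD(lr=0.0005, momentum=0.9, nesterov=True))
d.summary()  # expects (28, 28, 1) inputs and emits a single sigmoid score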
Source: region-cnn.py (project: exit-signs, author: daniel-j-h)
def getCNN():
    model = Sequential()

    model.add(Convolution2D(32, 3, 3, activation='relu', input_shape=kNetImageShape))
    model.add(Convolution2D(32, 3, 3, activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.5))

    model.add(Dense(kNetNumClasses, activation='softmax'))

    model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])

    return model
Source: train.py (project: RankFace, author: Entropy-xcy)
def make_network():
    model = Sequential()
    model.add(Conv2D(32, (3, 3), padding='same', input_shape=(128, 128, 3)))
    model.add(Activation('relu'))
    model.add(Conv2D(32, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))

    model.add(Flatten())
    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(1))
    # model.add(Activation('tanh'))

    return model
Source: 1100cars.py (project: keras-convautoencoder, author: nanopony)
def build_model(nb_filters=32, nb_pool=2, nb_conv=3):
    C_1 = 64
    C_2 = 32
    C_3 = 16
    c = Convolution2D(C_1, nb_conv, nb_conv, border_mode='same', input_shape=(3, 32, 32))
    mp = MaxPooling2D(pool_size=(nb_pool, nb_pool))
    c2 = Convolution2D(C_2, nb_conv, nb_conv, border_mode='same', input_shape=(3, 32, 32))
    mp2 = MaxPooling2D(pool_size=(nb_pool, nb_pool))
    d = Dense(100)
    encoder = get_encoder(c, c2, d, mp, mp2)
    decoder = get_decoder(C_1, C_2, C_3, c, c2, d, mp, mp2, nb_pool)

    graph = Graph()
    graph.add_input(name='input', input_shape=(3, 32, 32))
    graph.add_node(encoder, name='encoder', input='input')
    graph.add_node(decoder, name='decoder', input='encoder')
    graph.add_output(name='autoencoder_feedback', input='decoder')
    graph.compile('rmsprop', {'autoencoder_feedback': 'mean_squared_error'})

    return graph
Source: conv_autoencoder.py (project: keras-convautoencoder, author: nanopony)
def build_model(nb_filters=32, nb_pool=2, nb_conv=3):
    model = models.Sequential()
    d = Dense(30)
    c = Convolution2D(nb_filters, nb_conv, nb_conv, border_mode='same', input_shape=(1, 28, 28))
    mp =MaxPooling2D(pool_size=(nb_pool, nb_pool))
    # =========      ENCODER     ========================
    model.add(c)
    model.add(Activation('tanh'))
    model.add(mp)
    model.add(Dropout(0.25))
    # =========      BOTTLENECK     ======================
    model.add(Flatten())
    model.add(d)
    model.add(Activation('tanh'))
    # =========      BOTTLENECK^-1   =====================
    model.add(DependentDense(nb_filters * 14 * 14, d))
    model.add(Activation('tanh'))
    model.add(Reshape((nb_filters, 14, 14)))
    # =========      DECODER     =========================
    model.add(DePool2D(mp, size=(nb_pool, nb_pool)))
    model.add(Deconvolution2D(c, border_mode='same'))
    model.add(Activation('tanh'))

    return model
Source: inception_v4.py (project: keras-inceptionV4, author: kentsommer)
def block_reduction_a(input):
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1

    branch_0 = conv2d_bn(input, 384, 3, 3, strides=(2,2), padding='valid')

    branch_1 = conv2d_bn(input, 192, 1, 1)
    branch_1 = conv2d_bn(branch_1, 224, 3, 3)
    branch_1 = conv2d_bn(branch_1, 256, 3, 3, strides=(2,2), padding='valid')

    branch_2 = MaxPooling2D((3,3), strides=(2,2), padding='valid')(input)

    x = concatenate([branch_0, branch_1, branch_2], axis=channel_axis)
    return x
Source: inception_v4.py (project: keras-inceptionV4, author: kentsommer)
def block_reduction_b(input):
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1

    branch_0 = conv2d_bn(input, 192, 1, 1)
    branch_0 = conv2d_bn(branch_0, 192, 3, 3, strides=(2, 2), padding='valid')

    branch_1 = conv2d_bn(input, 256, 1, 1)
    branch_1 = conv2d_bn(branch_1, 256, 1, 7)
    branch_1 = conv2d_bn(branch_1, 320, 7, 1)
    branch_1 = conv2d_bn(branch_1, 320, 3, 3, strides=(2,2), padding='valid')

    branch_2 = MaxPooling2D((3, 3), strides=(2, 2), padding='valid')(input)

    x = concatenate([branch_0, branch_1, branch_2], axis=channel_axis)
    return x
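Note that the snippets above span two generations of the Keras API: older projects use Keras 1 keywords (border_mode, dim_ordering, subsample, and merge), while newer ones use the Keras 2 equivalents (padding, data_format, strides, and concatenate). A rough mapping, written as a standalone Keras 2 sketch with illustrative layer sizes:

from keras.layers import Conv2D, MaxPooling2D

# Keras 1: Convolution2D(64, 5, 5, border_mode='same', subsample=(2, 2), dim_ordering='th')
conv = Conv2D(64, (5, 5), padding='same', strides=(2, 2),
              data_format='channels_first')
# Keras 1: MaxPooling2D(pool_size=(2, 2), border_mode='valid', dim_ordering='th')
pool = MaxPooling2D(pool_size=(2, 2), padding='valid',
                    data_format='channels_first')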

