Python merge() example source code

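All of the snippets on this page come from projects written against the Keras 1.x functional API, where tensors are combined with the standalone merge() function. That function was removed in Keras 2; the sketch below (an assumed mapping, not code from any of these projects) shows how the merge modes used here translate to the dedicated Keras 2 layers.

# Minimal sketch of the Keras 1 -> Keras 2 mapping (assumes Keras 2.x is installed).
from keras.layers import Input, concatenate, add, average

a = Input((16,))
b = Input((16,))

cat = concatenate([a, b], axis=-1)  # Keras 1: merge([a, b], mode='concat', concat_axis=-1)
s = add([a, b])                     # Keras 1: merge([a, b], mode='sum')
avg = average([a, b])               # Keras 1: merge([a, b], mode='ave')
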
get_final_test_dense121_feature.py (project: Baidu-_contest, author: DeepLJH0001)
def dense_block(x, stage, nb_layers, nb_filter, growth_rate, dropout_rate=None, weight_decay=1e-4, grow_nb_filters=True):
    ''' Build a dense_block where the output of each conv_block is fed to subsequent ones
        # Arguments
            x: input tensor
            stage: index for dense block
            nb_layers: the number of layers of conv_block to append to the model.
            nb_filter: number of filters
            growth_rate: growth rate
            dropout_rate: dropout rate
            weight_decay: weight decay factor
            grow_nb_filters: flag to decide to allow number of filters to grow
    '''

    eps = 1.1e-5
    concat_feat = x

    for i in range(nb_layers):
        branch = i+1
        x = conv_block(concat_feat, stage, branch, growth_rate, dropout_rate, weight_decay)
        concat_feat = merge([concat_feat, x], mode='concat', concat_axis=concat_axis, name='concat_'+str(stage)+'_'+str(branch))

        if grow_nb_filters:
            nb_filter += growth_rate

    return concat_feat, nb_filter
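
Note that concat_axis in the merge() call above is a module-level variable the snippet does not show; in the DenseNet ports these files follow, it is typically derived from the backend image ordering. A hedged sketch of that assumed setup:

from keras import backend as K

# Assumed module-level definition (not shown in the snippet above):
# channel axis is 3 for TensorFlow ('tf') ordering, 1 for Theano ('th') ordering.
concat_axis = 3 if K.image_dim_ordering() == 'tf' else 1
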
get_final_test_inceptionv4_feature.py (project: Baidu-_contest, author: DeepLJH0001)
def dense_block(x, stage, nb_layers, nb_filter, growth_rate, dropout_rate=None, weight_decay=1e-4, grow_nb_filters=True):
    ''' Build a dense_block where the output of each conv_block is fed to subsequent ones
        # Arguments
            x: input tensor
            stage: index for dense block
            nb_layers: the number of layers of conv_block to append to the model.
            nb_filter: number of filters
            growth_rate: growth rate
            dropout_rate: dropout rate
            weight_decay: weight decay factor
            grow_nb_filters: flag to decide to allow number of filters to grow
    '''

    eps = 1.1e-5
    concat_feat = x

    for i in range(nb_layers):
        branch = i+1
        x = conv_block(concat_feat, stage, branch, growth_rate, dropout_rate, weight_decay)
        concat_feat = merge([concat_feat, x], mode='concat', concat_axis=concat_axis, name='concat_'+str(stage)+'_'+str(branch))

        if grow_nb_filters:
            nb_filter += growth_rate

    return concat_feat, nb_filter
n12_pepe_zoo.py (project: kaggle_yt8m, author: N01Z3)
def build_mod5(opt=adam()):
    n = 3 * 1024
    in1 = Input((128,), name='x1')
    x1 = fc_block1(in1, n)
    x1 = fc_identity(x1, n)


    in2 = Input((1024,), name='x2')
    x2 = fc_block1(in2, n)
    x2 = fc_identity(x2, n)

    x = merge([x1, x2], mode='concat', concat_axis=1)

    x = fc_identity(x, n)

    out = Dense(4716, activation='sigmoid', name='output')(x)

    model = Model(input=[in1, in2], output=out)
    model.compile(optimizer=opt, loss='categorical_crossentropy')

    # model.summary()
    # plot(model=model, show_shapes=True)
    return model
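
A hypothetical usage sketch for the model above, assuming the module's fc_block1/fc_identity helpers and Keras 1.x imports are in scope. In the YouTube-8M video-level feature layout, the 128-dimensional input corresponds to the mean audio feature and the 1024-dimensional input to the mean RGB feature (an assumption about this project's data pipeline):

import numpy as np

model = build_mod5(opt='adam')                    # compile() also accepts optimizer names
x1 = np.random.rand(32, 128).astype('float32')    # dummy audio-feature batch (assumed layout)
x2 = np.random.rand(32, 1024).astype('float32')   # dummy video-feature batch (assumed layout)
preds = model.predict([x1, x2])                   # predictions of shape (32, 4716)
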
inception_resnet_v2.py (project: keras-inception-resnetV2, author: kentsommer)
def block17(input, scale=1.0, activation_fn='relu'):
    if K.image_dim_ordering() == "th":
        channel_axis = 1
    else:
        channel_axis = -1

    shortcut = input

    tower_conv = conv2d_bn(input, 192, 1, 1, activ_fn=activation_fn)

    tower_conv1_0 = conv2d_bn(input, 128, 1, 1, activ_fn=activation_fn)
    tower_conv1_1 = conv2d_bn(tower_conv1_0, 160, 1, 7, activ_fn=activation_fn)
    tower_conv1_2 = conv2d_bn(tower_conv1_1, 192, 7, 1, activ_fn=activation_fn)

    mixed = merge([tower_conv, tower_conv1_2], mode='concat', concat_axis=channel_axis)

    up = conv2d_bn(mixed, 1088, 1, 1, activ_fn=False, normalize=False)

    up = Lambda(do_scale, output_shape=K.int_shape(up)[1:], arguments={'scale':scale})(up)

    net = merge([shortcut, up], mode='sum')

    if activation_fn:
        net = Activation(activation_fn)(net)
    return net
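
do_scale is defined elsewhere in the project; it scales the residual branch before it is summed with the shortcut, as in the Inception-ResNet paper. A minimal assumed implementation:

def do_scale(x, scale):
    # Multiply the residual tower output by the scaling factor (e.g. 0.1-0.3)
    # before it is added back onto the shortcut.
    return x * scale
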
inception_resnet_v2.py (project: keras-inception-resnetV2, author: kentsommer)
def block8(input, scale=1.0, activation_fn='relu'):
    if K.image_dim_ordering() == "th":
        channel_axis = 1
    else:
        channel_axis = -1

    shortcut = input

    tower_conv = conv2d_bn(input, 192, 1, 1, activ_fn=activation_fn)

    tower_conv1_0 = conv2d_bn(input, 192, 1, 1, activ_fn=activation_fn)
    tower_conv1_1 = conv2d_bn(tower_conv1_0, 224, 1, 3, activ_fn=activation_fn)
    tower_conv1_2 = conv2d_bn(tower_conv1_1, 256, 3, 1, activ_fn=activation_fn)

    mixed = merge([tower_conv, tower_conv1_2], mode='concat', concat_axis=channel_axis)

    up = conv2d_bn(mixed, 2080, 1, 1, activ_fn=False, normalize=False)

    up = Lambda(do_scale, output_shape=K.int_shape(up)[1:], arguments={'scale':scale})(up)

    net = merge([shortcut, up], mode='sum')

    if activation_fn:
        net = Activation(activation_fn)(net)
    return net
trans.py (project: keras-squeezenet, author: dvbuntu)
def fire_module(x, squeeze=16, expand=64):
    x = Convolution2D(squeeze, 1, 1, border_mode='valid')(x)
    x = Activation('relu')(x)

    left = Convolution2D(expand, 1, 1, border_mode='valid')(x)
    left = Activation('relu')(left)

    right = ZeroPadding2D(padding=(1, 1))(x)
    right = Convolution2D(expand, 3, 3, border_mode='valid')(right)
    right = Activation('relu')(right)

    y = merge([left, right], mode='concat', concat_axis=1)
    return y


# Original SqueezeNet from paper. Global Average Pool implemented manually with Average Pooling Layer
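
For comparison, a sketch of the same fire module under Keras 2.x conventions (an assumed mapping, not part of this project): Conv2D(filters, kernel_size) replaces Convolution2D(filters, k, k), padding='same' on the 3x3 branch replaces the explicit ZeroPadding2D, and concatenate() replaces merge(mode='concat').

from keras.layers import Conv2D, concatenate

def fire_module_v2(x, squeeze=16, expand=64):
    # Squeeze with 1x1 convolutions, then expand with parallel 1x1 and 3x3 branches.
    x = Conv2D(squeeze, 1, padding='valid', activation='relu')(x)
    left = Conv2D(expand, 1, padding='valid', activation='relu')(x)
    right = Conv2D(expand, 3, padding='same', activation='relu')(x)
    return concatenate([left, right], axis=-1)  # channel axis under channels-last; the original used concat_axis=1 (channels-first)
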
models.py (project: keras-squeezenet, author: dvbuntu)
def fire_module(x, squeeze=16, expand=64):
    x = Convolution2D(squeeze, 1, 1, border_mode='valid')(x)
    x = Activation('relu')(x)

    left = Convolution2D(expand, 1, 1, border_mode='valid')(x)
    left = Activation('relu')(left)

    right = ZeroPadding2D(padding=(1, 1))(x)
    right = Convolution2D(expand, 3, 3, border_mode='valid')(right)
    right = Activation('relu')(right)

    x = merge([left, right], mode='concat', concat_axis=1)
    return x


# Original SqueezeNet from paper. Global Average Pool implemented manually with Average Pooling Layer
inceptionv4.py (project: dogsVScats, author: prajwalkr)
def block_inception_a(input):
    if K.image_dim_ordering() == "th":
        channel_axis = 1
    else:
        channel_axis = -1

    branch_0 = conv2d_bn(input, 96, 1, 1)

    branch_1 = conv2d_bn(input, 64, 1, 1)
    branch_1 = conv2d_bn(branch_1, 96, 3, 3)

    branch_2 = conv2d_bn(input, 64, 1, 1)
    branch_2 = conv2d_bn(branch_2, 96, 3, 3)
    branch_2 = conv2d_bn(branch_2, 96, 3, 3)

    branch_3 = AveragePooling2D((3,3), strides=(1,1), border_mode='same')(input)
    branch_3 = conv2d_bn(branch_3, 96, 1, 1)

    x = merge([branch_0, branch_1, branch_2, branch_3], mode='concat', concat_axis=channel_axis)
    return x
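
conv2d_bn is a helper defined elsewhere in these files (convolution followed by batch normalization and ReLU). A minimal assumed version in the same Keras 1.x style, matching the calls above:

from keras import backend as K
from keras.layers import Convolution2D, Activation
from keras.layers.normalization import BatchNormalization

def conv2d_bn(x, nb_filter, nb_row, nb_col, border_mode='same', subsample=(1, 1)):
    channel_axis = 1 if K.image_dim_ordering() == 'th' else -1
    x = Convolution2D(nb_filter, nb_row, nb_col,
                      subsample=subsample, border_mode=border_mode)(x)
    x = BatchNormalization(axis=channel_axis)(x)
    return Activation('relu')(x)
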
inceptionv4.py (project: dogsVScats, author: prajwalkr)
def block_reduction_a(input):
    if K.image_dim_ordering() == "th":
        channel_axis = 1
    else:
        channel_axis = -1

    branch_0 = conv2d_bn(input, 384, 3, 3, subsample=(2,2), border_mode='valid')

    branch_1 = conv2d_bn(input, 192, 1, 1)
    branch_1 = conv2d_bn(branch_1, 224, 3, 3)
    branch_1 = conv2d_bn(branch_1, 256, 3, 3, subsample=(2,2), border_mode='valid')

    branch_2 = MaxPooling2D((3,3), strides=(2,2), border_mode='valid')(input)

    x = merge([branch_0, branch_1, branch_2], mode='concat', concat_axis=channel_axis)
    return x
inceptionv4.py (project: dogsVScats, author: prajwalkr)
def block_reduction_b(input):
    if K.image_dim_ordering() == "th":
        channel_axis = 1
    else:
        channel_axis = -1

    branch_0 = conv2d_bn(input, 192, 1, 1)
    branch_0 = conv2d_bn(branch_0, 192, 3, 3, subsample=(2, 2), border_mode='valid')

    branch_1 = conv2d_bn(input, 256, 1, 1)
    branch_1 = conv2d_bn(branch_1, 256, 1, 7)
    branch_1 = conv2d_bn(branch_1, 320, 7, 1)
    branch_1 = conv2d_bn(branch_1, 320, 3, 3, subsample=(2,2), border_mode='valid')

    branch_2 = MaxPooling2D((3, 3), strides=(2, 2), border_mode='valid')(input)

    x = merge([branch_0, branch_1, branch_2], mode='concat', concat_axis=channel_axis)
    return x
models.py (project: keras-tf-Super-Resolution, author: olgaliak)
def create_model(self, height=32, width=32, channels=3, load_weights=False, batch_size=128):
        """
            Creates a model to be used to scale images of specific height and width.
        """
        init = super(ExpantionSuperResolution, self).create_model(height, width, channels, load_weights, batch_size)

        x = Convolution2D(self.n1, self.f1, self.f1, activation='relu', border_mode='same', name='level1')(init)

        x1 = Convolution2D(self.n2, self.f2_1, self.f2_1, activation='relu', border_mode='same', name='lavel1_1')(x)
        x2 = Convolution2D(self.n2, self.f2_2, self.f2_2, activation='relu', border_mode='same', name='lavel1_2')(x)
        x3 = Convolution2D(self.n2, self.f2_3, self.f2_3, activation='relu', border_mode='same', name='lavel1_3')(x)

        x = merge([x1, x2, x3], mode='ave')

        out = Convolution2D(channels, self.f3, self.f3, activation='relu', border_mode='same', name='output')(x)

        model = Model(init, out)
        adam = optimizers.Adam(lr=1e-3)
        model.compile(optimizer=adam, loss='mse', metrics=[PSNRLoss])
        if load_weights: model.load_weights(self.weight_path)

        self.model = model
        return model
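
Here merge(mode='ave') takes the element-wise mean of the three multi-scale branches. PSNRLoss is a custom metric defined elsewhere in the project; a minimal assumed implementation for images scaled to [0, 1]:

from keras import backend as K

def PSNRLoss(y_true, y_pred):
    # PSNR = -10 * log10(MSE) when the maximum pixel value is 1.0.
    return -10.0 * K.log(K.mean(K.square(y_pred - y_true))) / K.log(10.0)
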
residual_model.py (project: keras_detect_tool_wear, author: kidozh)
def first_block(tensor_input,filters,kernel_size=3,pooling_size=1,dropout=0.5):
    k1,k2 = filters

    out = Conv1D(k1,1,padding='same')(tensor_input)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv1D(k2,kernel_size,padding='same')(out)


    pooling = MaxPooling1D(pooling_size,padding='same')(tensor_input)


    # out = merge([out,pooling],mode='sum')
    out = add([out,pooling])
    return out
residual_model.py (project: keras_detect_tool_wear, author: kidozh)
def repeated_block(x,filters,kernel_size=3,pooling_size=1,dropout=0.5):

    k1,k2 = filters


    out = BatchNormalization()(x)
    out = Activation('relu')(out)
    out = Conv1D(k1,kernel_size,padding='same')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv1D(k2,kernel_size,padding='same')(out)


    pooling = MaxPooling1D(pooling_size,padding='same')(x)

    out = add([out, pooling])

    #out = merge([out,pooling])
    return out
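
These residual blocks already use the Keras 2 add() layer; the commented-out merge(mode='sum') lines show the legacy equivalent. A hypothetical sketch of how the two blocks stack into a small 1-D residual feature extractor (input shape and filter counts are assumptions):

from keras.layers import Input
from keras.models import Model

inp = Input((256, 16))                 # e.g. a 256-step, 16-channel signal
x = first_block(inp, (16, 16))
x = repeated_block(x, (16, 16))
x = repeated_block(x, (16, 16))
model = Model(inp, x)                  # output shape: (None, 256, 16)
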
residual_model.py (project: keras_detect_tool_wear, author: kidozh)
def first_2d_block(tensor_input,filters,kernel_size=3,pooling_size=1,dropout=0.5):
    k1,k2 = filters

    out = Conv2D(k1,1,padding='same',data_format='channels_last')(tensor_input)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv2D(k2,kernel_size,padding='same',data_format='channels_last')(out)


    pooling = MaxPooling2D(pooling_size,padding='same',data_format='channels_last')(tensor_input)


    # out = merge([out,pooling],mode='sum')
    out = add([out,pooling])
    return out
residual_model.py (project: keras_detect_tool_wear, author: kidozh)
def repeated_2d_block(x,filters,kernel_size=3,pooling_size=1,dropout=0.5):

    k1,k2 = filters


    out = BatchNormalization()(x)
    out = Activation('relu')(out)
    out = Conv2D(k1,kernel_size,padding='same',data_format='channels_last')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv2D(k2,kernel_size,padding='same',data_format='channels_last')(out)


    pooling = MaxPooling2D(pooling_size,padding='same',data_format='channels_last')(x)

    out = add([out, pooling])

    #out = merge([out,pooling])
    return out
residual_model.py (project: keras_detect_tool_wear, author: kidozh)
def first_block(tensor_input,filters,kernel_size=3,pooling_size=1,dropout=0.5):
    k1,k2 = filters

    out = Conv1D(k1,1,padding='same')(tensor_input)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv1D(k2,kernel_size,padding='same')(out)


    pooling = MaxPooling1D(pooling_size,padding='same')(tensor_input)


    # out = merge([out,pooling],mode='sum')
    out = add([out,pooling])
    return out
residual_model.py (project: keras_detect_tool_wear, author: kidozh)
def first_2d_block(tensor_input,filters,kernel_size=3,pooling_size=1,dropout=0.5):
    k1,k2 = filters

    out = Conv2D(k1,1,padding='same',data_format='channels_last')(tensor_input)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv2D(k2,kernel_size,padding='same',data_format='channels_last')(out)


    pooling = MaxPooling2D(pooling_size,padding='same',data_format='channels_last')(tensor_input)


    # out = merge([out,pooling],mode='sum')
    out = add([out,pooling])
    return out
residual_model.py (project: keras_detect_tool_wear, author: kidozh)
def repeated_2d_block(x,filters,kernel_size=3,pooling_size=1,dropout=0.5):

    k1,k2 = filters


    out = BatchNormalization()(x)
    out = Activation('relu')(out)
    out = Conv2D(k1,kernel_size,padding='same',data_format='channels_last')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv2D(k2,kernel_size,padding='same',data_format='channels_last')(out)


    pooling = MaxPooling2D(pooling_size,padding='same',data_format='channels_last')(x)

    out = add([out, pooling])

    #out = merge([out,pooling])
    return out
model.py (project: keras_detect_tool_wear, author: kidozh)
def first_block(tensor_input,filters,kernel_size=3,pooling_size=1,dropout=0.5):
    k1,k2 = filters

    out = Conv1D(k1,1,padding='same')(tensor_input)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv1D(k2,kernel_size,strides=2,padding='same')(out)


    pooling = MaxPooling1D(pooling_size,strides=2,padding='same')(tensor_input)


    # out = merge([out,pooling],mode='sum')
    out = add([out,pooling])
    return out
model.py (project: keras_detect_tool_wear, author: kidozh)
def repeated_block(x,filters,kernel_size=3,pooling_size=1,dropout=0.5):

    k1,k2 = filters


    out = BatchNormalization()(x)
    out = Activation('relu')(out)
    out = Conv1D(k1,kernel_size,padding='same')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv1D(k2,kernel_size,strides=2,padding='same')(out)


    pooling = MaxPooling1D(pooling_size,strides=2,padding='same')(x)

    out = add([out, pooling])

    #out = merge([out,pooling])
    return out
model.py (project: keras_detect_tool_wear, author: kidozh)
def first_2d_block(tensor_input,filters,kernel_size=3,pooling_size=2,dropout=0.5):
    k1,k2 = filters

    out = Conv2D(k1,1,padding='same',data_format='channels_last')(tensor_input)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dropout(dropout)(out)
    out = Conv2D(k2,kernel_size,2,padding='same',data_format='channels_last')(out)


    pooling = MaxPooling2D(pooling_size,padding='same',data_format='channels_last')(tensor_input)


    # out = merge([out,pooling],mode='sum')
    out = add([out,pooling])
    return out
a01_densenet_161.py (project: KAGGLE_CERVICAL_CANCER_2017, author: ZFTurbo)
def dense_block(x, stage, nb_layers, nb_filter, growth_rate, dropout_rate=None, weight_decay=1e-4, grow_nb_filters=True):
    ''' Build a dense_block where the output of each conv_block is fed to subsequent ones
        # Arguments
            x: input tensor
            stage: index for dense block
            nb_layers: the number of layers of conv_block to append to the model.
            nb_filter: number of filters
            growth_rate: growth rate
            dropout_rate: dropout rate
            weight_decay: weight decay factor
            grow_nb_filters: flag to decide to allow number of filters to grow
    '''

    eps = 1.1e-5
    concat_feat = x

    for i in range(nb_layers):
        branch = i+1
        x = conv_block(concat_feat, stage, branch, growth_rate, dropout_rate, weight_decay)
        concat_feat = merge([concat_feat, x], mode='concat', concat_axis=concat_axis, name='concat_'+str(stage)+'_'+str(branch))

        if grow_nb_filters:
            nb_filter += growth_rate

    return concat_feat, nb_filter
a02_zf_unet_model.py (project: KAGGLE_CERVICAL_CANCER_2017, author: ZFTurbo)
def double_conv_layer(x, size, dropout, batch_norm):
    from keras.models import Model
    from keras.layers import Input, merge, Convolution2D, MaxPooling2D, UpSampling2D
    from keras.layers.normalization import BatchNormalization
    from keras.layers.core import Dropout, Activation
    conv = Convolution2D(size, 3, 3, border_mode='same')(x)
    if batch_norm == True:
        conv = BatchNormalization(mode=0, axis=1)(conv)
    conv = Activation('relu')(conv)
    conv = Convolution2D(size, 3, 3, border_mode='same')(conv)
    if batch_norm == True:
        conv = BatchNormalization(mode=0, axis=1)(conv)
    conv = Activation('relu')(conv)
    if dropout > 0:
        conv = Dropout(dropout)(conv)
    return conv
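
In this helper, merge is imported but not used; in the full ZF_UNET model it is what ties the U-Net skip connections together, concatenating each upsampled decoder feature map with the matching encoder output. A hedged sketch of that step with hypothetical tensor names, assuming channels-first ordering (consistent with axis=1 in the batch normalization above):

from keras.layers import Input, merge, MaxPooling2D, UpSampling2D

inputs = Input((3, 64, 64))                                   # hypothetical channels-first input
conv_enc = double_conv_layer(inputs, 32, 0, True)             # encoder feature map: (32, 64, 64)
conv_mid = double_conv_layer(MaxPooling2D(pool_size=(2, 2))(conv_enc), 64, 0, True)
up = merge([UpSampling2D(size=(2, 2))(conv_mid), conv_enc],   # U-Net skip connection
           mode='concat', concat_axis=1)                      # -> (96, 64, 64)
conv_dec = double_conv_layer(up, 32, 0, True)
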
a01_densenet_121.py (project: KAGGLE_CERVICAL_CANCER_2017, author: ZFTurbo)
def dense_block(x, stage, nb_layers, nb_filter, growth_rate, dropout_rate=None, weight_decay=1e-4, grow_nb_filters=True):
    ''' Build a dense_block where the output of each conv_block is fed to subsequent ones
        # Arguments
            x: input tensor
            stage: index for dense block
            nb_layers: the number of layers of conv_block to append to the model.
            nb_filter: number of filters
            growth_rate: growth rate
            dropout_rate: dropout rate
            weight_decay: weight decay factor
            grow_nb_filters: flag to decide to allow number of filters to grow
    '''

    eps = 1.1e-5
    concat_feat = x

    for i in range(nb_layers):
        branch = i+1
        x = conv_block(concat_feat, stage, branch, growth_rate, dropout_rate, weight_decay)
        concat_feat = merge([concat_feat, x], mode='concat', concat_axis=concat_axis, name='concat_'+str(stage)+'_'+str(branch))

        if grow_nb_filters:
            nb_filter += growth_rate

    return concat_feat, nb_filter
a01_squeezenet.py (project: KAGGLE_CERVICAL_CANCER_2017, author: ZFTurbo)
def fire_module(x, fire_id, squeeze=16, expand=64, dim_ordering='th'):
    s_id = 'fire' + str(fire_id) + '/'
    if dim_ordering == 'tf':
        c_axis = 3
    else:
        c_axis = 1

    x = Convolution2D(squeeze, 1, 1, border_mode='valid', name=s_id + sq1x1)(x)
    x = Activation('relu', name=s_id + relu + sq1x1)(x)

    left = Convolution2D(expand, 1, 1, border_mode='valid', name=s_id + exp1x1)(x)
    left = Activation('relu', name=s_id + relu + exp1x1)(left)

    right = Convolution2D(expand, 3, 3, border_mode='same', name=s_id + exp3x3)(x)
    right = Activation('relu', name=s_id + relu + exp3x3)(right)

    x = merge([left, right], mode='concat', concat_axis=c_axis, name=s_id + 'concat')
    return x


# Original SqueezeNet from paper.
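
The name fragments sq1x1, exp1x1, exp3x3, and relu used above are module-level string constants; the assumed definitions below match the usual Keras SqueezeNet port this file appears to follow:

sq1x1 = "squeeze1x1"
exp1x1 = "expand1x1"
exp3x3 = "expand3x3"
relu = "relu_"
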
sd01a.py (project: kaggle-lung-cancer, author: mdai)
def res_block(input_tensor, nb_filters=16, block=0, subsample_factor=1):
    subsample = (subsample_factor, subsample_factor, subsample_factor)

    x = BatchNormalization(axis=4)(input_tensor)
    x = Activation('relu')(x)
    x = Convolution3D(nb_filters, 3, 3, 3, subsample=subsample, border_mode='same')(x)
    x = BatchNormalization(axis=4)(x)
    x = Activation('relu')(x)
    x = Convolution3D(nb_filters, 3, 3, 3, subsample=(1, 1, 1), border_mode='same')(x)

    if subsample_factor > 1:
        shortcut = Convolution3D(nb_filters, 1, 1, 1, subsample=subsample, border_mode='same')(input_tensor)
    else:
        shortcut = input_tensor

    x = merge([x, shortcut], mode='sum')
    return x
m05a.py (project: kaggle-lung-cancer, author: mdai)
def res_block(input_tensor, nb_filters=16, block=0, subsample_factor=1):
    subsample = (subsample_factor, subsample_factor)

    x = BatchNormalization(axis=3)(input_tensor)
    x = Activation('relu')(x)
    x = Convolution2D(nb_filters, 3, 3, subsample=subsample, border_mode='same')(x)
    x = BatchNormalization(axis=3)(x)
    x = Activation('relu')(x)
    x = Convolution2D(nb_filters, 3, 3, subsample=(1, 1), border_mode='same')(x)

    if subsample_factor > 1:
        shortcut = Convolution2D(nb_filters, 1, 1, subsample=subsample, border_mode='same')(input_tensor)
    else:
        shortcut = input_tensor

    x = merge([x, shortcut], mode='sum')
    return x
m09a.py (project: kaggle-lung-cancer, author: mdai)
def res_block(input_tensor, nb_filters=16, block=0, subsample_factor=1):
    subsample = (subsample_factor, subsample_factor)

    x = BatchNormalization(axis=3)(input_tensor)
    x = Activation('relu')(x)
    x = Convolution2D(nb_filters, 3, 3, subsample=subsample, border_mode='same')(x)
    x = BatchNormalization(axis=3)(x)
    x = Activation('relu')(x)
    x = Convolution2D(nb_filters, 3, 3, subsample=(1, 1), border_mode='same')(x)

    if subsample_factor > 1:
        shortcut = Convolution2D(nb_filters, 1, 1, subsample=subsample, border_mode='same')(input_tensor)
    else:
        shortcut = input_tensor

    x = merge([x, shortcut], mode='sum')
    return x
m10a.py (project: kaggle-lung-cancer, author: mdai)
def res_block(input_tensor, nb_filters=16, block=0, subsample_factor=1):
    subsample = (subsample_factor, subsample_factor, subsample_factor)

    x = BatchNormalization(axis=4)(input_tensor)
    x = Activation('relu')(x)
    x = Convolution3D(nb_filters, 3, 3, 3, subsample=subsample, border_mode='same')(x)
    x = BatchNormalization(axis=4)(x)
    x = Activation('relu')(x)
    x = Convolution3D(nb_filters, 3, 3, 3, subsample=(1, 1, 1), border_mode='same')(x)

    if subsample_factor > 1:
        shortcut = Convolution3D(nb_filters, 1, 1, 1, subsample=subsample, border_mode='same')(input_tensor)
    else:
        shortcut = input_tensor

    x = merge([x, shortcut], mode='sum')
    return x
m02a.py (project: kaggle-lung-cancer, author: mdai)
def res_block(input_tensor, nb_filters=16, block=0, subsample_factor=1):
    subsample = (subsample_factor, subsample_factor)

    x = BatchNormalization(axis=3)(input_tensor)
    x = Activation('relu')(x)
    x = Convolution2D(nb_filters, 3, 3, subsample=subsample, border_mode='same')(x)
    x = BatchNormalization(axis=3)(x)
    x = Activation('relu')(x)
    x = Convolution2D(nb_filters, 3, 3, subsample=(1, 1), border_mode='same')(x)

    if subsample_factor > 1:
        shortcut = Convolution2D(nb_filters, 1, 1, subsample=subsample, border_mode='same')(input_tensor)
    else:
        shortcut = input_tensor

    x = merge([x, shortcut], mode='sum')
    return x
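
A hypothetical usage sketch for the 2-D variant above (Keras 1.x style, channels-last ordering, assuming the module's imports are in scope): a subsample_factor of 2 halves the spatial resolution, and the 1x1 shortcut convolution keeps the two summands shape-compatible.

from keras.layers import Input, Convolution2D
from keras.models import Model

inp = Input((64, 64, 3))
x = Convolution2D(16, 3, 3, border_mode='same')(inp)            # 64x64x16
x = res_block(x, nb_filters=16, block=0, subsample_factor=1)    # 64x64x16
x = res_block(x, nb_filters=32, block=1, subsample_factor=2)    # 32x32x32
model = Model(input=inp, output=x)
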

