Examples of Python add() from open-source projects

residual_model.py (project: keras_detect_tool_wear, author: kidozh)
# imports used by the excerpts from this file
from keras.layers import (Input, TimeDistributed, Masking, LSTM, Conv1D, Conv2D,
                          BatchNormalization, Activation, Flatten, Dense)
from keras.models import Model


def build_main_residual_network_with_lstm(batch_size,
                                          time_step,
                                          input_dim,
                                          output_dim,
                                          loop_depth=15,
                                          rnn_layer_num=2,
                                          dropout=0.3):
    inp = Input(shape=(time_step, input_dim))

    # mask zero-valued (padded) timesteps so they are ignored downstream
    out = TimeDistributed(Masking(mask_value=0))(inp)

    # stacked LSTM layers
    for _ in range(rnn_layer_num):
        out = LSTM(128, return_sequences=True)(out)

    out = Conv1D(128, 5)(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)

    # first_block / repeated_block are residual-block helpers defined
    # elsewhere in this project
    out = first_block(out, (64, 128), dropout=dropout)

    for _ in range(loop_depth):
        out = repeated_block(out, (64, 128), dropout=dropout)

    out = Flatten()(out)

    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dense(output_dim)(out)

    model = Model(inp, out)

    model.compile(loss='mse', optimizer='adam', metrics=['mse', 'mae'])
    return model
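The first_block and repeated_block helpers are defined elsewhere in the project and are not shown on this page. As a rough orientation, here is a hypothetical sketch of what such a 1D residual block could look like; the name, filter handling, and layer order are assumptions, not the project's actual code:

from keras import backend as K
from keras.layers import Conv1D, BatchNormalization, Activation, Dropout, add

def repeated_block_sketch(x, filters, dropout=0.3):
    """Hypothetical stand-in for the project's repeated_block."""
    f1, f2 = filters
    shortcut = x
    y = Conv1D(f1, 3, padding='same')(x)
    y = BatchNormalization()(y)
    y = Activation('relu')(y)
    y = Dropout(dropout)(y)
    y = Conv1D(f2, 3, padding='same')(y)
    y = BatchNormalization()(y)
    # project the shortcut with a 1x1 convolution when channel counts differ,
    # so the residual add() stays shape-compatible
    if K.int_shape(shortcut)[-1] != f2:
        shortcut = Conv1D(f2, 1, padding='same')(shortcut)
    y = add([y, shortcut])
    return Activation('relu')(y)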
residual_model.py (project: keras_detect_tool_wear, author: kidozh)
def build_2d_main_residual_network(batch_size,
                                   width,
                                   height,
                                   channel_size,
                                   output_dim,
                                   loop_depth=15,
                                   dropout=0.3):
    inp = Input(shape=(width, height, channel_size))

    # mask zero-valued (padded) entries so they are ignored downstream
    out = TimeDistributed(Masking(mask_value=0))(inp)

    out = Conv2D(128, 5, data_format='channels_last')(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)

    # first_2d_block / repeated_2d_block are residual-block helpers defined
    # elsewhere in this project
    out = first_2d_block(out, (64, 128), dropout=dropout)

    for _ in range(loop_depth):
        out = repeated_2d_block(out, (64, 128), dropout=dropout)

    out = Flatten()(out)

    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dense(output_dim)(out)

    model = Model(inp, out)

    model.compile(loss='mse', optimizer='adam', metrics=['mse', 'mae'])
    return model
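Note that Conv2D(128, 5) uses the default 'valid' padding, so each spatial dimension shrinks by kernel_size - 1. A quick self-contained check (shapes illustrative):

from keras.layers import Input, Conv2D
from keras.models import Model

inp = Input(shape=(64, 64, 3))
out = Conv2D(128, 5)(inp)      # valid padding: 64 - 5 + 1 = 60
Model(inp, out).summary()      # output shape: (None, 60, 60, 128)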
thaanaocr.py (project: thaanaOCR, author: Sofwath)
def on_epoch_end(self, epoch, logs={}):

        font = {'family': 'Thaana Unicode Akeh',
                'color': 'darkred',
                'weight': 'normal',
                'size': 12,
                }

        self.model.save_weights(os.path.join(self.output_dir, 'weights%02d.h5' % epoch))

        # self.model.save('model.h5')  # would create an HDF5 file

        self.show_edit_distance(256)
        self.num_display_words = 6
        word_batch = next(self.text_img_gen)[0]
        # decode predictions for the first few words of the batch
        res = decode_batch(self.test_func, word_batch['the_input'][:self.num_display_words])
        if word_batch['the_input'][0].shape[0] < 256:
            cols = 2
        else:
            cols = 1
        for i in range(self.num_display_words):
            plt.subplot(self.num_display_words // cols, cols, i + 1)

            if K.image_data_format() == 'channels_first':
                the_input = word_batch['the_input'][i, 0, :, :]
            else:
                the_input = word_batch['the_input'][i, :, :, 0]
            plt.imshow(the_input.T, cmap='Greys_r')
            # strings are reversed ([::-1]) because Thaana is written right to left
            plt.xlabel('Actual = \'%s\'\nPrediction = \'%s\''
                       % (word_batch['source_str'][i][::-1], res[i][::-1]), fontdict=font)
            # print('Actual = \'%s\'\nPrediction = \'%s\'' % (word_batch['source_str'][i], res[i]))
        fig = plt.gcf()

        fig.set_size_inches(10, 13)
        plt.savefig(os.path.join(self.output_dir, 'e%02d.png' % epoch))

        plt.close()
build_featurizer_test.py (project: pic2vec, author: datarobot)
def test_splice_layer():
    """Test that _splice_layer splices a tensor into equal slices correctly."""
    # Splice the test tensor into 3 slices
    list_of_spliced_layers = _splice_layer(SPLICING_TENSOR, 3)
    # Add the slices back together
    x = add(list_of_spliced_layers)
    # Build the expected result by hand
    check_layer = K.constant(9, shape=(3, 4))
    # Check the math
    assert np.allclose(K.eval(check_layer), K.eval(x), atol=ATOL)
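For intuition: if SPLICING_TENSOR were, say, a (3, 12) tensor filled with 3s, splicing it into three (3, 4) slices and summing them elementwise yields a (3, 4) tensor of 9s, which is exactly what the hand-built check_layer encodes. A self-contained sketch of that arithmetic (the shape and fill value are assumptions for illustration):

import numpy as np
from keras import backend as K

splicing_tensor = K.constant(3, shape=(3, 12))                 # hypothetical stand-in
slices = [splicing_tensor[:, i * 4:(i + 1) * 4] for i in range(3)]
total = slices[0] + slices[1] + slices[2]                      # elementwise sum, like add()
print(K.eval(total))                                           # every entry is 9.0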
SRResNet_simple.py (project: deblocking, author: yydlmzyz)
def residual_block(input, filters, kernel_size):
    # note: `input` must already have `filters` channels, or the final add() will fail
    conv_1 = Conv2D(filters, (kernel_size, kernel_size), padding='same',
                    kernel_initializer='glorot_uniform')(input)
    norm_1 = BatchNormalization(axis=-1)(conv_1)
    relu_1 = LeakyReLU(alpha=0.25)(norm_1)
    conv_2 = Conv2D(filters, (kernel_size, kernel_size), padding='same',
                    kernel_initializer='glorot_uniform')(relu_1)
    norm_2 = BatchNormalization(axis=-1)(conv_2)
    return add([input, norm_2])
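A minimal usage sketch; the 64-channel input is an illustrative assumption chosen to match filters so the residual add() is shape-compatible:

from keras.layers import Input
from keras.models import Model

inp = Input(shape=(None, None, 64))
x = residual_block(inp, filters=64, kernel_size=3)
x = residual_block(x, filters=64, kernel_size=3)
model = Model(inp, x)
model.summary()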
keras_training.py (project: deep_ocr, author: JinpengLI)
def __init__(self, **kwargs):
        super(KerasLenetExt1Model, self).__init__(**kwargs)
        norm_shape = self.norm_shape
        self.model = Sequential()
        self.model.add(Convolution2D(64, (3, 3), activation='relu',
                                     input_shape=(norm_shape[0], norm_shape[1], 1)))

        self.model.add(Convolution2D(64, (3, 3), activation='relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))
        self.model.add(Dropout(0.25))

        self.model.add(Convolution2D(128, (3, 3), activation='relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))
        self.model.add(Dropout(0.25))

        self.model.add(Convolution2D(256, (3, 3), activation='relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))
        self.model.add(Dropout(0.25))

        self.model.add(Flatten())
        self.model.add(Dense(1024, activation='relu'))
        self.model.add(Dropout(0.5))
        self.model.add(Dense(self.max_n_label, activation='softmax'))

        # Compile model
        self.model.compile(loss='categorical_crossentropy',
                           optimizer='adam',
                           metrics=['accuracy'])
keras_training.py (project: deep_ocr, author: JinpengLI)
def __init__(self, **kwargs):
        self.final_fc_nm = kwargs.pop("final_fc_nm", 2048)
        super(KerasLenetExt2Model, self).__init__(**kwargs)
        norm_shape = self.norm_shape
        self.model = Sequential()
        self.model.add(Convolution2D(64, (3, 3), activation='relu',
                                     input_shape=(norm_shape[0], norm_shape[1], 1)))

        self.model.add(Convolution2D(64, (3, 3), activation='relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))
        self.model.add(Dropout(0.25))

        self.model.add(Convolution2D(128, (3, 3), activation='relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))
        self.model.add(Dropout(0.25))

        self.model.add(Convolution2D(256, (3, 3), activation='relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))
        self.model.add(Dropout(0.25))

        self.model.add(Flatten())
        self.model.add(Dense(self.final_fc_nm, activation='relu'))
        self.model.add(Dropout(0.5))
        self.model.add(Dense(self.max_n_label, activation='softmax'))

        # Compile model
        self.model.compile(loss='categorical_crossentropy',
                           optimizer='adam',
                           metrics=['accuracy'])
keras_training.py (project: deep_ocr, author: JinpengLI)
def __init__(self, **kwargs):
        super(KerasCifar10CNN, self).__init__(**kwargs)
        norm_shape = self.norm_shape
        model = Sequential()
        model.add(Conv2D(32, (3, 3), padding='same',
                         input_shape=(norm_shape[0], norm_shape[1], 1)))
        model.add(Activation('relu'))
        model.add(Conv2D(32, (3, 3)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        model.add(Conv2D(64, (3, 3), padding='same'))
        model.add(Activation('relu'))
        model.add(Conv2D(64, (3, 3)))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))

        model.add(Flatten())
        model.add(Dense(512))
        model.add(Activation('relu'))
        model.add(Dropout(0.5))
        model.add(Dense(self.max_n_label))
        model.add(Activation('softmax'))

        # initiate the RMSprop optimizer
        opt = keras.optimizers.RMSprop(lr=0.0001, decay=1e-6)

        # compile the model for training with RMSprop
        model.compile(loss='categorical_crossentropy',
                      optimizer=opt,
                      metrics=['accuracy'])
        self.model = model
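A hedged training sketch for these Sequential classifiers; the constructor keyword arguments, the norm_shape of (64, 64), and the 10 labels are illustrative assumptions about the surrounding class, not documented API:

import numpy as np
import keras

clf = KerasCifar10CNN(norm_shape=(64, 64), max_n_label=10)    # hypothetical kwargs
X = np.random.rand(32, 64, 64, 1).astype('float32')          # dummy grayscale batch
y = keras.utils.to_categorical(np.random.randint(0, 10, size=32), num_classes=10)
clf.model.fit(X, y, batch_size=8, epochs=1)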
pspnet.py (project: head-segmentation, author: szywind)
def unit_1(in_layer, n1=64, n2=64, n3=256, s2=1, p2=1, d2=1):
    '''
    Two-branch residual unit.
    :param in_layer: input tensor
    :param n1, n2, n3: filter counts of the three convolutions on branch 1
    :param s2: stride of the middle 3x3 convolution and of the branch-2 projection
    :param p2: zero-padding applied before the middle 3x3 convolution
    :param d2: dilation rate of the middle 3x3 convolution
    :return: output tensor after the residual add and ReLU
    '''
    # branch 1
    x1 = Conv2D(n1, (1, 1), strides=(1, 1), padding='valid', kernel_initializer=he_uniform(), use_bias=False)(in_layer)
    x1 = BatchNormalization(momentum=0.95)(x1)
    x1 = Activation('relu')(x1)

    x1 = ZeroPadding2D(padding=(p2, p2))(x1)
    x1 = Conv2D(n2, (3, 3), strides=(s2, s2), padding='valid', dilation_rate=(d2, d2), kernel_initializer=he_uniform(), use_bias=False)(x1)
    x1 = BatchNormalization(momentum=0.95)(x1)
    x1 = Activation('relu')(x1)

    x1 = Conv2D(n3, (1, 1), strides=(1, 1), padding='valid', kernel_initializer=he_uniform(), use_bias=False)(x1)
    x1 = BatchNormalization(momentum=0.95)(x1)

    # branch 2
    x2 = Conv2D(n3, (1, 1), strides=(s2, s2), padding='valid', kernel_initializer=he_uniform(), use_bias=False)(in_layer)
    x2 = BatchNormalization(momentum=0.95)(x2)

    x = add([x1, x2])
    x = Activation('relu')(x)
    return x
EEDS.py (project: EEDS-keras, author: MarkPrecursor)
def model_EEDS():
    _input = Input(shape=(None, None, 1), name='input')
    _EES = EES.model_EES()(_input)
    _EED = EED.model_EED()(_input)
    _EEDS = add(inputs=[_EED, _EES])

    model = Model(inputs=_input, outputs=_EEDS)
    optimizer = adam(lr=0.0003)
    model.compile(optimizer=optimizer, loss='mse')
    return model
EED.py (project: EEDS-keras, author: MarkPrecursor)
def Res_block():
    _input = Input(shape=(None, None, 64))

    conv = Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), padding='same', activation='relu')(_input)
    conv = Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), padding='same', activation='linear')(conv)

    out = add(inputs=[_input, conv])
    out = Activation('relu')(out)

    model = Model(inputs=_input, outputs=out)

    return model
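Because Res_block() returns a Model rather than a bare tensor, it can be applied like a layer; every application of the same instance shares one set of weights, while calling Res_block() again creates fresh weights. A usage sketch:

from keras.layers import Input
from keras.models import Model

res = Res_block()                    # one instance = one set of weights
inp = Input(shape=(None, None, 64))
y = res(inp)                         # first application
y = res(y)                           # reuses the same weights
model = Model(inp, y)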
blocks.py (project: keras-fcn, author: JihongJu)
def vgg_upsampling(classes, target_shape=None, scale=1, weight_decay=0., block_name='featx'):
    """A VGG convolutional block with bilinear upsampling for decoding.

    :param classes: Integer, number of classes
    :param scale: Float, scale factor applied to the input feature, varying from 0 to 1
    :param target_shape: 4D tuple with target_height, target_width as
    the 2nd, 3rd elements if `channels_last` or as the 3rd, 4th elements if
    `channels_first`.

    >>> from keras_fcn.blocks import vgg_upsampling
    >>> feat1, feat2, feat3 = feat_pyramid[:3]
    >>> y = vgg_upsampling(classes=21, target_shape=(None, 14, 14, None),
    ...                    scale=1, block_name='feat1')(feat1, None)
    >>> y = vgg_upsampling(classes=21, target_shape=(None, 28, 28, None),
    ...                    scale=1e-2, block_name='feat2')(feat2, y)
    >>> y = vgg_upsampling(classes=21, target_shape=(None, 224, 224, None),
    ...                    scale=1e-4, block_name='feat3')(feat3, y)

    """
    def f(x, y):
        score = Conv2D(filters=classes, kernel_size=(1, 1),
                       activation='linear',
                       padding='valid',
                       kernel_initializer='he_normal',
                       kernel_regularizer=l2(weight_decay),
                       name='score_{}'.format(block_name))(x)
        if y is not None:
            def scaling(xx, ss=1):
                return xx * ss
            scaled = Lambda(scaling, arguments={'ss': scale},
                            name='scale_{}'.format(block_name))(score)
            score = add([y, scaled])
        upscore = BilinearUpSampling2D(
            target_shape=target_shape,
            name='upscore_{}'.format(block_name))(score)
        return upscore
    return f
encoder.py (project: enet-keras, author: PavlosMelissinos)
def bottleneck(inp, output, internal_scale=4, asymmetric=0, dilated=0, downsample=False, dropout_rate=0.1):
    # main branch
    internal = output // internal_scale
    encoder = inp

    # 1x1
    input_stride = 2 if downsample else 1  # the 1st 1x1 projection is replaced with a 2x2 convolution when downsampling
    encoder = Conv2D(internal, (input_stride, input_stride),
                            # padding='same',
                            strides=(input_stride, input_stride), use_bias=False)(encoder)
    # Batch normalization + PReLU
    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet uses momentum of 0.1, keras default is 0.99
    encoder = PReLU(shared_axes=[1, 2])(encoder)

    # conv
    if not asymmetric and not dilated:
        encoder = Conv2D(internal, (3, 3), padding='same')(encoder)
    elif asymmetric:
        encoder = Conv2D(internal, (1, asymmetric), padding='same', use_bias=False)(encoder)
        encoder = Conv2D(internal, (asymmetric, 1), padding='same')(encoder)
    elif dilated:
        encoder = Conv2D(internal, (3, 3), dilation_rate=(dilated, dilated), padding='same')(encoder)
    else:
        raise Exception("You shouldn't be here")

    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet uses momentum of 0.1, keras default is 0.99
    encoder = PReLU(shared_axes=[1, 2])(encoder)

    # 1x1
    encoder = Conv2D(output, (1, 1), use_bias=False)(encoder)

    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet uses momentum of 0.1, keras default is 0.99
    encoder = SpatialDropout2D(dropout_rate)(encoder)

    # shortcut branch
    other = inp
    if downsample:
        other = MaxPooling2D()(other)

        # zero-pad the channel dimension to match `output` feature maps:
        # swap channels into the width position, pad, then swap back
        other = Permute((1, 3, 2))(other)
        pad_feature_maps = output - inp.get_shape().as_list()[3]
        tb_pad = (0, 0)
        lr_pad = (0, pad_feature_maps)
        other = ZeroPadding2D(padding=(tb_pad, lr_pad))(other)
        other = Permute((1, 3, 2))(other)

    encoder = add([encoder, other])
    encoder = PReLU(shared_axes=[1, 2])(encoder)
    return encoder
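A usage sketch chaining a few bottleneck variants (assumes the Keras layer imports at the top of encoder.py; the input shape is illustrative):

from keras.layers import Input

inp = Input(shape=(256, 256, 16))
x = bottleneck(inp, 64, downsample=True)   # strided 2x2 conv; shortcut pooled and zero-padded to 64 channels
x = bottleneck(x, 64, dilated=2)           # dilated 3x3 conv in the main branch
x = bottleneck(x, 64, asymmetric=5)        # 1x5 followed by 5x1 factorized convolution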
encoder.py (project: enet-keras, author: PavlosMelissinos)
def bottleneck(inp, output, internal_scale=4, asymmetric=0, dilated=0, downsample=False, dropout_rate=0.1):
    # main branch
    internal = output // internal_scale
    encoder = inp

    # 1x1
    input_stride = 2 if downsample else 1  # the 1st 1x1 projection is replaced with a 2x2 convolution when downsampling
    encoder = Conv2D(internal, (input_stride, input_stride),
                     # padding='same',
                     strides=(input_stride, input_stride), use_bias=False)(encoder)
    # Batch normalization + PReLU
    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet_unpooling uses momentum of 0.1, keras default is 0.99
    encoder = PReLU(shared_axes=[1, 2])(encoder)

    # conv
    if not asymmetric and not dilated:
        encoder = Conv2D(internal, (3, 3), padding='same')(encoder)
    elif asymmetric:
        encoder = Conv2D(internal, (1, asymmetric), padding='same', use_bias=False)(encoder)
        encoder = Conv2D(internal, (asymmetric, 1), padding='same')(encoder)
    elif dilated:
        encoder = Conv2D(internal, (3, 3), dilation_rate=(dilated, dilated), padding='same')(encoder)
    else:
        raise Exception("You shouldn't be here")

    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet_unpooling uses momentum of 0.1, keras default is 0.99
    encoder = PReLU(shared_axes=[1, 2])(encoder)

    # 1x1
    encoder = Conv2D(output, (1, 1), use_bias=False)(encoder)

    encoder = BatchNormalization(momentum=0.1)(encoder)  # enet_unpooling uses momentum of 0.1, keras default is 0.99
    encoder = SpatialDropout2D(dropout_rate)(encoder)

    # shortcut branch
    other = inp
    if downsample:
        # MaxPoolingWithArgmax2D is a project-local layer that also returns the
        # argmax indices, which the decoder later uses for unpooling
        other, indices = MaxPoolingWithArgmax2D()(other)

        # zero-pad the channel dimension to match `output` feature maps:
        # swap channels into the width position, pad, then swap back
        other = Permute((1, 3, 2))(other)
        pad_feature_maps = output - inp.get_shape().as_list()[3]
        tb_pad = (0, 0)
        lr_pad = (0, pad_feature_maps)
        other = ZeroPadding2D(padding=(tb_pad, lr_pad))(other)
        other = Permute((1, 3, 2))(other)

    encoder = add([encoder, other])
    encoder = PReLU(shared_axes=[1, 2])(encoder)
    if downsample:
        return encoder, indices
    else:
        return encoder
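Since this variant returns the pooling indices when downsampling, such calls unpack two values; a brief sketch (same illustrative input tensor as in the previous example):

x, indices = bottleneck(inp, 64, downsample=True)   # indices feed the decoder's unpooling
x = bottleneck(x, 64, dilated=2)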
model.py (project: keras_detect_tool_wear, author: kidozh)
def build_multi_input_main_residual_network(batch_size,
                                            a2_time_step,
                                            d2_time_step,
                                            d1_time_step,
                                            input_dim,
                                            output_dim,
                                            loop_depth=15,
                                            dropout=0.3):
    '''
    A multi-input residual network for wavelet-decomposed signals.
    :param batch_size: batch size (not used when defining the graph)
    :param a2_time_step: length of the a2 approximation coefficients
    :param d2_time_step: length of the d2 detail coefficients
    :param d1_time_step: length of the d1 detail coefficients
    :param input_dim: feature dimension of each timestep
    :param output_dim: output dimension
    :param loop_depth: number of repeated residual blocks
    :param dropout: dropout rate
    :return: a compiled Keras Model
    '''
    a2_inp = Input(shape=(a2_time_step, input_dim), name='a2')
    d2_inp = Input(shape=(d2_time_step, input_dim), name='d2')
    d1_inp = Input(shape=(d1_time_step, input_dim), name='d1')

    # concatenate the three coefficient streams along the time axis
    out = concatenate([a2_inp, d2_inp, d1_inp], axis=1)

    out = Conv1D(128, 5)(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)

    out = first_block(out, (64, 128), dropout=dropout)

    for _ in range(loop_depth):
        out = repeated_block(out, (64, 128), dropout=dropout)

    out = Flatten()(out)

    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dense(output_dim)(out)

    model = Model(inputs=[a2_inp, d2_inp, d1_inp], outputs=[out])

    model.compile(loss='mse', optimizer='adam', metrics=['mse', 'mae'])
    return model
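A hedged usage sketch with dummy wavelet coefficients; the lengths and dimensions are illustrative, and first_block / repeated_block must be importable from the project:

import numpy as np

model = build_multi_input_main_residual_network(
    batch_size=32, a2_time_step=64, d2_time_step=64, d1_time_step=128,
    input_dim=7, output_dim=1)

A2 = np.random.rand(32, 64, 7).astype('float32')
D2 = np.random.rand(32, 64, 7).astype('float32')
D1 = np.random.rand(32, 128, 7).astype('float32')
y = np.random.rand(32, 1).astype('float32')
model.fit({'a2': A2, 'd2': D2, 'd1': D1}, y, batch_size=32, epochs=1)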
resnet.py (project: CycleGAN-keras, author: Shaofanl)
def res_block(input, filters, kernel_size=(3, 3), strides=(1, 1)):
    # Torch reference this block mirrors:
    # conv_block:add(nn.SpatialReflectionPadding(1, 1, 1, 1))
    # conv_block:add(nn.SpatialConvolution(dim, dim, 3, 3, 1, 1, p, p))
    # conv_block:add(normalization(dim))
    # conv_block:add(nn.ReLU(true))
    x = padding()(input)
    x = Conv2D(filters=filters,
               kernel_size=kernel_size,
               strides=strides)(x)
    x = normalize()(x)
    x = Activation('relu')(x)

    x = padding()(x)
    x = Conv2D(filters=filters,
               kernel_size=kernel_size,
               strides=strides)(x)
    x = normalize()(x)

    # merged = Concatenate(axis=get_filter_dim())([input, x])
    merged = Add()([input, x])
    return merged
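padding() and normalize() are project factories not shown on this page. Judging from the Torch comments above (SpatialReflectionPadding and normalization), plausible stand-ins would be a one-pixel reflection pad and instance normalization; the sketch below is hypothetical, not the project's actual code:

import tensorflow as tf
from keras.layers import Lambda

def padding():
    # one-pixel reflection padding on height and width (hypothetical stand-in)
    return Lambda(lambda t: tf.pad(t, [[0, 0], [1, 1], [1, 1], [0, 0]], mode='REFLECT'))

def normalize():
    # instance normalization, e.g. from keras-contrib (hypothetical stand-in)
    from keras_contrib.layers import InstanceNormalization
    return InstanceNormalization()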
models.py (project: cyclegan_keras, author: shadySource)
def conv_block(x0, scale):
    x = Conv2D(int(64*scale), (1, 1))(x0)
    x = InstanceNormalization()(x)
    x = LeakyReLU()(x)

    x = Conv2D(int(64*scale), (3, 3), padding='same')(x)
    x = InstanceNormalization()(x)
    x = LeakyReLU()(x)

    x = Conv2D(int(256*scale), (1, 1))(x)
    x = InstanceNormalization()(x)

    x1 = Conv2D(int(256*scale), (1, 1))(x0)
    x1 = InstanceNormalization()(x1)

    x = Add()([x, x1])
    x = LeakyReLU()(x)
    return x
layers_builder.py (project: PSPNet-Keras-tensorflow, author: Vladkryvoruchko)
def residual_short(prev_layer, level, pad=1, lvl=1, sub_lvl=1, modify_stride=False):
    prev_layer = Activation('relu')(prev_layer)
    block_1 = residual_conv(prev_layer, level,
                            pad=pad, lvl=lvl, sub_lvl=sub_lvl,
                            modify_stride=modify_stride)

    block_2 = short_convolution_branch(prev_layer, level,
                                       lvl=lvl, sub_lvl=sub_lvl,
                                       modify_stride=modify_stride)
    added = Add()([block_1, block_2])
    return added
layers_builder.py (project: PSPNet-Keras-tensorflow, author: Vladkryvoruchko)
def residual_empty(prev_layer, level, pad=1, lvl=1, sub_lvl=1):
    prev_layer = Activation('relu')(prev_layer)

    block_1 = residual_conv(prev_layer, level, pad=pad,
                            lvl=lvl, sub_lvl=sub_lvl)
    block_2 = empty_branch(prev_layer)
    added = Add()([block_1, block_2])
    return added
decoder.py (project: enet-keras, author: PavlosMelissinos)
def build(inp, encoder, nc, valid_shapes):
    side = conv_block_side(inp)

    x = Lambda(
        interp,
        arguments={'shape': valid_shapes[3]},
        name='sub24_sum_interp')(encoder)

    main = ConvBN(
        filters=128,
        kernel_size=3,
        dilation_rate=2,
        padding='same',
        name='conv_sub2')(x)

    x = Add(name='sub12_sum')([main, side])
    x = Activation('relu')(x)

    x = Lambda(
        interp,
        arguments={'shape': valid_shapes[2]},
        name='sub12_sum_interp')(x)

    x = Conv2D(
        filters=nc,
        kernel_size=1,
        name='conv6_cls')(x)

    out = Lambda(
        interp,
        arguments={'shape': valid_shapes[0]},
        name='conv6_interp')(x)

    return out
models.py (project: cyclegan_keras, author: shadySource)
def identity_block(x0, scale):
    x = Conv2D(int(64*scale), (1, 1))(x0)
    x = InstanceNormalization()(x)
    x = LeakyReLU()(x)

    x = Conv2D(int(64*scale), (3, 3), padding='same')(x)
    x = InstanceNormalization()(x)
    x = LeakyReLU()(x)

    x = Conv2D(int(256*scale), (1, 1))(x)
    x = InstanceNormalization()(x)

    x = Add()([x, x0])
    x = LeakyReLU()(x)
    return x
resnet_helpers.py (project: Keras-FCN, author: aurora95)
def identity_block(kernel_size, filters, stage, block, weight_decay=0., batch_momentum=0.99):
    '''The identity_block is the block that has no conv layer at shortcut
    # Arguments
        kernel_size: default 3, the kernel size of the middle conv layer of the main path
        filters: list of integers, the filter counts of the 3 conv layers on the main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
    '''
    def f(input_tensor):
        nb_filter1, nb_filter2, nb_filter3 = filters
        if K.image_data_format() == 'channels_last':
            bn_axis = 3
        else:
            bn_axis = 1
        conv_name_base = 'res' + str(stage) + block + '_branch'
        bn_name_base = 'bn' + str(stage) + block + '_branch'

        x = Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a', kernel_regularizer=l2(weight_decay))(input_tensor)
        x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a', momentum=batch_momentum)(x)
        x = Activation('relu')(x)

        x = Conv2D(nb_filter2, (kernel_size, kernel_size),
                          padding='same', name=conv_name_base + '2b', kernel_regularizer=l2(weight_decay))(x)
        x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b', momentum=batch_momentum)(x)
        x = Activation('relu')(x)

        x = Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c', kernel_regularizer=l2(weight_decay))(x)
        x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c', momentum=batch_momentum)(x)

        x = Add()([x, input_tensor])
        x = Activation('relu')(x)
        return x
    return f
resnet_helpers.py (project: Keras-FCN, author: aurora95)
def atrous_identity_block(kernel_size, filters, stage, block, weight_decay=0., atrous_rate=(2, 2), batch_momentum=0.99):
    '''The identity_block is the block that has no conv layer at shortcut
    # Arguments
        kernel_size: default 3, the kernel size of the middle conv layer of the main path
        filters: list of integers, the filter counts of the 3 conv layers on the main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
    '''
    def f(input_tensor):
        nb_filter1, nb_filter2, nb_filter3 = filters
        if K.image_data_format() == 'channels_last':
            bn_axis = 3
        else:
            bn_axis = 1
        conv_name_base = 'res' + str(stage) + block + '_branch'
        bn_name_base = 'bn' + str(stage) + block + '_branch'

        x = Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a', kernel_regularizer=l2(weight_decay))(input_tensor)
        x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a', momentum=batch_momentum)(x)
        x = Activation('relu')(x)

        x = Conv2D(nb_filter2, (kernel_size, kernel_size), dilation_rate=atrous_rate,
                          padding='same', name=conv_name_base + '2b', kernel_regularizer=l2(weight_decay))(x)
        x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b', momentum=batch_momentum)(x)
        x = Activation('relu')(x)

        x = Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c', kernel_regularizer=l2(weight_decay))(x)
        x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c', momentum=batch_momentum)(x)

        x = Add()([x, input_tensor])
        x = Activation('relu')(x)
        return x
    return f
resnet_helpers.py (project: Keras-FCN, author: aurora95)
def atrous_conv_block(kernel_size, filters, stage, block, weight_decay=0., strides=(1, 1), atrous_rate=(2, 2), batch_momentum=0.99):
    '''conv_block is the block that has a conv layer at shortcut
    # Arguments
        kernel_size: default 3, the kernel size of the middle conv layer of the main path
        filters: list of integers, the filter counts of the 3 conv layers on the main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
    '''
    def f(input_tensor):
        nb_filter1, nb_filter2, nb_filter3 = filters
        if K.image_data_format() == 'channels_last':
            bn_axis = 3
        else:
            bn_axis = 1
        conv_name_base = 'res' + str(stage) + block + '_branch'
        bn_name_base = 'bn' + str(stage) + block + '_branch'

        x = Conv2D(nb_filter1, (1, 1), strides=strides,
                          name=conv_name_base + '2a', kernel_regularizer=l2(weight_decay))(input_tensor)
        x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a', momentum=batch_momentum)(x)
        x = Activation('relu')(x)

        x = Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same', dilation_rate=atrous_rate,
                          name=conv_name_base + '2b', kernel_regularizer=l2(weight_decay))(x)
        x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b', momentum=batch_momentum)(x)
        x = Activation('relu')(x)

        x = Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c', kernel_regularizer=l2(weight_decay))(x)
        x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c', momentum=batch_momentum)(x)

        shortcut = Conv2D(nb_filter3, (1, 1), strides=strides,
                                 name=conv_name_base + '1', kernel_regularizer=l2(weight_decay))(input_tensor)
        shortcut = BatchNormalization(axis=bn_axis, name=bn_name_base + '1', momentum=batch_momentum)(shortcut)

        x = Add()([x, shortcut])
        x = Activation('relu')(x)
        return x
    return f
resnet_helpers.py (project: Keras-FCN, author: aurora95)
def conv_block(kernel_size, filters, stage, block, weight_decay=0., strides=(2, 2), batch_momentum=0.99):
    '''conv_block is the block that has a conv layer at shortcut
    # Arguments
        kernel_size: default 3, the kernel size of the middle conv layer of the main path
        filters: list of integers, the filter counts of the 3 conv layers on the main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
    Note that from stage 3 on, the first conv layer of the main path uses strides=(2, 2),
    and the shortcut uses strides=(2, 2) as well.
    '''
    def f(input_tensor):
        nb_filter1, nb_filter2, nb_filter3 = filters
        if K.image_data_format() == 'channels_last':
            bn_axis = 3
        else:
            bn_axis = 1
        conv_name_base = 'res' + str(stage) + block + '_branch'
        bn_name_base = 'bn' + str(stage) + block + '_branch'

        x = Conv2D(nb_filter1, (1, 1), strides=strides,
                          name=conv_name_base + '2a', kernel_regularizer=l2(weight_decay))(input_tensor)
        x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a', momentum=batch_momentum)(x)
        x = Activation('relu')(x)

        x = Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',
                          name=conv_name_base + '2b', kernel_regularizer=l2(weight_decay))(x)
        x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b', momentum=batch_momentum)(x)
        x = Activation('relu')(x)

        x = Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c', kernel_regularizer=l2(weight_decay))(x)
        x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c', momentum=batch_momentum)(x)

        shortcut = Conv2D(nb_filter3, (1, 1), strides=strides,
                                 name=conv_name_base + '1', kernel_regularizer=l2(weight_decay))(input_tensor)
        shortcut = BatchNormalization(axis=bn_axis, name=bn_name_base + '1', momentum=batch_momentum)(shortcut)

        x = Add()([x, shortcut])
        x = Activation('relu')(x)
        return x
    return f
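These helpers compose into ResNet stages in the usual way: one conv_block with a projection shortcut, then identity_blocks. A sketch following the standard ResNet50 stage-2 pattern (illustrative; assumes the module's own Keras imports):

from keras.layers import Input
from keras.models import Model

inp = Input(shape=(56, 56, 64))
x = conv_block(3, [64, 64, 256], stage=2, block='a', strides=(1, 1))(inp)
x = identity_block(3, [64, 64, 256], stage=2, block='b')(x)
x = identity_block(3, [64, 64, 256], stage=2, block='c')(x)
model = Model(inp, x)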
