Python concatenate() usage examples
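The snippets below are collected from open-source Keras projects and show the Keras 2 functional helper `concatenate` (and the `Concatenate` layer) in practice. As a baseline for the examples that follow, a minimal self-contained sketch:

from keras.layers import Input, Dense, concatenate
from keras.models import Model

a = Input(shape=(16,))
b = Input(shape=(8,))
merged = concatenate([a, b])               # default axis=-1 -> shape (None, 24)
out = Dense(1, activation='sigmoid')(merged)
model = Model(inputs=[a, b], outputs=out)
print(model.output_shape)                  # (None, 1)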

inception_v4.py (project: keras-inceptionV4, author: kentsommer)
def block_inception_c(input):
    if K.image_data_format() == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1

    branch_0 = conv2d_bn(input, 256, 1, 1)

    branch_1 = conv2d_bn(input, 384, 1, 1)
    branch_10 = conv2d_bn(branch_1, 256, 1, 3)
    branch_11 = conv2d_bn(branch_1, 256, 3, 1)
    branch_1 = concatenate([branch_10, branch_11], axis=channel_axis)

    branch_2 = conv2d_bn(input, 384, 1, 1)
    branch_2 = conv2d_bn(branch_2, 448, 3, 1)
    branch_2 = conv2d_bn(branch_2, 512, 1, 3)
    branch_20 = conv2d_bn(branch_2, 256, 1, 3)
    branch_21 = conv2d_bn(branch_2, 256, 3, 1)
    branch_2 = concatenate([branch_20, branch_21], axis=channel_axis)

    branch_3 = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(input)
    branch_3 = conv2d_bn(branch_3, 256, 1, 1)

    x = concatenate([branch_0, branch_1, branch_2, branch_3], axis=channel_axis)
    return x
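A hypothetical call sketch; conv2d_bn is defined elsewhere in inception_v4.py, and the 8x8x1536 input is the usual Inception-C resolution:

from keras.layers import Input

feat = Input(shape=(8, 8, 1536))
out = block_inception_c(feat)   # four branches concatenated: 256 + 512 + 512 + 256 = 1536 channels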
resnext.py (project: Keras-ResNeXt, author: titu1994)
def __grouped_convolution_block(input, grouped_channels, cardinality, strides, weight_decay=5e-4):
    ''' Adds a grouped convolution block, equivalent to the block described in the paper.
    Args:
        input: input tensor
        grouped_channels: grouped number of filters
        cardinality: cardinality factor describing the number of groups
        strides: performs strided convolution for downscaling if > 1
        weight_decay: weight decay term
    Returns: a keras tensor
    '''
    init = input
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    group_list = []

    if cardinality == 1:
        # with cardinality 1, it is a standard convolution
        x = Conv2D(grouped_channels, (3, 3), padding='same', use_bias=False, strides=(strides, strides),
                   kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(init)
        x = BatchNormalization(axis=channel_axis)(x)
        x = LeakyReLU()(x)
        return x

    for c in range(cardinality):
        # Slice out the c-th group of channels; the Lambda is applied immediately,
        # so the loop variable is captured at its current value.
        x = Lambda(lambda z: z[:, :, :, c * grouped_channels:(c + 1) * grouped_channels]
                   if K.image_data_format() == 'channels_last' else
                   z[:, c * grouped_channels:(c + 1) * grouped_channels, :, :])(input)

        x = Conv2D(grouped_channels, (3, 3), padding='same', use_bias=False, strides=(strides, strides),
                   kernel_initializer='he_normal', kernel_regularizer=l2(weight_decay))(x)

        group_list.append(x)

    group_merge = concatenate(group_list, axis=channel_axis)
    x = BatchNormalization(axis=channel_axis)(group_merge)
    x = LeakyReLU()(x)

    return x
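The per-group slicing above can be reproduced in isolation. A minimal sketch assuming a channels_last tensor; binding the loop variable through a default argument (c=c) avoids late-binding surprises if the lambda is ever called after the loop finishes:

from keras.layers import Input, Lambda, concatenate

inp = Input(shape=(32, 32, 64))
grouped_channels, cardinality = 16, 4
groups = [Lambda(lambda z, c=c: z[..., c * grouped_channels:(c + 1) * grouped_channels])(inp)
          for c in range(cardinality)]
merged = concatenate(groups, axis=-1)   # regrouped back to 64 channels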
keras_yolo.py (project: YAD2K, author: allanzelener)
def yolo_boxes_to_corners(box_xy, box_wh):
    """Convert YOLO box predictions to bounding box corners."""
    box_mins = box_xy - (box_wh / 2.)
    box_maxes = box_xy + (box_wh / 2.)

    return K.concatenate([
        box_mins[..., 1:2],  # y_min
        box_mins[..., 0:1],  # x_min
        box_maxes[..., 1:2],  # y_max
        box_maxes[..., 0:1]  # x_max
    ])
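A quick numeric check of the corner conversion (assumes the TensorFlow backend; K.constant and K.eval are standard backend calls):

import numpy as np
from keras import backend as K

box_xy = K.constant(np.array([[0.5, 0.5]]))   # box center (x, y)
box_wh = K.constant(np.array([[0.2, 0.4]]))   # box width and height
print(K.eval(yolo_boxes_to_corners(box_xy, box_wh)))
# [[0.3 0.4 0.7 0.6]] -> y_min, x_min, y_max, x_max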
stack_lstm_model1.py (project: kaggle-quora-solution-8th, author: qqgeogor)
def baseline():
    embedding_layer = Embedding(nb_words,
        EMBEDDING_DIM,
        weights=[embedding_matrix],
        input_length=MAX_SEQUENCE_LENGTH,
        trainable=False)
    lstm_layer = LSTM(num_lstm, dropout=rate_drop_lstm, recurrent_dropout=rate_drop_lstm)

    sequence_1_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
    embedded_sequences_1 = embedding_layer(sequence_1_input)
    x1 = lstm_layer(embedded_sequences_1)

    sequence_2_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
    embedded_sequences_2 = embedding_layer(sequence_2_input)
    y1 = lstm_layer(embedded_sequences_2)

    merged = concatenate([x1, y1])
    merged = Dropout(rate_drop_dense)(merged)
    merged = BatchNormalization()(merged)

    merged = Dense(num_dense, activation=act)(merged)
    merged = Dropout(rate_drop_dense)(merged)
    merged = BatchNormalization()(merged)

    preds = Dense(1, activation='sigmoid')(merged)

    model = Model(inputs=[sequence_1_input, sequence_2_input],
                  outputs=preds)
    model.compile(loss='binary_crossentropy',
        optimizer='nadam',
        metrics=['acc'])

    return model
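The function reads its hyperparameters from module-level globals defined elsewhere in the script. A hypothetical setup for experimentation (all values below are assumptions, and the usual keras.layers/keras.models imports are required):

import numpy as np

nb_words, EMBEDDING_DIM, MAX_SEQUENCE_LENGTH = 20000, 300, 30
num_lstm, num_dense = 200, 128
rate_drop_lstm, rate_drop_dense, act = 0.2, 0.2, 'relu'
embedding_matrix = np.zeros((nb_words, EMBEDDING_DIM))  # placeholder weights

model = baseline()   # merged concatenate output: (None, 2 * num_lstm)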
stack_lstm_attn_model2.py (project: kaggle-quora-solution-8th, author: qqgeogor)
def baseline():
    embedding_layer = Embedding(nb_words,
        EMBEDDING_DIM,
        weights=[embedding_matrix],
        input_length=MAX_SEQUENCE_LENGTH,
        trainable=False)
    lstm_layer = LSTM(num_lstm, dropout=rate_drop_lstm, recurrent_dropout=rate_drop_lstm, return_sequences=True)

    sequence_1_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
    embedded_sequences_1 = embedding_layer(sequence_1_input)
    x1 = lstm_layer(embedded_sequences_1)
    x1 = Attention(MAX_SEQUENCE_LENGTH)(x1)
    sequence_2_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
    embedded_sequences_2 = embedding_layer(sequence_2_input)
    y1 = lstm_layer(embedded_sequences_2)
    y1 = Attention(MAX_SEQUENCE_LENGTH)(y1)

    merged = concatenate([x1, y1])
    merged = Dropout(rate_drop_dense)(merged)
    merged = BatchNormalization()(merged)

    merged = Dense(num_dense, activation=act)(merged)
    merged = Dropout(rate_drop_dense)(merged)
    merged = BatchNormalization()(merged)

    preds = Dense(1, activation='sigmoid')(merged)

    model = Model(inputs=[sequence_1_input, sequence_2_input],
                  outputs=preds)
    model.compile(loss='binary_crossentropy',
        optimizer='nadam',
        metrics=['acc'])

    return model
recurrent.py (project: extkeras, author: andhus)
def step(self, inputs, states):
        h, c = self._get_hc(inputs, states)
        if self.output_cells:
            output = concatenate([h, c])
        else:
            output = h

        if 0 < self.dropout + self.recurrent_dropout:
            output._uses_learning_phase = True

        return output, [h, c]
recurrent.py (project: extkeras, author: andhus)
def preprocess_input(self, inputs, training=None):
        if self.implementation == 0:
            cell_mask = inputs[:, :, -self.units:]
            inputs = inputs[:, :, :-self.units]

            inputs_prep = super(CellMaskedLSTM, self).preprocess_input(
                inputs,
                training
            )
            return K.concatenate([inputs_prep, cell_mask], axis=2)
        else:
            return inputs
recurrent.py (project: extkeras, author: andhus)
def __call__(self, inputs):
        lstm_inputs, time = inputs
        cell_mask = self.cell_mask(time)
        outputs = self.cell_masked_lstm(
            concatenate([lstm_inputs, cell_mask], axis=2)
        )

        return outputs
audiomodels.py (project: gtzan.keras, author: Hguimaraes)
def cnn_melspect_1D(input_shape):
    kernel_size = 3
    #activation_func = LeakyReLU()
    activation_func = Activation('relu')
    inputs = Input(input_shape)

    # Convolutional block_1
    conv1 = Conv1D(32, kernel_size)(inputs)
    act1 = activation_func(conv1)
    bn1 = BatchNormalization()(act1)
    pool1 = MaxPooling1D(pool_size=2, strides=2)(bn1)

    # Convolutional block_2
    conv2 = Conv1D(64, kernel_size)(pool1)
    act2 = activation_func(conv2)
    bn2 = BatchNormalization()(act2)
    pool2 = MaxPooling1D(pool_size=2, strides=2)(bn2)

    # Convolutional block_3
    conv3 = Conv1D(128, kernel_size)(pool2)
    act3 = activation_func(conv3)
    bn3 = BatchNormalization()(act3)

    # Global Layers
    gmaxpl = GlobalMaxPooling1D()(bn3)
    gmeanpl = GlobalAveragePooling1D()(bn3)
    mergedlayer = concatenate([gmaxpl, gmeanpl], axis=1)

    # Regular MLP
    dense1 = Dense(512,
        kernel_initializer='glorot_normal',
        bias_initializer='glorot_normal')(mergedlayer)
    actmlp = activation_func(dense1)
    reg = Dropout(0.5)(actmlp)

    dense2 = Dense(512,
        kernel_initializer='glorot_normal',
        bias_initializer='glorot_normal')(reg)
    actmlp = activation_func(dense2)
    reg = Dropout(0.5)(actmlp)

    dense2 = Dense(10, activation='softmax')(reg)

    model = Model(inputs=[inputs], outputs=[dense2])
    return model
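A hypothetical usage sketch (the 128-frame, single-channel input shape is an assumption; the usual keras imports are required):

model = cnn_melspect_1D((128, 1))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# mergedlayer = GlobalMaxPooling1D + GlobalAveragePooling1D -> (None, 128 + 128)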
model.py (project: keras_detect_tool_wear, author: kidozh)
def build_multi_input_main_residual_network(batch_size,
                                a2_time_step,
                                d2_time_step,
                                d1_time_step,
                                input_dim,
                                output_dim,
                                loop_depth=15,
                                dropout=0.3):
    '''
    A multi-input residual network for wavelet-transformed signals.
    :param batch_size: batch size
    :param a2_time_step: length of the a2 approximation component
    :param d2_time_step: length of the d2 detail component
    :param d1_time_step: length of the d1 detail component
    :param input_dim: input dimension
    :param output_dim: output dimension
    :param loop_depth: depth of the residual network
    :param dropout: dropout rate
    :return: a compiled Keras model
    '''
    a2_inp = Input(shape=(a2_time_step, input_dim), name='a2')
    d2_inp = Input(shape=(d2_time_step, input_dim), name='d2')
    d1_inp = Input(shape=(d1_time_step, input_dim), name='d1')

    # stack the three wavelet components along the time axis
    out = concatenate([a2_inp, d2_inp, d1_inp], axis=1)

    out = Conv1D(128, 5)(out)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)

    out = first_block(out, (64, 128), dropout=dropout)

    for _ in range(loop_depth):
        out = repeated_block(out, (64, 128), dropout=dropout)

    # flatten before the dense head
    out = Flatten()(out)

    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = Dense(output_dim)(out)

    model = Model(inputs=[a2_inp, d2_inp, d1_inp], outputs=[out])

    model.compile(loss='mse', optimizer='adam', metrics=['mse', 'mae'])
    return model
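A hypothetical call with made-up sizes (first_block and repeated_block come from the same file and must be in scope):

model = build_multi_input_main_residual_network(
    batch_size=32, a2_time_step=64, d2_time_step=64, d1_time_step=128,
    input_dim=7, output_dim=1)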
multi_gpu.py (project: diluvian, author: aschampion)
def make_parallel(model, gpu_count):
    def get_slice(data, idx, parts):
        # Adapted from:
        # https://github.com/fchollet/keras/issues/2436#issuecomment-291874528
        sh = K.shape(data)
        L = sh[0] // parts  # integer division keeps the slice index an int tensor
        if idx == parts - 1:
            return data[idx*L:]  # last slice absorbs any remainder
        return data[idx*L:(idx+1)*L]

    outputs_all = []
    for i in range(len(model.outputs)):
        outputs_all.append([])

    # Place a copy of the model on each GPU, each getting a slice of the batch
    for i in range(gpu_count):
        with tf.device('/gpu:%d' % i):
            with tf.name_scope('tower_%d' % i) as scope:

                inputs = []
                # Slice each input into a piece for processing on this GPU
                for x in model.inputs:
                    input_shape = tuple(x.get_shape().as_list())[1:]
                    slice_n = Lambda(get_slice, output_shape=input_shape, arguments={'idx':i,'parts':gpu_count})(x)
                    inputs.append(slice_n)

                outputs = model(inputs)

                if not isinstance(outputs, list):
                    outputs = [outputs]

                # Save all the outputs for merging back together later
                for l in range(len(outputs)):
                    outputs_all[l].append(outputs[l])

    # merge outputs on CPU
    with tf.device('/cpu:0'):
        merged = []
        for outputs in outputs_all:
            merged.append(concatenate(outputs, axis=0))

        # From https://github.com/kuza55/keras-extras/issues/3#issuecomment-264408864
        new_model = Model(inputs=model.inputs, outputs=merged)
        func_type = type(model.save)

        # monkeypatch the save to save just the underlying model
        def new_save(_, *args, **kwargs):
            model.save(*args, **kwargs)
        new_model.save = func_type(new_save, new_model)

        return new_model
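Hypothetical usage, wrapping an existing single-GPU Keras model; the tower outputs are concatenated on the batch axis, so predictions line up with the original input order:

parallel_model = make_parallel(base_model, gpu_count=2)  # base_model: any built Keras model
parallel_model.compile(optimizer='adam', loss='mse')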
parallel.py (project: ml-tools, author: triagemd)
def make_parallel(model, gpu_count):
    def get_slice(data, idx, parts):
        shape = tf.shape(data)
        total_size = shape[:1]
        slice_size = total_size // parts
        slice_offset = slice_size * idx

        if idx == parts - 1:
            # give the last slice any surplus data, to avoid chopping it off
            slice_size += total_size % parts

        size = tf.concat([slice_size, shape[1:]], axis=0)
        start = tf.concat([slice_offset, shape[1:] * 0], axis=0)
        return tf.slice(data, start, size)

    outputs_all = []
    for i in range(len(model.outputs)):
        outputs_all.append([])

    # Place a copy of the model on each GPU, each getting a slice of the batch
    for i in range(gpu_count):
        with tf.device('/gpu:%d' % i):
            with tf.name_scope('tower_%d' % i):

                inputs = []
                # Slice each input into a piece for processing on this GPU
                for x in model.inputs:
                    input_shape = tuple(x.get_shape().as_list())[1:]
                    slice_n = Lambda(get_slice, output_shape=input_shape, arguments={'idx': i, 'parts': gpu_count})(x)
                    inputs.append(slice_n)

                outputs = model(inputs)

                if not isinstance(outputs, list):
                    outputs = [outputs]

                # Save all the outputs for merging back together later
                for l in range(len(outputs)):
                    outputs_all[l].append(outputs[l])

    # Merge outputs on CPU
    with tf.device('/cpu:0'):
        merged = []
        for outputs in outputs_all:
            merged.append(concatenate(outputs, axis=0))

        return Model(inputs=model.inputs, outputs=merged)
multi_gpu.py (project: mnist-multi-gpu, author: normanheckscher)
def make_parallel(model, gpu_count):
    def get_slice(data, idx, parts):
        shape = tf.shape(data)
        size = tf.concat([shape[:1] // parts, shape[1:]], axis=0)
        stride = tf.concat([shape[:1] // parts, shape[1:] * 0], axis=0)
        start = stride * idx
        return tf.slice(data, start, size)

    outputs_all = []
    for i in range(len(model.outputs)):
        outputs_all.append([])

    # Place a copy of the model on each GPU, each getting a slice of the batch
    for i in range(gpu_count):
        with tf.device('/gpu:%d' % i):
            with tf.name_scope('tower_%d' % i) as scope:

                inputs = []
                # Slice each input into a piece for processing on this GPU
                for x in model.inputs:
                    input_shape = tuple(x.get_shape().as_list())[1:]
                    slice_n = Lambda(get_slice, output_shape=input_shape,
                                     arguments={'idx': i, 'parts': gpu_count})(x)
                    inputs.append(slice_n)

                outputs = model(inputs)

                if not isinstance(outputs, list):
                    outputs = [outputs]

                # Save all the outputs for merging back together later
                for l in range(len(outputs)):
                    outputs_all[l].append(outputs[l])

    # merge outputs on CPU
    with tf.device('/cpu:0'):
        merged = []
        for outputs in outputs_all:
            merged.append(concatenate(inputs=outputs, axis=0))

        return Model(inputs=model.inputs, outputs=merged)
EED.py (project: EEDS-keras, author: MarkPrecursor)
def model_EED():
    _input = Input(shape=(None, None, 1), name='input')

    Feature = Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), padding='same', activation='relu')(_input)
    Feature_out = Res_block()(Feature)

    # Upsampling
    Upsampling1 = Conv2D(filters=4, kernel_size=(1, 1), strides=(1, 1), padding='same', activation='relu')(Feature_out)
    Upsampling2 = Conv2DTranspose(filters=4, kernel_size=(14, 14), strides=(2, 2),
                                  padding='same', activation='relu')(Upsampling1)
    Upsampling3 = Conv2D(filters=64, kernel_size=(1, 1), strides=(1, 1), padding='same', activation='relu')(Upsampling2)

    # Multi-scale Reconstruction
    Reslayer1 = Res_block()(Upsampling3)

    Reslayer2 = Res_block()(Reslayer1)

    # ***************//
    Multi_scale1 = Conv2D(filters=16, kernel_size=(1, 1), strides=(1, 1), padding='same', activation='relu')(Reslayer2)

    Multi_scale2a = Conv2D(filters=16, kernel_size=(1, 1), strides=(1, 1),
                           padding='same', activation='relu')(Multi_scale1)

    Multi_scale2b = Conv2D(filters=16, kernel_size=(1, 3), strides=(1, 1),
                           padding='same', activation='relu')(Multi_scale1)
    Multi_scale2b = Conv2D(filters=16, kernel_size=(3, 1), strides=(1, 1),
                           padding='same', activation='relu')(Multi_scale2b)

    Multi_scale2c = Conv2D(filters=16, kernel_size=(1, 5), strides=(1, 1),
                           padding='same', activation='relu')(Multi_scale1)
    Multi_scale2c = Conv2D(filters=16, kernel_size=(5, 1), strides=(1, 1),
                           padding='same', activation='relu')(Multi_scale2c)

    Multi_scale2d = Conv2D(filters=16, kernel_size=(1, 7), strides=(1, 1),
                           padding='same', activation='relu')(Multi_scale1)
    Multi_scale2d = Conv2D(filters=16, kernel_size=(7, 1), strides=(1, 1),
                           padding='same', activation='relu')(Multi_scale2d)

    Multi_scale2 = concatenate(inputs=[Multi_scale2a, Multi_scale2b, Multi_scale2c, Multi_scale2d])

    out = Conv2D(filters=1, kernel_size=(1, 1), strides=(1, 1), padding='same', activation='relu')(Multi_scale2)
    model = Model(inputs=_input, outputs=out)

    return model
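The network is fully convolutional (Input(shape=(None, None, 1))), so it accepts arbitrary spatial sizes. A hypothetical usage (Res_block comes from the same project):

model = model_EED()
model.compile(optimizer='adam', loss='mse')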
zf_unet_224_model.py (project: ZF_UNET_224_Pretrained_Model, author: ZFTurbo)
def ZF_UNET_224(dropout_val=0.0, batch_norm=True):
    if K.image_dim_ordering() == 'th':
        inputs = Input((INPUT_CHANNELS, 224, 224))
        axis = 1
    else:
        inputs = Input((224, 224, INPUT_CHANNELS))
        axis = 3
    filters = 32

    conv_224 = double_conv_layer(inputs, filters, dropout_val, batch_norm)
    pool_112 = MaxPooling2D(pool_size=(2, 2))(conv_224)

    conv_112 = double_conv_layer(pool_112, 2*filters, dropout_val, batch_norm)
    pool_56 = MaxPooling2D(pool_size=(2, 2))(conv_112)

    conv_56 = double_conv_layer(pool_56, 4*filters, dropout_val, batch_norm)
    pool_28 = MaxPooling2D(pool_size=(2, 2))(conv_56)

    conv_28 = double_conv_layer(pool_28, 8*filters, dropout_val, batch_norm)
    pool_14 = MaxPooling2D(pool_size=(2, 2))(conv_28)

    conv_14 = double_conv_layer(pool_14, 16*filters, dropout_val, batch_norm)
    pool_7 = MaxPooling2D(pool_size=(2, 2))(conv_14)

    conv_7 = double_conv_layer(pool_7, 32*filters, dropout_val, batch_norm)

    up_14 = concatenate([UpSampling2D(size=(2, 2))(conv_7), conv_14], axis=axis)
    up_conv_14 = double_conv_layer(up_14, 16*filters, dropout_val, batch_norm)

    up_28 = concatenate([UpSampling2D(size=(2, 2))(up_conv_14), conv_28], axis=axis)
    up_conv_28 = double_conv_layer(up_28, 8*filters, dropout_val, batch_norm)

    up_56 = concatenate([UpSampling2D(size=(2, 2))(up_conv_28), conv_56], axis=axis)
    up_conv_56 = double_conv_layer(up_56, 4*filters, dropout_val, batch_norm)

    up_112 = concatenate([UpSampling2D(size=(2, 2))(up_conv_56), conv_112], axis=axis)
    up_conv_112 = double_conv_layer(up_112, 2*filters, dropout_val, batch_norm)

    up_224 = concatenate([UpSampling2D(size=(2, 2))(up_conv_112), conv_224], axis=axis)
    up_conv_224 = double_conv_layer(up_224, filters, 0, batch_norm)

    conv_final = Conv2D(OUTPUT_MASK_CHANNELS, (1, 1))(up_conv_224)
    conv_final = BatchNormalization(axis=axis)(conv_final)
    conv_final = Activation('sigmoid')(conv_final)

    model = Model(inputs, conv_final, name="ZF_UNET_224")
    return model
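A hypothetical usage sketch; INPUT_CHANNELS, OUTPUT_MASK_CHANNELS and double_conv_layer are defined elsewhere in zf_unet_224_model.py, so the constants below are assumptions:

INPUT_CHANNELS, OUTPUT_MASK_CHANNELS = 3, 1   # assumed values

model = ZF_UNET_224(dropout_val=0.1, batch_norm=True)
model.compile(optimizer='adam', loss='binary_crossentropy')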
models.py (project: keras-image-captioning, author: danieljl)
def build(self, vocabs=None):
        if self._keras_model:
            return
        if vocabs is None and self._word_vector_init is not None:
            raise ValueError('If word_vector_init is not None, build method '
                             'must be called with vocabs that are not None!')

        image_input, image_embedding = self._build_image_embedding()
        sentence_input, word_embedding = self._build_word_embedding(vocabs)
        sequence_input = Concatenate(axis=1)([image_embedding, word_embedding])
        sequence_output = self._build_sequence_model(sequence_input)

        model = Model(inputs=[image_input, sentence_input],
                      outputs=sequence_output)
        model.compile(optimizer=Adam(lr=self._learning_rate, clipnorm=5.0),
                      loss=categorical_crossentropy_from_logits,
                      metrics=[categorical_accuracy_with_variable_timestep])

        self._keras_model = model
layers_builder.py (project: PSPNet-Keras-tensorflow, author: Vladkryvoruchko)
def build_pyramid_pooling_module(res, input_shape):
    """Build the Pyramid Pooling Module."""
    # ---PSPNet concat layers with Interpolation
    feature_map_size = tuple(int(ceil(input_dim / 8.0)) for input_dim in input_shape)
    print("PSP module will interpolate to a final feature map size of %s" % (feature_map_size, ))

    interp_block1 = interp_block(res, 1, feature_map_size, input_shape)
    interp_block2 = interp_block(res, 2, feature_map_size, input_shape)
    interp_block3 = interp_block(res, 3, feature_map_size, input_shape)
    interp_block6 = interp_block(res, 6, feature_map_size, input_shape)

    # concat all these layers; resulting shape = (1, feature_map_size_x, feature_map_size_y, 4096)
    res = Concatenate()([res,
                         interp_block6,
                         interp_block3,
                         interp_block2,
                         interp_block1])
    return res
resnet.py (project: CycleGAN-keras, author: Shaofanl)
def res_block(input, filters, kernel_size=(3,3), strides=(1,1)):
    # conv_block:add(nn.SpatialReflectionPadding(1, 1, 1, 1))
    # conv_block:add(nn.SpatialConvolution(dim, dim, 3, 3, 1, 1, p, p))
    # conv_block:add(normalization(dim))
    # conv_block:add(nn.ReLU(true))
    x = padding()(input)
    x = Conv2D(filters=filters, 
                kernel_size=kernel_size,
                strides=strides,)(x)
    x = normalize()(x)
    x = Activation('relu')(x)

    x = padding()(x)
    x = Conv2D(filters=filters, 
                kernel_size=kernel_size,
                strides=strides,)(x)
    x = normalize()(x)

#   merged = Concatenate(axis=get_filter_dim())([input, x])
    merged = Add()([input, x])
    return merged
testSigmoidSmaller.py (project: kaggle-quora-question-pairs, author: voletiv)
def modelSigmoid(inputLength, inputDim):
    inputA = Input(shape=(inputLength,), dtype='int32')
    inputB = Input(shape=(inputLength,), dtype='int32')
    # One hot encoding
    oheInputA = Lambda(K.one_hot, arguments={
                       'num_classes': inputDim}, output_shape=(inputLength, inputDim))(inputA)
    oheInputB = Lambda(K.one_hot, arguments={
                       'num_classes': inputDim}, output_shape=(inputLength, inputDim))(inputB)
    net = netSigmoid(inputLength, inputDim)
    processedA = net(oheInputA)
    processedB = net(oheInputB)
    # Concatenate
    conc = Concatenate()([processedA, processedB])
    x = BatchNormalization()(conc)
    predictions = Dense(1, activation='sigmoid')(x)
    model = Model([inputA, inputB], predictions)
    return model
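A hypothetical character-level setup (netSigmoid is defined elsewhere in the script; the sequence length and alphabet size below are assumptions):

model = modelSigmoid(inputLength=100, inputDim=70)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])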
testSigmoidSmaller.py (project: kaggle-quora-question-pairs, author: voletiv)
def modelC256P3C256P3C256P3f128_conc_f128(inputLength, inputDim):
    inputA = Input(shape=(inputLength,), dtype='int32')
    inputB = Input(shape=(inputLength,), dtype='int32')
    # One hot encoding
    oheInputA = Lambda(K.one_hot, arguments={
                       'num_classes': inputDim}, output_shape=(inputLength, inputDim))(inputA)
    oheInputB = Lambda(K.one_hot, arguments={
                       'num_classes': inputDim}, output_shape=(inputLength, inputDim))(inputB)
    net = netC256P3C256P3C256P3f128(inputLength, inputDim)
    processedA = net(oheInputA)
    processedB = net(oheInputB)
    # Concatenate
    conc = Concatenate()([processedA, processedB])
    x = BatchNormalization()(conc)
    # Dense
    x = Dense(128, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = BatchNormalization()(x)
    predictions = Dense(1, activation='sigmoid')(x)
    model = Model([inputA, inputB], predictions)
    return model
testSigmoidSmaller.py (project: kaggle-quora-question-pairs, author: voletiv)
def modelC256P3C256P3C256P3f128_conc(inputLength, inputDim):
    inputA = Input(shape=(inputLength,), dtype='int32')
    inputB = Input(shape=(inputLength,), dtype='int32')
    # One hot encoding
    oheInputA = Lambda(K.one_hot, arguments={
                       'num_classes': inputDim}, output_shape=(inputLength, inputDim))(inputA)
    oheInputB = Lambda(K.one_hot, arguments={
                       'num_classes': inputDim}, output_shape=(inputLength, inputDim))(inputB)
    net = netC256P3C256P3C256P3f128(inputLength, inputDim)
    processedA = net(oheInputA)
    processedB = net(oheInputB)
    # Concatenate
    conc = Concatenate()([processedA, processedB])
    x = BatchNormalization()(conc)
    predictions = Dense(1, activation='sigmoid')(x)
    model = Model([inputA, inputB], predictions)
    return model
testSigmoidSmaller.py (project: kaggle-quora-question-pairs, author: voletiv)
def modelC256P3C256P3f32_conc_f64(inputLength, inputDim):
    inputA = Input(shape=(inputLength,), dtype='int32')
    inputB = Input(shape=(inputLength,), dtype='int32')
    # One hot encoding
    oheInputA = Lambda(K.one_hot, arguments={
                       'num_classes': inputDim}, output_shape=(inputLength, inputDim))(inputA)
    oheInputB = Lambda(K.one_hot, arguments={
                       'num_classes': inputDim}, output_shape=(inputLength, inputDim))(inputB)
    net = netC256P3C256P3f32(inputLength, inputDim)
    processedA = net(oheInputA)
    processedB = net(oheInputB)
    conc = Concatenate()([processedA, processedB])
    x = BatchNormalization()(conc)
    x = Dense(64, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = BatchNormalization()(x)
    predictions = Dense(1, activation='sigmoid')(x)
    model = Model([inputA, inputB], predictions)
    return model
testSigmoidSmaller.py (project: kaggle-quora-question-pairs, author: voletiv)
def modelC256P3C256P3f64_conc_f64(inputLength, inputDim):
    inputA = Input(shape=(inputLength,), dtype='int32')
    inputB = Input(shape=(inputLength,), dtype='int32')
    # One hot encoding
    oheInputA = Lambda(K.one_hot, arguments={
                       'num_classes': inputDim}, output_shape=(inputLength, inputDim))(inputA)
    oheInputB = Lambda(K.one_hot, arguments={
                       'num_classes': inputDim}, output_shape=(inputLength, inputDim))(inputB)
    net = netC256P3C256P3f64(inputLength, inputDim)
    processedA = net(oheInputA)
    processedB = net(oheInputB)
    conc = Concatenate()([processedA, processedB])
    x = BatchNormalization()(conc)
    x = Dense(64, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = BatchNormalization()(x)
    predictions = Dense(1, activation='sigmoid')(x)
    model = Model([inputA, inputB], predictions)
    return model
demand_simulation_mnist.py (project: DeepIV, author: jhartford)
def conv_embedding(images, output, other_features = [], dropout_rate=0.1,
                   embedding_dropout=0.1, embedding_l2=0.05, constrain_norm=True):
    print("Building conv net")
    x_embedding = architectures.convnet(images, Dense(64, activation='linear'),
                        dropout_rate=embedding_dropout,
                        activations='relu',
                        l2_rate=embedding_l2, constrain_norm=constrain_norm)

    if len(other_features) > 0:
        embedd = Concatenate(axis=1)([x_embedding] + other_features)
    else:
        embedd = x_embedding
    out = architectures.feed_forward_net(embedd, output,
                        hidden_layers=[32],
                        dropout_rate=dropout_rate,
                        activations='relu', constrain_norm=constrain_norm)
    return out
multi_gpu.py (project: keras_multi_gpu, author: barrykui)
def to_multi_gpu(model, n_gpus=2):
    if n_gpus == 1:
        return model

    with tf.device('/cpu:0'):
        x = Input(model.input_shape[1:])
    towers = []
    for g in range(n_gpus):
        with tf.device('/gpu:' + str(g)):
            slice_g = Lambda(slice_batch, lambda shape: shape, arguments={'n_gpus':n_gpus, 'part':g})(x)
            towers.append(model(slice_g))

    with tf.device('/cpu:0'):
        # Deprecated Keras 1 merge:
        # merged = merge(towers, mode='concat', concat_axis=0)
        merged = Concatenate(axis=0)(towers)
    return Model(inputs=[x], outputs=merged)
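slice_batch is defined elsewhere in the file. A sketch of the usual implementation of this pattern, assumed rather than taken from the project:

from keras import backend as K

def slice_batch(x, n_gpus, part):
    # Return the part-th slice of the batch dimension of x.
    sh = K.shape(x)
    L = sh[0] // n_gpus
    if part == n_gpus - 1:
        return x[part * L:]            # last part absorbs any remainder
    return x[part * L:(part + 1) * L]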
util.py (project: keras-dogs, author: ahangchen)
def make_parallel(model, gpu_count):
    def get_slice(data, idx, parts):
        shape = tf.shape(data)
        size = tf.concat([shape[:1] // parts, shape[1:]], axis=0)
        stride = tf.concat([shape[:1] // parts, shape[1:] * 0], axis=0)
        start = stride * idx
        return tf.slice(data, start, size)

    outputs_all = []
    for i in range(len(model.outputs)):
        outputs_all.append([])

    # Place a copy of the model on each GPU, each getting a slice of the batch
    for i in range(gpu_count):
        with tf.device('/gpu:%d' % i):
            with tf.name_scope('tower_%d' % i) as scope:

                inputs = []
                # Slice each input into a piece for processing on this GPU
                for x in model.inputs:
                    input_shape = tuple(x.get_shape().as_list())[1:]
                    slice_n = Lambda(get_slice, output_shape=input_shape, arguments={'idx': i, 'parts': gpu_count})(x)
                    inputs.append(slice_n)

                outputs = model(inputs)

                if not isinstance(outputs, list):
                    outputs = [outputs]

                # Save all the outputs for merging back together later
                for l in range(len(outputs)):
                    outputs_all[l].append(outputs[l])

    # merge outputs on CPU
    with tf.device('/cpu:0'):
        merged = []
        for outputs in outputs_all:
            merged.append(Concatenate(axis=0)(outputs))

        return Model(model.inputs, merged)
model.py (project: latplan, author: guicho271828)
def ConditionalSequential (array, condition, **kwargs):
    def apply1(arg,f):
        # print("applying {}({})".format(f,arg))
        concat = Concatenate(**kwargs)([condition, arg])
        return f(concat)
    return lambda x: reduce(apply1, array, x)
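Hypothetical usage; functools.reduce and the keras layers must be imported at module level. Every layer in the stack sees `condition` concatenated onto its current input:

from functools import reduce
from keras.layers import Input, Dense, Concatenate

condition = Input(shape=(4,))
x = Input(shape=(8,))
stack = ConditionalSequential([Dense(16, activation='relu'),
                               Dense(8, activation='relu')], condition)
y = stack(x)   # each Dense receives Concatenate()([condition, previous_output])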
model.py (project: latplan, author: guicho271828)
def _build(self,input_shape):
        num_actions = 128
        N = input_shape[0] - num_actions
        x = Input(shape=input_shape)
        pre    = wrap(x,tf.slice(x, [0,0], [-1,N]),name="pre")
        action = wrap(x,tf.slice(x, [0,N], [-1,num_actions]),name="action")

        ys = []
        for i in range(num_actions):
            _x = Input(shape=(N,))
            _y = Sequential([
                flatten,
                *[Sequential([BN(),
                              Dense(self.parameters['layer'], activation=self.parameters['activation']),
                              Dropout(self.parameters['dropout'])])
                  for _ in range(self.parameters['num_layers'])],
                Dense(1, activation="sigmoid")
            ])(_x)
            _m = Model(_x,_y,name="action_"+str(i))
            ys.append(_m(pre))

        ys = Concatenate()(ys)
        y  = Dot(-1)([ys,action])

        self.loss = bce
        self.net = Model(x, y)
        self.callbacks.append(GradientEarlyStopping(verbose=1,epoch=50,min_grad=self.parameters['min_grad']))
models.py (project: vess2ret, author: costapt)
def concatenate_layers(inputs, concat_axis, mode='concat'):
    if KERAS_2:
        assert mode == 'concat', "Only concatenation is supported in this wrapper"
        return Concatenate(axis=concat_axis)(inputs)
    else:
        return merge(inputs=inputs, concat_axis=concat_axis, mode=mode)
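Under Keras 2 (KERAS_2 is a module-level flag in vess2ret) this reduces to Concatenate(axis=concat_axis)(inputs); a hypothetical usage:

from keras.layers import Input

a = Input(shape=(10,))
b = Input(shape=(10,))
merged = concatenate_layers([a, b], concat_axis=-1)   # shape (None, 20)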
densenet_multi_gpu.py (project: cifar-10-cnn, author: BIGBALLON)
def to_multi_gpu(model, n_gpus=2):
    if n_gpus ==1:
        return model

    with tf.device('/cpu:0'):
        x = Input(model.input_shape[1:])
    towers = []
    for g in range(n_gpus):
        with tf.device('/gpu:' + str(g)):
            slice_g = Lambda(slice_batch, lambda shape: shape, arguments={'n_gpus':n_gpus, 'part':g})(x)
            towers.append(model(slice_g))

    with tf.device('/cpu:0'):
        merged = Concatenate(axis=0)(towers)
    return Model(inputs=[x], outputs=merged)
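Usage mirrors the keras_multi_gpu variant above and likewise relies on a slice_batch helper defined elsewhere; a hypothetical call:

parallel = to_multi_gpu(base_model, n_gpus=2)   # base_model: any built Keras model
parallel.compile(optimizer='sgd', loss='categorical_crossentropy')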

