Python usage examples of the Adam() optimizer class

Source: n12_pepe_zoo.py — project: kaggle_yt8m, author: N01Z3 (views: 24, favorites: 0, likes: 0, comments: 0)
def build_mod10(opt=None):
    """Build a two-branch fully connected YT8M model.

    Branch 1 takes the 128-d audio feature, branch 2 the 1024-d video
    feature; both run through fc_block1 + two fc_inception blocks, are
    concatenated, refined, and mapped to 4716 sigmoid label outputs.

    Parameters:
        opt: a Keras optimizer instance; if None, a fresh adam() is created.

    Returns:
        The compiled Keras Model.
    """
    # FIX: the original signature `opt=adam()` evaluated the default once at
    # import time, so every default call shared a single stateful optimizer
    # instance (mutable-default pitfall). Create one per call instead.
    if opt is None:
        opt = adam()

    n = 1800  # hidden width (redundant int() cast removed)
    in1 = Input((128,), name='x1')

    x1 = fc_block1(in1, n)
    x1 = fc_inception(x1, n)
    x1 = fc_inception(x1, n)

    in2 = Input((1024,), name='x2')

    x2 = fc_block1(in2, n)
    x2 = fc_inception(x2, n)
    x2 = fc_inception(x2, n)

    x = merge([x1, x2], mode='concat', concat_axis=1)

    x = fc_inception(x, n)
    x = fc_inception(x, n)
    x = fc_block1(x, 2000)

    out = Dense(4716, activation='sigmoid', name='output')(x)
    model = Model(input=[in1, in2], output=out)
    # NOTE(review): sigmoid output with categorical_crossentropy is an unusual
    # pairing (binary_crossentropy is standard for multi-label YT8M) — left
    # unchanged to preserve the original training behavior.
    model.compile(optimizer=opt, loss='categorical_crossentropy')

    # model.summary()
    plot(model=model, show_shapes=True)
    return model
Source: n12_pepe_zoo.py — project: kaggle_yt8m, author: N01Z3 (views: 24, favorites: 0, likes: 0, comments: 0)
def build_mod12(opt=None):
    """Build a two-branch residual fully connected YT8M model.

    Each branch (128-d audio, 1024-d video) gets one fc_block1 plus two
    fc_identity residual blocks with dropout 0.2, then the branches are
    concatenated, passed through three more residual blocks, and mapped to
    4716 sigmoid label outputs.

    Parameters:
        opt: a Keras optimizer instance; if None, a fresh adam() is created.

    Returns:
        The compiled Keras Model.
    """
    # FIX: the original signature `opt=adam()` evaluated the default once at
    # import time, so every default call shared a single stateful optimizer
    # instance (mutable-default pitfall). Create one per call instead.
    if opt is None:
        opt = adam()

    n = 2 * 1024  # hidden width = 2048 (redundant int() cast removed)
    in1 = Input((128,), name='x1')
    x1 = fc_block1(in1, n, d=0.2)
    x1 = fc_identity(x1, n, d=0.2)
    x1 = fc_identity(x1, n, d=0.2)

    in2 = Input((1024,), name='x2')
    x2 = fc_block1(in2, n, d=0.2)
    x2 = fc_identity(x2, n, d=0.2)
    x2 = fc_identity(x2, n, d=0.2)

    x = merge([x1, x2], mode='concat', concat_axis=1)

    x = fc_identity(x, n, d=0.2)
    x = fc_identity(x, n, d=0.2)
    x = fc_identity(x, n, d=0.2)
    x = fc_block1(x, n)

    out = Dense(4716, activation='sigmoid', name='output')(x)

    model = Model(input=[in1, in2], output=out)
    model.compile(optimizer=opt, loss='categorical_crossentropy')

    model.summary()
    # plot(model=model, show_shapes=True)
    return model
Source: n12_pepe_zoo.py — project: kaggle_yt8m, author: N01Z3 (views: 23, favorites: 0, likes: 0, comments: 0)
def build_mod13(opt=None):
    """Build a deeper two-branch residual fully connected YT8M model.

    Like build_mod12 but with three fc_identity residual blocks per input
    branch and two after the concatenation; output is 4716 sigmoid labels.

    Parameters:
        opt: a Keras optimizer instance; if None, a fresh adam() is created.

    Returns:
        The compiled Keras Model.
    """
    # FIX: the original signature `opt=adam()` evaluated the default once at
    # import time, so every default call shared a single stateful optimizer
    # instance (mutable-default pitfall). Create one per call instead.
    if opt is None:
        opt = adam()

    n = 2 * 1024  # hidden width = 2048 (redundant int() cast removed)
    in1 = Input((128,), name='x1')
    x1 = fc_block1(in1, n, d=0.2)
    x1 = fc_identity(x1, n, d=0.2)
    x1 = fc_identity(x1, n, d=0.2)
    x1 = fc_identity(x1, n, d=0.2)

    in2 = Input((1024,), name='x2')
    x2 = fc_block1(in2, n, d=0.2)
    x2 = fc_identity(x2, n, d=0.2)
    x2 = fc_identity(x2, n, d=0.2)
    x2 = fc_identity(x2, n, d=0.2)

    x = merge([x1, x2], mode='concat', concat_axis=1)

    x = fc_identity(x, n, d=0.2)
    x = fc_identity(x, n, d=0.2)
    x = fc_block1(x, n)

    out = Dense(4716, activation='sigmoid', name='output')(x)

    model = Model(input=[in1, in2], output=out)
    model.compile(optimizer=opt, loss='categorical_crossentropy')

    model.summary()
    # plot(model=model, show_shapes=True)
    return model
Source: network.py — project: cocktail-party, author: avivga (views: 36, favorites: 0, likes: 0, comments: 0)
def train(self, x, y, learning_rate=0.01, epochs=200):
    """Compile the wrapped model with Adam and fit it on (x, y).

    Uses MSE loss, batch size 32, a 5% validation split, and verbose
    progress output.
    """
    opt = optimizers.adam(lr=learning_rate, decay=1e-6)
    self._model.compile(loss="mean_squared_error", optimizer=opt)

    self._model.fit(x, y, batch_size=32, validation_split=0.05, epochs=epochs, verbose=1)
Source: network.py — project: cocktail-party, author: avivga (views: 28, favorites: 0, likes: 0, comments: 0)
def train(self, x, y, learning_rate=0.01, epochs=200):
    """Compile the wrapped model with Adam and fit it on (x, y).

    Uses MSE loss, batch size 32, a 5% validation split, and verbose
    progress output.
    """
    opt = optimizers.adam(lr=learning_rate, decay=1e-6)
    self._model.compile(loss="mean_squared_error", optimizer=opt)

    self._model.fit(x, y, batch_size=32, validation_split=0.05, epochs=epochs, verbose=1)
Source: main.py — project: EEDS-keras, author: MarkPrecursor (views: 24, favorites: 0, likes: 0, comments: 0)
def model_EES(input_col, input_row):
    """Build the shallow EES super-resolution network.

    Pipeline: 3x3 conv (8 filters) -> 14x14 deconvolution with stride 2
    (2x spatial upscaling, 16 filters) -> 5x5 conv to a single channel.
    Compiled with Adam (lr=0.001) and MSE loss/metric.
    """
    _input = Input(shape=(input_col, input_row, 1), name='input')

    x = Conv2D(nb_filter=8, nb_row=3, nb_col=3, init='he_normal',
               activation='relu', border_mode='same', bias=True)(_input)
    x = Deconvolution2D(nb_filter=16, nb_row=14, nb_col=14,
                        output_shape=(None, input_col * 2, input_row * 2, 16),
                        subsample=(2, 2), border_mode='same',
                        init='glorot_uniform', activation='relu')(x)
    out = Conv2D(nb_filter=1, nb_row=5, nb_col=5, init='glorot_uniform',
                 activation='relu', border_mode='same')(x)

    model = Model(input=_input, output=out)
    model.compile(optimizer=adam(lr=0.001), loss='mean_squared_error',
                  metrics=['mean_squared_error'])
    return model
Source: main.py — project: EEDS-keras, author: MarkPrecursor (views: 26, favorites: 0, likes: 0, comments: 0)
def model_EEDS(input_col, input_row):
    """Build the combined EEDS network: element-wise sum of EED and EES.

    Both sub-networks are applied to the same single-channel input and
    their outputs are fused with a 'sum' merge. Compiled with Adam
    (lr=0.001) and MSE loss/metric.
    """
    inp = Input(shape=(input_col, input_row, 1), name='input')
    ees_out = model_EES(input_col, input_row)(inp)
    eed_out = model_EED(input_col, input_row)(inp)
    fused = merge(inputs=[eed_out, ees_out], mode='sum')

    model = Model(input=inp, output=fused)
    model.compile(optimizer=adam(lr=0.001), loss='mean_squared_error',
                  metrics=['mean_squared_error'])
    return model
Source: EEDS.py — project: EEDS-keras, author: MarkPrecursor (views: 20, favorites: 0, likes: 0, comments: 0)
def model_EEDS():
    """Build the combined EEDS network for arbitrary input sizes.

    Applies the EES and EED sub-models to the same (None, None, 1) input
    and adds their outputs element-wise. Compiled with Adam (lr=0.0003)
    and MSE loss.
    """
    inp = Input(shape=(None, None, 1), name='input')
    branch_es = EES.model_EES()(inp)
    branch_ed = EED.model_EED()(inp)
    fused = add(inputs=[branch_ed, branch_es])

    model = Model(input=inp, output=fused)
    model.compile(optimizer=adam(lr=0.0003), loss='mse')
    return model
Source: n11_train.py — project: kaggle_yt8m, author: N01Z3 (views: 23, favorites: 0, likes: 0, comments: 0)
def get_mod(ags):
    """Build the model selected by ags.mtype and optionally restore weights.

    Parameters:
        ags: parsed arguments providing wpath, versn, optim, lrate, mtype
             and begin.

    Returns:
        (model, b_scr) where b_scr is the best score parsed from the most
        recent checkpoint filename, or -1 when no checkpoint was loaded.

    Raises:
        IndexError: if ags.mtype is not a known model index (0..6).
    """
    dst = os.path.join(ags.wpath, ags.versn)
    b_scr = -1

    if ags.optim == 'adam':
        opt = adam(ags.lrate)
    elif ags.optim == 'sgd':
        opt = sgd(ags.lrate)
    else:
        opt = adam()

    # FIX: the original code eagerly built and compiled ALL seven models in a
    # throwaway list just to index it, then rebuilt the chosen one anyway in
    # an if/elif chain. A lazy dispatch table constructs only the one needed.
    builders = {
        0: (build_mod2, '2'),
        1: (build_mod3, '3'),
        2: (build_mod7, '7'),
        3: (build_mod9, '9'),
        4: (build_mod11, '11'),
        5: (build_mod12, '12'),
        6: (build_mod13, '13'),
    }
    try:
        build, label = builders[ags.mtype]
    except KeyError:
        # Mirrors the IndexError the original list lookup raised for
        # out-of-range indices.
        raise IndexError('unknown mtype: %r' % (ags.mtype,))

    model = build(opt)
    logging.info('start with model %s', label)

    # begin == -1 means "resume": load the newest checkpoint, whose filename
    # starts with the score it achieved (e.g. "0.812_...h5").
    if ags.begin == -1:
        fls = sorted(glob.glob(dst + '/*h5'))
        if fls:
            logging.info('load weights: %s' % fls[-1])
            model.load_weights(fls[-1])
            b_scr = float(os.path.basename(fls[-1]).split('_')[0])

    return model, b_scr
Source: model.py — project: nesgym, author: codescv (views: 20, favorites: 0, likes: 0, comments: 0)
def __init__(self,
             image_shape,
             num_actions,
             frame_history_len=4,
             replay_buffer_size=1000000,
             training_freq=4,
             training_starts=5000,
             training_batch_size=32,
             target_update_freq=1000,
             reward_decay=0.99,
             exploration=None,
             log_dir="logs/"):
    """
        Double Deep Q Network
        params:
        image_shape: (height, width, n_values)
        num_actions: how many different actions we can choose
        frame_history_len: feed this number of frame data as input to the deep-q Network
        replay_buffer_size: size limit of replay buffer
        training_freq: train base q network once per training_freq steps
        training_starts: only train q network after this number of steps
        training_batch_size: batch size for training base q network with gradient descent
        target_update_freq: sync the target network to the base network once per this many steps
        reward_decay: decay factor(called gamma in paper) of rewards that happen in the future
        exploration: used to generate an exploration factor(see 'epsilon-greedy' in paper).
                     when rand(0,1) < epsilon, take random action; otherwise take greedy action.
                     Defaults to LinearSchedule(5000, 0.1) when None.
        log_dir: path to write tensorboard logs
    """
    super().__init__()
    # FIX: the original default `exploration=LinearSchedule(5000, 0.1)` was
    # constructed once at class-definition time, so every instance created
    # with the default shared the same schedule object (mutable-default
    # pitfall). Build a fresh one per instance instead.
    if exploration is None:
        exploration = LinearSchedule(5000, 0.1)

    self.num_actions = num_actions
    self.training_freq = training_freq
    self.training_starts = training_starts
    self.training_batch_size = training_batch_size
    self.target_update_freq = target_update_freq
    self.reward_decay = reward_decay
    self.exploration = exploration

    # use multiple frames as input to q network: stack frame_history_len
    # frames along the channel axis
    input_shape = image_shape[:-1] + (image_shape[-1] * frame_history_len,)
    # used to choose action
    self.base_model = q_model(input_shape, num_actions)
    self.base_model.compile(optimizer=optimizers.adam(clipnorm=10, lr=1e-4, decay=1e-6, epsilon=1e-4), loss='mse')
    # used to estimate q values (updated from base_model every
    # target_update_freq steps, per the Double DQN paper)
    self.target_model = q_model(input_shape, num_actions)

    self.replay_buffer = ReplayBuffer(size=replay_buffer_size, frame_history_len=frame_history_len)
    # current replay buffer offset
    self.replay_buffer_idx = 0

    self.tensorboard_callback = TensorBoard(log_dir=log_dir)
    # rolling window of recent training losses for monitoring
    self.latest_losses = deque(maxlen=100)
Source: main.py — project: EEDS-keras, author: MarkPrecursor (views: 19, favorites: 0, likes: 0, comments: 0)
def model_EED(input_col, input_row):
    """Build the deep EED super-resolution network.

    Stages: feature extraction with a residual sum, 2x upsampling via a
    strided deconvolution bracketed by 1x1 convs, two residual blocks,
    and a four-branch multi-scale block (1/3/5/7 kernels) concatenated and
    projected to a single channel. Compiled with Adam (lr=0.001) and MSE
    loss/metric.
    """
    _input = Input(shape=(input_col, input_row, 1), name='input')

    def _conv(prev, filters, size):
        # Shared shape for every conv in this network: square kernel, ReLU,
        # 'same' padding, glorot init, with bias.
        return Conv2D(nb_filter=filters, nb_row=size, nb_col=size,
                      init='glorot_uniform', activation='relu',
                      border_mode='same', bias=True)(prev)

    # Feature extraction with a skip connection
    feat = _conv(_input, 64, 3)
    feat = _conv(feat, 64, 3)
    feat3 = _conv(feat, 64, 3)
    feat_out = merge(inputs=[feat, feat3], mode='sum')

    # Upsampling: bottleneck -> 2x deconvolution -> expand
    up = _conv(feat_out, 8, 1)
    up = Deconvolution2D(nb_filter=8, nb_row=14, nb_col=14,
                         output_shape=(None, input_col * 2, input_row * 2, 8),
                         subsample=(2, 2), border_mode='same',
                         init='glorot_uniform', activation='relu')(up)
    up = _conv(up, 64, 1)

    # Multi-scale reconstruction: two residual blocks
    res1 = _conv(up, 64, 3)
    res2 = _conv(res1, 64, 3)
    block1 = merge(inputs=[res1, res2], mode='sum')

    res3 = _conv(block1, 64, 3)
    res4 = _conv(res3, 64, 3)
    block2 = merge(inputs=[res3, res4], mode='sum')

    # Four parallel branches with growing receptive fields, concatenated
    ms_in = _conv(block2, 16, 1)
    branches = [_conv(ms_in, 16, k) for k in (1, 3, 5, 7)]
    ms_out = merge(inputs=branches, mode='concat')

    out = _conv(ms_out, 1, 1)
    model = Model(input=_input, output=out)

    model.compile(optimizer=adam(lr=0.001), loss='mean_squared_error',
                  metrics=['mean_squared_error'])

    return model


Site footer (navigation): Questions | Interview experiences | Articles | WeChat official account — scan the QR code to follow.