Python Model() class usage examples

qlearner.py source (project: reinforced-race, author: timediv)
def __init__(self, environment: EnvironmentInterface, memory: Memory, image_size: int,
                 random_action_policy: RandomActionPolicy, batch_size: int, discount: float,
                 should_load_model: bool, should_save: bool, action_type: Any,
                 create_model: Callable[[Any, int], Model], batches_per_frame: int):
        self.environment = environment
        self.random_action_policy = random_action_policy
        self.memory = memory
        self.image_size = image_size
        self.batch_size = batch_size
        self.discount = discount
        self.action_type = action_type
        self.should_save = should_save
        self.should_exit = False
        self.default_sigint_handler = signal.getsignal(signal.SIGINT)
        self.training_info = TrainingInfo(should_load_model)
        self.mean_training_time = RunningAverage(1000, self.training_info['mean_training_time'])
        if batches_per_frame:
            self.training_info['batches_per_frame'] = batches_per_frame

        if should_load_model and Path(self.MODEL_PATH).is_file():
            self.model = load_model(self.MODEL_PATH)
        else:
            self.model = create_model((self.image_size, self.image_size, StateAssembler.FRAME_COUNT),
                                      action_type.COUNT)
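
The create_model argument is a factory that receives the input shape and the number of actions and returns a Keras model. A minimal sketch of such a factory, assuming a small convolutional Q-network; the layer sizes, optimizer, and compile step here are illustrative, not the project's actual architecture:

from keras.layers import Conv2D, Dense, Flatten, Input
from keras.models import Model

def create_model(input_shape, action_count):
    # input_shape is (image_size, image_size, frame_count); one Q-value per action.
    frames = Input(shape=input_shape)
    x = Conv2D(16, (8, 8), strides=(4, 4), activation='relu')(frames)
    x = Conv2D(32, (4, 4), strides=(2, 2), activation='relu')(x)
    x = Flatten()(x)
    x = Dense(256, activation='relu')(x)
    q_values = Dense(action_count, activation='linear')(x)
    model = Model(inputs=frames, outputs=q_values)
    model.compile(optimizer='adam', loss='mse')
    return model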
net.py source (project: speechless, author: JuliusKunze)
def loss_net(self) -> Model:
        """Returns the network that yields a loss given both input spectrograms and labels. Used for training."""
        input_batch = self._input_batch_input
        label_batch = Input(name=Wav2Letter.InputNames.label_batch, shape=(None,), dtype='int32')
        label_lengths = Input(name=Wav2Letter.InputNames.label_lengths, shape=(1,), dtype='int64')

        asg_transition_probabilities_variable = backend.variable(value=self.asg_transition_probabilities,
                                                                 name="asg_transition_probabilities")
        asg_initial_probabilities_variable = backend.variable(value=self.asg_initial_probabilities,
                                                              name="asg_initial_probabilities")
        # Since Keras doesn't currently support loss functions with extra parameters,
        # we define a custom Lambda layer yielding a single real-valued loss given the grapheme probabilities:
        loss_layer = Lambda(Wav2Letter._asg_lambda if self.use_asg else Wav2Letter._ctc_lambda,
                            name='asg_loss' if self.use_asg else 'ctc_loss',
                            output_shape=(1,),
                            arguments={"transition_probabilities": asg_transition_probabilities_variable,
                                       "initial_probabilities": asg_initial_probabilities_variable} if self.use_asg else None)

        # ([asg_transition_probabilities_variable, asg_initial_probabilities_variable] if self.use_asg else [])

        # This loss layer is placed atop the predictive network and provided with additional arguments,
        # namely the label batch and prediction/label sequence lengths:
        loss = loss_layer(
            [self.predictive_net(input_batch), label_batch, self._prediction_lengths_input, label_lengths])

        loss_net = Model(inputs=[input_batch, label_batch, self._prediction_lengths_input, label_lengths],
                         outputs=[loss])
        # Since loss is already calculated in the last layer of the net, we just pass through the results here.
        # The dummy labels have to be given to satisfy the Keras API.
        loss_net.compile(loss=lambda dummy_labels, ctc_loss: ctc_loss, optimizer=self.optimizer)
        return loss_net
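
Because the Lambda layer already computes the loss inside the graph, training the returned net only needs dummy targets of the right batch shape. A usage sketch with illustrative shapes, assuming a Wav2Letter instance named wav2letter exists (whether loss_net is a property or must be called as a method depends on code not shown here):

import numpy as np

spectrograms = np.random.rand(8, 100, 128)        # (batch, frames, features); shapes illustrative
labels = np.random.randint(0, 28, size=(8, 20))   # grapheme ids
prediction_lengths = np.full((8, 1), 50)
label_lengths = np.full((8, 1), 20)
dummy = np.zeros((8, 1))                          # ignored by the pass-through loss
net = wav2letter.loss_net
net.fit(x=[spectrograms, labels, prediction_lengths, label_lengths], y=dummy, epochs=1)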
net.py source (project: speechless, author: JuliusKunze)
def decoding_net(self):
        decoding_layer = Lambda(self._decode_lambda, name='ctc_decode')

        prediction_batch = self.predictive_net(self._input_batch_input)
        decoded = decoding_layer([prediction_batch, self._prediction_lengths_input])

        return Model(inputs=[self._input_batch_input, self._prediction_lengths_input], outputs=[decoded])
joint_optimisation.py source (project: ActiveBoundary, author: MiriamHu)
def create_joint_model(input_dim, init_w, init_b, gamma, weight_hinge, learning_rate, decay, regulariser=None):
    image_input = Input(shape=(input_dim,), dtype='float32', name='image_input')
    db_input = Input(shape=(input_dim,), dtype='float32', name="db_input")
    shared_layer = Dense(1, input_dim=input_dim, kernel_regularizer=regulariser, kernel_initializer='uniform',
                         activation="linear", use_bias=True, name='shared_layer')
    _ = shared_layer(image_input)
    _ = shared_layer(db_input)
    model = Model(inputs=[image_input, db_input], outputs=[shared_layer.get_output_at(0), shared_layer.get_output_at(1)])
    adam = Adam(lr=learning_rate)  # SGD should also work since the loss function is convex, but Adam converges faster.
    model.compile(optimizer=adam, loss=['hinge', 'mse'], loss_weights=[weight_hinge, gamma],
                  metrics=[my_accuracy, 'mse'])
    return model
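
A usage sketch for the factory above; init_w, init_b, and decay are accepted but unused in the visible snippet, and my_accuracy is a project-defined metric that must be in scope. All data and hyperparameter values here are illustrative:

import numpy as np

model = create_joint_model(input_dim=64, init_w=None, init_b=None, gamma=1.0,
                           weight_hinge=1.0, learning_rate=0.001, decay=0.0)
images = np.random.rand(32, 64)
boundary_points = np.random.rand(32, 64)
hinge_labels = np.random.choice([-1.0, 1.0], size=(32, 1))  # hinge loss expects +/-1 targets
boundary_targets = np.zeros((32, 1))  # the shared layer should map boundary points to ~0
model.fit([images, boundary_points], [hinge_labels, boundary_targets], epochs=1)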
rtn_test.py source (project: ikelos, author: braingineer)
def test1():
    seq_size = 10
    batch_size = 10 
    rnn_size = 1
    xin = Input(batch_shape=(batch_size, seq_size,1))
    xtop = Input(batch_shape=(batch_size, seq_size))
    xbranch, xsummary = RTTN(rnn_size, return_sequences=True)([xin, xtop])

    model = Model(input=[xin, xtop], output=[xbranch, xsummary])
    model.compile(loss='MSE', optimizer='SGD')
    data_gen = generate_data_batch(batch_size, seq_size)
    model.fit_generator(generator=data_gen, samples_per_epoch=1000, nb_epoch=100)
surgery.py source (project: ikelos, author: braingineer)
def crop(model, layer_or_tensor):
    if hasattr(layer_or_tensor, '_keras_history'):
        ins, outs = crop_to_tensor(model, layer_or_tensor)
    else:
        ins, outs = crop_to_layer(model, layer_or_tensor)
    return Model(ins, outs, preloaded_data=model.preloaded_data)
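
A usage sketch, with hypothetical names throughout: crop a larger ikelos model down to the sub-network ending at a layer named 'encoder':

# full_model is an existing ikelos Model; 'encoder' is an assumed layer name.
encoder_model = crop(full_model, full_model.get_layer('encoder'))
features = encoder_model.predict(x_batch)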
aSDAE.py source (project: ConvMF_V2.0, author: daicoolb)
def __init__(self,first_dimension,output_dimension,item_num,user_feature):

        self.maxlen=item_num
        self.maxfea=user_feature

        model_input_user_rating=Input(shape=[item_num],name='user_rating')
        model_input_user_sideinformation=Input(shape=(user_feature,),name='user_sideinformation')

        #model_input_user_rating=model_input_user_rating+0.5*np.random.normal(loc=0,scale=100,size=item_num)
        #model_input_user_sideinformation=model_input_user_sideinformation+0.5*np.random.normal(loc=0,scale=100,size=user_feature)


        model_input=concatenate([model_input_user_rating,model_input_user_sideinformation])

        encoder_1=Dense(first_dimension,activation='relu',name='encoder_1')(model_input)
        #encoder_conc=concatenate([encoder_1,model_input_user_sideinformation])

        encoder_2=Dense(output_dimension,activation='relu',name='user_matrix')(encoder_1)
        #decoder_conc=concatenate([encoder_2,model_input_user_sideinformation])

        decoder_3=Dense(first_dimension,activation='relu',name='decoder_1')(encoder_2)
        #decoder_conc=concatenate([decoder_3,model_input_user_sideinformation])

        model_output_user_rating=Dense(item_num,activation='sigmoid',name='output_model_rating')(decoder_3)
        model_output_user_sideinformation=Dense(user_feature,activation='sigmoid',name='output_model_side')(decoder_3)

        output_model=Model(inputs=[model_input_user_rating,model_input_user_sideinformation],outputs=[model_output_user_rating,model_output_user_sideinformation,encoder_2])
        output_model.compile(optimizer='rmsprop',loss={'output_model_rating':'mse','output_model_side':'mse','user_matrix':'mse'},loss_weights=[1,1,0])
        self.model=output_model
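
Because loss_weights=[1,1,0] zeroes out the encoder_2 ('user_matrix') term, a dummy target for that output is still required to satisfy the Keras API. A training sketch with illustrative shapes, assuming the class is named aSDAE as the filename suggests:

import numpy as np

asdae = aSDAE(first_dimension=128, output_dimension=32, item_num=1000, user_feature=50)
ratings = np.random.rand(64, 1000)
side_info = np.random.rand(64, 50)
dummy_latent = np.zeros((64, 32))  # zero-weighted output; target needed only for the API
asdae.model.fit([ratings, side_info], [ratings, side_info, dummy_latent], epochs=1)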
aSDAE.py source (project: PHDMF, author: daicoolb); the code is identical to the ConvMF_V2.0 snippet above, so it is not repeated here.
wavenet.py source (project: wavenet, author: basveeling)
def build_model(fragment_length, nb_filters, nb_output_bins, dilation_depth, nb_stacks, use_skip_connections,
                learn_all_outputs, _log, desired_sample_rate, use_bias, res_l2, final_l2):
    def residual_block(x):
        original_x = x
        # TODO: initialization, regularization?
        # Note: The AtrousConvolution1D with the 'causal' flag is implemented in github.com/basveeling/keras#@wavenet.
        tanh_out = CausalAtrousConvolution1D(nb_filters, 2, atrous_rate=2 ** i, border_mode='valid', causal=True,
                                             bias=use_bias,
                                             name='dilated_conv_%d_tanh_s%d' % (2 ** i, s), activation='tanh',
                                             W_regularizer=l2(res_l2))(x)
        sigm_out = CausalAtrousConvolution1D(nb_filters, 2, atrous_rate=2 ** i, border_mode='valid', causal=True,
                                             bias=use_bias,
                                             name='dilated_conv_%d_sigm_s%d' % (2 ** i, s), activation='sigmoid',
                                             W_regularizer=l2(res_l2))(x)
        x = layers.Merge(mode='mul', name='gated_activation_%d_s%d' % (i, s))([tanh_out, sigm_out])

        res_x = layers.Convolution1D(nb_filters, 1, border_mode='same', bias=use_bias,
                                     W_regularizer=l2(res_l2))(x)
        skip_x = layers.Convolution1D(nb_filters, 1, border_mode='same', bias=use_bias,
                                      W_regularizer=l2(res_l2))(x)
        res_x = layers.Merge(mode='sum')([original_x, res_x])
        return res_x, skip_x

    input = Input(shape=(fragment_length, nb_output_bins), name='input_part')
    out = input
    skip_connections = []
    out = CausalAtrousConvolution1D(nb_filters, 2, atrous_rate=1, border_mode='valid', causal=True,
                                    name='initial_causal_conv')(out)
    for s in range(nb_stacks):
        for i in range(0, dilation_depth + 1):
            out, skip_out = residual_block(out)
            skip_connections.append(skip_out)

    if use_skip_connections:
        out = layers.Merge(mode='sum')(skip_connections)
    out = layers.Activation('relu')(out)
    out = layers.Convolution1D(nb_output_bins, 1, border_mode='same',
                               W_regularizer=l2(final_l2))(out)
    out = layers.Activation('relu')(out)
    out = layers.Convolution1D(nb_output_bins, 1, border_mode='same')(out)

    if not learn_all_outputs:
        raise DeprecationWarning('Learning on just all outputs is wasteful, now learning only inside receptive field.')
        out = layers.Lambda(lambda x: x[:, -1, :], output_shape=(out._keras_shape[-1],))(
            out)  # Based on gif in deepmind blog: take last output?

    out = layers.Activation('softmax', name="output_softmax")(out)
    model = Model(input, out)

    receptive_field, receptive_field_ms = compute_receptive_field()

    _log.info('Receptive Field: %d (%dms)' % (receptive_field, int(receptive_field_ms)))
    return model
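
compute_receptive_field is not shown in this snippet; above it is called without arguments, so the real helper presumably pulls its parameters from the experiment configuration. A standalone sketch of the same computation for filter-width-2 dilated causal convolutions, derived from the architecture above rather than copied from the project:

def compute_receptive_field(desired_sample_rate, dilation_depth, nb_stacks):
    # The initial width-2 causal conv contributes 2 samples; each stack of width-2
    # convs with dilations 1, 2, ..., 2**dilation_depth adds 2**(dilation_depth + 1) - 1.
    receptive_field = nb_stacks * (2 ** (dilation_depth + 1) - 1) + 2
    receptive_field_ms = receptive_field * 1000.0 / desired_sample_rate
    return receptive_field, receptive_field_ms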
classifier.py source (project: bisemantic, author: wpm)
def create(cls, classes, maximum_tokens, embedding_size, lstm_units, dropout, bidirectional):
        """
        Create a model that labels semantic relationships between text pairs.

        The text pairs are passed in as two aligned matrices of size
        (batch size, maximum embedding tokens, embedding size). They are generated by TextPairEmbeddingGenerator.

        :param classes: the number of distinct classes to categorize
        :type classes: int
        :param maximum_tokens: maximum number of embedded tokens
        :type maximum_tokens: int
        :param embedding_size: size of the embedding vector
        :type embedding_size: int
        :param lstm_units: number of hidden units in the shared LSTM
        :type lstm_units: int
        :param dropout: dropout rate or None for no dropout
        :type dropout: float or None
        :param bidirectional: should the shared LSTM be bidirectional?
        :type bidirectional: bool
        :return: the created model
        :rtype: TextPairClassifier
        """
        # Create the model geometry.
        input_shape = (maximum_tokens, embedding_size)
        # Input two sets of aligned text pairs.
        input_1 = Input(input_shape)
        input_2 = Input(input_shape)
        # Apply the same LSTM to each.
        if bidirectional:
            lstm = Bidirectional(LSTM(lstm_units), name="lstm")
        else:
            lstm = LSTM(lstm_units, name="lstm")
        r1 = lstm(input_1)
        r2 = lstm(input_2)
        # Concatenate the embeddings with their product and squared difference.
        p = multiply([r1, r2])
        negative_r2 = Lambda(lambda x: -x)(r2)
        d = add([r1, negative_r2])
        q = multiply([d, d])
        v = [r1, r2, p, q]
        lstm_output = concatenate(v)
        if dropout is not None:
            lstm_output = Dropout(dropout, name="dropout")(lstm_output)
        # A single-layer perceptron maps the concatenated vector to the labels. It has a number of hidden states equal
        # to the square root of the length of the concatenated vector.
        m = sum(t.shape[1].value for t in v)
        perceptron = Dense(math.floor(math.sqrt(m)), activation="relu")(lstm_output)
        logistic_regression = Dense(classes, activation="softmax", name="softmax")(perceptron)
        model = Model([input_1, input_2], logistic_regression, "Text pair classifier")
        model.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"])
        return cls(model)
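
A usage sketch for the factory method above, assuming the wrapper exposes the underlying Keras model as a model attribute (not shown in this snippet); shapes and hyperparameters are illustrative:

import numpy as np

classifier = TextPairClassifier.create(classes=3, maximum_tokens=40, embedding_size=300,
                                       lstm_units=128, dropout=0.5, bidirectional=True)
pairs_a = np.random.rand(16, 40, 300)
pairs_b = np.random.rand(16, 40, 300)
y = np.random.randint(0, 3, size=(16, 1))  # sparse_categorical_crossentropy takes class indices
classifier.model.fit([pairs_a, pairs_b], y, epochs=1)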

