Python models() example source code

main.py (project: MatchZoo, author: faneshion)
import argparse
import json

def main(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument('--phase', default='train', help='Phase: can be "train" or "predict"; the default is "train".')
    parser.add_argument('--model_file', default='./models/arci.config', help='Model_file: MatchZoo model file for the chosen model.')
    args = parser.parse_args()
    model_file = args.model_file
    with open(model_file, 'r') as f:
        config = json.load(f)
    phase = args.phase
    if phase == 'train':
        train(config)
    elif phase == 'predict':
        predict(config)
    else:
        print('Phase Error: expected "train" or "predict".')
    return
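
A minimal entry-point sketch for running this main() from the shell; it assumes train() and predict() are defined elsewhere in main.py, as in the MatchZoo source this excerpt comes from:

import sys

if __name__ == '__main__':
    # e.g.  python main.py --phase train --model_file ./models/arci.config
    main(sys.argv)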
BiLSTM.py (project: emnlp2017-bilstm-cnn-crf, author: UKPLab)
def loadModel(self, modelPath):
        import h5py
        import json
        import keras
        from neuralnets.keraslayers.ChainCRF import create_custom_objects

        # load the model with the custom ChainCRF layer registered
        model = keras.models.load_model(modelPath, custom_objects=create_custom_objects())

        # mappings and optional settings are stored as JSON strings in the HDF5 attributes
        with h5py.File(modelPath, 'r') as f:
            mappings = json.loads(f.attrs['mappings'])
            if 'additionalFeatures' in f.attrs:
                self.additionalFeatures = json.loads(f.attrs['additionalFeatures'])

            if 'maxCharLen' in f.attrs:
                self.maxCharLen = int(f.attrs['maxCharLen'])

        self.model = model
        self.setMappings(None, mappings)
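
The loader above expects the mapping tables to live as JSON strings in the HDF5 attributes. A minimal sketch of the write side, assuming only the standard h5py/json APIs (the repo's actual saveModel may differ):

import json
import h5py

def save_mappings(model_path, mappings, additional_features=None, max_char_len=None):
    # Hypothetical helper: after model.save(model_path), append the lookup
    # tables as JSON strings to the HDF5 file's attributes.
    with h5py.File(model_path, 'a') as f:
        f.attrs['mappings'] = json.dumps(mappings)
        if additional_features is not None:
            f.attrs['additionalFeatures'] = json.dumps(additional_features)
        if max_char_len is not None:
            f.attrs['maxCharLen'] = max_char_len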
basic_model.py (project: Convolution-neural-networks-made-easy-with-keras, author: mingruimingrui)
def train(model, X_train, y_train, X_test, y_test):
    sys.stdout.write('Training model\n\n')
    sys.stdout.flush()

    # train each iteration individually to back up current state
    # safety measure against potential crashes
    epoch_count = 0
    while epoch_count < epoch:
        epoch_count += 1
        sys.stdout.write('Epoch count: ' + str(epoch_count) + '\n')
        sys.stdout.flush()
        model.fit(X_train, y_train, batch_size=batch_size,
                  nb_epoch=1, validation_data=(X_test, y_test))
        sys.stdout.write('Epoch {} done, saving model to file\n\n'.format(epoch_count))
        sys.stdout.flush()
        model.save_weights('./models/convnet_weights.h5')

    return model
improved_model.py (project: Convolution-neural-networks-made-easy-with-keras, author: mingruimingrui)
def train(model, X_train, y_train, X_test, y_test):
    sys.stdout.write('Training model with data augmentation\n\n')
    sys.stdout.flush()

    datagen = image_generator()
    datagen.fit(X_train)

    # train each iteration individually to back up current state
    # safety measure against potential crashes
    epoch_count = 0
    while epoch_count < epoch:
        epoch_count += 1
        sys.stdout.write('Epoch count: ' + str(epoch_count) + '\n')
        sys.stdout.flush()
        model.fit_generator(datagen.flow(X_train, y_train, batch_size=batch_size),
                            steps_per_epoch=len(X_train) // batch_size,
                            epochs=1,
                            validation_data=(X_test, y_test))
        sys.stdout.write('Epoch {} done, saving model to file\n\n'.format(epoch_count))
        sys.stdout.flush()
        model.save_weights('./models/convnet_improved_weights.h5')

    return model
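
The image_generator() helper used above is not part of this excerpt. A plausible definition with Keras' ImageDataGenerator, where every augmentation setting is an assumption rather than the repo's actual values:

from keras.preprocessing.image import ImageDataGenerator

def image_generator():
    # assumed augmentation settings for illustration only
    return ImageDataGenerator(
        rotation_range=15,
        width_shift_range=0.1,
        height_shift_range=0.1,
        horizontal_flip=True)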
GA.py (project: NetworkCompress, author: luzai)
def train_process(self):
        client = GAClient.Client()
        for model in self.population.values():
            # if getattr(model, 'parent', None) is not None:
            # having a parent means mutation and weight changes, so the weights need to be saved
            keras.models.save_model(model.model, model.config.model_path)
            model.graph.save_params(model.config.output_path+'/graph.json')

            kwargs = dict(
                name=model.config.name,
                epochs=model.config.epochs,
                verbose=model.config.verbose,
                limit_data=model.config.limit_data,
                dataset_type=model.config.dataset_type
            )
            if parallel:
                client.run_self(kwargs)
            else:
                name, score = GAClient.run(**kwargs)
                setattr(self.population[name], 'score', score)

        if parallel:
            client.wait()
            for name, score in client.scores.items():
                setattr(self.population[name], 'score', score)
keras_conversion.py (project: deeplift, author: kundajelab)
def load_keras_model(weights, yaml=None, json=None,
                     normalise_conv_for_one_hot_encoded_input=False,
                     axis_of_normalisation=None,
                     name_of_conv_layer_to_normalise=None): 
    if (normalise_conv_for_one_hot_encoded_input):
        assert axis_of_normalisation is not None,\
         "specify axis of normalisation for normalising one-hot encoded input"
    assert yaml is not None or json is not None,\
     "either yaml or json must be specified"
    assert yaml is None or json is None,\
     "only one of yaml or json must be specified"
    if (yaml is not None):
        from keras.models import model_from_yaml 
        model = model_from_yaml(open(yaml).read()) 
    else:
        from keras.models import model_from_json 
        model = model_from_json(open(json).read()) 
    model.load_weights(weights) 
    if (normalise_conv_for_one_hot_encoded_input):
        mean_normalise_first_conv_layer_weights(
         model,
         axis_of_normalisation=axis_of_normalisation,
         name_of_conv_layer_to_normalise=name_of_conv_layer_to_normalise)
    return model
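
A hedged usage sketch for the loader above; the file names are placeholders, and exactly one of yaml/json may be given, per the assertions:

# hypothetical file names for illustration only
model = load_keras_model('weights.h5', json='architecture.json')

# for one-hot encoded sequence input, the first conv layer can be
# mean-normalised; the axis value here is an assumption
model = load_keras_model('weights.h5', json='architecture.json',
                         normalise_conv_for_one_hot_encoded_input=True,
                         axis_of_normalisation=1)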
test_recurrent_stress_tests.py (project: coremltools, author: apple)
def test_initial_state_GRU(self):
        data = np.random.rand(1, 1, 2)

        model = keras.models.Sequential()
        model.add(keras.layers.GRU(5, input_shape=(1, 2), batch_input_shape=[1, 1, 2], stateful=True))
        model.get_layer(index=1).reset_states()

        coreml_model = keras_converter.convert(model=model, input_names='data', output_names='output')
        keras_output_1 = model.predict(data)
        coreml_full_output_1 = coreml_model.predict({'data': data})
        coreml_output_1 = coreml_full_output_1['output']
        coreml_output_1 = np.expand_dims(coreml_output_1, 1)

        np.testing.assert_array_almost_equal(coreml_output_1.T, keras_output_1)

        hidden_state = np.random.rand(1, 5)
        model.get_layer(index=1).reset_states(states=hidden_state)
        coreml_model = keras_converter.convert(model=model, input_names='data', output_names='output')
        spec = coreml_model.get_spec()
        keras_output_2 = model.predict(data)
        coreml_full_output_2 = coreml_model.predict({'data': data, spec.description.input[1].name: hidden_state[0]})
        coreml_output_2 = coreml_full_output_2['output']
        coreml_output_2 = np.expand_dims(coreml_output_2, 1)
        np.testing.assert_array_almost_equal(coreml_output_2.T, keras_output_2)
test_recurrent_stress_tests.py (project: coremltools, author: apple)
def test_initial_state_SimpleRNN(self):
        data = np.random.rand(1, 1, 2)
        model = keras.models.Sequential()
        model.add(keras.layers.SimpleRNN(5, input_shape=(1, 2), batch_input_shape=[1, 1, 2], stateful=True))
        model.get_layer(index=1).reset_states()
        coreml_model = keras_converter.convert(model=model, input_names='data', output_names='output')
        keras_output_1 = model.predict(data)
        coreml_full_output_1 = coreml_model.predict({'data': data})
        coreml_output_1 = coreml_full_output_1['output']
        coreml_output_1 = np.expand_dims(coreml_output_1, 1)
        np.testing.assert_array_almost_equal(coreml_output_1.T, keras_output_1)

        hidden_state = np.random.rand(1, 5)
        model.get_layer(index=1).reset_states(states=hidden_state)
        coreml_model = keras_converter.convert(model=model, input_names='data', output_names='output')
        spec = coreml_model.get_spec()
        keras_output_2 = model.predict(data)
        coreml_full_output_2 = coreml_model.predict({'data': data, spec.description.input[1].name: hidden_state[0]})
        coreml_output_2 = coreml_full_output_2['output']
        coreml_output_2 = np.expand_dims(coreml_output_2, 1)
        np.testing.assert_array_almost_equal(coreml_output_2.T, keras_output_2)
alt_i2v.py (project: alt-i2v, author: GINK03)
def pred():
  tag_index = pickle.loads(open('tag_index.pkl', 'rb').read())
  index_tag = { index:tag for tag, index in tag_index.items() }
  name_img150 = []
  for name in filter(lambda x: '.jpg' in x, sys.argv):
    img = Image.open('{name}'.format(name=name))
    img = img.convert('RGB')
    img150 = np.array(img.resize((150, 150)))
    name_img150.append( (name, img150) )
  model = load_model(sorted(glob.glob('models/*.model'))[-1]) 
  for name, img150 in name_img150:
    result = model.predict(np.array([img150]) )
    result = result.tolist()[0]
    result = { i:w for i,w in enumerate(result)}
    for i,w in sorted(result.items(), key=lambda x:x[1]*-1)[:30]:
      print("{name} tag={tag} prob={prob}".format(name=name, tag=index_tag[i], prob=w) )
alt_i2v_V2.py (project: alt-i2v, author: GINK03)
def train():
  for i in range(500):
    print('now iter {} load pickled dataset...'.format(i))
    Xs = []
    ys = []
    names = glob.glob('../dataset/*.pkl')
    random.shuffle( names )
    for idx, name in enumerate(names):
      try:
        X,y = pickle.loads(open(name,'rb').read() ) 
      except EOFError as e:
        continue
      if idx%100 == 0:
        print('now scan iter', idx)
      if idx >= 15000:
        break
      Xs.append( X )
      ys.append( y )

    Xs = np.array( Xs )
    ys = np.array( ys )
    model.fit(Xs, ys, epochs=1 )
    print('now iter {} '.format(i))
    model.save_weights('models/{:09d}.h5'.format(i))
alt_i2v_V2.py (project: alt-i2v, author: GINK03)
def pred():
  """
  tag_index = pickle.loads(open('tag_index.pkl', 'rb').read())
  index_tag = { index:tag for tag, index in tag_index.items() }
  name_img150 = []
  for name in filter(lambda x: '.jpg' in x, sys.argv):
    img = Image.open('{name}'.format(name=name))
    img = img.convert('RGB')
    img150 = np.array(img.resize((150, 150)))
    name_img150.append( (name, img150) )
  """
  model.load_weights(sorted(glob.glob('models/*.h5'))[-1]) 

  tag_index = pickle.loads( open('make_datapair/tag_index.pkl', 'rb').read() )
  index_tag = { index:tag for tag,index in tag_index.items() }


  for name in glob.glob('./make_datapair/dataset/*'):
    X, y = pickle.loads( open(name,'rb').read() )
    result = model.predict(np.array([X]) )
    result = result.tolist()[0]
    result = { i:w for i,w in enumerate(result)}
    for i,w in sorted(result.items(), key=lambda x:x[1]*-1)[:30]:
      print("{name} tag={tag} prob={prob}".format(name=name, tag=index_tag[i], prob=w) )
dqn_keras.py (project: Attention-DQN, author: chasewind007)
def compile(self, optimizer = None, loss_func = None):
        """Setup all of the TF graph variables/ops.

        This is inspired by the compile method on the
        keras.models.Model class.

        This is the place to create the target network, set up the
        loss function and any placeholders.
        """
        if loss_func is None:
            loss_func = mean_huber_loss
            # loss_func = 'mse'
        if optimizer is None:
            optimizer = Adam(lr = self.learning_rate)
            # optimizer = RMSprop(lr=0.00025)
        with tf.variable_scope("Loss"):
            state = Input(shape = (self.frame_height, self.frame_width, self.num_frames) , name = "states")
            action_mask = Input(shape = (self.num_actions,), name = "actions")
            qa_value = self.q_network(state)
            qa_value = merge([qa_value, action_mask], mode = 'mul', name = "multiply")
            qa_value = Lambda(lambda x: tf.reduce_sum(x, axis=1, keep_dims = True), name = "sum")(qa_value)

        self.final_model = Model(inputs = [state, action_mask], outputs = qa_value)
        self.final_model.compile(loss=loss_func, optimizer=optimizer)
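
mean_huber_loss is referenced but not defined in this excerpt. A standard Huber-loss sketch that would fit the call site, written against the TF1-era API used above (an assumption, not necessarily the repo's exact definition):

import tensorflow as tf

def mean_huber_loss(y_true, y_pred, delta=1.0):
    # quadratic inside |error| <= delta, linear outside
    err = tf.abs(y_true - y_pred)
    quad = tf.minimum(err, delta)
    return tf.reduce_mean(0.5 * tf.square(quad) + delta * (err - quad))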
models.py (project: cervantes, author: textclf)
def load_model(model_spec_file, model_weights_file):
        from json import dumps, load
        import keras.models

        params = load(open(model_spec_file, "r"))

        model = keras.models.model_from_json(dumps(params['model']))
        binary = params['binary']
        optimizer = params['optimizer']

        model.load_weights(model_weights_file)
        if binary:
            model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=["accuracy"])
        else:
            model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=["accuracy"])

        lc = LanguageClassifier(model)
        lc.binary = binary
        return lc
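
A hypothetical write-side counterpart to this loader (not taken from the cervantes repo): it persists the architecture as parsed JSON, the binary/optimizer flags, and the weights, matching what load_model reads back:

import json

def save_model(lc, model_spec_file, model_weights_file):
    params = {
        'model': json.loads(lc.model.to_json()),  # architecture as a dict
        'binary': lc.binary,
        'optimizer': 'adam',  # assumed; the real class presumably stores this
    }
    with open(model_spec_file, 'w') as f:
        json.dump(params, f)
    lc.model.save_weights(model_weights_file)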
trainer.py (project: deepanalytics_compe26_benchmark, author: takagiwa-ss)
def resnet(repetition=2, k=1):
    '''Wide Residual Network (with a slight modification)
    depth == repetition*6 + 2
    '''
    from keras.models import Model
    from keras.layers import Input, Dense, Flatten, AveragePooling2D
    from keras.regularizers import l2

    input_shape = (1, _img_len, _img_len)
    output_dim = len(_columns)

    x = Input(shape=input_shape)

    z = conv2d(nb_filter=8, k_size=5, downsample=True)(x)        # out_shape ==    8, _img_len/ 2, _img_len/ 2
    z = bn_lrelu(0.01)(z)
    z = residual_block(nb_filter=k*16, repetition=repetition)(z) # out_shape == k*16, _img_len/ 4, _img_len/ 4
    z = residual_block(nb_filter=k*32, repetition=repetition)(z) # out_shape == k*32, _img_len/ 8, _img_len/ 8
    z = residual_block(nb_filter=k*64, repetition=repetition)(z) # out_shape == k*64, _img_len/16, _img_len/16
    z = AveragePooling2D((_img_len/16, _img_len/16))(z)
    z = Flatten()(z)
    z = Dense(output_dim=output_dim, activation='sigmoid', W_regularizer=l2(_Wreg_l2), init='zero')(z)

    return Model(input=x, output=z)
training_base.py (project: DeepJet, author: mstoye)
def loadModel(self, filename):
        #import h5py
        #f = h5py.File(filename, 'r+')
        #del f['optimizer_weights']
        from keras.models import load_model
        self.keras_model = load_model(filename, custom_objects=global_loss_list)
        self.optimizer = self.keras_model.optimizer
        self.compiled = True
evaluate_model.py (project: Sacred_Deep_Learning, author: AAbercrombie0492)
def define_model(weights_path):
    '''
    Define model structure with weights.
    '''
    from resnet50 import ResNet50
    from keras.models import Model
    from keras.layers import Dense, GlobalAveragePooling2D

    resnet50_model = ResNet50()
    fc1000 = resnet50_model.get_layer('fc1000').output
    final_softmax = Dense(output_dim=2, activation='softmax')(fc1000)
    resnet50_finetune_1skip = Model(input=resnet50_model.input, output=final_softmax)
    resnet50_finetune_1skip.load_weights(weights_path)

    resnet50_finetune_1skip.compile(loss='categorical_crossentropy',
                                    optimizer='nadam',
                                    metrics=['accuracy'])

    return resnet50_finetune_1skip
example.py (project: residual_block_keras, author: keunwoochoi)
def get_residual_model(is_mnist=True, img_channels=1, img_rows=28, img_cols=28):
    model = keras.models.Sequential()
    first_layer_channel = 128
    if is_mnist: # size to be changed to 32,32
        model.add(ZeroPadding2D((2,2), input_shape=(img_channels, img_rows, img_cols))) # resize (28,28)-->(32,32)
        # the first conv 
        model.add(Convolution2D(first_layer_channel, 3, 3, border_mode='same'))
    else:
        model.add(Convolution2D(first_layer_channel, 3, 3, border_mode='same', input_shape=(img_channels, img_rows, img_cols)))

    model.add(Activation('relu'))
    # [residual-based Conv layers]
    residual_blocks = design_for_residual_blocks(num_channel_input=first_layer_channel)
    model.add(residual_blocks)
    model.add(BatchNormalization(axis=1))
    model.add(Activation('relu'))
    # [Classifier]    
    model.add(Flatten())
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))
    # [END]
    return model
utils_keras.py (project: cleverhans, author: tensorflow)
def fprop(self, x):
        """
        Exposes all the layers of the model returned by get_layer_names.
        :param x: A symbolic representation of the network input
        :return: A dictionary mapping layer names to the symbolic
                 representation of their output.
        """
        from keras.models import Model as KerasModel

        if self.keras_model is None:
            # Get the input layer
            new_input = self.model.get_input_at(0)

            # Make a new model that returns each of the layers as output
            out_layers = [x_layer.output for x_layer in self.model.layers]
            self.keras_model = KerasModel(new_input, out_layers)

        # and get the outputs for that model on the input x
        outputs = self.keras_model(x)

        # Keras returns a list only when there are multiple outputs; if the
        # model is just one layer, wrap the single output in a list
        if len(self.model.layers) == 1:
            outputs = [outputs]

        # compute the dict to return
        fprop_dict = dict(zip(self.get_layer_names(), outputs))

        return fprop_dict
keras_future.py (project: vinci, author: Phylliade)
def Model(input, output, **kwargs):
    if int(keras.__version__.split('.')[0]) >= 2:
        return keras.models.Model(inputs=input, outputs=output, **kwargs)
    else:
        return keras.models.Model(input=input, output=output, **kwargs)
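
A minimal usage sketch: the wrapper hides the Keras 1 to Keras 2 rename of input=/output= into inputs=/outputs=, so the same call builds under either major version:

from keras.layers import Input, Dense

inp = Input(shape=(10,))
out = Dense(1, activation='sigmoid')(inp)
model = Model(inp, out)  # dispatches on keras.__version__ internally
model.compile(optimizer='adam', loss='binary_crossentropy')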
speaker_embedding.py (project: pyannote-audio, author: pyannote)
def __init__(self, experiment_dir, db_yml=None):

        super(SpeakerEmbedding, self).__init__(
            experiment_dir, db_yml=db_yml)

        # architecture
        if 'architecture' in self.config_:
            architecture_name = self.config_['architecture']['name']
            models = __import__('pyannote.audio.embedding.models',
                                fromlist=[architecture_name])
            Architecture = getattr(models, architecture_name)
            self.architecture_ = Architecture(
                **self.config_['architecture'].get('params', {}))

        # approach
        if 'approach' in self.config_:
            approach_name = self.config_['approach']['name']
            approaches = __import__('pyannote.audio.embedding.approaches',
                                    fromlist=[approach_name])
            Approach = getattr(approaches, approach_name)
            self.approach_ = Approach(
                **self.config_['approach'].get('params', {}))

    # (5, None, None, False) ==> '5'
    # (5, 1, None, False) ==> '1-5'
    # (5, None, 2, False) ==> '5+2'
    # (5, 1, 2, False) ==> '1-5+2'
    # (5, None, None, True) ==> '5x'
keras_item2vec.py (project: MovieTaster-Open, author: lujiaying)
def skipgram_model(vocab_size, embedding_dim=100, paradigm='Functional'):
    # Sequential paradigm
    if paradigm == 'Sequential':
        target = Sequential()
        target.add(Embedding(vocab_size, embedding_dim, input_length=1))
        context = Sequential()
        context.add(Embedding(vocab_size, embedding_dim, input_length=1))

        # merge the pivot and context models
        model = Sequential()
        model.add(Merge([target, context], mode='dot'))
        model.add(Reshape((1,), input_shape=(1,1)))
        model.add(Activation('sigmoid'))
        model.compile(optimizer='adam', loss='binary_crossentropy')
        return model

    # Functional paradigm
    elif paradigm == 'Functional':
        target = Input(shape=(1,), name='target')
        context = Input(shape=(1,), name='context')
        #print target.shape, context.shape
        shared_embedding = Embedding(vocab_size, embedding_dim, input_length=1, name='shared_embedding')
        embedding_target = shared_embedding(target)
        embedding_context = shared_embedding(context)
        #print embedding_target.shape, embedding_context.shape

        merged_vector = dot([embedding_target, embedding_context], axes=-1)
        reshaped_vector = Reshape((1,), input_shape=(1,1))(merged_vector)
        #print merged_vector.shape
        prediction = Dense(1, input_shape=(1,), activation='sigmoid')(reshaped_vector)
        #print prediction.shape

        model = Model(inputs=[target, context], outputs=prediction)
        model.compile(optimizer='adam', loss='binary_crossentropy')
        return model

    else:
        print('paradigm error: expected "Sequential" or "Functional"')
        return None
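
Training pairs for the functional variant can be generated with keras.preprocessing.sequence.skipgrams, which yields (target, context) couples with 1/0 labels via negative sampling. A sketch with a made-up item-id sequence:

import numpy as np
from keras.preprocessing.sequence import skipgrams

vocab_size = 1000                                 # assumed vocabulary size
seq = np.random.randint(1, vocab_size, size=200)  # fake item-id sequence
couples, labels = skipgrams(seq, vocabulary_size=vocab_size, window_size=4)
targets, contexts = zip(*couples)

model = skipgram_model(vocab_size, embedding_dim=100, paradigm='Functional')
model.fit([np.array(targets).reshape(-1, 1), np.array(contexts).reshape(-1, 1)],
          np.array(labels), epochs=1)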
keras_item2vec.py (project: MovieTaster-Open, author: lujiaying)
def cbow_base_model(dict_size, emb_size=100, context_window_size=4):
    model = keras.models.Sequential()
    model.add(Embedding(dict_size, emb_size,
        input_length=context_window_size,
        embeddings_initializer=keras.initializers.TruncatedNormal(mean=0.0, stddev=0.2),
        ))
    model.add(Lambda(lambda x: K.mean(x, axis=1), output_shape=(emb_size,)))
    model.add(Dense(dict_size))
    model.add(Activation('softmax'))  # TODO: use NCE

    sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='categorical_crossentropy')
    return model
keras_item2vec.py (project: MovieTaster-Open, author: lujiaying)
def train_cbow_base_model():
    min_word_freq = 5
    word_dict = process.get_movie_name_id_dict(min_word_freq=min_word_freq)
    dict_size = len(word_dict)
    emb_size = 100
    context_window_size = 4
    epochs = 20
    batch_size = 128

    model = cbow_base_model(dict_size, emb_size, context_window_size)
    for epoch_id in range(epochs):
        # train by batch
        batch_id = 0
        x_batch = []
        y_batch = []
        for movie_ids in process.shuffle(process.reader_creator(word_dict, ngram=context_window_size+1), 10000)():
            batch_id += 1
            if batch_id % (batch_size*50) == 0:
                # Print evaluate log
                score = model.evaluate(np.array(x_batch),
                    keras.utils.to_categorical(y_batch, num_classes=dict_size))
                logger.info('[epoch #%d] batch #%d, train loss:%s' % (epoch_id, batch_id, score))
            if batch_id % batch_size == 0:
                # Convert labels to categorical one-hot encoding
                model.train_on_batch(np.array(x_batch),
                        keras.utils.to_categorical(y_batch, num_classes=dict_size))
                x_batch = []
                y_batch = []
            x = np.array(movie_ids[:context_window_size])
            y = movie_ids[-1]
            x_batch.append(x)
            y_batch.append(y)
    logger.info('model train done')
    # store word embedding
    with open('./models/keras_0804_09_cbow', 'w') as fwrite:
        for idx, vec in enumerate(model.layers[0].get_weights()[0].tolist()):
            fwrite.write('%d %s\n' % (idx, ' '.join([str(_) for _ in vec])))
GA.py (project: NetworkCompress, author: luzai)
def make_init_model(self):
        models = []

        input_data = Input(shape=self.gl_config.input_shape)
        import random
        init_model_index = random.randint(1, 4)
        init_model_index = 1  # fixed to variant 1 for now
        if init_model_index == 1:  # one conv layer with kernel num = 128
            stem_conv_1 = Conv2D(128, 3, padding='same', name='conv2d1')(input_data)
            stem_conv_1 = PReLU()(stem_conv_1)

        elif init_model_index == 2:  # two conv layers with kernel num = 128
            stem_conv_0 = Conv2D(128, 3, padding='same', name='conv2d1')(input_data)
            stem_conv_0 = PReLU()(stem_conv_0)
            stem_conv_1 = Conv2D(128, 3, padding='same', name='conv2d2')(stem_conv_0)
            stem_conv_1 = PReLU()(stem_conv_1)

        elif init_model_index == 3:  # one conv layer with a wider kernel num = 256
            stem_conv_1 = Conv2D(256, 3, padding='same', name='conv2d1')(input_data)
            stem_conv_1 = PReLU()(stem_conv_1)

        elif init_model_index == 4:  # two conv layers with a wider kernel num = 256
            stem_conv_0 = Conv2D(256, 3, padding='same', name='conv2d1')(input_data)
            stem_conv_0 = PReLU()(stem_conv_0)
            stem_conv_1 = Conv2D(256, 3, padding='same', name='conv2d2')(stem_conv_0)
            stem_conv_1 = PReLU()(stem_conv_1)
        import keras
        stem_conv_1 = keras.layers.MaxPooling2D(name='maxpooling2d1')(stem_conv_1)
        stem_conv_1 = Conv2D(self.gl_config.nb_class, 3, padding='same', name='conv2d3')(stem_conv_1)
        stem_global_pooling_1 = GlobalMaxPooling2D(name='globalmaxpooling2d1')(stem_conv_1)
        stem_softmax_1 = Activation('softmax', name='activation1')(stem_global_pooling_1)

        model = Model(inputs=input_data, outputs=stem_softmax_1)

        return model
Net2Net.py (project: NetworkCompress, author: luzai)
def copy_model(self, model, config):
        from keras.utils.generic_utils import get_custom_objects
        from Model import IdentityConv, GroupIdentityConv

        get_custom_objects()['IdentityConv'] = IdentityConv
        get_custom_objects()['GroupIdentityConv'] = GroupIdentityConv

        new_model = MyModel(config, model.graph.copy(), keras.models.load_model(model.config.model_path))
        keras.models.save_model(new_model.model, new_model.config.model_path)
        return new_model
keras.py (project: donkey, author: wroscoe)
def load(self, model_path):
        self.model = keras.models.load_model(model_path)
keras.py (project: donkey, author: wroscoe)
def default_categorical():
    from keras.layers import Input, Dense
    from keras.models import Model
    from keras.layers import Convolution2D, MaxPooling2D, Reshape, BatchNormalization
    from keras.layers import Activation, Dropout, Flatten

    img_in = Input(shape=(120, 160, 3), name='img_in')                      # First layer, input layer; shape comes from camera.py resolution, RGB
    x = img_in
    x = Convolution2D(24, (5,5), strides=(2,2), activation='relu')(x)       # 24 features, 5x5 pixel kernel (convolution) window, 2wx2h stride, relu activation
    x = Convolution2D(32, (5,5), strides=(2,2), activation='relu')(x)       # 32 features, 5x5 kernel window, 2wx2h stride, relu activation
    x = Convolution2D(64, (5,5), strides=(2,2), activation='relu')(x)       # 64 features, 5x5 kernel window, 2wx2h stride, relu
    x = Convolution2D(64, (3,3), strides=(2,2), activation='relu')(x)       # 64 features, 3x3 kernel window, 2wx2h stride, relu
    x = Convolution2D(64, (3,3), strides=(1,1), activation='relu')(x)       # 64 features, 3x3 kernel window, 1wx1h stride, relu

    # Possibly add MaxPooling (will make it less sensitive to position in image). Camera angle is fixed, so it may not be needed

    x = Flatten(name='flattened')(x)                                        # Flatten to 1D (fully connected)
    x = Dense(100, activation='relu')(x)                                    # Classify the data into 100 features, make all negatives 0
    x = Dropout(.1)(x)                                                      # Randomly drop out (turn off) 10% of the neurons (prevents overfitting)
    x = Dense(50, activation='relu')(x)                                     # Classify the data into 50 features, make all negatives 0
    x = Dropout(.1)(x)                                                      # Randomly drop out 10% of the neurons (prevents overfitting)
    # categorical output of the angle
    angle_out = Dense(15, activation='softmax', name='angle_out')(x)        # 15 angle bins with softmax probabilities; the best bin is picked from the 0.0-1.0 scores

    # continuous output of the throttle
    throttle_out = Dense(1, activation='relu', name='throttle_out')(x)      # Reduce to 1 number, positive only

    model = Model(inputs=[img_in], outputs=[angle_out, throttle_out])
    model.compile(optimizer='adam',
                  loss={'angle_out': 'categorical_crossentropy', 
                        'throttle_out': 'mean_absolute_error'},
                  loss_weights={'angle_out': 0.9, 'throttle_out': .001})

    return model
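
The 15-way softmax encodes the steering angle as bins, so inference needs an unbinning step. A hypothetical helper (donkey ships its own utilities; this sketch only illustrates the idea) that maps the argmax bin back onto the [-1, 1] steering range:

import numpy as np

def linear_unbin(angle_probs, num_bins=15):
    # pick the most likely bin and map it linearly onto [-1, 1]
    b = np.argmax(angle_probs)
    return b * (2.0 / (num_bins - 1)) - 1.0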
keras.py (project: donkey, author: wroscoe)
def default_linear():
    from keras.layers import Input, Dense
    from keras.models import Model
    from keras.layers import Convolution2D, MaxPooling2D, Reshape, BatchNormalization
    from keras.layers import Activation, Dropout, Flatten

    img_in = Input(shape=(120,160,3), name='img_in')
    x = img_in
    x = Convolution2D(24, (5,5), strides=(2,2), activation='relu')(x)
    x = Convolution2D(32, (5,5), strides=(2,2), activation='relu')(x)
    x = Convolution2D(64, (5,5), strides=(2,2), activation='relu')(x)
    x = Convolution2D(64, (3,3), strides=(2,2), activation='relu')(x)
    x = Convolution2D(64, (3,3), strides=(1,1), activation='relu')(x)

    x = Flatten(name='flattened')(x)
    x = Dense(100, activation='linear')(x)
    x = Dropout(.1)(x)
    x = Dense(50, activation='linear')(x)
    x = Dropout(.1)(x)
    # linear output of the angle
    angle_out = Dense(1, activation='linear', name='angle_out')(x)

    # continuous output of the throttle
    throttle_out = Dense(1, activation='linear', name='throttle_out')(x)

    model = Model(inputs=[img_in], outputs=[angle_out, throttle_out])


    model.compile(optimizer='adam',
                  loss={'angle_out': 'mean_squared_error', 
                        'throttle_out': 'mean_squared_error'},
                  loss_weights={'angle_out': 0.5, 'throttle_out': .5})

    return model
keras.py (project: donkey, author: wroscoe)
def default_n_linear(num_outputs):
    from keras.layers import Input, Dense
    from keras.models import Model
    from keras.layers import Convolution2D, MaxPooling2D, Reshape, BatchNormalization
    from keras.layers import Activation, Dropout, Flatten, Cropping2D, Lambda

    img_in = Input(shape=(120,160,3), name='img_in')
    x = img_in
    x = Cropping2D(cropping=((60,0), (0,0)))(x) #trim 60 pixels off top
    x = Lambda(lambda x: x/127.5 - 1.)(x) # normalize and re-center
    x = Convolution2D(24, (5,5), strides=(2,2), activation='relu')(x)
    x = Convolution2D(32, (5,5), strides=(2,2), activation='relu')(x)
    x = Convolution2D(64, (5,5), strides=(1,1), activation='relu')(x)
    x = Convolution2D(64, (3,3), strides=(1,1), activation='relu')(x)
    x = Convolution2D(64, (3,3), strides=(1,1), activation='relu')(x)

    x = Flatten(name='flattened')(x)
    x = Dense(100, activation='relu')(x)
    x = Dropout(.1)(x)
    x = Dense(50, activation='relu')(x)
    x = Dropout(.1)(x)

    outputs = [] 

    for i in range(num_outputs):
        outputs.append(Dense(1, activation='linear', name='n_outputs' + str(i))(x))

    model = Model(inputs=[img_in], outputs=outputs)


    model.compile(optimizer='adam',
                  loss='mse')

    return model

