Python models(): example source code

Source: keras.py (project: donkey, author: wroscoe)
def default_imu(num_outputs, num_imu_inputs):
    '''
    Notes: this model depends on concatenate which failed on keras < 2.0.8
    '''

    from keras.layers import Input, Dense
    from keras.models import Model
    from keras.layers import Convolution2D, MaxPooling2D, Reshape, BatchNormalization
    from keras.layers import Activation, Dropout, Flatten, Cropping2D, Lambda
    from keras.layers.merge import concatenate

    img_in = Input(shape=(120,160,3), name='img_in')
    imu_in = Input(shape=(num_imu_inputs,), name="imu_in")

    x = img_in
    x = Cropping2D(cropping=((60,0), (0,0)))(x) #trim 60 pixels off top
    #x = Lambda(lambda x: x/127.5 - 1.)(x) # normalize and re-center
    x = Convolution2D(24, (5,5), strides=(2,2), activation='relu')(x)
    x = Convolution2D(32, (5,5), strides=(2,2), activation='relu')(x)
    x = Convolution2D(64, (3,3), strides=(2,2), activation='relu')(x)
    x = Convolution2D(64, (3,3), strides=(1,1), activation='relu')(x)
    x = Convolution2D(64, (3,3), strides=(1,1), activation='relu')(x)
    x = Flatten(name='flattened')(x)
    x = Dense(100, activation='relu')(x)
    x = Dropout(.1)(x)

    y = imu_in
    y = Dense(14, activation='relu')(y)
    y = Dense(14, activation='relu')(y)
    y = Dense(14, activation='relu')(y)

    z = concatenate([x, y])
    z = Dense(50, activation='relu')(z)
    z = Dropout(.1)(z)
    z = Dense(50, activation='relu')(z)
    z = Dropout(.1)(z)

    outputs = [] 

    for i in range(num_outputs):
        outputs.append(Dense(1, activation='linear', name='out_' + str(i))(z))

    model = Model(inputs=[img_in, imu_in], outputs=outputs)

    model.compile(optimizer='adam',
                  loss='mse')

    return model
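
A minimal usage sketch (not part of the donkey source): build the model for two outputs and six IMU channels, then run a forward pass on random data. The shapes follow the Input definitions above; the output count and IMU size are arbitrary example values.

import numpy as np

model = default_imu(num_outputs=2, num_imu_inputs=6)
img = np.random.rand(1, 120, 160, 3)   # one dummy camera frame
imu = np.random.rand(1, 6)             # one dummy IMU reading
preds = model.predict([img, imu])      # list of two (1, 1) arrays
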
Source: utils.py (project: mpi_learn, author: duanders)
def load_model(filename=None, json_str=None, weights_file=None, custom_objects={}):
    """Loads model architecture from JSON and instantiates the model.
        filename: path to JSON file specifying model architecture
        json_str: (or) a json string specifying the model architecture
        weights_file: path to HDF5 file containing model weights
    custom_objects: A Dictionary of custom classes used in the model keyed by name"""
    import_keras()
    from keras.models import model_from_json
    if filename is not None:
        with open( filename ) as arch_f:
            json_str = arch_f.readline()
    model = model_from_json( json_str, custom_objects=custom_objects) 
    if weights_file is not None:
        model.load_weights( weights_file )
    return model
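
A hedged usage sketch (not part of mpi_learn): serialize a small model's architecture to JSON and its weights to HDF5, then restore it with the helper above. Note that to_json() emits a single line, which matches the readline() call in load_model; the file names here are arbitrary.

from keras.models import Sequential
from keras.layers import Dense

m = Sequential([Dense(4, input_shape=(8,), activation='relu')])
with open('arch.json', 'w') as f:
    f.write(m.to_json())           # one-line JSON architecture
m.save_weights('weights.h5')       # HDF5 weights (requires h5py)

restored = load_model(filename='arch.json', weights_file='weights.h5')
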
Source: keras_conversion.py (project: deeplift, author: kundajelab)
def convert_sequential_model(model,
                        num_dims=None,
                        nonlinear_mxts_mode=\
                         NonlinearMxtsMode.DeepLIFT_GenomicsDefault,
                        verbose=True,
                        dense_mxts_mode=DenseMxtsMode.Linear,
                        conv_mxts_mode=ConvMxtsMode.Linear,
                        maxpool_deeplift_mode=default_maxpool_deeplift_mode,
                        layer_overrides={}):
    if (verbose):
        print("nonlinear_mxts_mode is set to: "+str(nonlinear_mxts_mode))
    converted_layers = []
    if (model.layers[0].input_shape is not None):
        input_shape = model.layers[0].input_shape
        assert input_shape[0] is None #batch axis
        num_dims_input = len(input_shape)
        assert num_dims is None or num_dims_input==num_dims,\
        "num_dims argument of "+str(num_dims)+" is incompatible with"\
        +" the number of dims in layers[0].input_shape which is: "\
        +str(model.layers[0].input_shape)
        num_dims = num_dims_input
    else:
        input_shape = None
    converted_layers.append(
        blobs.Input(num_dims=num_dims, shape=input_shape, name="input"))
    #converted_layers is mutated in place when the additional layers are
    #appended, so the reassignment below is not strictly necessary, but it
    #is kept for readability
    converted_layers = sequential_container_conversion(
                layer=model, name="", verbose=verbose,
                nonlinear_mxts_mode=nonlinear_mxts_mode,
                dense_mxts_mode=dense_mxts_mode,
                conv_mxts_mode=conv_mxts_mode,
                maxpool_deeplift_mode=maxpool_deeplift_mode,
                converted_layers=converted_layers,
                layer_overrides=layer_overrides)
    deeplift.util.connect_list_of_layers(converted_layers)
    converted_layers[-1].build_fwd_pass_vars()
    return models.SequentialModel(converted_layers)
Source: test_recurrent_stress_tests.py (project: coremltools, author: apple)
def test_initial_state_LSTM(self):
        data = np.random.rand(1, 1, 2)

        model = keras.models.Sequential()
        model.add(keras.layers.LSTM(5, input_shape=(1, 2), batch_input_shape=[1, 1, 2], stateful=True))
        model.get_layer(index=1).reset_states()

        coreml_model = keras_converter.convert(model=model, input_names='data', output_names='output')

        keras_output_1 = model.predict(data)
        coreml_full_output_1 = coreml_model.predict({'data': data})
        coreml_output_1 = coreml_full_output_1['output']
        coreml_output_1 = np.expand_dims(coreml_output_1, 1)

        np.testing.assert_array_almost_equal(coreml_output_1.T, keras_output_1)

        hidden_state = (np.random.rand(1, 5), np.random.rand(1, 5))
        model.get_layer(index=1).reset_states(states=hidden_state)

        coreml_model = keras_converter.convert(model=model, input_names='data', output_names='output')
        spec = coreml_model.get_spec()

        keras_output_2 = model.predict(data)
        coreml_full_output_2 = coreml_model.predict(
            {'data': data, spec.description.input[1].name: hidden_state[0][0],
             spec.description.input[2].name: hidden_state[1][0]})
        coreml_output_2 = coreml_full_output_2['output']
        coreml_output_2 = np.expand_dims(coreml_output_2, 1)

        np.testing.assert_array_almost_equal(coreml_output_2.T, keras_output_2)
Source: train.py (project: AI-Chatbot, author: anujdutt9)
def load(self):
        self.model = keras.models.load_model(memoryNetwork.FILE_NAME)
        with open(memoryNetwork.VOCAB_FILE_NAME, 'rb') as file:
            self.word_id = pickle.load(file)
Source: alt_i2v.py (project: alt-i2v, author: GINK03)
def train():
  print('load lexical dataset...')
  Xs, Ys = loader(db='lexical150.ldb')
  print('build model...')
  model = build_model()
  for i in range(100):
    model.fit(np.array(Xs), np.array(Ys), batch_size=16, nb_epoch=1 )
    if i%1 == 0:
      model.save('models/model%05d.model'%i)
Source: alt_i2v.py (project: alt-i2v, author: GINK03)
def eval():
  tag_index = pickle.loads(open('tag_index.pkl', 'rb').read())
  index_tag = { index:tag for tag, index in tag_index.items() }
  model = build_model()
  model = load_model(sorted(glob.glob('models/*.model'))[-1]) 
  Xs, Ys = loader(db='lexical_eval.ldb', th=100)
  for i in range(30):
    result = model.predict(np.array([Xs[i]]))

    # predict() returns an ndarray, not a dict; rank this sample's scores
    for idx, w in sorted(enumerate(result[0]), key=lambda x: x[1]*-1)[:30]:
      print(index_tag[idx], idx, w)
Source: model.py (project: devise-keras, author: priyamtejaswin)
def __init__(self, folder):
        super(EpochCheckpoint, self).__init__()
        assert folder is not None, "Err. Please specify a folder where models will be saved"
        self.folder = folder
        print "[LOG] EpochCheckpoint: folder to save models: "+self.folder
Source: model.py (project: devise-keras, author: priyamtejaswin)
def main():
    RUN_TIME = sys.argv[1]


    if RUN_TIME == "TRAIN":
        image_features = Input(shape=(4096,))
        model = build_model(image_features)
        print model.summary()

        # number of training images 
        _num_train = get_num_train_images()

        # Callbacks 
        # remote_cb = RemoteMonitor(root='http://localhost:9000')
        tensorboard = TensorBoard(log_dir="logs/{}".format(time()))
        epoch_cb    = EpochCheckpoint(folder="./snapshots/")
        valid_cb    = ValidCallBack()

        # fit generator
        steps_per_epoch = math.ceil(_num_train/float(BATCH))
        print "Steps per epoch i.e number of iterations: ",steps_per_epoch

        train_datagen = data_generator(batch_size=INCORRECT_BATCH, image_class_ranges=TRAINING_CLASS_RANGES)
        history = model.fit_generator(
                train_datagen,
                steps_per_epoch=steps_per_epoch,
                epochs=250,
                callbacks=[tensorboard, valid_cb]
            )
        print history.history.keys()


    elif RUN_TIME == "TEST":
        from keras.models import load_model 
        model = load_model("snapshots/epoch_49.hdf5", custom_objects={"hinge_rank_loss":hinge_rank_loss})

    K.clear_session()
Source: run10_common_onimage.py (project: FCN_MSCOCO_Food_Segmentation, author: gakarak)
def loadModelFromJson(pathModelJson):
        if not os.path.isfile(pathModelJson):
            raise Exception('Cannot find JSON-file [%s]' % pathModelJson)
        tpathBase = os.path.splitext(pathModelJson)[0]
        tpathModelWeights = '%s.h5' % tpathBase
        if not os.path.isfile(tpathModelWeights):
            raise Exception('Cannot find h5-Weights-file [%s]' % tpathModelWeights)
        with open(pathModelJson, 'r') as f:
            tmpStr = f.read()
            model = keras.models.model_from_json(tmpStr)
            model.load_weights(tpathModelWeights)
        return model
Source: model.py (project: baseline, author: dpressel)
def load(basename, **kwargs):
        model = ConvModel()

        model.impl = keras.models.load_model(basename, **kwargs)
        with open(basename + '.labels', 'r') as f:
            model.labels = json.load(f)

        with open(basename + '.vocab', 'r') as f:
            model.vocab = json.load(f)
        return model
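
A hypothetical save-side counterpart (not from the baseline repo) showing the file layout load() expects: a Keras model file at basename plus '.labels' and '.vocab' JSON sidecar files.

import json

def save_conv_model(model, basename):
    # hypothetical helper; mirrors what ConvModel.load() reads back
    model.impl.save(basename)                  # Keras HDF5 model file
    with open(basename + '.labels', 'w') as f:
        json.dump(model.labels, f)
    with open(basename + '.vocab', 'w') as f:
        json.dump(model.vocab, f)
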
Source: model.py (project: baseline, author: dpressel)
def create(w2v, labels, **kwargs):
        model = ConvModel()
        model.labels = labels
        model.vocab = w2v.vocab
        filtsz = kwargs['filtsz']
        pdrop = kwargs.get('dropout', 0.5)
        mxlen = int(kwargs.get('mxlen', 100))
        cmotsz = kwargs['cmotsz']
        finetune = bool(kwargs.get('finetune', True))
        nc = len(labels)
        x = Input(shape=(mxlen,), dtype='int32', name='input')

        vocab_size = w2v.weights.shape[0]
        embedding_dim = w2v.dsz

        lut = Embedding(input_dim=vocab_size, output_dim=embedding_dim, weights=[w2v.weights], input_length=mxlen, trainable=finetune)

        embed = lut(x)

        mots = []
        for i, fsz in enumerate(filtsz):
            conv = Conv1D(cmotsz, fsz, activation='relu')(embed)
            gmp = GlobalMaxPooling1D()(conv)
            mots.append(gmp)

        joined = merge(mots, mode='concat')
        cmotsz_all = cmotsz * len(filtsz)
        drop1 = Dropout(pdrop)(joined)

        input_dim = cmotsz_all
        last_layer = drop1
        dense = Dense(output_dim=nc, input_dim=input_dim, activation='softmax')(last_layer)
        model.impl = keras.models.Model(input=[x], output=[dense])
        return model
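
A minimal call sketch (not from the baseline repo), assuming create() is exposed as a static method on ConvModel and that the word-embedding wrapper provides the .weights, .vocab and .dsz attributes the code above reads; all sizes and labels below are made up for illustration.

import numpy as np
from types import SimpleNamespace

# hypothetical stand-in for the w2v embedding wrapper
w2v = SimpleNamespace(weights=np.random.rand(5000, 300),  # (vocab_size, dsz)
                      vocab={'<PAD>': 0},
                      dsz=300)
model = ConvModel.create(w2v, labels=['neg', 'pos'],
                         filtsz=[3, 4, 5], cmotsz=100,
                         dropout=0.5, mxlen=100, finetune=True)
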
Source: model.py (project: baseline, author: dpressel)
def get_vocab(self):
        return self.vocab

# TODO: Add the other models!
Source: models.py (project: cervantes, author: textclf)
def predict(self, X):
        if type(self.model.input_shape) is tuple:
            X = np.array(X)
            if len(self.model.input_shape) == 2:
                X = X.reshape((X.shape[0], -1))
        else:
            raise LanguageClassifierException('Multi-input models are not supported yet')

        predictions = self.model.predict(X, verbose=True, batch_size=32)
        if (len(predictions.shape) > 1) and (1 not in predictions.shape):
            predictions = predictions.argmax(axis=-1)
        else:
            predictions = 1 * (predictions > 0.5).ravel()
        return predictions
Source: models.py (project: cervantes, author: textclf)
def predict_proba(self, X):
        if type(self.model.input_shape) is tuple:
            X = np.array(X)
            if len(self.model.input_shape) == 2:
                X = X.reshape((X.shape[0], -1))
        else:
            raise LanguageClassifierException('Multi-input models are not supported yet')
        return self.model.predict(X, verbose=True, batch_size=32)
Source: models.py (project: cervantes, author: textclf)
def test(self, X, y, verbose=True):
        # if we don't need 3d inputs...
        if type(self.model.input_shape) is tuple:
            X = np.array(X)
            if len(self.model.input_shape) == 2:
                X = X.reshape((X.shape[0], -1))
        else:
            raise LanguageClassifierException('Multi-input models are not supported yet')

        if verbose:
            print("Getting predictions on the test set")
        predictions = self.predict(X)

        if len(predictions) != len(y):
            raise LanguageClassifierException("Non comparable arrays")

        if self.binary:
            acc = (predictions == y).mean()
            prec = np.sum(np.bitwise_and(predictions, y)) * 1.0 / np.sum(predictions)
            recall = np.sum(np.bitwise_and(predictions, y)) * 1.0 / np.sum(y)
            if verbose:
                print("Test set accuracy of {0:.3f}%".format(acc * 100.0))
                print("Test set error of {0:.3f}%".format((1 - acc) * 100.0))
                print("Precision for class=1: {0:.3f}".format(prec))
                print("Recall for class=1: {0:.3f}".format(recall))

            return (acc, prec, recall)
        else:
            # TODO: Obtain more metrics for the multiclass problem
            acc = (predictions == y).mean()
            if verbose:
                print("Test set accuracy of {0:.3f}%".format(acc * 100.0))
                print("Test set error of {0:.3f}%".format((1 - acc) * 100.0))
            return acc
Source: trainer.py (project: deepanalytics_compe26_benchmark, author: takagiwa-ss)
def _load_model(fn):
    from keras.models import model_from_json
    with open(fn + '.json') as f:
        model = model_from_json(f.read())
    model.load_weights(fn + '.h5')
    return model
Source: deep_food.py (project: keras-resnet-food-reverse-engineering, author: GINK03)
def train():
  print('load lexical dataset...')
  Ys, Xs, Rs = loader()
  print('build model...')
  model = build_model()
  for i in range(100):
    model.fit(np.array(Xs), np.array(Ys), batch_size=16, nb_epoch=1 )
    if i%1 == 0:
      model.save('models/model%05d.model'%i)
Source: deep_food.py (project: keras-resnet-food-reverse-engineering, author: GINK03)
def eval():
  item_index = pickle.loads(open("cookpad/item_index.pkl", "rb").read())
  index_items = { index:item for item, index in item_index.items()}
  model = build_model()
  model = load_model(sorted(glob.glob('models/*.model'))[-1]) 
  Ys, Xs, Rs = loader(th=10)
  for i in range(len(Xs)):
    result = model.predict(np.array([Xs[i]]) )
    ares   = [(index_items[index], w) for index, w in enumerate(result.tolist()[0]) ]
    print(Rs[i])
    for en, (item, w) in enumerate(sorted(ares, key=lambda x:x[1]*-1)[:10]):
      print(en, item, w)
Source: deep_food.py (project: keras-resnet-food-reverse-engineering, author: GINK03)
def pred():
  item_index = pickle.loads(open("cookpad/item_index.pkl", "rb").read())
  index_items = { index:item for item, index in item_index.items()}
  model = build_model()
  model = load_model(sorted(glob.glob('models/model00060.model'))[-1]) 
  target_size = (224,224)
  dir_path = "to_pred/*"
  max_size = len(glob.glob(dir_path))
  for i, name in enumerate(glob.glob(dir_path)):
    try:
      img = Image.open(name)
    except OSError as e:
      continue
    print(i, max_size, name.split('/')[-1])
    w, h = img.size
    if w > h :
      blank = Image.new('RGB', (w, w))
    if w <= h :
      blank = Image.new('RGB', (h, h))
    blank.paste(img, (0, 0) )
    blank = blank.resize( target_size )
    Xs = np.array([np.asanyarray(blank)])
    result = model.predict(Xs)
    ares   = [(index_items[index], w) for index, w in enumerate(result.tolist()[0]) ]
    for en, (item, w) in enumerate(sorted(ares, key=lambda x:x[1]*-1)[:10]):
      print(en, item, w)

