Example source code for the Python class Model()

train.py (project: GeneGAN, author: Prinsphield)
def main():
    parser = argparse.ArgumentParser(description='test', formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument(
        '-a', '--attribute', 
        default='Smiling',
        type=str,
        help='Specify attribute name for training. \ndefault: %(default)s. \nAll attributes can be found in list_attr_celeba.txt'
    )
    parser.add_argument(
        '-g', '--gpu', 
        default='0',
        type=str,
        help='Specify GPU id. \ndefault: %(default)s. \nUse comma to separate several ids, for example: 0,1'
    )
    args = parser.parse_args()

    celebA = Dataset(args.attribute)
    GeneGAN = Model(is_train=True)
    run(config, celebA, GeneGAN, gpu=args.gpu)
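Given the flags defined above, training would presumably be launched with something like: python train.py -a Smiling -g 0,1 (the attribute name and GPU ids here are examples only).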
problem.py (project: OpenSAPM, author: pathfinder14)
def _assemble_model(self):
        """
        Create a model
        :return model
        """
        result_model = model.Model({
            "dimension" : self._dimension,
            "type" : self._type,
            "image_path" : self._image_path,
            "elasticity_quotient": self._elasticity_quotient,
            "mu_lame": self._mu_lame,
            "density": self._density,
            "v_p": self._v_p,
            "v_s": self._v_s
        }, self.GRID_SIZE)
        return result_model


    # TODO: produce different source types
train.py (project: torch_light, author: ne7ermore)
def train():
    rnn.train()
    total_loss = 0
    hidden = rnn.init_hidden(args.batch_size)
    for data, label in tqdm(training_data, mininterval=1,
                desc='Train Processing', leave=False):
        optimizer.zero_grad()
        hidden = repackage_hidden(hidden)
        target, hidden = rnn(data, hidden)
        loss = criterion(target, label)

        loss.backward()
        torch.nn.utils.clip_grad_norm(rnn.parameters(), args.clip)
        optimizer.step()

        total_loss += loss.data
    return total_loss[0]/training_data.sents_size

# ##############################################################################
# Save Model
# ##############################################################################
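The loop above relies on a repackage_hidden helper that is not shown in this excerpt. In the standard PyTorch language-model recipe, its job is to detach the recurrent hidden state from the previous batch's graph so backpropagation does not reach across batch boundaries. A minimal sketch of such a helper, written against the current PyTorch API (the torch_light original may differ):

import torch

def repackage_hidden(h):
    # Detach hidden states from their history so gradients stop at batch boundaries.
    if isinstance(h, torch.Tensor):
        return h.detach()
    # LSTM hidden state is a tuple (h, c); recurse over its elements.
    return tuple(repackage_hidden(v) for v in h)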
generate.py (project: torch_light, author: ne7ermore)
def __init__(self, model=None, model_source=None, src_dict=None, args=None):
        assert model is not None or model_source is not None

        if model is None:
            model_source = torch.load(model_source, map_location=lambda storage, loc: storage)
            self.dict = model_source["src_dict"]
            self.args = model_source["settings"]
            model = Model(self.args)
            model.load_state_dict(model_source['model'])
        else:
            self.dict = src_dict
            self.args = args

        self.num_directions = 2 if self.args.bidirectional else 1
        self.idx2word = {v: k for k, v in self.dict.items()}
        self.model = model.eval()
train.py (project: torch_light, author: ne7ermore)
def train():
    model.train()
    total_loss = 0
    for word, char, label in tqdm(training_data, mininterval=1,
                desc='Train Processing', leave=False):

        optimizer.zero_grad()
        loss, _ = model(word, char, label)
        loss.backward()

        optimizer.step()
        optimizer.update_learning_rate()
        total_loss += loss.data
    return total_loss[0]/training_data.sents_size/args.word_max_len

# ##############################################################################
# Save Model
# ##############################################################################
runner.py (project: Stereo-Pose-Machines, author: ppwwyyxx)
def get_runner(path):
    param_dict = np.load(path, encoding='latin1').item()
    predict_func = OfflinePredictor(PredictConfig(
        model=Model(),
        session_init=ParamRestore(param_dict),
        session_config=get_default_sess_config(0.99),
        input_names=['input'],
        #output_names=['Mconv7_stage6/output']
        output_names=['resized_map']
    ))
    def func_single(img):
        # img is bgr, [0,255]
        # return the output in WxHx15
        return predict_func([[img]])[0][0]
    def func_batch(imgs):
        # img is bgr, [0,255], nhwc
        # return the output in nhwc
        return predict_func([imgs])[0]
    return func_single, func_batch
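A hypothetical way to call the two returned functions; the weight-file path and image names below are placeholders, not part of the repository:

import cv2
import numpy as np

func_single, func_batch = get_runner('cpm_weights.npy')    # path is an assumption
img = cv2.imread('frame.jpg')                               # BGR image in [0, 255]
heatmap = func_single(img)                                  # W x H x 15 output for one image
heatmaps = func_batch(np.stack([img, img]))                 # NHWC batch in, NHWC maps out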
runner.py (project: Stereo-Pose-Machines, author: ppwwyyxx)
def get_parallel_runner_1(path):
    param_dict = np.load(path, encoding='latin1').item()
    cfg = PredictConfig(
        model=Model(),
        session_init=ParamRestore(param_dict),
        session_config=get_default_sess_config(0.99),
        input_names=['input'],
        output_names=['resized_map']
    )
    inque = mp.Queue()
    outque = mp.Queue()
    with change_gpu(0):
        proc = MultiProcessQueuePredictWorker(1, inque, outque, cfg)
        proc.start()
    with change_gpu(1):
        pred1 = OfflinePredictor(cfg)
    def func1(img):
        inque.put((0,[[img]]))
    func1.outque = outque
    def func2(img):
        return pred1([[img]])[0][0]
    return func1, func2
bot.py (project: meinkurve, author: michgur)
def __init__(self, key_right='Right', key_left='Left', color='red', color2='pink',learn=False,iteration = 0,net=None):
        Player.__init__(self, key_right=key_right, key_left=key_left, color=color, color2=color2)

        self.learn = learn
        self.iteration = iteration
        self.file = None
        # if self.learn:
        # self.file = open('data.txt','w')
        # self.file.write('a,a,a,a,a,a,a,a,a,a,x,y,class\n')
        # else:

        self.net = net
        self.model = Model('data.txt')
        self.model_list = []
        self.model_list.append(self.model)
        self.radar = Radar(self, range=1000)
        if not os.path.exists('files'):
            os.makedirs('files')
chatbotNew.py (project: Dave-Godot, author: finchMFG)
def sample_main(args):
    model_path, config_path, vocab_path = get_paths(args.save_dir)
    # Arguments passed to sample.py direct us to a saved model.
    # Load the separate arguments by which that model was previously trained.
    # That's saved_args. Use those to load the model.
    with open(config_path, 'rb') as f:
        print(f)
        saved_args = pickle.load(f)
    # Separately load chars and vocab from the save directory.
    with open(vocab_path, 'rb') as f:
        chars, vocab = pickle.load(f)
    # Create the model from the saved arguments, in inference mode.
    print("Creating model...")
    net = Model(saved_args, True)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        tf.initialize_all_variables().run()
        saver = tf.train.Saver(net.save_variables_list())
        # Restore the saved variables, replacing the initialized values.
        print("Restoring weights...")
        saver.restore(sess, model_path)
        chatbot(net, sess, chars, vocab, args.n, args.beam_width, args.relevance, args.temperature)
        #beam_sample(net, sess, chars, vocab, args.n, args.prime,
            #args.beam_width, args.relevance, args.temperature)
sample.py (project: sequelspeare, author: raidancampbell)
def __init__(self, save_dir=SAVE_DIR, prime_text=PRIME_TEXT, num_sample_symbols=NUM_SAMPLE_SYMBOLS):
        self.save_dir = save_dir
        self.prime_text = prime_text
        self.num_sample_symbols = num_sample_symbols
        with open(os.path.join(Sampler.SAVE_DIR, 'chars_vocab.pkl'), 'rb') as file:
            self.chars, self.vocab = cPickle.load(file)
            self.model = Model(len(self.chars), is_sampled=True)

            # polite GPU memory allocation: don't grab everything you can.
            config = tf.ConfigProto()
            config.gpu_options.allow_growth = True
            config.gpu_options.allocator_type = 'BFC'
            self.sess = tf.Session(config=config)

            tf.initialize_all_variables().run(session=self.sess)
            self.checkpoint = tf.train.get_checkpoint_state(self.save_dir)
            if self.checkpoint and self.checkpoint.model_checkpoint_path:
                tf.train.Saver(tf.all_variables()).restore(self.sess, self.checkpoint.model_checkpoint_path)
chatbot.py (project: chatbot-rnn, author: zenixls2)
def sample_main(args):
    model_path, config_path, vocab_path = get_paths(args.save_dir)
    # Arguments passed to sample.py direct us to a saved model.
    # Load the separate arguments by which that model was previously trained.
    # That's saved_args. Use those to load the model.
    with open(config_path) as f:
        saved_args = cPickle.load(f)
    # Separately load chars and vocab from the save directory.
    with open(vocab_path) as f:
        chars, vocab = cPickle.load(f)
    # Create the model from the saved arguments, in inference mode.
    print("Creating model...")
    net = Model(saved_args, True)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        tf.global_variables_initializer().run()
        saver = tf.train.Saver(net.save_variables_list())
        # Restore the saved variables, replacing the initialized values.
        print("Restoring weights...")
        saver.restore(sess, model_path)
        chatbot(net, sess, chars, vocab, args.n, args.beam_width, args.relevance, args.temperature)
        #beam_sample(net, sess, chars, vocab, args.n, args.prime,
            #args.beam_width, args.relevance, args.temperature)
model_api.py (project: neural-semantic-role-labeler, author: hiroki13)
def set_model(self):
        argv = self.argv

        #####################
        # Network variables #
        #####################
        x = T.ftensor3()
        d = T.imatrix()

        n_in = self.init_emb.shape[1]
        n_h = argv.hidden
        n_y = self.arg_dict.size()
        reg = argv.reg

        #################
        # Build a model #
        #################
        say('\n\nMODEL:  Unit: %s  Opt: %s' % (argv.unit, argv.opt))
        self.model = Model(argv=argv, x=x, y=d, n_in=n_in, n_h=n_h, n_y=n_y, reg=reg)
parser.py (project: TISP, author: kaayy)
def decode_sentence(kb, sentid, weightfile):
    indepkb = IndepKnowledgeBase()
    model = Model()

    parser = Parser(indepkb, kb, model, State)

    State.model = model
    State.model.weights = pickle.load(open(weightfile))
    State.ExtraInfoGen = ExprGenerator
    ExprGenerator.setup()

    ret = parser.parse(kb.questions[sentid])
    print >> LOGS, "============================="
    print >> LOGS, simplify_expr(ret.get_expr())
    print >> LOGS, "TRACING"
    for s in ret.trace_states():
        print >> LOGS, s, s.extrainfo
caption_image.py (project: CNN-LSTM-Caption-Generator, author: mosessoh)
def main(argv):
    opts, args = getopt.getopt(argv, 'i:')
    for opt, arg in opts:
        if opt == '-i':
            img_path = arg

    config = Config()
    with tf.variable_scope('CNNLSTM') as scope:
        print '-'*20
        print 'Model info'
        print '-'*20
        model = Model(config)
        print '-'*20
    saver = tf.train.Saver()

    img_vector = forward_cnn(img_path)

    with tf.Session() as session:
        save_path = best_model_dir + '/model-37'
        saver.restore(session, save_path)
        print '2 Layer LSTM loaded'
        print 'Generating caption...'
        caption = model.generate_caption(session, img_vector)
        print 'Output:', caption
sample.py (project: word-rnn-tf, author: jtoy)
def sample(args):
    with open(os.path.join(args.save_dir, 'config.pkl'), 'rb') as f:
        saved_args = cPickle.load(f)
    with open(os.path.join(args.save_dir, 'chars_vocab.pkl'), 'rb') as f:
        chars, vocab = cPickle.load(f)
    model = Model(saved_args, True)
    val_loss_file = args.save_dir + '/val_loss.json'
    with tf.Session() as sess:
        saver = tf.train.Saver(tf.all_variables())
        if os.path.exists(val_loss_file):
            with open(val_loss_file, "r") as text_file:
                text = text_file.read()
                loss_json = json.loads(text)
                losses = loss_json.keys()
                losses.sort(key=lambda x: float(x))
                loss = losses[0]
                model_checkpoint_path =  loss_json[loss]['checkpoint_path']
                #print(model_checkpoint_path)
                saver.restore(sess, model_checkpoint_path)
                result = model.sample(sess, chars, vocab, args.n, args.prime, args.sample_rule, args.temperature)
                print(result)  # add this back in later, not sure why it's not working
                output = "/data/output/"+ str(int(time.time())) + ".txt"
                with open(output, "w") as text_file:
                    text_file.write(result)
                print(output)
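The checkpoint lookup above implies that val_loss.json maps validation-loss values (stored as string keys and sorted numerically to find the best run) to checkpoint metadata. An illustrative layout inferred purely from this access pattern, not copied from the repository:

{
    "1.8342": {"checkpoint_path": "save/model.ckpt-12000"},
    "2.0517": {"checkpoint_path": "save/model.ckpt-6000"}
}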
gui.py (project: olive-gui, author: dturevski)
def openCollection(self, fileName):

        try:
            f = open(unicode(fileName), 'r')
            Mainframe.model = model.Model()
            Mainframe.model.delete(0)
            for data in yaml.load_all(f):
                Mainframe.model.add(model.makeSafe(data), False)
            f.close()
            Mainframe.model.is_dirty = False
        except IOError:
            msgBox(Lang.value('MSG_IO_failed'))
            Mainframe.model = model.Model()
        except yaml.YAMLError as e:
            msgBox(Lang.value('MSG_YAML_failed') % e)
            Mainframe.model = model.Model()
        else:
            if len(Mainframe.model.entries) == 0:
                Mainframe.model = model.Model()
            Mainframe.model.filename = unicode(fileName)
        finally:
            Mainframe.sigWrapper.sigModelChanged.emit()
gui.py (project: olive-gui, author: dturevski)
def __init__(self):
        super(Mainframe, self).__init__()

        Mainframe.model = model.Model()

        self.initLayout()
        self.initActions()
        self.initMenus()
        self.initToolbar()
        self.initSignals()
        self.initFrame()

        self.updateTitle()
        self.overview.rebuild()
        self.show()

        if Conf.value('check-for-latest-binary'):
            self.checkNewVersion = Mainframe.CheckNewVersion(self)
            self.checkNewVersion.start()
gui.py (project: olive-gui, author: dturevski)
def onImportCcv(self):
        if not self.doDirtyCheck():
            return
        default_dir = './collections/'
        if Mainframe.model.filename != '':
            default_dir, tail = os.path.split(Mainframe.model.filename)
        fileName, encoding = self.getOpenFileNameAndEncoding(
            Lang.value('MI_Import_CCV'), default_dir, "(*.ccv)")
        if not fileName:
            return
        try:
            Mainframe.model = model.Model()
            Mainframe.model.delete(0)
            for data in fancy.readCvv(fileName, encoding):
                Mainframe.model.add(model.makeSafe(data), False)
            Mainframe.model.is_dirty = False
        except IOError:
            msgBox(Lang.value('MSG_IO_failed'))
        except:
            msgBox(Lang.value('MSG_CCV_import_failed'))
        finally:
            if len(Mainframe.model.entries) == 0:
                Mainframe.model = model.Model()
            self.overview.rebuild()
            Mainframe.sigWrapper.sigModelChanged.emit()
main.py (project: ChineseNER, author: zjy-ucas)
def evaluate_line():
    config = load_config(FLAGS.config_file)
    logger = get_logger(FLAGS.log_file)
    # limit GPU memory
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    with open(FLAGS.map_file, "rb") as f:
        char_to_id, id_to_char, tag_to_id, id_to_tag = pickle.load(f)
    with tf.Session(config=tf_config) as sess:
        model = create_model(sess, Model, FLAGS.ckpt_path, load_word2vec, config, id_to_char, logger)
        while True:
            # try:
            #     line = input("请输入测试句子:")
            #     result = model.evaluate_line(sess, input_from_line(line, char_to_id), id_to_tag)
            #     print(result)
            # except Exception as e:
            #     logger.info(e)

                line = input("请输入测试句子:")
                result = model.evaluate_line(sess, input_from_line(line, char_to_id), id_to_tag)
                print(result)
main.py (project: LSTM-CRF-For-Named-Entity-Recognition, author: zpppy)
def evaluate_line():
    config = load_config(FLAGS.config_file)
    logger = get_logger(FLAGS.log_file)
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    with open(FLAGS.map_file, "rb") as f:
        char_to_id, id_to_char, tag_to_id, id_to_tag = pickle.load(f)
    with tf.Session(config=tf_config) as sess:
        model = create_model(sess, Model, FLAGS.ckpt_path, load_word2vec, config, id_to_char, logger)
        while True:
            # try:
            #     line = input("请输入测试句子:")
            #     result = model.evaluate_line(sess, input_from_line(line, char_to_id), id_to_tag)
            #     print(result)
            # except Exception as e:
            #     logger.info(e)

                line = input("请输入测试句子:")
                result = model.evaluate_line(sess, input_from_line(line, char_to_id), id_to_tag)
                print(result)
visualize.py (project: adversarial-autoencoder, author: musyoku)
def plot_scatter():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", "-m", type=str, default="model.hdf5")
    args = parser.parse_args()

    dataset_train, dataset_test = chainer.datasets.get_mnist()
    images_train, labels_train = dataset_train._datasets
    images_test, labels_test = dataset_test._datasets

    model = Model()
    assert model.load(args.model)

    # normalize
    images_train = (images_train - 0.5) * 2
    images_test = (images_test - 0.5) * 2

    with chainer.no_backprop_mode(), chainer.using_config("train", False):
        z = model.encode_x_yz(images_test)[1].data
    plot.scatter_labeled_z(z, labels_test, "scatter_gen.png")
visualize.py (project: adversarial-autoencoder, author: musyoku)
def plot_representation():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", "-m", type=str, default="model.hdf5")
    args = parser.parse_args()

    dataset_train, dataset_test = chainer.datasets.get_mnist()
    images_train, labels_train = dataset_train._datasets
    images_test, labels_test = dataset_test._datasets

    model = Model()
    assert model.load(args.model)

    # normalize
    images_train = (images_train - 0.5) * 2
    images_test = (images_test - 0.5) * 2

    with chainer.no_backprop_mode(), chainer.using_config("train", False):
        y_onehot, z = model.encode_x_yz(images_test, apply_softmax_y=True)
        representation = model.encode_yz_representation(y_onehot, z).data
    plot.scatter_labeled_z(representation, labels_test, "scatter_r.png")
visualize.py (project: adversarial-autoencoder, author: musyoku)
def plot_z():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", "-m", type=str, default="model.hdf5")
    args = parser.parse_args()

    dataset_train, dataset_test = chainer.datasets.get_mnist()
    images_train, labels_train = dataset_train._datasets
    images_test, labels_test = dataset_test._datasets

    model = Model()
    assert model.load(args.model)

    # normalize
    images_train = (images_train - 0.5) * 2
    images_test = (images_test - 0.5) * 2

    with chainer.no_backprop_mode(), chainer.using_config("train", False):
        z = model.encode_x_yz(images_test)[1].data
    plot.scatter_labeled_z(z, labels_test, "scatter_z.png")
visualize.py (project: adversarial-autoencoder, author: musyoku)
def plot_scatter():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", "-m", type=str, default="model.hdf5")
    args = parser.parse_args()

    dataset_train, dataset_test = chainer.datasets.get_mnist()
    images_train, labels_train = dataset_train._datasets
    images_test, labels_test = dataset_test._datasets

    model = Model()
    assert model.load(args.model)

    # normalize
    images_train = (images_train - 0.5) * 2
    images_test = (images_test - 0.5) * 2

    with chainer.no_backprop_mode(), chainer.using_config("train", False):
        z = model.encode_x_z(images_test).data
    plot.scatter_labeled_z(z, labels_test, "scatter_gen.png")
visualize.py (project: adversarial-autoencoder, author: musyoku)
def plot_scatter():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", "-m", type=str, default="model.hdf5")
    args = parser.parse_args()

    dataset_train, dataset_test = chainer.datasets.get_mnist()
    images_train, labels_train = dataset_train._datasets
    images_test, labels_test = dataset_test._datasets

    model = Model()
    assert model.load(args.model)

    # normalize
    images_train = (images_train - 0.5) * 2
    images_test = (images_test - 0.5) * 2

    with chainer.no_backprop_mode(), chainer.using_config("train", False):
        z = model.encode_x_z(images_test).data
    plot.scatter_labeled_z(z, labels_test, "scatter_z.png")
weights.py (project: char-rnn-tensorflow-master, author: JDonnelly1)
def sample(args):
    with open(os.path.join(args.save_dir, 'config.pkl'), 'rb') as f:
        saved_args = cPickle.load(f)
    with open(os.path.join(args.save_dir, 'chars_vocab.pkl'), 'rb') as f:
        chars, vocab = cPickle.load(f)
    model = Model(saved_args, training=False)
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        saver = tf.train.Saver(tf.global_variables())
        ckpt = tf.train.get_checkpoint_state(args.save_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            ret, hidden = model.sample(sess, chars, vocab, args.n, args.prime,
                               args.sample)#.encode('utf-8'))
            print("Number of characters generated: ", len(ret))

            for i in range(len(ret)):
                print("Generated character: ", ret[i])
                print("Assosciated hidden state:" , hidden[i])
adversarial_semseg.py (project: keras_zoo, author: david-vazquez)
def make_discriminator(self):
        # TODO just to have something, 5 layers vgg-like
        inputs = Input(shape=self.img_shape)
        enc1 = self.downsampling_block_basic(inputs, 64, 7)
        enc2 = self.downsampling_block_basic(enc1,   64, 7)
        enc3 = self.downsampling_block_basic(enc2,   92, 7)
        enc4 = self.downsampling_block_basic(enc3,  128, 7)
        enc5 = self.downsampling_block_basic(enc4,  128, 7)
        flat = Flatten()(enc5)
        dense1 = Dense(512, activation='sigmoid')(flat)
        dense2 = Dense(512, activation='sigmoid')(dense1)
        fake = Dense(1, activation='sigmoid', name='generation')(dense2)
        # Dense(2,... two classes : real and fake
        # change last activation to softmax ?
        discriminator = kmodels.Model(input=inputs, output=fake)

        lr = 1e-04
        optimizer = RMSprop(lr=lr, rho=0.9, epsilon=1e-8, clipnorm=10)
        print ('   Optimizer discriminator: rmsprop. Lr: {}. Rho: 0.9, epsilon=1e-8, '
               'clipnorm=10'.format(lr))

        discriminator.compile(loss='binary_crossentropy', optimizer=optimizer)
        # TODO metrics=metrics,
        return discriminator
gan.py (project: keras_zoo, author: david-vazquez)
def make_gan(self, img_shape, optimizer,
                 the_loss='categorical_crossentropy', metrics=[]):
        # Build stacked GAN model
        gan_input = Input(shape=img_shape)
        H = self.generator(gan_input)
        gan_V = self.discriminator(H)
        GAN = kmodels.Model(gan_input, gan_V)

        # Compile model
        GAN.compile(loss=the_loss, metrics=metrics, optimizer=optimizer)

        # Show model
        if self.cf.show_model:
            print('GAN')
            GAN.summary()
            plot(GAN, to_file=os.path.join(self.cf.savepath, 'model_GAN.png'))

        return GAN

    # Make the network trainable or not


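The trailing comment above suggests a helper that toggles whether a sub-network's weights update, which is the usual way to freeze the discriminator while training the stacked GAN. A minimal sketch of that common Keras pattern; the name and exact placement are assumptions, not taken from keras_zoo:

def make_trainable(net, trainable):
    # Freeze or unfreeze every layer, then recompile the affected models
    # so the change takes effect in subsequent training steps.
    net.trainable = trainable
    for layer in net.layers:
        layer.trainable = trainable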