Example source code for Python's get_session()
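Every snippet below uses keras.backend.get_session(), which returns the TensorFlow session Keras manages internally (a TensorFlow 1.x-era API, removed from tf.keras in TF 2). It is the standard escape hatch for running raw TensorFlow ops against the graph and variables behind a Keras model. A minimal sketch, assuming Keras 2.x on the TensorFlow 1.x backend:

# Minimal sketch: evaluate a raw TF tensor in the session Keras uses.
import tensorflow as tf
from keras import backend as K

sess = K.get_session()                    # the tf.Session Keras owns (or adopted)
doubled = tf.constant([1.0, 2.0]) * 2.0   # any raw TF op in the same graph
print(sess.run(doubled))                  # -> [2. 4.]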

test_mask_to_seq.py — project: yoctol-keras-layer-zoo, author: Yoctol
def test_seq_data_mask(self):
        mask_cache_key = str(id(self.model.input)) + '_' + str(id(None))
        mask_tensor = self.model._output_mask_cache[mask_cache_key]
        mask = mask_tensor.eval(
            session=K.get_session(),
            feed_dict={self.model.input: self.seq_data}
        )
        self.assertTrue(
            np.all(
                mask[:, :self.seq_data_max_length]
            )
        )
        self.assertFalse(
            np.any(
                mask[:, self.seq_data_max_length:]
            )
        )
model.py — project: cloudml-samples, author: GoogleCloudPlatform
def to_savedmodel(model, export_path):
  """Convert the Keras HDF5 model into TensorFlow SavedModel."""

  builder = saved_model_builder.SavedModelBuilder(export_path)

  signature = predict_signature_def(inputs={'input': model.inputs[0]},
                                    outputs={'income': model.outputs[0]})

  with K.get_session() as sess:  # caution: the with-block closes Keras' session on exit
    builder.add_meta_graph_and_variables(
        sess=sess,
        tags=[tag_constants.SERVING],
        signature_def_map={
            signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature}
    )
    builder.save()
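A hedged usage sketch of the converter above; the checkpoint name and export path are made-up:

# Hedged sketch: 'census.h5' and './export/1' are hypothetical names.
from keras.models import load_model

model = load_model('census.h5')                 # HDF5 checkpoint to convert
to_savedmodel(model, export_path='./export/1')  # one-shot: the session is
                                                # closed after the export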
gamestates.py — project: neuroblast, author: ActiveState
def ExportModel(self):
        import keras.backend as K
        from tensorflow.python.saved_model import builder as saved_model_builder
        from tensorflow.python.saved_model import utils
        from tensorflow.python.saved_model import tag_constants, signature_constants
        from tensorflow.python.saved_model.signature_def_utils_impl import build_signature_def, predict_signature_def
        from tensorflow.contrib.session_bundle import exporter

        print ("EXPORTING MODEL...")

        export_path = 'exported_brain'
        builder = saved_model_builder.SavedModelBuilder(export_path)

        signature = predict_signature_def(inputs={'inputs': self.brain.keras.input},
                                    outputs={'outputs': self.brain.keras.output})

        with K.get_session() as sess:
            builder.add_meta_graph_and_variables(sess=sess,
                                            tags=[tag_constants.TRAINING],
                                            signature_def_map={'predict': signature})
            builder.save()

        print ("...done!")
tfrecord_model.py — project: sample-cnn, author: tae-jun
def predict_tfrecord(self, x_batch):
    if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
      ins = [0.]
    else:
      ins = []
    self._make_tfrecord_predict_function()

    try:
      sess = K.get_session()
      coord = tf.train.Coordinator()
      threads = tf.train.start_queue_runners(sess=sess, coord=coord)

      outputs = self.predict_function(ins)

    finally:
      # TODO: If you close the queue, you can't open it again..
      # if stop_queue_runners:
      #   coord.request_stop()
      #   coord.join(threads)
      pass

    if len(outputs) == 1:
      return outputs[0]
    return outputs
keras_utils.py — project: deep-learning-essentials, author: DominicBreuker
def convert_weights_theano2tensorflow(model_builder,
                                      theano_weights_file,
                                      tensorflow_weights_file):
    """
    Theano and Tensorflow implement convolutional layers differently.
    This functions transforms pretrained weights for a Theano-based CNN
    to Tensorflow format.
    check out https://github.com/fchollet/keras/wiki/Converting-convolution-kernels-from-Theano-to-TensorFlow-and-vice-versa
    """
    assert K._BACKEND == 'tensorflow'
    model = model_builder(theano_weights_file)
    ops = []
    for layer in model.layers:
        if layer.__class__.__name__ in ['Convolution1D',
                                        'Convolution2D',
                                        'Convolution3D',
                                        'AtrousConvolution2D']:
            original_w = K.get_value(layer.W)
            converted_w = convert_kernel(original_w)
            ops.append(tf.assign(layer.W, converted_w).op)

    K.get_session().run(ops)
    model.save_weights(tensorflow_weights_file)
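For intuition, convert_kernel flips the spatial axes of a kernel, since Theano computes a true convolution while TensorFlow computes cross-correlation. A numpy-only sketch of that flip, assuming a bare 2D kernel (the real helper also carries the channel axes along):

# Sketch of the spatial flip that convert_kernel performs (2D case).
import numpy as np

kernel = np.arange(9, dtype='float32').reshape(3, 3)
flipped = kernel[::-1, ::-1]              # spatial flip: convolution <-> correlation
assert flipped[0, 0] == kernel[-1, -1]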
general.py — project: neural_style, author: metaflow-ai
def export_model(model, absolute_model_dir, best_weights=None, saver=None, global_step=None):
    if not os.path.isdir(absolute_model_dir):
        os.makedirs(absolute_model_dir)

    model.save_weights(absolute_model_dir + "/last_weights.hdf5", overwrite=True)
    if K._BACKEND == 'tensorflow' and saver is not None:
        sess = K.get_session()
        saver.save(sess, absolute_model_dir + '/tf-last_weights', global_step=global_step)

    if best_weights is not None:
        model.set_weights(best_weights)
        model.save_weights(absolute_model_dir + "/best_weights.hdf5", overwrite=True)
        if K._BACKEND == 'tensorflow' and saver is not None:
            saver.save(sess, absolute_model_dir + '/tf-best_weights', global_step=global_step)

    # Graph
    json = model.to_json()
    open(absolute_model_dir + "/archi.json", 'w').write(json)
    if K._BACKEND == 'tensorflow' and saver is not None and global_step is None:
        graph_def = sess.graph.as_graph_def()
        tf.train.write_graph(graph_def, absolute_model_dir, 'tf-model_graph')

        freeze_graph(model, absolute_model_dir, best_weights)
evaluate_imagenet.py — project: keras-inception-resnet-v2, author: myutwo150
def evaluate(imagenet_dir, batch_size=100, steps=None, num_threads=4, verbose=False):
    with K.get_session().as_default():
        # setup data tensors
        images, labels, num_samples = prepare_data(imagenet_dir, batch_size, num_threads)
        tf.train.start_queue_runners(coord=tf.train.Coordinator())

        # compile model in order to provide `metrics` and `target_tensors`
        model = InceptionResNetV2(input_tensor=images)
        model.compile(optimizer='adam',
                      loss='sparse_categorical_crossentropy',
                      metrics=['sparse_categorical_accuracy', 'sparse_top_k_categorical_accuracy'],
                      target_tensors=[labels])

        # start evaluation
        if steps is None:
            steps = int(math.ceil(num_samples / batch_size))
        _, acc1, acc5 = model.evaluate(x=None, y=None, steps=steps, verbose=int(verbose))
        print()
        print('Top-1 Accuracy {:.1%}'.format(acc1))
        print('Top-5 Accuracy {:.1%}'.format(acc5))
training_base.py — project: DeepJet, author: mstoye
def saveModel(self,outfile):
        self.keras_model.save(self.outputDir+outfile)
        import tensorflow as tf
        import keras.backend as K
        tfsession = K.get_session()
        saver = tf.train.Saver()
        tfoutpath = self.outputDir + outfile + '_tfsession/tf'
        import os
        os.system('rm -rf '+tfoutpath)
        os.system('mkdir -p '+tfoutpath)
        saver.save(tfsession, tfoutpath)


        #import h5py
        #f = h5py.File(self.outputDir+outfile, 'r+')
        #del f['optimizer_weights']
        #f.close()
yolo.py — project: DIL, author: FoxRow
def yolo_eval(yolo_outputs, image_shape, max_boxes=10, score_threshold=.6, iou_threshold=.5):
    """Evaluate YOLO model on given input batch and return filtered boxes."""
    box_xy, box_wh, box_confidence, box_class_probs = yolo_outputs
    boxes = yolo_boxes_to_corners(box_xy, box_wh)
    boxes, scores, classes = yolo_filter_boxes(boxes, box_confidence, box_class_probs, threshold=score_threshold)

    # Scale boxes back to original image shape.
    height = image_shape[0]
    width = image_shape[1]
    image_dims = K.stack([height, width, height, width])
    image_dims = K.reshape(image_dims, [1, 4])
    boxes = boxes * image_dims

    max_boxes_tensor = K.variable(max_boxes, dtype='int32')
    K.get_session().run(tf.variables_initializer([max_boxes_tensor]))
    nms_index = tf.image.non_max_suppression(boxes, scores, max_boxes_tensor, iou_threshold=iou_threshold)
    boxes = K.gather(boxes, nms_index)
    scores = K.gather(scores, nms_index)
    classes = K.gather(classes, nms_index)
    return boxes, scores, classes
tanda_keras.py — project: tanda, author: HazyResearch
def __init__(self,
                 tan,
                 featurewise_center=False,
                 samplewise_center=False,
                 featurewise_std_normalization=False,
                 samplewise_std_normalization=False,
                 zca_whitening=False,
                 zca_epsilon=1e-6,
                 rescale=None,
                 preprocessing_function=None,
                 data_format=None):
        super(TANDAImageDataGenerator, self).__init__(
            featurewise_center=featurewise_center,
            samplewise_center=samplewise_center,
            featurewise_std_normalization=featurewise_std_normalization,
            samplewise_std_normalization=samplewise_std_normalization,
            zca_whitening=zca_whitening,
            zca_epsilon=zca_epsilon,
            rescale=rescale,
            preprocessing_function=preprocessing_function,
            data_format=data_format
        )
        self.tan = tan
        self.session = K.get_session()
test_rnn_base.py — project: yoctol-keras-layer-zoo, author: Yoctol
def test_mask(self):
        mask_cache_key = str(id(self.model.input)) + '_' + str(id(None))
        mask_tensor = self.model._output_mask_cache[mask_cache_key]
        mask = mask_tensor.eval(
            session=K.get_session(),
            feed_dict={self.model.input: self.data}
        )
        self.assertFalse(np.any(mask[:, self.mask_start_point:]))
        self.assertTrue(np.all(mask[:, :self.mask_start_point]))
test_rnn_decoder_with_decoding_size.py — project: yoctol-keras-layer-zoo, author: Yoctol
def test_mask(self):
        mask_cache_key = str(id(self.model.input)) + '_' + str(id(None))
        mask_tensor = self.model._output_mask_cache[mask_cache_key]
        mask = mask_tensor.eval(
            session=K.get_session(),
            feed_dict={self.model.input: self.data}
        )
        self.assertTrue(np.all(mask[:, :]))
test_convEncoder.py — project: yoctol-keras-layer-zoo, author: Yoctol
def test_mask(self):
        mask_cache_key = str(id(self.model.input)) + '_' + str(id(None))
        mask_tensor = self.model._output_mask_cache[mask_cache_key]
        mask = mask_tensor.eval(
            session=K.get_session(),
            feed_dict={self.model.input: self.data}
        )
        self.assertFalse(
            np.any(mask[:, self.max_length:])
        )
        self.assertTrue(
            np.all(mask[:, :self.max_length])
        )
test_flatten.py — project: yoctol-keras-layer-zoo, author: Yoctol
def test_mask_value(self):
        mask_cache_key = str(id(self.model.input)) + '_' + str(id(None))
        mask_tensor = self.model._output_mask_cache[mask_cache_key]
        mask = mask_tensor.eval(
            session=K.get_session(),
            feed_dict={self.model.input: self.data}
        )
        self.assertFalse(np.any(mask))
test_pick.py — project: yoctol-keras-layer-zoo, author: Yoctol
def test_mask(self):
        mask_cache_key = str(id(self.model.input)) + '_' + str(id(None))
        mask_tensor = self.model._output_mask_cache[mask_cache_key]
        mask = mask_tensor.eval(
            session=K.get_session(),
            feed_dict={self.model.input: self.data}
        )
        self.assertTrue(
            np.all(mask)
        )
word_vectors.py — project: keras-image-captioning, author: danieljl
def vectorize_words(self, words):
        vectors = []
        for word in words:
            vector = self._word_vector_of.get(word)
            vectors.append(vector)

        # len(filter(...)) works only on Python 2; count the Nones portably.
        num_unknowns = sum(1 for v in vectors if v is None)
        inits = self._initializer(shape=(num_unknowns, self._embedding_size))
        inits = K.get_session().run(inits)
        inits = iter(inits)
        for i in range(len(vectors)):
            if vectors[i] is None:
                vectors[i] = next(inits)

        return np.array(vectors)
adversarial_tools.py — project: textfool, author: bogdan-kulynych
def wordwise_grads(self, feature_vectors):
        sess = K.get_session()
        grad_sum = sess.run(self.grad_sum_tensor, feed_dict={
            self.input_tensor: feature_vectors,
            keras.backend.learning_phase(): 0
        })
        return grad_sum
keras_utils.py — project: spark-deep-learning, author: databricks
def __enter__(self):
        self.old_session = K.get_session()
        self.g = self.requested_graph or tf.Graph()
        self.current_session = tf.Session(graph=self.g)
        K.set_session(self.current_session)
        return (self.current_session, self.g)
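The snippet omits the class name and its __exit__; assuming __exit__ restores old_session via K.set_session, usage would look roughly like this (KSessionWrap is a hypothetical name for the class):

with KSessionWrap() as (sess, graph):       # hypothetical class name
    with graph.as_default():
        pass  # build and run Keras ops inside the isolated graph/session
# on exit, the previous Keras session is presumably restored via K.set_session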
builder.py — project: spark-deep-learning, author: databricks
def __init__(self, graph=None, using_keras=False):
        self.graph = graph or tf.Graph()
        self.sess = tf.Session(graph=self.graph)
        if using_keras:
            self.using_keras = True
            self.keras_prev_sess = K.get_session()
        else:
            self.using_keras = False
            self.keras_prev_sess = None
concurrency.py — project: keras_experiments, author: avolkov1
def start(self):
        # import tensorflow as tf
        # self._sess = tf.get_default_session()
        self._sess = KB.get_session()
        super(ShareSessionThread, self).start()
_multigpu.py — project: keras_experiments, author: avolkov1
def _run_initsync(self):
        # tparams = [list(chain(*tp)) for tp in self._tower_params]
        tparams = self._tower_params

        # Check to prevent from unnecessarily re-initializing and
        # synchronizing, i.e. when the model loads the weights.
        for v in chain.from_iterable(tparams):
            if getattr(v, '_keras_initialized', False):
                return

        KB.manual_variable_initialization(True)
        sess = KB.get_session()
        KB.manual_variable_initialization(False)

        # glob_variables = tf.global_variables()
        # sess.run(tf.variables_initializer(glob_variables))

        # Initialize on GPU0 and sync to other GPUs
        init_op = tf.variables_initializer(tparams[0])
        # init_op = tf.variables_initializer(self._tower_params[0])
        # init_op = tf.variables_initializer(self.trainable_weights)
        sess.run(init_op)

        # Important if using model_creator. Not necessary if the model instance
        # is reused, in which case the model layers are shared between slices
        # and are automatically sync'd.
        sync_op = all_sync_params(tparams, self._gdev_list,
                                  usenccl=self._usenccl)
        sess.run(sync_op)

        for v in chain.from_iterable(tparams):
            v._keras_initialized = True


# Data-parallel ref: https://github.com/fchollet/keras/issues/2436
# Tower-parallel:
# ref: https://medium.com/autonomous-agents/multi-gpu-training-of-large-sparse-matrix-on-wide-neuralnetwork-cac7afc52ffe @IgnorePep8
# ref: https://gist.github.com/vvpreetham/1379cc4e208ea33ce3e615067e92fc5e
network.py — project: nuts-ml, author: maet3608
def evaluate(self, metrics, predcol=None, targetcol=-1):
        from keras import backend as K

        def compute(metric, targets, preds):
            result = metric(K.variable(targets), K.variable(preds))
            is_theano = K.backend() == 'theano'
            sess = None if is_theano else K.get_session()
            result = result.eval() if is_theano else result.eval(session=sess)
            is_vector = hasattr(result, '__iter__')
            return float(np.mean(result) if is_vector else result)

        return EvalNut(self, metrics, compute, predcol, targetcol)
cifar10_cnn.py — project: NetworkCompress, author: luzai
def reset_weights(model):
    session = K.get_session()
    for layer in model.layers:
        if isinstance(layer, Dense):
            old = layer.get_weights()
            layer.W.initializer.run(session=session)
            layer.b.initializer.run(session=session)
            print(np.array_equal(old, layer.get_weights()), " after initializer run")
        else:
            print(layer, "not reinitialized")
net2net.py — project: NetworkCompress, author: luzai
def reset_weights(model):
    session = K.get_session()
    for layer in model.layers:
        if isinstance(layer, Dense):
            old = layer.get_weights()
            layer.W.initializer.run(session=session)
            layer.b.initializer.run(session=session)
            print(np.array_equal(old, layer.get_weights()), " after initializer run")
        else:
            print(layer, "not reinitialized")
weightnorm.py — project: weightnorm, author: openai
def data_based_init(model, input):

    # input can be dict, numpy array, or list of numpy arrays
    if type(input) is dict:
        feed_dict = input
    elif type(input) is list:
        feed_dict = {tf_inp: np_inp for tf_inp,np_inp in zip(model.inputs,input)}
    else:
        feed_dict = {model.inputs[0]: input}

    # add learning phase if required
    if model.uses_learning_phase and K.learning_phase() not in feed_dict:
        feed_dict.update({K.learning_phase(): 1})

    # get all layer name, output, weight, bias tuples
    layer_output_weight_bias = []
    for l in model.layers:
        if hasattr(l, 'W') and hasattr(l, 'b'):
            assert(l.built)
            layer_output_weight_bias.append( (l.name,l.get_output_at(0),l.W,l.b) ) # if more than one node, only use the first

    # iterate over our list and do data dependent init
    sess = K.get_session()
    for l,o,W,b in layer_output_weight_bias:
        print('Performing data dependent initialization for layer ' + l)
        m,v = tf.nn.moments(o, [i for i in range(len(o.get_shape())-1)])
        s = tf.sqrt(v + 1e-10)
        updates = tf.group(W.assign(W/tf.reshape(s,[1]*(len(W.get_shape())-1)+[-1])), b.assign((b-m)/s))
        sess.run(updates, feed_dict)
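Data-dependent initialization (Salimans & Kingma, 2016) is meant to run once on a batch of real samples before training. A hedged sketch, where x_train and y_train are made-up names:

data_based_init(model, x_train[:500])    # one-time init from real samples
model.fit(x_train, y_train, epochs=10)   # then train as usual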
utils.py — project: keras_text_classifier, author: cdj0311
def th2tf(model):
    import tensorflow as tf
    ops = []
    for layer in model.layers:
        if layer.__class__.__name__ in ['Convolution1D', 'Convolution2D']:
            original_w = K.get_value(layer.W)
            converted_w = convert_kernel(original_w)
            ops.append(tf.assign(layer.W, converted_w).op)
    K.get_session().run(ops)
    return model
callbacks.py — project: deform-conv, author: felixlaumon
def set_model(self, model):
        self.model = model
        self.sess = K.get_session()
        total_loss = self.model.total_loss
        if self.histogram_freq and self.merged is None:
            for layer in self.model.layers:
                for weight in layer.weights:
                    # dense_1/bias:0 > dense_1/bias_0
                    name = weight.name.replace(':', '_')
                    tf.summary.histogram(name, weight)
                    tf.summary.histogram(
                        '{}_gradients'.format(name),
                        K.gradients(total_loss, [weight])[0]
                    )
                    if self.write_images:
                        w_img = tf.squeeze(weight)
                        shape = w_img.get_shape()
                        if len(shape) > 1 and shape[0] > shape[1]:
                            w_img = tf.transpose(w_img)
                        if len(shape) == 1:
                            w_img = tf.expand_dims(w_img, 0)
                        w_img = tf.expand_dims(tf.expand_dims(w_img, 0), -1)
                        tf.summary.image(name, w_img)

                if hasattr(layer, 'output'):
                    tf.summary.histogram('{}_out'.format(layer.name),
                                         layer.output)
        self.merged = tf.summary.merge_all()

        if self.write_graph:
            self.writer = tf.summary.FileWriter(self.log_dir,
                                                self.sess.graph)
        else:
            self.writer = tf.summary.FileWriter(self.log_dir)
utils.py — project: deform-conv, author: felixlaumon
def keras_set_tf_debug():
    sess = K.get_session()
    sess = tf_debug.LocalCLIDebugWrapperSession(sess)
    sess.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)
    K.set_session(sess)
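Called once up front, this swaps Keras onto a tfdbg-wrapped session, so every later session.run drops into the debugger CLI. A hedged sketch (x and y are made-up arrays):

keras_set_tf_debug()              # wrap Keras' session in the tfdbg CLI
model.fit(x, y, batch_size=32)    # each run() now opens the debugger; the
                                  # registered filter: `run -f has_inf_or_nan`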
weightnorm.py — project: stuff, author: yaroslavvb
def data_based_init(model, input):

    # input can be dict, numpy array, or list of numpy arrays
    if type(input) is dict:
        feed_dict = input
    elif type(input) is list:
        feed_dict = {tf_inp: np_inp for tf_inp,np_inp in zip(model.inputs,input)}
    else:
        feed_dict = {model.inputs[0]: input}

    # add learning phase if required
    if model.uses_learning_phase and K.learning_phase() not in feed_dict:
        feed_dict.update({K.learning_phase(): 1})

    # get all layer name, output, weight, bias tuples
    layer_output_weight_bias = []
    for l in model.layers:
        if hasattr(l, 'W') and hasattr(l, 'b'):
            assert(l.built)
            layer_output_weight_bias.append( (l.name,l.get_output_at(0),l.W,l.b) ) # if more than one node, only use the first

    # iterate over our list and do data dependent init
    sess = K.get_session()
    for l,o,W,b in layer_output_weight_bias:
        print('Performing data dependent initialization for layer ' + l)
        m,v = tf.nn.moments(o, [i for i in range(len(o.get_shape())-1)])
        s = tf.sqrt(v + 1e-10)
        updates = tf.group(W.assign(W/tf.reshape(s,[1]*(len(W.get_shape())-1)+[-1])), b.assign((b-m)/s))
        sess.run(updates, feed_dict)
train.py — project: captcha-breaker, author: Detry322
def reset_weights(model):
    session = K.get_session()
    for layer in model.layers:
        if isinstance(layer, Dense):
            old = layer.get_weights()
            layer.W.initializer.run(session=session)
            layer.b.initializer.run(session=session)
            print(np.array_equal(old, layer.get_weights())," after initializer run")
        else:
            print(layer, "not reinitialized")

