def build_from_pb(self):
    with tf.gfile.FastGFile(self.FLAGS.pbLoad, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    tf.import_graph_def(
        graph_def,
        name=""
    )
    with open(self.FLAGS.metaLoad, 'r') as fp:
        self.meta = json.load(fp)
    self.framework = create_framework(self.meta, self.FLAGS)
    # Placeholders
    self.inp = tf.get_default_graph().get_tensor_by_name('input:0')
    self.feed = dict()  # other placeholders
    self.out = tf.get_default_graph().get_tensor_by_name('output:0')
    self.setup_meta_ops()
Python usage examples of tf.get_default_graph()
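All of the snippets collected here lean on the same TF 1.x fact: the process keeps a default Graph, and tf.get_default_graph() returns it so tensors and operations can be recovered by name. A minimal sketch of that round trip (the names here are illustrative, not taken from any snippet below):

import tensorflow as tf

# Ops register themselves on the default graph; the op name fixes the tensor name.
x = tf.placeholder(tf.float32, shape=(None,), name='x')
y = tf.multiply(x, 2.0, name='y')

# Recover the same tensors from the default graph; ':0' selects the op's first output.
graph = tf.get_default_graph()
x_t = graph.get_tensor_by_name('x:0')
y_t = graph.get_tensor_by_name('y:0')

with tf.Session() as sess:
    print(sess.run(y_t, feed_dict={x_t: [1.0, 2.0]}))  # [2. 4.]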
def test_create_optimizer(self):
    """Test that create_optimizer works with tf optimizers."""
    optimizer_config = {'learning_rate': 0.1}
    # test missing required entry `class`
    self.assertRaises(AssertionError, create_optimizer, optimizer_config)
    optimizer_config['class'] = 'tensorflow.python.training.gradient_descent.GradientDescentOptimizer'
    with tf.Session().as_default():
        # test if the optimizer is created correctly
        optimizer = create_optimizer(optimizer_config)
        self.assertIsInstance(optimizer, tf.train.GradientDescentOptimizer)
        # test if the learning_rate variable is created with the correct value
        lr_tensor = tf.get_default_graph().get_tensor_by_name('learning_rate:0')
        tf.get_default_session().run(tf.global_variables_initializer())
        self.assertAlmostEqual(lr_tensor.eval(), 0.1)
    optimizer_config2 = {'learning_rate': 0.1, 'class': 'tensorflow.python.training.momentum.MomentumOptimizer'}
    # test missing required argument (momentum in this case)
    with tf.Graph().as_default():
        self.assertRaises(TypeError, create_optimizer, optimizer_config2)
def train_model(self, num_steps=100):
    x_train = [1, 2, 3, 4]
    y_train = [0, -1, -2, -3]
    x = tf.get_default_graph().get_tensor_by_name('model_0/x:0')
    y = tf.get_default_graph().get_tensor_by_name('model_0/y:0')
    feed_dict = {x: x_train, y: y_train}
    pre_global_step = self.sess.run(self.global_step)
    for step in range(num_steps):
        train_res = self.sess.run(self.train_targets, feed_dict=feed_dict)
        self.log.info('Step: {}, loss: {}'.format(step, train_res['loss']))
    post_global_step = self.sess.run(self.global_step)
    self.assertEqual(pre_global_step + num_steps, post_global_step)
    self.step += num_steps
    return train_res
Source: load.py, from the project how_to_deploy_a_keras_model_to_production by llSourcell.
def init():
    json_file = open('model.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    loaded_model = model_from_json(loaded_model_json)
    # load weights into the new model
    loaded_model.load_weights("model.h5")
    print("Loaded Model from disk")
    # compile and evaluate loaded model
    loaded_model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    # loss, accuracy = model.evaluate(X_test, y_test)
    # print('loss:', loss)
    # print('accuracy:', accuracy)
    graph = tf.get_default_graph()
    return loaded_model, graph
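The reason init() returns the graph alongside the model: Keras registers the model's tensors on the default graph of the thread that loaded it, so a server handling requests on other threads must re-enter that graph before calling predict. A minimal sketch of the intended call pattern (the input shape is an assumption, not taken from the snippet):

import numpy as np

model, graph = init()
# e.g. inside a Flask request handler, possibly on a different thread:
with graph.as_default():
    prediction = model.predict(np.zeros((1, 28, 28, 1)))  # illustrative shape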
def load_frozen_graph(graph_dir, fix_nodes=True, entry=None, output=None):
    with gfile.FastGFile(graph_dir, "rb") as file:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(file.read())
    if fix_nodes:
        # Patch batch-norm nodes that break when a training graph is frozen.
        for node in graph_def.node:
            if node.op == 'RefSwitch':
                node.op = 'Switch'
                for index in range(len(node.input)):
                    if 'moving_' in node.input[index]:
                        node.input[index] = node.input[index] + '/read'
            elif node.op == 'AssignSub':
                node.op = 'Sub'
                if 'use_locking' in node.attr:
                    del node.attr['use_locking']
    tf.import_graph_def(graph_def, name="")
    if entry is not None:
        entry = tf.get_default_graph().get_tensor_by_name(entry)
    if output is not None:
        output = tf.get_default_graph().get_tensor_by_name(output)
    return entry, output
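A hedged usage sketch: the .pb path and the tensor names below are assumptions for illustration; real values depend on how the graph was frozen and exported:

import numpy as np

entry, output = load_frozen_graph('frozen_model.pb',
                                  entry='input:0', output='output:0')
with tf.Session() as sess:
    batch = np.zeros((1, 224, 224, 3), dtype=np.float32)  # dummy input
    scores = sess.run(output, feed_dict={entry: batch})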
def init_segmenter(args_segmenter_model):
    global segmenter_model, rings, sectors, points_per_ring, is_ped, tf_segmenter_graph
    segmenter_model = load_model(args_segmenter_model, compile=False)
    segmenter_model._make_predict_function()  # https://github.com/fchollet/keras/issues/6124
    print("Loading segmenter model " + args_segmenter_model)
    segmenter_model.summary()
    points_per_ring = segmenter_model.get_input_shape_at(0)[0][1]
    match = re.search(r'lidarnet-(car|ped)-.*seg-rings_(\d+)_(\d+)-sectors_(\d+)-.*\.hdf5', args_segmenter_model)
    is_ped = match.group(1) == 'ped'
    rings = range(int(match.group(2)), int(match.group(3)))
    sectors = int(match.group(4))
    points_per_ring *= sectors
    assert len(rings) == segmenter_model.get_input_shape_at(0)[0][2]
    print('Loaded segmenter model with ' + str(points_per_ring) + ' points per ring and ' + str(len(rings)) +
          ' rings from ' + str(rings[0]) + ' to ' + str(rings[-1]))
    if K._backend == 'tensorflow':
        tf_segmenter_graph = tf.get_default_graph()
        print(tf_segmenter_graph)
    return
def test_set_value():
    a = tf.Variable(42.)
    with single_threaded_session():
        set_value(a, 5)
        assert a.eval() == 5
        g = tf.get_default_graph()
        g.finalize()
        set_value(a, 6)
        assert a.eval() == 6
        # test the test
        try:
            assert a.eval() == 7
        except AssertionError:
            pass
        else:
            assert False, "assertion should have failed"
def _sample(self):
    gan = self.gan
    z_t = gan.encoder.sample
    inputs_t = gan.inputs.x
    if self.z is None:
        self.z = gan.encoder.sample.eval()
        self.target = gan.encoder.sample.eval()
        self.input = gan.session.run(gan.inputs.x)
    if self.step > self.steps:
        self.z = self.target
        self.target = gan.encoder.sample.eval()
        self.step = 0
    percent = float(self.step) / self.steps
    z_interp = self.z * (1.0 - percent) + self.target * percent
    self.step += 1
    g = tf.get_default_graph()
    with g.as_default():
        tf.set_random_seed(1)
        return {
            'generator': gan.session.run(gan.generator.sample, feed_dict={z_t: z_interp, inputs_t: self.input})
        }
def test():
    with tf.Graph().as_default():
        image, label = input.get_input(LABEL_PATH, LABEL_FORMAT, IMAGE_PATH, IMAGE_FORMAT)
        logits = model.inference(image)
        top_k_op = tf.nn.in_top_k(logits, label, 1)
        variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)
        # Get summaries for TensorBoard
        summary_op = tf.merge_all_summaries()
        graph_def = tf.get_default_graph().as_graph_def()
        summary_writer = tf.train.SummaryWriter(input.FLAGS.eval_dir, graph_def=graph_def)
        while True:
            evaluate_model(saver, summary_writer, top_k_op, summary_op)
            if input.FLAGS.run_once:
                break
            time.sleep(input.FLAGS.eval_interval_secs)
def testScopeRestore(self):
    c1 = conv.Conv2D(
        16,
        8,
        4,
        name='conv_2d_0',
        padding=conv.VALID,
        initializers={
            'w':
                initializers.restore_initializer(
                    _checkpoint(), 'w', scope='agent/conv_net_2d/conv_2d_0'),
            'b':
                initializers.restore_initializer(
                    _checkpoint(), 'b', scope='agent/conv_net_2d/conv_2d_0')
        })
    inputs = tf.constant(1 / 255.0, shape=[1, 86, 86, 3])
    outputs = c1(inputs)
    init = tf.global_variables_initializer()
    tf.get_default_graph().finalize()
    with self.test_session() as session:
        session.run(init)
        o = session.run(outputs)
    self.assertAllClose(np.linalg.norm(o), _ONE_CONV_LAYER, atol=_TOLERANCE)
def testModuleInfo_multiple_subgraph(self):
    # pylint: disable=not-callable
    tf.reset_default_graph()
    dumb = DumbModule(name="dumb_a")
    ph_0 = tf.placeholder(dtype=tf.float32, shape=(1, 10,))
    dumb(ph_0)
    with tf.name_scope("foo"):
        dumb(ph_0)

    def check():
        sonnet_collection = tf.get_default_graph().get_collection(
            base_info.SONNET_COLLECTION_NAME)
        self.assertEqual(len(sonnet_collection), 1)
        self.assertEqual(len(sonnet_collection[0].connected_subgraphs), 2)
        connected_subgraph_0 = sonnet_collection[0].connected_subgraphs[0]
        connected_subgraph_1 = sonnet_collection[0].connected_subgraphs[1]
        self.assertEqual(connected_subgraph_0.name_scope, "dumb_a")
        self.assertEqual(connected_subgraph_1.name_scope, "foo/dumb_a")

    check()
    _copy_default_graph()
    check()
def testModuleInfo_sparsetensor(self):
    # pylint: disable=not-callable
    tf.reset_default_graph()
    dumb = DumbModule(name="dumb_a")
    sparse_tensor = tf.SparseTensor(
        indices=tf.placeholder(dtype=tf.int64, shape=(10, 2,)),
        values=tf.placeholder(dtype=tf.float32, shape=(10,)),
        dense_shape=tf.placeholder(dtype=tf.int64, shape=(2,)))
    dumb(sparse_tensor)

    def check():
        sonnet_collection = tf.get_default_graph().get_collection(
            base_info.SONNET_COLLECTION_NAME)
        connected_subgraph = sonnet_collection[0].connected_subgraphs[0]
        self.assertIsInstance(
            connected_subgraph.inputs["inputs"], tf.SparseTensor)
        self.assertIsInstance(connected_subgraph.outputs, tf.SparseTensor)

    check()
    _copy_default_graph()
    check()
def testModuleInfo_tuple(self):
    # pylint: disable=not-callable
    tf.reset_default_graph()
    dumb = DumbModule(name="dumb_a")
    ph_0 = tf.placeholder(dtype=tf.float32, shape=(1, 10,))
    ph_1 = tf.placeholder(dtype=tf.float32, shape=(1, 10,))
    dumb((ph_0, ph_1))

    def check():
        sonnet_collection = tf.get_default_graph().get_collection(
            base_info.SONNET_COLLECTION_NAME)
        connected_subgraph = sonnet_collection[0].connected_subgraphs[0]
        self.assertIsInstance(connected_subgraph.inputs["inputs"], tuple)
        self.assertIsInstance(connected_subgraph.outputs, tuple)

    check()
    _copy_default_graph()
    check()
def testModuleInfo_dict(self):
    # pylint: disable=not-callable
    tf.reset_default_graph()
    dumb = DumbModule(name="dumb_a")
    ph_0 = tf.placeholder(dtype=tf.float32, shape=(1, 10,))
    ph_1 = tf.placeholder(dtype=tf.float32, shape=(1, 10,))
    dumb({"ph_0": ph_0, "ph_1": ph_1})

    def check():
        sonnet_collection = tf.get_default_graph().get_collection(
            base_info.SONNET_COLLECTION_NAME)
        connected_subgraph = sonnet_collection[0].connected_subgraphs[0]
        self.assertIsInstance(connected_subgraph.inputs["inputs"], dict)
        self.assertIsInstance(connected_subgraph.outputs, dict)

    check()
    _copy_default_graph()
    check()
def testModuleInfo_recursion(self):
    # pylint: disable=not-callable
    tf.reset_default_graph()
    dumb = DumbModule(name="dumb_a", no_nest=True)
    ph_0 = tf.placeholder(dtype=tf.float32, shape=(1, 10,))
    val = {"one": ph_0, "self": None}
    val["self"] = val
    dumb(val)

    def check(check_type):
        sonnet_collection = tf.get_default_graph().get_collection(
            base_info.SONNET_COLLECTION_NAME)
        connected_subgraph = sonnet_collection[0].connected_subgraphs[0]
        self.assertIsInstance(connected_subgraph.inputs["inputs"]["one"],
                              tf.Tensor)
        self.assertIsInstance(
            connected_subgraph.inputs["inputs"]["self"], check_type)
        self.assertIsInstance(connected_subgraph.outputs["one"], tf.Tensor)
        self.assertIsInstance(connected_subgraph.outputs["self"], check_type)

    check(dict)
    _copy_default_graph()
    check(base_info._UnserializableObject)
def _check_same_graph(self):
    """Checks that the module is not being connected to multiple Graphs.

    An instance of a Sonnet module 'owns' the variables it contains, and
    permits seamless variable sharing. As such, connecting a single module
    instance to multiple Graphs is not possible - this function will raise
    an error should that occur.

    Raises:
      DifferentGraphError: if the module is connected to a different Graph
        than it was previously used in.
    """
    current_graph = tf.get_default_graph()
    if self._graph is None:
        self._graph = current_graph
        self._set_module_info()
    elif self._graph != current_graph:
        raise DifferentGraphError("Cannot connect module to multiple Graphs.")
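A sketch of how this check fires in practice, assuming a stock Sonnet 1.x module (snt.Linear here) connected under two different default graphs:

import sonnet as snt
import tensorflow as tf

lin = snt.Linear(output_size=8)
with tf.Graph().as_default():
    lin(tf.placeholder(tf.float32, shape=(None, 4)))  # first connection: OK
with tf.Graph().as_default():
    # Different default graph -> _check_same_graph raises DifferentGraphError.
    lin(tf.placeholder(tf.float32, shape=(None, 4)))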
def testInferFeatureSchema(self):
    d = tf.placeholder(tf.int64, None)
    tensors = {
        'a': tf.placeholder(tf.float32, (None,)),
        'b': tf.placeholder(tf.string, (1, 2, 3)),
        'c': tf.placeholder(tf.int64, None),
        'd': d
    }
    d_column_schema = sch.ColumnSchema(tf.int64, [1, 2, 3],
                                       sch.FixedColumnRepresentation())
    api.set_column_schema(d, d_column_schema)
    schema = impl_helper.infer_feature_schema(tf.get_default_graph(), tensors)
    expected_schema = sch.Schema(column_schemas={
        'a': sch.ColumnSchema(tf.float32, [],
                              sch.FixedColumnRepresentation()),
        'b': sch.ColumnSchema(tf.string, [2, 3],
                              sch.FixedColumnRepresentation()),
        'c': sch.ColumnSchema(tf.int64, None,
                              sch.FixedColumnRepresentation()),
        'd': sch.ColumnSchema(tf.int64, [1, 2, 3],
                              sch.FixedColumnRepresentation())
    })
    self.assertEqual(schema, expected_schema)
def _print_info(self, data_set, verbose):
    logger.info('Config:')
    logger.info(pprint.pformat(self.cnf))
    data_set.print_info()
    logger.info('Max epochs: %d' % self.num_epochs)
    if verbose > 0:
        util.show_vars(logger, self.trainable_scopes)
        # logger.debug("\n---Number of Regularizable vars in model:")
        # logger.debug(len(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)))
    if verbose > 3:
        all_ops = tf.get_default_graph().get_operations()
        logger.debug("\n---All ops in graph")
        names = map(lambda v: v.name, all_ops)
        for n in sorted(names):
            logger.debug(n)
    util.show_layer_shapes(self.training_end_points, logger)
def _define_saver_summary(self, summary=True):
    """Create Summary and Saver.

    Args:
        logdir_train: Path to train summary directory
        logdir_test: Path to test summary directory
    """
    if (self.logdir_train is None) or (self.logdir_test is None):
        raise ValueError('Train/Test directory not assigned')
    else:
        with tf.device(self.cpu):
            self.saver = tf.train.Saver()
        if summary:
            with tf.device(self.gpu):
                self.train_summary = tf.summary.FileWriter(self.logdir_train, tf.get_default_graph())
                self.test_summary = tf.summary.FileWriter(self.logdir_test)
                # self.weight_summary = tf.summary.FileWriter(self.logdir_train, tf.get_default_graph())
def _initialize_weights(self):
    all_weights = dict()
    if self.pretrain_flag > 0:
        weight_saver = tf.train.import_meta_graph(self.save_file + '.meta')
        pretrain_graph = tf.get_default_graph()
        feature_embeddings = pretrain_graph.get_tensor_by_name('feature_embeddings:0')
        feature_bias = pretrain_graph.get_tensor_by_name('feature_bias:0')
        bias = pretrain_graph.get_tensor_by_name('bias:0')
        with tf.Session() as sess:
            weight_saver.restore(sess, self.save_file)
            fe, fb, b = sess.run([feature_embeddings, feature_bias, bias])
        all_weights['feature_embeddings'] = tf.Variable(fe, dtype=tf.float32)
        all_weights['feature_bias'] = tf.Variable(fb, dtype=tf.float32)
        all_weights['bias'] = tf.Variable(b, dtype=tf.float32)
    else:
        all_weights['feature_embeddings'] = tf.Variable(
            tf.random_normal([self.features_M, self.hidden_factor], 0.0, 0.01),
            name='feature_embeddings')  # features_M * K
        all_weights['feature_bias'] = tf.Variable(
            tf.random_uniform([self.features_M, 1], 0.0, 0.0), name='feature_bias')  # features_M * 1
        all_weights['bias'] = tf.Variable(tf.constant(0.0), name='bias')  # 1 * 1
    return all_weights
def Get_Pre_Trained_Weights(input_vars, name):
    with open("vgg16.tfmodel", mode='rb') as f:
        fileContent = f.read()
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(fileContent)
    images = tf.placeholder(tf.float32, shape=(None, 64, 64, 3), name=name)
    tf.import_graph_def(graph_def, input_map={"images": images})
    print "graph loaded from disk"
    graph = tf.get_default_graph()
    with tf.Session() as sess:
        init = tf.initialize_all_variables()
        sess.run(init)
        # batch = np.reshape(input_vars, (-1, 224, 224, 3))
        n_timewin = 7
        convnets = []
        for i in xrange(n_timewin):
            feed_dict = {images: input_vars[:, i, :, :, :]}
            pool_tensor = graph.get_tensor_by_name("import/pool5:0")
            pool_tensor = sess.run(pool_tensor, feed_dict=feed_dict)
            convnets.append(tf.contrib.layers.flatten(pool_tensor))
        convpool = tf.pack(convnets, axis=1)
    return convpool
Source: svnh_semi_supervised_model_loaded_test.py, from the project tf_serving_example by Vetal1977.
def load_and_predict_with_saved_model():
    '''
    Loads a model saved as protobuf and makes a prediction on a single image
    '''
    with tf.Session(graph=tf.Graph()) as sess:
        # restore the saved model
        export_dir = './gan-export/1'
        model = tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING], export_dir)
        # print(model)
        loaded_graph = tf.get_default_graph()
        # get the necessary tensors by name
        input_tensor_name = model.signature_def['predict_images'].inputs['images'].name
        input_tensor = loaded_graph.get_tensor_by_name(input_tensor_name)
        output_tensor_name = model.signature_def['predict_images'].outputs['scores'].name
        output_tensor = loaded_graph.get_tensor_by_name(output_tensor_name)
        # make a prediction
        image_file_name = './svnh_test_images/image_3.jpg'
        with open(image_file_name, 'rb') as f:
            image = f.read()
        scores = sess.run(output_tensor, {input_tensor: [image]})
        # print the results
        print("Scores: {}".format(scores))
def train_dynamic(self):
    print("inside train")
    model_spec = self.get_model_by_name(MyBatch.dynamic_model)
    # print(" action for a dynamic model", model_spec)
    session = self.pipeline.get_variable("session")
    with self.pipeline.get_variable("print lock"):
        print("\n\n ================= train dynamic ====================")
        print("----- default graph")
        # print(tf.get_default_graph().get_operations())
        print("----- session graph")
        print(session.graph.get_operations())
    input_data, model_output = model_spec
    res = session.run(model_output, feed_dict={input_data: self.data})
    self.pipeline.get_variable("loss history").append(res)
    # print(" ", int(res))
    return self
def eval_tensor(sess, input_tensor_name, input_val, output_tensor_name):
    """Get the output value of a specific tensor.

    Assumes the default graph is used.

    Args:
        sess: tf session object.
        input_tensor_name: name of the input tensor.
        input_val: input value to the network.
        output_tensor_name: name of the output tensor.

    Returns:
        result of output tensor.
    """
    cur_graph = tf.get_default_graph()
    input_tensor = cur_graph.get_tensor_by_name(input_tensor_name)
    output_tensor = cur_graph.get_tensor_by_name(output_tensor_name)
    out_val = sess.run(output_tensor, feed_dict={input_tensor: input_val})
    return out_val
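A hedged usage sketch; the tensor names and input shape are assumptions that depend on whatever network was loaded into the default graph:

import numpy as np

with tf.Session() as sess:
    dummy = np.zeros((1, 224, 224, 3), dtype=np.float32)  # illustrative input
    logits = eval_tensor(sess, 'input:0', dummy, 'logits:0')  # illustrative names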
def conv_block(name, input_layer, kernel_size, out_channels):
    """
    Per Ulyanov et al., this is a block consisting of
    - Mirror pad (TODO)
    - Number of maps from a convolutional layer equal to out_channels (multiples of 8)
    - Spatial BatchNorm
    - LeakyReLU
    """
    with tf.get_default_graph().name_scope(name):
        in_channels = input_layer.get_shape().as_list()[-1]
        # Xavier initialization, http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf
        # The application of this method here seems unorthodox since we're using ReLU, not sigmoid or tanh.
        low = -np.sqrt(6.0 / (in_channels + out_channels))
        high = np.sqrt(6.0 / (in_channels + out_channels))
        weights = tf.Variable(tf.random_uniform([kernel_size, kernel_size, in_channels, out_channels], minval=low, maxval=high), name='weights')
        biases = tf.Variable(tf.random_uniform([out_channels], minval=low, maxval=high), name='biases')
        # TODO: Mirror pad the conv2d? I'm not sure how important this is.
        conv = conv2d(input_layer, weights, biases)
        batch_norm = spatial_batch_norm(conv)
        relu = leaky_relu(batch_norm, .01)
        return relu
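A brief usage sketch, assuming the project's helpers (conv2d, spatial_batch_norm, leaky_relu) are in scope and the input is a 4-D image batch (the shape is illustrative):

images = tf.placeholder(tf.float32, shape=(None, 256, 256, 3))
block_1 = conv_block('block_1', images, kernel_size=3, out_channels=32)
block_2 = conv_block('block_2', block_1, kernel_size=3, out_channels=64)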
def __init__(self, sess, network, imdb, roidb, output_dir, logdir, pretrained_model=None):
    """Initialize the SolverWrapper."""
    self.net = network
    self.imdb = imdb
    self.roidb = roidb
    self.output_dir = output_dir
    self.pretrained_model = pretrained_model
    print 'Computing bounding-box regression targets...'
    if cfg.TRAIN.BBOX_REG:
        self.bbox_means, self.bbox_stds = rdl_roidb.add_bbox_regression_targets(roidb)
    print 'done'
    # For checkpoint
    self.saver = tf.train.Saver(max_to_keep=100)
    self.writer = tf.summary.FileWriter(logdir=logdir,
                                        graph=tf.get_default_graph(),
                                        flush_secs=5)