def max_sentence_similarity(sentence_input, similarity_matrix):
    """
    Parameters
    ----------
    sentence_input: Tensor
        Tensor of shape (batch_size, num_sentence_words, rnn_hidden_dim).
    similarity_matrix: Tensor
        Tensor of shape (batch_size, num_sentence_words, num_sentence_words).
    """
    def single_instance(inputs):
        single_sentence = inputs[0]
        argmax_index = inputs[1]
        # Shape: (num_sentence_words, rnn_hidden_dim)
        return tf.gather(single_sentence, argmax_index)
    # Shape: (batch_size, passage_len)
    question_index = tf.argmax(similarity_matrix, 2)
    elems = (sentence_input, question_index)
    # Shape: (batch_size, num_sentence_words, rnn_hidden_dim)
    return tf.map_fn(single_instance, elems, dtype=tf.float32)
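A quick shape check of the function above, as a minimal sketch (the toy sizes are assumptions, not from the original project):

import tensorflow as tf

sentence_input = tf.random_normal([2, 3, 4])     # (batch_size, num_sentence_words, rnn_hidden_dim)
similarity_matrix = tf.random_normal([2, 3, 3])  # (batch_size, num_sentence_words, num_sentence_words)
gathered = max_sentence_similarity(sentence_input, similarity_matrix)
with tf.Session() as sess:
    print(sess.run(gathered).shape)  # (2, 3, 4)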
Python tf.map_fn() usage examples
def build_inputs_and_outputs(self):
    if self.frame_features:
        serialized_examples = tf.placeholder(tf.string, shape=(None,))
        fn = lambda x: self.build_prediction_graph(x)
        video_id_output, top_indices_output, top_predictions_output = (
            tf.map_fn(fn, serialized_examples,
                      dtype=(tf.string, tf.int32, tf.float32)))
    else:
        serialized_examples = tf.placeholder(tf.string, shape=(None,))
        video_id_output, top_indices_output, top_predictions_output = (
            self.build_prediction_graph(serialized_examples))
    inputs = {"example_bytes":
              saved_model_utils.build_tensor_info(serialized_examples)}
    outputs = {
        "video_id": saved_model_utils.build_tensor_info(video_id_output),
        "class_indexes": saved_model_utils.build_tensor_info(top_indices_output),
        "predictions": saved_model_utils.build_tensor_info(top_predictions_output)}
    return inputs, outputs
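The dtype tuple above tells tf.map_fn that fn returns three tensors per element. A minimal sketch of the same multi-output pattern (toy values, assumed):

import tensorflow as tf

x = tf.constant([1.5, 2.5, 3.5])
doubled, truncated = tf.map_fn(
    lambda v: (v * 2., tf.cast(v, tf.int32)),  # two outputs per element
    x, dtype=(tf.float32, tf.int32))
# doubled is float32 [3]; truncated is int32 [3] (values truncated toward zero)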
def _get_bbox_pred(self, proposed_boxes, gt_boxes_per_class):
    """Computes valid bbox_pred from proposals and gt_boxes for each class.

    Args:
        proposed_boxes: Tensor with shape (num_proposals, 5).
        gt_boxes_per_class: Tensor holding the ground truth boxes for each
            class. Has shape (num_classes, num_gt_boxes_per_class, 4).

    Returns:
        A tensor with shape (num_proposals, num_classes * 4), holding the
        correct bbox_preds.
    """
    def bbox_encode(gt_boxes):
        return encode(
            proposed_boxes, gt_boxes
        )
    bbox_pred_tensor = tf.map_fn(
        bbox_encode, gt_boxes_per_class,
        dtype=tf.float32
    )
    # We need to explicitly unstack the tensor so that tf.concat works
    # properly.
    bbox_pred_list = tf.unstack(bbox_pred_tensor)
    return tf.concat(bbox_pred_list, 1)
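The unstack/concat pair folds the leading class axis into the column axis; assuming bbox_pred_tensor has shape (num_classes, num_proposals, 4), an equivalent transpose/reshape sketch is:

bbox_pred = tf.reshape(
    tf.transpose(bbox_pred_tensor, [1, 0, 2]),  # (num_proposals, num_classes, 4)
    [tf.shape(bbox_pred_tensor)[1], -1])        # (num_proposals, num_classes * 4)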
def _build(self, X):
    """Build the graph of this layer."""
    n_samples, input_dim = self._get_X_dims(X)
    W_shape, _ = self._weight_shapes(self.n_categories)
    n_batch = tf.shape(X)[1]

    # Layer weights
    self.pW = _make_prior(self.std, self.pW, W_shape)
    self.qW = _make_posterior(self.std, self.qW, W_shape, self.full)

    # Index into the relevant weights rather than using sparse matmul
    Wsamples = _sample_W(self.qW, n_samples)
    features = tf.map_fn(lambda wx: tf.gather(*wx, axis=0), (Wsamples, X),
                         dtype=Wsamples.dtype)

    # Now concatenate the resulting features on the last axis
    f_dims = int(np.prod(features.shape[2:]))  # need this for placeholders
    Net = tf.reshape(features, [n_samples, n_batch, f_dims])

    # Regularizers
    KL = kl_sum(self.qW, self.pW)
    return Net, KL
def _build(self, X):
    """Build the graph of this layer."""
    n_samples, input_shape = self._get_X_dims(X)
    Wdim = tuple(input_shape) + (self.output_dim,)

    W = tf.Variable(tf.random_normal(shape=Wdim, seed=next(seedgen)),
                    name="W_map")

    # We don't want to copy tf.Variable W so map over X
    Net = tf.map_fn(lambda x: tf.matmul(x, W), X)

    # Regularizers
    penalty = self.l2 * tf.nn.l2_loss(W) + self.l1 * _l1_loss(W)

    # Optional Bias
    if self.use_bias is True:
        b = tf.Variable(tf.random_normal(shape=(1, self.output_dim),
                                         seed=next(seedgen)), name="b_map")
        Net += b
        penalty += self.l2 * tf.nn.l2_loss(b) + self.l1 * _l1_loss(b)

    return Net, penalty
def run(proposals, gt, device='/cpu:0'):
    with tf.device(device):
        proposals = tf.expand_dims(proposals, axis=1)
        proposals = tf.tile(proposals, [1, M, 1])
        gt = tf.expand_dims(gt, axis=0)
        gt = tf.tile(gt, [N, 1, 1])
        proposals = tf.reshape(proposals, (N * M, d))
        gt = tf.reshape(gt, (N * M, d))
        # shape is N*M x 1
        iou_metric = tf.map_fn(model.iou, tf.stack([proposals, gt], axis=1))
        iou_metric = tf.reshape(iou_metric, [N, M])
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(iou_metric)
        # result is 2min48s
def __call__(self, s_embed, s_src_pwr, s_mix_pwr, s_embed_flat=None):
    if s_embed_flat is None:
        s_embed_flat = tf.reshape(
            s_embed,
            [hparams.BATCH_SIZE, -1, hparams.EMBED_SIZE])
    with tf.variable_scope(self.name):
        s_src_assignment = tf.argmax(s_src_pwr, axis=1)
        s_indices = tf.reshape(
            s_src_assignment,
            [hparams.BATCH_SIZE, -1])
        fn_segmean = lambda _: tf.unsorted_segment_sum(
            _[0], _[1], hparams.MAX_N_SIGNAL)
        s_attractors = tf.map_fn(
            fn_segmean, (s_embed_flat, s_indices), hparams.FLOATX)
        s_attractors_wgt = tf.map_fn(
            fn_segmean, (tf.ones_like(s_embed_flat), s_indices),
            hparams.FLOATX)
        s_attractors /= (s_attractors_wgt + 1.)
    if hparams.DEBUG:
        self.debug_fetches = dict()
    # float[B, C, E]
    return s_attractors
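The same per-cluster mean trick in a self-contained sketch (all names and sizes here are assumptions, not the project's hparams):

import tensorflow as tf

B, T, E, C = 4, 100, 20, 2  # batch, time-frequency bins, embed dim, clusters
embed = tf.random_normal([B, T, E])
assign = tf.random_uniform([B, T], maxval=C, dtype=tf.int64)  # hard cluster labels

fn_segsum = lambda args: tf.unsorted_segment_sum(args[0], args[1], C)
sums = tf.map_fn(fn_segsum, (embed, assign), dtype=tf.float32)                  # [B, C, E]
counts = tf.map_fn(fn_segsum, (tf.ones_like(embed), assign), dtype=tf.float32)  # [B, C, E]
attractors = sums / (counts + 1.)  # smoothed per-cluster means, as above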
Source file: video_input.py (project: tensorflow_video_classification_LSTM, author: frankgu)
def __init__(self, config, data):
    self.batch_size = batch_size = config['batch_size']
    self.num_steps = num_steps = config['num_steps']
    self.epoch_size = (data.num_examples_per_epoch() // batch_size) - 1

    # input_data size: [batch_size, num_steps]
    # targets size: [batch_size]
    self.input_data, self.targets, self.filenames = distorted_inputs(
        data, config)

    # Data preprocessing: input_data
    #   string tensor [batch_size, num_steps] =>
    #   num_steps * [batch_size, height*width*channels]
    self.input_data = tf.map_fn(
        decode_video, self.input_data, dtype=tf.float32)
    self.input_data = tf.reshape(
        self.input_data, [batch_size, num_steps, -1])
    self.input_data = [tf.squeeze(input_step, [1])
                       for input_step in tf.split(self.input_data, num_steps, 1)]
def bbox_to_mask(bbox, region_size, output_size, dtype=tf.float32):
    """Creates a binary mask of size `region_size` where the rectangle given by
    `bbox` is filled with ones and the rest is zeros. Finally, the binary mask
    is resized to `output_size` with bilinear interpolation.

    :param bbox: tensor of shape (..., 4)
    :param region_size: tensor of shape (..., 2)
    :param output_size: 2-tuple of ints
    :param dtype: tf.dtype
    :return: a tensor of shape (..., output_size)
    """
    shape = tf.concat(axis=0, values=(tf.shape(bbox)[:-1], output_size))
    bbox = tf.reshape(bbox, (-1, 4))
    region_size = tf.reshape(region_size, (-1, 2))

    def create_mask(args):
        yy, region_size = args
        return _bbox_to_mask_fixed_size(yy, region_size, output_size, dtype)

    mask = tf.map_fn(create_mask, (bbox, region_size), dtype=dtype)
    return tf.reshape(mask, shape)
def vec_to_tri(vectors, N):
    """
    Takes a D x M tensor `vectors` and maps it to a D x N x N tensor, where
    the lower triangle of each N x N matrix is constructed by unpacking the
    corresponding M-vector. M must be the N-th triangle number, i.e.
    2 * M == N * (N + 1).

    Native TensorFlow version of a custom op by Mark van der Wilk.
    """
    indices = list(zip(*np.tril_indices(N)))
    indices = tf.constant([list(i) for i in indices], dtype=tf.int64)

    def vec_to_tri_vector(vector):
        return tf.scatter_nd(indices=indices, shape=[N, N], updates=vector)

    return tf.map_fn(vec_to_tri_vector, vectors)
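For example, a length-6 vector packs the lower triangle of a 3 x 3 matrix (toy values):

import numpy as np
import tensorflow as tf

vectors = tf.constant([[1., 2., 3., 4., 5., 6.]])  # D=1, M=6, so N=3
tri = vec_to_tri(vectors, 3)
with tf.Session() as sess:
    print(sess.run(tri))
    # [[[1. 0. 0.]
    #   [2. 3. 0.]
    #   [4. 5. 6.]]]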
def mvn_mix_log_probs(samples, q, ndims, num_components=3):
    '''Calculate the log probabilities of a MVN mixture model.
    Assumes q is [batchsize, numparams].'''
    pi = tf.nn.softmax(q[:, :num_components])
    mu = tf.reshape(q[:, num_components:num_components * (1 + ndims)],
                    [-1, num_components, ndims])
    chol_q = q[:, num_components * (1 + ndims):]
    chol = unpack_cholesky(chol_q, ndims, num_components)
    log_probs = []
    for c in xrange(num_components):
        packed_params = tf.concat(axis=1, values=[
            mu[:, c, :],
            tf.reshape(chol[:, c, :, :], [-1, ndims * ndims]),
            samples])
        log_p = tf.map_fn(
            lambda x: chol_mvn(
                x[:ndims],
                tf.reshape(x[ndims:ndims * (1 + ndims)], [ndims, ndims])
            ).log_prob(x[ndims * (1 + ndims):]),
            packed_params)
        log_probs.append(log_p)
    log_probs = tf.transpose(tf.reshape(
        tf.concat(axis=0, values=log_probs), [num_components, -1]))
    log_probs = tf.log(pi) + log_probs
    return log_sum_exp(log_probs)
#######################################################################
################ PixelCNN++ utils #####################################
# Some code below taken from OpenAI PixelCNN++ implementation: https://github.com/openai/pixel-cnn
def get_embedding_graph(self):
    data = tf.placeholder(tf.int32, shape=[None, None], name='data')
    embeddings = tf.constant(
        self.indexer.vectors, tf.float32, name='embeddings')
    vectors = tf.map_fn(
        lambda d: tf.nn.embedding_lookup(embeddings, d),
        data,
        tf.float32)
    padded = tf.pad(
        vectors,
        [[0, 0], [0, self.max_length - tf.shape(vectors)[1]], [0, 0]])
    return {
        'padded': padded,
        'data': data
    }
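Since tf.nn.embedding_lookup already accepts a [batch, time] index tensor, the map_fn above appears avoidable; an equivalent one-liner would be:

vectors = tf.nn.embedding_lookup(embeddings, data)  # [batch, time, embed_dim]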
def compute_detections_batch(segs, boxes, num_keep,
                             seg_threshold=0.2,
                             sigma=5e-3, step=0.2, num_iters=20,
                             dist_threshold=20.0,
                             iou_threshold=0.5,
                             nms_kind='greedy'):
    if nms_kind == 'greedy':
        # TODO: rename it to CRF?
        _compute_frame = (lambda x: compute_detections_greedy(
            x[0], x[1], num_keep, seg_threshold,
            sigma, step, num_iters, dist_threshold))
    elif nms_kind == 'nms':
        _compute_frame = (lambda x: compute_detections_nms(
            x[0], x[1], num_keep, seg_threshold, iou_threshold))
    boxes, confidence = tf.map_fn(_compute_frame, (segs, boxes))
    return boxes, confidence
def get_accuracy(self, x_test_home, x_test_away, y_test, keep_prop=1.0):
    """
    The predictions from x_test_home and x_test_away are mapped to 1 or 0
    depending on whether the home team wins or not. The result is then
    compared with y_test, the ground truth.
    """
    predict = tf.map_fn(
        lambda x: x[0] > x[1],
        self.sess.run(
            self.hypothesis,
            feed_dict={
                self.X_home: x_test_home,
                self.X_away: x_test_away,
                self.Y: y_test,
                self.keep_prob: keep_prop}
        ),
        dtype=bool)
    real = tf.map_fn(
        lambda x: x[0] > x[1],
        y_test,
        dtype=bool)
    return self.sess.run(
        tf.divide(
            tf.reduce_sum(tf.cast(tf.equal(predict, real), dtype=tf.int32)),
            len(y_test)))
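Note that this method builds new ops on every call. The comparison can instead be defined once in the graph; a sketch assuming self.hypothesis and self.Y are [batch, 2] tensors (names reused from above):

correct = tf.equal(tf.argmax(self.hypothesis, axis=1), tf.argmax(self.Y, axis=1))
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))  # fraction of correct predictions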
def process_leafs(self, emb):
    with tf.variable_scope("Composition", reuse=True):
        cU = tf.get_variable("cU", [self.emb_dim, 2 * self.hidden_dim])
        cb = tf.get_variable("cb", [4 * self.hidden_dim])
        b = tf.slice(cb, [0], [2 * self.hidden_dim])

        def _recurseleaf(x):
            concat_uo = tf.matmul(tf.expand_dims(x, 0), cU) + b
            u, o = tf.split(axis=1, num_or_size_splits=2, value=concat_uo)
            o = tf.nn.sigmoid(o)
            u = tf.nn.tanh(u)
            c = u  # tf.squeeze(u)
            h = o * tf.nn.tanh(c)
            hc = tf.concat(axis=1, values=[h, c])
            hc = tf.squeeze(hc)
            return hc

        hc = tf.map_fn(_recurseleaf, emb)
        return hc
def process_leafs(self, emb):
    with tf.variable_scope("Composition", reuse=True):
        cUW = tf.get_variable("cUW")
        cb = tf.get_variable("cb")
        U = tf.slice(cUW, [0, 0], [self.emb_dim, 2 * self.hidden_dim])
        b = tf.slice(cb, [0], [2 * self.hidden_dim])

        def _recurseleaf(x):
            concat_uo = tf.matmul(tf.expand_dims(x, 0), U) + b
            u, o = tf.split(axis=1, num_or_size_splits=2, value=concat_uo)
            o = tf.nn.sigmoid(o)
            u = tf.nn.tanh(u)
            c = u  # tf.squeeze(u)
            h = o * tf.nn.tanh(c)
            hc = tf.concat(axis=1, values=[h, c])
            hc = tf.squeeze(hc)
            return hc

        hc = tf.map_fn(_recurseleaf, emb)
        return hc
Source file: new_data_mlp.py (project: NVDM-For-Document-Classification, author: cryanzpj)
def thres_search(data, label, n):
    res = []
    for i in range(n):
        n_label = tf.cast(tf.reduce_sum(label[i]), tf.int32)
        temp = tf.multiply(data[i], label[i])
        temp = tf.reshape(tf.nn.top_k(temp, n_label + 1).values, [1, 1, -1, 1])
        thres = tf.reshape(tf.contrib.layers.avg_pool2d(temp, [1, 2], [1, 1]), [-1, 1])
        predicts = tf.map_fn(lambda x: tf.cast(tf.greater_equal(data[i], x), tf.float32), thres)
        f1_scores = tf.map_fn(lambda x: f1(x, label[i]), predicts)
        thres_opt = thres[tf.cast(tf.argmax(f1_scores, 0), tf.int32)]
        res.append(thres_opt)
        # R = tf.map_fn(lambda x: tf.contrib.metrics.streaming_recall(x, label[i])[0], predicts)
        # P = tf.map_fn(lambda x: tf.contrib.metrics.streaming_precision(x, label[i])[0], predicts)
        # thres_opt = thres[np.argsort(map(lambda x: metrics.f1_score(x, sess.run(label[i]), average="macro"), predicts))[-1]]
    return tf.reshape(res, [-1])
Source file: question_encoding.py (project: Constituent-Centric-Neural-Architecture-for-Reading-Comprehension, author: shrshore)
def process_leafs(self, emb):
    # emb: [num_leaves, emb_dim]
    with tf.variable_scope("btp_Composition", reuse=True):
        cU = tf.get_variable("cU", [self.emb_dim, 2 * self.hidden_dim])
        cb = tf.get_variable("cb", [4 * self.hidden_dim])
        # cb stacks biases for the input gate, forget gate, output gate and
        # input value; leaves only need the first 2*hidden_dim entries
        # (output gate and input value).
        b = tf.slice(cb, [0], [2 * self.hidden_dim])

        # x: [emb_dim]
        def _recurseleaf(x):
            # [1, emb_dim] x [emb_dim, 2*hidden_dim]
            concat_uo = tf.matmul(tf.expand_dims(x, 0), cU) + b
            # split concat_uo into two halves: [1, hidden_dim] each
            u, o = tf.split(axis=1, num_or_size_splits=2, value=concat_uo)
            o = tf.nn.sigmoid(o)
            u = tf.nn.tanh(u)
            c = u  # tf.squeeze(u)
            h = o * tf.nn.tanh(c)
            hc = tf.concat(axis=1, values=[h, c])
            hc = tf.squeeze(hc)
            return hc

        hc = tf.map_fn(_recurseleaf, emb)
        # hc: [num_leaves, 2*hidden_dim]
        return hc
Source file: genericDataSetLoader.py (project: Melanoma-Cancer-Detection-V1, author: vgupta-ai)
def standardizeImages(self):
    print "Standardizing Images..."
    self.trainingDataXStandardized = []
    self.testingDataXStandardized = []
    with tf.Session() as sess:
        for i in range(self.trainingDataX.shape[0]):
            print str(i) + "/" + str(self.trainingDataX.shape[0])
            self.trainingDataXStandardized.append(
                tf.image.per_image_standardization(self.trainingDataX[i]).eval())
        for i in range(self.testingDataX.shape[0]):
            print str(i) + "/" + str(self.testingDataX.shape[0])
            self.testingDataXStandardized.append(
                tf.image.per_image_standardization(self.testingDataX[i]).eval())
    # self.trainingDataX = tf.map_fn(lambda img: tf.image.per_image_standardization(img), self.trainingDataX, dtype=tf.float32)
    # self.testingDataX = tf.map_fn(lambda img: tf.image.per_image_standardization(img), self.testingDataX, dtype=tf.float32)
    # print self.trainingDataXStandardized[0]
    self.trainingDataX = np.array(self.trainingDataXStandardized)
    self.testingDataX = np.array(self.testingDataXStandardized)
    print self.testingDataX.shape
    print self.trainingDataX.shape
    # with tf.Session() as sess:
    #     self.trainingDataX = self.trainingDataX.eval()
    #     self.testingDataX = self.testingDataX.eval()
    print "Images standardized...Saving them..."
    self.__save("preparedDataStandardized.pkl")
Source file: genericDataSetLoader.py (project: Melanoma-Cancer-Detection-V1, author: vgupta-ai)
def _createBatchAndStandardize(self, imageDataArray, batchSize):
    i = 0
    standardizedImagesBatch = None
    standardizedImages = None
    totalNumImages = imageDataArray.shape[0]
    print "Total Number of images:" + str(totalNumImages)
    while i < totalNumImages:
        minIndx = i
        maxIndx = min(imageDataArray.shape[0], i + batchSize)
        print str(i) + "/" + str(imageDataArray.shape[0])
        i = i + batchSize
        print i
        standardizedImagesBatch = tf.map_fn(
            lambda img: tf.image.per_image_standardization(img),
            imageDataArray[minIndx:maxIndx], dtype=tf.float32)
        if standardizedImages is None:
            standardizedImages = standardizedImagesBatch.eval()
        else:
            standardizedImages = np.vstack((standardizedImages,
                                            standardizedImagesBatch.eval()))
    return standardizedImages
def prepare_serialized_examples(self, serialized_examples):
    feature_map = {
        'image_raw': tf.FixedLenFeature([784], tf.int64),
        'label': tf.FixedLenFeature([], tf.int64),
    }
    features = tf.parse_example(serialized_examples, features=feature_map)
    images = tf.cast(features["image_raw"], tf.float32) * (1. / 255)
    labels = tf.cast(features['label'], tf.int32)

    def dense_to_one_hot(label_batch, num_classes):
        one_hot = tf.map_fn(
            lambda x: tf.cast(slim.one_hot_encoding(x, num_classes), tf.int32),
            label_batch)
        one_hot = tf.reshape(one_hot, [-1, num_classes])
        return one_hot

    labels = dense_to_one_hot(labels, 10)
    return images, labels
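The per-label map over slim.one_hot_encoding can likely be replaced by a single built-in op; a sketch assuming labels is a 1-D int32 tensor:

one_hot_labels = tf.one_hot(labels, depth=10, dtype=tf.int32)  # [batch, 10]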