def f1_score_keras(y_true, y_pred):
# convert probas to 0,1
y_pred_ones = K.zeros_like(y_true)
# y_pred_ones[:, K.argmax(y_pred, axis=-1)] = 1
# indices_x = K.arange(start=0, stop=y_true.get_shape()[0])
indices_x = K.expand_dims(K.arange(start=0, stop=tf.shape(y_true)[0], dtype='int64'), dim=-1)
indices_y = K.expand_dims(K.argmax(y_pred, axis=-1), dim=-1)
indices = K.concatenate((indices_x, indices_y))
values = K.sum(K.ones_like(indices_x, dtype='float32'), axis=-1)
shape = K.cast(tf.shape(y_pred_ones), dtype='int64')
delta = tf.SparseTensor(indices, values, shape)
y_pred_ones = y_pred_ones + tf.sparse_tensor_to_dense(delta)
# where y_true=1 and y_pred=1 -> true positive
y_true_pred = K.sum(y_true * y_pred_ones, axis=0)
# for each class: how many were classified as said class
pred_cnt = K.sum(y_pred_ones, axis=0)
# for each class: how many are true members of said class
gold_cnt = K.sum(y_true, axis=0)
# precision for each class
precision = tf.select(K.equal(pred_cnt, 0), K.zeros_like(y_true_pred), y_true_pred / pred_cnt,
name='precision_f1_semeval')
# recall for each class
recall = tf.select(K.equal(gold_cnt, 0), K.zeros_like(y_true_pred), y_true_pred / gold_cnt,
name='recall_f1_semeval')
# f1 for each class
f1_class = tf.select(K.equal(precision + recall, 0), K.zeros_like(y_true_pred),
2 * (precision * recall) / (precision + recall), name='f1_class_f1_semeval')
# return average f1 score over all classes
return K.mean(f1_class)
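# Usage sketch (an assumption, not part of the original snippet): with a Keras
# model whose softmax output matches one-hot labels, f1_score_keras can be
# passed to compile() like any built-in metric. The tiny model is hypothetical.
from keras.models import Sequential
from keras.layers import Dense

demo_model = Sequential([Dense(3, activation='softmax', input_shape=(10,))])
demo_model.compile(optimizer='adam',
                   loss='categorical_crossentropy',
                   metrics=[f1_score_keras])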
def f1_score_semeval(y_true, y_pred):
#convert probas to 0,1
y_pred_ones = K.zeros_like(y_true)
#y_pred_ones[:, K.argmax(y_pred, axis=-1)] = 1
#indices_x = K.arange(start=0, stop=y_true.get_shape()[0])
indices_x = K.expand_dims(K.arange(start=0, stop=tf.shape(y_true, name='get_indices_x_shape')[0], dtype='int64'), dim=-1)
indices_y = K.expand_dims(K.argmax(y_pred, axis=-1), dim=-1)
indices = K.concatenate((indices_x, indices_y))
values = K.sum(K.ones_like(indices_x, dtype='float32'), axis=-1)
shape = K.cast(tf.shape(y_pred_ones), dtype='int64')
delta = tf.SparseTensor(indices, values, shape)
y_pred_ones = y_pred_ones + tf.sparse_tensor_to_dense(delta)
#where y_true=1 and y_pred=1 -> true positive
y_true_pred = K.sum(y_true*y_pred_ones, axis=0)
#for each class: how many were classified as said class
pred_cnt = K.sum(y_pred_ones, axis=0)
#for each class: how many are true members of said class
gold_cnt = K.sum(y_true, axis=0)
#precision for each class
precision = tf.select(K.equal(pred_cnt, 0), K.zeros_like(y_true_pred), y_true_pred/pred_cnt, name='precision_f1_semeval')
#recall for each class
recall = tf.select(K.equal(gold_cnt, 0), K.zeros_like(y_true_pred), y_true_pred/gold_cnt, name='recall_f1_semeval')
#f1 for each class
f1_class = tf.select(K.equal(precision + recall, 0), K.zeros_like(y_true_pred), 2*(precision*recall)/(precision+recall), name='f1_class_f1_semeval')
#SemEval F1: average the F1 scores of classes 0 and 2 only (the neutral class is excluded)
return (f1_class[0] + f1_class[2])/2.0
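# Sanity-check sketch (an assumption, requiring the TF 0.x-era APIs used above,
# where tf.select exists): evaluate the metric on a toy batch; class 1 plays the
# neutral role that the SemEval average skips.
import numpy as np

toy_true = tf.constant(np.eye(3)[[0, 2, 2, 1]], dtype=tf.float32)  # one-hot labels
toy_pred = tf.constant(np.eye(3)[[0, 2, 0, 1]], dtype=tf.float32)  # fake probabilities
with tf.Session() as sess:
    print(sess.run(f1_score_semeval(toy_true, toy_pred)))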
def read_records(index=0):
train_queue = tf.train.string_input_producer(['training.tfrecords'], num_epochs=FLAGS.epochs)
validation_queue = tf.train.string_input_producer(['validation.tfrecords'], num_epochs=FLAGS.epochs)
test_queue = tf.train.string_input_producer(['test.tfrecords'], num_epochs=FLAGS.epochs)
queue = tf.QueueBase.from_list(index, [train_queue, validation_queue, test_queue])
reader = tf.TFRecordReader()
_, serialized_example = reader.read(queue)
features = tf.parse_single_example(
serialized_example,
features={
'document': tf.VarLenFeature(tf.int64),
'query': tf.VarLenFeature(tf.int64),
'answer': tf.FixedLenFeature([], tf.int64)
})
document = sparse_ops.serialize_sparse(features['document'])
query = sparse_ops.serialize_sparse(features['query'])
answer = features['answer']
document_batch_serialized, query_batch_serialized, answer_batch = tf.train.shuffle_batch(
[document, query, answer], batch_size=FLAGS.batch_size,
capacity=2000,
min_after_dequeue=1000)
sparse_document_batch = sparse_ops.deserialize_many_sparse(document_batch_serialized, dtype=tf.int64)
sparse_query_batch = sparse_ops.deserialize_many_sparse(query_batch_serialized, dtype=tf.int64)
document_batch = tf.sparse_tensor_to_dense(sparse_document_batch)
document_weights = tf.sparse_to_dense(sparse_document_batch.indices, sparse_document_batch.dense_shape, 1)
query_batch = tf.sparse_tensor_to_dense(sparse_query_batch)
query_weights = tf.sparse_to_dense(sparse_query_batch.indices, sparse_query_batch.dense_shape, 1)
return document_batch, document_weights, query_batch, query_weights, answer_batch
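# Usage sketch (assumption): the queue-based pipeline above only yields data once
# queue runners are started; FLAGS.epochs and FLAGS.batch_size are defined elsewhere.
batch_ops = read_records(index=0)  # 0=train, 1=validation, 2=test
with tf.Session() as sess:
    tf.initialize_all_variables().run()
    tf.initialize_local_variables().run()
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    docs, doc_weights, queries, query_weights, answers = sess.run(batch_ops)
    coord.request_stop()
    coord.join(threads)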
def _parse_example_proto(example_serialized):
# parse record
# decode jpeg
# randomly select one caption, convert it into integers
# compute the length of the caption
feature_map = {
'image/encoded': tf.FixedLenFeature([], dtype=tf.string),
'image/coco-id': tf.FixedLenFeature([], dtype=tf.int64),
'caption': tf.VarLenFeature(dtype=tf.string),
# 'image/path': tf.FixedLenFeature([], dtype=tf.string),
}
features = tf.parse_single_example(example_serialized, feature_map)
cocoid = features['image/coco-id']
image = tf.image.decode_jpeg(
features['image/encoded'],
channels=3,
try_recover_truncated=True)
# the image COCO_train2014_000000167126.jpg was corrupted;
# I replaced that image in my train2014/ directory,
# but did not want to re-encode everything, so try_recover_truncated is used,
# which recovers only part of a truncated image
# [0,255] --> [0,1)
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
#image_path = features['image/path']
caption = tf.sparse_tensor_to_dense(features['caption'], default_value=".")
caption = tf.random_shuffle(caption)[0]
record_defaults = [[PAD]] * MAX_SEQ_LEN
caption_tids = tf.decode_csv(caption, record_defaults)
caption_tids = tf.pack(caption_tids)
return image, caption_tids, cocoid #, image_path
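# Batching sketch (assumption): decoded images vary in size, so they are resized
# to a fixed shape before tf.train.shuffle_batch, which requires static shapes.
# The file name and sizes here are hypothetical.
filename_queue = tf.train.string_input_producer(['captions.tfrecords'])
reader = tf.TFRecordReader()
_, serialized = reader.read(filename_queue)
image, caption_tids, cocoid = _parse_example_proto(serialized)
image = tf.image.resize_images(image, [224, 224])
images, captions, coco_ids = tf.train.shuffle_batch(
    [image, caption_tids, cocoid], batch_size=32,
    capacity=2000, min_after_dequeue=1000)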
def write_thin_stack_vals(thin_stack, stack_pointers, new_vals, batch_size,
max_num_concepts):
"""Writes to the thin stack at the given pointers the current decoder position."""
# SparseTensor requires type int64.
stack_inds = tf.transpose(tf.to_int64(tf.pack(
[tf.range(batch_size), stack_pointers]))) # nn_stack_pointers
current_vals = tf.gather_nd(thin_stack, stack_inds)
delta = tf.SparseTensor(stack_inds, new_vals - current_vals,
tf.pack([tf.to_int64(batch_size), max_num_concepts]))
new_thin_stack = thin_stack + tf.sparse_tensor_to_dense(delta)
return new_thin_stack
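# Toy run (a sketch): the SparseTensor delta overwrites exactly one position per
# batch row, leaving the rest of the thin stack untouched.
toy_stack = tf.zeros([2, 5], dtype=tf.int32)
toy_pointers = tf.constant([1, 3])
toy_vals = tf.constant([7, 9])
toy_result = write_thin_stack_vals(toy_stack, toy_pointers, toy_vals,
                                   batch_size=2,
                                   max_num_concepts=tf.to_int64(5))
# toy_result evaluates to [[0, 7, 0, 0, 0], [0, 0, 0, 9, 0]]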
def read_data_int64(input_fname):
with tictoc():
input_fname_queue = tf.train.string_input_producer([input_fname], num_epochs=1)
reader = tf.TFRecordReader()
_, serialized_example = reader.read(input_fname_queue)
features = {'bit_features' : tf.VarLenFeature(tf.int64)}
parsed_example = tf.parse_single_example(serialized_example, features)
bit_features = parsed_example['bit_features']
bit_features = tf.sparse_tensor_to_dense(bit_features)
bit_features = tf.reshape(bit_features, [-1, 62])
with tf.Session() as sess:
tf.initialize_all_variables().run()
tf.initialize_local_variables().run()
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
try:
i = 0
while not coord.should_stop():
x = bit_features.eval()
if i % 10000 == 0: print("substance {}".format(i))
i += 1
except tf.errors.OutOfRangeError:
pass
finally:
coord.request_stop()
coord.join(threads)
def __put_bboxes_on_image(self, images, boxes, scale_x, scale_y):
output = []
bboxes1 = tf.sparse_tensor_to_dense(boxes, default_value=-1)
mask = bboxes1 >= 0
bboxes1 = tf.boolean_mask(bboxes1, mask)
bboxes = tf.reshape(bboxes1, [1, -1, 4])
bboxes = bboxes * [[scale_y, scale_x, scale_y, scale_x]]
shape = tf.shape(bboxes)
bboxes = self.__clip_bboxes(tf.reshape(bboxes, [-1, 4]), 1.0, 1.0)
y, x, h, w = tf.split(bboxes, 4, axis=1)
bboxes = tf.concat([1.0 - (y + h / 2.0) - 0.001, x - w / 2.0 - 0.001,
1.0 - (y - h / 2.0) + 0.001,
x + w / 2.0 + 0.001],
axis=1)
bboxes = tf.reshape(bboxes, shape)
bboxes = tf.clip_by_value(bboxes, 0.0, 1.0)
image = tf.cond(tf.size(bboxes1) > 0,
lambda: tf.image.draw_bounding_boxes(images,
bboxes),
lambda: images)
return image
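# The sparse-handling idiom above, in isolation (a sketch with toy values): a
# variable-length box list is densified with default_value=-1, and the -1
# padding is then stripped with a boolean mask before reshaping to [1, N, 4].
toy_sparse = tf.SparseTensor(indices=[[0, 0], [0, 1], [0, 2], [0, 3]],
                             values=[0.1, 0.2, 0.3, 0.4],
                             dense_shape=[1, 8])
toy_dense = tf.sparse_tensor_to_dense(toy_sparse, default_value=-1.0)
toy_boxes = tf.reshape(tf.boolean_mask(toy_dense, toy_dense >= 0), [1, -1, 4])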
def a_high_classifier(self, page_batch, low_classifier):
"""high level classifier."""
target_batch, un_batch, un_len, la_batch, la_len = page_batch
with tf.variable_scope("low_classifier") as low_scope:
# [batch_size, 1, html_len, we_dim]
target_exp = tf.expand_dims(target_batch, 1)
# [batch_size, 1, num_cats]
target_logits = tf.map_fn(low_classifier,
target_exp,
name="map_fn")
# reuse parameters for low_classifier
low_scope.reuse_variables()
un_rel = tf.sparse_tensor_to_dense(un_batch)
un_rel = tf.reshape(un_rel, [FLAGS.batch_size, -1, FLAGS.html_len,
FLAGS.we_dim])
# call low_classifier to classify relatives
# all relatives of one target are composed into one batch
# [batch_size, num_len(variant), num_cats]
un_rel = tf.map_fn(low_classifier, un_rel, name="map_fn")
# labeled relatives
la_rel = tf.sparse_tensor_to_dense(la_batch)
la_rel = tf.reshape(la_rel, [FLAGS.batch_size, -1, FLAGS.num_cats])
# concat all inputs for high-level classifier RNN
# concat_inputs = tf.concat(1, [un_rel, target_logits])
concat_inputs = tf.concat(1, [un_rel, la_rel, target_logits])
# number of pages for each target
num_pages = tf.add(
tf.add(un_len, la_len),
tf.ones(
[FLAGS.batch_size],
dtype=tf.int32))
# high-level classifier - RNN
with tf.variable_scope("dynamic_rnn"):
cell = tf.nn.rnn_cell.GRUCell(num_units=FLAGS.num_cats)
outputs, state = tf.nn.dynamic_rnn(cell,
inputs=concat_inputs,
sequence_length=num_pages,
dtype=tf.float32)
return state
def read_and_decode(filename_queue):
"""read data from one file and decode to tensors."""
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(
serialized_example,
# Defaults are not specified since both keys are required.
features={
'label': tf.FixedLenFeature(
[], tf.int64),
'target': tf.FixedLenFeature(
[], tf.string),
'un_len': tf.FixedLenFeature(
[], tf.int64),
'unlabeled': tf.VarLenFeature(tf.float32),
'la_len': tf.FixedLenFeature(
[], tf.int64),
'labeled': tf.VarLenFeature(tf.float32),
})
t_dense = features['target']
# decode it using the same numpy type in convert !!
t_decode = tf.decode_raw(t_dense, tf.float32)
# set_shape and reshape are both necessary ???
t_decode.set_shape([FLAGS.html_len * FLAGS.we_dim])
# t_cast = tf.cast(t_decode, tf.float32)
t_reshape = tf.reshape(t_decode, [FLAGS.html_len, FLAGS.we_dim])
un_len = tf.cast(features['un_len'], tf.int32)
un_rel = features['unlabeled']
# u_decode = tf.decode_raw(features['unlabeled'], tf.float32)
# un_rel = tf.sparse_tensor_to_dense(un_rel)
# # u_dense.set_shape(tf.pack([un_len, FLAGS.html_len, FLAGS.we_dim]))
# # u_reshape = tf.reshape(u_dense, [-1, FLAGS.html_len, FLAGS.we_dim])
# un_rel = tf.reshape(un_rel,
# tf.pack([un_len, FLAGS.html_len, FLAGS.we_dim]))
# un_rel = tf.pad(un_rel, [[0, FLAGS.max_relatives], [0, 0], [0, 0]])
# un_rel = tf.slice(un_rel, [0, 0, 0], [FLAGS.max_relatives, FLAGS.html_len,
# FLAGS.we_dim])
la_len = tf.cast(features['la_len'], tf.int32)
la_rel = features['labeled']
# la_rel = tf.sparse_tensor_to_dense(la_rel)
# la_rel = tf.reshape(la_rel, tf.pack([la_len, FLAGS.num_cats]))
# la_rel = tf.pad(la_rel, [[0, FLAGS.max_relatives], [0, 0]])
# la_rel = tf.slice(la_rel, [0, 0], [FLAGS.max_relatives, FLAGS.num_cats])
label = tf.cast(features['label'], tf.int32)
# u_reshape = tf.zeros([3, 4], tf.int32)
# l_reshape = tf.zeros([3, 4], tf.int32)
return t_reshape, un_rel, un_len, la_rel, la_len, label
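# Note: 'unlabeled' and 'labeled' are deliberately returned as SparseTensors here;
# the commented-out sparse_tensor_to_dense/reshape steps are instead performed per
# batch in a_high_classifier above, after the variable-length relatives are batched.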
def get_mask_file(origin_images, mask_file, height, width, channels=3):
"""blur image through a mask file"""
img_bytes = tf.read_file(mask_file)
maskimage = tf.image.decode_jpeg(img_bytes)
maskimage = tf.to_float(maskimage)
m_mean = tf.reduce_mean(maskimage, axis=(1,2))
index = tf.where(m_mean < 1.5)
side_index = tf.where(m_mean >= 1.5)
top_index = side_index + tf.to_int64(1)
down_index = side_index - tf.to_int64(1)
select = tf.zeros_like(m_mean, dtype=tf.float32)
side_select = tf.ones_like(m_mean, dtype=tf.float32)
values = tf.squeeze(tf.ones_like(index, dtype=tf.float32))
side_values = tf.squeeze(tf.ones_like(side_index, dtype=tf.float32))
top_values = tf.scalar_mul(tf.random_uniform([], minval=0, maxval=1), side_values)
down_values = tf.scalar_mul(tf.random_uniform([], minval=0, maxval=1), side_values)
delta = tf.SparseTensor(index, values, [height])
top_delta = tf.SparseTensor(top_index, top_values, [height])
down_delta = tf.SparseTensor(down_index, down_values, [height])
black_select = select + tf.sparse_tensor_to_dense(delta)
top_select = side_select + tf.sparse_tensor_to_dense(top_delta)
down_select = side_select + tf.sparse_tensor_to_dense(down_delta)
top_select = tf.expand_dims(tf.divide(tf.ones_like(top_select), top_select), -1)
top_select = tf.matmul(top_select, tf.ones([1, width]))
top_select = tf.expand_dims(top_select, -1)
down_select = tf.expand_dims(tf.divide(tf.ones_like(down_select), down_select), -1)
down_select = tf.matmul(down_select, tf.ones([1, width]))
down_select = tf.expand_dims(down_select, -1)
black_select = tf.expand_dims(black_select, -1)
black_select = tf.matmul(black_select, tf.ones([1, width]))
black_select = tf.expand_dims(black_select, 0)
black_select = tf.expand_dims(black_select, -1)
top_select = tf.expand_dims(top_select, 0)
down_select = tf.expand_dims(down_select, 0)
source = tf.mul(origin_images, top_select)
source = tf.mul(source, down_select)
source = tf.mul(source, black_select)
return source
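# Core trick used above, shown standalone (a sketch): a list of row indices
# becomes a dense indicator vector by densifying a 1-D SparseTensor.
toy_height = 5
toy_index = tf.constant([[1], [3]], dtype=tf.int64)  # rows to mark
toy_values = tf.squeeze(tf.ones_like(toy_index, dtype=tf.float32))
toy_mask = tf.sparse_tensor_to_dense(tf.SparseTensor(toy_index, toy_values, [toy_height]))
# toy_mask evaluates to [0., 1., 0., 1., 0.]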
def test_reading_inputs():
parse_spec = {
"text": tf.VarLenFeature(tf.string),
"label": tf.FixedLenFeature(shape=(1,), dtype=tf.int64,
default_value=None)
}
sess = tf.Session()
reader = tf.python_io.tf_record_iterator(INPUT_FILE)
ESZ = 4
HSZ = 100
NC = 4
n = 0
text_lookup_table = tf.contrib.lookup.index_table_from_file(
VOCAB_FILE, 10, VOCAB_SIZE)
text_embedding_w = tf.Variable(tf.random_uniform(
[VOCAB_SIZE, ESZ], -1.0, 1.0))
sess.run([tf.tables_initializer()])
for record in reader:
example = tf.parse_single_example(
record,
parse_spec)
text = example["text"]
labels = tf.subtract(example["label"], 1)
text_ids = text_lookup_table.lookup(text)
dense = tf.sparse_tensor_to_dense(text_ids)
print(dense.shape)
text_embedding = tf.reduce_mean(tf.nn.embedding_lookup(
text_embedding_w, dense), axis=-2)
print(text_embedding.shape)
text_embedding = tf.expand_dims(text_embedding, -2)
print(text_embedding.shape)
text_embedding_2 = tf.contrib.layers.bow_encoder(
dense, VOCAB_SIZE, ESZ)
print(text_embedding_2.shape)
num_classes = 2
logits = tf.contrib.layers.fully_connected(
inputs=text_embedding, num_outputs=4,
activation_fn=None)
sess.run([tf.global_variables_initializer()])
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits)
x = sess.run([text_embedding, text_embedding_2, logits, labels, loss])
print(len(x), list(str(x[i]) for i in range(len(x))))
if n > 2:
break
n += 1
def __init__(self, itm_cnt, usr_cnt, dim_hidden, n_time_step, learning_rate, grad_clip, emb_dim, lamda=0.2, initdelta=0.05,MF_paras=None,model_type="rnn",use_sparse_tensor=False):
"""
Args:
dim_itm_embed: (optional) Dimension of item embedding.
dim_usr_embed: (optional) Dimension of user embedding.
dim_hidden: (optional) Dimension of all hidden state.
n_time_step: (optional) Time step size of LSTM.
usr_cnt: (optional) The size of all users.
itm_cnt: (optional) The size of all items.
"""
self.V_M = itm_cnt
self.V_U = usr_cnt
self.param=MF_paras
self.H = dim_hidden
self.T = n_time_step
self.MF_paras=MF_paras
self.grad_clip = grad_clip
self.weight_initializer = tf.contrib.layers.xavier_initializer()
self.const_initializer = tf.constant_initializer(0.0)
self.emb_initializer = tf.random_uniform_initializer(minval=-1.0, maxval=1.0)
# Place holder for features and captions
if use_sparse_tensor:
self.user_indices = tf.placeholder(tf.int64)
self.user_shape = tf.placeholder(tf.int64)
# float32 values so the dense result matches the placeholder branch below
self.user_values = tf.placeholder(tf.float32)
# tf.SparseTensor takes (indices, values, dense_shape), in that order
user_sparse_tensor = tf.SparseTensor(self.user_indices, self.user_values, self.user_shape)
self.user_sequence = tf.sparse_tensor_to_dense(user_sparse_tensor)
self.item_indices = tf.placeholder(tf.int64)
self.item_shape = tf.placeholder(tf.int64)
self.item_values = tf.placeholder(tf.float32)
item_sparse_tensor = tf.SparseTensor(self.item_indices, self.item_values, self.item_shape)
self.item_sequence = tf.sparse_tensor_to_dense(item_sparse_tensor)
else:
self.item_sequence = tf.placeholder(tf.float32, [None, self.T, self.V_U])
self.user_sequence = tf.placeholder(tf.float32, [None, self.T, self.V_M])
self.rating = tf.placeholder(tf.float32, [None,])
self.learning_rate = learning_rate
self.emb_dim = emb_dim
self.lamda = lamda # regularization parameters
self.initdelta = initdelta
self.u = tf.placeholder(tf.int32)
self.i = tf.placeholder(tf.int32)
self.paras_rnn=[]
self.model_type=model_type
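# Feeding sketch for the use_sparse_tensor=True branch (hypothetical names:
# `model` is an instance of this class, `sess` an active session): the three
# placeholders describe one sparse tensor in COO form.
feed = {
    model.user_indices: [[0, 0, 2], [0, 1, 5]],  # (batch, time, user) coordinates
    model.user_values: [1.0, 1.0],
    model.user_shape: [4, model.T, model.V_U],   # dense shape of the batch
}
dense_users = sess.run(model.user_sequence, feed_dict=feed)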
def __init__(self, itm_cnt, usr_cnt, dim_hidden, n_time_step, learning_rate, grad_clip, emb_dim, lamda=0.2, initdelta=0.05,MF_paras=None,model_type="rnn",use_sparse_tensor=True,update_rule='adam'):
"""
Args:
dim_itm_embed: (optional) Dimension of item embedding.
dim_usr_embed: (optional) Dimension of user embedding.
dim_hidden: (optional) Dimension of all hidden state.
n_time_step: (optional) Time step size of LSTM.
usr_cnt: (optional) The size of all users.
itm_cnt: (optional) The size of all items.
"""
self.V_M = itm_cnt
self.V_U = usr_cnt
self.param=MF_paras
self.H = dim_hidden
self.T = n_time_step
self.MF_paras=MF_paras
self.grad_clip = grad_clip
self.weight_initializer = tf.random_uniform_initializer(minval=-0.05, maxval=0.05)
self.const_initializer = tf.constant_initializer(0.0)
self.emb_initializer = tf.random_uniform_initializer(minval=-0.05, maxval=0.05)
# Place holder for features and captions
self.sparse_tensor=use_sparse_tensor
if self.sparse_tensor:
self.user_sparse_tensor= tf.sparse_placeholder(tf.float32)
self.user_sequence = tf.sparse_tensor_to_dense(self.user_sparse_tensor)
self.item_sparse_tensor= tf.sparse_placeholder(tf.float32)
self.item_sequence = tf.sparse_tensor_to_dense(self.item_sparse_tensor)
else:
self.item_sequence = tf.placeholder(tf.float32, [None, self.T, self.V_U])
self.user_sequence = tf.placeholder(tf.float32, [None, self.T, self.V_M])
self.rating = tf.placeholder(tf.float32, [None,])
self.learning_rate = learning_rate
self.emb_dim = emb_dim
self.lamda = lamda # regularization parameters
self.initdelta = initdelta
self.u = tf.placeholder(tf.int32)
self.i = tf.placeholder(tf.int32)
self.paras_rnn=[]
self.model_type=model_type
self.update_rule = update_rule
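# With tf.sparse_placeholder (a sketch, hypothetical `model`/`sess`), a single
# tf.SparseTensorValue is fed instead of three separate placeholders; its
# positional arguments are (indices, values, dense_shape).
sparse_batch = tf.SparseTensorValue([[0, 0, 2], [0, 1, 5]],
                                    [1.0, 1.0],
                                    [4, model.T, model.V_U])
dense_users = sess.run(model.user_sequence,
                       feed_dict={model.user_sparse_tensor: sparse_batch})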
def loss(self, class_scores, labels, images):
# polys = tf.sparse_tensor_to_dense(polys, default_value=-1)
# mask = polys >= 0
# polys = tf.boolean_mask(polys, mask)
# labels_oh = tf.one_hot(labels, self.num_classes+1)
# new_balance = tf.reduce_sum(labels_oh, axis=[0, 1])/tf.reduce_sum(labels_oh)
# class_balance = tf.Variable(tf.zeros([self.num_classes+1]),
# trainable=False, name="class_balance")
# balance = tf.assign(class_balance,
# class_balance * 0.999 + new_balance * (1 - 0.999))
# labels = tf.Print(labels, [balance], "balance", summarize=100)
labels = tf.cast(labels, tf.int64)
if self.exclude_class is not None:
m = tf.cast(tf.not_equal(labels, tf.cast(self.exclude_class, tf.int64)), tf.int64)
labels_without_exclude = labels * m
labs = tf.one_hot(labels_without_exclude, self.num_classes)
else:
labels_without_exclude = labels
labs = tf.one_hot(labels, self.num_classes + 1)
labels_without_exclude = tf.reshape(labels_without_exclude, [self.batch_size, self.height, self.width, self.num_classes])
cls_scores = tf.reshape(class_scores, [self.batch_size, self.height, self.width, self.num_classes, 2])
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels_without_exclude, logits=cls_scores, name="loss")
# loss = self.__softmax_crossentropy(class_scores, labs)
#weights = tf.gather(self.label_weights, tf.reshape(labels_without_exclude, [-1]))
weights = tf.tile([[[self.label_weights]]], [self.batch_size, self.height, self.width, 1])
weights = weights * tf.cast(labels_without_exclude, dtype=tf.float32)
backgroundweights = tf.tile([[[self.background_weights]]], [self.batch_size, self.height, self.width, 1])
weights = tf.where(tf.equal(weights, 0), tf.ones_like(weights) * backgroundweights, weights)
# weights = tf.reshape(tf.tile(tf.expand_dims(tf.reduce_max(tf.reshape(weights, [-1, self.num_classes]), axis=1), axis=1), [1, self.num_classes]), [self.batch_size, self.height, self.width, self.num_classes])
#weights = tf.Print(weights, [tf.shape(weights), weights], "weights", summarize=1024)
loss = loss * weights
if self.exclude_class is not None:
loss = tf.where(tf.equal(labels, tf.cast(self.exclude_class, tf.int64)), tf.zeros_like(loss, dtype=tf.float32), loss)  # tf.boolean_mask(loss, tf.not_equal(labels, tf.cast(self.exclude_class, tf.int64)))
loss2 = tf.reduce_sum(loss)
tf.add_to_collection('losses', tf.identity(loss2,
name="losses"))
return tf.add_n(tf.get_collection('losses'), name='total_loss'), tf.nn.softmax(cls_scores)[0], labels_without_exclude[0], loss[0]