Python equal() example source code
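
Every snippet below follows the same core pattern: tf.equal compares two tensors elementwise and returns a boolean tensor, which is then cast to float and averaged to obtain an accuracy. A minimal self-contained sketch (TensorFlow 1.x; the constant values are invented for illustration):

import tensorflow as tf

predictions = tf.constant([1, 0, 2, 1], dtype=tf.int32)
labels = tf.constant([1, 0, 1, 1], dtype=tf.int32)

correct = tf.equal(predictions, labels)                  # [True, True, False, True]
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

with tf.Session() as sess:
    print(sess.run(accuracy))  # 0.75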

beam_aligner.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def _tensor_gather_helper(gather_indices, gather_from, batch_size,
                          range_size, gather_shape):
    """Helper for gathering the right indices from the tensor.
    This works by reshaping gather_from to gather_shape (e.g. [-1]) and then
    gathering from that according to the gather_indices, which are offset by
    the right amounts in order to preserve the batch order.
    Args:
      gather_indices: The tensor indices that we use to gather.
      gather_from: The tensor that we are gathering from.
      batch_size: The input batch size.
      range_size: The number of values in each range. Likely equal to beam_width.
      gather_shape: What we should reshape gather_from to in order to preserve the
        correct values. An example is when gather_from is the attention from an
        AttentionWrapperState with shape [batch_size, beam_width, attention_size].
        There, we want to preserve the attention_size elements, so gather_shape is
        [batch_size * beam_width, -1]. Then, upon reshape, we still have the
        attention_size as desired.
    Returns:
      output: Gathered tensor of shape tf.shape(gather_from)[:1+len(gather_shape)]
    """
    range_ = tf.expand_dims(tf.range(batch_size) * range_size, 1)
    gather_indices = tf.reshape(gather_indices + range_, [-1])
    output = tf.gather(tf.reshape(gather_from, gather_shape), gather_indices)
    final_shape = tf.shape(gather_from)[:1 + len(gather_shape)]
    final_static_shape = (tf.TensorShape([None]).concatenate(gather_from.shape[1:1 + len(gather_shape)]))
    output = tf.reshape(output, final_shape)
    output.set_shape(final_static_shape)
    return output
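
A hypothetical call showing the reshape-offset-gather flow described in the docstring; every shape and value below is invented for illustration:

batch_size, beam_width, attention_size = 2, 3, 4
# Which beam each output slot should copy from, per batch entry.
gather_indices = tf.constant([[0, 0, 1], [2, 1, 1]], dtype=tf.int32)
gather_from = tf.random_normal([batch_size, beam_width, attention_size])

gathered = _tensor_gather_helper(
    gather_indices=gather_indices,
    gather_from=gather_from,
    batch_size=batch_size,
    range_size=beam_width,
    gather_shape=[batch_size * beam_width, -1])
# gathered keeps shape [batch_size, beam_width, attention_size];
# row (b, i) now holds gather_from[b, gather_indices[b, i], :].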
inference-combine-tfrecords-video.py (project: youtube-8m, author: wangheda)
def build_graph(input_reader, input_data_pattern,
                prediction_reader, prediction_data_pattern,
                batch_size=256):
  """Creates the TensorFlow graph for evaluation.

  Args:
    input_reader: The data file reader for the input examples. It should
                  inherit from BaseReader.
    input_data_pattern: Glob path to the input data files.
    prediction_reader: The data file reader for the stored predictions. It
                       should inherit from BaseReader.
    prediction_data_pattern: Glob path to the prediction data files.
    batch_size: How many examples to process at a time.
  """

  video_ids_batch, model_inputs_batch, labels_batch, unused_num_frames = (
      get_input_data_tensors(
          input_reader,
          input_data_pattern,
          batch_size=batch_size))
  video_ids_batch2, model_predictions_batch, labels_batch2, unused_num_frames2 = (
      get_input_data_tensors(
          prediction_reader,
          prediction_data_pattern,
          batch_size=batch_size))

  video_ids_equal = tf.reduce_mean(tf.cast(tf.equal(video_ids_batch, video_ids_batch2), tf.float32))
  labels_equal = tf.reduce_mean(tf.reduce_sum(tf.cast(tf.equal(labels_batch, labels_batch2), tf.float32), axis=1))

  tf.add_to_collection("video_ids_equal", video_ids_equal)
  tf.add_to_collection("labels_equal", labels_equal)
  tf.add_to_collection("video_ids_batch", video_ids_batch)
  tf.add_to_collection("labels_batch", tf.cast(labels_batch, tf.float32))
  tf.add_to_collection("inputs_batch", model_inputs_batch)
  tf.add_to_collection("predictions_batch", model_predictions_batch)
linear_svm.py (project: TensorFlow-World, author: astorfi)
def inference_fn(W,b,x_data,y_target):
    prediction = tf.sign(tf.subtract(tf.matmul(x_data, W), b))
    accuracy = tf.reduce_mean(tf.cast(tf.equal(prediction, y_target), tf.float32))
    return accuracy
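
A hedged usage sketch for inference_fn; the placeholder shapes and variable sizes below are assumptions for illustration, not part of the original script:

x_data = tf.placeholder(tf.float32, shape=[None, 2])
y_target = tf.placeholder(tf.float32, shape=[None, 1])  # labels in {-1, +1}
W = tf.Variable(tf.random_normal(shape=[2, 1]))
b = tf.Variable(tf.random_normal(shape=[1, 1]))

# tf.sign(xW - b) yields -1/+1 predictions, so tf.equal against
# {-1, +1} targets counts exact class agreement.
accuracy = inference_fn(W, b, x_data, y_target)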
conv-conv.py (project: US-image-prediction, author: ChengruiWu008)
def correct_prediction(correct_pred=0.0):
    # Accumulate per-pixel correctness; the boolean output of tf.equal must be
    # cast before it can be summed.
    for i in range(PIXEL):
        correct = tf.equal(tf.argmax(conv_5[0][i], 1), tf.argmax(y[i], 1))
        correct_pred += tf.cast(correct, tf.float32)
    # Divide by PIXEL so the result is a fraction in [0, 1] rather than a count.
    accuracy = tf.reduce_mean(correct_pred / PIXEL)
    return accuracy
main.py (project: scientific-paper-summarisation, author: EdCo95)
def bilstm_reader(placeholders, vocab_size, emb_dim, drop_keep_prob=1.0):
    # [batch_size, max_seq_length]
    sentences = placeholders['sentences']

    # [batch_size, candidate_size]
    targets = tf.to_float(placeholders['sentence_labels'])

    with tf.variable_scope("embeddings"):
        embeddings = tf.get_variable("word_embeddings", [vocab_size, emb_dim], dtype=tf.float32)

    with tf.variable_scope("embedders") as varscope:
        sentences_embedded = tf.nn.embedding_lookup(embeddings, sentences)

    with tf.variable_scope("bilstm_reader") as varscope1:

        # states: (c_fw, h_fw), (c_bw, h_bw)
        outputs, states = reader(sentences_embedded, placeholders['sentences_lengths'], emb_dim,
                                scope=varscope1, drop_keep_prob=drop_keep_prob)

        # concat fw and bw outputs
        output = tf.concat([states[0][1], states[1][1]], axis=1)

    scores = tf.contrib.layers.linear(output, 2)  # we don't strictly need this as we've only got 2 targets
    # add non-linearity
    scores = tf.nn.tanh(scores)
    loss = tf.nn.softmax_cross_entropy_with_logits(logits=scores, labels=targets)
    predict = tf.nn.softmax(scores)

    predictions = tf.argmax(predict, axis=1)
    true_vals = tf.argmax(targets, axis=1)
    accuracy = tf.reduce_mean(tf.cast(tf.equal(predictions, true_vals), tf.float32))

    saver = tf.train.Saver()

    return scores, loss, predict, accuracy, saver
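
A sketch of the placeholder dictionary that bilstm_reader expects; the keys come from the function body, while the shapes and hyperparameters are assumptions:

placeholders = {
    'sentences': tf.placeholder(tf.int32, [None, None]),    # [batch_size, max_seq_length]
    'sentences_lengths': tf.placeholder(tf.int32, [None]),  # true length of each sequence
    'sentence_labels': tf.placeholder(tf.int32, [None, 2]), # one-hot over the 2 classes
}
scores, loss, predict, accuracy, saver = bilstm_reader(
    placeholders, vocab_size=10000, emb_dim=100, drop_keep_prob=0.9)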
features_linearclassifier.py (project: scientific-paper-summarisation, author: EdCo95)
def graph():

    # Define placeholders for the data

    # The sentence features to classify, of shape [batch_size x NUM_FEATURES]
    sentence_input = tf.placeholder(tf.float32, shape=[None, NUM_FEATURES])

    # The labels for the sentences as one-hot vectors, of the form [batch_size x num_classes]
    labels = tf.placeholder(tf.float32, shape=[None, NUM_CLASSES])

    # Define the computation graph

    # Linear layer
    sent_weight = weight_variable([NUM_FEATURES, NUM_CLASSES])
    sent_bias = bias_variable([NUM_CLASSES])

    output = tf.matmul(sentence_input, sent_weight) + sent_bias

    # Define the loss function
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=labels))
    opt = tf.train.AdamOptimizer(LEARNING_RATE).minimize(loss)

    # Predictions
    predictions = tf.nn.softmax(output)

    # Calculate accuracy
    pred_answers = tf.argmax(output, axis=1)
    correct_answers = tf.argmax(labels, axis=1)
    accuracy = tf.reduce_mean(tf.cast(tf.equal(pred_answers, correct_answers), tf.float32))

    return sentence_input, labels, loss, opt, predictions, pred_answers, correct_answers, accuracy, sent_weight
combined_linear_classifier.py (project: scientific-paper-summarisation, author: EdCo95)
def graph():
    """
    Function to encapsulate the construction of a TensorFlow computation graph.
    :return: input placeholders, optimisation operation, loss, accuracy, prediction operations
    """

    # Define placeholders for the data

    # The input to classify. Has shape [batch_size x (word_dimensions + abstract_dimension + num_features)]
    # because the input is the sentence and abstract representations concatenated with the extra features.
    sentence_input = tf.placeholder(tf.float32, shape=[None, WORD_DIMENSIONS + ABSTRACT_DIMENSION + NUM_FEATURES])

    # The labels for the sentences as one-hot vectors, of the form [batch_size x num_classes]
    labels = tf.placeholder(tf.float32, shape=[None, NUM_CLASSES])

    # Define the computation graph

    # The keep gate - decides which parts to keep
    keep_weight = weight_variable([WORD_DIMENSIONS + ABSTRACT_DIMENSION + NUM_FEATURES, NUM_CLASSES])
    keep_bias = bias_variable([NUM_CLASSES])
    output = tf.matmul(sentence_input, keep_weight) + keep_bias

    # Define the loss function
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=labels))
    opt = tf.train.AdamOptimizer(LEARNING_RATE).minimize(loss)

    # Predictions
    predictions = tf.nn.softmax(output)

    # Calculate accuracy
    pred_answers = tf.argmax(output, axis=1)
    correct_answers = tf.argmax(labels, axis=1)
    accuracy = tf.reduce_mean(tf.cast(tf.equal(pred_answers, correct_answers), tf.float32))

    return sentence_input, labels, loss, opt, predictions, pred_answers, correct_answers, accuracy

# Construct the computation graph
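A minimal driver loop for the graph() above; the batch size and random data are purely illustrative:

import numpy as np

(sentence_input, labels, loss, opt, predictions,
 pred_answers, correct_answers, accuracy) = graph()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    feats = np.random.rand(8, WORD_DIMENSIONS + ABSTRACT_DIMENSION + NUM_FEATURES)
    onehot = np.eye(NUM_CLASSES)[np.random.randint(0, NUM_CLASSES, size=8)]
    _, acc = sess.run([opt, accuracy],
                      feed_dict={sentence_input: feats, labels: onehot})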
BaseModel.py (project: kaggle-review, author: daxiongshu)
def _get_loss(self,labels):
        # build the self.loss tensor
        # This function could be overwritten

        #print("pred {} label{}".format(self.logit.dtype,labels.dtype))

        with tf.name_scope("Loss"):
            with tf.name_scope("cross_entropy"):
                labels = tf.cast(labels, tf.float32)
                #self.logit = tf.cast(self.logit, tf.float32)
                self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.logit, labels=labels))
            self._get_l2_loss()
            with tf.name_scope("accuracy"):
                y_label = tf.argmax(labels, 1)
                yp_label = tf.argmax(self.logit, 1)
                correct_pred = tf.equal(yp_label,y_label)
                self.acc = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

        with tf.name_scope("summary"):
            if self.flags.visualize:
                tf.summary.scalar(name='TRAIN_CrossEntropy', tensor=self.loss, collections=[tf.GraphKeys.SCALARS])
                tf.summary.scalar(name='TRAIN_Accuracy', tensor=self.acc, collections=[tf.GraphKeys.SCALARS])

                tf.summary.scalar(name='TRAIN_L2loss', tensor=self.l2loss, collections=[tf.GraphKeys.SCALARS])

                if 'acc' in self.flags.visualize:
                    tf.summary.histogram(name='pred', values=yp_label, collections=[tf.GraphKeys.FEATURE_MAPS])
                    tf.summary.histogram(name='truth', values=y_label, collections=[tf.GraphKeys.FEATURE_MAPS])
                    for cl in range(self.flags.classes):
                        tf.summary.histogram(name='pred%d'%cl, values=tf.slice(self.logit, [0,cl],[self.flags.batch_size, 1]), collections=[tf.GraphKeys.FEATURE_MAPS])
p9_BiLstmTextRelation_model.py (project: text_classification, author: brightmart)
def __init__(self,num_classes, learning_rate, batch_size, decay_steps, decay_rate,sequence_length,
                 vocab_size,embed_size,is_training,initializer=tf.random_normal_initializer(stddev=0.1)):
        """init all hyperparameter here"""
        # set hyperparamter
        self.num_classes = num_classes
        self.batch_size = batch_size
        self.sequence_length=sequence_length
        self.vocab_size=vocab_size
        self.embed_size=embed_size
        self.hidden_size=embed_size
        self.is_training=is_training
        self.learning_rate=learning_rate
        self.initializer=initializer

        # add placeholder (X,label)
        #X:input_x e.g. "how much is the computer? EOS price of laptop"
        self.input_x = tf.placeholder(tf.int32, [None, self.sequence_length], name="input_x")  # X:  concat of two sentence, split by EOS.
        self.input_y = tf.placeholder(tf.int32,[None], name="input_y")  # y: integer class ids, shape [None]
        self.dropout_keep_prob=tf.placeholder(tf.float32,name="dropout_keep_prob")

        self.global_step = tf.Variable(0, trainable=False, name="Global_Step")
        self.epoch_step=tf.Variable(0,trainable=False,name="Epoch_Step")
        self.epoch_increment=tf.assign(self.epoch_step,tf.add(self.epoch_step,tf.constant(1)))
        self.decay_steps, self.decay_rate = decay_steps, decay_rate

        self.instantiate_weights()
        self.logits = self.inference() #[None, self.label_size]. main computation graph is here.
        if not is_training:
            return
        self.loss_val = self.loss()
        self.train_op = self.train()
        self.predictions = tf.argmax(self.logits, axis=1, name="predictions")  # shape:[None,]
        correct_prediction = tf.equal(tf.cast(self.predictions,tf.int32), self.input_y) #tf.argmax(self.logits, 1)-->[batch_size]
        self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name="Accuracy") # shape=()
a2_transformer_classification.py (project: text_classification, author: brightmart)
def __init__(self, num_classes, learning_rate, batch_size, decay_steps, decay_rate, sequence_length,
                 vocab_size, embed_size,d_model,d_k,d_v,h,num_layer,is_training,
                 initializer=tf.random_normal_initializer(stddev=0.1),clip_gradients=5.0,l2_lambda=0.0001,use_residual_conn=False):
        """init all hyperparameter here"""
        super(Transformer, self).__init__(d_model, d_k, d_v, sequence_length, h, batch_size, num_layer=num_layer) #init some fields by using parent class.

        self.num_classes = num_classes
        self.sequence_length = sequence_length
        self.vocab_size = vocab_size
        self.embed_size = d_model
        self.learning_rate = tf.Variable(learning_rate, trainable=False, name="learning_rate")
        self.learning_rate_decay_half_op = tf.assign(self.learning_rate, self.learning_rate * 0.5)
        self.initializer = initializer
        self.clip_gradients=clip_gradients
        self.l2_lambda=l2_lambda

        self.is_training=is_training #self.is_training=tf.placeholder(tf.bool,name="is_training") #tf.bool #is_training
        self.input_x = tf.placeholder(tf.int32, [self.batch_size, self.sequence_length], name="input_x")                 #x  batch_size
        self.input_y_label = tf.placeholder(tf.int32, [self.batch_size], name="input_y_label")
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

        self.global_step = tf.Variable(0, trainable=False, name="Global_Step")
        self.epoch_step = tf.Variable(0, trainable=False, name="Epoch_Step")
        self.epoch_increment = tf.assign(self.epoch_step, tf.add(self.epoch_step, tf.constant(1)))
        self.decay_steps, self.decay_rate = decay_steps, decay_rate
        self.use_residual_conn=use_residual_conn

        self.instantiate_weights()
        self.logits = self.inference() #logits shape:[batch_size,self.num_classes]

        self.predictions = tf.argmax(self.logits, axis=1, name="predictions")
        correct_prediction = tf.equal(tf.cast(self.predictions, tf.int32),self.input_y_label)
        self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name="Accuracy")  # shape=()
        if self.is_training is False:# if it is not training, then no need to calculate loss and back-propagation.
            return
        self.loss_val = self.loss()
        self.train_op = self.train()
p9_twoCNNTextRelation_model.py (project: text_classification, author: brightmart)
def __init__(self, filter_sizes,num_filters,num_classes, learning_rate, batch_size, decay_steps, decay_rate,sequence_length,vocab_size,
                 embed_size,is_training,initializer=tf.random_normal_initializer(stddev=0.1)):
        """init all hyperparameter here"""
        # set hyperparamter
        self.num_classes = num_classes
        self.batch_size = batch_size
        self.sequence_length=sequence_length
        self.vocab_size=vocab_size
        self.embed_size=embed_size
        self.is_training=is_training
        self.learning_rate=learning_rate
        self.filter_sizes=filter_sizes # it is a list of int. e.g. [3,4,5]
        self.num_filters=num_filters
        self.initializer=initializer
        self.num_filters_total=self.num_filters * len(filter_sizes) #how many filters totally.

        # add placeholder (X,label)
        self.input_x = tf.placeholder(tf.int32, [None, self.sequence_length], name="input_x")  # X: first sentence
        self.input_x2 = tf.placeholder(tf.int32, [None, self.sequence_length], name="input_x2")  # X: second sentence
        self.input_y = tf.placeholder(tf.int32, [None,],name="input_y")  # y: 0 or 1. 1 means two sentences related to each other;0 means no relation.
        self.dropout_keep_prob=tf.placeholder(tf.float32,name="dropout_keep_prob")

        self.global_step = tf.Variable(0, trainable=False, name="Global_Step")
        self.epoch_step=tf.Variable(0,trainable=False,name="Epoch_Step")
        self.epoch_increment=tf.assign(self.epoch_step,tf.add(self.epoch_step,tf.constant(1)))
        self.decay_steps, self.decay_rate = decay_steps, decay_rate
        self.instantiate_weights()
        self.logits = self.inference() #[None, self.label_size]. main computation graph is here.
        if not is_training:
            return
        self.loss_val = self.loss()
        self.train_op = self.train()
        self.predictions = tf.argmax(self.logits, 1, name="predictions")  # shape:[None,]

        correct_prediction = tf.equal(tf.cast(self.predictions,tf.int32), self.input_y) #tf.argmax(self.logits, 1)-->[batch_size]
        self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name="Accuracy") # shape=()
p5_fastTextB_model.py (project: text_classification, author: brightmart)
def __init__(self, label_size, learning_rate, batch_size, decay_steps, decay_rate,num_sampled,sentence_len,vocab_size,embed_size,is_training):
        """init all hyperparameter here"""
        # set hyperparamter
        self.label_size = label_size
        self.batch_size = batch_size
        self.num_sampled = num_sampled
        self.sentence_len=sentence_len
        self.vocab_size=vocab_size
        self.embed_size=embed_size
        self.is_training=is_training
        self.learning_rate=learning_rate

        # add placeholder (X,label)
        self.sentence = tf.placeholder(tf.int32, [None, self.sentence_len], name="sentence")  # X
        self.labels = tf.placeholder(tf.int32, [None], name="Labels")  # y

        self.global_step = tf.Variable(0, trainable=False, name="Global_Step")
        self.epoch_step=tf.Variable(0,trainable=False,name="Epoch_Step")
        self.epoch_increment=tf.assign(self.epoch_step,tf.add(self.epoch_step,tf.constant(1)))
        self.decay_steps, self.decay_rate = decay_steps, decay_rate

        self.instantiate_weights()
        self.logits = self.inference() #[None, self.label_size]
        if not is_training:
            return
        self.loss_val = self.loss()
        self.train_op = self.train()
        self.predictions = tf.argmax(self.logits, axis=1, name="predictions")  # shape:[None,]
        correct_prediction = tf.equal(tf.cast(self.predictions,tf.int32), self.labels) #tf.argmax(self.logits, 1)-->[batch_size]
        self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name="Accuracy") # shape=()
p8_TextRNN_model.py (project: text_classification, author: brightmart)
def __init__(self,num_classes, learning_rate, batch_size, decay_steps, decay_rate,sequence_length,
                 vocab_size,embed_size,is_training,initializer=tf.random_normal_initializer(stddev=0.1)):
        """init all hyperparameter here"""
        # set hyperparamter
        self.num_classes = num_classes
        self.batch_size = batch_size
        self.sequence_length=sequence_length
        self.vocab_size=vocab_size
        self.embed_size=embed_size
        self.hidden_size=embed_size
        self.is_training=is_training
        self.learning_rate=learning_rate
        self.initializer=initializer
        self.num_sampled=20

        # add placeholder (X,label)
        self.input_x = tf.placeholder(tf.int32, [None, self.sequence_length], name="input_x")  # X
        self.input_y = tf.placeholder(tf.int32,[None], name="input_y")  # y: integer class ids, shape [None]
        self.dropout_keep_prob=tf.placeholder(tf.float32,name="dropout_keep_prob")

        self.global_step = tf.Variable(0, trainable=False, name="Global_Step")
        self.epoch_step=tf.Variable(0,trainable=False,name="Epoch_Step")
        self.epoch_increment=tf.assign(self.epoch_step,tf.add(self.epoch_step,tf.constant(1)))
        self.decay_steps, self.decay_rate = decay_steps, decay_rate

        self.instantiate_weights()
        self.logits = self.inference() #[None, self.label_size]. main computation graph is here.
        if not is_training:
            return
        self.loss_val = self.loss() #-->self.loss_nce()
        self.train_op = self.train()
        self.predictions = tf.argmax(self.logits, axis=1, name="predictions")  # shape:[None,]
        correct_prediction = tf.equal(tf.cast(self.predictions,tf.int32), self.input_y) #tf.argmax(self.logits, 1)-->[batch_size]
        self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name="Accuracy") # shape=()
conv_net_example.py (project: tfplus, author: renmengye)
def build_loss(self, inp, output):
        y_gt = inp['y_gt']
        y_out = output['y_out']
        ce = tfplus.nn.CE()({'y_gt': y_gt, 'y_out': y_out})
        num_ex_f = tf.to_float(tf.shape(inp['x'])[0])
        ce = tf.reduce_sum(ce) / num_ex_f
        self.add_loss(ce)
        total_loss = self.get_loss()
        self.register_var('loss', total_loss)

        correct = tf.equal(tf.argmax(y_gt, 1), tf.argmax(y_out, 1))
        acc = tf.reduce_sum(tf.to_float(correct)) / num_ex_f
        self.register_var('acc', acc)
        return total_loss
res_net_example.py (project: tfplus, author: renmengye)
def build_loss(self, inp, output):
        y_gt = inp['y_gt']
        y_out = output['y_out']
        ce = tfplus.nn.CE()({'y_gt': y_gt, 'y_out': y_out})
        num_ex_f = tf.to_float(tf.shape(inp['x'])[0])
        ce = tf.reduce_sum(ce) / num_ex_f
        self.add_loss(ce)
        total_loss = self.get_loss()
        self.register_var('loss', total_loss)
        correct = tf.equal(tf.argmax(y_gt, 1), tf.argmax(y_out, 1))
        acc = tf.reduce_sum(tf.to_float(correct)) / num_ex_f
        self.register_var('acc', acc)
        return total_loss
train_c3d_ucf101.py (project: C3D-tensorflow, author: hx173149)
def tower_acc(logit, labels):
  correct_pred = tf.equal(tf.argmax(logit, 1), labels)
  accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
  return accuracy
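
Note that tf.argmax returns int64, so the labels passed to tower_acc must be int64 as well or tf.equal will raise a dtype error. A tiny check, with invented values:

logit = tf.constant([[0.1, 0.9], [0.8, 0.2]])
labels = tf.constant([1, 1], dtype=tf.int64)  # int64 to match tf.argmax
acc = tower_acc(logit, labels)                # evaluates to 0.5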
utils_combine.py (project: adversarial-deep-structural-networks, author: wentaozhu)
def cnnmodel(X, Y, paras, flag='single'):
  assert(flag=='single' or flag=='combine')
  X = tf.reshape(X, shape=[-1, boxheight, boxwidth, 1])
  yreshape = tf.reshape(Y, [-1, boxheight, boxwidth, 1])
  yonehot = tf.concat([1 - yreshape, yreshape], axis=3)
  if flag == 'combine':
    hconv4clip = buildcombmodel(X, paras)
  else: hconv4clip = buildmodel(X, paras)
  #hconv4log = -tf.log(hconv4clip)
  #q_train, q_test = crfrnn(hconv4log, paras['wsmooth'], paras['wcontra'], k1, k2, trainiter=5, testiter=10)
  #q_train = tf.reshape(q_train, [-1, boxheight, boxwidth, 2])
  q_train = -tf.log(hconv4clip)
  trainenergy = tf.reduce_sum((q_train)*yonehot, reduction_indices=3)
  #trainenergy = tf.reduce_prod(trainenergy, reduction_indices=[1,2])
  trainenergy = tf.reduce_mean(trainenergy, [0,1,2])
  q_test = hconv4clip
  #q_test = crfrnn(hconv4, paras['wsmooth'], paras['wcontra'], k1, k2, iter=5)
  q_test = tf.reshape(q_test, [-1, boxheight, boxwidth, 2])
  testenergy = tf.reduce_sum(tf.multiply(q_test, yonehot), reduction_indices=3)
  #testenergy = tf.reduce_prod(testenergy, reduction_indices=[1,2])
  testenergy = tf.reduce_mean(testenergy, [0,1,2])
  predarg = tf.argmax(q_test, 3)
  yint64 = tf.to_int64(Y)
  acc = tf.equal(yint64, predarg)
  acc = tf.to_float(acc)
  accuracy = tf.reduce_mean(acc, [0,1,2])
  di = dice_tf(tf.reshape(yint64, [-1,]), tf.reshape(predarg, [-1,]))
  return trainenergy, accuracy, di, testenergy, q_test
utils.py (project: adversarial-deep-structural-networks, author: wentaozhu)
def cnnmodel(X, Y, paras, flag='single'):
  assert(flag=='single' or flag=='combine')
  X = tf.reshape(X, shape=[-1, boxheight, boxwidth, 1])
  yreshape = tf.reshape(Y, [-1, boxheight, boxwidth, 1])
  yonehot = tf.concat([1 - yreshape, yreshape], axis=3)
  if flag == 'combine':
    hconv4clip = buildcombmodel(X, paras)
  else: hconv4clip = buildmodel(X, paras)
  #hconv4log = -tf.log(hconv4clip)
  #q_train, q_test = crfrnn(hconv4log, paras['wsmooth'], paras['wcontra'], k1, k2, trainiter=5, testiter=10)
  #q_train = tf.reshape(q_train, [-1, boxheight, boxwidth, 2])
  q_train = -tf.log(hconv4clip)
  trainenergy = tf.reduce_sum((q_train)*yonehot, reduction_indices=3)
  #trainenergy = tf.reduce_prod(trainenergy, reduction_indices=[1,2])
  trainenergy = tf.reduce_mean(trainenergy, [0,1,2])
  q_test = hconv4clip
  #q_test = crfrnn(hconv4, paras['wsmooth'], paras['wcontra'], k1, k2, iter=5)
  q_test = tf.reshape(q_test, [-1, boxheight, boxwidth, 2])
  testenergy = tf.reduce_sum(tf.multiply(q_test, yonehot), reduction_indices=3)
  #testenergy = tf.reduce_prod(testenergy, reduction_indices=[1,2])
  testenergy = tf.reduce_mean(testenergy, [0,1,2])
  predarg = tf.argmax(q_test, 3)
  yint64 = tf.to_int64(Y)
  acc = tf.equal(yint64, predarg)
  acc = tf.to_float(acc)
  accuracy = tf.reduce_mean(acc, [0,1,2])
  di = dice_tf(tf.reshape(yint64, [-1,]), tf.reshape(predarg, [-1,]))
  return trainenergy, accuracy, di, testenergy, predarg
utils.py (project: adversarial-deep-structural-networks, author: wentaozhu)
def model(X, Y, k1, k2, paras, flag='single'):
  assert(flag=='single' or flag=='combine')
  X = tf.reshape(X, shape=[-1, boxheight, boxwidth, 1])
  yreshape = tf.reshape(Y, [-1, boxheight, boxwidth, 1])
  yonehot = tf.concat([1 - yreshape, yreshape], axis=3)
  if flag == 'combine':
    hconv4clip = buildcombmodel(X, paras, fusion=False)
    #h1, h2, h3, h4 = tf.split(3, 4, hconv4clip)
    q_train, q_test = crfrnn(hconv4clip, paras['wsmooth'], paras['wcontra'], k1, k2, 
      trainiter=5, testiter=10, wunary=paras['wunary'])
  else: 
    hconv4clip = buildmodel(X, paras)
    q_train, q_test = crfrnn(hconv4clip, paras['wsmooth'], paras['wcontra'], k1, k2, 
      trainiter=5, testiter=10)
  #hconv4log = -tf.log(hconv4clip)
  #q_train = tf.reshape(q_train, [-1, boxheight, boxwidth, 2])
  #q_train = -tf.log(hconv4clip)
  q_trainclip = tf.clip_by_value(q_train, 1e-6, 1.)
  trainenergy = tf.reduce_sum(-tf.log(q_trainclip)*yonehot, reduction_indices=3)
  #trainenergy = tf.reduce_prod(trainenergy, reduction_indices=[1,2])
  trainenergy = tf.reduce_mean(trainenergy, [0,1,2])

  #q_test = hconv4clip
  #q_test = crfrnn(hconv4, paras['wsmooth'], paras['wcontra'], k1, k2, iter=5)
  q_test = tf.reshape(q_test, [-1, boxheight, boxwidth, 2])
  testenergy = tf.reduce_sum(tf.multiply(q_test, yonehot), reduction_indices=3)
  #testenergy = tf.reduce_prod(testenergy, reduction_indices=[1,2])
  testenergy = tf.reduce_mean(testenergy, [0,1,2])
  predarg = tf.argmax(q_test, 3)
  yint64 = tf.to_int64(Y)
  acc = tf.equal(yint64, predarg)
  acc = tf.to_float(acc)
  accuracy = tf.reduce_mean(acc, [0,1,2])
  di = dice_tf(tf.reshape(yint64, [-1,]), tf.reshape(predarg, [-1,]))
  return trainenergy, accuracy, di, testenergy, predarg

