Python tf.to_int64() usage examples

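tf.to_int64(x) casts a tensor to tf.int64. It is a TensorFlow 1.x API: TensorFlow 2.x keeps it only as tf.compat.v1.to_int64 and recommends tf.cast(x, tf.int64) instead. A minimal illustration (the values are my own, not from any snippet below):

import tensorflow as tf

x = tf.constant([1.7, 2.3], dtype=tf.float32)
a = tf.to_int64(x)        # TF 1.x cast; float values truncate toward zero -> [1, 2]
b = tf.cast(x, tf.int64)  # equivalent spelling that also works in TF 2.x
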
imgconvnets.py source code (project: DmsMsgRcg, author: bshao001)
def _build_training_graph(self, logits, labels, learning_rate):
        """
        Build the training graph.
        Args:
            logits: Logits tensor, float - [batch_size, class_count].
            labels: Labels tensor, int32 - [batch_size], with values in the range
                [0, class_count).
            learning_rate: The learning rate for the optimization.
        Returns:
            train_op: The Op for training.
            loss: The Op for calculating loss.
            accuracy: The Op for calculating batch accuracy.
        """
        # Create an operation that calculates loss.
        labels = tf.to_int64(labels)
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits, labels=labels, name='xentropy')
        loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
        train_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)

        correct_predict = tf.nn.in_top_k(logits, labels, 1)
        accuracy = tf.reduce_mean(tf.cast(correct_predict, tf.float32))

        return train_op, loss, accuracy
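A minimal call sketch for the method above (the input shapes and the inference function are illustrative assumptions, not taken from DmsMsgRcg; self stands for an instance of the surrounding class):

images_pl = tf.placeholder(tf.float32, [None, 28, 28, 1])  # hypothetical input shape
labels_pl = tf.placeholder(tf.int32, [None])               # int32 labels, cast to int64 inside
logits = build_inference(images_pl)                        # hypothetical model function
train_op, loss, accuracy = self._build_training_graph(logits, labels_pl, learning_rate=1e-4)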
topn.py source code (project: lsdc, author: febert)
def insert(self, ids, scores):
    """Insert the ids and scores into the TopN."""
    with tf.control_dependencies(self.last_ops):
      scatter_op = tf.scatter_update(self.id_to_score, ids, scores)
      larger_scores = tf.greater(scores, self.sl_scores[0])

      def shortlist_insert():
        # Cast ids to int64, presumably to match the dtype of the shortlist ids variable.
        larger_ids = tf.boolean_mask(tf.to_int64(ids), larger_scores)
        larger_score_values = tf.boolean_mask(scores, larger_scores)
        shortlist_ids, new_ids, new_scores = self.ops.top_n_insert(
            self.sl_ids, self.sl_scores, larger_ids, larger_score_values)
        u1 = tf.scatter_update(self.sl_ids, shortlist_ids, new_ids)
        u2 = tf.scatter_update(self.sl_scores, shortlist_ids, new_scores)
        return tf.group(u1, u2)

      # We only need to insert into the shortlist if there are any
      # scores larger than the threshold.
      cond_op = tf.cond(
          tf.reduce_any(larger_scores), shortlist_insert, tf.no_op)
      with tf.control_dependencies([cond_op]):
        self.last_ops = [scatter_op, cond_op]
speech.py source code (project: fathom, author: rdadolf)
def ctc_label_dense_to_sparse( self, labels, label_lengths ):
    """Mike Henry's implementation, with some minor modifications."""
    with self.G.as_default():
      label_shape = tf.shape( labels )
      num_batches_tns = tf.stack( [label_shape[0]] )
      max_num_labels_tns = tf.stack( [label_shape[1]] )

      def range_less_than(previous_state, current_input):
        return tf.expand_dims( tf.range( label_shape[1] ), 0 ) < current_input

      init = tf.cast( tf.fill( max_num_labels_tns, 0 ), tf.bool )
      init = tf.expand_dims( init, 0 )
      dense_mask = functional_ops.scan(range_less_than, label_lengths, initializer=init, parallel_iterations=1)
      dense_mask = dense_mask[ :, 0, : ]

      label_array = tf.reshape( tf.tile( tf.range( 0, label_shape[1] ), num_batches_tns ), label_shape )
      label_ind = tf.boolean_mask( label_array, dense_mask )

      batch_array = tf.transpose( tf.reshape( tf.tile( tf.range( 0,  label_shape[0] ), max_num_labels_tns ), tf.reverse( label_shape,[0]) ) )
      batch_ind = tf.boolean_mask( batch_array, dense_mask )

      indices = tf.transpose( tf.reshape( tf.concat( axis=0, values=[batch_ind, label_ind] ), [2,-1] ) )
      vals_sparse = tf.gather_nd( labels, indices )
      return tf.SparseTensor( tf.to_int64(indices), vals_sparse, tf.to_int64( label_shape ) )
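A usage sketch (the label values and lengths are illustrative; self stands for the object carrying this method):

labels = tf.constant([[1, 2, 3, 0], [4, 5, 0, 0]], dtype=tf.int32)  # padded dense labels
label_lengths = tf.constant([3, 2])
sparse_labels = self.ctc_label_dense_to_sparse(labels, label_lengths)
# tf.SparseTensor requires int64 indices and shape, which is why the return
# statement wraps both in tf.to_int64.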
tf_model.py source code (project: TF-Net, author: Jorba123)
def loss(logit_tensor, targets_pl, one_hot_labels):
    """Add L2Loss to all the trainable variables.

    Add summary for "Loss" and "Loss/avg".
    Args:
    logit_tensor: Logits from inference().
    targets_pl: Targets placeholder - [batch_size] class ids, or
        [batch_size, NUM_CLASSES] one-hot targets when one_hot_labels is True.
    one_hot_labels: Whether targets_pl holds one-hot targets.

    Returns:
    Loss tensor of type float.
    """
    targets = tf.to_int64(targets_pl)

    # calculate the average cross entropy loss across the batch.
    if one_hot_labels:
        # The dense cross entropy expects float one-hot labels, hence the extra cast.
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
            logits=logit_tensor, labels=tf.to_float(targets), name='cross_entropy_per_example')
    else:
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logit_tensor, labels=targets, name='cross_entropy_per_example_sparse')
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy_mean')
    tf.add_to_collection('losses', cross_entropy_mean)
    tf.summary.scalar('loss', cross_entropy_mean)
    return cross_entropy_mean
neuralnetwork.py source code (project: neuroimage-tensorflow, author: corticometrics)
def loss(logits, labels):

    # input: logits: Logits tensor, float - [batch_size, 256, 256, NUM_CLASSES].
    # input: labels: Labels tensor, int32 - [batch_size, 256, 256].
    # output: loss: Loss tensor of type float.

    labels = tf.to_int64(labels)
    print_tensor_shape( logits, 'logits shape before')
    print_tensor_shape( labels, 'labels shape before')

    # reshape to match args required for the cross entropy function
    logits_re = tf.reshape( logits, [-1, 2] )
    labels_re = tf.reshape( labels, [-1] )
    print_tensor_shape( logits_re, 'logits shape after')
    print_tensor_shape( labels_re, 'labels shape after')

    # call cross entropy with logits
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
         logits=logits_re, labels=labels_re, name='cross_entropy')

    loss = tf.reduce_mean(cross_entropy, name='1cnn_cross_entropy_mean')
    return loss
train.py source code (project: neuroimage-tensorflow, author: corticometrics)
def loss_fn(logits, labels):        
    # input:  logits: Logits tensor, float - [batch_size, 256, 256, 256, 2].
    # input:  labels: Labels tensor, int8 - [batch_size, 256, 256, 256].
    # output: loss: Loss tensor of type float.

    labels = tf.to_int64(labels)
    print_tensor_shape( logits, 'logits shape ')
    print_tensor_shape( labels, 'labels shape ')

    # reshape to match args required for the cross entropy function
    logits_re = tf.reshape( logits, [-1, 2] )
    labels_re = tf.reshape( labels, [-1] )
    #print_tensor_shape( logits_re, 'logits shape after')
    #print_tensor_shape( labels_re, 'labels shape after')

    # call cross entropy with logits
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits_re, labels=labels_re, name='cross_entropy')
    print_tensor_shape( cross_entropy, 'cross_entropy shape ')

    loss = tf.reduce_mean(cross_entropy, name='1cnn_cross_entropy_mean')
    print_tensor_shape( loss, 'loss shape ')

    return loss
eval_functions.py source code (project: sat-seg, author: mshiv)
def loss(logits, labels, num_classes):

    logits = tf.reshape(logits, [-1, num_classes])
    #epsilon = tf.constant(value=1e-4)
    #logits = logits + epsilon

    # Cast labels to INT for sparse_softmax_cross_entropy_with_logits,
    # or to FLOAT for softmax_cross_entropy_with_logits.
    #labels = tf.to_float(tf.reshape(labels, [-1]))
    labels = tf.to_int64(tf.reshape(labels, [-1]))
    #print (np.unique(labels))
    print ('shape of logits: %s' % str(logits.get_shape()))
    print ('shape of labels: %s' % str(labels.get_shape()))

    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels, name='Cross_Entropy')
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='xentropy_mean')
    tf.add_to_collection('losses', cross_entropy_mean)

    loss = tf.add_n(tf.get_collection('losses'), name='total_loss')
    #loss = tf.add_n(cross_entropy)
    return loss
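Anything else added to the 'losses' collection before the tf.add_n call is folded into total_loss. A sketch of adding an L2 weight-decay term this way (the variable is illustrative, not from sat-seg):

weights = tf.get_variable('conv_w', [3, 3, 64, 21])            # hypothetical weights
tf.add_to_collection('losses', 1e-4 * tf.nn.l2_loss(weights))  # weight decay term
# loss(...) now returns cross entropy plus the weight decay above.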
tf_model.py source code (project: Defect-Prediction, author: Jorba123)
def loss(logit_tensor, targets_pl):
    """Add L2Loss to all the trainable variables.

    Add summary for "Loss" and "Loss/avg".
    Args:
    logit_tensor: Logits from inference().
    targets_pl: Targets placeholder - [batch_size, NUM_CLASSES] one-hot targets
        (the dense cross entropy below needs a full distribution per example).

    Returns:
    Loss tensor of type float.
    """
    targets = tf.to_int64(targets_pl)

    # calculate the average cross entropy loss across the batch.
    # NOTE: the dense cross entropy expects float one-hot labels, hence the cast
    # back to float after the int64 round trip.
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
        logits=logit_tensor, labels=tf.to_float(targets), name='cross_entropy_per_example')
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy_mean')
    tf.add_to_collection('losses', cross_entropy_mean)
    tf.summary.scalar('loss', cross_entropy_mean)
    return cross_entropy_mean
seq2seq_helpers.py source code (project: DeepDeepParser, author: janmbuys)
def init_thin_stack(batch_size, max_num_concepts):
  """Initializes the thin stack.
  Returns:
    thin_stack: Tensor with the stack content.
    thin_stack_head_next: Index pointers to element after stack head.
  """
  # Stack initialized to -1, points to initial state.
  thin_stack = -tf.ones(tf.stack([batch_size, max_num_concepts]),
      dtype=tf.int32)
  # Reshape to ensure dimension 1 is known.
  thin_stack = tf.reshape(thin_stack, [-1, max_num_concepts])
  # Set to 0 at position 0.
  inds = tf.transpose(tf.to_int64(tf.stack(
      [tf.range(batch_size), tf.zeros(tf.stack([batch_size]), dtype=tf.int32)])))
  delta = tf.SparseTensor(inds, tf.ones(tf.stack([batch_size]), dtype=tf.int32),
      tf.stack([tf.to_int64(batch_size), max_num_concepts]))
  new_thin_stack = thin_stack + tf.sparse_tensor_to_dense(delta)
  # Position 0 is for empty stack; position after head always >= 1.
  thin_stack_head_next = tf.ones(tf.stack([batch_size]),
      dtype=tf.int32)
  return new_thin_stack, thin_stack_head_next
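For intuition, a tiny worked case (arguments assumed to be plain Python ints): with batch_size=2 and max_num_concepts=3, the sparse delta adds 1 at column 0 of every row of the -1-filled stack, so:

stack, head_next = init_thin_stack(2, 3)
# stack evaluates to [[0, -1, -1],
#                     [0, -1, -1]]  (0 marks the initial state at position 0)
# head_next evaluates to [1, 1]     (the next free slot for each batch entry)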
seq2seq_helpers.py source code (project: DeepDeepParser, author: janmbuys)
def mask_decoder_reduce(logit, thin_stack_head_next, logit_size, batch_size):
  """Ensures that we can only reduce when the stack has at least 1 item.

  For each batch entry k:
    if thin_stack_head_next[k] is 0 or 1 (fewer than 1 item on the stack),
      set logit[k][reduce_index] = -np.inf;
    otherwise leave the logit unchanged.
  """
  # Allow reduce only if at least 1 item on stack, i.e., pointer >= 2.
  update_vals = tf.stack([-np.inf, -np.inf, 0.0])
  update_val = tf.gather(update_vals,
      tf.minimum(thin_stack_head_next,
      2*tf.ones(tf.stack([batch_size]), dtype=tf.int32)))

  re_filled = tf.fill(tf.stack([batch_size]),
      tf.to_int64(data_utils.REDUCE_ID))
  re_inds = tf.transpose(tf.stack(
      [tf.to_int64(tf.range(batch_size)), re_filled]))
  re_delta = tf.SparseTensor(re_inds, update_val, tf.to_int64(
      tf.stack([batch_size, logit_size])))
  new_logit = logit + tf.sparse_tensor_to_dense(re_delta)
  return new_logit
image.py source code (project: tensor2tensor, author: tensorflow)
def imagenet_preprocess_example(example, mode, resize_size=None):
  """Preprocessing used for Imagenet and similar problems."""
  if resize_size is None:
    resize_size = [299, 299]

  def preprocess(img):
    img = tf.image.resize_images(img, [360, 360])
    img = common_layers.image_augmentation(
        tf.to_float(img) / 255., crop_size=resize_size)
    return tf.to_int64(img * 255.)

  def resize(img):
    return tf.to_int64(tf.image.resize_images(img, resize_size))

  inputs = tf.cast(example["inputs"], tf.int64)
  if mode == tf.estimator.ModeKeys.TRAIN:
    example["inputs"] = tf.cond(  # Preprocess 90% of the time.
        tf.less(tf.random_uniform([]), 0.9),
        lambda img=inputs: preprocess(img),
        lambda img=inputs: resize(img))
  else:
    example["inputs"] = resize(inputs)
  return example
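A sketch of applying it to a single example dict (the input dtype and shape are assumed, not taken from tensor2tensor):

example = {"inputs": tf.zeros([360, 360, 3], dtype=tf.uint8)}
example = imagenet_preprocess_example(example, tf.estimator.ModeKeys.TRAIN)
# example["inputs"] is now an int64 image with values in [0, 255], resized to 299x299.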
text.py source code (project: AVSR-Deep-Speech, author: pandeydivesh15)
def ctc_label_dense_to_sparse(labels, label_lengths, batch_size):
    # The second dimension of labels must be equal to the longest label length in the batch
    correct_shape_assert = tf.assert_equal(tf.shape(labels)[1], tf.reduce_max(label_lengths))
    with tf.control_dependencies([correct_shape_assert]):
        labels = tf.identity(labels)

    label_shape = tf.shape(labels)
    num_batches_tns = tf.stack([label_shape[0]])
    max_num_labels_tns = tf.stack([label_shape[1]])
    def range_less_than(previous_state, current_input):
        return tf.expand_dims(tf.range(label_shape[1]), 0) < current_input

    init = tf.cast(tf.fill(max_num_labels_tns, 0), tf.bool)
    init = tf.expand_dims(init, 0)
    dense_mask = tf.scan(range_less_than, label_lengths, initializer=init, parallel_iterations=1)
    dense_mask = dense_mask[:, 0, :]

    label_array = tf.reshape(tf.tile(tf.range(0, label_shape[1]), num_batches_tns),
          label_shape)
    label_ind = tf.boolean_mask(label_array, dense_mask)

    batch_array = tf.transpose(tf.reshape(tf.tile(tf.range(0, label_shape[0]), max_num_labels_tns), tf.reverse(label_shape, [0])))
    batch_ind = tf.boolean_mask(batch_array, dense_mask)

    indices = tf.transpose(tf.reshape(tf.concat([batch_ind, label_ind], 0), [2, -1]))
    shape = [batch_size, tf.reduce_max(label_lengths)]
    vals_sparse = gather_nd(labels, indices, shape)

    return tf.SparseTensor(tf.to_int64(indices), vals_sparse, tf.to_int64(label_shape))

# Validate and normalize transcriptions. Returns a cleaned version of the label
# or None if it's invalid.
text_RHL.py source code (project: AVSR-Deep-Speech, author: pandeydivesh15)
The ctc_label_dense_to_sparse implementation here is identical to the one in text.py above.
seq2seq_model.py source code (project: seq2seq, author: google)
def _create_predictions(self, decoder_output, features, labels, losses=None):
    """Creates the dictionary of predictions that is returned by the model.
    """
    predictions = {}

    # Add features and, if available, labels to predictions
    predictions.update(_flatten_dict({"features": features}))
    if labels is not None:
      predictions.update(_flatten_dict({"labels": labels}))

    if losses is not None:
      predictions["losses"] = _transpose_batch_time(losses)

    # The decoder returns output in time-major form [T, B, ...].
    # Here we transpose everything back to batch-major for the user.
    output_dict = collections.OrderedDict(
        zip(decoder_output._fields, decoder_output))
    decoder_output_flat = _flatten_dict(output_dict)
    decoder_output_flat = {
        k: _transpose_batch_time(v)
        for k, v in decoder_output_flat.items()
    }
    predictions.update(decoder_output_flat)

    # If we predicted ids, map them back into the vocab and process them.
    if "predicted_ids" in predictions.keys():
      vocab_tables = graph_utils.get_dict_from_collection("vocab_tables")
      target_id_to_vocab = vocab_tables["target_id_to_vocab"]
      predicted_tokens = target_id_to_vocab.lookup(
          tf.to_int64(predictions["predicted_ids"]))
      # Raw predicted tokens
      predictions["predicted_tokens"] = predicted_tokens

    return predictions
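The lookup table maps int64 ids back to token strings, which is why predicted_ids is wrapped in tf.to_int64. A standalone sketch of the same pattern (the table here is built ad hoc, not the way the project constructs its vocab_tables):

table = tf.contrib.lookup.index_to_string_table_from_tensor(
    tf.constant(["<unk>", "hello", "world"]))
tokens = table.lookup(tf.to_int64(tf.constant([1, 2], dtype=tf.int32)))
# After tf.tables_initializer() runs, tokens evaluates to [b'hello', b'world'].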
utils_combine.py source code (project: adversarial-deep-structural-networks, author: wentaozhu)
def cnnmodel(X, Y, paras, flag='single'):
  assert(flag=='single' or flag=='combine')
  X = tf.reshape(X, shape=[-1, boxheight, boxwidth, 1])
  yreshape = tf.reshape(Y, [-1, boxheight, boxwidth, 1])
  yonehot = tf.concat([1-yreshape, yreshape], 3)
  if flag == 'combine':
    hconv4clip = buildcombmodel(X, paras)
  else: hconv4clip = buildmodel(X, paras)
  #hconv4log = -tf.log(hconv4clip)
  #q_train, q_test = crfrnn(hconv4log, paras['wsmooth'], paras['wcontra'], k1, k2, trainiter=5, testiter=10)
  #q_train = tf.reshape(q_train, [-1, boxheight, boxwidth, 2])
  q_train = -tf.log(hconv4clip)
  trainenergy = tf.reduce_sum((q_train)*yonehot, reduction_indices=3)
  #trainenergy = tf.reduce_prod(trainenergy, reduction_indices=[1,2])
  trainenergy = tf.reduce_mean(trainenergy, [0,1,2])
  q_test = hconv4clip
  #q_test = crfrnn(hconv4, paras['wsmooth'], paras['wcontra'], k1, k2, iter=5)
  q_test = tf.reshape(q_test, [-1, boxheight, boxwidth, 2])
  testenergy = tf.reduce_sum(tf.multiply(q_test, yonehot), reduction_indices=3)
  #testenergy = tf.reduce_prod(testenergy, reduction_indices=[1,2])
  testenergy = tf.reduce_mean(testenergy, [0,1,2])
  predarg = tf.argmax(q_test, 3)
  yint64 = tf.to_int64(Y)
  acc = tf.equal(yint64, predarg)
  acc = tf.to_float(acc)
  accuracy = tf.reduce_mean(acc, [0,1,2])
  di = dice_tf(tf.reshape(yint64, [-1,]), tf.reshape(predarg, [-1,]))
  return trainenergy, accuracy, di, testenergy, q_test
utils.py source code (project: adversarial-deep-structural-networks, author: wentaozhu)
def cnnmodel(X, Y, paras, flag='single'):
  assert(flag=='single' or flag=='combine')
  X = tf.reshape(X, shape=[-1, boxheight, boxwidth, 1])
  yreshape = tf.reshape(Y, [-1, boxheight, boxwidth, 1])
  yonehot = tf.concat([1-yreshape, yreshape], 3)
  if flag == 'combine':
    hconv4clip = buildcombmodel(X, paras)
  else: hconv4clip = buildmodel(X, paras)
  #hconv4log = -tf.log(hconv4clip)
  #q_train, q_test = crfrnn(hconv4log, paras['wsmooth'], paras['wcontra'], k1, k2, trainiter=5, testiter=10)
  #q_train = tf.reshape(q_train, [-1, boxheight, boxwidth, 2])
  q_train = -tf.log(hconv4clip)
  trainenergy = tf.reduce_sum((q_train)*yonehot, reduction_indices=3)
  #trainenergy = tf.reduce_prod(trainenergy, reduction_indices=[1,2])
  trainenergy = tf.reduce_mean(trainenergy, [0,1,2])
  q_test = hconv4clip
  #q_test = crfrnn(hconv4, paras['wsmooth'], paras['wcontra'], k1, k2, iter=5)
  q_test = tf.reshape(q_test, [-1, boxheight, boxwidth, 2])
  testenergy = tf.reduce_sum(tf.multiply(q_test, yonehot), reduction_indices=3)
  #testenergy = tf.reduce_prod(testenergy, reduction_indices=[1,2])
  testenergy = tf.reduce_mean(testenergy, [0,1,2])
  predarg = tf.argmax(q_test, 3)
  yint64 = tf.to_int64(Y)
  acc = tf.equal(yint64, predarg)
  acc = tf.to_float(acc)
  accuracy = tf.reduce_mean(acc, [0,1,2])
  di = dice_tf(tf.reshape(yint64, [-1,]), tf.reshape(predarg, [-1,]))
  return trainenergy, accuracy, di, testenergy, predarg
utils.py source code (project: adversarial-deep-structural-networks, author: wentaozhu)
def model(X, Y, k1, k2, paras, flag='single'):
  assert(flag=='single' or flag=='combine')
  X = tf.reshape(X, shape=[-1, boxheight, boxwidth, 1])
  yreshape = tf.reshape(Y, [-1, boxheight, boxwidth, 1])
  yonehot = tf.concat([1-yreshape, yreshape], 3)
  if flag == 'combine':
    hconv4clip = buildcombmodel(X, paras, fusion=False)
    #h1, h2, h3, h4 = tf.split(3, 4, hconv4clip)
    q_train, q_test = crfrnn(hconv4clip, paras['wsmooth'], paras['wcontra'], k1, k2, 
      trainiter=5, testiter=10, wunary=paras['wunary'])
  else: 
    hconv4clip = buildmodel(X, paras)
    q_train, q_test = crfrnn(hconv4clip, paras['wsmooth'], paras['wcontra'], k1, k2, 
      trainiter=5, testiter=10)
  #hconv4log = -tf.log(hconv4clip)
  #q_train = tf.reshape(q_train, [-1, boxheight, boxwidth, 2])
  #q_train = -tf.log(hconv4clip)
  q_trainclip = tf.clip_by_value(q_train, 1e-6, 1.)
  trainenergy = tf.reduce_sum(-tf.log(q_trainclip)*yonehot, reduction_indices=3)
  #trainenergy = tf.reduce_prod(trainenergy, reduction_indices=[1,2])
  trainenergy = tf.reduce_mean(trainenergy, [0,1,2])

  #q_test = hconv4clip
  #q_test = crfrnn(hconv4, paras['wsmooth'], paras['wcontra'], k1, k2, iter=5)
  q_test = tf.reshape(q_test, [-1, boxheight, boxwidth, 2])
  testenergy = tf.reduce_sum(tf.multiply(q_test, yonehot), reduction_indices=3)
  #testenergy = tf.reduce_prod(testenergy, reduction_indices=[1,2])
  testenergy = tf.reduce_mean(testenergy, [0,1,2])
  predarg = tf.argmax(q_test, 3)
  yint64 = tf.to_int64(Y)
  acc = tf.equal(yint64, predarg)
  acc = tf.to_float(acc)
  accuracy = tf.reduce_mean(acc, [0,1,2])
  di = dice_tf(tf.reshape(yint64, [-1,]), tf.reshape(predarg, [-1,]))
  return trainenergy, accuracy, di, testenergy, predarg
output_processors.py source code (project: rlflow, author: tpbarron)
def cast_int(x):
    """
    Cast the tensor x to int64.
    """
    return tf.to_int64(x)


# Q learning DQN style
task.py source code (project: cloudml-samples, author: GoogleCloudPlatform)
def generate_top_k_scores_and_ids(logits, top_k):
  """This function computes top K ids and scores from logits tensor.

  Args:
    logits: logit tensor computed in the serving graph.
    top_k: number of top K elements to rank.

  Returns:
    predictions: dict with the top K scores ("probabilities") and their
        string ids ("classes").
    output_alternatives: classification output alternatives for serving.
  """

  probabilities = tf.nn.softmax(
      logits, name=tf.contrib.learn.PredictionKey.PROBABILITIES)
  top_k_scores, top_k_ids = tf.nn.top_k(
      input=probabilities, k=top_k)
  top_k_ids = tf.contrib.lookup.index_to_string(
      tf.to_int64(top_k_ids),
      mapping=tf.constant([str(i) for i in xrange(MOVIE_VOCAB_SIZE)]))
  predictions = {
      # served as "scores" by Servo in the ClassificationResult
      tf.contrib.learn.PredictionKey.PROBABILITIES:
          top_k_scores,
      # served as "classes" by Servo in the ClassificationResult
      tf.contrib.learn.PredictionKey.CLASSES:
          top_k_ids
  }
  output_alternatives = {DEFAULT_OUTPUT_ALTERNATIVE: (
      tf.contrib.learn.ProblemType.CLASSIFICATION,
      predictions)}
  return predictions, output_alternatives
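A call sketch (the batch size is a placeholder; MOVIE_VOCAB_SIZE comes from the surrounding module):

logits = tf.random_normal([8, MOVIE_VOCAB_SIZE])  # hypothetical serving logits
predictions, output_alternatives = generate_top_k_scores_and_ids(logits, top_k=10)
# predictions["probabilities"] holds the top-10 scores and predictions["classes"]
# the matching ids as strings ("0" through str(MOVIE_VOCAB_SIZE - 1)).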
model.py source code (project: cloudml-samples, author: GoogleCloudPlatform)
def loss(logits, labels):
  """Calculates the loss from the logits and the labels.

  Args:
    logits: Logits tensor, float - [batch_size, NUM_CLASSES].
    labels: Labels tensor, int32 - [batch_size].
  Returns:
    loss: Loss tensor of type float.
  """
  labels = tf.to_int64(labels)
  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
      logits=logits, labels=labels, name='xentropy')
  return tf.reduce_mean(cross_entropy, name='xentropy_mean')
tensorflow_backend.py source code (project: keras, author: GeekLiB)
def ctc_label_dense_to_sparse(labels, label_lengths):
    # undocumented feature soon to be made public
    from tensorflow.python.ops import functional_ops
    label_shape = tf.shape(labels)
    num_batches_tns = tf.stack([label_shape[0]])
    max_num_labels_tns = tf.stack([label_shape[1]])

    def range_less_than(previous_state, current_input):
        return tf.expand_dims(tf.range(label_shape[1]), 0) < tf.fill(max_num_labels_tns, current_input)

    init = tf.cast(tf.fill([1, label_shape[1]], 0), tf.bool)
    dense_mask = functional_ops.scan(range_less_than, label_lengths,
                                     initializer=init, parallel_iterations=1)
    dense_mask = dense_mask[:, 0, :]

    label_array = tf.reshape(tf.tile(tf.range(0, label_shape[1]), num_batches_tns),
                             label_shape)
    label_ind = tf.boolean_mask(label_array, dense_mask)

    batch_array = tf.transpose(tf.reshape(tf.tile(tf.range(0, label_shape[0]),
                                                  max_num_labels_tns), tf.reverse(label_shape, [0])))
    batch_ind = tf.boolean_mask(batch_array, dense_mask)
    indices = tf.transpose(tf.reshape(tf.concat([batch_ind, label_ind], 0), [2, -1]))

    vals_sparse = tf.gather_nd(labels, indices)

    return tf.SparseTensor(tf.to_int64(indices), vals_sparse, tf.to_int64(label_shape))
midi.py source code (project: tf-midi-id, author: cghawthorne)
def loss(logits, labels):
  """Calculates the loss from the logits and the labels.

  Args:
    logits: Logits tensor, float - [batch_size, NUM_CLASSES].
    labels: Labels tensor, int32 - [batch_size].

  Returns:
    loss: Loss tensor of type float.
  """
  labels = tf.to_int64(labels)
  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
      logits=logits, labels=labels, name='xentropy')
  loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
  return loss
mnist.py source code (project: deep_learning_study, author: jowettcz)
def loss(logits, labels):
  """Calculates the loss from the logits and the labels.

  Args:
    logits: Logits tensor, float - [batch_size, NUM_CLASSES].
    labels: Labels tensor, int32 - [batch_size].

  Returns:
    loss: Loss tensor of type float.
  """
  labels = tf.to_int64(labels)
  cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
      labels=labels, logits=logits, name='xentropy')
  return tf.reduce_mean(cross_entropy, name='xentropy_mean')
models1.py source code (project: dlbench, author: hclhkbu)
def loss(logits, labels):
    labels = tf.to_int64(labels)
    # NOTE: the dense cross entropy expects float one-hot labels, hence the cast back.
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=tf.to_float(labels))
    loss = tf.reduce_mean(cross_entropy, name='cross_entropy_mean')
    return loss
models.py source code (project: dlbench, author: hclhkbu)
The loss implementation here is identical to the one in models1.py above.
seq2seq_model.py source code (project: conv_seq2seq, author: tobyyouup)
def _create_predictions(self, decoder_output, features, labels, losses=None):
    """Creates the dictionary of predictions that is returned by the model.
    """
    predictions = {}

    # Add features and, if available, labels to predictions
    predictions.update(_flatten_dict({"features": features}))
    if labels is not None:
      predictions.update(_flatten_dict({"labels": labels}))

    if losses is not None:
      predictions["losses"] = _transpose_batch_time(losses)

    # The decoder returns output in time-major form [T, B, ...].
    # Here we transpose everything back to batch-major for the user.
    output_dict = collections.OrderedDict(
        zip(decoder_output._fields, decoder_output))
    decoder_output_flat = _flatten_dict(output_dict)

    decoder_output_flat = {
        k: _transpose_batch_time(v)
        for k, v in decoder_output_flat.items()
    }
    predictions.update(decoder_output_flat)

    # If we predicted ids, map them back into the vocab and process them.
    if "predicted_ids" in predictions.keys():
      vocab_tables = graph_utils.get_dict_from_collection("vocab_tables")
      target_id_to_vocab = vocab_tables["target_id_to_vocab"]
      predicted_tokens = target_id_to_vocab.lookup(
          tf.to_int64(predictions["predicted_ids"]))
      # Raw predicted tokens
      predictions["predicted_tokens"] = predicted_tokens

    return predictions
nn_with_alpha.py source code (project: DataMining, author: lidalei)
def loss(logits, labels, l2_penalty):
    """Calculates the loss from the logits and the labels.    
    Args:
      logits: Logits tensor, float - [batch_size, NUM_CLASSES].
      labels: Labels tensor, int32 - [batch_size].
      l2_penalty: Scalar L2 regularization term added to the loss.
    Returns:
      loss: Loss tensor of type float.
    """
    labels = tf.to_int64(labels)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels, name='xentropy')
    loss = tf.reduce_mean(cross_entropy, name = 'xentropy_mean') + l2_penalty
    return loss
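A sketch of supplying the penalty term (all tensors here are illustrative):

logits = tf.random_normal([32, 10])        # placeholder logits
labels = tf.constant([0] * 32)             # placeholder labels, cast to int64 inside
w = tf.get_variable('hidden_w', [784, 10]) # hypothetical weight matrix
total = loss(logits, labels, l2_penalty=1e-3 * tf.nn.l2_loss(w))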

