Example source code for Python's sparse_to_dense()
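
tf.sparse_to_dense(sparse_indices, output_shape, sparse_values, default_value) scatters sparse_values into a dense tensor of shape output_shape and fills every other entry with default_value. In the snippets collected below it is used almost exclusively to turn integer class labels into one-hot matrices for a softmax cross-entropy loss. All snippets assume import tensorflow as tf (and numpy as np where used) and target the TF 0.x/1.x graph API. A minimal sketch of the op itself:

import tensorflow as tf

# One (row, class) index pair per example; 1.0 is scattered at each pair
# and every other entry gets the default 0.0.
indices = tf.constant([[0, 2], [1, 0], [2, 1]])
one_hot = tf.sparse_to_dense(indices, output_shape=[3, 4],
                             sparse_values=1.0, default_value=0.0)

with tf.Session() as sess:
    print(sess.run(one_hot))
    # [[0. 0. 1. 0.]
    #  [1. 0. 0. 0.]
    #  [0. 1. 0. 0.]]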

model.py (project: facial-emotion-detection-dl, author: dllatas)
def loss(logits, labels):
    # Reshape the labels into a dense Tensor of shape [batch_size, NUM_CLASSES].
    sparse_labels = tf.reshape(labels, [input.FLAGS.batch_size, 1])
    indices = tf.reshape(tf.range(0, input.FLAGS.batch_size), [input.FLAGS.batch_size, 1])
    concated = tf.concat(axis=1, values=[indices, sparse_labels])
    dense_labels = tf.sparse_to_dense(concated, [input.FLAGS.batch_size, input.NUM_CLASSES], 1.0, 0.0)
    # Calculate the average cross entropy loss across the batch.
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
        logits=logits, labels=dense_labels, name='cross_entropy_per_example')
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
    tf.add_to_collection('losses', cross_entropy_mean)
    # The total loss is defined as the cross entropy loss plus all of the weight decay terms (L2 loss).
    return tf.add_n(tf.get_collection('losses'), name='total_loss')
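
Note: the reshape/range/concat/sparse_to_dense pattern above predates tf.one_hot. On TensorFlow 1.0+ the same loss can be computed without materializing the one-hot matrix at all; a sketch (an editor's equivalent, not code from the original project):

def sparse_loss(logits, labels):
    # labels: int32/int64 tensor of shape [batch_size]; the op handles the
    # implicit one-hot encoding internally.
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=labels, name='cross_entropy_per_example')
    return tf.reduce_mean(cross_entropy, name='cross_entropy')

Alternatively, dense_labels = tf.one_hot(labels, input.NUM_CLASSES) replaces the four label-handling lines in a single call.
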
alexnetbm.py (project: dlbench, author: hclhkbu)
def loss(logits, labels):
    batch_size = tf.size(labels)
    labels = tf.expand_dims(labels, 1)
    indices = tf.expand_dims(tf.range(0, batch_size, 1), 1)
    concated = tf.concat(axis=1, values=[indices, labels])
    onehot_labels = tf.sparse_to_dense(
        concated, tf.stack([batch_size, 1000]), 1.0, 0.0)
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits,
                                                            labels=onehot_labels,
                                                            name='xentropy')
    loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
    return loss
alexnet_cpu.py (project: dlbench, author: hclhkbu)
def loss(logits, labels):
    batch_size = tf.size(labels)
    labels = tf.expand_dims(labels, 1)
    indices = tf.expand_dims(tf.range(0, batch_size, 1), 1)
    concated = tf.concat(axis=1, values=[indices, labels])
    onehot_labels = tf.sparse_to_dense(
        concated, tf.stack([batch_size, 1000]), 1.0, 0.0)
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits,
                                                            labels=onehot_labels,
                                                            name='xentropy')
    loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
    return loss
models.py (project: dlbench, author: hclhkbu)
def loss(logits, labels):
    batch_size = tf.size(labels)
    labels = tf.expand_dims(labels, 1)
    indices = tf.expand_dims(tf.range(0, batch_size, 1), 1)
    concated = tf.concat(axis=1, values=[indices, labels])
    onehot_labels = tf.sparse_to_dense(
        concated, tf.stack([batch_size, 1000]), 1.0, 0.0)
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits,
                                                            labels=onehot_labels,
                                                            name='xentropy')
    loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
    return loss
alexnet_cifar10_multi_gpu1.py (project: dlbench, author: hclhkbu)
def loss_function(logits, labels):
    batch_size = tf.size(labels)
    labels = tf.expand_dims(labels, 1)
    indices = tf.expand_dims(tf.range(0, batch_size, 1), 1)
    concated = tf.concat(axis=1, values=[indices, labels])
    onehot_labels = tf.sparse_to_dense(
        concated, tf.stack([batch_size, 10]), 1.0, 0.0)
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits,
                                                            labels=onehot_labels,
                                                            name='xentropy')
    loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
    return loss
inputs.py (project: text-classification2, author: yuhui-lin)
def char_index_batch_to_2d_tensor(batch, batch_size, num_labels):
    sparse_labels = tf.reshape(batch, [batch_size, 1])
    indices = tf.reshape(tf.range(0, batch_size, 1), [batch_size, 1])
    concatenated = tf.concat([indices, sparse_labels], axis=1)
    concat = tf.concat([[batch_size], [num_labels]], axis=0)
    output_shape = tf.reshape(concat, [2])
    sparse_to_dense = tf.sparse_to_dense(concatenated, output_shape, 1, 0)
    return tf.reshape(sparse_to_dense, [batch_size, num_labels])
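
A usage sketch (hypothetical values, TF 1.x graph mode) showing what the helper produces for a batch of three class indices:

batch = tf.constant([2, 0, 1])
dense = char_index_batch_to_2d_tensor(batch, batch_size=3, num_labels=4)
with tf.Session() as sess:
    print(sess.run(dense))
    # [[0 0 1 0]
    #  [1 0 0 0]
    #  [0 1 0 0]]
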
ocr_model.py (project: SpikeFlow, author: deeperic)
def loss(logits, labels):
  """Add L2Loss to all the trainable variables.

  Add summary for "Loss" and "Loss/avg".

  Args:
    logits: Logits from inference().
    labels: Labels from distorted_inputs or inputs(). 1-D tensor
            of shape [batch_size]

  Returns:
    Loss tensor of type float.
  """
  # Reshape the labels into a dense Tensor of
  # shape [batch_size, NUM_CLASSES].
  sparse_labels = tf.reshape(labels, [FLAGS.batch_size, 1])
  indices = tf.reshape(tf.range(0, FLAGS.batch_size), [FLAGS.batch_size, 1])
  concated = tf.concat(axis=1, values=[indices, sparse_labels])
  dense_labels = tf.sparse_to_dense(concated,
                                    [FLAGS.batch_size, NUM_CLASSES],
                                    1.0, 0.0)

  # Calculate the average cross entropy loss across the batch.
  cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
    logits=logits, labels=dense_labels, name='cross_entropy_per_example')
  cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
  tf.add_to_collection('losses', cross_entropy_mean)

  # The total loss is defined as the cross entropy loss plus all of the weight
  # decay terms (L2 loss).
  return tf.add_n(tf.get_collection('losses'), name='total_loss')
translate.py (project: tf_practice, author: juho-lee)
def translate(U, theta, out_height, out_width):
    num_batch = tf.shape(U)[0]
    height, width, num_ch = U.get_shape()[1:]
    height = height.value
    width = width.value
    num_ch = num_ch.value
    hwc = height*width*num_ch

    nind = tf.range(num_batch)
    x = repeat(tf.range(height), width)
    y = tf.tile(tf.range(width), tf.stack([height]))
    cind = tf.range(num_ch)

    nind = tf.expand_dims(repeat(nind, hwc), 1)
    x = tf.tile(tf.expand_dims(repeat(x, num_ch), 1), tf.stack([num_batch, 1]))
    y = tf.tile(tf.expand_dims(repeat(y, num_ch), 1), tf.stack([num_batch, 1]))
    cind = tf.tile(tf.expand_dims(cind, 1), tf.stack([num_batch*height*width, 1]))

    dx, dy = tf.split(theta, 2, axis=1)
    dx = tf.cast(tf.clip_by_value(dx, 0, out_height-height), 'int32')
    dx = tf.reshape(tf.tile(dx, tf.stack([1, hwc])), [-1, 1])
    dy = tf.cast(tf.clip_by_value(dy, 0, out_width-width), 'int32')
    dy = tf.reshape(tf.tile(dy, tf.stack([1, hwc])), [-1, 1])
    x = x + dx
    y = y + dy

    tind = tf.concat([nind, x, y, cind], axis=1)
    val = tf.reshape(U, [-1])
    T = tf.sparse_to_dense(tind,
            tf.stack([num_batch, out_height, out_width, num_ch]),
            val)
    T.set_shape([None, out_height, out_width, num_ch])
    return T
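
Here sparse_to_dense acts as a general scatter rather than a one-hot builder: tind lists one (batch, x, y, channel) destination per input element and val carries the pixel values. On TensorFlow 1.0+ the final scatter can be written more directly with tf.scatter_nd (a sketch under the same shape assumptions):

T = tf.scatter_nd(indices=tind, updates=val,
                  shape=tf.stack([num_batch, out_height, out_width, num_ch]))
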
benchmark-googlenet.py (project: ck-tensorflow, author: ctuning)
def loss(logits, labels):
    batch_size = tf.size(labels)
    labels = tf.expand_dims(labels, 1)
    indices = tf.expand_dims(tf.range(0, batch_size, 1), 1)
    concated = tf.concat([indices, labels], 1)
    onehot_labels = tf.sparse_to_dense(
        concated, tf.stack([batch_size, 1000]), 1.0, 0.0)
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
        logits=logits, labels=onehot_labels, name='xentropy')
    loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
    return loss
benchmark-overfeat.py (project: ck-tensorflow, author: ctuning)
def loss(logits, labels):
    batch_size = tf.size(labels)
    labels = tf.expand_dims(labels, 1)
    indices = tf.expand_dims(tf.range(0, batch_size, 1), 1)
    # If the layer configuration changes, the stacked shape below probably needs to change as well.
    concated = tf.concat([indices, labels], 1)
    onehot_labels = tf.sparse_to_dense(
        concated, tf.stack([batch_size, 1000]), 1.0, 0.0)
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
        logits=logits, labels=onehot_labels, name='xentropy')
    loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
    return loss
loss.py (project: tensorflow-layer-library, author: bioinf-jku)
def one_hot_patch(x, depth):
    # workaround by name-name
    sparse_labels = tf.reshape(x, [-1, 1])
    derived_size = tf.shape(sparse_labels)[0]
    indices = tf.reshape(tf.range(0, derived_size, 1), [-1, 1])
    concated = tf.concat(axis=1, values=[indices, sparse_labels])
    outshape = tf.concat(axis=0, values=[tf.reshape(derived_size, [1]), tf.reshape(depth, [1])])
    return tf.sparse_to_dense(concated, outshape, 1.0, 0.0)
net.py (project: TensorNet-TF, author: timgaripov)
def loss(logits, labels):
    """Calculates the loss from the logits and the labels.
    Args:
        logits: input tensor, float - [batch_size, NUM_CLASSES].
        labels: Labels tensor, int32 - [batch_size].
    Returns:
        loss: Loss tensor of type float.
    """
    # Convert from sparse integer labels in the range [0, NUM_CLASSES)
    # to 1-hot dense float vectors (that is we will have batch_size vectors,
    # each with NUM_CLASSES values, all of which are 0.0 except there will
    # be a 1.0 in the entry corresponding to the label).
    batch_size = tf.size(labels)
    labels = tf.expand_dims(labels, 1)
    indices = tf.expand_dims(tf.range(0, batch_size), 1)
    concated = tf.concat([indices, labels], 1)
    onehot_labels = tf.sparse_to_dense(concated,
                                       tf.shape(logits), 1.0, 0.0)
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits,
                                                            labels=onehot_labels,
                                                            name='xentropy')
    loss = tf.reduce_mean(cross_entropy, name='loss')
    tf.summary.scalar('summary/loss', loss)
    return loss
main.py (project: grad-cam.tensorflow, author: Ankush96)
def grad_cam(x, vgg, sess, predicted_class, layer_name, nb_classes):
    print("Setting gradients to 1 for target class and rest to 0")
    # Conv layer tensor [?,7,7,512]
    conv_layer = vgg.layers[layer_name]
    # [1000]-D tensor with target class index set to 1 and rest as 0
    one_hot = tf.sparse_to_dense(predicted_class, [nb_classes], 1.0)
    signal = tf.multiply(vgg.layers['fc3'], one_hot)
    loss = tf.reduce_mean(signal)

    grads = tf.gradients(loss, conv_layer)[0]
    # Normalizing the gradients
    norm_grads = tf.div(grads, tf.sqrt(tf.reduce_mean(tf.square(grads))) + tf.constant(1e-5))

    output, grads_val = sess.run([conv_layer, norm_grads], feed_dict={vgg.imgs: x})
    output = output[0]           # [7,7,512]
    grads_val = grads_val[0]     # [7,7,512]

    weights = np.mean(grads_val, axis=(0, 1))                # [512]
    cam = np.ones(output.shape[0:2], dtype=np.float32)       # [7,7]

    # Taking a weighted average
    for i, w in enumerate(weights):
        cam += w * output[:, :, i]

    # Passing through ReLU
    cam = np.maximum(cam, 0)
    cam = cam / np.max(cam)
    cam = resize(cam, (224, 224))  # resize assumed to be skimage.transform.resize

    # Converting grayscale to 3-D
    cam3 = np.expand_dims(cam, axis=2)
    cam3 = np.tile(cam3,[1,1,3])

    return cam3
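
The sparse_to_dense call above builds a [nb_classes]-dimensional one-hot vector from the scalar predicted_class; on TensorFlow 1.0+, tf.one_hot does the same in one step (a sketch):

one_hot = tf.one_hot(predicted_class, nb_classes, on_value=1.0, off_value=0.0)
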
misc.py (project: lsdc, author: febert)
def one_hot_mask(labels, num_classes, scope=None):
  """Compute 1-hot encodings for masks.

  Given a label image, this computes the one hot encoding at
  each pixel.

  Args:
    labels: (batch_size, width, height, 1) tensor containing labels.
    num_classes: number of classes
    scope: optional scope name

  Returns:
    Tensor of shape (batch_size, width, height, num_classes) with
    a 1-hot encoding.
  """
  with tf.name_scope(scope, "OneHotMask", [labels]):
    height, width, depth = _shape(labels)
    assert depth == 1
    sparse_labels = tf.to_int32(tf.reshape(labels, [-1, 1]))
    sparse_size, _ = _shape(sparse_labels)
    indices = tf.reshape(tf.range(0, sparse_size, 1), [-1, 1])
    concated = tf.concat([indices, sparse_labels], 1)
    dense_result = tf.sparse_to_dense(concated, [sparse_size, num_classes], 1.0,
                                      0.0)
    result = tf.reshape(dense_result, [height, width, num_classes])
    return result
speech.py (project: fathom, author: rdadolf)
def decoding(self):
    """Predict labels from learned sequence model."""
    # TODO: label error rate on validation set
    decoded, _ = tf.nn.ctc_greedy_decoder(self.logits_t, self.seq_lens)
    sparse_decode_op = decoded[0] # single-element list
    self.decode_op = tf.sparse_to_dense(sparse_decode_op.indices, sparse_decode_op.dense_shape, sparse_decode_op.values)
    return self.decode_op
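
decoded[0] is a tf.SparseTensor, so the line above simply materializes the CTC output densely; tf.sparse_tensor_to_dense (tf.sparse.to_dense in TF 2.x) wraps the same conversion (a sketch, assuming padding should be 0):

self.decode_op = tf.sparse_tensor_to_dense(sparse_decode_op, default_value=0)
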
alexnet.py (project: DL-Benchmarks, author: DL-Benchmarks)
def loss(logits, labels, config):
    labels = tf.expand_dims(labels, 1)
    indices = tf.expand_dims(tf.range(0, config.batch_size, 1), 1)
    concated = tf.concat(axis=1, values=[indices, labels])
    onehot_labels = tf.sparse_to_dense(
        concated, tf.stack([config.batch_size, config.ydim]), 1.0, 0.0)
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits,
                                                            labels=onehot_labels,
                                                            name='entropy')
    loss = tf.reduce_mean(cross_entropy, name='entropy_mean')
    return loss