def finalTrainingLayer(classCount, finalTensorName, bottleneckTensor):
    """Attach a new softmax classification layer on top of the bottleneck tensor.

    Builds the retraining head: an input placeholder (defaulting to the
    cached bottleneck tensor), a single fully-connected layer, a softmax
    output, a cross-entropy loss with summaries, and a gradient-descent
    training op.

    Args:
        classCount: number of output classes for the new final layer.
        finalTensorName: graph name to give the softmax output tensor.
        bottleneckTensor: tensor of bottleneck activations used as the
            default value of the input placeholder.

    Returns:
        Tuple ``(trainStep, crossEntropyMean, bottleneckInput,
        groundTruthInput, finalTensor)``.
    """
    # NOTE(review): original indentation was lost in transit; nesting below is
    # reconstructed to match the standard TF retrain.py layout — confirm.
    with tf.name_scope('input'):
        # Feed either precomputed bottlenecks or fall back to the live tensor.
        bottleneckInput = tf.placeholder_with_default(
            bottleneckTensor,
            shape=[None, BOTTLENECK_TENSOR_SIZE],
            name='BottleneckInputPlaceholder')
        groundTruthInput = tf.placeholder(
            tf.float32, [None, classCount], name='GroundTruthInput')

    with tf.name_scope('finalLayer'):
        with tf.name_scope('weights'):
            # Small-stddev truncated normal keeps initial logits near zero.
            layerWeights = tf.Variable(
                tf.truncated_normal([BOTTLENECK_TENSOR_SIZE, classCount],
                                    stddev=0.001),
                name='finalWeights')
            tensorBoardUsage(layerWeights)
        with tf.name_scope('biases'):
            layerBiases = tf.Variable(tf.zeros([classCount]),
                                      name='finalBiases')
            tensorBoardUsage(layerBiases)
        with tf.name_scope('WxPlusB'):
            logits = tf.matmul(bottleneckInput, layerWeights) + layerBiases
            tf.summary.histogram('pre_activations', logits)

    finalTensor = tf.nn.softmax(logits, name=finalTensorName)
    tf.summary.histogram('activations', finalTensor)

    with tf.name_scope('crossEntropy'):
        crossEntropy = tf.nn.softmax_cross_entropy_with_logits(
            labels=groundTruthInput, logits=logits)
        with tf.name_scope('total'):
            crossEntropyMean = tf.reduce_mean(crossEntropy)
    tf.summary.scalar('cross_entropy', crossEntropyMean)

    with tf.name_scope('train'):
        trainStep = tf.train.GradientDescentOptimizer(
            LEARNING_RATE).minimize(crossEntropyMean)

    return (trainStep, crossEntropyMean, bottleneckInput, groundTruthInput,
            finalTensor)
# --- Scraped page metadata (translated from Chinese, kept for provenance) ---
# Source file: First_Purification.py
# Language: python
# Reads: 27 · Favorites: 0 · Likes: 0 · Comments: 0
# (Comment list / table of contents from the hosting page omitted.)