Python sigmoid() usage examples (sample source code)
def __call__(self, inputs, state, scope=None):
    """LSTM cell with layer normalization and recurrent dropout."""
    with vs.variable_scope(scope or type(self).__name__) as scope:  # LayerNormBasicLSTMCell  # pylint: disable=unused-variable
        c, h = state
        args = array_ops.concat(1, [inputs, h])
        concat = self._linear(args)

        i, j, f, o = array_ops.split(1, 4, concat)
        if self._layer_norm:
            i = self._norm(i, "input")
            j = self._norm(j, "transform")
            f = self._norm(f, "forget")
            o = self._norm(o, "output")

        g = self._activation(j)
        if (not isinstance(self._keep_prob, float)) or self._keep_prob < 1:
            g = nn_ops.dropout(g, self._keep_prob, seed=self._seed)

        new_c = (c * math_ops.sigmoid(f + self._forget_bias)
                 + math_ops.sigmoid(i) * g)
        if self._layer_norm:
            new_c = self._norm(new_c, "state")
        new_h = self._activation(new_c) * math_ops.sigmoid(o)

        new_state = rnn_cell.LSTMStateTuple(new_c, new_h)
        return new_h, new_state
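The `self._norm` helper is not shown in this excerpt; layer normalization standardizes each example's activations to zero mean and unit variance over the feature axis, then applies a learned scale and shift. A minimal NumPy sketch of that operation (the function name and defaults below are illustrative, not the cell's actual code):

import numpy as np

def layer_norm(x, gain=1.0, shift=0.0, eps=1e-6):
    # Normalize over the feature axis of a [batch, features] array,
    # then rescale; gain/shift stand in for learned parameters.
    mean = x.mean(axis=-1, keepdims=True)
    var = x.var(axis=-1, keepdims=True)
    return gain * (x - mean) / np.sqrt(var + eps) + shift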
def __call__(self, inputs, state, scope=None):
    """Gated recurrent unit (GRU) with nunits cells."""
    with tf.variable_scope(scope or type(self).__name__):  # "GRUCell"
        with tf.variable_scope("Gates"):  # Reset gate and update gate.
            # We start with bias of 1.0 to not reset and not update.
            r, u = array_ops.split(_linear([inputs, state],
                                           2 * self._num_units, True, 1.0), 2, 1)
            r, u = sigmoid(r), sigmoid(u)
        with tf.variable_scope("Candidate"):
            c = self._activation(_linear([inputs, r * state],
                                         self._num_units, True))
        new_h = u * state + (1 - u) * c
    return new_h, new_h
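For reference, the same GRU update written out with NumPy for a single step; the weight matrices below are random placeholders standing in for what `_linear` would create, not the cell's actual variables:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

rng = np.random.default_rng(0)
n_in, n_units = 4, 8
x, h = rng.standard_normal(n_in), np.zeros(n_units)

W_gates = 0.1 * rng.standard_normal((n_in + n_units, 2 * n_units))
b_gates = np.ones(2 * n_units)  # bias of 1.0: start out neither resetting nor updating
W_cand = 0.1 * rng.standard_normal((n_in + n_units, n_units))

r, u = np.split(sigmoid(np.concatenate([x, h]) @ W_gates + b_gates), 2)
c = np.tanh(np.concatenate([x, r * h]) @ W_cand)  # candidate state
new_h = u * h + (1 - u) * c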
def __call__(self, inputs, state, scope=None):
    with vs.variable_scope(scope or "goru_cell"):
        U_init = init_ops.random_uniform_initializer(-0.01, 0.01)
        b_init = init_ops.constant_initializer(2.)
        mod_b_init = init_ops.constant_initializer(0.01)

        U = vs.get_variable("U", [inputs.get_shape()[-1], self._hidden_size * 3],
                            dtype=tf.float32, initializer=U_init)
        Ux = math_ops.matmul(inputs, U)
        U_cx, U_rx, U_gx = array_ops.split(Ux, 3, axis=1)

        W_r = vs.get_variable("W_r", [self._hidden_size, self._hidden_size],
                              dtype=tf.float32, initializer=U_init)
        W_g = vs.get_variable("W_g", [self._hidden_size, self._hidden_size],
                              dtype=tf.float32, initializer=U_init)
        W_rh = math_ops.matmul(state, W_r)
        W_gh = math_ops.matmul(state, W_g)

        bias_r = vs.get_variable("bias_r", [self._hidden_size], dtype=tf.float32, initializer=b_init)
        bias_g = vs.get_variable("bias_g", [self._hidden_size], dtype=tf.float32)
        bias_c = vs.get_variable("bias_c", [self._hidden_size], dtype=tf.float32, initializer=mod_b_init)

        r_tmp = U_rx + W_rh + bias_r
        g_tmp = U_gx + W_gh + bias_g
        r = math_ops.sigmoid(r_tmp)
        g = math_ops.sigmoid(g_tmp)

        Unitaryh = _eunn_loop(state, self._capacity, self.diag_vec, self.off_vec, self.diag, self._fft)
        c = modrelu(math_ops.multiply(r, Unitaryh) + U_cx, bias_c, False)
        new_state = math_ops.multiply(g, state) + math_ops.multiply(1 - g, c)
    return new_state, new_state
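`modrelu` and `_eunn_loop` come from the GORU/EUNN codebase and are not defined in this excerpt. The usual modReLU nonlinearity gates each unit's magnitude through a biased ReLU while preserving its sign (or complex phase); a hedged NumPy sketch of that common definition, not necessarily the project's exact one:

import numpy as np

def modrelu(z, bias, eps=1e-12):
    # relu(|z| + bias) * z / |z|: the magnitude is gated,
    # the sign/phase of each unit is kept unchanged.
    mag = np.abs(z)
    return np.maximum(mag + bias, 0.0) * (z / (mag + eps))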
def __call__(self, inputs, state, scope=None):
    """Gated recurrent unit (GRU) with nunits cells."""
    with vs.variable_scope(scope or type(self).__name__):  # "GRUCell"
        with vs.variable_scope("Gates"):  # Reset gate and update gate.
            # We start with bias of 1.0 to not reset and not update.
            r, u = array_ops.split(1, 2, linear([inputs, state],
                                                2 * self._num_units, True, 1.0))
            r, u = sigmoid(r), sigmoid(u)
        with vs.variable_scope("Candidate"):
            c = tanh(linear([inputs, r * state], self._num_units, True))
        new_h = u * state + (1 - u) * c
    return new_h, new_h
def __call__(self, inputs, state, scope=None):
    """Long short-term memory cell (LSTM)."""
    with vs.variable_scope(scope or type(self).__name__):  # "BasicLSTMCell"
        # Parameters of gates are concatenated into one multiply for efficiency.
        c, h = array_ops.split(1, 2, state)
        concat = linear([inputs, h], 4 * self._num_units, True)
        # i = input_gate, j = new_input, f = forget_gate, o = output_gate
        i, j, f, o = array_ops.split(1, 4, concat)
        new_c = c * sigmoid(f + self._forget_bias) + sigmoid(i) * tanh(j)
        new_h = tanh(new_c) * sigmoid(o)
    return new_h, array_ops.concat(1, [new_c, new_h])
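The fused matmul plus four-way split above is the standard LSTM step. The same arithmetic for a single example in NumPy, with W and b as placeholder parameters for what `linear` would create:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def lstm_step(x, c, h, W, b, forget_bias=1.0):
    # One fused-gate LSTM step for a single example.
    concat = np.concatenate([x, h]) @ W + b  # shape: (4 * num_units,)
    i, j, f, o = np.split(concat, 4)         # input, new input, forget, output
    new_c = c * sigmoid(f + forget_bias) + sigmoid(i) * np.tanh(j)
    new_h = np.tanh(new_c) * sigmoid(o)
    return new_c, new_h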
def __call__(self, inputs, state, scope=None):
    """Gated recurrent unit (GRU) with nunits cells."""
    with vs.variable_scope(scope or type(self).__name__):  # "GRUCell"
        with vs.variable_scope("Gates"):  # Reset gate and update gate.
            # We start with bias of 1.0 to not reset and not update.
            r, u = array_ops.split(3, 2, _conv([inputs, state],
                                               2 * self._num_units, self._k_size, True,
                                               initializer=self._initializer))
            r, u = sigmoid(r), sigmoid(u)
        with vs.variable_scope("Candidate"):
            c = self._activation(_conv([inputs, r * state],
                                       self._num_units, self._k_size, True,
                                       initializer=self._initializer))
        new_h = u * state + (1 - u) * c
    return new_h, new_h
def __call__(self, inputs, state, scope=None):
    """Convolutional Long short-term memory cell (ConvLSTM)."""
    with vs.variable_scope(scope or type(self).__name__):  # "ConvLSTMCell"
        if self._state_is_tuple:
            c, h = state
        else:
            c, h = array_ops.split(3, 2, state)

        # Layer-norm gains (s1, s2) and shifts (b1, b2), one set per conv output.
        s1 = vs.get_variable("s1", initializer=tf.ones([self._height, self._width, 4 * self._num_units]), dtype=tf.float32)
        s2 = vs.get_variable("s2", initializer=tf.ones([self._height, self._width, 4 * self._num_units]), dtype=tf.float32)
        b1 = vs.get_variable("b1", initializer=tf.zeros([self._height, self._width, 4 * self._num_units]), dtype=tf.float32)
        b2 = vs.get_variable("b2", initializer=tf.zeros([self._height, self._width, 4 * self._num_units]), dtype=tf.float32)

        input_below_ = _conv([inputs], 4 * self._num_units, self._k_size, False,
                             initializer=self._initializer, scope="out_1")
        input_below_ = ln(input_below_, s1, b1)
        state_below_ = _conv([h], 4 * self._num_units, self._k_size, False,
                             initializer=self._initializer, scope="out_2")
        state_below_ = ln(state_below_, s2, b2)
        lstm_matrix = tf.add(input_below_, state_below_)

        # i = input_gate, j = new_input, f = forget_gate, o = output_gate.
        # Tensors are batch_size * height * width * channel, so split on axis 3.
        i, j, f, o = array_ops.split(3, 4, lstm_matrix)

        new_c = (c * sigmoid(f + self._forget_bias) + sigmoid(i) *
                 self._activation(j))
        new_h = self._activation(new_c) * sigmoid(o)

        if self._state_is_tuple:
            new_state = LSTMStateTuple(new_c, new_h)
        else:
            new_state = array_ops.concat(3, [new_c, new_h])
        return new_h, new_state
def __call__(self, inputs, state, scope=None):
    with _checked_scope(
            self, scope or "attention_based_gru_cell", reuse=self._reuse):
        with vs.variable_scope("gates"):
            # We start with bias of 1.0 to not reset and not update.
            inputs, g_t = array_ops.split(
                inputs, num_or_size_splits=[self._num_units, 1], axis=1)
            reset_gate = sigmoid(_linear(
                [inputs, state], self._num_units, True, 1.0))
        with vs.variable_scope("candidate"):
            h_tilde = self._activation(_linear(
                [inputs, reset_gate * state], self._num_units, True))
        new_h = g_t * h_tilde + (1 - g_t) * state
    return new_h, new_h
def __call__(self, inputs, state, scope=None):
    """Gated recurrent unit (GRU) with nunits cells."""
    with vs.variable_scope(scope or type(self).__name__):  # "GRUCell"
        with vs.variable_scope("Gates"):  # Reset gate and update gate.
            # We start with bias of 1.0 to not reset and not update.
            r, u = array_ops.split(1, 2, _linear([inputs, state],
                                                 2 * self._num_units, True, 1.0))
            r, u = sigmoid(r), sigmoid(u)
        with vs.variable_scope("Candidate"):
            c = self._activation(_linear([inputs, r * state],
                                         self._num_units, True))
        new_h = u * state + (1 - u) * c
    return new_h, new_h
def __call__(self, inputs, state, scope=None):
    """Gated recurrent unit (GRU) with nunits cells."""
    with vs.variable_scope(scope or type(self).__name__):  # "GRUCell"
        with vs.variable_scope("Gates"):  # Reset gate and update gate.
            # We start with bias of 1.0 to not reset and not update.
            r, u = array_ops.split(1, 2, _linear([inputs, state],
                                                 2 * self._num_units, True, 1.0))
            r, u = sigmoid(r), sigmoid(u)
        with vs.variable_scope("Candidate"):
            c = self._activation(_linear([inputs, r * state],
                                         self._num_units, True))
        new_h = u * state + (1 - u) * c

        # Distraction: project new_h onto state (per batch row) and subtract
        # that component, so the emitted state moves away from the old one.
        eps = 1e-13
        temp = math_ops.div(math_ops.reduce_sum(math_ops.mul(new_h, state), 1),
                            math_ops.reduce_sum(math_ops.mul(state, state), 1) + eps)
        dummy = array_ops.transpose(state)
        t1 = math_ops.mul(dummy, temp)  # broadcast the per-row coefficient over units
        t1 = array_ops.transpose(t1)    # t1 = temp[:, None] * state
        distract_h = new_h - t1
        return distract_h, distract_h
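Both this GRU variant and the LSTM variant below use the same "distraction" step: remove from the new state its component along the previous one. A compact NumPy equivalent of the transpose/multiply dance above, with illustrative names:

import numpy as np

def distract(new_s, old_s, eps=1e-13):
    # Per-row projection coefficient <new_s, old_s> / <old_s, old_s>,
    # then subtract the component of new_s along old_s.
    coef = (new_s * old_s).sum(axis=1) / ((old_s * old_s).sum(axis=1) + eps)
    return new_s - coef[:, None] * old_s

old_s = np.array([[1.0, 0.0], [3.0, 4.0]])
new_s = np.array([[2.0, 5.0], [1.0, 1.0]])
out = distract(new_s, old_s)
# Each output row is (numerically) orthogonal to the matching row of old_s.
assert np.allclose((out * old_s).sum(axis=1), 0.0, atol=1e-9)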
def __call__(self, inputs, state, scope=None):
    """Long short-term memory cell (LSTM)."""
    with vs.variable_scope(scope or type(self).__name__):
        # Parameters of gates are concatenated into one multiply for efficiency.
        if self._state_is_tuple:
            c, h = state
        else:
            c, h = array_ops.split(1, 2, state)
        concat = _linear([inputs, h], 4 * self._num_units, True)
        # i = input_gate, j = new_input, f = forget_gate, o = output_gate
        i, j, f, o = array_ops.split(1, 4, concat)
        new_c = (c * sigmoid(f + self._forget_bias) + sigmoid(i) *
                 self._activation(j))

        # Distraction: subtract from new_c its component along the old cell
        # state c; temp is the per-row coefficient <c, new_c> / <c, c>.
        eps = 1e-13
        temp = math_ops.div(math_ops.reduce_sum(math_ops.mul(c, new_c), 1),
                            math_ops.reduce_sum(math_ops.mul(c, c), 1) + eps)
        dummy = array_ops.transpose(c)
        t1 = math_ops.mul(dummy, temp)
        t1 = array_ops.transpose(t1)
        distract_c = new_c - t1

        new_h = self._activation(distract_c) * sigmoid(o)
        if self._state_is_tuple:
            new_state = LSTMStateTuple(new_c, new_h)
        else:
            new_state = array_ops.concat(1, [new_c, new_h])
        return new_h, new_state
def __call__(self, inputs, state, scope=None):
    """Long short-term memory cell (LSTM)."""
    with vs.variable_scope(scope or type(self).__name__):
        # Parameters of gates are concatenated into one multiply for efficiency.
        if self._state_is_tuple:
            c, h = state
        else:
            c, h = array_ops.split(1, 2, state)
        concat = _linear([inputs, h], 5 * self._num_units, True)
        # i = input_gate, j = new_input, f = forget_gate, o = output_gate,
        # g = distract_gate (split out here but unused in this variant).
        i, j, f, o, g = array_ops.split(1, 5, concat)
        new_c = (c * sigmoid(f + self._forget_bias) + sigmoid(i) *
                 self._activation(j))
        distract_c = new_c - c  # subtract the old cell state outright
        new_h = self._activation(distract_c) * sigmoid(o)
        if self._state_is_tuple:
            new_state = LSTMStateTuple(new_c, new_h)
        else:
            new_state = array_ops.concat(1, [new_c, new_h])
        return new_h, new_state
def call(self, inputs, state, scope=None):
    with vs.variable_scope(scope or type(self).__name__):  # "GruRcnCell"
        with vs.variable_scope("Gates"):  # Reset gate and update gate.
            # We start with bias of 1.0.
            w_zrw = self._conv(inputs, self._num_outputs * 3, self._ih_filter_h_length,
                               self._ih_filter_w_length, self._ih_strides, self._ih_pandding,
                               init_ops.truncated_normal_initializer(stddev=0.01), scope="WzrwConv")
            u_zr = self._conv(state, self._num_outputs * 2, self._hh_filter_h_length,
                              self._hh_filter_w_length, [1, 1, 1, 1], "SAME",
                              init_ops.truncated_normal_initializer(stddev=0.01), scope="UzrConv")
            w_z, w_r, w = tf.split(value=w_zrw, num_or_size_splits=3, axis=3, name="w_split")
            u_z, u_r = tf.split(value=u_zr, num_or_size_splits=2, axis=3, name="u_split")

            z_bias = tf.get_variable(
                name="z_biases",
                shape=[self._num_outputs],
                initializer=init_ops.ones_initializer())
            z_gate = math_ops.sigmoid(tf.nn.bias_add(w_z + u_z, z_bias))

            r_bias = tf.get_variable(
                name="r_biases",
                shape=[self._num_outputs],
                initializer=init_ops.ones_initializer())
            r_gate = math_ops.sigmoid(tf.nn.bias_add(w_r + u_r, r_bias))

        with vs.variable_scope("Candidate"):
            u = self._conv(r_gate * state, self._num_outputs, self._hh_filter_h_length,
                           self._hh_filter_w_length, [1, 1, 1, 1], "SAME",
                           init_ops.truncated_normal_initializer(stddev=0.01), scope="UConv")
            c_bias = tf.get_variable(
                name="c_biases",
                shape=[self._num_outputs],
                initializer=init_ops.ones_initializer())
            c = math_ops.tanh(tf.nn.bias_add(w + u, c_bias))

        new_h = z_gate * state + (1 - z_gate) * c
    return new_h, new_h
def __call__(self, inputs, state, scope=None):
    """Gated recurrent unit (GRU) with nunits cells."""
    with vs.variable_scope(scope or type(self).__name__):  # "GRUCell"
        with vs.variable_scope("Gates"):  # Reset gate and update gate.
            # We start with bias of 1.0 to not reset and not update.
            r, u = array_ops.split(1, 2, _linear([inputs, state],
                                                 2 * self._num_units, True, 1.0))
            r, u = sigmoid(r), sigmoid(u)
        with vs.variable_scope("Candidate"):
            c = self._activation(_linear([inputs, r * state],
                                         self._num_units, True))
        new_h = u * state + (1 - u) * c
    return new_h, new_h
head.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def _logits_to_predictions(self, logits):
    """Returns a dict of predictions.

    Args:
      logits: logits `Output` after applying possible centered bias.

    Returns:
      Dict of prediction `Output` keyed by `PredictionKey`.
    """
    with ops.name_scope(None, "predictions", (logits,)):
        two_class_logits = _one_class_to_two_class_logits(logits)
        return {
            prediction_key.PredictionKey.LOGITS: logits,
            prediction_key.PredictionKey.LOGISTIC: math_ops.sigmoid(
                logits, name=prediction_key.PredictionKey.LOGISTIC),
            prediction_key.PredictionKey.PROBABILITIES: nn.softmax(
                two_class_logits, name=prediction_key.PredictionKey.PROBABILITIES),
            prediction_key.PredictionKey.CLASSES: math_ops.argmax(
                two_class_logits, 1, name=prediction_key.PredictionKey.CLASSES),
        }
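The LOGISTIC and PROBABILITIES entries are two views of the same number: padding a zero column (which is what a helper like `_one_class_to_two_class_logits` conventionally does) makes the two-class softmax reproduce the sigmoid, since softmax([0, x]) = [1 - sigmoid(x), sigmoid(x)]. A quick NumPy check under that assumption:

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def softmax(z):
    e = np.exp(z - z.max(axis=-1, keepdims=True))
    return e / e.sum(axis=-1, keepdims=True)

logits = np.array([[-2.0], [0.3], [4.0]])
two_class = np.concatenate([np.zeros_like(logits), logits], axis=1)
assert np.allclose(softmax(two_class)[:, 1:], sigmoid(logits))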
head.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def _logits_to_predictions(self, logits):
    """See `_MultiClassHead`."""
    with ops.name_scope(None, "predictions", (logits,)):
        return {
            prediction_key.PredictionKey.LOGITS: logits,
            prediction_key.PredictionKey.PROBABILITIES: math_ops.sigmoid(
                logits, name=prediction_key.PredictionKey.PROBABILITIES),
            prediction_key.PredictionKey.CLASSES: math_ops.to_int64(
                math_ops.greater(logits, 0),
                name=prediction_key.PredictionKey.CLASSES),
        }
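Here each logit is an independent binary decision (multi-label rather than multi-class), so the probabilities are per-class sigmoids and the class predictions are the logits thresholded at zero, since sigmoid(0) = 0.5. The same rule in NumPy:

import numpy as np

logits = np.array([[-1.2, 0.0, 3.4]])
probabilities = 1.0 / (1.0 + np.exp(-logits))
classes = (logits > 0).astype(np.int64)  # equivalent to probabilities > 0.5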
logistic_regressor_test.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def _logistic_regression_model_fn(features, labels, mode):
    _ = mode
    logits = layers.linear(
        features,
        1,
        weights_initializer=init_ops.zeros_initializer(),
        # Intentionally uses really awful initial values so that
        # AUC/precision/recall/etc will change meaningfully even on a toy dataset.
        biases_initializer=init_ops.constant_initializer(-10.0))
    predictions = math_ops.sigmoid(logits)
    loss = loss_ops.sigmoid_cross_entropy(logits, labels)
    train_op = optimizers.optimize_loss(
        loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
    return predictions, loss, train_op
bernoulli.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def _entropy(self):
    return (-self.logits * (math_ops.sigmoid(self.logits) - 1) +
            nn.softplus(-self.logits))
logistic.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def _cdf(self, x):
    return math_ops.sigmoid(self._z(x))
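`_z` is not shown in this excerpt, but for a Logistic(loc, scale) distribution it would be the standardized value (x - loc) / scale, and the CDF is exactly the sigmoid of that value. A hedged NumPy sketch under that assumption:

import numpy as np

def logistic_cdf(x, loc=0.0, scale=1.0):
    # CDF of Logistic(loc, scale): sigmoid of the standardized value,
    # mirroring what _z(x) presumably computes.
    z = (np.asarray(x) - loc) / scale
    return 1.0 / (1.0 + np.exp(-z))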