Python sigmoid() example source code
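The sigmoid (logistic) function, sigmoid(x) = 1 / (1 + exp(-x)), squashes any real number into (0, 1), which is why the snippets below use it for RNN gates and for turning logits into probabilities. As a quick refresher, a minimal numerically stable NumPy version (illustrative only, not taken from any project below):

import numpy as np

def sigmoid(x):
    # Evaluate 1 / (1 + exp(-x)) without overflow: both branches only
    # ever exponentiate a non-positive number.
    e = np.exp(-np.abs(x))
    return np.where(x >= 0, 1.0 / (1.0 + e), e / (1.0 + e))

print(sigmoid(np.array([-1000.0, 0.0, 1000.0])))  # [0.  0.5 1. ]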

bn_gru.py (project: website-fingerprinting, author: AxelGoetz)
def __call__(self, inputs, state, scope=None):
    """Gated recurrent unit (GRU) with nunits cells."""
    with _checked_scope(self, scope or "gru_cell"):
      with vs.variable_scope("gates"):  # Reset gate and update gate.
        # We start with bias of 1.0 to not reset and not update.
        value = sigmoid(_linear(
          [inputs, state], 2 * self._num_units, True, 1.0))
        r, u = array_ops.split(
            value=value,
            num_or_size_splits=2,
            axis=1)
      with vs.variable_scope("candidate"):
        res = self._activation(_linear([inputs, r * state],
                                       self._num_units, True))

        if self._batch_norm:
          c = batch_norm(res,
                         center=True, scale=True,
                         is_training=self._is_training,
                         scope='bn1')
        else:
          c = res

      new_h = u * state + (1 - u) * c
    return new_h, new_h
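Most of the GRU variants on this page differ only in where they add normalization; the underlying two-gate update is the same. A plain NumPy sketch of a single step, with hypothetical fused weight matrices W_r, W_u, W_c standing in for `_linear` (biases omitted for brevity):

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def gru_step(x, h, W_r, W_u, W_c):
    xh = np.concatenate([x, h])
    r = sigmoid(W_r @ xh)                          # reset gate
    u = sigmoid(W_u @ xh)                          # update gate
    c = np.tanh(W_c @ np.concatenate([x, r * h]))  # candidate state
    return u * h + (1 - u) * c                     # new hidden state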
model.py (project: merlin, author: CSTR-Edinburgh)
def __init__(self, n_in, hidden_layer_size, n_out, hidden_layer_type,
             output_type="linear", dropout_rate=0, loss_function="mse",
             optimizer="adam"):

        #self.session=tf.InteractiveSession()
        self.n_in  = int(n_in)
        self.n_out = int(n_out)

        self.n_layers = len(hidden_layer_size)

        self.hidden_layer_size = hidden_layer_size
        self.hidden_layer_type = hidden_layer_type

        assert len(self.hidden_layer_size) == len(self.hidden_layer_type)

        self.output_type   = output_type
        self.dropout_rate  = dropout_rate
        self.loss_function = loss_function
        self.optimizer     = optimizer
        #self.activation    ={"tanh":tf.nn.tanh,"sigmoid":tf.nn.sigmoid}
        self.graph=tf.Graph()
        #self.saver=tf.train.Saver()
model.py (project: merlin, author: CSTR-Edinburgh)
def __call__(self, inputs, state):
    """Gated recurrent unit (GRU) with nunits cells."""
    with vs.variable_scope("gates"):  # Reset gate and update gate.
      # We start with bias of 1.0 to not reset and not update.
      bias_ones = self._bias_initializer
      if self._bias_initializer is None:
        dtype = [a.dtype for a in [inputs, state]][0]
        bias_ones = init_ops.constant_initializer(1.0, dtype=dtype)
      value = rnn_cell_impl._linear([inputs, state], 2 * self._num_units, True,
                                    bias_ones, self._kernel_initializer)
      r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)
      r, u = layer_normalization(r, scope="r/"), layer_normalization(u, scope="u/")
      r, u = math_ops.sigmoid(r), math_ops.sigmoid(u)
    with vs.variable_scope("candidate"):
      c = self._activation(rnn_cell_impl._linear([inputs, r * state], self._num_units, True, self._bias_initializer, self._kernel_initializer))
    new_h = u * state + (1 - u) * c
    return new_h, new_h
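`layer_normalization` above is a project-local helper; the standard operation of that name rescales each sample's activations to zero mean and unit variance, then applies a learned scale and shift. A rough NumPy sketch (gamma and beta are the learned parameters, shown here as scalars):

import numpy as np

def layer_norm(x, gamma=1.0, beta=0.0, eps=1e-6):
    # Normalize over the feature axis of a [batch, features] array.
    mean = x.mean(axis=-1, keepdims=True)
    var = x.var(axis=-1, keepdims=True)
    return gamma * (x - mean) / np.sqrt(var + eps) + beta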
sdca_ops.py (project: lsdc, author: febert)
def predictions(self, examples):
    """Add operations to compute predictions by the model.

    If logistic_loss is being used, predicted probabilities are returned.
    Otherwise, (raw) linear predictions (w*x) are returned.

    Args:
      examples: Examples to compute predictions on.

    Returns:
      An Operation that computes the predictions for examples.

    Raises:
      ValueError: if examples are not well defined.
    """
    self._assertSpecified(
        ['example_weights', 'sparse_features', 'dense_features'], examples)
    self._assertList(['sparse_features', 'dense_features'], examples)

    result = self._linear_predictions(examples)
    if self._options['loss_type'] == 'logistic_loss':
      # Convert logits to probability for logistic loss predictions.
      with name_scope('sdca/logistic_prediction'):
        result = math_ops.sigmoid(result)
    return result
bernoulli.py (project: lsdc, author: febert)
def _kl_bernoulli_bernoulli(a, b, name=None):
  """Calculate the batched KL divergence KL(a || b) with a and b Bernoulli.

  Args:
    a: instance of a Bernoulli distribution object.
    b: instance of a Bernoulli distribution object.
    name: (optional) Name to use for created operations.
      default is "kl_bernoulli_bernoulli".

  Returns:
    Batchwise KL(a || b)
  """
  with ops.name_scope(name, "kl_bernoulli_bernoulli", [a.logits, b.logits]):
    return (math_ops.sigmoid(a.logits) * (-nn.softplus(-a.logits) +
                                          nn.softplus(-b.logits)) +
            math_ops.sigmoid(-a.logits) * (-nn.softplus(a.logits) +
                                           nn.softplus(b.logits)))
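The closed form works because log sigmoid(z) = -softplus(-z) and log(1 - sigmoid(z)) = -softplus(z), so the expression is just KL(a || b) = p_a log(p_a / p_b) + (1 - p_a) log((1 - p_a) / (1 - p_b)) written in logit space. A quick NumPy sanity check of that identity:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def softplus(z):
    return np.log1p(np.exp(-np.abs(z))) + np.maximum(z, 0.0)  # stable log(1 + e^z)

a, b = 0.7, -1.2  # Bernoulli logits
closed = (sigmoid(a) * (-softplus(-a) + softplus(-b))
          + sigmoid(-a) * (-softplus(a) + softplus(b)))
pa, pb = sigmoid(a), sigmoid(b)
direct = pa * np.log(pa / pb) + (1 - pa) * np.log((1 - pa) / (1 - pb))
assert np.isclose(closed, direct)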
sdca_ops.py (project: lsdc, author: febert)
def predictions(self, examples):
    """Add operations to compute predictions by the model.

    If logistic_loss is being used, predicted probabilities are returned.
    Otherwise, (raw) linear predictions (w*x) are returned.

    Args:
      examples: Examples to compute predictions on.

    Returns:
      An Operation that computes the predictions for examples.

    Raises:
      ValueError: if examples are not well defined.
    """
    self._assertSpecified(
        ['example_weights', 'sparse_features', 'dense_features'], examples)
    self._assertList(['sparse_features', 'dense_features'], examples)

    result = self._linear_predictions(examples)
    if self._options['loss_type'] == 'logistic_loss':
      # Convert logits to probability for logistic loss predictions.
      with name_scope('sdca/logistic_prediction'):
        result = math_ops.sigmoid(result)
    return result
bernoulli.py (project: lsdc, author: febert)
def __init__(self,
               p=None,
               dtype=dtypes.int32,
               validate_args=False,
               allow_nan_stats=True,
               name="BernoulliWithSigmoidP"):
    parameters = locals()
    parameters.pop("self")
    with ops.name_scope(name) as ns:
      super(BernoulliWithSigmoidP, self).__init__(
          p=nn.sigmoid(p),
          dtype=dtype,
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          name=ns)
    self._parameters = parameters
fops.py (project: shuttleNet, author: shiyemin)
def __call__(self, inputs, state, scope=None):
        """Memory grid (MemGrid) with nunits cells."""
        with tf.variable_scope(scope or type(self).__name__):  # "MemGrid"
            with tf.variable_scope("Gates"):  # Reset gate and update gate.
                # We start with bias of 1.0 to not reset and not update.
                r, u = tf.split(self.unbalance_linear([inputs, self._memory],
                                                    2 * self._mem_dim, True, 1.0), 2, 2)
                r, u = sigmoid(r), sigmoid(u)
            with tf.variable_scope("Candidate"):
                c = self._activation(self.unbalance_linear([inputs, r * self._memory],
                                            self._mem_dim, True))
            # Decide which line to write: line weights
            l = att_weight(inputs, tf.concat([c, self._memory], 2), self.echocell, scope="Line_weights")
            l = tf.reshape(l, [self._batch_size, self._mem_size, 1])
            t_memory = u * self._memory + (1 - u) * c
            self._memory = self._memory * (1 - l) + t_memory * l

            #  hl = att_weight(inputs, self._memory, echocell, scope="hidden_lw")
            #  hl = tf.reshape(hl, [self._batch_size, self._mem_size, 1])
            #  output = tf.reduce_sum(hl * self._memory, 1)
            output = tf.reduce_sum(l * self._memory, 1)
            output = tf.reshape(output, [self._batch_size, self._mem_dim])

            return output, state
rnn_cell.py (project: DL-Benchmarks, author: DL-Benchmarks)
def __call__(self, inputs, state, mask, scope=None):
    """Long short-term memory cell (LSTM)."""
    with vs.variable_scope(scope or type(self).__name__):  # "BasicLSTMCell"
      # Parameters of gates are concatenated into one multiply for efficiency.
      c, h = array_ops.split(1, 2, state)
      concat = linear([inputs, h], 4 * self._num_units, True)

      # i = input_gate, j = new_input, f = forget_gate, o = output_gate
      i, j, f, o = array_ops.split(1, 4, concat)

      new_c = c * sigmoid(f + self._forget_bias) + sigmoid(i) * tanh(j)
      mask = array_ops.expand_dims(mask, 1)
      new_c =  mask * new_c + (1. - mask) * c

      new_h = tanh(new_c) * sigmoid(o)
      new_h = mask * new_h + (1. - mask) * h

    return new_h, array_ops.concat(1, [new_c, new_h])
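The LSTM snippets on this page all share this four-gate core; masking, state tuples, and convolutions are the only variations. One unmasked step in NumPy, with a hypothetical fused weight matrix W standing in for `linear`/`_linear` (bias omitted):

import numpy as np

def lstm_step(x, c, h, W, forget_bias=1.0):
    sigmoid = lambda z: 1.0 / (1.0 + np.exp(-z))
    # i = input gate, j = new input, f = forget gate, o = output gate
    i, j, f, o = np.split(W @ np.concatenate([x, h]), 4)
    new_c = c * sigmoid(f + forget_bias) + sigmoid(i) * np.tanh(j)
    new_h = np.tanh(new_c) * sigmoid(o)
    return new_c, new_h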
convLSTM.py (project: Tensorflow-SegNet, author: tkuanlun350)
def __call__(self, inputs, state, scope=None):
    """Convolutional Long short-term memory cell (ConvLSTM)."""
    with vs.variable_scope(scope or type(self).__name__): # "ConvLSTMCell"
      if self._state_is_tuple:
        c, h = state
      else:
        c, h = array_ops.split(3, 2, state)

      # batch_size * height * width * channel
      concat = _conv([inputs, h], 4 * self._num_units, self._k_size, True, initializer=self._initializer)

      # i = input_gate, j = new_input, f = forget_gate, o = output_gate
      i, j, f, o = array_ops.split(3, 4, concat)

      new_c = (c * sigmoid(f + self._forget_bias) + sigmoid(i) *
               self._activation(j))
      new_h = self._activation(new_c) * sigmoid(o)

      if self._state_is_tuple:
        new_state = LSTMStateTuple(new_c, new_h)
      else:
        new_state = array_ops.concat(3, [new_c, new_h])
      return new_h, new_state
GRU.py (project: R-net, author: minsangkim142)
def __call__(self, inputs, state, scope=None):
        """Run one step of SRU."""
        with tf.variable_scope(scope or type(self).__name__):  # "SRUCell"
            with tf.variable_scope("x_hat"):
                x = linear([inputs], self._num_units, False)
            with tf.variable_scope("gates"):
                concat = tf.sigmoid(linear([inputs], 2 * self._num_units, True))
                f, r = tf.split(concat, 2, axis=1)
            with tf.variable_scope("candidates"):
                c = self._activation(f * state + (1 - f) * x)
                # variational dropout as suggested in the paper (disabled)
                # if self._is_training and Params.dropout is not None:
                #     c = tf.nn.dropout(c, keep_prob = 1 - Params.dropout)
            # highway connection
            # Our implementation is slightly different to the paper
            # https://arxiv.org/abs/1709.02755 in a way that highway network
            # uses x_hat instead of the cell inputs. Check equation (7) from the original
            # paper for SRU.
            h = r * c + (1 - r) * x
        return h, c
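Written out, the SRU step above looks like this in NumPy (W_x, W_f, W_r are hypothetical stand-ins for the three `linear` projections; note the highway term mixes in x_hat rather than the raw input, as the comment explains):

import numpy as np

def sru_step(inputs, state, W_x, W_f, W_r):
    sigmoid = lambda z: 1.0 / (1.0 + np.exp(-z))
    x = W_x @ inputs                      # x_hat
    f = sigmoid(W_f @ inputs)             # forget gate
    r = sigmoid(W_r @ inputs)             # highway gate
    c = np.tanh(f * state + (1 - f) * x)  # new cell state
    h = r * c + (1 - r) * x               # highway uses x_hat, not inputs
    return h, c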
GRU.py (project: R-net, author: minsangkim142)
def call(self, inputs, state):
    """Gated recurrent unit (GRU) with nunits cells."""
    with vs.variable_scope("gates"):  # Reset gate and update gate.
      # We start with bias of 1.0 to not reset and not update.
      bias_ones = self._bias_initializer
      if self._bias_initializer is None:
        dtype = [a.dtype for a in [inputs, state]][0]
        bias_ones = init_ops.constant_initializer(1.0, dtype=dtype)
      value = math_ops.sigmoid(
          linear([inputs, state], 2 * self._num_units, True, bias_ones,
                  self._kernel_initializer))
      r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)
    with vs.variable_scope("candidate"):
      c = self._activation(
          linear([inputs, r * state], self._num_units, True,
                  self._bias_initializer, self._kernel_initializer))
    #   recurrent dropout as proposed in https://arxiv.org/pdf/1603.05118.pdf (currently disabled)
      #if self._is_training and Params.dropout is not None:
        #c = tf.nn.dropout(c, 1 - Params.dropout)
    new_h = u * state + (1 - u) * c
    return new_h, new_h
rnn_cell.py (project: diversity_based_attention, author: PrekshaNema25)
def __call__(self, inputs, state, scope=None):
    """Gated recurrent unit (GRU) with nunits cells."""
    with vs.variable_scope(scope or type(self).__name__):  # "GRUCell"
      with vs.variable_scope("Gates"):  # Reset gate and update gate.
        # We start with bias of 1.0 to not reset and not update.
        r, u, g = array_ops.split(1, 3, _linear([inputs, state],
                                             3 * self._num_units, True, 1.0))
        r, u, g = sigmoid(r), sigmoid(u), sigmoid(g)
      with vs.variable_scope("Candidate"):
        c = self._activation(_linear([inputs, r * state],
                                     self._num_units, True))
      new_h = u * state + (1 - u) * c

      eps = 1e-13
      temp = math_ops.div(math_ops.reduce_sum(math_ops.mul(new_h, state),1), \
                          math_ops.reduce_sum(math_ops.mul(state,state),1) + eps)

      m = array_ops.transpose(g)

      t1 = math_ops.mul(m, temp)
      t1 = array_ops.transpose(t1)

      distract_h = new_h - state * t1
    return distract_h, distract_h
rnn_cell.py (project: diversity_based_attention, author: PrekshaNema25)
def __call__(self, inputs, state, scope=None):
    """Gated recurrent unit (GRU) with nunits cells."""
    with vs.variable_scope(scope or type(self).__name__):  # "GRUCell"

      with vs.variable_scope("Gates"):  # Reset gate and update gate.
        # We start with bias of 1.0 to not reset and not update.
        r, u = array_ops.split(1, 2, _linear([inputs, state],
                                  2 * self._num_units, True, 1.0))
        r, u = sigmoid(r), sigmoid(u)

      with vs.variable_scope("Candidate"):
        c = self._activation(_linear([inputs, r * state],
                                     self._num_units, True))

      new_h = u * state + (1 - u) * c

      distract_h = new_h - state

    return distract_h, distract_h
rnn_cell.py (project: diversity_based_attention, author: PrekshaNema25)
def __call__(self, inputs, state, scope=None):
    """Long short-term memory cell (LSTM)."""
    with vs.variable_scope(scope or type(self).__name__):  # "BasicLSTMCell"
      # Parameters of gates are concatenated into one multiply for efficiency.
      if self._state_is_tuple:
        c, h = state
      else:
        c, h = array_ops.split(1, 2, state)
      concat = _linear([inputs, h], 4 * self._num_units, True)

      # i = input_gate, j = new_input, f = forget_gate, o = output_gate
      i, j, f, o = array_ops.split(1, 4, concat)

      new_c = (c * sigmoid(f + self._forget_bias) + sigmoid(i) *
               self._activation(j))
      new_h = self._activation(new_c) * sigmoid(o)

      if self._state_is_tuple:
        new_state = LSTMStateTuple(new_c, new_h)
      else:
        new_state = array_ops.concat(1, [new_c, new_h])
      return new_h, new_state
attention_gru_cell.py (project: Dynamic-Memory-Networks-in-TensorFlow, author: barronalex)
def __call__(self, inputs, state, scope=None):
        """Attention GRU with nunits cells."""
        with vs.variable_scope(scope or "attention_gru_cell"):
            with vs.variable_scope("gates"):  # Reset gate and update gate.
                # We start with bias of 1.0 to not reset and not update.
                if inputs.get_shape()[-1] != self._num_units + 1:
                    raise ValueError("Input should be passed as word input concatenated with 1D attention on end axis")
                # extract input vector and attention
                inputs, g = array_ops.split(inputs,
                        num_or_size_splits=[self._num_units,1],
                        axis=1)
                r = _linear([inputs, state], self._num_units, True)
                r = sigmoid(r)
            with vs.variable_scope("candidate"):
                r = r*_linear(state, self._num_units, False)
            with vs.variable_scope("input"):
                x = _linear(inputs, self._num_units, True)
            h_hat = self._activation(r + x)

            new_h = (1 - g) * state + g * h_hat
        return new_h, new_h
rnn_cell.py (project: ROLO, author: Guanghan)
def __call__(self, inputs, state, scope=None):
    """Long short-term memory cell (LSTM)."""
    with vs.variable_scope(scope or type(self).__name__):  # "BasicLSTMCell"
      # Parameters of gates are concatenated into one multiply for efficiency.
      if self._state_is_tuple:
        c, h = state
      else:
        c, h = array_ops.split(1, 2, state)
      concat = _linear([inputs, h], 4 * self._num_units, True)

      # i = input_gate, j = new_input, f = forget_gate, o = output_gate
      i, j, f, o = array_ops.split(1, 4, concat)

      new_c = (c * sigmoid(f + self._forget_bias) + sigmoid(i) *
               self._activation(j))
      new_h = self._activation(new_c) * sigmoid(o)

      if self._state_is_tuple:
        new_state = LSTMStateTuple(new_c, new_h)
      else:
        new_state = array_ops.concat(1, [new_c, new_h])
      return new_h, new_state
ConvLSTMCell.py (project: Conv3D_CLSTM, author: GuangmingZhu)
def __call__(self, inputs, state, k_size=3, scope=None):
    """Convolutional Long short-term memory cell (ConvLSTM)."""
    with vs.variable_scope(scope or type(self).__name__): # "ConvLSTMCell"
      if self._state_is_tuple:
        c, h = state
      else:
        c, h = array_ops.split(3, 2, state)

      # batch_size * height * width * channel
      concat = _conv([inputs, h], 4 * self._num_units, k_size, True)

      # i = input_gate, j = new_input, f = forget_gate, o = output_gate
      i, j, f, o = array_ops.split(3, 4, concat)

      new_c = (c * sigmoid(f + self._forget_bias) + sigmoid(i) *
               self._activation(j))
      new_h = self._activation(new_c) * sigmoid(o)

      if self._state_is_tuple:
        new_state = LSTMStateTuple(new_c, new_h)
      else:
        new_state = array_ops.concat(3, [new_c, new_h])
      return new_h, new_state
sdca_ops.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def predictions(self, examples):
    """Add operations to compute predictions by the model.

    If logistic_loss is being used, predicted probabilities are returned.
    Otherwise, (raw) linear predictions (w*x) are returned.

    Args:
      examples: Examples to compute predictions on.

    Returns:
      An Operation that computes the predictions for examples.

    Raises:
      ValueError: if examples are not well defined.
    """
    self._assertSpecified(
        ['example_weights', 'sparse_features', 'dense_features'], examples)
    self._assertList(['sparse_features', 'dense_features'], examples)

    result = self._linear_predictions(examples)
    if self._options['loss_type'] == 'logistic_loss':
      # Convert logits to probability for logistic loss predictions.
      with name_scope('sdca/logistic_prediction'):
        result = math_ops.sigmoid(result)
    return result
bernoulli.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def __init__(self,
               logits=None,
               dtype=dtypes.int32,
               validate_args=False,
               allow_nan_stats=True,
               name="BernoulliWithSigmoidProbs"):
    parameters = locals()
    parameters.pop("self")
    with ops.name_scope(name) as ns:
      super(BernoulliWithSigmoidProbs, self).__init__(
          probs=nn.sigmoid(logits, name="sigmoid_probs"),
          dtype=dtype,
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          name=ns)
    self._parameters = parameters
bernoulli.py (project: DeepLearning_VirtualReality_BigData_Project, author: rashmitripathi)
def _kl_bernoulli_bernoulli(a, b, name=None):
  """Calculate the batched KL divergence KL(a || b) with a and b Bernoulli.

  Args:
    a: instance of a Bernoulli distribution object.
    b: instance of a Bernoulli distribution object.
    name: (optional) Name to use for created operations.
      default is "kl_bernoulli_bernoulli".

  Returns:
    Batchwise KL(a || b)
  """
  with ops.name_scope(name, "kl_bernoulli_bernoulli",
                      values=[a.logits, b.logits]):
    delta_probs0 = nn.softplus(-b.logits) - nn.softplus(-a.logits)
    delta_probs1 = nn.softplus(b.logits) - nn.softplus(a.logits)
    return (math_ops.sigmoid(a.logits) * delta_probs0
            + math_ops.sigmoid(-a.logits) * delta_probs1)
module_cell.py (project: dnnQuery, author: richardxiong)
def call(self, inputs, state):
    """Gated recurrent unit (GRU) with nunits cells."""
    with vs.variable_scope("gates"):  # Reset gate and update gate.
      # We start with bias of 1.0 to not reset and not update.
      bias_ones = self._bias_initializer
      if self._bias_initializer is None:
        dtype = [a.dtype for a in [inputs, state]][0]
        bias_ones = init_ops.constant_initializer(1.0, dtype=dtype)
      value = math_ops.sigmoid(
          _linear([inputs, state], 2 * self._num_units, True, bias_ones,
                  self._kernel_initializer))
      r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)
    with vs.variable_scope("candidate"):
      c = self._activation(
          _linear([inputs, r * state], self._num_units, True,
                  self._bias_initializer, self._kernel_initializer))
    new_h = u * state + (1 - u) * c
    return new_h, new_h
module_cell.py (project: dnnQuery, author: richardxiong)
def call(self, inputs, state):
    """Long short-term memory cell (LSTM)."""
    sigmoid = math_ops.sigmoid
    # Parameters of gates are concatenated into one multiply for efficiency.
    if self._state_is_tuple:
      c, h = state
    else:
      c, h = array_ops.split(value=state, num_or_size_splits=2, axis=1)

    concat = _linear([inputs, h], 4 * self._num_units, True)

    # i = input_gate, j = new_input, f = forget_gate, o = output_gate
    i, j, f, o = array_ops.split(value=concat, num_or_size_splits=4, axis=1)

    new_c = (
        c * sigmoid(f + self._forget_bias) + sigmoid(i) * self._activation(j))
    new_h = self._activation(new_c) * sigmoid(o)

    if self._state_is_tuple:
      new_state = LSTMStateTuple(new_c, new_h)
    else:
      new_state = array_ops.concat([new_c, new_h], 1)
    return new_h, new_state
cell.py (project: DMN-tensorflow, author: sufengniu)
def __call__(self, inputs, state, episodic_gate, scope=None):
        """Gated recurrent unit (GRU) with nunits cells."""

        with vs.variable_scope("MGRUCell"):  # "GRUCell"
            with vs.variable_scope("Gates"):    # Reset gate and update gate.
                # We start with bias of 1.0 to not reset and not update.
                r = rnn_cell.linear([inputs, state], self._num_units, True, 1.0, scope=scope)
                r = sigmoid(r)
            with vs.variable_scope("Candidate"):
                c = tanh(rnn_cell.linear([inputs, r * state], self._num_units, True))

            new_h = tf.mul(episodic_gate, c) + tf.mul((1 - episodic_gate), state)
        return new_h, new_h
rnn_cell.py (project: reading-comprehension, author: kellywzhang)
def __call__(self, inputs, state, time_mask, scope=None):
    """Gated recurrent unit (GRU) with state_size dimension cells."""
    with tf.variable_scope(self._scope or type(self).__name__):  # "GRUCell"
        input_size = self._input_size
        state_size = self._state_size

        hidden = tf.concat(1, [state, inputs])

        with tf.variable_scope("Gates"):  # Reset gate and update gate.
            # We start with bias of 1.0 to not reset and not update.
            self.W_reset = tf.get_variable(name="reset_weight", shape=[state_size+input_size, state_size], \
                initializer=tf.random_normal_initializer(mean=0.0, stddev=0.1))
            self.W_update = tf.get_variable(name="update_weight", shape=[state_size+input_size, state_size], \
                initializer=tf.random_normal_initializer(mean=0.0, stddev=0.1))
            self.b_reset = tf.get_variable(name="reset_bias", shape=[state_size], \
                initializer=tf.constant_initializer(1.0))
            self.b_update = tf.get_variable(name="update_bias", shape=[state_size], \
                initializer=tf.constant_initializer(1.0))

            reset = sigmoid(tf.matmul(hidden, self.W_reset) + self.b_reset)
            update = sigmoid(tf.matmul(hidden, self.W_update) + self.b_update)

        with tf.variable_scope("Candidate"):
            self.W_candidate = tf.get_variable(name="candidate_weight", shape=[state_size+input_size, state_size], \
                initializer=tf.random_normal_initializer(mean=0.0, stddev=0.1))
            self.b_candidate = tf.get_variable(name="candidate_bias", shape=[state_size], \
                initializer=tf.random_normal_initializer(mean=0.0, stddev=0.1))

            reset_input = tf.concat(1, [reset * state, inputs])
            candidate = self._activation(tf.matmul(reset_input, self.W_candidate) + self.b_candidate)

        # Complement of time_mask
        anti_time_mask = tf.cast(time_mask <= 0, tf.float32)
        new_h = update * state + (1 - update) * candidate
        new_h = time_mask * new_h + anti_time_mask * state

    return new_h, new_h

def zero_state(self, batch_size):
    return tf.Variable(tf.zeros([batch_size, self._state_size]), dtype=tf.float32)
my_lstm.py (project: joint-many-task-model, author: rubythonode)
def __call__(self, inputs, state, scope=None):
        """LSTM as mentioned in paper."""
        with vs.variable_scope(scope or "basic_lstm_cell"):
            # Parameters of gates are concatenated into one multiply for
            # efficiency.
            if self._state_is_tuple:
                c, h = state
            else:
                c, h = array_ops.split(1, 2, state)
            g = tf.concat(1, [inputs, h])
            concat = linear([g], 4 * self._num_units, True, scope=scope)

            # i = input_gate, j = new_input, f = forget_gate, o = output_gate
            i, j, f, o = array_ops.split(1, 4, concat)

            new_c = (c * sigmoid(f + self._forget_bias) + sigmoid(i) *
                     self._activation(j))
            new_h = self._activation(new_c) * sigmoid(o)

            if self._state_is_tuple:
                new_state = LSTMStateTuple(new_c, new_h)
            else:
                new_state = array_ops.concat_v2([new_c, new_h], 1)
            return new_h, new_state
dnn.py (project: lsdc, author: febert)
def _predictions(logits, n_classes):
  """Returns predictions for the given logits and n_classes."""
  predictions = {}
  if n_classes == 2:
    predictions[_LOGISTIC] = math_ops.sigmoid(logits)
    logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
  predictions[_PROBABILITIES] = nn.softmax(logits)
  predictions[_CLASSES] = array_ops.reshape(
      math_ops.argmax(logits, 1), shape=(-1, 1))
  return predictions
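The zero-padding trick in `_predictions` works because softmax([0, z]) = [1 - sigmoid(z), sigmoid(z)], so the generic two-class softmax path agrees exactly with the logistic output. A one-line check:

import numpy as np

z = 0.3
p = np.exp([0.0, z]) / np.exp([0.0, z]).sum()       # softmax of [0, z]
assert np.isclose(p[1], 1.0 / (1.0 + np.exp(-z)))   # equals sigmoid(z)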
bernoulli.py (project: lsdc, author: febert)
def __init__(self,
               p=None,
               dtype=dtypes.int32,
               validate_args=False,
               allow_nan_stats=True,
               name="BernoulliWithSigmoidP"):
    with ops.name_scope(name) as ns:
      super(BernoulliWithSigmoidP, self).__init__(
          p=nn.sigmoid(p),
          dtype=dtype,
          validate_args=validate_args,
          allow_nan_stats=allow_nan_stats,
          name=ns)
rnn_cell.py (project: lsdc, author: febert)
def __call__(self, inputs, state, scope=None):
    """LSTM cell with layer normalization and recurrent dropout."""

    with vs.variable_scope(scope or type(self).__name__) as scope:  # LayerNormBasicLSTMCell  # pylint: disable=unused-variables
      c, h = state
      args = array_ops.concat(1, [inputs, h])
      concat = self._linear(args)

      i, j, f, o = array_ops.split(1, 4, concat)
      if self._layer_norm:
        i = self._norm(i, "input")
        j = self._norm(j, "transform")
        f = self._norm(f, "forget")
        o = self._norm(o, "output")

      g = self._activation(j)
      if (not isinstance(self._keep_prob, float)) or self._keep_prob < 1:
        g = nn_ops.dropout(g, self._keep_prob, seed=self._seed)

      new_c = (c * math_ops.sigmoid(f + self._forget_bias)
               + math_ops.sigmoid(i) * g)
      if self._layer_norm:
        new_c = self._norm(new_c, "state")
      new_h = self._activation(new_c) * math_ops.sigmoid(o)

      new_state = rnn_cell.LSTMStateTuple(new_c, new_h)
      return new_h, new_state
head.py (project: lsdc, author: febert)
def _logits_to_predictions(self, logits):
    """See `_MultiClassHead`."""
    predictions = {prediction_key.PredictionKey.LOGITS: logits}
    if self.logits_dimension == 1:
      predictions[prediction_key.PredictionKey.LOGISTIC] = math_ops.sigmoid(
          logits)
      logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
    predictions[prediction_key.PredictionKey.PROBABILITIES] = math_ops.sigmoid(
        logits)
    predictions[prediction_key.PredictionKey.CLASSES] = math_ops.to_int64(
        math_ops.greater(logits, 0))
    return predictions

