Python tanh() usage examples

fops.py (project: shuttleNet, author: shiyemin)
def __init__(self, batch_size, num_mem, num_round, input_offset,
                 cell=None,
                 echocell=None,
                 mem_size=2,
                 mem_dim=1024,
                 activation=tanh,
                 dummy_value=0.0):
        """
        args:
            num_mem: number of cells
            mem_size: number of memory lines, only work for MemGrid
            mem_dim: length of memory line, only work for MemGrid
            num_round:  the round number of processing in the cell
        """
        self._batch_size = batch_size
        self._num_mem = num_mem
        self._mem_dim = mem_dim
        self._num_round = num_round
        self._input_offset = input_offset
        if cell is None:
            self.check = True
            self._mem_cells = [MemGrid(batch_size, mem_size, mem_dim, "Mem_%d" % i,
                                       activation=activation, dummy_value=dummy_value)
                               for i in range(num_mem)]  # range: xrange is Python 2 only
        else:
            self.check = False
            self._mem_cells = [cell] * num_mem
        self.echocell = echocell
rnn_cell.py (project: DL-Benchmarks, author: DL-Benchmarks)
def __call__(self, inputs, state, scope=None):
    """Most basic RNN: output = new_state = tanh(W * input + U * state + B)."""
    with vs.variable_scope(scope or type(self).__name__):  # "BasicRNNCell"
      output = tanh(linear([inputs, state], self._num_units, True))
    return output, output
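The docstring above is the whole recurrence. As a plain-NumPy cross-check, here is a minimal sketch with illustrative shapes (not code from DL-Benchmarks):

import numpy as np

def basic_rnn_step(x, h, W, U, b):
    # One vanilla RNN step: output = new_state = tanh(W @ x + U @ h + b).
    return np.tanh(W @ x + U @ h + b)

rng = np.random.default_rng(0)
x = rng.standard_normal(3)            # input, 3 features
h = np.zeros(5)                       # previous state, 5 units
W = rng.standard_normal((5, 3))
U = rng.standard_normal((5, 5))
b = np.zeros(5)
h = basic_rnn_step(x, h, W, U, b)     # new state, entries in (-1, 1)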
rnn_cell.py (project: DL-Benchmarks, author: DL-Benchmarks)
def __call__(self, inputs, state, scope=None):
    """Gated recurrent unit (GRU) with nunits cells."""
    with vs.variable_scope(scope or type(self).__name__):  # "GRUCell"
      with vs.variable_scope("Gates"):  # Reset gate and update gate.
        # We start with bias of 1.0 to not reset and not update.
        r, u = array_ops.split(1, 2, linear([inputs, state],
                                            2 * self._num_units, True, 1.0))
        r, u = sigmoid(r), sigmoid(u)
      with vs.variable_scope("Candidate"):
        c = tanh(linear([inputs, r * state], self._num_units, True))
      new_h = u * state + (1 - u) * c
    return new_h, new_h
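For the same gate arithmetic without TensorFlow, here is a hedged NumPy sketch of one GRU step; the constant +1.0 on the gate pre-activations mimics the bias-of-1.0 initialization above, and all weights are illustrative:

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def gru_step(x, h, Wr, Ur, Wu, Uu, Wc, Uc):
    r = sigmoid(Wr @ x + Ur @ h + 1.0)   # reset gate, bias starts at 1.0
    u = sigmoid(Wu @ x + Uu @ h + 1.0)   # update gate, bias starts at 1.0
    c = np.tanh(Wc @ x + Uc @ (r * h))   # candidate, reset applied to the state
    return u * h + (1.0 - u) * c         # new_h, as in the snippet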
rnn_cell.py (project: DL-Benchmarks, author: DL-Benchmarks)
def __call__(self, inputs, state, scope=None):
    """Long short-term memory cell (LSTM)."""
    with vs.variable_scope(scope or type(self).__name__):  # "BasicLSTMCell"
      # Parameters of gates are concatenated into one multiply for efficiency.
      c, h = array_ops.split(1, 2, state)
      concat = linear([inputs, h], 4 * self._num_units, True)

      # i = input_gate, j = new_input, f = forget_gate, o = output_gate
      i, j, f, o = array_ops.split(1, 4, concat)

      new_c = c * sigmoid(f + self._forget_bias) + sigmoid(i) * tanh(j)
      new_h = tanh(new_c) * sigmoid(o)

    return new_h, array_ops.concat(1, [new_c, new_h])
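The same arithmetic in NumPy, as a minimal sketch: the single matrix W plays the role of `linear` applied to concat([inputs, h]), and shapes are illustrative:

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def lstm_step(x, c, h, W, b, forget_bias=1.0):
    z = W @ np.concatenate([x, h]) + b   # one multiply for all four gates
    i, j, f, o = np.split(z, 4)          # input gate, new input, forget gate, output gate
    new_c = c * sigmoid(f + forget_bias) + sigmoid(i) * np.tanh(j)
    new_h = np.tanh(new_c) * sigmoid(o)
    return new_c, new_h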
convLSTM.py (project: Tensorflow-SegNet, author: tkuanlun350)
def __init__(self, num_units, k_size=3, height=23, width=30,
             input_size=None, activation=tanh, initializer=None):
    if input_size is not None:
      logging.warn("%s: The input_size parameter is deprecated.", self)
    self._num_units = num_units
    self._activation = activation
    self._initializer = initializer
    self._k_size = k_size
    self._height = height
    self._width = width
convLSTM.py (project: Tensorflow-SegNet, author: tkuanlun350)
def __init__(self, num_units, k_size=3, batch_size=4, height=23, width=30, input_size=None,
               use_peepholes=False, cell_clip=None,
               initializer=None, num_proj=None, proj_clip=None,
               num_unit_shards=1, num_proj_shards=1,
               forget_bias=1.0, state_is_tuple=False,
               activation=tanh):

    if not state_is_tuple:
      logging.warn(
          "%s: Using a concatenated state is slower and will soon be "
          "deprecated.  Use state_is_tuple=True." % self)
    if input_size is not None:
      logging.warn("%s: The input_size parameter is deprecated." % self)

    #self._use_peepholes = use_peepholes
    #self._cell_clip = cell_clip
    #self._initializer = initializer
    #self._num_proj = num_proj
    #self._num_unit_shards = num_unit_shards
    #self._num_proj_shards = num_proj_shards

    self._num_units = num_units
    self._forget_bias = forget_bias
    self._state_is_tuple = state_is_tuple
    self._activation = activation
    self._initializer = initializer
    self._k_size = k_size
    self._height = height
    self._width = width
    self._batch_size = batch_size
GRU.py (project: R-net, author: minsangkim142)
def __init__(self, num_units, activation=None, is_training=True, reuse=None):
        self._num_units = num_units
        self._activation = activation or tf.tanh
        self._is_training = is_training
GRU.py (project: R-net, author: minsangkim142)
def __init__(self,
               num_units,
               activation=None,
               reuse=None,
               kernel_initializer=None,
               bias_initializer=None,
               is_training=True):
    super(GRUCell, self).__init__(_reuse=reuse)
    self._num_units = num_units
    self._activation = activation or math_ops.tanh
    self._kernel_initializer = kernel_initializer
    self._bias_initializer = bias_initializer
    self._is_training = is_training
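Both R-net constructors rely on the `activation or tf.tanh` idiom to fall back to tanh when no activation is supplied. A quick plain-Python illustration of how that expression resolves:

import math

def pick_activation(activation=None):
    # `x or default` yields `default` whenever x is falsy; None is falsy,
    # and any real callable is truthy, so an explicit argument always wins.
    return activation or math.tanh

assert pick_activation() is math.tanh
assert pick_activation(abs) is abs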
rnn_cell.py (project: odin, author: imito)
def __init__(self, num_units,
               kernel_initializer=None,
               bias_initializer=tf.constant_initializer(value=0.),
               activation=None,
               reuse=None):
    super(BasicRNNCell, self).__init__(_reuse=reuse)
    self._num_units = num_units
    self._activation = activation or tf.nn.tanh
    self._bias_initializer = bias_initializer
    self._kernel_initializer = kernel_initializer
basicRNNCellGauss.py (project: dizzy_layer, author: Pastromhaug)
def __init__(self, num_units, input_size=None, activation=tanh):
    if input_size is not None:
      logging.warn("%s: The input_size parameter is deprecated.", self)
    self._num_units = num_units
    self._activation = activation
layers.py (project: tf-layer-norm, author: pbhatia243)
def __init__(self, num_units, input_size=None, activation=tanh):
    if input_size is not None:
      print("%s: The input_size parameter is deprecated." % self)
    self._num_units = num_units
    self._activation = activation
layers.py (project: tf-layer-norm, author: pbhatia243)
def __init__(self, num_units, forget_bias=1.0, input_size=None,
             state_is_tuple=False, activation=tanh, hyper_num_units=128,
             hyper_embedding_size=32, is_layer_norm=True):
    """Initialize the basic LSTM cell.
    Args:
      num_units: int, The number of units in the LSTM cell.
      hyper_num_units: int, The number of units in the HyperLSTM cell.
      forget_bias: float, The bias added to forget gates (see above).
      input_size: Deprecated and unused.
      state_is_tuple: If True, accepted and returned states are 2-tuples of
        the `c_state` and `m_state`.  By default (False), they are concatenated
        along the column axis.  This default behavior will soon be deprecated.
      activation: Activation function of the inner states.
    """
    if not state_is_tuple:
      print("%s: Using a concatenated state is slower and will soon be "
            "deprecated.  Use state_is_tuple=True." % self)
    if input_size is not None:
      print("%s: The input_size parameter is deprecated." % self)
    self._num_units = num_units
    self._forget_bias = forget_bias
    self._state_is_tuple = state_is_tuple
    self._activation = activation
    self.hyper_num_units = hyper_num_units
    self.total_num_units = self._num_units + self.hyper_num_units
    self.hyper_cell = rnn_cell.BasicLSTMCell(hyper_num_units)
    self.hyper_embedding_size = hyper_embedding_size
    self.is_layer_norm = is_layer_norm
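For context, the hyper-LSTM here follows the HyperNetworks idea (Ha et al., 2016): a small auxiliary LSTM emits embedding vectors that rescale the rows of the main cell's weight matrices. A minimal, hypothetical NumPy sketch of that rescaling step (names and shapes are illustrative, not taken from this repository):

import numpy as np

def hyper_scale(W, z_h, W_hz, b_hz):
    # Rescale each row of the main weight matrix W by a vector derived
    # from the hyper-LSTM hidden state z_h (illustrative shapes only).
    d = W_hz @ z_h + b_hz          # per-unit scale vector, shape (num_units,)
    return d[:, None] * W          # row-wise rescaling of W

num_units, hyper_embedding_size = 4, 2
rng = np.random.default_rng(1)
W = rng.standard_normal((num_units, num_units))
z_h = rng.standard_normal(hyper_embedding_size)
W_hz = rng.standard_normal((num_units, hyper_embedding_size))
b_hz = np.ones(num_units)          # init near 1 so scaling starts near identity
W_scaled = hyper_scale(W, z_h, W_hz, b_hz)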
attention_cell.py (project: rnn_sent, author: bill-kalog)
def __init__(self, num_units, input_size=None, activation=tanh, reuse=None):
        if input_size is not None:
            logging.warn("%s: The input_size parameter is deprecated.", self)
        self._num_units = num_units
        self._activation = activation
        self._reuse = reuse
rnn_cell.py (project: diversity_based_attention, author: PrekshaNema25)
def __init__(self, num_units, input_size=None, activation=tanh):
    if input_size is not None:
      logging.warn("%s: The input_size parameter is deprecated.", self)
    self._num_units = num_units
    self._activation = activation
rcn.py (project: u8m_test, author: hxkk)
def call(self, inputs, state, scope=None):
        with vs.variable_scope(scope or type(self).__name__):  # "GruRcnCell"
            with vs.variable_scope("Gates"):  # Reset gate and update gate.
                # We start with bias of 1.0.
                w_zrw = self._conv(inputs, self._num_outputs*3, self._ih_filter_h_length, self._ih_filter_w_length,
                                 self._ih_strides, self._ih_pandding, init_ops.truncated_normal_initializer(stddev=0.01), scope="WzrwConv")

                u_zr = self._conv(state, self._num_outputs*2, self._hh_filter_h_length, self._hh_filter_w_length, [1, 1, 1, 1],
                                 "SAME", init_ops.truncated_normal_initializer(stddev=0.01), scope="UzrConv")

                w_z, w_r, w = tf.split(value=w_zrw, num_or_size_splits=3, axis=3, name="w_split")
                u_z, u_r = tf.split(value=u_zr, num_or_size_splits=2, axis=3, name="u_split")

                z_bias = tf.get_variable(
                    name="z_biases",
                    shape=[self._num_outputs],
                    initializer=init_ops.ones_initializer()
                )
                z_gate = math_ops.sigmoid(tf.nn.bias_add(w_z + u_z, z_bias))

                r_bias = tf.get_variable(
                    name="r_biases",
                    shape=[self._num_outputs],
                    initializer=init_ops.ones_initializer())
                r_gate = math_ops.sigmoid(tf.nn.bias_add(w_r + u_r, r_bias))

            with vs.variable_scope("Candidate"):
#                 w = self._conv(inputs, self._num_outputs, self._ih_filter_h_length, self._ih_filter_w_length,
#                                self._ih_strides, self._ih_pandding, init_ops.truncated_normal_initializer(stddev=0.01), scope="WConv")
                u = self._conv(r_gate * state, self._num_outputs, self._hh_filter_h_length, self._hh_filter_w_length,
                               [1, 1, 1, 1], "SAME", init_ops.truncated_normal_initializer(stddev=0.01), scope="UConv")
                c_bias = tf.get_variable(
                    name="c_biases",
                    shape=[self._num_outputs],
                    initializer=init_ops.ones_initializer())
                c = math_ops.tanh(tf.nn.bias_add(w + u, c_bias))
            new_h = z_gate * state + (1 - z_gate) * c
        return new_h, new_h
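Everything gate-shaped above comes out of a single convolution whose channels are then split on axis 3 of the NHWC tensor. A small NumPy analogue of that split, with made-up shapes:

import numpy as np

# NHWC feature map holding three stacked gate pre-activations.
num_outputs = 8
w_zrw = np.zeros((4, 23, 30, num_outputs * 3))    # batch, height, width, channels
w_z, w_r, w = np.split(w_zrw, 3, axis=3)          # mirrors tf.split(..., 3, axis=3)
assert w_z.shape == (4, 23, 30, num_outputs)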

