Python tanh() usage examples
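
Before the project snippets, a minimal standalone sketch (NumPy only, for illustration) of what tanh computes: it squashes any real input into (-1, 1), and its derivative is 1 - tanh(x)^2, which is why it appears both as an activation and as a smooth gate in the cells below.

import numpy as np

x = np.linspace(-4.0, 4.0, 9)
y = np.tanh(x)
assert np.all((y > -1.0) & (y < 1.0))     # outputs squashed into (-1, 1)
dy = 1.0 - y ** 2                         # d/dx tanh(x) = 1 - tanh(x)^2
assert np.allclose(dy, np.cosh(x) ** -2)  # equals sech(x)^2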

rhn.py (project: RecurrentHighwayNetworks, author: julian121266)
def __call__(self, inputs, state, scope=None):
    current_state = state[0]
    noise_i = state[1]
    noise_h = state[2]
    for i in range(self.depth):
      with tf.variable_scope('h_'+str(i)):
        if i == 0:
          h = tf.tanh(linear([inputs * noise_i, current_state * noise_h], self._num_units, True))
        else:
          h = tf.tanh(linear([current_state * noise_h], self._num_units, True))
      with tf.variable_scope('t_'+str(i)):
        if i == 0:
          t = tf.sigmoid(linear([inputs * noise_i, current_state * noise_h], self._num_units, True, self.forget_bias))
        else:
          t = tf.sigmoid(linear([current_state * noise_h], self._num_units, True, self.forget_bias))
      current_state = (h - current_state) * t + current_state

    return current_state, [current_state, noise_i, noise_h]
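
The highway update above, current_state = (h - current_state) * t + current_state, is algebraically the usual gated mix t * h + (1 - t) * s. A quick NumPy check of that identity (illustrative only, not from the project):

import numpy as np

rng = np.random.RandomState(0)
s, h = rng.randn(3, 5), rng.randn(3, 5)
t = rng.rand(3, 5)  # stand-in for the sigmoid transform gate, in (0, 1)
assert np.allclose((h - s) * t + s, t * h + (1.0 - t) * s)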
level2_model.py (project: Skeleton-key, author: feiyu1990)
def _lstm(self, input_h, input_c, input_x, reuse=False):
        with tf.variable_scope('level2_lstm', reuse=reuse):
            w_i2h_ = np.transpose(self.model_load['/core/i2h_1/weight'][:], (1, 0))
            b_i2h_ = self.model_load['/core/i2h_1/bias'][:]
            w_h2h_ = np.transpose(self.model_load['/core/h2h_1/weight'][:], (1, 0))
            b_h2h_ = self.model_load['/core/h2h_1/bias'][:]

            w_i2h = tf.get_variable('w_i2h', initializer=w_i2h_)
            b_i2h = tf.get_variable('b_i2h', initializer=b_i2h_)
            w_h2h = tf.get_variable('w_h2h', initializer=w_h2h_)
            b_h2h = tf.get_variable('b_h2h', initializer=b_h2h_)

            input_x = tf.cast(input_x, tf.float32)
            i2h = tf.matmul(input_x, w_i2h) + b_i2h
            h2h = tf.matmul(input_h, w_h2h) + b_h2h
            all_input_sums = i2h + h2h
            reshaped = tf.reshape(all_input_sums, [-1, 4, self.H])
            n1, n2, n3, n4 = tf.unstack(reshaped, axis=1)
            in_gate = tf.sigmoid(n1)
            forget_gate = tf.sigmoid(n2)
            out_gate = tf.sigmoid(n3)
            in_transform = tf.tanh(n4)
            c = tf.multiply(forget_gate, input_c) + tf.multiply(in_gate, in_transform)
            h = tf.multiply(out_gate, tf.tanh(c))
            return c, h
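
The reshape to [-1, 4, self.H] followed by tf.unstack slices one fused (N, 4H) pre-activation matrix into four H-wide gate blocks. A NumPy illustration of that slicing, with hypothetical sizes:

import numpy as np

N, H = 2, 3
all_input_sums = np.arange(N * 4 * H).reshape(N, 4 * H)
reshaped = all_input_sums.reshape(N, 4, H)
n1, n2, n3, n4 = (reshaped[:, k, :] for k in range(4))
assert np.array_equal(n1, all_input_sums[:, :H])      # first H columns -> input gate
assert np.array_equal(n4, all_input_sums[:, 3 * H:])  # last H columns -> candidate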
level1_model.py (project: Skeleton-key, author: feiyu1990)
def _get_initial_lstm(self, features):
        with tf.variable_scope('level1/initial_lstm'):
            features_mean = tf.reduce_mean(features, 1)

            w2_init = np.transpose(self.model_load['/init_network/weight2'][:], (1, 0))
            b2_init = self.model_load['/init_network/bias2'][:]

            w_1_ = np.transpose(self.model_load['/init_network/weight1'][:], (1, 0))
            w_1 = tf.get_variable('w_w1', initializer=w_1_)
            b_1 = tf.get_variable('w_b1', initializer=self.model_load['/init_network/bias1'][:])
            h1 = tf.nn.relu(tf.matmul(features_mean, w_1) + b_1)
            # todo: this dropout can be added later
            # if self.dropout:
            # h1 = tf.nn.dropout(h1, 0.5)

            w_h = tf.get_variable('w_h', initializer=w2_init[:, self.H:])
            b_h = tf.get_variable('b_h', initializer=b2_init[self.H:])
            h = tf.nn.tanh(tf.matmul(h1, w_h) + b_h)

            w_c = tf.get_variable('w_c', initializer=w2_init[:, :self.H])
            b_c = tf.get_variable('b_c', initializer=b2_init[:self.H])
            c = tf.nn.tanh(tf.matmul(h1, w_c) + b_c)

            return c, h
level1_model.py (project: Skeleton-key, author: feiyu1990)
def _project_features(self, features):
        with tf.variable_scope('level1/project_features'):
            # features_proj --> proj_ctx
            # todo: features_proj = tf.matmul(features_flat, w) + b

            w1_ = np.transpose(self.model_load['/core/context_proj1/weight'][:], (1, 0))
            b1_ = self.model_load['/core/context_proj1/bias'][:]
            w2_ = np.transpose(self.model_load['/core/context_proj2/weight'][:], (1, 0))
            b2_ = self.model_load['/core/context_proj2/bias'][:]

            w1 = tf.get_variable('w1', initializer=w1_)
            b1 = tf.get_variable('b1', initializer=b1_)
            w2 = tf.get_variable('w2', initializer=w2_)
            b2 = tf.get_variable('b2', initializer=b2_)

            features_flat = tf.reshape(features, [-1, self.D])
            features_proj1 = tf.nn.tanh(tf.matmul(features_flat, w1) + b1)
            features_proj = tf.matmul(features_proj1, w2) + b2
            features_proj = tf.reshape(features_proj, [-1, self.L, self.D])
            return features_proj
lstm.py (project: rnnprop, author: vfleaking)
def lstm_func(x, h, c, wx, wh, b):
    """
        x: (N, D)
        h: (N, H)
        c: (N, H)
        wx: (D, 4H)
        wh: (H, 4H)
        b: (4H, )
    """
    N, H = tf.shape(h)[0], tf.shape(h)[1]
    a = tf.reshape(tf.matmul(x, wx) + tf.matmul(h, wh) + b, (N, -1, H))
    i, f, o, g = a[:,0,:], a[:,1,:], a[:,2,:], a[:,3,:]
    i = tf.sigmoid(i)
    f = tf.sigmoid(f)
    o = tf.sigmoid(o)
    g = tf.tanh(g)
    next_c = f * c + i * g
    next_h = o * tf.tanh(next_c)
    return next_h, next_c
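
A minimal usage sketch of lstm_func, assuming TensorFlow 1.x (tf.Session); the sizes and random weights are hypothetical stand-ins, not trained values:

import numpy as np
import tensorflow as tf

N, D, H = 2, 3, 5
x = tf.constant(np.random.randn(N, D), tf.float32)
h = tf.constant(np.zeros((N, H)), tf.float32)
c = tf.constant(np.zeros((N, H)), tf.float32)
wx = tf.constant(np.random.randn(D, 4 * H), tf.float32)
wh = tf.constant(np.random.randn(H, 4 * H), tf.float32)
b = tf.constant(np.zeros(4 * H), tf.float32)

next_h, next_c = lstm_func(x, h, c, wx, wh, b)
with tf.Session() as sess:
    nh, nc = sess.run([next_h, next_c])
    assert nh.shape == (N, H) and nc.shape == (N, H)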
rnn_cell.py (project: XMUNMT, author: XMUNLP)
def __call__(self, inputs, state, scope=None):
        with tf.variable_scope(scope, default_name="gru_cell",
                               values=[inputs, state]):
            if not isinstance(inputs, (list, tuple)):
                inputs = [inputs]

            all_inputs = list(inputs) + [state]
            r = tf.nn.sigmoid(linear(all_inputs, self._num_units, False, False,
                                     scope="reset_gate"))
            u = tf.nn.sigmoid(linear(all_inputs, self._num_units, False, False,
                                     scope="update_gate"))
            all_inputs = list(inputs) + [r * state]
            c = linear(all_inputs, self._num_units, True, False,
                       scope="candidate")

            new_state = (1.0 - u) * state + u * tf.tanh(c)

        return new_state, new_state
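
Because u is a sigmoid, the GRU update (1 - u) * state + u * tanh(c) is an elementwise convex combination, so the new state always lies between the old state and the tanh candidate. A NumPy check (illustrative only):

import numpy as np

rng = np.random.RandomState(1)
state, c = rng.randn(2, 4), rng.randn(2, 4)
u = 1.0 / (1.0 + np.exp(-rng.randn(2, 4)))  # sigmoid keeps the update gate in (0, 1)
new_state = (1.0 - u) * state + u * np.tanh(c)
lo = np.minimum(state, np.tanh(c))
hi = np.maximum(state, np.tanh(c))
assert np.all(new_state >= lo) and np.all(new_state <= hi)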
a3c.py (project: DeepRL, author: arnomoonens)
def __init__(self, state_shape, n_hidden, summary=True):
        super(CriticNetwork, self).__init__()
        self.state_shape = state_shape
        self.n_hidden = n_hidden

        with tf.variable_scope("critic"):
            self.states = tf.placeholder("float", [None] + self.state_shape, name="states")
            self.r = tf.placeholder(tf.float32, [None], name="r")

            L1 = tf.contrib.layers.fully_connected(
                inputs=self.states,
                num_outputs=self.n_hidden,
                activation_fn=tf.tanh,
                weights_initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.02),
                biases_initializer=tf.zeros_initializer(),
                scope="L1")

            self.value = tf.reshape(linear(L1, 1, "value", normalized_columns_initializer(1.0)), [-1])

            self.loss = tf.reduce_sum(tf.square(self.value - self.r))
            self.summary_loss = self.loss
            self.vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, tf.get_variable_scope().name)
reinforce.py (project: DeepRL, author: arnomoonens)
def build_network(self):
        # Symbolic variables for observation, action, and advantage
        self.states = tf.placeholder(tf.float32, [None, self.env_runner.nO], name="states")  # Observation
        self.a_n = tf.placeholder(tf.float32, name="a_n")  # Discrete action
        self.adv_n = tf.placeholder(tf.float32, name="adv_n")  # Advantage

        L1 = tf.contrib.layers.fully_connected(
            inputs=self.states,
            num_outputs=self.config["n_hidden_units"],
            activation_fn=tf.tanh,
            weights_initializer=tf.random_normal_initializer(),
            biases_initializer=tf.zeros_initializer())

        self.probs = tf.contrib.layers.fully_connected(
            inputs=L1,
            num_outputs=self.env_runner.nA,
            activation_fn=tf.nn.softmax,
            weights_initializer=tf.random_normal_initializer(),
            biases_initializer=tf.zeros_initializer())

        self.action = tf.squeeze(tf.multinomial(tf.log(self.probs), 1), name="action")
reinforce.py (project: DeepRL, author: arnomoonens)
def build_network_normal(self):
        # Symbolic variables for observation, action, and advantage
        self.states = tf.placeholder(tf.float32, [None, self.env_runner.nO], name="states")  # Observation
        self.a_n = tf.placeholder(tf.float32, name="a_n")  # Continuous action
        self.adv_n = tf.placeholder(tf.float32, name="adv_n")  # Advantage

        L1 = tf.contrib.layers.fully_connected(
            inputs=self.states,
            num_outputs=self.config["n_hidden_units"],
            activation_fn=tf.tanh,
            weights_initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.02),
            biases_initializer=tf.zeros_initializer())

        mu, sigma = mu_sigma_layer(L1, 1)

        self.normal_dist = tf.contrib.distributions.Normal(mu, sigma)
        self.action = self.normal_dist.sample(1)
        self.action = tf.clip_by_value(self.action, self.env.action_space.low[0], self.env.action_space.high[0])
mnist_model.py (project: tensorflow_tutorial, author: lpty)
def generator_graph(fake_imgs, units_size, out_size, alpha=0.01):
        # generator network; everything lives under its own variable scope
        with tf.variable_scope('generator'):
            # fully connected hidden layer
            layer = tf.layers.dense(fake_imgs, units_size)
            # leaky ReLU activation
            relu = tf.maximum(alpha * layer, layer)
            # dropout to reduce overfitting
            drop = tf.layers.dropout(relu, rate=0.2)
            # logits
            # out_size matches the size of the real images
            logits = tf.layers.dense(drop, out_size)
            # output layer: use tanh instead of sigmoid,
            # since tanh outputs (-1, 1) while sigmoid outputs [0, 1]
            outputs = tf.tanh(logits)
            return logits, outputs
ddpg.py (project: zjhReinforcementLearning, author: jhneo)
def _build_net(self, S, scope, trainable):
        # create scope
        # hidden dimension 30
        # feed S through a fully connected layer followed by relu
        # feed the hidden units through a fully connected layer with tanh activation
        # scale the action to the action bound
        with tf.variable_scope(scope):
            l1_dim = 30
            w1 = tf.Variable(tf.truncated_normal([self.state_dim, l1_dim], mean=0, stddev=0.3, seed=1234), trainable=trainable)
            b1 = tf.Variable(tf.constant(0.1, shape=[l1_dim]), trainable=trainable)
            l1 = tf.add(tf.matmul(S, w1), b1)
            net = tf.nn.relu(l1)

            with tf.variable_scope('a'):
                w2 = tf.Variable(tf.truncated_normal([l1_dim, self.a_dim], mean=0, stddev=0.3, seed=1234), trainable=trainable)
                b2 = tf.Variable(tf.constant(0.1, shape=[self.a_dim]), trainable=trainable)
                a = tf.tanh(tf.add(tf.matmul(net, w2), b2))  # use the relu output `net`, as the comments above describe
                scaled_a = tf.multiply(a, self.action_bound)

        return scaled_a

    #add grad to tensorflow graph
    #input: 
    #   a_grads: dq/da from critic
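
The final tanh bounds the raw action in (-1, 1), and multiplying by action_bound rescales it to [-action_bound, action_bound]. A NumPy sketch with a hypothetical bound:

import numpy as np

action_bound = 2.0                  # hypothetical, e.g. a pendulum torque limit
raw = np.random.randn(5, 1) * 10.0  # unbounded pre-activations
scaled_a = np.tanh(raw) * action_bound
assert np.all(np.abs(scaled_a) <= action_bound)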
network_continous_rnn.py (project: trpo, author: jjkke88)
def __call__(self, inputs, state, scope=None):
        """
            Long short-term memory cell (LSTM).
            Implemented from BasicLSTMCell.__call__.
        """
        with tf.variable_scope(scope or type(self).__name__):  # "BasicLSTMCell"
            # Parameters of gates are concatenated into one multiply for efficiency.
            c, h = tf.split(1, 2, state)
            concat = self.linear([inputs, h], 4 * self._num_units, True)

            # i = input_gate, j = new_input, f = forget_gate, o = output_gate
            i, j, f, o = tf.split(1, 4, concat)

            new_c = c * tf.sigmoid(f + self._forget_bias) + tf.sigmoid(i) * tf.tanh(j)
            new_h = tf.tanh(new_c) * tf.sigmoid(o)

            return new_h, tf.concat(1, [new_c, new_h])
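
Note the pre-1.0 TensorFlow argument order above: tf.split(axis, num, value) and tf.concat(axis, values). Under TF >= 1.0 the same operations take the value first and the axis last; a small sketch of the modern equivalents (assuming only this argument-order change):

import tensorflow as tf  # assumes TF >= 1.0

state = tf.zeros([2, 8])
c, h = tf.split(state, num_or_size_splits=2, axis=1)  # was: tf.split(1, 2, state)
packed = tf.concat([c, h], axis=1)                    # was: tf.concat(1, [c, h])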
deterministic_layer.py (project: vae_renyi_divergence, author: YingzhenLi)
def __init__(self, input_size, output_size, activation):
        self.input_size = input_size
        self.output_size = output_size       
        # activation function
        self.name = activation
        if activation == 'softplus':
            self._activation = tf.nn.softplus
        if activation == 'relu':
            self._activation = tf.nn.relu
        if activation == 'sigmoid':
            self._activation = tf.sigmoid
        if activation == 'tanh':
            self._activation = tf.tanh
        if activation == 'linear':
            self._activation = lambda x: x
        if activation == 'softmax':
            self._activation = tf.nn.softmax
        # parameters
        W = tf.Variable(init_weights(input_size, output_size))
        b = tf.Variable(tf.zeros([output_size]))
        #b = tf.Variable(init_weights(output_size, 0))
        self.params = [W, b]
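
An equivalent table-driven way to express the activation lookup above (a refactoring sketch, not the project's code); a dict makes unknown names fail loudly instead of leaving the activation unset:

import tensorflow as tf

ACTIVATIONS = {
    'softplus': tf.nn.softplus,
    'relu': tf.nn.relu,
    'sigmoid': tf.sigmoid,
    'tanh': tf.tanh,
    'linear': lambda x: x,
    'softmax': tf.nn.softmax,
}
activation = 'tanh'                    # hypothetical input
_activation = ACTIVATIONS[activation]  # raises KeyError on unknown names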
mirror.py (project: udacity, author: kensk8er)
def lstm_cell(X, output, state):
            """Create a LSTM cell. See e.g.: http://arxiv.org/pdf/1402.1128v1.pdf
            Note that in this formulation, we omit the various connections between the
            previous state and the gates."""
            X_output = tf.concat(1, [X, output])
            all_logits = tf.matmul(X_output, W_lstm) + b_lstm

            input_gate = tf.sigmoid(all_logits[:, :NUM_NODES])
            forget_gate = tf.sigmoid(all_logits[:, NUM_NODES: NUM_NODES * 2])
            output_gate = tf.sigmoid(all_logits[:, NUM_NODES * 2: NUM_NODES * 3])
            temp_state = all_logits[:, NUM_NODES * 3:]
            state = forget_gate * state + input_gate * tf.tanh(temp_state)

            return output_gate * tf.tanh(state), state


        # Input data.
twitter_pos.py (project: GELUs, author: hendrycks)
def gelu_fast(_x):
            return 0.5 * _x * (1 + tf.tanh(tf.sqrt(2 / np.pi) * (_x + 0.044715 * tf.pow(_x, 3))))
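
gelu_fast is the tanh approximation of GELU, x * Phi(x) with Phi the standard normal CDF. A quick NumPy check against the exact erf form (illustrative only):

import math
import numpy as np

def gelu_exact(x):
    return 0.5 * x * (1.0 + np.vectorize(math.erf)(x / math.sqrt(2.0)))

def gelu_fast_np(x):
    return 0.5 * x * (1.0 + np.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * x ** 3)))

x = np.linspace(-3.0, 3.0, 61)
assert np.max(np.abs(gelu_exact(x) - gelu_fast_np(x))) < 1e-2  # the two curves agree closely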
timit_fcn.py (project: GELUs, author: hendrycks)
def gelu_fast(_x):
            return 0.5 * _x * (1 + tf.tanh(tf.sqrt(2 / np.pi) * (_x + 0.044715 * tf.pow(_x, 3))))
encoders.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def _make_rnn_cell(self, i):
        if self._cell_type == "lstm":
            cell = tf.contrib.rnn.LSTMCell(self.output_size)
        elif self._cell_type == "gru":
            cell = tf.contrib.rnn.GRUCell(self.output_size)
        elif self._cell_type == "basic-tanh":
            cell = tf.contrib.rnn.BasicRNNCell(self.output_size)
        else:
            raise ValueError("Invalid RNN Cell type")
        cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=self._dropout, seed=8 + 33 * i)
        return cell
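
A typical way to use such a per-layer cell factory is to stack the cells and unroll them; a sketch assuming TF 1.x contrib and hypothetical sizes (not the project's code):

import tensorflow as tf  # assumes TF 1.x

num_layers = 2  # hypothetical
cells = [tf.contrib.rnn.DropoutWrapper(tf.contrib.rnn.LSTMCell(64),
                                       output_keep_prob=0.5, seed=8 + 33 * i)
         for i in range(num_layers)]
cell = tf.contrib.rnn.MultiRNNCell(cells)
inputs = tf.zeros([4, 10, 32])  # (batch, time, features) placeholder data
outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)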
encoders.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def encode(self, inputs, _input_length, _parses):
        with tf.variable_scope('BagOfWordsEncoder'):
            W = tf.get_variable('W', (self.embed_size, self.output_size))
            b = tf.get_variable('b', shape=(self.output_size,), initializer=tf.constant_initializer(0, tf.float32))

            enc_hidden_states = tf.tanh(tf.tensordot(inputs, W, [[2], [0]]) + b)
            enc_final_state = tf.reduce_sum(enc_hidden_states, axis=1)

            #assert enc_hidden_states.get_shape()[1:] == (self.config.max_length, self.config.hidden_size)
            if self._cell_type == 'lstm':
                enc_final_state = (tf.contrib.rnn.LSTMStateTuple(enc_final_state, enc_final_state),)

            enc_output = tf.nn.dropout(enc_hidden_states, keep_prob=self._dropout, seed=12345)

            return enc_output, enc_final_state
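
tf.tensordot(inputs, W, [[2], [0]]) contracts the embedding axis of a (batch, time, embed) tensor against an (embed, output) matrix, i.e. the same dense layer applied at every timestep. A NumPy shape check with hypothetical sizes:

import numpy as np

B, T, E, H = 2, 7, 16, 32
inputs = np.random.randn(B, T, E)
W = np.random.randn(E, H)
out = np.tensordot(inputs, W, axes=[[2], [0]])
assert out.shape == (B, T, H)
assert np.allclose(out, inputs.reshape(-1, E).dot(W).reshape(B, T, H))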
tree_encoder.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def _make_rnn_cell(self, i):
        if self._cell_type == "lstm":
            cell = tf.contrib.rnn.LSTMCell(self.output_size)
        elif self._cell_type == "gru":
            cell = tf.contrib.rnn.GRUCell(self.output_size)
        elif self._cell_type == "basic-tanh":
            cell = tf.contrib.rnn.BasicRNNCell(self.output_size)
        else:
            raise ValueError("Invalid RNN Cell type")
        cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=self._dropout, seed=8 + 33 * i)
        return cell
tree_encoder.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def _make_tree_cell(self, i):
        if self._cell_type == "lstm":
            cell = TreeLSTM(self.output_size)
        elif self._cell_type in ("gru", "basic-tanh"):
            raise NotImplementedError("GRU/basic-tanh tree cells not implemented yet")
        else:
            raise ValueError("Invalid RNN Cell type")
        cell = TreeDropoutWrapper(cell, output_keep_prob=self._dropout, seed=8 + 33 * i)
        return cell

