Python bool(): example source code

tensorflow_backend.py (project: keras-customized, author: ambrite)
def switch(condition, then_expression, else_expression):
    '''Switches between two operations
    depending on a scalar value (int or bool).
    Note that both `then_expression` and `else_expression`
    should be symbolic tensors of the *same shape*.

    # Arguments
        condition: scalar tensor.
        then_expression: TensorFlow operation.
        else_expression: TensorFlow operation.
    '''
    x_shape = copy.copy(then_expression.get_shape())
    if condition.dtype != tf.bool:
        condition = tf.cast(condition, 'bool')
    # _cond is the backend's version-compatible wrapper around tf.cond
    x = _cond(condition,
              lambda: then_expression,
              lambda: else_expression)
    x.set_shape(x_shape)
    return x
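For reference, a minimal sketch of the same pattern with plain tf.cond (assuming TF 1.x); unlike raw tf.cond, the switch above also restores the static shape afterwards:

import tensorflow as tf

condition = tf.placeholder(tf.bool, shape=())
a = tf.ones([2, 2])
b = tf.zeros([2, 2])
out = tf.cond(condition, lambda: a, lambda: b)  # picks a branch per the bool scalar

with tf.Session() as sess:
    print(sess.run(out, feed_dict={condition: True}))   # all ones
    print(sess.run(out, feed_dict={condition: False}))  # all zeros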
tensorflow_backend.py (project: keras-customized, author: ambrite)
def in_top_k(predictions, targets, k):
    '''Returns whether the `targets` are in the top `k` `predictions`.

    # Arguments
        predictions: A tensor of shape batch_size x classes and type float32.
        targets: A tensor of shape batch_size and type int32 or int64.
        k: An int, number of top elements to consider.

    # Returns
        A tensor of shape batch_size and type bool. output_i is True if
        targets_i is within the top-k values of predictions_i.
    '''
    return tf.nn.in_top_k(predictions, targets, k)
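A small numeric illustration of the bool output (hypothetical values):

import tensorflow as tf

predictions = tf.constant([[0.1, 0.8, 0.1],
                           [0.6, 0.3, 0.1]])  # batch_size x classes, float32
targets = tf.constant([1, 2])                 # batch_size, int32

with tf.Session() as sess:
    # row 0: 0.8 is in the top 2 -> True; row 1: 0.1 is not -> False
    print(sess.run(tf.nn.in_top_k(predictions, targets, k=2)))  # [ True False]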


# CONVOLUTIONS
speech.py (project: fathom, author: rdadolf)
def ctc_label_dense_to_sparse( self, labels, label_lengths ):
    """Mike Henry's implementation, with some minor modifications."""
    with self.G.as_default():
      label_shape = tf.shape( labels )
      num_batches_tns = tf.stack( [label_shape[0]] )
      max_num_labels_tns = tf.stack( [label_shape[1]] )

      def range_less_than(previous_state, current_input):
        return tf.expand_dims( tf.range( label_shape[1] ), 0 ) < current_input

      init = tf.cast( tf.fill( max_num_labels_tns, 0 ), tf.bool )
      init = tf.expand_dims( init, 0 )
      # functional_ops.scan comes from tensorflow.python.ops; tf.scan is the public equivalent
      dense_mask = functional_ops.scan(range_less_than, label_lengths, initializer=init, parallel_iterations=1)
      dense_mask = dense_mask[ :, 0, : ]

      label_array = tf.reshape( tf.tile( tf.range( 0, label_shape[1] ), num_batches_tns ), label_shape )
      label_ind = tf.boolean_mask( label_array, dense_mask )

      batch_array = tf.transpose( tf.reshape( tf.tile( tf.range( 0,  label_shape[0] ), max_num_labels_tns ), tf.reverse( label_shape,[0]) ) )
      batch_ind = tf.boolean_mask( batch_array, dense_mask )

      indices = tf.transpose( tf.reshape( tf.concat( axis=0, values=[batch_ind, label_ind] ), [2,-1] ) )
      vals_sparse = tf.gather_nd( labels, indices )
      return tf.SparseTensor( tf.to_int64(indices), vals_sparse, tf.to_int64( label_shape ) )
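The scan over range_less_than builds effectively the same boolean mask as tf.sequence_mask; a minimal equivalent sketch with hypothetical lengths:

import tensorflow as tf

# lengths [2, 1] padded to max length 3
mask = tf.sequence_mask([2, 1], maxlen=3)

with tf.Session() as sess:
    print(sess.run(mask))  # [[ True  True False]
                           #  [ True False False]]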
seq_batch.py (project: lang2program, author: kelvinguu)
def change_pad_value(values, mask, pad_val):
    """Given a set of values and a pad mask, change the value of all pad entries.

    Args:
        values (Tensor): of shape [batch_size, seq_length, :, ..., :].
        mask (Tensor): binary float tensor of shape [batch_size, seq_length]
        pad_val (float): value to set all pad entries to

    Returns:
        Tensor: a new Tensor of same shape as values
    """
    # broadcast the mask to match shape of values
    mask = expand_dims_for_broadcast(mask, values)  # (batch_size, seq_length, 1, ..., 1)
    mask = broadcast(mask, values)
    mask = tf.cast(mask, tf.bool)  # cast to bool

    # broadcast val
    broadcast_val = pad_val * tf.ones(tf.shape(values))

    # tf.select is the pre-1.0 API; in TF >= 1.0 this is tf.where
    new_values = tf.select(mask, values, broadcast_val)
    return new_values
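A minimal sketch of the select-on-mask idea, using tf.where (the TF >= 1.0 name for tf.select) and hypothetical values:

import tensorflow as tf

values = tf.constant([[1.0, 2.0, 3.0]])
mask = tf.constant([[True, True, False]])  # False marks pad positions
pad_val = -1.0
new_values = tf.where(mask, values, pad_val * tf.ones_like(values))

with tf.Session() as sess:
    print(sess.run(new_values))  # [[ 1.  2. -1.]]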
beam_aligner.py (project: almond-nnparser, author: Stanford-Mobisocial-IoT-Lab)
def initialize(self):
        """Initialize the decoder.
        Args:
          name: Name scope for any created operations.
        Returns:
          `(finished, start_inputs, initial_state)`.
        """
        start_inputs = self._embedding_fn(self._tiled_start_tokens)
        print('start_inputs', start_inputs)
        finished = tf.zeros((self.batch_size, self._beam_width), dtype=tf.bool)

        self._initial_num_available_beams = tf.ones((self._batch_size,), dtype=tf.int32)
        self._full_num_available_beams = tf.fill((self._batch_size,), self._beam_width)

        with tf.name_scope('first_beam_mask'):
            self._first_beam_mask = self._make_beam_mask(self._initial_num_available_beams)
        with tf.name_scope('full_beam_mask'):
            self._full_beam_mask = self._make_beam_mask(self._full_num_available_beams)
        with tf.name_scope('minus_inifinity_scores'):
            self._minus_inifinity_scores = tf.fill((self.batch_size, self._beam_width, self._output_size), -1e+8)

        self._batch_size_range = tf.range(self.batch_size)
        initial_state = BeamSearchOptimizationDecoderState(
            cell_state=self._tiled_initial_cell_state,
            previous_logits=tf.zeros([self.batch_size, self._beam_width, self._output_size], dtype=tf.float32),
            previous_score=tf.zeros([self.batch_size, self._beam_width], dtype=tf.float32),
            # During the first time step we only consider the initial beam
            num_available_beams=self._initial_num_available_beams,
            gold_beam_id=tf.zeros([self.batch_size], dtype=tf.int32),
            finished=finished)

        return (finished, start_inputs, initial_state)
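A sketch of the masking idiom the beam masks above serve, assuming _make_beam_mask marks which of the beam_width slots are available (hypothetical sizes):

import tensorflow as tf

batch_size, beam_width = 2, 3
num_available_beams = tf.constant([1, 3])
beam_mask = tf.sequence_mask(num_available_beams, maxlen=beam_width)
scores = tf.zeros([batch_size, beam_width])
# unavailable beams get -1e8 so they can never win a top-k selection
masked = tf.where(beam_mask, scores, tf.fill([batch_size, beam_width], -1e8))

with tf.Session() as sess:
    print(sess.run(masked))  # [[ 0.e+00 -1.e+08 -1.e+08]
                             #  [ 0.e+00  0.e+00  0.e+00]]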
text.py (project: AVSR-Deep-Speech, author: pandeydivesh15)
def ctc_label_dense_to_sparse(labels, label_lengths, batch_size):
    # The second dimension of labels must be equal to the longest label length in the batch
    correct_shape_assert = tf.assert_equal(tf.shape(labels)[1], tf.reduce_max(label_lengths))
    with tf.control_dependencies([correct_shape_assert]):
        labels = tf.identity(labels)

    label_shape = tf.shape(labels)
    num_batches_tns = tf.stack([label_shape[0]])
    max_num_labels_tns = tf.stack([label_shape[1]])
    def range_less_than(previous_state, current_input):
        return tf.expand_dims(tf.range(label_shape[1]), 0) < current_input

    init = tf.cast(tf.fill(max_num_labels_tns, 0), tf.bool)
    init = tf.expand_dims(init, 0)
    dense_mask = tf.scan(range_less_than, label_lengths, initializer=init, parallel_iterations=1)
    dense_mask = dense_mask[:, 0, :]

    label_array = tf.reshape(tf.tile(tf.range(0, label_shape[1]), num_batches_tns),
          label_shape)
    label_ind = tf.boolean_mask(label_array, dense_mask)

    batch_array = tf.transpose(tf.reshape(tf.tile(tf.range(0, label_shape[0]), max_num_labels_tns), tf.reverse(label_shape, [0])))
    batch_ind = tf.boolean_mask(batch_array, dense_mask)

    indices = tf.transpose(tf.reshape(tf.concat([batch_ind, label_ind], 0), [2, -1]))
    shape = [batch_size, tf.reduce_max(label_lengths)]
    vals_sparse = gather_nd(labels, indices, shape)  # project-local helper, not tf.gather_nd (note the extra shape argument)

    return tf.SparseTensor(tf.to_int64(indices), vals_sparse, tf.to_int64(label_shape))

# Validate and normalize transcriptions. Returns a cleaned version of the label
# or None if it's invalid.
text_RHL.py (project: AVSR-Deep-Speech, author: pandeydivesh15)
def ctc_label_dense_to_sparse(labels, label_lengths, batch_size):
    # The second dimension of labels must be equal to the longest label length in the batch
    correct_shape_assert = tf.assert_equal(tf.shape(labels)[1], tf.reduce_max(label_lengths))
    with tf.control_dependencies([correct_shape_assert]):
        labels = tf.identity(labels)

    label_shape = tf.shape(labels)
    num_batches_tns = tf.stack([label_shape[0]])
    max_num_labels_tns = tf.stack([label_shape[1]])
    def range_less_than(previous_state, current_input):
        return tf.expand_dims(tf.range(label_shape[1]), 0) < current_input

    init = tf.cast(tf.fill(max_num_labels_tns, 0), tf.bool)
    init = tf.expand_dims(init, 0)
    dense_mask = tf.scan(range_less_than, label_lengths, initializer=init, parallel_iterations=1)
    dense_mask = dense_mask[:, 0, :]

    label_array = tf.reshape(tf.tile(tf.range(0, label_shape[1]), num_batches_tns),
          label_shape)
    label_ind = tf.boolean_mask(label_array, dense_mask)

    batch_array = tf.transpose(tf.reshape(tf.tile(tf.range(0, label_shape[0]), max_num_labels_tns), tf.reverse(label_shape, [0])))
    batch_ind = tf.boolean_mask(batch_array, dense_mask)

    indices = tf.transpose(tf.reshape(tf.concat([batch_ind, label_ind], 0), [2, -1]))
    shape = [batch_size, tf.reduce_max(label_lengths)]
    vals_sparse = gather_nd(labels, indices, shape)  # project-local helper, not tf.gather_nd (note the extra shape argument)

    return tf.SparseTensor(tf.to_int64(indices), vals_sparse, tf.to_int64(label_shape))

# Validate and normalize transcriptions. Returns a cleaned version of the label
# or None if it's invalid.
readers.py (project: youtube-8m, author: wangheda)
def prepare_reader(self, filename_queue):

    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)

    contexts, features = tf.parse_single_sequence_example(
        serialized_example,
        context_features={
            "video_id": tf.FixedLenFeature([], tf.string),
            "labels": tf.VarLenFeature(tf.int64)},
        sequence_features={
            "rgb": tf.FixedLenSequenceFeature([], dtype=tf.string),
            "audio": tf.FixedLenSequenceFeature([], dtype=tf.string),
        })

    # read ground truth labels
    labels = (tf.cast(
        tf.sparse_to_dense(contexts["labels"].values, (self.num_classes,), 1,
            validate_indices=False),
        tf.bool))

    rgbs, num_frames = self.get_video_matrix(features["rgb"], 1024, self.max_frames)
    audios, num_frames = self.get_video_matrix(features["audio"], 1024, self.max_frames)

    batch_video_ids = tf.expand_dims(contexts["video_id"], 0)
    batch_rgbs = tf.expand_dims(rgbs, 0)
    batch_audios = tf.expand_dims(audios, 0)
    batch_labels = tf.expand_dims(labels, 0)
    batch_frames = tf.expand_dims(num_frames, 0)

    return batch_video_ids, batch_rgbs, batch_audios, batch_labels, batch_frames
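A minimal sketch of the label densification above with a hypothetical class count:

import tensorflow as tf

num_classes = 5
label_indices = tf.constant([1, 3], dtype=tf.int64)
dense = tf.sparse_to_dense(label_indices, (num_classes,), 1, validate_indices=False)
labels = tf.cast(dense, tf.bool)  # multi-hot bool vector

with tf.Session() as sess:
    print(sess.run(labels))  # [False  True False  True False]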
mlp_policy.py (project: distributional_perspective_on_RL, author: Kiwoo)
def _init(self, ob_space, ac_space, hid_size, num_hid_layers, gaussian_fixed_var=True):
        assert isinstance(ob_space, gym.spaces.Box)

        self.pdtype = pdtype = make_pdtype(ac_space)
        sequence_length = None

        ob = U.get_placeholder(name="ob", dtype=tf.float32, shape=[sequence_length] + list(ob_space.shape))

        # with tf.variable_scope("obfilter"):
        #     self.ob_rms = RunningMeanStd(shape=ob_space.shape)
        # Kiwoo: Currently block MPI, need to consider entire code in here 

        obz = tf.clip_by_value((ob - self.ob_rms.mean) / self.ob_rms.std, -5.0, 5.0)
        last_out = obz
        for i in range(num_hid_layers):
            last_out = tf.nn.tanh(U.dense(last_out, hid_size, "vffc%i"%(i+1), weight_init=U.normc_initializer(1.0)))
        self.vpred = U.dense(last_out, 1, "vffinal", weight_init=U.normc_initializer(1.0))[:,0]

        last_out = obz
        for i in range(num_hid_layers):
            last_out = tf.nn.tanh(U.dense(last_out, hid_size, "polfc%i"%(i+1), weight_init=U.normc_initializer(1.0)))
        if gaussian_fixed_var and isinstance(ac_space, gym.spaces.Box):
            mean = U.dense(last_out, pdtype.param_shape()[0]//2, "polfinal", U.normc_initializer(0.01))            
            logstd = tf.get_variable(name="logstd", shape=[1, pdtype.param_shape()[0]//2], initializer=tf.zeros_initializer())
            pdparam = U.concatenate([mean, mean * 0.0 + logstd], axis=1)
        else:
            pdparam = U.dense(last_out, pdtype.param_shape()[0], "polfinal", U.normc_initializer(0.01))

        self.pd = pdtype.pdfromflat(pdparam)

        self.state_in = []
        self.state_out = []

        stochastic = tf.placeholder(dtype=tf.bool, shape=())
        ac = U.switch(stochastic, self.pd.sample(), self.pd.mode())
        self._act = U.function([stochastic, ob], [ac, self.vpred])
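A minimal sketch of the stochastic/deterministic switch fed through the tf.bool placeholder; U.switch is assumed to wrap the same tf.cond pattern, and the sample/mode tensors here are stand-ins:

import tensorflow as tf

stochastic = tf.placeholder(tf.bool, shape=())
sampled = tf.random_normal(())   # stand-in for pd.sample()
mode = tf.constant(0.0)          # stand-in for pd.mode()
action = tf.cond(stochastic, lambda: sampled, lambda: mode)

with tf.Session() as sess:
    print(sess.run(action, feed_dict={stochastic: False}))  # always 0.0
    print(sess.run(action, feed_dict={stochastic: True}))   # random draw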
beam_search_test.py (project: seq2seq, author: google)
def test_step(self):
    beam_state = beam_search.BeamSearchState(
        log_probs=tf.nn.log_softmax(tf.ones(self.config.beam_width)),
        lengths=tf.constant(
            2, shape=[self.config.beam_width], dtype=tf.int32),
        finished=tf.zeros(
            [self.config.beam_width], dtype=tf.bool))

    logits_ = np.full([self.config.beam_width, self.config.vocab_size], 0.0001)
    logits_[0, 2] = 1.9
    logits_[0, 3] = 2.1
    logits_[1, 3] = 3.1
    logits_[1, 4] = 0.9
    logits = tf.convert_to_tensor(logits_, dtype=tf.float32)
    log_probs = tf.nn.log_softmax(logits)

    outputs, next_beam_state = beam_search.beam_search_step(
        time_=2, logits=logits, beam_state=beam_state, config=self.config)

    with self.test_session() as sess:
      outputs_, next_state_, state_, log_probs_ = sess.run(
          [outputs, next_beam_state, beam_state, log_probs])

    np.testing.assert_array_equal(outputs_.predicted_ids, [3, 3, 2])
    np.testing.assert_array_equal(outputs_.beam_parent_ids, [1, 0, 0])
    np.testing.assert_array_equal(next_state_.lengths, [3, 3, 3])
    np.testing.assert_array_equal(next_state_.finished, [False, False, False])

    expected_log_probs = state_.log_probs[[1, 0, 0]]
    expected_log_probs[0] += log_probs_[1, 3]
    expected_log_probs[1] += log_probs_[0, 3]
    expected_log_probs[2] += log_probs_[0, 2]
    np.testing.assert_array_equal(next_state_.log_probs, expected_log_probs)
beam_search_test.py (project: seq2seq, author: google)
def test_step_with_eos(self):
    beam_state = beam_search.BeamSearchState(
        log_probs=tf.nn.log_softmax(tf.ones(self.config.beam_width)),
        lengths=tf.convert_to_tensor(
            [2, 1, 2], dtype=tf.int32),
        finished=tf.constant(
            [False, True, False], dtype=tf.bool))

    logits_ = np.full([self.config.beam_width, self.config.vocab_size], 0.0001)
    logits_[0, 2] = 1.1
    logits_[1, 2] = 1.0
    logits_[2, 2] = 1.0
    logits = tf.convert_to_tensor(logits_, dtype=tf.float32)
    log_probs = tf.nn.log_softmax(logits)

    outputs, next_beam_state = beam_search.beam_search_step(
        time_=2, logits=logits, beam_state=beam_state, config=self.config)

    with self.test_session() as sess:
      outputs_, next_state_, state_, log_probs_ = sess.run(
          [outputs, next_beam_state, beam_state, log_probs])

    np.testing.assert_array_equal(outputs_.predicted_ids, [0, 2, 2])
    np.testing.assert_array_equal(outputs_.beam_parent_ids, [1, 0, 2])
    np.testing.assert_array_equal(next_state_.lengths, [1, 3, 3])
    np.testing.assert_array_equal(next_state_.finished, [True, False, False])

    expected_log_probs = state_.log_probs[outputs_.beam_parent_ids]
    expected_log_probs[1] += log_probs_[0, 2]
    expected_log_probs[2] += log_probs_[2, 2]
    np.testing.assert_array_equal(next_state_.log_probs, expected_log_probs)
beam_search_test.py (project: seq2seq, author: google)
def test_step_with_new_eos(self):
    beam_state = beam_search.BeamSearchState(
        log_probs=tf.nn.log_softmax(tf.ones(self.config.beam_width)),
        lengths=tf.constant(
            2, shape=[self.config.beam_width], dtype=tf.int32),
        finished=tf.zeros(
            [self.config.beam_width], dtype=tf.bool))

    logits_ = np.full([self.config.beam_width, self.config.vocab_size], 0.0001)
    logits_[0, 0] = 1.9
    logits_[0, 3] = 2.1
    logits_[1, 3] = 3.1
    logits_[1, 4] = 0.9
    logits = tf.convert_to_tensor(logits_, dtype=tf.float32)
    log_probs = tf.nn.log_softmax(logits)

    outputs, next_beam_state = beam_search.beam_search_step(
        time_=2, logits=logits, beam_state=beam_state, config=self.config)

    with self.test_session() as sess:
      outputs_, next_state_, state_, log_probs_ = sess.run(
          [outputs, next_beam_state, beam_state, log_probs])

    np.testing.assert_array_equal(outputs_.predicted_ids, [3, 3, 0])
    np.testing.assert_array_equal(outputs_.beam_parent_ids, [1, 0, 0])
    np.testing.assert_array_equal(next_state_.lengths, [3, 3, 2])
    np.testing.assert_array_equal(next_state_.finished, [False, False, True])

    expected_log_probs = state_.log_probs[[1, 0, 0]]
    expected_log_probs[0] += log_probs_[1, 3]
    expected_log_probs[1] += log_probs_[0, 3]
    expected_log_probs[2] += log_probs_[0, 0]
    np.testing.assert_array_equal(next_state_.log_probs, expected_log_probs)
benchmark_cnn.py (project: benchmarks, author: tensorflow)
def add_sync_queues_and_barrier(self, name_prefix, enqueue_after_list):
    """Adds ops to enqueue on all worker queues.

    Args:
      name_prefix: prefix for the shared_name of the ops.
      enqueue_after_list: control dependency from ops.

    Returns:
      an op to use as a control dependency before starting the next step.
    """
    self.sync_queue_counter += 1
    with tf.device(self.sync_queue_devices[(
        self.sync_queue_counter % len(self.sync_queue_devices))]):
      sync_queues = [
          tf.FIFOQueue(self.num_workers, [tf.bool], shapes=[[]],
                       shared_name='%s%s' % (name_prefix, i))
          for i in range(self.num_workers)]
      queue_ops = []
      # For each other worker, add an entry in a queue, signaling that it can
      # finish this step.
      token = tf.constant(False)
      with tf.control_dependencies(enqueue_after_list):
        for i, q in enumerate(sync_queues):
          if i == self.task_index:
            queue_ops.append(tf.no_op())
          else:
            queue_ops.append(q.enqueue(token))

      # Drain tokens off queue for this worker, one for each other worker.
      queue_ops.append(
          sync_queues[self.task_index].dequeue_many(len(sync_queues) - 1))

      return tf.group(*queue_ops)
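A minimal in-process sketch of the token-queue barrier idea with two hypothetical workers (the real code spreads the queues across worker devices):

import tensorflow as tf

q0 = tf.FIFOQueue(2, [tf.bool], shapes=[[]], shared_name='sync_q_0')
q1 = tf.FIFOQueue(2, [tf.bool], shapes=[[]], shared_name='sync_q_1')
token = tf.constant(False)

signal_worker1 = q1.enqueue(token)    # worker 0 tells worker 1 it is done
wait_for_others = q0.dequeue_many(1)  # worker 0 waits for one token per peer

with tf.Session() as sess:
    sess.run(q0.enqueue(token))       # pretend worker 1 already signaled
    sess.run([signal_worker1, wait_for_others])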
aggressive_multi_head_UNET_2d.py (project: lung-cancer-detector, author: YichenGong)
def create_inputs(self, image_size):
        print("Creating input Placeholders...")
        #input image
        self.X = tf.placeholder(dtype=tf.float32, 
            shape=[None, image_size[0], image_size[1], 1],
            name="in")

        #Outputs of the different heads

        #Nodule head
        self.Y_nodule = tf.placeholder(dtype=tf.float32,
            shape=[None, image_size[0], image_size[1], 1],
            name="out_nodule")
        self.Y_nodule_weight = tf.placeholder_with_default(input=1.0,
            shape=None,
            name="nodule_weight")

        #Cancer head
        self.Y_cancer = tf.placeholder(dtype=tf.float32,
            shape=[None, 1],
            name="out_cancer")
        self.Y_cancer_weight = tf.placeholder_with_default(input=1.0,
            shape=None,
            name="cancer_weight")

        #Boolean variables to check head and mode
        self.is_training = tf.placeholder(dtype=tf.bool,
            name="is_training")
        self.is_nodule = tf.placeholder(dtype=tf.bool,
            name="is_nodule")
        self.is_cancer = tf.placeholder(dtype=tf.bool,
            name="is_cancer")

        #Probability for dropout
        self.drop_prob = tf.placeholder_with_default(input=0.0,
            shape=None,
            name="dropout_probability")

        print("Created input placeholders!")
cnn.py (project: kaggle-review, author: daxiongshu)
def _build(self):
        V = self.V # vocabulary size
        M = self.flags.embedding_size # 64
        C = self.flags.classes
        W = self.flags.window_size
        S = self.flags.seq_len*2+1
        B = self.flags.batch_size
        H = 32
        is_training = tf.placeholder(dtype=tf.bool)
        netname = "CBOW"
        with tf.variable_scope(netname):
            self.inputs = tf.placeholder(dtype=tf.int32,shape=[None, S]) #[B,S]
            # each element is a word id.

            layer_name = "{}/embedding".format(netname)
            x = self._get_embedding(layer_name, self.inputs, V, M, reuse=False) # [B, S, M]       
        netname = "BaoBaoMiaoCnn"
        with tf.variable_scope(netname):
            x = tf.expand_dims(x, axis=3) # [B,S,M,1]

            net1 = self.conv_maxpool(x,W,M,S,H,"%s/conv1"%netname,1)     # [B,1,1,16]
            net2 = self.conv_maxpool(x,W*2,M,S,H,"%s/conv2"%netname,1)
            net3 = self.conv_maxpool(x,W//2,M,S,H,"%s/conv3"%netname,1)
            net = tf.concat([net1,net2,net3],axis=3) # [B,1,1,48]
            net = self._batch_normalization(net, layer_name='%s/batch_norm1'%(netname))
            net = tf.squeeze(net) # [B,48]
            #net = self._fc(net, fan_in=H*3, fan_out=H, layer_name="%s/fc0"%netname, activation='relu')
            net = self._fc(net, fan_in=H*3, fan_out=C, layer_name="%s/fc1"%netname, activation=None)
            self.logit = net
        self.is_training = is_training
a2_transformer.py (project: text_classification, author: brightmart)
def __init__(self, num_classes, learning_rate, batch_size, decay_steps, decay_rate, sequence_length,
                 vocab_size, embed_size,d_model,d_k,d_v,h,num_layer,is_training,decoder_sent_length=6,
                 initializer=tf.random_normal_initializer(stddev=0.1),clip_gradients=5.0,l2_lambda=0.0001):
        """init all hyperparameter here"""
        super(Transformer, self).__init__(d_model, d_k, d_v, sequence_length, h, batch_size, num_layer=num_layer) #init some fields by using parent class.

        self.num_classes = num_classes
        self.sequence_length = sequence_length
        self.vocab_size = vocab_size
        self.embed_size = d_model
        self.learning_rate = tf.Variable(learning_rate, trainable=False, name="learning_rate")
        self.learning_rate_decay_half_op = tf.assign(self.learning_rate, self.learning_rate * 0.5)
        self.initializer = initializer
        self.decoder_sent_length=decoder_sent_length
        self.clip_gradients=clip_gradients
        self.l2_lambda=l2_lambda

        self.is_training = is_training  # could also be a tf.bool placeholder: tf.placeholder(tf.bool, name="is_training")
        self.input_x = tf.placeholder(tf.int32, [self.batch_size, self.sequence_length], name="input_x")                 #x  batch_size
        self.decoder_input = tf.placeholder(tf.int32, [self.batch_size, self.decoder_sent_length],name="decoder_input")  #y, but shift None
        self.input_y_label = tf.placeholder(tf.int32, [self.batch_size, self.decoder_sent_length], name="input_y_label") #y, but shift None
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

        self.global_step = tf.Variable(0, trainable=False, name="Global_Step")
        self.epoch_step = tf.Variable(0, trainable=False, name="Epoch_Step")
        self.epoch_increment = tf.assign(self.epoch_step, tf.add(self.epoch_step, tf.constant(1)))
        self.decay_steps, self.decay_rate = decay_steps, decay_rate

        self.instantiate_weights()
        self.logits = self.inference() #logits shape:[batch_size,decoder_sent_length,self.num_classes]

        self.predictions = tf.argmax(self.logits, axis=2, name="predictions")
        self.accuracy = tf.constant(0.5)  # fake accuracy (compute the real accuracy outside the graph with calculate_accuracy(...) in train.py)
        if self.is_training is False:  # not training: skip loss and back-propagation (Python-level check; only meaningful when is_training is a plain bool, not a tf.bool tensor)
            return
        self.loss_val = self.loss_seq2seq()
        self.train_op = self.train()
a2_transformer_classification.py (project: text_classification, author: brightmart)
def __init__(self, num_classes, learning_rate, batch_size, decay_steps, decay_rate, sequence_length,
                 vocab_size, embed_size,d_model,d_k,d_v,h,num_layer,is_training,
                 initializer=tf.random_normal_initializer(stddev=0.1),clip_gradients=5.0,l2_lambda=0.0001,use_residual_conn=False):
        """init all hyperparameter here"""
        super(Transformer, self).__init__(d_model, d_k, d_v, sequence_length, h, batch_size, num_layer=num_layer) #init some fields by using parent class.

        self.num_classes = num_classes
        self.sequence_length = sequence_length
        self.vocab_size = vocab_size
        self.embed_size = d_model
        self.learning_rate = tf.Variable(learning_rate, trainable=False, name="learning_rate")
        self.learning_rate_decay_half_op = tf.assign(self.learning_rate, self.learning_rate * 0.5)
        self.initializer = initializer
        self.clip_gradients=clip_gradients
        self.l2_lambda=l2_lambda

        self.is_training = is_training  # could also be a tf.bool placeholder: tf.placeholder(tf.bool, name="is_training")
        self.input_x = tf.placeholder(tf.int32, [self.batch_size, self.sequence_length], name="input_x")                 #x  batch_size
        self.input_y_label = tf.placeholder(tf.int32, [self.batch_size], name="input_y_label")
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

        self.global_step = tf.Variable(0, trainable=False, name="Global_Step")
        self.epoch_step = tf.Variable(0, trainable=False, name="Epoch_Step")
        self.epoch_increment = tf.assign(self.epoch_step, tf.add(self.epoch_step, tf.constant(1)))
        self.decay_steps, self.decay_rate = decay_steps, decay_rate
        self.use_residual_conn=use_residual_conn

        self.instantiate_weights()
        self.logits = self.inference() #logits shape:[batch_size,self.num_classes]

        self.predictions = tf.argmax(self.logits, axis=1, name="predictions")
        correct_prediction = tf.equal(tf.cast(self.predictions, tf.int32),self.input_y_label)
        self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name="Accuracy")  # shape=()
        if self.is_training is False:  # not training: skip loss and back-propagation (Python-level check; only meaningful when is_training is a plain bool, not a tf.bool tensor)
            return
        self.loss_val = self.loss()
        self.train_op = self.train()
policy_agent.py (project: DeepPath, author: xwhan)
def __init__(self, scope = 'policy_network', learning_rate = 0.001):
        self.initializer = tf.contrib.layers.xavier_initializer()
        with tf.variable_scope(scope):
            self.state = tf.placeholder(tf.float32, [None, state_dim], name = 'state')
            self.action = tf.placeholder(tf.int32, [None], name = 'action')
            self.target = tf.placeholder(tf.float32, name = 'target')
            self.action_prob = policy_nn(self.state, state_dim, action_space, self.initializer)

            action_mask = tf.cast(tf.one_hot(self.action, depth = action_space), tf.bool)
            self.picked_action_prob = tf.boolean_mask(self.action_prob, action_mask)

            self.loss = tf.reduce_sum(-tf.log(self.picked_action_prob)*self.target) + sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES, scope=scope))
            self.optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate)
            self.train_op = self.optimizer.minimize(self.loss)
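A minimal numeric sketch of the one_hot -> bool mask -> boolean_mask pattern used above to pick each taken action's probability (hypothetical values):

import tensorflow as tf

action_prob = tf.constant([[0.2, 0.8],
                           [0.6, 0.4]])
action = tf.constant([1, 0])
mask = tf.cast(tf.one_hot(action, depth=2), tf.bool)
picked = tf.boolean_mask(action_prob, mask)  # one probability per batch row

with tf.Session() as sess:
    print(sess.run(picked))  # [0.8 0.6]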
sl_policy.py (project: DeepPath, author: xwhan)
def __init__(self, learning_rate = 0.001):
        self.initializer = tf.contrib.layers.xavier_initializer()
        with tf.variable_scope('supervised_policy'):
            self.state = tf.placeholder(tf.float32, [None, state_dim], name = 'state')
            self.action = tf.placeholder(tf.int32, [None], name = 'action')
            self.action_prob = policy_nn(self.state, state_dim, action_space, self.initializer)

            action_mask = tf.cast(tf.one_hot(self.action, depth = action_space), tf.bool)
            self.picked_action_prob = tf.boolean_mask(self.action_prob, action_mask)

            self.loss = tf.reduce_sum(-tf.log(self.picked_action_prob)) + sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES, scope = 'supervised_policy'))
            self.optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate)
            self.train_op = self.optimizer.minimize(self.loss)

