Example source code for Python's bool() class

keras_extensions.py (project: onto-lstm, author: pdasigi)
def switch(condition, then_tensor, else_tensor):
    """
    Keras' implementation of switch for the TensorFlow backend uses tf.switch, which accepts only scalar conditions.
    It should use tf.select instead.
    """
    if K.backend() == 'tensorflow':
        import tensorflow as tf
        condition_shape = condition.get_shape()
        input_shape = then_tensor.get_shape()
        if condition_shape[-1] != input_shape[-1] and condition_shape[-1] == 1:
            # The last dim is an embedding dim, which Keras does not mask, but TF requires
            # the condition and the then/else tensors to have the same shape.
            condition = K.dot(tf.cast(condition, tf.float32), tf.ones((1, input_shape[-1])))
        return tf.select(tf.cast(condition, dtype=tf.bool), then_tensor, else_tensor)
    else:
        import theano.tensor as T
        return T.switch(condition, then_tensor, else_tensor)
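For reference, tf.select was removed in later TensorFlow releases; tf.where(condition, x, y) is its element-wise replacement. The following is a minimal sketch of the broadcasting case handled above, assuming TensorFlow 1.x and written against plain TF rather than the Keras backend:

import tensorflow as tf

# condition has shape [2, 1]; the then/else tensors have shape [2, 3].
condition = tf.constant([[1.0], [0.0]])
then_tensor = tf.ones((2, 3))
else_tensor = tf.zeros((2, 3))

# Broadcast the per-row mask across the last (embedding) dimension, as switch() does.
condition_full = tf.matmul(condition, tf.ones((1, 3)))
out = tf.where(tf.cast(condition_full, tf.bool), then_tensor, else_tensor)

with tf.Session() as sess:
    print(sess.run(out))  # row 0 selects ones, row 1 selects zeros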
beam_search.py (project: seq2seq, author: google)
def create_initial_beam_state(config):
  """Creates an instance of `BeamState` that can be used on the first
  call to `beam_step`.

  Args:
    config: A BeamSearchConfig

  Returns:
    An instance of `BeamState`.
  """
  return BeamSearchState(
      log_probs=tf.zeros([config.beam_width]),
      finished=tf.zeros(
          [config.beam_width], dtype=tf.bool),
      lengths=tf.zeros(
          [config.beam_width], dtype=tf.int32))
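For context, a hedged sketch of how this function might be exercised; BeamSearchConfig and BeamSearchState here are stand-ins limited to the fields visible in this snippet, not the project's full definitions:

from collections import namedtuple

import tensorflow as tf

BeamSearchConfig = namedtuple("BeamSearchConfig", ["beam_width"])
BeamSearchState = namedtuple("BeamSearchState", ["log_probs", "finished", "lengths"])

state = create_initial_beam_state(BeamSearchConfig(beam_width=5))

with tf.Session() as sess:
    log_probs, finished, lengths = sess.run(state)
    print(finished.dtype, finished)  # bool, all False before the first beam_step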
architecture.py (project: traffic_detection_yolo2, author: wAuner)
def build_model(inp_ph, train_flag_ph, lbl_ph, mask_ph):
    head = build_graph(inp_ph, train_flag_ph, 32, 5, 3)
    pred = yolo_prediction(head)
    loss = yolo_loss(lbl_ph, pred, mask_ph)

    return loss

#
# input_tensor = tf.placeholder(dtype=tf.float32, shape=(None, 416, 416, 3))
# train_flag = tf.placeholder(dtype=tf.bool)
# labels = tf.placeholder(tf.float32, shape=(None, 13, 13, 5, 7))
# mask = tf.placeholder(tf.bool, shape=(None, 13, 13, 5))
#
# head = build_graph(input_tensor, train_flag, 32, 5, 3)
# pred = yolo_prediction(head)
# loss = yolo_loss(labels, pred, mask)
keras_patches.py (project: keras-image-captioning, author: danieljl)
def clip_norm(g, c, n):
    if c > 0:
        if K.backend() == 'tensorflow':
            import tensorflow as tf
            import copy
            condition = n >= c
            then_expression = tf.scalar_mul(c / n, g)
            else_expression = g

            if hasattr(then_expression, 'get_shape'):
                g_shape = copy.copy(then_expression.get_shape())
            elif hasattr(then_expression, 'dense_shape'):
                g_shape = copy.copy(then_expression.dense_shape)
            if condition.dtype != tf.bool:
                condition = tf.cast(condition, 'bool')
            g = K.tensorflow_backend.control_flow_ops.cond(
                condition, lambda: then_expression, lambda: else_expression)
            if hasattr(then_expression, 'get_shape'):
                g.set_shape(g_shape)
            elif hasattr(then_expression, 'dense_shape'):
                g._dense_shape = g_shape
        else:
            g = K.switch(n >= c, g * c / n, g)
    return g
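A hedged usage sketch of how a helper like clip_norm is typically driven (mirroring the way Keras optimizers call it): compute the global gradient norm once, then rescale each gradient. The gradient values below are illustrative only:

import tensorflow as tf
from keras import backend as K

grads = [tf.constant([3.0, 4.0]), tf.constant([0.0, 12.0])]
clipnorm = 5.0

# Global L2 norm over all gradients.
norm = K.sqrt(sum([K.sum(K.square(g)) for g in grads]))
clipped = [clip_norm(g, clipnorm, norm) for g in grads]

with tf.Session() as sess:
    print(sess.run([norm] + clipped))  # norm 13.0, gradients rescaled by clipnorm / norm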
agent.py (project: IntelAct-Vizdoom, author: chendagui16)
def __make_net(self, input_images, input_measure, input_actions, reuse=False):
        if reuse:
            tf.get_variable_scope().reuse_variables()
        fc_val_params = copy.deepcopy(self.__fc_joint_params)
        fc_val_params[-1]['out_dims'] = self.__target_dim

        fc_adv_params = copy.deepcopy(self.__fc_joint_params)
        fc_adv_params[-1]['out_dims'] = len(self.__net_discrete_actions) * self.__target_dim

        if self.verbose:
            print('fc_val_params:', fc_val_params)
            print('fc_adv_params:', fc_adv_params)

        p_img_conv = ly.conv_encoder(input_images, self.__conv_params, 'p_img_conv', msra_coeff=0.9)
        p_img_fc = ly.fc_net(ly.flatten(p_img_conv), self.__fc_img_params, 'p_img_fc', msra_coeff=0.9)
        p_meas_fc = ly.fc_net(input_measure, self.__fc_measure_params, 'p_meas_fc', msra_coeff=0.9)
        p_val_fc = ly.fc_net(tf.concat([p_img_fc, p_meas_fc], 1),
                             fc_val_params, 'p_val_fc', last_linear=True, msra_coeff=0.9)
        p_adv_fc = ly.fc_net(tf.concat([p_img_fc, p_meas_fc], 1),
                             fc_adv_params, 'p_adv_fc', last_linear=True, msra_coeff=0.9)
        p_adv_fc_nomean = p_adv_fc - tf.reduce_mean(p_adv_fc, reduction_indices=1, keep_dims=True)

        self.__pred_all_nomean = tf.reshape(p_adv_fc_nomean, [-1, len(self.__net_discrete_actions), self.__target_dim])
        self.__pred_all = self.__pred_all_nomean + tf.reshape(p_val_fc, [-1, 1, self.__target_dim])
        self.__pred_relevant = tf.boolean_mask(self.__pred_all, tf.cast(input_actions, tf.bool))
tensorflow_backend.py (project: keras, author: GeekLiB)
def switch(condition, then_expression, else_expression):
    '''Switches between two operations
    depending on a scalar value (int or bool).
    Note that both `then_expression` and `else_expression`
    should be symbolic tensors of the *same shape*.

    # Arguments
        condition: scalar tensor.
        then_expression: TensorFlow operation.
        else_expression: TensorFlow operation.
    '''
    x_shape = copy.copy(then_expression.get_shape())
    if condition.dtype != tf.bool:
        condition = tf.cast(condition, 'bool')
    x = _cond(condition,
              lambda: then_expression,
              lambda: else_expression)
    x.set_shape(x_shape)
    return x
tensorflow_backend.py (project: keras, author: GeekLiB)
def in_top_k(predictions, targets, k):
    '''Returns whether the `targets` are in the top `k` `predictions`

    # Arguments
        predictions: A tensor of shape batch_size x classes and type float32.
        targets: A tensor of shape batch_size and type int32 or int64.
        k: An int, number of top elements to consider.

    # Returns
        A tensor of shape batch_size and type bool. output_i is True if
        targets_i is within top-k values of predictions_i
    '''
    return tf.nn.in_top_k(predictions, targets, k)
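A small check of the wrapped op, assuming the TensorFlow 1.x session API: with k=2, a target is marked True when its score is among the two largest in its row.

import tensorflow as tf

predictions = tf.constant([[0.10, 0.60, 0.30],
                           [0.80, 0.05, 0.15]], dtype=tf.float32)
targets = tf.constant([2, 1], dtype=tf.int32)

hits = in_top_k(predictions, targets, 2)  # tf.bool tensor of shape [batch_size]

with tf.Session() as sess:
    print(sess.run(hits))  # [ True False]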


# CONVOLUTIONS
hmc.py (project: zhusuan, author: thu-ml)
def __init__(self, initial_stepsize, adapt_step_size, gamma, t0, kappa,
                 delta):
        with tf.name_scope("StepsizeTuner"):
            self.adapt_step_size = tf.convert_to_tensor(
                adapt_step_size, dtype=tf.bool, name="adapt_step_size")
            self.initial_stepsize = initial_stepsize

            self.gamma = tf.convert_to_tensor(gamma, dtype=tf.float32,
                                              name="gamma")
            self.t0 = tf.convert_to_tensor(t0, dtype=tf.float32, name="t0")
            self.kappa = tf.convert_to_tensor(kappa, dtype=tf.float32,
                                              name="kappa")
            self.delta = tf.convert_to_tensor(delta, dtype=tf.float32,
                                              name="delta")
            self.mu = tf.constant(10 * initial_stepsize, dtype=tf.float32,
                                  name="mu")

            self.step = tf.Variable(0.0, dtype=tf.float32,
                                    name="step", trainable=False)
            self.log_epsilon_bar = tf.Variable(
                0.0, dtype=tf.float32, name="log_epsilon_bar", trainable=False)
            self.h_bar = tf.Variable(0.0, dtype=tf.float32,
                                     name="h_bar", trainable=False)
utils.py (project: zhusuan, author: thu-ml)
def is_same_dynamic_shape(x, y):
    """
    Whether `x` and `y` have the same dynamic shape.

    :param x: A Tensor.
    :param y: A Tensor.
    :return: A scalar Tensor of `bool`.
    """
    # TensorFlow has a bug where static shape inference is not done correctly
    # inside nested tf.cond() calls, so instead of comparing the shapes of x
    # and y directly we compare their concatenations.
    return tf.cond(
        tf.equal(tf.rank(x), tf.rank(y)),
        lambda: tf.reduce_all(tf.equal(
            tf.concat([tf.shape(x), tf.shape(y)], 0),
            tf.concat([tf.shape(y), tf.shape(x)], 0))),
        lambda: tf.convert_to_tensor(False, tf.bool))
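A hedged sanity check of the concatenation trick, assuming TensorFlow 1.x: ranks are compared first, and only when they match are the concatenated shape vectors compared element-wise.

import tensorflow as tf

a = tf.placeholder(tf.float32, shape=[None, 3])
b = tf.placeholder(tf.float32, shape=[None, 3])
c = tf.placeholder(tf.float32, shape=[None])

same_ab = is_same_dynamic_shape(a, b)
same_ac = is_same_dynamic_shape(a, c)

with tf.Session() as sess:
    feed = {a: [[1., 2., 3.]], b: [[4., 5., 6.]], c: [1.]}
    print(sess.run([same_ab, same_ac], feed_dict=feed))  # [True, False]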
LeNetBN.py (project: dynamic-training-bench, author: galeone)
def get(self, images, num_classes, train_phase=False, l2_penalty=0.0):
        """ define the model with its inputs.
        Use this function to define the model in training and when exporting the model
        in the protobuf format.

        Args:
            images: model input
            num_classes: number of classes to predict
            train_phase: set it to True when defining the model for training
            l2_penalty: float value, weight decay (l2) penalty

        Returns:
            is_training_: tf.bool placeholder to enable/disable training ops at run time
            logits: the model output
        """
        is_training_ = tf.placeholder_with_default(
            False, shape=(), name="is_training_")
        # build a graph that computes the logits predictions from the images
        logits = self._inference(images, num_classes, is_training_, train_phase,
                                 l2_penalty)

        return is_training_, logits
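The pattern this model relies on can be shown in isolation: tf.placeholder_with_default yields a tf.bool tensor that evaluates to False unless the caller feeds True, which is how training-only ops are toggled at run time. A minimal sketch with dropout standing in for the project's _inference():

import tensorflow as tf

is_training_ = tf.placeholder_with_default(False, shape=(), name="is_training_")

x = tf.ones([4, 8])
# Training-only behaviour (here: dropout) is gated on the tf.bool tensor.
out = tf.cond(is_training_,
              lambda: tf.nn.dropout(x, keep_prob=0.5),
              lambda: x)

with tf.Session() as sess:
    print(sess.run(out).mean())                                  # 1.0, inference path
    print(sess.run(out, feed_dict={is_training_: True}).mean())  # noisy, training path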
VGGDirectDropout.py (project: dynamic-training-bench, author: galeone)
def get(self, images, num_classes, train_phase=False, l2_penalty=0.0):
        """ define the model with its inputs.
        Use this function to define the model in training and when exporting the model
        in the protobuf format.

        Args:
            images: model input
            num_classes: number of classes to predict
            train_phase: set it to True when defining the model for training
            l2_penalty: float value, weight decay (l2) penalty

        Returns:
            is_training_: tf.bool placeholder to enable/disable training ops at run time
            logits: the model output
        """
        is_training_ = tf.placeholder_with_default(
            False, shape=(), name="is_training_")
        # build a graph that computes the logits predictions from the images
        logits = self._inference(images, num_classes, is_training_, train_phase,
                                 l2_penalty)

        return is_training_, logits
LeNet.py (project: dynamic-training-bench, author: galeone)
def get(self, images, num_classes, train_phase=False, l2_penalty=0.0):
        """ define the model with its inputs.
        Use this function to define the model in training and when exporting the model
        in the protobuf format.

        Args:
            images: model input
            num_classes: number of classes to predict
            train_phase: set it to True when defining the model for training
            l2_penalty: float value, weight decay (l2) penalty

        Returns:
            is_training_: tf.bool placeholder to enable/disable training ops at run time
            logits: the model output
        """
        is_training_ = tf.placeholder_with_default(
            False, shape=(), name="is_training_")
        # build a graph that computes the logits predictions from the images
        logits = self._inference(images, num_classes, is_training_, train_phase,
                                 l2_penalty)

        return is_training_, logits
LeNetDirectDropout.py (project: dynamic-training-bench, author: galeone)
def get(self, images, num_classes, train_phase=False, l2_penalty=0.0):
        """ define the model with its inputs.
        Use this function to define the model in training and when exporting the model
        in the protobuf format.

        Args:
            images: model input
            num_classes: number of classes to predict
            train_phase: set it to True when defining the model for training
            l2_penalty: float value, weight decay (l2) penalty

        Returns:
            is_training_: tf.bool placeholder to enable/disable training ops at run time
            logits: the model output
        """
        is_training_ = tf.placeholder_with_default(
            False, shape=(), name="is_training_")
        # build a graph that computes the logits predictions from the images
        logits = self._inference(images, num_classes, is_training_, train_phase,
                                 l2_penalty)

        return is_training_, logits
config.py (project: tflearn, author: tflearn)
def init_training_mode():
    """  init_training_mode.

    Creates the `is_training` variable and its ops if they haven't been created
    yet. This op is required if you are using layers such as dropout or
    batch normalization independently of TFLearn models (DNN or Trainer class).

    """
    # 'is_training' collection stores the training mode variable
    coll = tf.get_collection('is_training')
    if len(coll) == 0:
        tr_var = variable(
            "is_training", dtype=tf.bool, shape=[],
            initializer=tf.constant_initializer(False),
            trainable=False)
        tf.add_to_collection('is_training', tr_var)
        # 'is_training_ops' stores the ops to update training mode variable
        a = tf.assign(tr_var, True)
        b = tf.assign(tr_var, False)
        tf.add_to_collection('is_training_ops', a)
        tf.add_to_collection('is_training_ops', b)
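A hedged sketch of how these collections are typically consumed: read the tf.bool variable back from 'is_training' and run one of the assign ops in 'is_training_ops' to flip it (this assumes the surrounding tflearn module, including its variable() helper, is importable):

import tensorflow as tf

init_training_mode()
is_training = tf.get_collection('is_training')[0]
set_training_op, set_inference_op = tf.get_collection('is_training_ops')

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(is_training))   # False (initial value)
    sess.run(set_training_op)
    print(sess.run(is_training))   # True
    sess.run(set_inference_op)
    print(sess.run(is_training))   # False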
recommender_wide_and_deep.py (project: tflearn, author: tflearn)
def __init__(self, model_type="wide+deep", verbose=None, name=None, tensorboard_verbose=3, 
                 wide_learning_rate=0.001, deep_learning_rate=0.001, checkpoints_dir=None):
        '''
        model_type = `str`: wide or deep or wide+deep
        verbose = `bool`
        name = `str` used for run_id (defaults to model_type)
        tensorboard_verbose = `int`: logging level for tensorboard (0, 1, 2, or 3)
        wide_learning_rate = `float`: defaults to 0.001
        deep_learning_rate = `float`: defaults to 0.001
        checkpoints_dir = `str`: where checkpoint files will be stored (defaults to "CHECKPOINTS")
        '''
        self.model_type = model_type or "wide+deep"
        assert self.model_type in self.AVAILABLE_MODELS
        self.verbose = verbose or 0
        self.tensorboard_verbose = tensorboard_verbose
        self.name = name or self.model_type # name is used for the run_id
        self.data_columns = COLUMNS
        self.continuous_columns = CONTINUOUS_COLUMNS
        self.categorical_columns = CATEGORICAL_COLUMNS  # dict with category_name: category_size
        self.label_column = LABEL_COLUMN
        self.checkpoints_dir = checkpoints_dir or "CHECKPOINTS"
        if not os.path.exists(self.checkpoints_dir):
            os.mkdir(self.checkpoints_dir)
            print("Created checkpoints directory %s" % self.checkpoints_dir)
        self.build_model([wide_learning_rate, deep_learning_rate])
attention_test.py (project: sonnet, author: deepmind)
def setUp(self):
    super(AttentiveReadTest, self).setUp()

    self._batch_size = 3
    self._memory_size = 4
    self._memory_word_size = 1
    self._query_word_size = 2
    self._memory = tf.reshape(
        tf.cast(tf.range(0, 3 * 4 * 1), dtype=tf.float32), shape=[3, 4, 1])
    self._query = tf.reshape(
        tf.cast(tf.range(0, 3 * 2), dtype=tf.float32), shape=[3, 2])
    self._memory_mask = tf.convert_to_tensor(
        [
            [True, True, True, True],
            [True, True, True, False],
            [True, True, False, False],
        ],
        dtype=tf.bool)
    self._attention_logit_mod = ConstantZero()
    self._attention_mod = snt.AttentiveRead(self._attention_logit_mod)
attention_test.py (project: sonnet, author: deepmind)
def testNoMemorySlotsLeft(self):
    # Every example must have at least one unmasked memory slot for attention
    # to work.
    memory_mask = tf.convert_to_tensor(
        [
            [True, True, True, True],
            [True, True, True, False],
            [False, False, False, False],
        ],
        dtype=tf.bool)
    attention_output = self._attention_mod(
        self._memory, self._query, memory_mask=memory_mask)
    x = attention_output.read
    with self.test_session() as sess:
      with self.assertRaises(tf.errors.InvalidArgumentError):
        sess.run(x)
beam_search.py (project: conv_seq2seq, author: tobyyouup)
def create_initial_beam_state(config):
  """Creates an instance of `BeamState` that can be used on the first
  call to `beam_step`.

  Args:
    config: A BeamSearchConfig

  Returns:
    An instance of `BeamState`.
  """
  return BeamSearchState(
      log_probs=tf.zeros([config.beam_width]),
      finished=tf.zeros(
          [config.beam_width], dtype=tf.bool),
      lengths=tf.zeros(
          [config.beam_width], dtype=tf.int32))
schema_io_v1_json_reader.py (project: transform, author: tensorflow)
def _from_domain_dict(domain):
  """Translate a JSON domain dict into a Domain."""
  if domain.get('ints') is not None:
    def maybe_to_int(s):
      return int(s) if s is not None else None
    return sch.IntDomain(
        tf.int64,
        maybe_to_int(domain['ints'].get('min')),
        maybe_to_int(domain['ints'].get('max')),
        domain['ints'].get('isCategorical'),
        domain['ints'].get('vocabularyFile', ''))
  if domain.get('floats') is not None:
    return sch.FloatDomain(tf.float32)
  if domain.get('strings') is not None:
    return sch.StringDomain(tf.string)
  if domain.get('bools') is not None:
    return sch.BoolDomain(tf.bool)
  raise ValueError('Unknown domain: {}'.format(domain))
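A hedged usage sketch, assuming sch is the tensorflow_transform schema module imported by the surrounding file; the dicts below are illustrative stand-ins for parsed schema JSON:

int_domain = _from_domain_dict({'ints': {'min': '0', 'max': '9', 'isCategorical': True}})
bool_domain = _from_domain_dict({'bools': {}})

print(int_domain)   # sch.IntDomain over tf.int64 with min=0, max=9
print(bool_domain)  # sch.BoolDomain over tf.bool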
future_predictor_agent_basic.py (project: DirectFuturePrediction, author: IntelVCL)
def make_net(self, input_images, input_measurements, input_actions, input_objectives, reuse=False):
        if reuse:
            tf.get_variable_scope().reuse_variables()

        self.fc_joint_params['out_dims'][-1] = len(self.net_discrete_actions) * self.target_dim
        p_img_conv = my_ops.conv_encoder(input_images, self.conv_params, 'p_img_conv', msra_coeff=0.9)
        p_img_fc = my_ops.fc_net(my_ops.flatten(p_img_conv), self.fc_img_params, 'p_img_fc', msra_coeff=0.9)
        p_meas_fc = my_ops.fc_net(input_measurements, self.fc_meas_params, 'p_meas_fc', msra_coeff=0.9)
        if isinstance(self.fc_obj_params, np.ndarray):
            p_obj_fc = my_ops.fc_net(input_objectives, self.fc_obj_params, 'p_obj_fc', msra_coeff=0.9)
            p_concat_fc = tf.concat([p_img_fc,p_meas_fc,p_obj_fc], 1)
        else:
            p_concat_fc = tf.concat([p_img_fc,p_meas_fc], 1)
            if self.random_objective_coeffs:
                raise Exception('Need fc_obj_params with randomized objectives')

        p_joint_fc = my_ops.fc_net(p_concat_fc, self.fc_joint_params, 'p_joint_fc', last_linear=True, msra_coeff=0.9)
        pred_all = tf.reshape(p_joint_fc, [-1, len(self.net_discrete_actions), self.target_dim])
        pred_relevant = tf.boolean_mask(pred_all, tf.cast(input_actions, tf.bool))

        return pred_all, pred_relevant
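The last two lines above keep, for each batch element, only the prediction vector of the action actually taken; tf.boolean_mask with a one-hot mask cast to tf.bool does the selection. A small illustration of that step alone, with made-up shapes:

import tensorflow as tf

# pred_all: [batch, num_actions, target_dim]; input_actions: one-hot over actions.
pred_all = tf.reshape(tf.range(2 * 3 * 4, dtype=tf.float32), [2, 3, 4])
input_actions = tf.constant([[0, 1, 0],
                             [1, 0, 0]], dtype=tf.float32)

pred_relevant = tf.boolean_mask(pred_all, tf.cast(input_actions, tf.bool))

with tf.Session() as sess:
    print(sess.run(pred_relevant))  # shape [2, 4]: action 1 of example 0, action 0 of example 1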
test_layers.py (project: LiTeFlow, author: petrux)
def test_output(self):
        """Test the DynamicDecoder.output() method."""

        helper = mock.Mock()
        decoder = mock.Mock()
        zero_output = tf.constant([[0, 0, 0], [0, 0, 0]], dtype=tf.float32)
        decoder.zero_output.side_effect = [zero_output]

        output = tf.constant([[23, 23, 23], [23, 23, 23]], dtype=tf.float32)
        finished = tf.constant([True, False], dtype=tf.bool)

        dyndec = layers.DynamicDecoder(decoder, helper)
        act_output_t = dyndec.output(output, finished)
        exp_output = np.asarray([[0, 0, 0], [23, 23, 23]], dtype=np.float32) # pylint: disable=I0011,E1101

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            act_output = sess.run(act_output_t)

        helper.finished.assert_not_called()
        decoder.zero_output.assert_called_once()
        self.assertAllEqual(exp_output, act_output)
test_layers.py (project: LiTeFlow, author: petrux)
def test_condition(self):
        """Test the DynamicDecoder.condition() method."""

        helper = mock.Mock()
        decoder = mock.Mock()

        dyndec = layers.DynamicDecoder(decoder, helper)
        finished = [(tf.constant([True], dtype=tf.bool), False),
                    (tf.constant([False], dtype=tf.bool), True),
                    (tf.constant([True, False], dtype=tf.bool), True),
                    (tf.constant([True, True], dtype=tf.bool), False)]

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            for tensor, expected in finished:
                actual = sess.run(dyndec.cond(None, None, None, tensor, None))
                self.assertEqual(expected, actual)

        helper.assert_not_called()
        decoder.assert_not_called()
tensorflow_backend.py (project: keraflow, author: ipod825)
def switch(self, condition, then_expression, else_expression):
        '''Switches between two operations depending on a scalar value (int or bool).
        Note that both `then_expression` and `else_expression`
        should be symbolic tensors of the *same shape*.

        # Arguments
            condition: scalar tensor.
            then_expression: TensorFlow operation.
            else_expression: TensorFlow operation.
        '''
        x_shape = copy.copy(then_expression.get_shape())
        x = tf.python.control_flow_ops.cond(self.cast(condition, 'bool'),
                                            lambda: then_expression,
                                            lambda: else_expression)
        x.set_shape(x_shape)
        return x
tensorflow_backend.py (project: deep-learning-keras-projects, author: jasmeetsb)
def var(x, axis=None, keepdims=False):
    """Variance of a tensor, alongside the specified axis.

    # Arguments
        x: A tensor or variable.
        axis: An integer, the axis to compute the variance.
        keepdims: A boolean, whether to keep the dimensions or not.
            If `keepdims` is `False`, the rank of the tensor is reduced
            by 1. If `keepdims` is `True`,
            the reduced dimension is retained with length 1.

    # Returns
        A tensor with the variance of elements of `x`.
    """
    axis = _normalize_axis(axis, ndim(x))
    if x.dtype.base_dtype == tf.bool:
        x = tf.cast(x, floatx())
    m = tf.reduce_mean(x, reduction_indices=axis, keep_dims=True)
    devs_squared = tf.square(x - m)
    return tf.reduce_mean(devs_squared,
                          reduction_indices=axis,
                          keep_dims=keepdims)
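The tf.bool branch above makes the variance of boolean tensors well defined by first casting them to the backend float type. A small hedged check, assuming the rest of this backend module (floatx, ndim, _normalize_axis) is available alongside var():

import tensorflow as tf

flags = tf.constant([True, False, True, False])
v = var(flags)  # cast to [1., 0., 1., 0.], mean 0.5, variance 0.25

with tf.Session() as sess:
    print(sess.run(v))  # 0.25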
tensorflow_backend.py (project: deep-learning-keras-projects, author: jasmeetsb)
def mean(x, axis=None, keepdims=False):
    """Mean of a tensor, alongside the specified axis.

    # Arguments
        x: A tensor or variable.
        axis: A list of integers. Axes to compute the mean.
        keepdims: A boolean, whether to keep the dimensions or not.
            If `keepdims` is `False`, the rank of the tensor is reduced
            by 1 for each entry in `axis`. If `keepdims` is `True`,
            the reduced dimensions are retained with length 1.

    # Returns
        A tensor with the mean of elements of `x`.
    """
    axis = _normalize_axis(axis, ndim(x))
    if x.dtype.base_dtype == tf.bool:
        x = tf.cast(x, floatx())
    return tf.reduce_mean(x, reduction_indices=axis, keep_dims=keepdims)
tensorflow_backend.py (project: deep-learning-keras-projects, author: jasmeetsb)
def in_top_k(predictions, targets, k):
    """Returns whether the `targets` are in the top `k` `predictions`

    # Arguments
        predictions: A tensor of shape `batch_size` x classes and type `float32`.
        targets: A tensor of shape batch_size and type `int32` or `int64`.
        k: An `int`, number of top elements to consider.

    # Returns
        A tensor of shape `batch_size` and type `bool`. `output_i` is `True` if
        `targets_i` is within top-k values of `predictions_i`
    """
    return tf.nn.in_top_k(predictions, targets, k)


# CONVOLUTIONS
beam_search.py (project: tefla, author: openAGI)
def create_initial_beam_state(config):
    """Creates an instance of `BeamState` that can be used on the first
    call to `beam_step`.

    Args:
      config: A BeamSearchConfig

    Returns:
      An instance of `BeamState`.
    """
    return BeamSearchState(
        log_probs=tf.zeros([config.beam_width]),
        finished=tf.zeros(
            [config.beam_width], dtype=tf.bool),
        lengths=tf.zeros(
            [config.beam_width], dtype=tf.int32))
environment.py (project: mgail, author: itaicaspi)
def step(self, action, mode):
        qvel, qpos = [], []
        if mode == 'tensorflow':
            if self.random_initialization:
                state, reward, done, qvel, qpos = tf.py_func(
                    self._step, inp=[action],
                    Tout=[tf.float32, tf.float32, tf.bool, tf.float32, tf.float32],
                    name='env_step_func')
            else:
                state, reward, done = tf.py_func(self._step, inp=[action],
                                                 Tout=[tf.float32, tf.float32, tf.bool],
                                                 name='env_step_func')

            state = tf.reshape(state, shape=(self.state_size,))
            done.set_shape(())
        else:
            if self.random_initialization:
                state, reward, done, qvel, qpos = self._step(action)
            else:
                state, reward, done = self._step(action)

        return state, reward, done, 0., qvel, qpos
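The 'tensorflow' branch above wraps the plain Python step function with tf.py_func and declares tf.bool for the done flag. A minimal hedged sketch of that wrapping pattern, detached from the actual environment:

import numpy as np
import tensorflow as tf

def _toy_step(action):
    # Stand-in for the environment step: returns state, reward, done.
    state = np.asarray(action, dtype=np.float32) * 2.0
    reward = np.float32(1.0)
    done = np.bool_(False)
    return state, reward, done

action_ph = tf.placeholder(tf.float32, shape=(3,))
state, reward, done = tf.py_func(_toy_step, inp=[action_ph],
                                 Tout=[tf.float32, tf.float32, tf.bool],
                                 name='env_step_func')
done.set_shape(())  # tf.py_func drops static shape info, so restore it

with tf.Session() as sess:
    print(sess.run([state, reward, done], feed_dict={action_ph: [1., 2., 3.]}))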
metric_ops_test.py (project: lsdc, author: febert)
def testSingleUpdateSomeMissingKIs2(self):
    predictions = tf.constant(self._np_predictions,
                              shape=(self._batch_size, self._num_classes),
                              dtype=tf.float32)
    labels = tf.constant(self._np_labels, shape=(self._batch_size,))
    weights = tf.constant([0, 1, 1, 1], shape=(self._batch_size,),
                          dtype=tf.float32)
    mask = tf.constant([False, False, True, False], shape=(self._batch_size,),
                       dtype=tf.bool)
    recall, update_op = metrics.streaming_recall_at_k(
        predictions, labels, k=2, ignore_mask=mask, weights=weights)

    with self.test_session() as sess:
      sess.run(tf.initialize_local_variables())
      self.assertEqual(1.0, sess.run(update_op))
      self.assertEqual(1.0, recall.eval())
metric_ops_test.py (project: lsdc, author: febert)
def testSomePresentOneUpdate(self):
    with self.test_session() as sess:
      values = tf.constant([2, 4, 6, 8], shape=(1, 4), dtype=tf.float32)
      mask = tf.constant([False, True, False, False], shape=(1, 4),
                         dtype=tf.bool)
      weights = tf.constant([1, 1, 0, 1], shape=(1, 4), dtype=tf.float32)

      pcnt0, update_op0 = metrics.streaming_percentage_less(
          values, 100, ignore_mask=mask, weights=weights, name='high')
      pcnt1, update_op1 = metrics.streaming_percentage_less(
          values, 7, ignore_mask=mask, weights=weights, name='medium')
      pcnt2, update_op2 = metrics.streaming_percentage_less(
          values, 1, ignore_mask=mask, weights=weights, name='low')

      sess.run(tf.initialize_local_variables())
      self.assertListEqual([1.0, 0.5, 0.0],
                           sess.run([update_op0, update_op1, update_op2]))

      pcnt0, pcnt1, pcnt2 = sess.run([pcnt0, pcnt1, pcnt2])
      self.assertAlmostEqual(1.0, pcnt0, 5)
      self.assertAlmostEqual(0.5, pcnt1, 5)
      self.assertAlmostEqual(0.0, pcnt2, 5)

