Example source code for the Python method add()

Source: clock_model.py (project: deep-time-reading, author: felixduvallet)
def _variable_with_weight_decay(name, shape, stddev, wd):
    """Helper to create an initialized Variable with weight decay.

    Note that the Variable is initialized with a truncated normal distribution.
    A weight decay is added only if one is specified.

    Args:
      name: name of the variable
      shape: list of ints
      stddev: standard deviation of a truncated Gaussian
      wd: add L2Loss weight decay multiplied by this float. If None, weight
          decay is not added for this Variable.

    Returns:
      Variable Tensor
    """
    var = _variable_on_cpu(
        name,
        shape,
        tf.truncated_normal_initializer(stddev=stddev, dtype=tf.float32))
    if wd is not None:
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
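
For context, a hedged sketch of how this helper is typically wired into the total loss in CIFAR-10-style models (the shape, stddev and wd values and the stand-in cross_entropy_mean term below are illustrative assumptions, not code from this project):

weights = _variable_with_weight_decay('weights', shape=[256, 64],
                                      stddev=0.04, wd=0.004)
# some data-fit term of the model's loss; a constant stands in for it here
cross_entropy_mean = tf.constant(0.0, name='cross_entropy_mean')
tf.add_to_collection('losses', cross_entropy_mean)
# the training objective is the sum of the data term and all decay terms
total_loss = tf.add_n(tf.get_collection('losses'), name='total_loss')
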
Source: clock_model.py (project: deep-time-reading, author: felixduvallet)
def inference(images, num_classes):
    """ Build a time reading model for *either* hours or minutes.

    Args:
      images: Images returned from distorted_inputs() or inputs().
      num_classes: 12 for hours, 60 for minutes.

    Returns:
      Logits.
    """

    local4 = _inference_shared(images)

    dim = num_classes

    # softmax, i.e. softmax(WX + b)
    with tf.variable_scope('softmax_linear') as scope:
        weights = _variable_with_weight_decay('weights', [192, dim],
                                              stddev=1 / 192.0, wd=0.0)
        biases = _variable_on_cpu('biases', [dim],
                                  tf.constant_initializer(0.0))
        softmax_linear = tf.add(tf.matmul(local4, weights), biases,
                                name=scope.name)
        _activation_summary(softmax_linear)
    return softmax_linear
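
A hedged usage sketch (the enclosing scope names are assumptions): since inference() always builds a scope called 'softmax_linear', each clock hand needs its own outer variable scope:

with tf.variable_scope('hours'):
    hours_logits = inference(images, num_classes=12)
with tf.variable_scope('minutes'):
    minutes_logits = inference(images, num_classes=60)
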
Source: general.py (project: qrn, author: uwnlp)
def variable_with_weight_decay(name, shape, stddev, wd):
    """Helper to create an initialized Variable with weight decay.

    Note that the Variable is initialized with a truncated normal distribution.
    A weight decay is added only if one is specified.

    Args:
      name: name of the variable
      shape: list of ints
      stddev: standard deviation of a truncated Gaussian
      wd: add L2Loss weight decay multiplied by this float. If None, weight
          decay is not added for this Variable.

    Returns:
      Variable Tensor
    """
    var = variable_on_cpu(name, shape,
                           tf.truncated_normal_initializer(stddev=stddev))
    if wd is not None:
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
Source: layers.py (project: third_person_im, author: bstadie)
def __init__(self, incoming, pattern, **kwargs):
        super(DimshuffleLayer, self).__init__(incoming, **kwargs)

        # Sanity check the pattern
        used_dims = set()
        for p in pattern:
            if isinstance(p, int):
                # Dimension p
                if p in used_dims:
                    raise ValueError("pattern contains dimension {0} more "
                                     "than once".format(p))
                used_dims.add(p)
            elif p == 'x':
                # Broadcast
                pass
            else:
                raise ValueError("pattern should only contain dimension"
                                 "indices or 'x', not {0}".format(p))

        self.pattern = pattern

        # try computing the output shape once as a sanity check
        self.get_output_shape_for(self.input_shape)
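
A brief usage sketch (Lasagne-style; the incoming layer and shapes are assumptions): for an input of shape (batch, channels), the pattern (1, 0, 'x') yields shape (channels, batch, 1), where 'x' inserts a broadcastable axis:

in_layer = InputLayer(shape=(None, 16))   # hypothetical incoming layer
shuffled = DimshuffleLayer(in_layer, pattern=(1, 0, 'x'))
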
Source: layers.py (project: third_person_im, author: bstadie)
def unique(l):
    """Filters duplicates of iterable.
    Create a new list from l with duplicate entries removed,
    while preserving the original order.
    Parameters
    ----------
    l : iterable
        Input iterable to filter of duplicates.
    Returns
    -------
    list
        A list of elements of `l` without duplicates and in the same order.
    """
    new_list = []
    seen = set()
    for el in l:
        if el not in seen:
            new_list.append(el)
            seen.add(el)

    return new_list
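
A quick usage example: duplicates are dropped and the first occurrence of each element keeps its position:

unique([3, 1, 3, 2, 1])   # -> [3, 1, 2]
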
Source: test_base.py (project: zhusuan, author: thu-ml)
def test_tensor_conversion(self):
        with BayesianNet(observed={'a': 1., 'c': 1.}):
            a = StochasticTensor('a', Mock(dtype=tf.float32), 1)
            b = tf.add(1., a)
            c = StochasticTensor('c', Mock(dtype=tf.float32), 1)
            # TensorFlow will try to convert c to the same type as 1 (int32)
            # by calling the registered tensor conversion function of c.
            # If that fails, it retries without requesting the type, so an
            # error will be raised by the operator.
            with self.assertRaisesRegexp(
                    TypeError, "type float32.*not match.*type int32"):
                _ = tf.add(1, c)
        with self.test_session(use_gpu=True):
            self.assertNear(b.eval(), 2., 1e-6)
        with self.assertRaisesRegexp(ValueError, "Ref type not supported"):
            _ = StochasticTensor._to_tensor(a, as_ref=True)
Source: tf_dataset.py (project: kaggle_redefining_cancer_treatment, author: jorgemf)
def _map(self, example_serialized, features=None):
        """
        Maps a example_serialized read from the dataset into the final set of tf.Tensors
        to return to the model.

        Simple example:

        def _parse(line, features=None):
            a, b = [np.int32(x) for x in line.split()]
            return a, b

        t_input, t_ouptut = tf.py_func(_parse, [line], [tf.int32, tf.int32],
                                       stateful=True, name='py_parse_example')
        t_ouptut = tf.add(t_ouptut, 1)

        return t_input, t_ouptut

        :param example_serialized: the example serialized
        :param features: do not use this as it is deprecated after 1.2
        :return: a tuple of the tensors to return when get_next is called. Usually (inputs,outputs)
        """
        pass
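
A hedged sketch of a concrete override following the docstring's pattern (the parent class name TFDataSet and the parsing logic are assumptions; assumes numpy as np and tensorflow as tf are imported):

class MyDataset(TFDataSet):
    def _map(self, example_serialized, features=None):
        def _parse(line):
            # line arrives as a bytes scalar; split it into two integers
            a, b = [np.int32(x) for x in line.split()]
            return a, b
        t_input, t_output = tf.py_func(_parse, [example_serialized],
                                       [tf.int32, tf.int32],
                                       stateful=True, name='py_parse_example')
        return t_input, tf.add(t_output, 1)
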
Source: lab6_runTFMultiANN_spiraldata.py (project: EveryBodyTensorFlow, author: jwkanggist)
def neural_net(x):
    # Hidden fully connected layer with 7 neurons
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    layer_1 = tf.nn.relu(layer_1)

    # Hidden fully connected layer with 7 neurons
    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
    layer_2 = tf.nn.relu(layer_2)

    # Hidden fully connected layer with 4 neurons
    layer_3 = tf.add(tf.matmul(layer_2, weights['h3']), biases['b3'])
    layer_3 = tf.nn.relu(layer_3)

    # Output fully connected layer with a neuron for each class
    out_layer = tf.matmul(layer_3, weights['out']) + biases['out']
    return out_layer

# Construct model
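The excerpt stops just as the model is constructed; a minimal sketch of the weights/biases dictionaries that neural_net() expects (layer widths 7/7/4 come from the comments above; the input and class counts are assumed placeholders for the spiral data):

num_input, num_classes = 2, 3   # assumed sizes for 2-D spiral data
weights = {
    'h1': tf.Variable(tf.random_normal([num_input, 7])),
    'h2': tf.Variable(tf.random_normal([7, 7])),
    'h3': tf.Variable(tf.random_normal([7, 4])),
    'out': tf.Variable(tf.random_normal([4, num_classes])),
}
biases = {
    'b1': tf.Variable(tf.random_normal([7])),
    'b2': tf.Variable(tf.random_normal([7])),
    'b3': tf.Variable(tf.random_normal([4])),
    'out': tf.Variable(tf.random_normal([num_classes])),
}
X = tf.placeholder(tf.float32, [None, num_input])
logits = neural_net(X)
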
Source: SENN.py (project: Multi-channel-speech-extraction-using-DNN, author: zhr1201)
def _double_conv_layer_wrapper(self, input1, input2, out_feature_maps,
                                   filter_length, is_train):
        '''Two parallel convolution layers, one per input channel,
        sharing the same weights.'''
        filter_width = input1.get_shape()[1].value
        in_feature_maps = input1.get_shape()[-1].value
        # shared weights
        W_conv = weight_variable(
            [filter_width, filter_length, in_feature_maps, out_feature_maps],
            regularizer=tf.contrib.layers.l2_regularizer(self.reg_fac))
        # shared bias
        b_conv = bias_variable([out_feature_maps])
        h_conv_t1 = tf.add(conv2d(input1, W_conv), b_conv)
        h_conv_b1 = self._batch_norm_wrapper(h_conv_t1, is_train)
        h_conv_t2 = tf.add(conv2d(input2, W_conv), b_conv)
        h_conv_b2 = self._batch_norm_wrapper(h_conv_t2, is_train)
        return tf.nn.relu(h_conv_b1), tf.nn.relu(h_conv_b2)
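
The helpers weight_variable, bias_variable and conv2d are defined elsewhere in the project; typical definitions consistent with this call pattern (an assumption, not the project's exact code) would be:

def weight_variable(shape, regularizer=None):
    # trainable filter weights; the regularizer feeds the regularization losses
    return tf.get_variable('weights', shape,
                           initializer=tf.truncated_normal_initializer(stddev=0.1),
                           regularizer=regularizer)

def bias_variable(shape):
    return tf.Variable(tf.constant(0.1, shape=shape))

def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
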
Source: variational_autoencoder.py (project: hyperchamber, author: 255BITS)
def _create_network(self):
        # Initialize autoencode network weights and biases
        network_weights = self._initialize_weights(**self.network_architecture)

        # Use the recognition network to determine the mean and (log)
        # variance of the Gaussian distribution in latent space
        self.z_mean, self.z_log_sigma_sq = \
            self._recognition_network(network_weights["weights_recog"], 
                                      network_weights["biases_recog"])

        # Draw one sample z from Gaussian distribution
        n_z = self.network_architecture["n_z"]
        eps = tf.random_normal((self.batch_size, n_z), 0, 1, 
                               dtype=tf.float32)
        # z = mu + sigma*epsilon
        self.z = tf.add(self.z_mean,
                        tf.multiply(tf.sqrt(tf.exp(self.z_log_sigma_sq)), eps))

        # Use generator to determine mean of
        # Bernoulli distribution of reconstructed input
        self.x_reconstr_mean = \
            self._generator_network(network_weights["weights_gener"],
                                    network_weights["biases_gener"])
Source: test_computations.py (project: ngraph, author: NervanaSystems)
def test_binary_ops_combined(self):
        # computation
        a = tf.placeholder(tf.float32, shape=(2, 3))
        b = tf.placeholder(tf.float32, shape=(2, 3))
        c = tf.add(a, b)
        d = tf.multiply(c, a)
        e = tf.divide(d, b)
        f = tf.subtract(a, e)
        g = tf.maximum(a, f)

        # value
        a_val = np.random.rand(*tf_obj_shape(a))
        b_val = np.random.rand(*tf_obj_shape(b))

        # test
        self.run(g, tf_feed_dict={a: a_val, b: b_val})
Source: bmf.py (project: DeepFM, author: dwt0317)
def build_model(user_indices, item_indices, rank, ratings, user_cnt, item_cnt, lr, lamb, mu, init_value):


    W_user = tf.Variable(tf.truncated_normal([user_cnt, rank], stddev=init_value/math.sqrt(float(rank)), mean=0), name = 'user_embedding', dtype=tf.float32)
    W_item = tf.Variable(tf.truncated_normal([item_cnt, rank], stddev=init_value/math.sqrt(float(rank)), mean=0), name = 'item_embedding', dtype=tf.float32)

    W_user_bias = tf.concat([W_user, tf.ones((user_cnt,1), dtype=tf.float32)], 1, name='user_embedding_bias')
    W_item_bias = tf.concat([tf.ones((item_cnt,1), dtype=tf.float32), W_item], 1, name='item_embedding_bias')

    user_feature = tf.nn.embedding_lookup(W_user_bias, user_indices, name = 'user_feature')
    item_feature = tf.nn.embedding_lookup(W_item_bias, item_indices, name = 'item_feature') 


    preds = tf.add(tf.reduce_sum( tf.multiply(user_feature , item_feature) , 1), mu)

    square_error = tf.sqrt(tf.reduce_mean( tf.squared_difference(preds, ratings)))
    loss = square_error + lamb*(tf.reduce_mean(tf.nn.l2_loss(W_user)) + tf.reduce_mean(tf.nn.l2_loss(W_item)))

    tf.summary.scalar('square_error', square_error)
    tf.summary.scalar('loss', loss)
    merged_summary = tf.summary.merge_all()
    train_step = tf.train.GradientDescentOptimizer(lr).minimize(loss)  # alternative: tf.train.AdadeltaOptimizer(learning_rate=lr).minimize(loss)

    return train_step, square_error, loss, merged_summary
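
A hedged usage sketch (the placeholder names and every hyperparameter value below are assumptions):

user_indices = tf.placeholder(tf.int32, [None])
item_indices = tf.placeholder(tf.int32, [None])
ratings = tf.placeholder(tf.float32, [None])
train_step, square_error, loss, merged_summary = build_model(
    user_indices, item_indices, rank=10, ratings=ratings,
    user_cnt=1000, item_cnt=2000, lr=0.01, lamb=0.1, mu=3.5,
    init_value=0.1)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    _, rmse = sess.run([train_step, square_error],
                       feed_dict={user_indices: [0, 1], item_indices: [5, 7],
                                  ratings: [4.0, 2.5]})
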
Source: cifar10.py (project: facial-emotion-detection-dl, author: dllatas)
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  var = _variable_on_cpu(name, shape,
                         tf.truncated_normal_initializer(stddev=stddev))
  if wd is not None:
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var
Source: multiclass_svm.py (project: TensorFlow-World, author: astorfi)
def kernel_pred(x_data, prediction_grid):
    A = tf.reshape(tf.reduce_sum(tf.square(x_data), 1), [-1, 1])
    B = tf.reshape(tf.reduce_sum(tf.square(prediction_grid), 1), [-1, 1])
    square_distance = tf.add(tf.subtract(A, tf.multiply(2., tf.matmul(x_data, tf.transpose(prediction_grid)))),
                             tf.transpose(B))
    return tf.exp(tf.multiply(gamma, tf.abs(square_distance)))
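
kernel_pred() expands the pairwise squared distances as A - 2 X P^T + B^T and then applies a Gaussian (RBF) kernel; tf.abs() guards against tiny negative values introduced by floating-point error. It reads a module-level gamma, which must be negative for the kernel to decay with distance, for example (value assumed):

gamma = tf.constant(-25.0)
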
Source: linear_svm.py (project: TensorFlow-World, author: astorfi)
def loss_fn(W, b, x_data, y_target):
    logits = tf.subtract(tf.matmul(x_data, W), b)
    norm_term = tf.divide(tf.reduce_sum(tf.multiply(tf.transpose(W), W)), 2)
    classification_loss = tf.reduce_mean(
        tf.maximum(0., tf.subtract(FLAGS.delta, tf.multiply(logits, y_target))))
    total_loss = tf.add(tf.multiply(FLAGS.C_param, classification_loss),
                        tf.multiply(FLAGS.Reg_param, norm_term))
    return total_loss
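
The function reads its hyperparameters from tf.app.flags; a minimal, assumed configuration (flag names taken from the snippet, values illustrative) might be:

tf.app.flags.DEFINE_float('delta', 1.0, 'margin of the hinge loss')
tf.app.flags.DEFINE_float('C_param', 0.1, 'weight of the classification loss')
tf.app.flags.DEFINE_float('Reg_param', 1.0, 'weight of the regularization term')
FLAGS = tf.app.flags.FLAGS
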
Source: classifier_tf.py (project: human-rl, author: gsastry)
def write_summaries(self, X, y, label, step, summary_writer=None):
        if not X:
            return
        y_pred, loss = self.predict_proba_with_loss(X, y)
        metrics = classification_metrics(y, y_pred, self.threshold)
        metrics['loss'] = loss
        if summary_writer is not None:
            summary = tf.Summary()
            for key, value in metrics.items():
                summary.value.add(tag="metrics/{}".format(key), simple_value=float(value))
            if not self.summary_tensors:
                self.summary_tensors["positive_predictions_input"] = tf.placeholder(
                    tf.float32, [None], "positive_predictions_input")
                self.summary_tensors["positive_predictions"] = tf.summary.histogram(
                    "positive_predictions", self.summary_tensors["positive_predictions_input"])
                self.summary_tensors["negative_predictions_input"] = tf.placeholder(
                    tf.float32, [None], "negative_predictions_input")
                self.summary_tensors["negative_predictions"] = tf.summary.histogram(
                    "negative_predictions", self.summary_tensors["negative_predictions_input"])
            summary_writer.add_summary(
                self.summary_tensors["positive_predictions"].eval(
                    feed_dict={self.summary_tensors["positive_predictions_input"]: y_pred[y]}),
                step)
            summary_writer.add_summary(
                self.summary_tensors["negative_predictions"].eval(
                    feed_dict={self.summary_tensors["negative_predictions_input"]: y_pred[~y]}),
                step)
            summary_writer.add_summary(summary, step)
            summary_writer.flush()
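
A hedged usage sketch (the classifier instance, data names and log directory are assumptions). Note that y is used as a boolean mask (y_pred[y], y_pred[~y]), so it should be a boolean NumPy array:

summary_writer = tf.summary.FileWriter('/tmp/classifier_logs')
classifier.write_summaries(X_val, y_val, 'validation', step,
                           summary_writer=summary_writer)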

