Python float64() usage examples (source code)

tensorflow_backend.py (project: keras-customized, author: ambrite)
def _convert_string_dtype(dtype):
    if dtype == 'float16':
        return tf.float16
    elif dtype == 'float32':
        return tf.float32
    elif dtype == 'float64':
        return tf.float64
    elif dtype == 'int16':
        return tf.int16
    elif dtype == 'int32':
        return tf.int32
    elif dtype == 'int64':
        return tf.int64
    elif dtype == 'uint8':
        return tf.uint8
    elif dtype == 'uint16':
        return tf.uint16
    else:
        raise ValueError('Unsupported dtype:', dtype)
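A quick sanity check of the mapping (a minimal sketch, assuming the function above and a TensorFlow 1.x `tf` module are in scope):

import tensorflow as tf

assert _convert_string_dtype('float64') == tf.float64
assert _convert_string_dtype('uint8') == tf.uint8
try:
    _convert_string_dtype('complex64')
except ValueError as err:
    print(err)  # ('Unsupported dtype:', 'complex64')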
tensorflow_backend.py (project: keras-customized, author: ambrite)
def conv1d(x, kernel, stride=1, border_mode='valid',
           image_shape=None, filter_shape=None):
    '''1D convolution.

    # Arguments
        kernel: kernel tensor.
        stride: stride integer.
        border_mode: string, "same" or "valid".
    '''
    # pre-process dtype
    x_dtype = dtype(x)
    if x_dtype == 'float64':
        x = tf.cast(x, 'float32')
        kernel = tf.cast(kernel, 'float32')
    padding = _preprocess_border_mode(border_mode)
    x = tf.nn.conv1d(x, kernel, stride, padding=padding)
    # post-process dtype
    if x_dtype == 'float64':
        x = tf.cast(x, 'float64')
    return x
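A usage sketch for the float64 round-trip (hypothetical shapes; assumes the backend helpers `dtype()` and `_preprocess_border_mode()` are in scope, as in the Keras TensorFlow backend):

import numpy as np
import tensorflow as tf

x = tf.constant(np.random.randn(2, 8, 3))       # float64 input: batch=2, width=8, channels=3
kernel = tf.constant(np.random.randn(5, 3, 4))   # width-5 kernel, 3 -> 4 channels
y = conv1d(x, kernel, stride=1, border_mode='same')
print(y.dtype)  # float64: the op ran in float32, the result was cast back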
parametric_GP.py (project: ParametricGP-in-Python, author: maziarraissi)
def train(self):
        print("Total number of parameters: %d" % (self.hyp.shape[0]))

        X_tf = tf.placeholder(tf.float64)
        y_tf = tf.placeholder(tf.float64)
        hyp_tf = tf.Variable(self.hyp, dtype=tf.float64)

        train = self.likelihood(hyp_tf, X_tf, y_tf)

        init = tf.global_variables_initializer()
        self.sess.run(init)

        start_time = timeit.default_timer()
        for i in range(1,self.max_iter+1):
            # Fetch minibatch
            X_batch, y_batch = fetch_minibatch(self.X,self.y,self.N_batch)
            self.sess.run(train, {X_tf:X_batch, y_tf:y_batch})

            if i % self.monitor_likelihood == 0:
                elapsed = timeit.default_timer() - start_time
                nlml = self.sess.run(self.nlml)
                print('Iteration: %d, NLML: %.2f, Time: %.2f' % (i, nlml, elapsed))
                start_time = timeit.default_timer()

        self.hyp = self.sess.run(hyp_tf)
test_complete_graphs.py (project: autodiff, author: bgavran)
def setUp(self):
        np.random.seed(1337)
        h_val = np.random.randn(2, 5)
        b0_val = np.random.randn(5)
        b1_val = np.random.randn(1, 5)
        b2_val = 7

        self.my_h = ad.Variable(h_val, name="h")
        self.my_b0 = ad.Variable(b0_val, name="b0")
        self.my_b1 = ad.Variable(b1_val, name="b1")
        self.my_b2 = ad.Variable(b2_val, name="b2")

        self.tf_h = tf.constant(h_val, dtype=tf.float64)
        self.tf_b0 = tf.constant(b0_val, dtype=tf.float64)
        self.tf_b1 = tf.constant(b1_val, dtype=tf.float64)
        self.tf_b2 = tf.constant(b2_val, dtype=tf.float64)
reader.py (project: neural_style_tensorflow, author: burness)
def preprocess(image, size, max_length):
    shape = tf.shape(image)
    size_t = tf.constant(size, tf.float64)
    height = tf.cast(shape[0], tf.float64)
    width = tf.cast(shape[1], tf.float64)

    cond_op = tf.less(width, height) if max_length else tf.less(height, width)

    new_height, new_width = tf.cond(
        cond_op, lambda: (size_t, (width * size_t) / height),
        lambda: ((height * size_t) / width, size_t))
    new_size = [tf.to_int32(new_height), tf.to_int32(new_width)]
    resized_image = tf.image.resize_images(image, new_size)
    normalised_image = resized_image - mean_pixel
    return normalised_image


# max_length: Whether `size` constrains the longest or the shortest side. Default: longest.
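The resize arithmetic in isolation (plain-Python sketch: a hypothetical 600x400 portrait image, size=256, max_length=True, so the longer side is scaled down to `size`):

height, width, size = 600.0, 400.0, 256.0
if width < height:                      # max_length=True branch of cond_op
    new_h, new_w = size, width * size / height
else:
    new_h, new_w = height * size / width, size
print(int(new_h), int(new_w))           # 256 170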
temporal_softmax.py (project: image_captioning, author: AgrawalAmey)
def forward(self, x, y, mask):

        self.N = x.get_shape()[0].value
        self.T = x.get_shape()[1].value 
        self.V = x.get_shape()[2].value

        x_flat = tf.reshape(x, [self.N * self.T, self.V])
        y_flat = tf.reshape(y, [self.N * self.T])
        mask_flat = tf.cast(tf.reshape(mask, [self.N * self.T]), tf.float64)

        probs = tf.exp(x_flat - tf.reduce_max(x_flat, reduction_indices=[1], keep_dims=True))
        probs /= tf.reduce_sum(probs, reduction_indices=[1], keep_dims=True)
        coords = tf.transpose(tf.stack([tf.range(self.N * self.T), y_flat]))
        loss = -tf.reduce_sum(mask_flat * tf.log(tf.gather_nd(probs, coords))) / self.N

        self.y_flat, self.mask_flat, self.probs = y_flat, mask_flat, probs

        return loss
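The loss above is a masked, numerically stable softmax cross-entropy averaged over the batch; an equivalent NumPy sketch (toy shapes, all positions unmasked) makes the gather indexing explicit:

import numpy as np

N, T, V = 2, 3, 5
x = np.random.randn(N, T, V)
y = np.random.randint(0, V, size=(N, T))
mask = np.ones((N, T))

x_flat = x.reshape(N * T, V)
y_flat = y.reshape(N * T)
mask_flat = mask.reshape(N * T)

probs = np.exp(x_flat - x_flat.max(axis=1, keepdims=True))  # subtract row max for stability
probs /= probs.sum(axis=1, keepdims=True)
loss = -np.sum(mask_flat * np.log(probs[np.arange(N * T), y_flat])) / N
print(loss)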
nn_test.py (project: imperative, author: yaroslavvb)
def _Inputs(self, x=None, y=None, q=3.0, dtype=tf.float64, sizes=None):
    x = [-100, -2, -2, 0, 2, 2, 2, 100] if x is None else x
    y = [0, 0, 1, 0, 0, 1, 0.5, 1] if y is None else y
    assert len(x) == len(y)
    sizes = sizes if sizes else [len(x)]
    logits = tf.constant(x, shape=sizes, dtype=dtype, name="logits")
    targets = tf.constant(y, shape=sizes, dtype=dtype, name="targets")
    losses = np.array(self._WeightedCrossEntropy(x, y, q)).reshape(*sizes)
    return logits, targets, q, losses

  # def testConstructionNamed(self):
  #   with self.test_session():
  #     logits, targets, pos_weight, _ = self._Inputs()
  #     loss = tf.nn.weighted_cross_entropy_with_logits(logits, targets,
  #                                                     pos_weight, name="mybce")
  #   self.assertEqual("mybce", loss.op.name)
nn_test.py (project: imperative, author: yaroslavvb)
def testSoftmax(self):
    x_shape = [5, 10]
    x_np = np.random.randn(*x_shape).astype(np.float32)
    y_np = self._softmax(x_np)
    with self.test_session():
      x_tf = tf.constant(x_np)
      y_tf = tf.nn.softmax(x_tf)
      y_tf_np = y_tf.eval()
    eps = 1e-3
    self.assertAllClose(y_tf_np, y_np, eps)

  # def testGradient(self):
  #   x_shape = [5, 10]
  #   x_np = np.random.randn(*x_shape).astype(np.float64)
  #   with self.test_session():
  #     x_tf = tf.constant(x_np)
  #     y_tf = tf.nn.softmax(x_tf)
  #     err = tf.test.compute_gradient_error(x_tf, x_shape, y_tf, x_shape)
  #   eps = 1e-8
  #   self.assertLess(err, eps)


# use work-around from https://github.com/tensorflow/tensorflow/issues/2511
nn_test.py (project: imperative, author: yaroslavvb)
def testL2Loss(self):
    with self.test_session():
      x = tf.constant([1.0, 0.0, 3.0, 2.0], shape=[2, 2], name="x")
      l2loss = tf.nn.l2_loss(x)
      value = l2loss.eval()
    self.assertAllClose(7.0, value)

  # def testGradient(self):
  #   x_shape = [20, 7, 3]
  #   np.random.seed(1)  # Make it reproducible.
  #   x_val = np.random.random_sample(x_shape).astype(np.float64)
  #   with self.test_session():
  #     x = tf.constant(x_val, name="x")
  #     output = tf.nn.l2_loss(x)
  #     err = tf.test.compute_gradient_error(x, x_shape, output, [1])
  #   print("L2Loss gradient err = %g " % err)
  #   err_tolerance = 1e-11
  #   self.assertLess(err, err_tolerance)
nn_test.py (project: imperative, author: yaroslavvb)
def testL2Normalize(self):
    x_shape = [20]
    np.random.seed(1)
    x_np = np.random.random_sample(x_shape).astype(np.float32)
    for dim in range(len(x_shape)):
      y_np = self._l2Normalize(x_np, dim)
      with self.test_session():
        x_tf = tf.constant(x_np, name="x")
        y_tf = tf.nn.l2_normalize(x_tf, dim)
        self.assertAllClose(y_np, y_tf.eval())

  # def testL2NormalizeGradient(self):
  #   x_shape = [20, 7, 3]
  #   np.random.seed(1)
  #   x_np = np.random.random_sample(x_shape).astype(np.float64)
  #   for dim in range(len(x_shape)):
  #     with self.test_session():
  #       x_tf = tf.constant(x_np, name="x")
  #       y_tf = tf.nn.l2_normalize(x_tf, dim)
  #       err = tf.test.compute_gradient_error(x_tf, x_shape, y_tf, x_shape)
  #     print("L2Normalize gradient err = %g " % err)
  #     self.assertLess(err, 1e-4)
env_cache_test.py (project: imperative, author: yaroslavvb)
def testSum1CacheCpu(self):
    env = imperative.Env(tf)
    is_graph_changed(env)
    env.disable_gc()
    with env.g.device("cpu:0"):
      val1 = env.numpy_to_itensor([1, 2, 3])
      val2 = env.numpy_to_itensor([4, 5, 6])
      val3 = env.numpy_to_itensor([4, 5, 6], dtype=tf.float64)
      try:
        out1 = env.sum1(val1)
      except:
        import pdb;
        pdb.post_mortem()
      self.assertTrue(is_graph_changed(env))
      out2 = env.sum1(val2)
      self.assertFalse(is_graph_changed(env))
      out3 = env.sum1(val3)
      self.assertTrue(is_graph_changed(env))
env_cache_test.py (project: imperative, author: yaroslavvb)
def testSum1CacheGpu(self):
    if not tf.test.is_built_with_cuda():
      return True
    if not self.haveGpu0():
      return True

    env = imperative.Env(tf)
    with env.g.device("cpu:0"):
      val1 = env.numpy_to_itensor([1, 2, 3])
      val2 = env.numpy_to_itensor([4, 5, 6])
      val3 = env.numpy_to_itensor([4, 5, 6], dtype=tf.float64)
      try:
        out1 = env.sum1(val1)
      except:
        import pdb;
        pdb.post_mortem()
      self.assertTrue(is_graph_changed(env))
      out2 = env.sum1(val2)
      self.assertFalse(is_graph_changed(env))
      out3 = env.sum1(val3)
      self.assertTrue(is_graph_changed(env))
metrics.py (project: Deep-Fashion, author: TomPyonsuke)
def average_precision_voc12(precision, recall, name=None):
    """Compute (interpolated) average precision from precision and recall Tensors.

    The implementation follows Pascal 2012 and ILSVRC guidelines.
    See also: https://sanchom.wordpress.com/tag/average-precision/
    """
    with tf.name_scope(name, 'average_precision_voc12', [precision, recall]):
        # Convert to float64 to decrease error on Riemann sums.
        precision = tf.cast(precision, dtype=tf.float64)
        recall = tf.cast(recall, dtype=tf.float64)

        # Add bounds values to precision and recall.
        precision = tf.concat([[0.], precision, [0.]], axis=0)
        recall = tf.concat([[0.], recall, [1.]], axis=0)
        # Make precision monotonically non-increasing via a reverse cumulative max.
        precision = tfe_math.cummax(precision, reverse=True)

        # Riemann sums for estimating the integral.
        # mean_pre = (precision[1:] + precision[:-1]) / 2.
        mean_pre = precision[1:]
        diff_rec = recall[1:] - recall[:-1]
        ap = tf.reduce_sum(mean_pre * diff_rec)
        return ap
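The same integral in plain NumPy (a sketch on toy precision/recall arrays, mirroring the boundary padding and the reverse cumulative max):

import numpy as np

precision = np.array([1.0, 0.5, 0.67, 0.5])
recall = np.array([0.25, 0.25, 0.5, 0.5])

p = np.concatenate([[0.], precision, [0.]])
r = np.concatenate([[0.], recall, [1.]])
p = np.maximum.accumulate(p[::-1])[::-1]   # reverse cummax: p[i] = max(p[i:])
ap = np.sum(p[1:] * (r[1:] - r[:-1]))      # right-endpoint Riemann sum
print(ap)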
metrics.py (project: Deep-Fashion, author: TomPyonsuke)
def average_precision_voc07(precision, recall, name=None):
    """Compute (interpolated) average precision from precision and recall Tensors.

    The implementation follows Pascal 2007 guidelines.
    See also: https://sanchom.wordpress.com/tag/average-precision/
    """
    with tf.name_scope(name, 'average_precision_voc07', [precision, recall]):
        # Convert to float64 to decrease error on cumulated sums.
        precision = tf.cast(precision, dtype=tf.float64)
        recall = tf.cast(recall, dtype=tf.float64)
        # Add zero-limit value to avoid any boundary problem...
        precision = tf.concat([precision, [0.]], axis=0)
        recall = tf.concat([recall, [np.inf]], axis=0)

        # 11-point interpolation: sample max precision at recall thresholds 0.0, 0.1, ..., 1.0.
        l_aps = []
        for t in np.arange(0., 1.1, 0.1):
            mask = tf.greater_equal(recall, t)
            v = tf.reduce_max(tf.boolean_mask(precision, mask))
            l_aps.append(v / 11.)
        ap = tf.add_n(l_aps)
        return ap
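And the VOC07 11-point variant in NumPy (same toy inputs; the appended 0/inf sentinels guarantee every recall threshold has a non-empty mask):

import numpy as np

precision = np.concatenate([[1.0, 0.5, 0.67, 0.5], [0.]])
recall = np.concatenate([[0.25, 0.25, 0.5, 0.5], [np.inf]])

ap = 0.0
for t in np.arange(0., 1.1, 0.1):
    ap += precision[recall >= t].max() / 11.
print(ap)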
metrics.py (project: Deep-Fashion, author: TomPyonsuke)
def _precision_recall(n_gbboxes, n_detections, scores, tp, fp, scope=None):
    """Compute precision and recall from scores, true positives and false
    positives booleans arrays
    """
    # Sort by score.
    with tf.name_scope(scope, 'prec_rec', [n_gbboxes, scores, tp, fp]):
        # Sort detections by score.
        scores, idxes = tf.nn.top_k(scores, k=n_detections, sorted=True)
        tp = tf.gather(tp, idxes)
        fp = tf.gather(fp, idxes)
        # Compute recall and precision.
        dtype = tf.float64
        tp = tf.cumsum(tf.cast(tp, dtype), axis=0)
        fp = tf.cumsum(tf.cast(fp, dtype), axis=0)
        recall = _safe_div(tp, tf.cast(n_gbboxes, dtype), 'recall')
        precision = _safe_div(tp, tp + fp, 'precision')

        return tf.tuple([precision, recall])
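The cumulative-sum construction, stripped of the TF plumbing (NumPy sketch; hypothetical 5 detections already sorted by score, 3 ground-truth boxes):

import numpy as np

tp = np.array([1, 0, 1, 1, 0], dtype=np.float64)
fp = 1.0 - tp
n_gbboxes = 3.0

tp_cum = np.cumsum(tp)                    # true positives among the top-k detections
fp_cum = np.cumsum(fp)
recall = tp_cum / n_gbboxes
precision = tp_cum / (tp_cum + fp_cum)
print(precision)   # approx. [1.0, 0.5, 0.667, 0.75, 0.6]
print(recall)      # approx. [0.333, 0.333, 0.667, 1.0, 1.0]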
siamese_lstm_network.py (project: tensorflow-quorakaggle, author: ram1988)
def buildRNN(self,x,scope):
        print(x)
        x = tf.transpose(x, [1, 0, 2])        
        #print(x)
        x = tf.reshape(x, [-1,self.nfeatures])
        #print(x)
        x = tf.split(x, self.n_steps, 0)
        print(x)
        #lstm_cell = rnn.MultiRNNCell([rnn.BasicLSTMCell(self.n_hidden, forget_bias=1.0) for _ in range(self.n_layers)], state_is_tuple=True)
        #outputs, states = tf.nn.dynamic_rnn(lstm_cell, x, dtype=tf.float64)
        with tf.name_scope("fw"+scope),tf.variable_scope("fw"+scope):
            fw_cell_array = []
            print(tf.get_variable_scope().name)
            for _ in range(self.n_layers):
                fw_cell = rnn.BasicLSTMCell(self.n_hidden, forget_bias=1.0, state_is_tuple=True)
                #fw_cell = rnn.DropoutWrapper(fw_cell,output_keep_prob=self.dropout)                
                fw_cell_array.append(fw_cell)
            fw_cell = rnn.MultiRNNCell(fw_cell_array, state_is_tuple=True)
        with tf.name_scope("bw"+scope),tf.variable_scope("bw"+scope):
            bw_cell_array = []
            print(tf.get_variable_scope().name)
            for _ in range(self.n_layers):
                bw_cell = rnn.BasicLSTMCell(self.n_hidden, forget_bias=1.0, state_is_tuple=True)
                #bw_cell = rnn.DropoutWrapper(bw_cell,output_keep_prob=self.dropout)
                bw_cell_array.append(bw_cell)
            bw_cell = rnn.MultiRNNCell(bw_cell_array, state_is_tuple=True)

        outputs, _,_ = tf.contrib.rnn.static_bidirectional_rnn(fw_cell, bw_cell, x, dtype=tf.float64)
        #outputs, = tf.nn.bidirectional_dynamic_rnn(fw_cell, bw_cell, x, dtype=tf.float64)


        print(outputs)
        print(outputs[-1])

        return outputs[-1]
siamese_lstm_network1.py (project: tensorflow-quorakaggle, author: ram1988)
def buildSiameseNN(self, left_nn, right_nn):
        #construct fully connected layer-extend even more networks
        print(self.nfeatures)
        weights = {
          'out': tf.Variable(tf.random_normal([2*self.nfeatures, self.n_classes],dtype=tf.float64),dtype = tf.float64)
        }
        biases = {
          'out': tf.Variable(tf.random_normal([self.n_classes],dtype=tf.float64),dtype = tf.float64)
        }

        joint_layer = tf.concat([left_nn,right_nn],1)
        print("joint layer-->"+str(joint_layer))
        batch_normalized = self.insertBatchNNLayer(joint_layer,[0],[2*self.nfeatures])        
        batch_normalized = tf.matmul(batch_normalized, weights['out']) + biases['out']
        result = tf.nn.softmax(batch_normalized)
        #add softmax layer
        return result
reader.py (project: prisma, author: hijkzzz)
def preprocess(image, size):
    shape = tf.shape(image)
    size_t = tf.constant(size, tf.float64)
    height = tf.cast(shape[0], tf.float64)
    width = tf.cast(shape[1], tf.float64)

    cond_op = tf.less(height, width)

    # Preserve the aspect ratio: scale so that the shorter side equals `size`.
    new_height, new_width = tf.cond(
        cond_op,
        lambda: (size_t, (width * size_t) / height),
        lambda: ((height * size_t) / width, size_t))

    resized_image = tf.image.resize_images(
            image,
            [tf.to_int32(new_height), tf.to_int32(new_width)],
            method=tf.image.ResizeMethod.BICUBIC)
    cropped = tf.image.resize_image_with_crop_or_pad(resized_image, size, size)

    return cropped
dgp.py (project: Doubly-Stochastic-DGP, author: ICL-SML)
def multisample_conditional(self, X, full_cov=False):
        if full_cov is True:
            # this is unlikely to be called in a performance critical application, so we use
            # this clear but slow implementation
            f = lambda a: self.conditional(a, full_cov=full_cov)
            mean, var = tf.map_fn(f, X, dtype=(tf.float64, tf.float64))
            return tf.stack(mean), tf.stack(var)
        else:
            # this should be faster as only computes the Z_uu once, but could be made faster
            # still perhaps by avoiding reshaping (but need to rewrite conditional)
            S, N, D = shape_as_list(X)
            X_flat = tf.reshape(X, [S*N, D])
            mean, var = self.conditional(X_flat)
            return [tf.reshape(m, [S, N, -1]) for m in [mean, var]]
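The flatten/unflatten trick in isolation (NumPy sketch: S samples of N points in D dims, with a hypothetical output dimension Dout standing in for the conditional's output):

import numpy as np

S, N, D, Dout = 4, 7, 2, 3
X = np.random.randn(S, N, D)
X_flat = X.reshape(S * N, D)               # fold samples into one big batch
mean_flat = np.random.randn(S * N, Dout)   # stand-in for conditional(X_flat)[0]
mean = mean_flat.reshape(S, N, -1)         # unfold back to per-sample blocks
assert mean.shape == (S, N, Dout)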
losses.py (project: segmentation_DLMI, author: imatge-upc)
def wasserstein_disagreement_map(prediction, ground_truth, M):
    """
    Function to calculate the pixel-wise Wasserstein distance between the
    flattened pred_proba and the flattened labels (ground_truth) with respect
    to the distance matrix on the label space M.

    :param prediction: the logits after softmax
    :param ground_truth: segmentation ground_truth
    :param M: distance matrix on the label space
    :return: the pixelwise distance map (wass_dis_map)
    """
    # pixel-wise Wasserstein distance (W) between flat_pred_proba and flat_labels
    # wrt the distance matrix on the label space M
    n_classes = K.int_shape(prediction)[-1]
    # unstack_labels = tf.unstack(ground_truth, axis=-1)
    ground_truth = tf.cast(ground_truth, dtype=tf.float64)
    # unstack_pred = tf.unstack(prediction, axis=-1)
    prediction = tf.cast(prediction, dtype=tf.float64)
    # print("shape of M", M.shape, "unstacked labels", unstack_labels,
    #       "unstacked pred" ,unstack_pred)
    # W is a weighting sum of all pairwise correlations (pred_ci x labels_cj)
    pairwise_correlations = []
    for i in range(n_classes):
        for j in range(n_classes):
            pairwise_correlations.append(
                M[i, j] * tf.multiply(prediction[:,i], ground_truth[:,j]))
    wass_dis_map = tf.add_n(pairwise_correlations)
    return wass_dis_map
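A NumPy sketch of the same weighted pairwise sum (hypothetical 3-class toy inputs; with the 0/1 ground distance M = 1 - I, the map reduces to 1 minus the probability assigned to the true class):

import numpy as np

n_classes, n_pixels = 3, 4
pred = np.random.dirichlet(np.ones(n_classes), size=n_pixels)       # rows sum to 1
gt = np.eye(n_classes)[np.random.randint(0, n_classes, n_pixels)]   # one-hot labels
M = 1.0 - np.eye(n_classes)                                         # 0/1 ground distance

# sum over i, j of M[i, j] * pred[:, i] * gt[:, j], computed per pixel
wass_map = np.einsum('ij,pi,pj->p', M, pred, gt)
print(np.allclose(wass_map, 1.0 - np.sum(pred * gt, axis=1)))       # True for M = 1 - I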

