Example source code for Python's floor()

Source file: metrics.py (project: segmentation_DLMI, author: imatge-upc)
def recall_3(y_true, y_pred):
    y_pred_decision = tf.floor(y_pred / K.max(y_pred, axis=4, keepdims=True))
    mask_true = y_true[:, :, :, :, 3]
    mask_pred = y_pred_decision[:, :, :, :, 3]

    y_sum = K.sum(mask_true * mask_pred)

    return (y_sum + K.epsilon()) / (K.sum(mask_true) + K.epsilon())
Source file: metrics.py (project: segmentation_DLMI, author: imatge-upc)
def recall_4(y_true, y_pred):
    y_pred_decision = tf.floor(y_pred / K.max(y_pred, axis=4, keepdims=True))
    mask_true = y_true[:, :, :, :, 4]
    mask_pred = y_pred_decision[:, :, :, :, 4]

    y_sum = K.sum(mask_true * mask_pred)

    return (y_sum + K.epsilon()) / (K.sum(mask_true) + K.epsilon())


# -------------------------- Masked metrics --------------------------------
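These recall metrics use a floor trick to turn soft class scores into a hard one-hot decision: dividing by the per-voxel maximum along the class axis maps the winning class to exactly 1.0 and every other class to a value below 1, so tf.floor zeroes the losers, and the ratio of sums then gives recall for the selected class. A minimal NumPy sketch of the same arithmetic, with made-up scores for two voxels over five classes:

import numpy as np

# Hypothetical softmax-like scores: 2 voxels, 5 classes.
y_pred = np.array([[0.10, 0.20, 0.10, 0.50, 0.10],
                   [0.05, 0.70, 0.10, 0.10, 0.05]])

# Divide by the per-voxel maximum, then floor: only the argmax survives as 1.
decision = np.floor(y_pred / y_pred.max(axis=-1, keepdims=True))
print(decision)   # row 0 selects class 3, row 1 selects class 1

Note that this is equivalent to argmax only when the maximum is unique; an exact tie would produce more than one 1 per voxel.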
Source file: rnn.py (project: seq2seq, author: eske)
def _dropout(values, recurrent_noise, keep_prob):
        def dropout(index, value, noise):
            random_tensor = keep_prob + noise
            binary_tensor = tf.floor(random_tensor)
            ret = tf.div(value, keep_prob) * binary_tensor
            ret.set_shape(value.get_shape())
            return ret

        return DropoutGRUCell._enumerated_map_structure(dropout, values, recurrent_noise)
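This recurrent dropout mask is the usual "inverted dropout" construction: floor(keep_prob + u), with u uniform in [0, 1), equals 1 with probability keep_prob, and dividing the surviving values by keep_prob keeps the expected activation unchanged. A rough NumPy sketch of that effect, with arbitrary activation values:

import numpy as np

rng = np.random.default_rng(0)
keep_prob = 0.8
value = rng.normal(loc=2.0, scale=0.5, size=(1000, 128))   # made-up activations

# Binary mask: 1 with probability keep_prob, 0 otherwise.
binary = np.floor(keep_prob + rng.uniform(size=value.shape))
dropped = value / keep_prob * binary

print(value.mean(), dropped.mean())   # both close to 2.0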
Source file: effects.py (project: py-noisemaker, author: aayars)
def normalize(tensor):
    """
    Squeeze the given Tensor into a range between 0 and 1.

    :param Tensor tensor: An image tensor.
    :return: Tensor
    """

    floor = tf.reduce_min(tensor)
    ceil = tf.reduce_max(tensor)

    return (tensor - floor) / (ceil - floor)
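normalize is plain min-max scaling: subtract the minimum and divide by the range, which maps the smallest value to 0 and the largest to 1 (a constant tensor would divide by zero). The same computation in NumPy, on made-up data:

import numpy as np

tensor = np.array([[-2.0, 0.0],
                   [ 1.0, 6.0]])
floor_, ceil_ = tensor.min(), tensor.max()
print((tensor - floor_) / (ceil_ - floor_))
# maps -2 -> 0.0, 0 -> 0.25, 1 -> 0.375, 6 -> 1.0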
Source file: layers.py (project: text-gan-tensorflow, author: tokestermw)
def _apply_dropout_mask(tensor_shape, keep_prob=1.0, normalize=True):
    random_tensor = keep_prob + tf.random_uniform(tensor_shape, dtype=tf.float32)
    binary_mask = tf.floor(random_tensor)
    if normalize:
        binary_mask = tf.reciprocal(keep_prob) * binary_mask
    return binary_mask
Source file: rbm_chords.py (project: tensorflow-music-generator, author: burliEnterprises)
def sample(probs):
    # Takes in a vector of probabilities, and returns a random vector of 0s and 1s sampled from the input vector
    return tf.floor(probs + tf.random_uniform(tf.shape(probs), 0, 1))


# This function runs the gibbs chain. We will call this function in two places:
#    - When we define the training update step
#    - When we sample our music segments from the trained RBM
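Adding uniform noise in [0, 1) to a probability vector and flooring is a standard way to draw Bernoulli samples without a comparison op: floor(p + u) is 1 exactly when u >= 1 - p, which happens with probability p. A small NumPy check, with arbitrary probabilities and sample count:

import numpy as np

rng = np.random.default_rng(42)
probs = np.array([0.1, 0.5, 0.9])

# Draw 100k Bernoulli vectors and compare the empirical frequencies with probs.
draws = np.floor(probs + rng.uniform(size=(100_000, 3)))
print(draws.mean(axis=0))   # approximately [0.1, 0.5, 0.9]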
Source file: rbm_chords.py (project: Music_Generator_Demo, author: llSourcell)
def sample(probs):
    #Takes in a vector of probabilities, and returns a random vector of 0s and 1s sampled from the input vector
    return tf.floor(probs + tf.random_uniform(tf.shape(probs), 0, 1))

#This function runs the gibbs chain. We will call this function in two places:
#    - When we define the training update step
#    - When we sample our music segments from the trained RBM
Source file: nasnet_utils.py (project: Classification_Nets, author: BobLiu20)
def drop_path(net, keep_prob, is_training=True):
  """Drops out a whole example hiddenstate with the specified probability."""
  if is_training:
    batch_size = tf.shape(net)[0]
    noise_shape = [batch_size, 1, 1, 1]
    random_tensor = keep_prob
    random_tensor += tf.random_uniform(noise_shape, dtype=tf.float32)
    binary_tensor = tf.floor(random_tensor)
    net = tf.div(net, keep_prob) * binary_tensor
  return net
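drop_path applies the same floor-based mask, but with noise_shape [batch, 1, 1, 1], so a whole example is either kept (and rescaled by 1/keep_prob) or zeroed, rather than individual activations. A NumPy sketch of the broadcasting, with a made-up NHWC batch:

import numpy as np

rng = np.random.default_rng(3)
keep_prob = 0.7
net = rng.normal(size=(8, 4, 4, 16))          # fake NHWC batch

mask = np.floor(keep_prob + rng.uniform(size=(8, 1, 1, 1)))
out = net / keep_prob * mask                  # broadcasts over H, W, C

# Each example is either entirely zero or entirely rescaled.
print(mask[:, 0, 0, 0])
print([np.allclose(out[i], 0) or np.allclose(out[i], net[i] / keep_prob)
       for i in range(8)])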
Source file: core_test.py (project: lsdc, author: febert)
def setUp(self):
    super(CoreUnaryOpsTest, self).setUp()

    self.ops = [
        ('abs', operator.abs, tf.abs, core.abs_function),
        ('neg', operator.neg, tf.neg, core.neg),
        # TODO(shoyer): add unary + to core TensorFlow
        ('pos', None, None, None),
        ('sign', None, tf.sign, core.sign),
        ('reciprocal', None, tf.reciprocal, core.reciprocal),
        ('square', None, tf.square, core.square),
        ('round', None, tf.round, core.round_function),
        ('sqrt', None, tf.sqrt, core.sqrt),
        ('rsqrt', None, tf.rsqrt, core.rsqrt),
        ('log', None, tf.log, core.log),
        ('exp', None, tf.exp, core.exp),
        ('log', None, tf.log, core.log),
        ('ceil', None, tf.ceil, core.ceil),
        ('floor', None, tf.floor, core.floor),
        ('cos', None, tf.cos, core.cos),
        ('sin', None, tf.sin, core.sin),
        ('tan', None, tf.tan, core.tan),
        ('acos', None, tf.acos, core.acos),
        ('asin', None, tf.asin, core.asin),
        ('atan', None, tf.atan, core.atan),
        ('lgamma', None, tf.lgamma, core.lgamma),
        ('digamma', None, tf.digamma, core.digamma),
        ('erf', None, tf.erf, core.erf),
        ('erfc', None, tf.erfc, core.erfc),
        ('lgamma', None, tf.lgamma, core.lgamma),
    ]
    total_size = np.prod([v.size for v in self.original_lt.axes.values()])
    self.test_lt = core.LabeledTensor(
        tf.cast(self.original_lt, tf.float32) / total_size,
        self.original_lt.axes)
Source file: cnn_model.py (project: num-seq-recognizer, author: gmlove)
def _setup_net(self):
    with tf.variable_scope('bbox'):
      inputs_0 = tf.Variable(trainable=False, validate_shape=(None, self.config.size[0], self.config.size[1], 3))
      self.bbox_infer_model._setup_input(inputs_0)
      assign_op = tf.assign(inputs_0, self.data_batches)
      with tf.control_dependencies([assign_op]):
        self.bbox_infer_model._setup_net()

    def crop_bbox(width, height, input, bbox):
      expand_rate = 0.1
      top = tf.maximum(tf.floor(bbox[1] * height - height * expand_rate), 0)
      bottom = tf.minimum(tf.floor((bbox[1] + bbox[3]) * height + height * expand_rate), height)
      left = tf.maximum(tf.floor(bbox[0] * width - width * expand_rate), 0)
      right = tf.minimum((tf.floor(bbox[0] + bbox[2]) * width + width * expand_rate), width)
      top = tf.cond(top >= bottom, lambda: tf.identity(0), lambda: tf.identity(top))
      bottom = tf.cond(top >= bottom, lambda: tf.identity(height), lambda: tf.identity(bottom))
      left = tf.cond(left >= right, lambda: tf.identity(0), lambda: tf.identity(left))
      right = tf.cond(left >= right, lambda: tf.identity(width), lambda: tf.identity(right))
      return input[top:bottom, left:right, :]

    with tf.variable_scope('nsr'):
      origin_width, origin_height = 512, 512
      inputs_1 = tf.Variable(trainable=False, validate_shape=(None, self.config.size[0], self.config.size[1], 3))
      self.infer_model._setup_input(inputs_1)
      inputs = self.bbox_infer_model.inputs
      bboxes = self.bbox_infer_model.model_output
      inputs = tf.stack([crop_bbox(origin_width, origin_height, inputs[i], bboxes[i]) for i in range(self.config.batch_size)])
      inputs = tf.image.resize_images(inputs, self.config.size)
      assign_op = tf.assign(inputs_1, inputs)
      with tf.control_dependencies([assign_op]):
        self.infer_model._setup_net()

    vars_dict = self._vars()
    assign_ops = assign_vars(vars_dict, self.bbox_vars_dict, 'bbox')
    assign_ops.extend(assign_vars(vars_dict, self.vars_dict, 'nsr'))
    with tf.control_dependencies(assign_ops):
      self.output = stack_output(self.max_number_length, self.length_output, self.numbers_output)
Source file: dcgan_w_var2.py (project: deeplearning, author: zxjzxj9)
def _build_graph(self):
        with tf.variable_scope("generator") as scope:
            print("### Print Generator Intermediate Parameter")
            self.prior = tf.placeholder(dtype=tf.float32, shape=(None, 100), name="prior_gen")
            self.is_training = tf.placeholder(dtype=tf.bool, shape = (), name="training_flag")
            prior_proj = tf.contrib.layers.fully_connected(inputs=self.prior, num_outputs=4*4*1024, 
                                                           activation_fn=None, scope="prior_projection")
            prior_proj = tf.contrib.layers.batch_norm(inputs=prior_proj, center=True, scale=True, activation_fn=tf.nn.leaky_relu, 
                                                  is_training= self.is_training, scope="bn0")
            conv0 = tf.reshape(prior_proj, (-1, 4, 4, 1024))
            conv1 = tf.contrib.layers.convolution2d_transpose(inputs=conv0, num_outputs=512, activation_fn=None,
                                                          kernel_size=(5,5), stride=(2,2), padding="SAME",scope="deconv1")
            conv1 = tf.contrib.layers.batch_norm(inputs=conv1, center=True, scale=True, activation_fn=tf.nn.leaky_relu, 
                                             is_training= self.is_training, scope="bn1")
            print(conv1.shape)
            conv2 = tf.contrib.layers.convolution2d_transpose(inputs=conv1, num_outputs=256, activation_fn=None,
                                                          kernel_size=(5,5), stride=(2,2), padding="SAME",scope="deconv2")
            conv2 = tf.contrib.layers.batch_norm(inputs=conv2, center=True, scale=True, activation_fn=tf.nn.leaky_relu, 
                                             is_training= self.is_training, scope="bn2")
            print(conv2.shape)
            conv3 = tf.contrib.layers.convolution2d_transpose(inputs=conv2, num_outputs=128, activation_fn=None,
                                                          kernel_size=(5,5), stride=(2,2), padding="SAME",scope="deconv3")
            conv3 = tf.contrib.layers.batch_norm(inputs=conv3, center=True, scale=True, activation_fn=tf.nn.leaky_relu, 
                                             is_training= self.is_training, scope="bn3")
            print(conv3.shape)
            conv4 = tf.contrib.layers.convolution2d_transpose(inputs=conv3, num_outputs=3, activation_fn=None,
                                                          kernel_size=(5,5), stride=(2,2), padding="SAME",scope="deconv4")
            self.gen_img = tf.nn.tanh(conv4)
            self.gen_img_out = tf.cast(x= tf.floor(self.gen_img*128.0 + 128.0), dtype=tf.int32)
            print(conv4.shape)
            print("### End Print Generator Intermediate Parameter")

# tf.reset_default_graph()
# g = Generator()
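The generator output goes through tanh, so gen_img lies in (-1, 1); floor(x * 128 + 128) then maps it to integer pixel values in [0, 255] (an input of exactly 1.0 would give 256, but tanh only approaches 1). A quick NumPy check of the mapping on a few sample values chosen for illustration:

import numpy as np

x = np.array([-1.0, -0.5, 0.0, 0.5, 0.999])     # tanh-range values
print(np.floor(x * 128.0 + 128.0).astype(np.int32))
# [  0  64 128 192 255]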
Source file: dcgan_w_gp.py (project: deeplearning, author: zxjzxj9)
def _build_graph(self):
        with tf.variable_scope("generator") as scope:
            print("### Print Generator Intermediate Parameter")
            self.prior = tf.placeholder(dtype=tf.float32, shape=(None, 100), name="prior_gen")
            self.is_training = tf.placeholder(dtype=tf.bool, shape = (), name="training_flag")
            prior_proj = tf.contrib.layers.fully_connected(inputs=self.prior, num_outputs=4*4*1024, 
                                                           activation_fn=None, scope="prior_projection")
            prior_proj = tf.contrib.layers.batch_norm(inputs=prior_proj, center=True, scale=True, activation_fn=tf.nn.leaky_relu, 
                                                  is_training= self.is_training, scope="bn0")
            conv0 = tf.reshape(prior_proj, (-1, 4, 4, 1024))
            conv1 = tf.contrib.layers.convolution2d_transpose(inputs=conv0, num_outputs=512, activation_fn=None,
                                                          kernel_size=(5,5), stride=(2,2), padding="SAME",scope="deconv1")
            conv1 = tf.contrib.layers.batch_norm(inputs=conv1, center=True, scale=True, activation_fn=tf.nn.leaky_relu, 
                                             is_training= self.is_training, scope="bn1")
            print(conv1.shape)
            conv2 = tf.contrib.layers.convolution2d_transpose(inputs=conv1, num_outputs=256, activation_fn=None,
                                                          kernel_size=(5,5), stride=(2,2), padding="SAME",scope="deconv2")
            conv2 = tf.contrib.layers.batch_norm(inputs=conv2, center=True, scale=True, activation_fn=tf.nn.leaky_relu, 
                                             is_training= self.is_training, scope="bn2")
            print(conv2.shape)
            conv3 = tf.contrib.layers.convolution2d_transpose(inputs=conv2, num_outputs=128, activation_fn=None,
                                                          kernel_size=(5,5), stride=(2,2), padding="SAME",scope="deconv3")
            conv3 = tf.contrib.layers.batch_norm(inputs=conv3, center=True, scale=True, activation_fn=tf.nn.leaky_relu, 
                                             is_training= self.is_training, scope="bn3")
            print(conv3.shape)
            conv4 = tf.contrib.layers.convolution2d_transpose(inputs=conv3, num_outputs=3, activation_fn=None,
                                                          kernel_size=(5,5), stride=(2,2), padding="SAME",scope="deconv4")
            self.gen_img = tf.nn.tanh(conv4)
            self.gen_img_out = tf.cast(x= tf.floor(self.gen_img*128.0 + 128.0), dtype=tf.int32)
            print(conv4.shape)
            print("### End Print Generator Intermediate Parameter")

# tf.reset_default_graph()
# g = Generator()
Source file: dcgan_var2.py (project: deeplearning, author: zxjzxj9)
def _build_graph(self):
        with tf.variable_scope("generator") as scope:
            print("### Print Generator Intermediate Parameter")
            self.prior = tf.placeholder(dtype=tf.float32, shape=(None, 100), name="prior_gen")
            self.is_training = tf.placeholder(dtype=tf.bool, shape = (), name="training_flag")
            prior_proj = tf.contrib.layers.fully_connected(inputs=self.prior, num_outputs=4*4*1024, 
                                                           activation_fn=None, scope="prior_projection")
            prior_proj = tf.contrib.layers.batch_norm(inputs=prior_proj, center=True, scale=True, activation_fn=tf.nn.leaky_relu, 
                                                  is_training= self.is_training, scope="bn0")
            conv0 = tf.reshape(prior_proj, (-1, 4, 4, 1024))
            conv1 = tf.contrib.layers.convolution2d_transpose(inputs=conv0, num_outputs=512, activation_fn=None,
                                                          kernel_size=(5,5), stride=(2,2), padding="SAME",scope="deconv1")
            conv1 = tf.contrib.layers.batch_norm(inputs=conv1, center=True, scale=True, activation_fn=tf.nn.leaky_relu, 
                                             is_training= self.is_training, scope="bn1")
            print(conv1.shape)
            conv2 = tf.contrib.layers.convolution2d_transpose(inputs=conv1, num_outputs=256, activation_fn=None,
                                                          kernel_size=(5,5), stride=(2,2), padding="SAME",scope="deconv2")
            conv2 = tf.contrib.layers.batch_norm(inputs=conv2, center=True, scale=True, activation_fn=tf.nn.leaky_relu, 
                                             is_training= self.is_training, scope="bn2")
            print(conv2.shape)
            conv3 = tf.contrib.layers.convolution2d_transpose(inputs=conv2, num_outputs=128, activation_fn=None,
                                                          kernel_size=(5,5), stride=(2,2), padding="SAME",scope="deconv3")
            conv3 = tf.contrib.layers.batch_norm(inputs=conv3, center=True, scale=True, activation_fn=tf.nn.leaky_relu, 
                                             is_training= self.is_training, scope="bn3")
            print(conv3.shape)
            conv4 = tf.contrib.layers.convolution2d_transpose(inputs=conv3, num_outputs=3, activation_fn=None,
                                                          kernel_size=(5,5), stride=(2,2), padding="SAME",scope="deconv4")
            self.gen_img = tf.nn.tanh(conv4)
            self.gen_img_out = tf.cast(x= tf.floor(self.gen_img*128.0 + 128.0), dtype=tf.int32)
            print(conv4.shape)
            print("### End Print Generator Intermediate Parameter")


# tf.reset_default_graph()
# g = Generator()
Source file: span_prediction_ops.py (project: document-qa, author: allenai)
def to_unpacked_coordinates(ix, l, bound):
    ix = tf.cast(ix, tf.int32)
    # You can actually compute the lens in closed form:
    # lens = tf.floor(0.5 * (-tf.sqrt(4 * tf.square(l) + 4 * l - 8 * ix + 1) + 2 * l + 1))
    # but it is very ugly and rounding errors could cause problems, so this approach seems safer
    lens = []
    for i in range(bound):
        lens.append(tf.fill((l - i,), i))
    lens = tf.concat(lens, axis=0)
    lens = tf.gather(lens, ix)
    answer_start = ix - l * lens + lens * (lens - 1) // 2
    return tf.stack([answer_start, answer_start+lens], axis=1)
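The packing implied by this code enumerates spans by length first (length 0, then 1, up to bound - 1) and by start position within each length, so an index ix with span length lens satisfies ix = l*lens - lens*(lens-1)/2 + start, which is exactly what the subtraction inverts. A small NumPy check of the formula against an explicit enumeration, with l and bound chosen arbitrarily:

import numpy as np

l, bound = 5, 3
spans = [(start, start + length)                      # explicit enumeration
         for length in range(bound)
         for start in range(l - length)]

lens = np.concatenate([np.full(l - i, i) for i in range(bound)])
ix = np.arange(len(spans))
answer_start = ix - l * lens[ix] + lens[ix] * (lens[ix] - 1) // 2
recovered = np.stack([answer_start, answer_start + lens[ix]], axis=1)

print(np.array_equal(recovered, np.array(spans)))     # True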
Source file: rbm_chords.py (project: Happy_Music_Generator, author: rahuldeo2047)
def sample(probs):
    #Takes in a vector of probabilities, and returns a random vector of 0s and 1s sampled from the input vector
    return tf.floor(probs + tf.random_uniform(tf.shape(probs), 0, 1))

#This function runs the gibbs chain. We will call this function in two places:
#    - When we define the training update step
#    - When we sample our music segments from the trained RBM
Source file: music_generator.py (project: Music_AI, author: jw84)
def sample(probs):
    #Takes in a vector of probabilities, and returns a random vector of 0s and 1s sampled from the input vector
    return tf.floor(probs + tf.random_uniform(tf.shape(probs), 0, 1))

#This function runs the gibbs chain. We will call this function in two places:
#    - When we define the training update step
#    - When we sample our music segments from the trained RBM
Source file: ops.py (project: DMNN, author: magnux)
def dk_mod(x, y):
    """Differentiable mod, Donald Knuth style
    Args
        x: first argument
        y: second argument
    Returns
        mod between x and y
    """
    return x - y * tf.floor(x / y)

# Register the gradient for the mod operation. tf.mod() does not have a gradient implemented.
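x - y * floor(x / y) is the floored ("Knuth") modulo, whose result takes the sign of the divisor, matching Python's % operator rather than C's truncated remainder. A quick NumPy comparison on a few sign combinations:

import numpy as np

x = np.array([ 7.0, -7.0,  7.0, -7.0])
y = np.array([ 3.0,  3.0, -3.0, -3.0])

dk = x - y * np.floor(x / y)
print(dk)       # [ 1.  2. -2. -1.]
print(x % y)    # same values: NumPy's % is also a floored modulo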
Source file: distributions.py (project: iaf, author: openai)
def discretized_logistic(mean, logscale, binsize=1 / 256.0, sample=None):
    scale = tf.exp(logscale)
    sample = (tf.floor(sample / binsize) * binsize - mean) / scale
    logp = tf.log(tf.sigmoid(sample + binsize / scale) - tf.sigmoid(sample) + 1e-7)
    return tf.reduce_sum(logp, [1, 2, 3])
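discretized_logistic treats each quantized sample as falling into a bin of width binsize and scores the probability mass a logistic(mean, scale) distribution assigns to that bin, i.e. CDF(right edge) - CDF(left edge), where the logistic CDF is the sigmoid of the standardized value. A NumPy sketch of the per-element term for a single scalar; the parameter and sample values here are made up:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

mean, logscale, binsize = 0.1, np.log(0.05), 1 / 256.0
scale = np.exp(logscale)
x = 0.1234                                    # arbitrary sample value

left = np.floor(x / binsize) * binsize        # left edge of the bin containing x
z = (left - mean) / scale
bin_prob = sigmoid(z + binsize / scale) - sigmoid(z)   # CDF(right) - CDF(left)
print(bin_prob, np.log(bin_prob + 1e-7))      # bin probability mass and its log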
Source file: backend_tensorflow.py (project: statestream, author: VolkerFischer)
def floor(x):
    return tf.floor(x)
Source file: image_sample.py (project: ternarynet, author: czhu95)
def test_sample(self):
        import numpy as np
        h, w = 3, 4
        def np_sample(img, coords):
            # a reference implementation
            coords = np.maximum(coords, 0)
            coords = np.minimum(coords,
                                np.array([img.shape[1]-1, img.shape[2]-1]))
            xs = coords[:,:,:,1].reshape((img.shape[0], -1))
            ys = coords[:,:,:,0].reshape((img.shape[0], -1))

            ret = np.zeros((img.shape[0], coords.shape[1], coords.shape[2],
                            img.shape[3]), dtype='float32')
            for k in range(img.shape[0]):
                xss, yss = xs[k], ys[k]
                ret[k,:,:,:] = img[k,yss,xss,:].reshape((coords.shape[1],
                                                         coords.shape[2], 3))
            return ret

        bimg = np.random.rand(2, h, w, 3).astype('float32')

        #mat = np.array([
            #[[[1,1], [1.2,1.2]], [[-1, -1], [2.5, 2.5]]],
            #[[[1,1], [1.2,1.2]], [[-1, -1], [2.5, 2.5]]]
        #], dtype='float32')  #2x2x2x2
        mat = (np.random.rand(2, 5, 5, 2) - 0.2) * np.array([h + 3, w + 3])
        true_res = np_sample(bimg, np.floor(mat + 0.5).astype('int32'))

        inp, mapping = self.make_variable(bimg, mat)
        output = sample(inp, tf.cast(tf.floor(mapping+0.5), tf.int32))
        res = self.run_variable(output)

        self.assertTrue((res == true_res).all())
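Both sides of this test round the sampling coordinates with floor(x + 0.5), i.e. round half towards +infinity, which differs from np.round's banker's rounding only at exact .5 values; since the NumPy reference and the TensorFlow path apply the same rule, the comparison exercises only the nearest-neighbour lookup. For reference, on arbitrary values:

import numpy as np

x = np.array([1.2, 1.5, 2.5, -1.5])
print(np.floor(x + 0.5))   # [ 1.  2.  3. -1.]  round half towards +inf
print(np.round(x))         # [ 1.  2.  2. -2.]  banker's rounding at .5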

