python函数batch_to_space()的实例源码

model_tmmd.py 文件源码 项目:opt-mmd 作者: dougalsutherland 项目源码 文件源码 阅读 24 收藏 0 点赞 0 评论 0
def imageRearrange(self, image, block=4):
        """Tile the first block*block images of a batch into one grid image.

        Slices `image` (NHWC) down to block**2 entries, folds the batch axis
        into the spatial dimensions with batch_to_space, and rearranges the
        result into a single [1, output_size*block, output_size*block, c_dim]
        montage (e.g. for an image summary).
        """
        # Keep only the first block*block images of the batch.
        tiles = tf.slice(image, [0, 0, 0, 0], [block * block, -1, -1, -1])
        # Fold the batch entries into the spatial dimensions (no cropping).
        spaced = tf.batch_to_space(tiles, [[0, 0], [0, 0]], block)
        # Separate block axes from spatial axes, swap each block axis next to
        # its matching spatial axis, then collapse into one big image.
        split = tf.reshape(
            spaced,
            [self.output_size, block, self.output_size, block, self.c_dim])
        ordered = tf.transpose(split, [1, 0, 3, 2, 4])
        return tf.reshape(
            ordered,
            [1, self.output_size * block, self.output_size * block, self.c_dim])
model_mmd_fm.py 文件源码 项目:opt-mmd 作者: dougalsutherland 项目源码 文件源码 阅读 33 收藏 0 点赞 0 评论 0
def imageRearrange(self, image, block=4):
        """Arrange the first block**2 batch images into a single grid image.

        Returns a [1, output_size*block, output_size*block, c_dim] tensor in
        which the selected images are tiled in a block x block layout.
        """
        # Restrict the batch to exactly block*block images.
        selected = tf.slice(image, [0, 0, 0, 0], [block * block, -1, -1, -1])
        # batch_to_space interleaves batch entries into the spatial dims.
        interleaved = tf.batch_to_space(selected, [[0, 0], [0, 0]], block)
        # Expose the block sub-axes, reorder them against the spatial axes,
        # and flatten everything back down to one image.
        expanded = tf.reshape(
            interleaved,
            [self.output_size, block, self.output_size, block, self.c_dim])
        permuted = tf.transpose(expanded, [1, 0, 3, 2, 4])
        return tf.reshape(
            permuted,
            [1, self.output_size * block, self.output_size * block, self.c_dim])
model_mmd.py 文件源码 项目:opt-mmd 作者: dougalsutherland 项目源码 文件源码 阅读 29 收藏 0 点赞 0 评论 0
def imageRearrange(self, image, block=4):
        """Build a block x block montage from the first block**2 images.

        The batch axis is folded into the spatial axes via batch_to_space and
        the result reshaped to [1, output_size*block, output_size*block, c_dim].
        """
        # Take exactly block*block images off the front of the batch.
        head = tf.slice(image, [0, 0, 0, 0], [block * block, -1, -1, -1])
        # Merge batch entries into the spatial dimensions (zero crops).
        merged = tf.batch_to_space(head, [[0, 0], [0, 0]], block)
        # Unpack the block factors, align them with their spatial axes, then
        # collapse into a single tiled image.
        unpacked = tf.reshape(
            merged,
            [self.output_size, block, self.output_size, block, self.c_dim])
        aligned = tf.transpose(unpacked, [1, 0, 3, 2, 4])
        return tf.reshape(
            aligned,
            [1, self.output_size * block, self.output_size * block, self.c_dim])
layers.py 文件源码 项目:tf_img_tech 作者: david-berthelot 项目源码 文件源码 阅读 29 收藏 0 点赞 0 评论 0
def unboxn(vin, n):
    """vin = (batch, h, w, depth); returns vout = (batch, n*h, n*w, depth),
    each pixel duplicated n*n times.

    Implemented as n**2 batch-axis copies followed by batch_to_space, as a
    poor man's replacement for tf.tile (required for Adversarial Training
    support).
    """
    shp = tf.shape(vin)
    # Stack n**2 replicas along the batch dimension.
    stacked = tf.concat(0, [vin] * (n ** 2))
    stacked = tf.reshape(stacked, [shp[0] * (n ** 2), shp[1], shp[2], shp[3]])
    # Fold the replicated batch entries back into the spatial dimensions,
    # which duplicates every pixel n times along each spatial axis.
    return tf.batch_to_space(stacked, [[0, 0], [0, 0]], n)
layers.py 文件源码 项目:tf_img_tech 作者: david-berthelot 项目源码 文件源码 阅读 28 收藏 0 点赞 0 评论 0
def __call__(self, vin):
        # TODO: replace with atrous_2d
        # Dilated convolution via the space_to_batch trick: scatter spatial
        # positions into the batch axis, apply the dense convolution, then
        # gather the results back to their spatial locations.
        packed = tf.space_to_batch(vin, [[0, 0], [0, 0]], self.dilation)
        convolved = LayerConv.__call__(self, packed)
        return tf.batch_to_space(convolved, [[0, 0], [0, 0]], self.dilation)
data.py 文件源码 项目:BinaryNet.tf 作者: itayhubara 项目源码 文件源码 阅读 32 收藏 0 点赞 0 评论 0
def group_batch_images(x):
    """Tile a batch of images into a square grid via batch_to_space.

    Uses the largest num_cols such that num_cols**2 fits in the batch, drops
    the surplus images, and folds the remaining batch entries into the
    spatial dimensions.
    """
    batch = x.get_shape().as_list()[0]
    # Side length of the largest square grid the batch can fill.
    num_cols = int(math.sqrt(batch))
    grid = tf.slice(x, [0, 0, 0, 0], [num_cols ** 2, -1, -1, -1])
    return tf.batch_to_space(grid, [[0, 0], [0, 0]], num_cols)
util.py 文件源码 项目:inverse-compositional-STN 作者: ericlin79119 项目源码 文件源码 阅读 24 收藏 0 点赞 0 评论 0
def imageSummary(opt, image, tag, H, W):
    """Create a tf image summary showing the first visBlockSize**2 images of
    `image` tiled into a single blockSize x blockSize grid of HxW tiles."""
    bs = opt.visBlockSize
    # Fold the first bs**2 batch entries into the spatial dimensions.
    grid = tf.batch_to_space(image[:bs ** 2], crops=[[0, 0], [0, 0]], block_size=bs)
    # Separate block/spatial axes, reorder so the tiles lay out row-major,
    # then flatten to one image with a leading batch dim of 1.
    blocks = tf.reshape(grid, [H, bs, W, bs, -1])
    tiled = tf.reshape(tf.transpose(blocks, [1, 0, 3, 2, 4]),
                       [1, H * bs, W * bs, -1])
    # Scale to uint8 pixels (assumes values in [0,1] — TODO confirm).
    tiled = tf.cast(tiled * 255, tf.uint8)
    return tf.summary.image(tag, tiled)

# make image summary from image batch (mean/variance)
util.py 文件源码 项目:inverse-compositional-STN 作者: ericlin79119 项目源码 文件源码 阅读 28 收藏 0 点赞 0 评论 0
def imageSummary(opt, image, tag, H, W):
    """Build an image summary that tiles the first visBlockSize**2 images
    into one square montage of HxW tiles."""
    n = opt.visBlockSize
    # batch_to_space turns the first n**2 batch images into spatial blocks.
    folded = tf.batch_to_space(image[:n ** 2], crops=[[0, 0], [0, 0]], block_size=n)
    # Expose the block sub-axes and permute them so images tile row-major.
    permuted = tf.transpose(tf.reshape(folded, [H, n, W, n, -1]),
                            [1, 0, 3, 2, 4])
    montage = tf.reshape(permuted, [1, H * n, W * n, -1])
    # Convert to uint8 for display (assumes values in [0,1] — TODO confirm).
    montage = tf.cast(montage * 255, tf.uint8)
    return tf.summary.image(tag, montage)

# make image summary from image batch (mean/variance)
model.py 文件源码 项目:pgnet 作者: galeone 项目源码 文件源码 阅读 27 收藏 0 点赞 0 评论 0
def atrous_conv2d(value, filters, rate, name):
    """ Returns the result of a convolution with holes from value and filters.
    Do not use the tensorflow implementation because of issues with shape definition
    of the result. The semantic is the same.
    It uses only the "VALID" padding.

    Warning: this implementation is PGNet specific. It's used only to define the last
    convolutional layer and therefore depends on pgnet constants
    """
    # No caller-requested padding; only the rate-alignment padding below.
    pad_top, pad_bottom, pad_left, pad_right = 0, 0, 0, 0

    in_height = value.get_shape()[1].value + pad_top + pad_bottom
    in_width = value.get_shape()[2].value + pad_left + pad_right

    # Extra bottom/right padding so that rate divides the height and width
    # of the input.
    extra_bottom = (rate - in_height % rate) % rate
    extra_right = (rate - in_width % rate) % rate

    # The paddings argument to space_to_batch includes both padding
    # components.
    paddings = ((pad_top, pad_bottom + extra_bottom),
                (pad_left, pad_right + extra_right))

    batched = tf.space_to_batch(
        input=value, paddings=paddings, block_size=rate)

    # Dense VALID convolution on the space_to_batch'ed input is equivalent
    # to an atrous convolution on the original input.
    convolved = tf.nn.conv2d(
        input=batched,
        filter=filters,
        strides=(1, LAST_CONV_OUTPUT_STRIDE, LAST_CONV_OUTPUT_STRIDE, 1),
        padding="VALID",
        name=name)

    # The crops argument to batch_to_space is just the extra padding
    # component added above.
    crops = ((0, extra_bottom), (0, extra_right))

    return tf.batch_to_space(
        input=convolved, crops=crops, block_size=rate)


问题


面经


文章

微信
公众号

扫码关注公众号