def _meshgrid(self, height, width):
    with tf.variable_scope('_meshgrid'):
        # This should be equivalent to:
        #  x_t, y_t = np.meshgrid(np.linspace(-1, 1, width),
        #                         np.linspace(-1, 1, height))
        #  ones = np.ones(np.prod(x_t.shape))
        #  grid = np.vstack([x_t.flatten(), y_t.flatten(), ones])
        x_t = tf.matmul(tf.ones(shape=tf.pack([height, 1])),
                        tf.transpose(tf.expand_dims(tf.linspace(-1.0, 1.0, width), 1), [1, 0]))
        y_t = tf.matmul(tf.expand_dims(tf.linspace(-1.0, 1.0, height), 1),
                        tf.ones(shape=tf.pack([1, width])))
        x_t_flat = tf.reshape(x_t, (1, -1))
        y_t_flat = tf.reshape(y_t, (1, -1))
        ones = tf.ones_like(x_t_flat)
        grid = tf.concat(0, [x_t_flat, y_t_flat, ones])
        return grid
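A runnable NumPy sketch of the equivalence claimed in the comment above (names are illustrative):

import numpy as np

def meshgrid_np(height, width):
    # Same 3 x (height*width) grid of homogeneous coordinates as above.
    x_t, y_t = np.meshgrid(np.linspace(-1, 1, width),
                           np.linspace(-1, 1, height))
    ones = np.ones(np.prod(x_t.shape))
    return np.vstack([x_t.flatten(), y_t.flatten(), ones])

grid = meshgrid_np(4, 3)  # shape (3, 12)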
Python linspace() usage examples

Source: Dense_Transformer_Networks_3D.py (project 3D_Dense_Transformer_Networks, author JohnYC1995)
def __init__(self, input_shape, control_points_ratio):
    self.num_batch = input_shape[0]
    self.depth = input_shape[1]
    self.height = input_shape[2]
    self.width = input_shape[3]
    self.num_channels = input_shape[4]
    self.out_height = self.height
    self.out_width = self.width
    self.out_depth = self.depth
    self.X_controlP_number = int(input_shape[3] / control_points_ratio)
    self.Y_controlP_number = int(input_shape[2] / control_points_ratio)
    self.Z_controlP_number = int(input_shape[1] / control_points_ratio)
    init_x = np.linspace(-5, 5, self.X_controlP_number)
    init_y = np.linspace(-5, 5, self.Y_controlP_number)
    init_z = np.linspace(-5, 5, self.Z_controlP_number)
    x_s = np.tile(init_x, [self.Y_controlP_number * self.Z_controlP_number])
    y_s = np.tile(np.repeat(init_y, self.X_controlP_number), [self.Z_controlP_number])
    z_s = np.repeat(init_z, self.X_controlP_number * self.Y_controlP_number)
    self.initial = np.array([x_s, y_s, z_s])
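A minimal NumPy check with illustrative sizes, confirming that this tile/repeat ordering is an 'ij'-indexed 3D meshgrid with x varying fastest and z slowest:

import numpy as np

nx, ny, nz = 4, 3, 2
ix, iy, iz = (np.linspace(-5, 5, n) for n in (nx, ny, nz))
x_s = np.tile(ix, ny * nz)
y_s = np.tile(np.repeat(iy, nx), nz)
z_s = np.repeat(iz, nx * ny)
zz, yy, xx = np.meshgrid(iz, iy, ix, indexing='ij')
assert np.allclose(x_s, xx.ravel())
assert np.allclose(y_s, yy.ravel())
assert np.allclose(z_s, zz.ravel())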
Source: Dense_Transformer_Networks_3D.py (project 3D_Dense_Transformer_Networks, author JohnYC1995)
def _local_Networks(self, input_dim, x):
    with tf.variable_scope('_local_Networks'):
        x = tf.reshape(x, [-1, self.height*self.width*self.depth*self.num_channels])
        W_fc_loc1 = weight_variable([self.height*self.width*self.depth*self.num_channels, 20])
        b_fc_loc1 = bias_variable([20])
        W_fc_loc2 = weight_variable([20, self.X_controlP_number*self.Y_controlP_number*self.Z_controlP_number*3])
        initial = self.initial.astype('float32')
        initial = initial.flatten()
        b_fc_loc2 = tf.Variable(initial_value=initial, name='b_fc_loc2')
        h_fc_loc1 = tf.nn.tanh(tf.matmul(x, W_fc_loc1) + b_fc_loc1)
        h_fc_loc2 = tf.nn.tanh(tf.matmul(h_fc_loc1, W_fc_loc2) + b_fc_loc2)
        # temp use
        if Debug:
            x = np.linspace(-1.0, 1.0, self.X_controlP_number)
            y = np.linspace(-1.0, 1.0, self.Y_controlP_number)
            z = np.linspace(-1.0, 1.0, self.Z_controlP_number)
            x_s = tf.tile(x, [self.Y_controlP_number*self.Z_controlP_number], 'float64')
            y_s = tf.tile(self._repeat(y, self.X_controlP_number, 'float64'), [self.Z_controlP_number])
            z_s = self._repeat(z, self.X_controlP_number*self.Y_controlP_number, 'float64')
            h_fc_loc2 = tf.concat([x_s, y_s, z_s], 0)
            h_fc_loc2 = tf.tile(h_fc_loc2, [self.num_batch])
            h_fc_loc2 = tf.reshape(h_fc_loc2, [self.num_batch, -1])
        # 2*(4*4*4)*3 -> (2, 192)
        return h_fc_loc2
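weight_variable and bias_variable are used above but not shown in this listing; a minimal sketch of the usual TF 1.x-era definitions (an assumption, not necessarily this project's exact helpers):

import tensorflow as tf

def weight_variable(shape):
    # small truncated-normal initialization
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))

def bias_variable(shape):
    # small positive constant bias
    return tf.Variable(tf.constant(0.1, shape=shape))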
def gauss(mean, stddev, ksize):
    """Use Tensorflow to compute a Gaussian Kernel.

    Parameters
    ----------
    mean : float
        Mean of the Gaussian (e.g. 0.0).
    stddev : float
        Standard Deviation of the Gaussian (e.g. 1.0).
    ksize : int
        Size of kernel (e.g. 16).

    Returns
    -------
    kernel : np.ndarray
        Computed Gaussian Kernel using Tensorflow.
    """
    g = tf.Graph()
    with tf.Session(graph=g):
        x = tf.linspace(-3.0, 3.0, ksize)
        z = (tf.exp(tf.neg(tf.pow(x - mean, 2.0) /
                           (2.0 * tf.pow(stddev, 2.0)))) *
             (1.0 / (stddev * tf.sqrt(2.0 * 3.1415))))
        return z.eval()
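A quick usage sketch (the snippet's tf.neg is TF 0.x-era API):

kernel = gauss(0.0, 1.0, 16)  # 1-D Gaussian sampled at 16 points in [-3, 3]
print(kernel.shape)           # (16,)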
def gabor(ksize=32):
    """Use Tensorflow to compute a 2D Gabor Kernel.

    Parameters
    ----------
    ksize : int, optional
        Size of kernel.

    Returns
    -------
    gabor : np.ndarray
        Gabor kernel with ksize x ksize dimensions.
    """
    g = tf.Graph()
    with tf.Session(graph=g):
        z_2d = gauss2d(0.0, 1.0, ksize)
        ones = tf.ones((1, ksize))
        ys = tf.sin(tf.linspace(-3.0, 3.0, ksize))
        ys = tf.reshape(ys, [ksize, 1])
        wave = tf.matmul(ys, ones)
        gabor = tf.mul(wave, z_2d)
        return gabor.eval()
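gabor calls a gauss2d helper that does not appear in this listing; a plausible minimal sketch, assuming gauss2d is the outer product of two 1-D kernels from gauss above (this definition is an assumption):

def gauss2d(mean, stddev, ksize):
    # Outer product of two 1-D Gaussians gives a 2-D Gaussian.
    z = gauss(mean, stddev, ksize)
    g = tf.Graph()
    with tf.Session(graph=g):
        z_2d = tf.matmul(tf.reshape(z, [ksize, 1]),
                         tf.reshape(z, [1, ksize]))
        return z_2d.eval()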
def mu_law_bins(num_bins):
    """
    Return the mu-law bin (right) edges and bin centers for num_bins bins.
    """
    # all edges
    bins_edge = np.linspace(-1, 1, num_bins + 1)
    # center of each bin
    bins_center = np.linspace(-1 + 1.0 / num_bins, 1 - 1.0 / num_bins, num_bins)
    # keep only the right edges
    bins_trunc = bins_edge[1:]
    # a sample equal to the last right edge could be assigned to a
    # nonexistent next bin; add 0.1 to that edge to avoid this
    bins_trunc[-1] += 0.1
    # convert edges and centers to the mu-law scale
    bins_edge_mu = np.multiply(np.sign(bins_trunc), (num_bins ** np.absolute(bins_trunc) - 1) / (num_bins - 1))
    bins_center_mu = np.multiply(np.sign(bins_center), (num_bins ** np.absolute(bins_center) - 1) / (num_bins - 1))
    return (bins_edge_mu, bins_center_mu)
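A usage sketch, assuming input samples lie in [-1, 1] (np.digitize with the returned right edges gives each sample's bin index):

import numpy as np

edges, centers = mu_law_bins(256)
samples = np.array([-0.9, 0.0, 0.5])
idx = np.digitize(samples, edges)  # bin index per sample
quantized = centers[idx]           # mu-law quantized values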
def mu_law_bins_tf(num_bins):
    """
    Return the mu-law bin (right) edges and bin centers for num_bins bins.
    """
    # all edges
    bins_edge = tf.linspace(-1.0, 1.0, num_bins + 1)
    # center of each bin
    bins_center = tf.linspace(-1.0 + 1.0 / num_bins, 1.0 - 1.0 / num_bins, num_bins)
    # keep only the right edges; replace the last one with 1.1 so a sample
    # equal to the last edge is not pushed into a nonexistent next bin
    bins_trunc = tf.concat([bins_edge[1:-1], [1.1]], 0)
    # convert edges and centers to the mu-law scale
    bins_edge_mu = tf.multiply(tf.sign(bins_trunc), (num_bins ** tf.abs(bins_trunc) - 1) / (num_bins - 1))
    bins_center_mu = tf.multiply(tf.sign(bins_center), (num_bins ** tf.abs(bins_center) - 1) / (num_bins - 1))
    return (bins_edge_mu, bins_center_mu)
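A matching TF usage sketch (TF 1.x session API):

import tensorflow as tf

edges_t, centers_t = mu_law_bins_tf(256)
with tf.Session() as sess:
    edges, centers = sess.run([edges_t, centers_t])
print(edges.shape, centers.shape)  # (256,) (256,)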
def spatial_expected_softmax(x, temp=1):
    assert len(x.get_shape()) == 4
    vals = []
    for dim in [0, 1]:
        dim_val = x.get_shape()[dim + 1].value
        lin = tf.linspace(-1.0, 1.0, dim_val)
        lin = tf.expand_dims(lin, 1 - dim)
        lin = tf.expand_dims(lin, 0)
        lin = tf.expand_dims(lin, 3)
        m = tf.reduce_max(x, [1, 2], keep_dims=True)
        e = tf.exp((x - m) / temp) + 1e-5
        val = tf.reduce_sum(e * lin, [1, 2]) / (tf.reduce_sum(e, [1, 2]))
        vals.append(tf.expand_dims(val, 2))
    return tf.reshape(tf.concat(2, vals), [-1, x.get_shape()[-1].value * 2])
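A minimal NumPy sketch of the same soft-argmax idea for a single (H, W) activation map (illustrative, not from the snippet):

import numpy as np

def soft_argmax_2d(fmap, temp=1.0):
    # Softmax over all spatial positions, then the expected
    # coordinate under that distribution, in [-1, 1].
    h, w = fmap.shape
    e = np.exp((fmap - fmap.max()) / temp)
    p = e / e.sum()
    ys = np.linspace(-1.0, 1.0, h)
    xs = np.linspace(-1.0, 1.0, w)
    return p.sum(axis=1) @ ys, p.sum(axis=0) @ xs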
def get_output_for(self, input, **kwargs):
    return spatial_expected_softmax(input)  # , self.temp)
    # max_ = tf.reduce_max(input, reduction_indices=[1, 2], keep_dims=True)
    # exp = tf.exp(input - max_) + 1e-5
    # vals = []
    #
    # for dim in [0, 1]:
    #     dim_val = input.get_shape()[dim + 1].value
    #     lin = tf.linspace(-1.0, 1.0, dim_val)
    #     lin = tf.expand_dims(lin, 1 - dim)
    #     lin = tf.expand_dims(lin, 0)
    #     lin = tf.expand_dims(lin, 3)
    #     m = tf.reduce_max(input, [1, 2], keep_dims=True)
    #     e = tf.exp(input - m) + 1e-5
    #     val = tf.reduce_sum(e * lin, [1, 2]) / (tf.reduce_sum(e, [1, 2]))
    #     vals.append(tf.expand_dims(val, 2))
    #
    # return tf.reshape(tf.concat(2, vals), [-1, input.get_shape()[-1].value * 2])
    # import ipdb; ipdb.set_trace()
    # input.get_shape()
    # exp / tf.reduce_sum(exp, reduction_indices=[1, 2], keep_dims=True)
    # import ipdb; ipdb.set_trace()
    # spatial softmax?
    # for dim in range(2):
    #     val = obs.get_shape()[dim + 1].value
    #     lin = tf.linspace(-1.0, 1.0, val)
    #     lin = tf.expand_dims(lin, 1 - dim)
    #     lin = tf.expand_dims(lin, 0)
    #     lin = tf.expand_dims(lin, 3)
    #     m = tf.reduce_max(e, [1, 2], keep_dims=True)
    #     e = tf.exp(e - m) + 1e-3
    #     val = tf.reduce_sum(e * lin, [1, 2]) / (tf.reduce_sum(e, [1, 2]))
def site_rdf(distances, cutoff, step, width, eps=1e-5,
             use_mean=False, lower_cutoff=None):
    with tf.variable_scope('srdf'):
        if lower_cutoff is None:
            vrange = cutoff
        else:
            vrange = cutoff - lower_cutoff
        distances = tf.expand_dims(distances, -1)
        n_centers = np.ceil(vrange / step)
        gap = vrange - n_centers * step
        n_centers = int(n_centers)
        if lower_cutoff is None:
            centers = tf.linspace(0., cutoff - gap, n_centers)
        else:
            centers = tf.linspace(lower_cutoff + 0.5 * gap, cutoff - 0.5 * gap,
                                  n_centers)
        centers = tf.reshape(centers, (1, 1, 1, -1))
        gamma = -0.5 / width / step ** 2
        rdf = tf.exp(gamma * (distances - centers) ** 2)
        mask = tf.cast(distances >= eps, tf.float32)
        rdf *= mask
        rdf = tf.reduce_sum(rdf, 2)
        if use_mean:
            N = tf.reduce_sum(mask, 2)
            N = tf.maximum(N, 1)
            rdf /= N
        new_shape = [None, None, n_centers]
        rdf.set_shape(new_shape)
        return rdf
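A usage sketch, assuming distances has shape (batch, n_sites, n_neighbors) so the Gaussian expansion is summed over the neighbor axis:

import numpy as np
import tensorflow as tf

d = tf.constant(np.random.uniform(0.5, 5.0, (2, 8, 12)), dtype=tf.float32)
rdf = site_rdf(d, cutoff=5.0, step=0.1, width=1.0)
print(rdf.get_shape())  # (2, 8, 50) for these settings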
Source: 3D_DTN_tests.py (project 3D_Dense_Transformer_Networks, author JohnYC1995)
def main():
    sess = tf.Session()
    # inputs
    U = tf.linspace(1.0, 10.0, 2*8*8*8*2)
    U = tf.reshape(U, [2, 8, 8, 8, 2])
    # network initialization
    dtn_input_shape = [2, 8, 8, 8, 2]
    control_points_ratio = 2
    # initialize the DTN class
    transform = DSN_Transformer_3D(dtn_input_shape, control_points_ratio)
    # encoder
    conv1 = transform.Encoder(U, U)
    # decoder
    conv2 = transform.Decoder(conv1, conv1)
Source: Dense_Transformer_Networks_3D.py (project 3D_Dense_Transformer_Networks, author JohnYC1995)
def _meshgrid(self):
    with tf.variable_scope('_meshgrid'):
        x_use = tf.linspace(-1.0, 1.0, self.out_height)
        y_use = tf.linspace(-1.0, 1.0, self.out_width)
        z_use = tf.linspace(-1.0, 1.0, self.out_depth)
        x_t = tf.tile(x_use, [self.out_width*self.out_depth])
        y_t = tf.tile(self._repeat(y_use, self.out_height, 'float32'), [self.out_depth])
        z_t = self._repeat(z_use, self.out_height*self.out_width, 'float32')
        x_t_flat = tf.reshape(x_t, (1, -1))
        y_t_flat = tf.reshape(y_t, (1, -1))
        z_t_flat = tf.reshape(z_t, (1, -1))
        px, py, pz = tf.stack([x_t_flat], axis=2), tf.stack([y_t_flat], axis=2), tf.stack([z_t_flat], axis=2)
        # source control points
        x, y, z = tf.linspace(-1., 1., self.X_controlP_number), tf.linspace(-1., 1., self.Y_controlP_number), tf.linspace(-1., 1., self.Z_controlP_number)
        x = tf.tile(x, [self.Y_controlP_number*self.Z_controlP_number])
        y = tf.tile(self._repeat(y, self.X_controlP_number, 'float32'), [self.Z_controlP_number])
        z = self._repeat(z, self.X_controlP_number*self.Y_controlP_number, 'float32')
        xs, ys, zs = tf.transpose(tf.reshape(x, (-1, 1))), tf.transpose(tf.reshape(y, (-1, 1))), tf.transpose(tf.reshape(z, (-1, 1)))
        cpx, cpy, cpz = tf.transpose(tf.stack([xs], axis=2), perm=[1, 0, 2]), tf.transpose(tf.stack([ys], axis=2), perm=[1, 0, 2]), tf.transpose(tf.stack([zs], axis=2), perm=[1, 0, 2])
        px, cpx = tf.meshgrid(px, cpx); py, cpy = tf.meshgrid(py, cpy); pz, cpz = tf.meshgrid(pz, cpz)
        # compute distance R
        Rx, Ry, Rz = tf.square(tf.subtract(px, cpx)), tf.square(tf.subtract(py, cpy)), tf.square(tf.subtract(pz, cpz))
        R = tf.add(tf.add(Rx, Ry), Rz)
        R = tf.multiply(R, tf.log(tf.clip_by_value(R, 1e-10, 1e+10)))
        # source coordinates
        ones = tf.ones_like(x_t_flat)
        grid = tf.concat([ones, x_t_flat, y_t_flat, z_t_flat, R], 0)
        return grid
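R above is the thin-plate-spline radial basis r^2 * log(r^2), computed from squared distances between every output grid point and every control point; a minimal NumPy sketch of that kernel (function name is illustrative):

import numpy as np

def tps_kernel(points, ctrl_points):
    # points: (N, 3), ctrl_points: (K, 3) -> (N, K) matrix of r^2 * log(r^2)
    r2 = ((points[:, None, :] - ctrl_points[None, :, :]) ** 2).sum(-1)
    r2 = np.clip(r2, 1e-10, None)  # avoid log(0), like tf.clip_by_value above
    return r2 * np.log(r2)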
Source: Dense_Transformer_Network.py (project 3D_Dense_Transformer_Networks, author JohnYC1995)
def __init__(self, input_shape, control_points_ratio):
    self.num_batch = input_shape[0]
    self.height = input_shape[1]
    self.width = input_shape[2]
    self.num_channels = input_shape[3]
    self.out_height = self.height
    self.out_width = self.width
    self.Column_controlP_number = int(input_shape[1] / control_points_ratio)
    self.Row_controlP_number = int(input_shape[2] / control_points_ratio)
    init_x = np.linspace(-5, 5, self.Column_controlP_number)
    init_y = np.linspace(-5, 5, self.Row_controlP_number)
    x_s, y_s = np.meshgrid(init_x, init_y)
    self.initial = np.array([x_s, y_s])
Source: Dense_Transformer_Network.py (project 3D_Dense_Transformer_Networks, author JohnYC1995)
def _makeT(self, cp):
    with tf.variable_scope('_makeT'):
        cp = tf.reshape(cp, (-1, 2, self.Column_controlP_number*self.Row_controlP_number))
        cp = tf.cast(cp, 'float32')
        N_f = tf.shape(cp)[0]
        # c_s
        x, y = tf.linspace(-1., 1., self.Column_controlP_number), tf.linspace(-1., 1., self.Row_controlP_number)
        x, y = tf.meshgrid(x, y)
        xs, ys = tf.transpose(tf.reshape(x, (-1, 1))), tf.transpose(tf.reshape(y, (-1, 1)))
        cp_s = tf.concat([xs, ys], 0)
        cp_s_trans = tf.transpose(cp_s)
        ## === compute distance R
        xs_trans, ys_trans = tf.transpose(tf.stack([xs], axis=2), perm=[1, 0, 2]), tf.transpose(tf.stack([ys], axis=2), perm=[1, 0, 2])
        xs, xs_trans = tf.meshgrid(xs, xs_trans); ys, ys_trans = tf.meshgrid(ys, ys_trans)
        Rx, Ry = tf.square(tf.subtract(xs, xs_trans)), tf.square(tf.subtract(ys, ys_trans))
        R = tf.add(Rx, Ry)
        R = tf.multiply(R, tf.log(tf.clip_by_value(R, 1e-10, 1e+10)))
        ones = tf.ones([tf.multiply(self.Row_controlP_number, self.Column_controlP_number), 1], tf.float32)
        ones_trans = tf.transpose(ones)
        zeros = tf.zeros([3, 3], tf.float32)
        Deltas1 = tf.concat([ones, cp_s_trans, R], 1)
        Deltas2 = tf.concat([ones_trans, cp_s], 0)
        Deltas2 = tf.concat([zeros, Deltas2], 1)
        Deltas = tf.concat([Deltas1, Deltas2], 0)
        ## get deltas_inv
        Deltas_inv = tf.matrix_inverse(Deltas)
        Deltas_inv = tf.expand_dims(Deltas_inv, 0)
        Deltas_inv = tf.reshape(Deltas_inv, [-1])
        Deltas_inv_f = tf.tile(Deltas_inv, tf.stack([N_f]))
        Deltas_inv_f = tf.reshape(Deltas_inv_f, tf.stack([N_f, self.Column_controlP_number*self.Row_controlP_number+3, -1]))
        cp_trans = tf.transpose(cp, perm=[0, 2, 1])
        zeros_f_In = tf.zeros([N_f, 3, 2], tf.float32)
        cp = tf.concat([cp_trans, zeros_f_In], 1)
        T = tf.transpose(tf.matmul(Deltas_inv_f, cp), [0, 2, 1])
        return T
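Deltas stacks the TPS system as the block matrix [[P, K], [0, P^T]] with P = [1, x, y], so multiplying its inverse by the target control points (padded with three zero rows) enforces P*a + K*w = target and P^T*w = 0. A minimal NumPy sketch of the equivalent solve in the more common [[K, P], [P^T, 0]] arrangement (names are illustrative):

import numpy as np

def tps_params(src, dst):
    # src, dst: (K, 2) control points -> (K+3, 2) warp parameters [w; a]
    k = src.shape[0]
    r2 = ((src[:, None] - src[None, :]) ** 2).sum(-1)
    K_mat = r2 * np.log(np.clip(r2, 1e-10, None))
    P = np.hstack([np.ones((k, 1)), src])
    L = np.block([[K_mat, P], [P.T, np.zeros((3, 3))]])
    rhs = np.vstack([dst, np.zeros((3, 2))])
    return np.linalg.solve(L, rhs)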
def generate_anchors(boxes, height, width, conv_height, conv_width):
    '''Generate anchors for given geometry

    boxes: K x 2 tensor for anchor geometries, K different sizes
    height: source image height
    width: source image width
    conv_height: convolution layer height
    conv_width: convolution layer width

    returns:
    conv_height x conv_width x K x 4 tensor with boxes for all
    positions. Last dimension 4 numbers are (y, x, h, w)
    '''
    k, _ = boxes.get_shape().as_list()
    height, width = tf.cast(height, tf.float32), tf.cast(width, tf.float32)
    grid = tf.transpose(tf.stack(tf.meshgrid(
        tf.linspace(-0.5, height - 0.5, conv_height),
        tf.linspace(-0.5, width - 0.5, conv_width)), axis=2), [1, 0, 2])
    # convert boxes from K x 2 to 1 x 1 x K x 2
    boxes = tf.expand_dims(tf.expand_dims(boxes, 0), 0)
    # convert grid from H' x W' x 2 to H' x W' x 1 x 2
    grid = tf.expand_dims(grid, 2)
    # combine them into a single H' x W' x K x 4 tensor
    return tf.concat(
        3,
        [tf.tile(grid, [1, 1, k, 1]),
         tf.tile(boxes, [conv_height, conv_width, 1, 1])])
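A usage sketch (TF 0.x-era API, matching the snippet's tf.concat(3, ...) argument order):

import tensorflow as tf

boxes = tf.constant([[64.0, 64.0], [128.0, 96.0]])  # K = 2 anchor sizes as (h, w)
anchors = generate_anchors(boxes, height=480, width=640,
                           conv_height=15, conv_width=20)
print(anchors.get_shape())  # (15, 20, 2, 4)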
def _makeT(self, cp):
    with tf.variable_scope('_makeT'):
        cp = tf.reshape(cp, (-1, 3, self.X_controlP_number*self.Y_controlP_number*self.Z_controlP_number))
        cp = tf.cast(cp, 'float32')
        N_f = tf.shape(cp)[0]
        # c_s
        x, y, z = tf.linspace(-1., 1., self.X_controlP_number), tf.linspace(-1., 1., self.Y_controlP_number), tf.linspace(-1., 1., self.Z_controlP_number)
        x = tf.tile(x, [self.Y_controlP_number*self.Z_controlP_number])
        y = tf.tile(self._repeat(y, self.X_controlP_number, 'float32'), [self.Z_controlP_number])
        z = self._repeat(z, self.X_controlP_number*self.Y_controlP_number, 'float32')
        xs, ys, zs = tf.transpose(tf.reshape(x, (-1, 1))), tf.transpose(tf.reshape(y, (-1, 1))), tf.transpose(tf.reshape(z, (-1, 1)))
        cp_s = tf.concat([xs, ys, zs], 0)
        cp_s_trans = tf.transpose(cp_s)
        # (4*4*4)*3 -> 64 * 3
        ## === compute distance R
        xs_trans, ys_trans, zs_trans = tf.transpose(tf.stack([xs], axis=2), perm=[1, 0, 2]), tf.transpose(tf.stack([ys], axis=2), perm=[1, 0, 2]), tf.transpose(tf.stack([zs], axis=2), perm=[1, 0, 2])
        xs, xs_trans = tf.meshgrid(xs, xs_trans); ys, ys_trans = tf.meshgrid(ys, ys_trans); zs, zs_trans = tf.meshgrid(zs, zs_trans)
        Rx, Ry, Rz = tf.square(tf.subtract(xs, xs_trans)), tf.square(tf.subtract(ys, ys_trans)), tf.square(tf.subtract(zs, zs_trans))
        R = tf.add_n([Rx, Ry, Rz])
        R = tf.multiply(R, tf.log(tf.clip_by_value(R, 1e-10, 1e+10)))
        ones = tf.ones([self.Y_controlP_number*self.X_controlP_number*self.Z_controlP_number, 1], tf.float32)
        ones_trans = tf.transpose(ones)
        zeros = tf.zeros([4, 4], tf.float32)
        Deltas1 = tf.concat([ones, cp_s_trans, R], 1)
        Deltas2 = tf.concat([ones_trans, cp_s], 0)
        Deltas2 = tf.concat([zeros, Deltas2], 1)
        Deltas = tf.concat([Deltas1, Deltas2], 0)
        ## get deltas_inv
        Deltas_inv = tf.matrix_inverse(Deltas)
        Deltas_inv = tf.expand_dims(Deltas_inv, 0)
        Deltas_inv = tf.reshape(Deltas_inv, [-1])
        Deltas_inv_f = tf.tile(Deltas_inv, tf.stack([N_f]))
        Deltas_inv_f = tf.reshape(Deltas_inv_f, tf.stack([N_f, self.X_controlP_number*self.Y_controlP_number*self.Z_controlP_number+4, -1]))
        cp_trans = tf.transpose(cp, perm=[0, 2, 1])
        zeros_f_In = tf.zeros([N_f, 4, 3], tf.float32)
        cp = tf.concat([cp_trans, zeros_f_In], 1)
        T = tf.transpose(tf.matmul(Deltas_inv_f, cp), [0, 2, 1])
        return T
def meshgrid(height, width):
    x = tf.tile(tf.linspace(-1., 1., width), [height])
    y = repeat(tf.linspace(-1., 1., height), width)
    return x, y
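meshgrid relies on a repeat helper not shown in this listing; a minimal sketch of one common TF 1.x implementation (an assumption about this project's version):

def repeat(x, n_repeats):
    # [a, b] -> [a, ..., a, b, ..., b], each element repeated n_repeats times
    x = tf.reshape(x, [-1, 1])
    x = tf.tile(x, [1, n_repeats])
    return tf.reshape(x, [-1])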
def gaussian_blur(input, filter_size, filter_sampling_range=3.5, strides=[1, 1, 1, 1], padding='SAME'):
    """
    Blur input with a 2D Gaussian filter of size filter_size x filter_size. The filter's values are
    sampled from an evenly spaced grid on the 2D standard normal distribution in the range
    [-filter_sampling_range, filter_sampling_range] in both dimensions.

    :param input: A rank-4 tensor with shape=(samples, x, y, n_channels). The same Gaussian filter
        will be applied to all n_channels feature maps of input.
    :param filter_size: The size of one edge of the square-shaped Gaussian filter.
    :param filter_sampling_range: The range in which to sample from the standard normal distribution in
        both dimensions, i.e. a sampling range of 1 corresponds to sampling in a square grid that bounds
        the standard deviation circle.
    :param strides: Param strides as passed to tf.nn.depthwise_conv2d.
    :param padding: Param padding as passed to tf.nn.depthwise_conv2d.
    :return: The result of the Gaussian blur as a rank-4 tensor with the same shape as input.
    """
    # make 2D distribution
    mu = np.repeat(np.float32(0.), 2)
    sig = np.repeat(np.float32(1.), 2)
    dist = tf.contrib.distributions.MultivariateNormalDiag(mu, sig)
    # sample from distribution on a grid
    sampling_range = tf.cast(filter_sampling_range, tf.float32)
    x_1D = tf.linspace(-sampling_range, sampling_range, filter_size)
    x = tf.stack(tf.meshgrid(x_1D, x_1D), 2)
    kern = dist.pdf(x)
    kern /= tf.reduce_sum(kern)
    kern = tf.reshape(kern, kern.shape.as_list() + [1, 1])
    kern = tf.tile(kern, [1, 1, input.shape.as_list()[-1], 1])
    return tf.nn.depthwise_conv2d(input, kern, strides, padding)
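A usage sketch (TF 1.x with tf.contrib, which the snippet's MultivariateNormalDiag.pdf call assumes):

import numpy as np
import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 64, 64, 3])
blurred = gaussian_blur(images, filter_size=5)
with tf.Session() as sess:
    out = sess.run(blurred, {images: np.random.rand(2, 64, 64, 3).astype(np.float32)})
print(out.shape)  # (2, 64, 64, 3)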