def __init__(self, args):
    super(LSTM, self).__init__(
        # RNN
        LSTM=L.LSTM(args.n_in_units, args.n_units),
        # W_predict=L.Linear(args.n_units, args.n_units),
        W_candidate=L.Linear(args.n_in_units, args.n_units),
    )
    # self.act1 = F.tanh
    self.act1 = F.identity
    self.args = args
    self.n_in_units = args.n_in_units
    self.n_units = args.n_units
    self.dropout_ratio = args.d_ratio
    self.margin = args.margin
    self.initialize_parameters()
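For orientation, a minimal sketch of the `args` object this constructor expects; the attribute names come from the code above, the values are placeholders, and `initialize_parameters()` is defined elsewhere in the original class.

# Sketch only: `args` just needs the attributes read in __init__ above.
from types import SimpleNamespace

args = SimpleNamespace(n_in_units=100, n_units=200, d_ratio=0.5, margin=1.0)
# model = LSTM(args)  # also relies on initialize_parameters(), not shown here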
def __init__(self, in_channels, out_channels, ksize=3, fiber_map='id',
             stride=1, pad=1, wscale=1, bias=0, nobias=False, use_cudnn=True,
             initialW=None, initial_bias=None):
    assert ksize % 2 == 1
    assert pad == (ksize - 1) // 2
    super(ResBlock, self).__init__(
        bn1=L.BatchNormalization(in_channels),
        conv1=L.Convolution2D(in_channels, out_channels, ksize, stride, pad, wscale),
        bn2=L.BatchNormalization(out_channels),
        conv2=L.Convolution2D(out_channels, out_channels, ksize, 1, pad, wscale),
    )
    if fiber_map == 'id':
        assert in_channels == out_channels
        self.fiber_map = F.identity
    elif fiber_map == 'linear':
        self.add_link('fiber_map', L.Convolution2D(in_channels, out_channels, 1, 2, 0, wscale))
    else:
        raise ValueError('Unimplemented fiber map {}'.format(fiber_map))
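The block's `__call__` is not shown on this page; the following is only a sketch of how a pre-activation residual block would typically use `fiber_map` as the shortcut branch (an assumption, not the project's actual forward pass).

import chainer.functions as F

def res_block_forward(block, x):
    # Assumed pre-activation ordering: BN -> ReLU -> conv, twice,
    # then add the shortcut produced by fiber_map (identity or 1x1 conv).
    h = block.conv1(F.relu(block.bn1(x)))
    h = block.conv2(F.relu(block.bn2(h)))
    return h + block.fiber_map(x)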
def __init__(self, in_channels, out_channels, ksize=3, fiber_map='id', conv_link=L.Convolution2D,
             stride=1, pad=1, wscale=1):
    assert ksize % 2 == 1
    if not pad == (ksize - 1) // 2:
        raise NotImplementedError()
    super(ResBlock2D, self).__init__(
        bn1=L.BatchNormalization(in_channels),
        conv1=conv_link(
            in_channels=in_channels, out_channels=out_channels, ksize=ksize,
            stride=stride, pad=pad, wscale=wscale),
        bn2=L.BatchNormalization(out_channels),
        conv2=conv_link(
            in_channels=out_channels, out_channels=out_channels, ksize=ksize,
            stride=1, pad=pad, wscale=wscale)
    )
    if fiber_map == 'id':
        if not in_channels == out_channels:
            raise ValueError('fiber_map cannot be identity when channel dimension is changed.')
        self.fiber_map = F.identity
    elif fiber_map == 'zero_pad':
        raise NotImplementedError()
    elif fiber_map == 'linear':
        fiber_map = conv_link(
            in_channels=in_channels, out_channels=out_channels, ksize=1,
            stride=stride, pad=0, wscale=wscale)
        self.add_link('fiber_map', fiber_map)
    else:
        raise ValueError('Unknown fiber_map: ' + str(fiber_map))
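When `fiber_map='linear'`, the shortcut is a 1×1 convolution with the block's stride, i.e. a projection shortcut that matches both the channel count and the spatial size of the main branch. A small shape check (values are illustrative):

import numpy as np
import chainer.links as L

proj = L.Convolution2D(in_channels=64, out_channels=128, ksize=1, stride=2, pad=0)
x = np.zeros((1, 64, 32, 32), dtype=np.float32)
print(proj(x).shape)  # (1, 128, 16, 16): downsampled and widened like the main branch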
def house_transform(self, z):
    # Apply a sequence of Householder reflections to z.
    vec_t = self.qh_vec_0
    for i in range(self.num_trans):
        vec_t = F.identity(self.qlin_h_vec_t(vec_t))
        vec_t_product = F.matmul(vec_t, vec_t, transb=True)  # v v^T
        vec_t_norm_sqr = F.tile(F.sum(F.square(vec_t)), (z.shape[0], z.shape[1]))  # ||v||^2, broadcast to z's shape
        z = z - 2 * F.matmul(vec_t_product, z) / vec_t_norm_sqr  # z - 2 v v^T z / ||v||^2
    return z
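Each loop iteration applies a Householder reflection, z ← z − 2 v vᵀ z / ‖v‖², which is what `vec_t_product` and `vec_t_norm_sqr` implement. A small NumPy check of that identity (illustrative only, independent of the model code):

import numpy as np

v = np.random.randn(3, 1)
z = np.random.randn(3, 5)
H = np.eye(3) - 2 * (v @ v.T) / float(v.T @ v)      # explicit Householder matrix
reflected = z - 2 * (v @ v.T) @ z / float(v.T @ v)  # matrix-free form used above
assert np.allclose(H @ z, reflected)
assert np.allclose(H @ H.T, np.eye(3))              # reflections are orthogonal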
def test_backward(self):
    x = chainer.Variable(numpy.array([1]), name='x')
    y1 = F.identity(x)
    y1.name = 'y1'
    y2 = F.identity(x)
    y2.name = 'y2'
    z = y1 + y2
    z.name = 'z'
    z.grad = numpy.array([1])
    z.backward(retain_grad=True)
    self.assertEqual(y1.grad[0], 1)
    self.assertEqual(y2.grad[0], 1)
    self.assertEqual(x.grad[0], 2)
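The same behaviour outside the test harness: `F.identity` passes data and gradients through unchanged, so both branches contribute to `x.grad`.

import numpy as np
import chainer
import chainer.functions as F

x = chainer.Variable(np.array([1.0], dtype=np.float32))
z = F.identity(x) + F.identity(x)
z.grad = np.ones_like(z.data)
z.backward(retain_grad=True)
print(x.grad)  # [2.]: one unit of gradient from each identity branch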
def __init__(self, predictor, lossfun=identity,
             accuracyfun=accuracy, **links):
    super(Model, self).__init__(predictor=predictor, **links)
    self.lossfun = lossfun
    self.accuracyfun = accuracyfun
    self.y = None
    self.loss = None
    self.accuracy = None
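A hypothetical instantiation (the wrapper's `__call__` is not shown here): with the default `lossfun=identity` the predictor's raw output would be used as the loss, so a classifier typically passes an explicit loss function.

import chainer.functions as F
import chainer.links as L

model = Model(predictor=L.Linear(10, 2), lossfun=F.softmax_cross_entropy)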
model.py — project: deep-learning-for-human-part-discovery-in-images, author: shiba24
def crop(inputs, outsize, offset):
    # Crop `inputs` to `outsize` along every axis whose size differs,
    # starting at the corresponding entry of `offset`.
    x = F.identity(inputs)
    crop_axis = [i != j for i, j in zip(inputs.data.shape, outsize)]
    i = 0
    for index, tf in enumerate(crop_axis):
        if tf:
            _, x, _ = F.split_axis(x, [offset[i], offset[i] + outsize[index]], index)
            i += 1
    return x
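A small usage sketch of `crop` (shapes are illustrative): every axis whose size differs from `outsize` is cut down, starting at the corresponding `offset` entry, which is the usual way to trim a decoder feature map so it matches a skip connection.

import numpy as np
import chainer

x = chainer.Variable(np.zeros((1, 3, 100, 100), dtype=np.float32))
y = crop(x, outsize=(1, 3, 96, 96), offset=(2, 2))  # crop only the two spatial axes
print(y.shape)  # (1, 3, 96, 96)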