def tensor_swirl(image, center=None, strength=1, radius=100, rotation=0, cval=0.0, **kwargs):
    # **kwargs absorbs unsupported skimage.transform.swirl options (ignored)
    cval = tf.fill(K.shape(image)[0:1], cval)
    shape = K.int_shape(image)[1:3]
    if center is None:
        center = np.array(shape) / 2
    ys = np.expand_dims(np.repeat(np.arange(shape[0]), shape[1]), -1)
    xs = np.expand_dims(np.tile(np.arange(shape[1]), shape[0]), -1)
    map_xs, map_ys = swirl_mapping(xs, ys, center, rotation, strength, radius)

    # bake the bilinear interpolation weights into a rank-4 tensor indexed
    # (source_y, source_x, target_y, target_x)
    mapping = np.zeros((*shape, *shape))
    for map_x, map_y, x, y in zip(map_xs, map_ys, xs, ys):
        results = tensor_linear_interpolation(image, map_x, map_y, cval)
        for _y, _x, w in results:
            # mapping[int(y), int(x), int(_y), int(_x)] = w
            mapping[int(_y), int(_x), int(y), int(x)] = w

    results = tf.tensordot(image, K.variable(mapping), [[1, 2], [0, 1]])
    # results = K.reshape(results, K.shape(image))
    return results
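
The core trick above is worth isolating: once the interpolation weights are baked into a rank-4 tensor, the whole warp is a single tensordot over the spatial axes. A minimal, self-contained sketch of that contraction (toy 2x2 image and an identity mapping; all names here are illustrative, not from the source):

import numpy as np
import tensorflow as tf

img = tf.constant(np.arange(8, dtype='float32').reshape(2, 2, 2))  # (batch, H, W)
ident = np.zeros((2, 2, 2, 2), dtype='float32')                    # (src_y, src_x, dst_y, dst_x)
for y in range(2):
    for x in range(2):
        ident[y, x, y, x] = 1.0  # each output pixel copies its own source pixel
warped = tf.tensordot(img, tf.constant(ident), [[1, 2], [0, 1]])   # equals img
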
def generate_gpu(configs, **kwargs):
    configs = np.array(configs)
    import math
    size = int(math.sqrt(len(configs[0])))
    base = panels.shape[1]
    dim = base * size

    def build():
        P = 2
        configs = Input(shape=(size * size,))
        _configs = 1 - K.round((configs / 2) + 0.5)  # from -1/1 to 1/0
        configs_one_hot = K.one_hot(K.cast(_configs, 'int32'), P)
        configs_one_hot = K.reshape(configs_one_hot, [-1, P])
        _panels = K.variable(panels)
        _panels = K.reshape(_panels, [P, base * base])
        states = tf.matmul(configs_one_hot, _panels)
        states = K.reshape(states, [-1, size, size, base, base])
        states = K.permute_dimensions(states, [0, 1, 3, 2, 4])
        states = K.reshape(states, [-1, size * base, size * base, 1])
        states = K.spatial_2d_padding(states, padding=((pad, pad), (pad, pad)))
        states = K.squeeze(states, -1)
        return Model(configs, wrap(configs, states))

    return preprocess(batch_swirl(build().predict(configs, **kwargs)))
def generate_gpu2(configs, **kwargs):
    configs = np.array(configs)
    import math
    size = int(math.sqrt(len(configs[0])))
    base = panels.shape[1]
    dim = base * size

    def build():
        P = 2
        configs = Input(shape=(size * size,))
        _configs = 1 - K.round((configs / 2) + 0.5)  # from -1/1 to 1/0
        configs_one_hot = K.one_hot(K.cast(_configs, 'int32'), P)
        configs_one_hot = K.reshape(configs_one_hot, [-1, P])
        _panels = K.variable(panels)
        _panels = K.reshape(_panels, [P, base * base])
        states = tf.matmul(configs_one_hot, _panels)
        states = K.reshape(states, [-1, size, size, base, base])
        states = K.permute_dimensions(states, [0, 1, 3, 2, 4])
        states = K.reshape(states, [-1, size * base, size * base, 1])
        states = K.spatial_2d_padding(states, padding=((pad, pad), (pad, pad)))
        states = K.squeeze(states, -1)
        # note: precedence makes this dim + (2 * pad * relative_swirl_radius)
        states = tensor_swirl(states, radius=dim + 2 * pad * relative_swirl_radius, **swirl_args)
        return Model(configs, wrap(configs, states))

    return preprocess(build().predict(configs, **kwargs))
def generate_gpu(configs, **kwargs):
    import math
    size = int(math.sqrt(len(configs[0])))
    base = panels.shape[1]
    dim = base * size

    def build():
        P = 2
        configs = Input(shape=(size * size,))
        _configs = 1 - K.round((configs / 2) + 0.5)  # from -1/1 to 1/0
        configs_one_hot = K.one_hot(K.cast(_configs, 'int32'), P)
        configs_one_hot = K.reshape(configs_one_hot, [-1, P])
        _panels = K.variable(panels)
        _panels = K.reshape(_panels, [P, base * base])
        states = tf.matmul(configs_one_hot, _panels)
        states = K.reshape(states, [-1, size, size, base, base])
        states = K.permute_dimensions(states, [0, 1, 3, 2, 4])
        states = K.reshape(states, [-1, size * base, size * base])
        return Model(configs, wrap(configs, states))

    return build().predict(np.array(configs), **kwargs)
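
The three generate_gpu variants above assume several module-level globals from the surrounding project (panels, pad, wrap, preprocess, batch_swirl, swirl_args, relative_swirl_radius). A hypothetical minimal set of stand-ins, for illustration only; the real definitions live in the source project:

import numpy as np
import tensorflow as tf
from keras import backend as K
from keras.layers import Input, Lambda
from keras.models import Model

panels = np.random.random((2, 3, 3)).astype('float32')  # P=2 tile images, base=3
pad = 1

def wrap(x, y):
    # route a backend tensor back into the functional API
    return Lambda(lambda _: y)(x)

def preprocess(batch):
    return batch  # placeholder; the real project normalizes here
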
def style_loss(style_image, target_image, style_masks, target_masks):
    '''Calculate style loss between style_image and target_image,
    in all regions.
    '''
    assert 3 == K.ndim(style_image) == K.ndim(target_image)
    assert 3 == K.ndim(style_masks) == K.ndim(target_masks)
    loss = K.variable(0)
    for i in range(nb_labels):  # xrange in the Python 2 original
        if K.image_dim_ordering() == 'th':
            style_mask = style_masks[i, :, :]
            target_mask = target_masks[i, :, :]
        else:
            style_mask = style_masks[:, :, i]
            target_mask = target_masks[:, :, i]
        loss += region_style_loss(style_image,
                                  target_image, style_mask, target_mask)
    return loss
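
style_loss delegates to region_style_loss, which this listing omits. A minimal sketch of what such a helper typically computes, assuming channels-last tensors and an existing gram_matrix helper (both are assumptions, not confirmed by the source):

def region_style_loss(style_image, target_image, style_mask, target_mask):
    # zero out features outside the region, then compare Gram matrices
    masked_style = style_image * K.expand_dims(style_mask, -1)
    masked_target = target_image * K.expand_dims(target_mask, -1)
    return K.mean(K.square(gram_matrix(masked_style) - gram_matrix(masked_target)))
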
def test_jaccard_distance():
    # all_right, almost_right, half_right, all_wrong
    y_true = np.array([[0, 0, 1, 0], [0, 0, 1, 0], [0, 0, 1, 0],
                       [0, 0, 1., 0.]])
    y_pred = np.array([[0, 0, 1, 0], [0, 0, 0.9, 0], [0, 0, 0.1, 0],
                       [1, 1, 0.1, 1.]])
    r = jaccard_distance(
        K.variable(y_true),
        K.variable(y_pred))
    if K.is_keras_tensor(r):
        assert K.int_shape(r) == (4,)
    all_right, almost_right, half_right, all_wrong = K.eval(r)
    assert all_right == 0, 'should converge on zero'
    assert all_right < almost_right
    assert almost_right < half_right
    assert half_right < all_wrong
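
The jaccard_distance under test is not defined in this listing. The keras-contrib definition it most likely targets looks like the sketch below; the smooth=100 default is an assumption carried over from that project:

def jaccard_distance(y_true, y_pred, smooth=100):
    # soft intersection-over-union, smoothed to avoid division by zero
    intersection = K.sum(K.abs(y_true * y_pred), axis=-1)
    union = K.sum(K.abs(y_true) + K.abs(y_pred), axis=-1) - intersection
    jac = (intersection + smooth) / (union + smooth)
    return (1 - jac) * smooth
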
def test_sub_pixel_upscaling():
    num_samples = 2
    num_row = 16
    num_col = 16
    input_dtype = K.floatx()
    for scale_factor in [2, 3, 4]:
        input_data = np.random.random((num_samples, 4 * (scale_factor ** 2), num_row, num_col))
        input_data = input_data.astype(input_dtype)
        if K.image_data_format() == 'channels_last':
            input_data = input_data.transpose((0, 2, 3, 1))
        input_tensor = K.variable(input_data)
        expected_output = K.eval(KC.depth_to_space(input_tensor,
                                                   scale=scale_factor))
        layer_test(convolutional.SubPixelUpscaling,
                   kwargs={'scale_factor': scale_factor},
                   input_data=input_data,
                   expected_output=expected_output,
                   expected_output_dtype=K.floatx())
def test_regularizer(layer_class):
    layer = layer_class(output_dim, return_sequences=False, weights=None,
                        batch_input_shape=(nb_samples, timesteps, embedding_dim),
                        W_regularizer=regularizers.WeightRegularizer(l1=0.01),
                        U_regularizer=regularizers.WeightRegularizer(l1=0.01),
                        b_regularizer='l2')
    shape = (nb_samples, timesteps, embedding_dim)
    layer.build(shape)
    output = layer(K.variable(np.ones(shape)))
    K.eval(output)
    if layer_class == recurrent.SimpleRNN:
        assert len(layer.losses) == 3
    if layer_class == recurrent.GRU:
        assert len(layer.losses) == 9
    if layer_class == recurrent.LSTM:
        assert len(layer.losses) == 12
def __init__(self, mdl, x):
    self.loss_value = None
    self.grad_values = None
    self.mdl = mdl
    loss = K.variable(0.)
    layer_dict = dict([(layer.name, layer) for layer in mdl.layers])
    inp = layer_dict['face'].output
    out = layer_dict['conf'].output
    loss -= K.sum(out)
    # Might want to add some L2 loss in here, depending on output
    # loss += 0.0005 * K.sum(K.square(inp - x))
    grads = K.gradients(loss, inp)
    outputs = [loss]
    if type(grads) in {list, tuple}:
        outputs += grads
    else:
        outputs.append(grads)
    self.f_outputs = K.function([inp, K.learning_phase()], outputs)
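
A class built around f_outputs usually exposes a loss/grads pair for a scipy-style optimizer. A hedged sketch of the companion method (the flattening and the 0 test-phase flag are the usual pattern, not confirmed by this listing):

def eval_loss_and_grads(self, face_value):
    # the trailing 0 selects the test phase for K.learning_phase()
    outs = self.f_outputs([face_value, 0])
    loss_value = outs[0]
    grad_values = np.array(outs[1:]).flatten().astype('float64')
    return loss_value, grad_values
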
def style_loss(style_image, target_image, style_masks, target_masks):
    '''Calculate style loss between style_image and target_image,
    in all regions.
    '''
    assert 3 == K.ndim(style_image) == K.ndim(target_image)
    assert 3 == K.ndim(style_masks) == K.ndim(target_masks)
    loss = K.variable(0)
    for i in range(num_labels):  # xrange in the Python 2 original
        if K.image_data_format() == 'channels_first':
            style_mask = style_masks[i, :, :]
            target_mask = target_masks[i, :, :]
        else:
            style_mask = style_masks[:, :, i]
            target_mask = target_masks[:, :, i]
        loss += region_style_loss(style_image,
                                  target_image, style_mask, target_mask)
    return loss
def get_gradcam(image, model, layer_name, mode):
    layer = model.get_layer(layer_name)
    image = np.expand_dims(image, 0)
    loss = K.variable(0.)
    if mode == "abnormal":
        loss += K.sum(model.output)
    elif mode == "normal":
        loss += K.sum(1 - model.output)
    else:
        raise ValueError("mode must be normal or abnormal")
    # gradients of the prediction w.r.t. the conv layer of choice are used
    upstream_grads = K.gradients(loss, layer.output)[0]
    feature_weights = K.mean(upstream_grads, axis=[1, 2])  # spatial global avg pool
    heatmap = K.relu(K.dot(layer.output, K.transpose(feature_weights)))
    fetch_heatmap = K.function([model.input, K.learning_phase()], [heatmap])
    return fetch_heatmap([image, 0])[0]
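
A hedged usage sketch; the model and the layer name below are placeholders, any trained classifier with a 4D conv output should work:

heatmap = get_gradcam(img, model, "block5_conv3", mode="abnormal")
heatmap = heatmap.squeeze()
heatmap /= heatmap.max() + 1e-8  # normalize before overlaying on the image
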
def checkScale(targetSample, outputSample, scale, nIters=3, batchSize=1000):
    mmd_TT = np.zeros(nIters)
    mmd_OT = np.zeros(nIters)
    # ratios = np.zeros(nIters)
    for i in range(nIters):
        T = targetSample[np.random.randint(targetSample.shape[0], size=batchSize), :]
        T1 = targetSample[np.random.randint(targetSample.shape[0], size=batchSize), :]
        T2 = targetSample[np.random.randint(targetSample.shape[0], size=batchSize), :]
        O = outputSample[np.random.randint(outputSample.shape[0], size=batchSize), :]
        mmd_TT[i] = K.eval(cf.MMD(T1, T2, scales=[scale]).cost(K.variable(value=T1), K.variable(value=T2)))
        mmd_OT[i] = K.eval(cf.MMD(T, O, scales=[scale]).cost(K.variable(value=T), K.variable(value=O)))
        # ratios[i] = (mmd_OT[i] - mmd_TT[i]) / mmd_OT[i]
    print('scale: ' + str(scale))
    print('mmd_TT: ' + str(np.mean(mmd_TT)))
    print('mmd_OT: ' + str(np.mean(mmd_OT)))
    ratio = (np.mean(mmd_OT) - np.mean(mmd_TT)) / np.mean(mmd_OT)
    print('ratio: ' + str(ratio))
    return np.mean(mmd_TT), np.mean(mmd_OT), ratio
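
A typical use of checkScale is to sweep candidate RBF kernel scales and keep those where the target-vs-target MMD sits well below the output-vs-target MMD. The scale values and data names below are illustrative:

for s in [1e-2, 1e-1, 1.0, 10.0]:
    tt, ot, ratio = checkScale(target_data, model_output, s)
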
def build(self, input_shape):
    super().build(input_shape)
    self.mask = np.ones(self.W_shape)
    assert self.mask.shape[0] == self.mask.shape[1]  # filters must be square
    filter_size = self.mask.shape[0]
    filter_center = filter_size / 2
    # zero out everything "after" the center pixel in raster order
    self.mask[math.ceil(filter_center):] = 0
    self.mask[math.floor(filter_center):, math.ceil(filter_center):] = 0
    if self.mono:
        if self.mask_type == 'A':
            self.mask[math.floor(filter_center), math.floor(filter_center)] = 0
    else:
        op = np.greater_equal if self.mask_type == 'A' else np.greater
        for i in range(self.n_channels):
            for j in range(self.n_channels):
                if op(i, j):
                    self.mask[math.floor(filter_center), math.floor(filter_center), i::self.n_channels, j::self.n_channels] = 0
    self.mask = K.variable(self.mask)
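
To see what this produces, here is a small NumPy-only demonstration of the spatial part of a type 'A' mask for a 3x3 filter (the channel logic is dropped for brevity):

import math
import numpy as np

mask = np.ones((3, 3))
c = 3 / 2
mask[math.ceil(c):] = 0                   # rows strictly below the center
mask[math.floor(c):, math.ceil(c):] = 0   # right of center on the center row
mask[math.floor(c), math.floor(c)] = 0    # type 'A': the center pixel is excluded too
print(mask)
# [[1. 1. 1.]
#  [1. 0. 0.]
#  [0. 0. 0.]]
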
def get_total_loss(content_losses, style_losses, total_var_loss,
                   content_weights, style_weights, tv_weights, class_targets):
    total_loss = K.variable(0.)
    weighted_content_losses = []
    weighted_style_losses = []
    # Compute content losses
    for loss in content_losses:
        weighted_loss = K.mean(K.gather(content_weights, class_targets) * loss)
        weighted_content_losses.append(weighted_loss)
        total_loss += weighted_loss
    # Compute style losses
    for loss in style_losses:
        weighted_loss = K.mean(K.gather(style_weights, class_targets) * loss)
        weighted_style_losses.append(weighted_loss)
        total_loss += weighted_loss
    # Compute total-variation loss
    weighted_tv_loss = K.mean(K.gather(tv_weights, class_targets) *
                              total_var_loss)
    total_loss += weighted_tv_loss
    return (total_loss, weighted_content_losses, weighted_style_losses,
            weighted_tv_loss)
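
A hedged call sketch: the weight arguments are per-class vectors so K.gather can pick the weight matching each image's class target. Every name below is a placeholder:

content_w = K.variable(np.array([1.0, 0.5]))   # hypothetical per-class weights
style_w = K.variable(np.array([1e-4, 5e-4]))
tv_w = K.variable(np.array([1e-6, 1e-6]))
total, c_parts, s_parts, tv_part = get_total_loss(
    content_losses, style_losses, tv_loss,
    content_w, style_w, tv_w, class_targets)
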
def build(self, input_shape):
    alpha_shape = input_shape[self.axis]
    self.alpha = self.init((alpha_shape,),
                           name='{}_alpha_pos'.format(self.name))
    self.rho = K.variable(self.power_init * np.ones(alpha_shape),
                          name='{}_rho_pos'.format(self.name))
    if self.fit:
        self.trainable_weights = [self.alpha, self.rho]
    self.input_spec = [InputSpec(dtype=K.floatx(),
                                 shape=input_shape)]
    if self.initial_weights is not None:
        self.set_weights(self.initial_weights)
        del self.initial_weights
def test_downsample_model_features():
    """
    Test creates a toy numpy array, and checks that the method
    correctly downsamples the array into a hand-checked tensor
    """
    # Create the spliced and averaged tensor via the downsampling function
    array = np.array([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
                      [11, 12, 13, 14, 15, 16, 17, 18, 19, 20],
                      [21, 22, 23, 24, 25, 26, 27, 28, 29, 30]])
    tensor = K.variable(array)
    x = _downsample_model_features(tensor, 5)
    # Create the spliced and averaged tensor by hand
    check_array = np.array([[1.5, 3.5, 5.5, 7.5, 9.5],
                            [11.5, 13.5, 15.5, 17.5, 19.5],
                            [21.5, 23.5, 25.5, 27.5, 29.5]])
    check_tensor = K.variable(check_array)
    # Check that they are equal: that it returns the correct tensor
    assert np.allclose(K.eval(check_tensor), K.eval(x), atol=ATOL)
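
The helper under test is not shown. A minimal sketch consistent with the hand-checked values above (split the feature axis into equal chunks and average each chunk); treat it as an assumption about the real implementation:

def _downsample_model_features(features, num_features):
    # (rows, n) -> (rows, num_features, n // num_features) -> mean over each chunk
    n = K.int_shape(features)[-1]
    chunked = K.reshape(features, (-1, num_features, n // num_features))
    return K.mean(chunked, axis=-1)
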
# improved_neural_doodle.py (project: Neural-Style-Transfer-Windows, author: titu1994)
def style_loss(style_image, target_image, style_masks, target_masks):
    '''Calculate style loss between style_image and target_image,
    in all regions.
    '''
    assert 3 == K.ndim(style_image) == K.ndim(target_image)
    assert 3 == K.ndim(style_masks) == K.ndim(target_masks)
    loss = K.variable(0)
    for i in range(nb_labels):
        if K.image_dim_ordering() == 'th':
            style_mask = style_masks[i, :, :]
            target_mask = target_masks[i, :, :]
        else:
            style_mask = style_masks[:, :, i]
            target_mask = target_masks[:, :, i]
        loss += region_style_weight * region_style_loss(style_image, target_image, style_mask, target_mask)
    return loss
def make_soft(y_true, fragment_length, nb_output_bins, train_with_soft_target_stdev, with_prints=False):
    receptive_field, _ = compute_receptive_field()
    n_outputs = fragment_length - receptive_field + 1
    # Make a gaussian kernel.
    kernel_v = scipy.signal.gaussian(9, std=train_with_soft_target_stdev)
    print(kernel_v)
    kernel_v = np.reshape(kernel_v, [1, 1, -1, 1])
    kernel = K.variable(kernel_v)

    if with_prints:
        y_true = print_t(y_true, 'y_true initial')

    # y_true: [batch, timesteps, input_dim]
    y_true = K.reshape(y_true, (-1, 1, nb_output_bins, 1))  # Same filter for all outputs; fold timesteps into the batch.
    # y_true: [batch*timesteps, n_channels=1, input_dim, dummy]
    y_true = K.conv2d(y_true, kernel, border_mode='same')
    y_true = K.reshape(y_true, (-1, n_outputs, nb_output_bins))  # Unfold batch and timesteps again.
    # y_true: [batch, timesteps, input_dim]
    y_true /= K.sum(y_true, axis=-1, keepdims=True)

    if with_prints:
        y_true = print_t(y_true, 'y_true after')
    return y_true
def check_two_tensor_operation(function_name, x_input_shape,
                               y_input_shape, **kwargs):
    xval = np.random.random(x_input_shape) - 0.5
    xth = KTH.variable(xval)
    xtf = KTF.variable(xval)
    yval = np.random.random(y_input_shape) - 0.5
    yth = KTH.variable(yval)
    ytf = KTF.variable(yval)
    zth = KTH.eval(getattr(KTH, function_name)(xth, yth, **kwargs))
    ztf = KTF.eval(getattr(KTF, function_name)(xtf, ytf, **kwargs))
    assert zth.shape == ztf.shape
    assert_allclose(zth, ztf, atol=1e-05)
def check_composed_tensor_operations(first_function_name, first_function_args,
                                     second_function_name, second_function_args,
                                     input_shape):
    '''Creates a random tensor t0 with shape input_shape and computes
        t1 = first_function_name(t0, **first_function_args)
        t2 = second_function_name(t1, **second_function_args)
    with both the Theano and TensorFlow backends and ensures the answers match.
    '''
    val = np.random.random(input_shape) - 0.5
    xth = KTH.variable(val)
    xtf = KTF.variable(val)
    yth = getattr(KTH, first_function_name)(xth, **first_function_args)
    ytf = getattr(KTF, first_function_name)(xtf, **first_function_args)
    zth = KTH.eval(getattr(KTH, second_function_name)(yth, **second_function_args))
    ztf = KTF.eval(getattr(KTF, second_function_name)(ytf, **second_function_args))
    assert zth.shape == ztf.shape
    assert_allclose(zth, ztf, atol=1e-05)
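
test_shape_operations below also leans on check_single_tensor_operation, which this listing omits. A sketch consistent with the two helpers above (treat it as a reconstruction, not the verbatim original):

def check_single_tensor_operation(function_name, input_shape, **kwargs):
    val = np.random.random(input_shape) - 0.5
    xth = KTH.variable(val)
    xtf = KTF.variable(val)
    zth = KTH.eval(getattr(KTH, function_name)(xth, **kwargs))
    ztf = KTF.eval(getattr(KTF, function_name)(xtf, **kwargs))
    assert zth.shape == ztf.shape
    assert_allclose(zth, ztf, atol=1e-05)
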
def test_shape_operations(self):
    # concatenate
    xval = np.random.random((4, 3))
    xth = KTH.variable(xval)
    xtf = KTF.variable(xval)
    yval = np.random.random((4, 2))
    yth = KTH.variable(yval)
    ytf = KTF.variable(yval)
    zth = KTH.eval(KTH.concatenate([xth, yth], axis=-1))
    ztf = KTF.eval(KTF.concatenate([xtf, ytf], axis=-1))
    assert zth.shape == ztf.shape
    assert_allclose(zth, ztf, atol=1e-05)

    check_single_tensor_operation('reshape', (4, 2), shape=(8, 1))
    check_single_tensor_operation('permute_dimensions', (4, 2, 3),
                                  pattern=(2, 0, 1))
    check_single_tensor_operation('repeat', (4, 1), n=3)
    check_single_tensor_operation('flatten', (4, 1))
    check_single_tensor_operation('expand_dims', (4, 3), dim=-1)
    check_single_tensor_operation('expand_dims', (4, 3, 2), dim=1)
    check_single_tensor_operation('squeeze', (4, 3, 1), axis=2)
    check_single_tensor_operation('squeeze', (4, 1, 1), axis=1)
    check_composed_tensor_operations('reshape', {'shape': (4, 3, 1, 1)},
                                     'squeeze', {'axis': 2},
                                     (4, 3, 1, 1))
def test_repeat_elements(self):
    reps = 3
    for ndims in [1, 2, 3]:
        shape = np.arange(2, 2 + ndims)
        arr = np.arange(np.prod(shape)).reshape(shape)
        arr_th = KTH.variable(arr)
        arr_tf = KTF.variable(arr)
        for rep_axis in range(ndims):
            np_rep = np.repeat(arr, reps, axis=rep_axis)
            th_rep = KTH.eval(
                KTH.repeat_elements(arr_th, reps, axis=rep_axis))
            tf_rep = KTF.eval(
                KTF.repeat_elements(arr_tf, reps, axis=rep_axis))
            assert th_rep.shape == np_rep.shape
            assert tf_rep.shape == np_rep.shape
            assert_allclose(np_rep, th_rep, atol=1e-05)
            assert_allclose(np_rep, tf_rep, atol=1e-05)
def test_sparse_dot(self):
    x_d = np.array([0, 7, 2, 3], dtype=np.float32)
    x_r = np.array([0, 2, 2, 3], dtype=np.int64)
    x_c = np.array([4, 3, 2, 3], dtype=np.int64)
    x_sparse = sparse.csr_matrix((x_d, (x_r, x_c)), shape=(4, 5))
    x_dense = x_sparse.toarray()
    W = np.random.random((5, 4))
    backends = [KTF]
    if KTH.th_sparse_module:
        # Theano has some dependency issues for sparse
        backends.append(KTH)
    for K in backends:
        t_W = K.variable(W)
        k_s = K.eval(K.dot(K.variable(x_sparse), t_W))
        k_d = K.eval(K.dot(K.variable(x_dense), t_W))
        assert k_s.shape == k_d.shape
        assert_allclose(k_s, k_d, atol=1e-05)
def test_set_floatx(self):
    """
    Make sure that changes to the global floatx are effectively
    taken into account by the backend.
    """
    # Keep track of the old value
    old_floatx = floatx()

    set_floatx('float16')
    var = variable([10])
    check_dtype(var, 'float16')

    set_floatx('float64')
    var = variable([10])
    check_dtype(var, 'float64')

    # Restore old value
    set_floatx(old_floatx)
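
check_dtype is another helper this listing omits. One plausible definition, mirroring the Keras 1 test suite; the '_ref' dtype suffix is TensorFlow's naming for variables, and both branches here are assumptions:

def check_dtype(var, dtype):
    if K.backend() == 'theano':
        assert var.dtype == dtype
    else:
        assert var.dtype.name == '%s_ref' % dtype
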
def build(self, input_shape):
    # construct the clockwork structures
    # basically: every n units the period changes;
    # `period` flags this, `mask` enforces it
    n = self.output_dim // len(self.period_spec)
    mask = np.zeros((self.output_dim, self.output_dim), K.floatx())
    period = np.zeros((self.output_dim,), np.int16)
    for i, t in enumerate(self.period_spec):
        mask[i*n:(i+1)*n, i*n:] = 1
        period[i*n:(i+1)*n] = t
    self.mask = K.variable(mask, name='clockwork_mask')
    self.period = K.variable(period, dtype='int16', name='clockwork_period')

    super(ClockworkRNN, self).build(input_shape)

    self.U = self.U * self.mask  # the old implementation did this at run time...

    # SimpleRNN initializes self.states with the wrong size;
    # we also want to keep the time step in the state.
    if self.stateful:
        self.reset_states()
    else:
        self.states = [None, None]
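
For intuition, the block-triangular mask this builds for output_dim=4 and period_spec=(1, 2) comes out as below (NumPy only; the values are chosen purely for illustration):

import numpy as np

output_dim, period_spec = 4, (1, 2)
n = output_dim // len(period_spec)
mask = np.zeros((output_dim, output_dim))
for i, t in enumerate(period_spec):
    mask[i*n:(i+1)*n, i*n:] = 1
print(mask)
# [[1. 1. 1. 1.]   <- fast (period 1) units connect to every block
#  [1. 1. 1. 1.]
#  [0. 0. 1. 1.]   <- slow (period 2) units only see the slower blocks
#  [0. 0. 1. 1.]]
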
def reset_states(self):
    assert self.stateful, 'Layer must be stateful.'
    input_shape = self.input_spec[0].shape
    if not input_shape[0]:
        raise Exception('If an RNN is stateful, a complete '
                        'input_shape must be provided (including batch size).')
    if self.go_backwards:
        initial_time = self.input_spec[0].shape[1]
    else:
        initial_time = 0.
    if hasattr(self, 'states'):
        K.set_value(self.states[0],
                    np.zeros((input_shape[0], self.output_dim)))
        K.set_value(self.states[1], initial_time)
    else:
        self.states = [K.zeros((input_shape[0], self.output_dim)),
                       K.variable(initial_time)]