def test_link_matrix(self):
    b, n = 2, 5
    write_weighting = np.random.rand(b, n)
    precedence_weighting = np.random.rand(b, n)  # precedence weighting from previous time step
    # random link matrix with diagonals zero
    link_matrix_old = np.random.rand(b, n, n) * (1 - np.tile(np.eye(5), [b, 1, 1]))
    link_matrix_correct = np.zeros((b, n, n))
    for k in range(b):
        for i in range(n):
            for j in range(n):
                if i != j:
                    link_matrix_correct[k, i, j] = (1 - write_weighting[k, i] - write_weighting[k, j]) * \
                        link_matrix_old[k, i, j] + \
                        write_weighting[k, i] * precedence_weighting[k, j]

    with self.test_session():
        tf.global_variables_initializer().run()
        Memory.batch_size = b
        Memory.memory_size = n
        new_link_matrix = Memory.update_link_matrix(Memory,
                                                    tf.constant(link_matrix_old, dtype=tf.float32),
                                                    tf.constant(precedence_weighting, dtype=tf.float32),
                                                    tf.constant(write_weighting, dtype=tf.float32))
        self.assertAllClose(link_matrix_correct, new_link_matrix.eval())
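For reference, the triple loop above can be written as one vectorized NumPy expression; a minimal sketch using the same array names as the test (the broadcasting layout and the explicitly zeroed diagonal are the only additions):

import numpy as np

def update_link_matrix_np(link_old, precedence, write_w):
    """Vectorized form of the elementwise reference computation above.

    link_old:   (b, n, n) previous link matrix (zero diagonal)
    precedence: (b, n)    precedence weighting from the previous step
    write_w:    (b, n)    current write weighting
    """
    b, n = write_w.shape
    w_i = write_w[:, :, np.newaxis]     # (b, n, 1), broadcast over columns j
    w_j = write_w[:, np.newaxis, :]     # (b, 1, n), broadcast over rows i
    p_j = precedence[:, np.newaxis, :]  # (b, 1, n)
    new_link = (1 - w_i - w_j) * link_old + w_i * p_j
    # keep the diagonal at zero, matching the i != j condition in the loop
    new_link *= 1 - np.tile(np.eye(n), [b, 1, 1])
    return new_link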
Python tile() example source code
def create_test_input(batch_size, height, width, channels):
    """Create test input tensor.

    Args:
        batch_size: The number of images per batch or `None` if unknown.
        height: The height of each image or `None` if unknown.
        width: The width of each image or `None` if unknown.
        channels: The number of channels per image or `None` if unknown.

    Returns:
        Either a placeholder `Tensor` of dimension
        [batch_size, height, width, channels] if any of the inputs are `None` or a
        constant `Tensor` with the mesh grid values along the spatial dimensions.
    """
    if None in [batch_size, height, width, channels]:
        return tf.placeholder(tf.float32, (batch_size, height, width, channels))
    else:
        return tf.to_float(
            np.tile(
                np.reshape(
                    np.reshape(np.arange(height), [height, 1]) +
                    np.reshape(np.arange(width), [1, width]),
                    [1, height, width, 1]),
                [batch_size, 1, 1, channels]))
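For reference, a minimal NumPy-only sketch of the mesh-grid construction in the else branch above (the shapes and printed values follow directly from the arithmetic; no TensorFlow session is needed):

import numpy as np

# The NumPy core of create_test_input: a height x width grid of row + column
# indices, tiled over the batch and channel dimensions.
height, width, batch_size, channels = 3, 4, 2, 1
grid = np.reshape(np.arange(height), [height, 1]) + np.reshape(np.arange(width), [1, width])
tiled = np.tile(np.reshape(grid, [1, height, width, 1]), [batch_size, 1, 1, channels])
print(tiled.shape)        # (2, 3, 4, 1)
print(tiled[0, :, :, 0])
# [[0 1 2 3]
#  [1 2 3 4]
#  [2 3 4 5]]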
def closestPoints(mesh, pts, gridLoc='CC'):
    """Move a list of points to the closest points on a grid.

    :param BaseMesh mesh: The mesh
    :param numpy.ndarray pts: Points to move
    :param string gridLoc: ['CC', 'N', 'Fx', 'Fy', 'Fz', 'Ex', 'Ey', 'Ez']
    :rtype: numpy.ndarray
    :return: nodeInds
    """
    pts = asArray_N_x_Dim(pts, mesh.dim)
    grid = getattr(mesh, 'grid' + gridLoc)
    nodeInds = np.empty(pts.shape[0], dtype=int)

    for i, pt in enumerate(pts):
        if mesh.dim == 1:
            nodeInds[i] = ((pt - grid)**2).argmin()
        else:
            nodeInds[i] = ((np.tile(pt, (grid.shape[0], 1)) - grid)**2).sum(axis=1).argmin()

    return nodeInds
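The squared-distance-and-argmin pattern in the mesh.dim > 1 branch can be exercised without a mesh object; a minimal self-contained sketch, where the 2-D grid is a hypothetical stand-in for mesh.gridCC:

import numpy as np

# Hypothetical stand-in for a cell-centre grid: 4 points in 2-D.
grid = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
pt = np.array([0.9, 0.2])

# Same computation as the mesh.dim > 1 branch: tile the query point,
# take squared distances to every grid point, pick the smallest.
dists = ((np.tile(pt, (grid.shape[0], 1)) - grid) ** 2).sum(axis=1)
print(dists.argmin())  # 1 -> closest grid point is (1.0, 0.0)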
def get_im2col_indices(x_shape, field_height, field_width, padding=1, stride=1):
    # First figure out what the size of the output should be
    N, C, H, W = x_shape
    assert (H + 2 * padding - field_height) % stride == 0
    assert (W + 2 * padding - field_width) % stride == 0
    out_height = (H + 2 * padding - field_height) // stride + 1
    out_width = (W + 2 * padding - field_width) // stride + 1

    i0 = np.repeat(np.arange(field_height), field_width)
    i0 = np.tile(i0, C)
    i1 = stride * np.repeat(np.arange(out_height), out_width)
    j0 = np.tile(np.arange(field_width), field_height * C)
    j1 = stride * np.tile(np.arange(out_width), out_height)
    i = i0.reshape(-1, 1) + i1.reshape(1, -1)
    j = j0.reshape(-1, 1) + j1.reshape(1, -1)
    k = np.repeat(np.arange(C), field_height * field_width).reshape(-1, 1)

    return (k, i, j)
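A small sketch of using the returned (k, i, j) arrays to pull im2col patches out of a padded input, with the default stride 1 and padding 1 (the tiny 4x4 input is only for illustration):

import numpy as np

# Tiny input: N=1 image, C=1 channel, 4x4 spatial, 3x3 receptive field.
x = np.arange(16, dtype=float).reshape(1, 1, 4, 4)
x_padded = np.pad(x, ((0, 0), (0, 0), (1, 1), (1, 1)), mode='constant')

k, i, j = get_im2col_indices(x.shape, field_height=3, field_width=3, padding=1, stride=1)
cols = x_padded[:, k, i, j]   # shape (1, 9, 16): one flattened 3x3 patch per output position
print(cols.shape)             # (1, 9, 16)
print(cols[0, :, 5])          # the 3x3 patch centred on x[0, 0, 1, 1], flattened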
    def test_big_binary(self):
        """Test workarounds for 32-bit limited fwrite, fseek, and ftell
        calls in windows. These normally would hang doing something like this.
        See http://projects.scipy.org/numpy/ticket/1660"""
        if sys.platform != 'win32':
            return
        try:
            # before workarounds, only up to 2**32-1 worked
            fourgbplus = 2**32 + 2**16
            testbytes = np.arange(8, dtype=np.int8)
            n = len(testbytes)
            flike = tempfile.NamedTemporaryFile()
            f = flike.file
            np.tile(testbytes, fourgbplus // testbytes.nbytes).tofile(f)
            flike.seek(0)
            a = np.fromfile(f, dtype=np.int8)
            flike.close()
            assert_(len(a) == fourgbplus)
            # check only start and end for speed:
            assert_((a[:n] == testbytes).all())
            assert_((a[-n:] == testbytes).all())
        except (MemoryError, ValueError):
            pass
    def test_two_keys_two_vars(self):
        a = np.array(list(zip(np.tile([10, 11], 5), np.repeat(np.arange(5), 2),
                              np.arange(50, 60), np.arange(10, 20))),
                     dtype=[('k', int), ('a', int), ('b', int), ('c', int)])

        b = np.array(list(zip(np.tile([10, 11], 5), np.repeat(np.arange(5), 2),
                              np.arange(65, 75), np.arange(0, 10))),
                     dtype=[('k', int), ('a', int), ('b', int), ('c', int)])

        control = np.array([(10, 0, 50, 65, 10, 0), (11, 0, 51, 66, 11, 1),
                            (10, 1, 52, 67, 12, 2), (11, 1, 53, 68, 13, 3),
                            (10, 2, 54, 69, 14, 4), (11, 2, 55, 70, 15, 5),
                            (10, 3, 56, 71, 16, 6), (11, 3, 57, 72, 17, 7),
                            (10, 4, 58, 73, 18, 8), (11, 4, 59, 74, 19, 9)],
                           dtype=[('k', int), ('a', int), ('b1', int),
                                  ('b2', int), ('c1', int), ('c2', int)])

        test = join_by(
            ['a', 'k'], a, b, r1postfix='1', r2postfix='2', jointype='inner')
        assert_equal(test.dtype, control.dtype)
        assert_equal(test, control)
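The key columns in the test above rely on the complementary behaviour of np.tile and np.repeat; a short plain-NumPy comparison, independent of join_by:

import numpy as np

# tile cycles the whole sequence; repeat duplicates each element in place.
print(np.tile([10, 11], 5))          # [10 11 10 11 10 11 10 11 10 11]
print(np.repeat(np.arange(5), 2))    # [0 0 1 1 2 2 3 3 4 4]
# Zipped together they enumerate every (k, a) combination exactly once.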
    def dist_info_sym(self, obs_var, latent_var=None):  # this is meant to be for one path!
        # Currently this is not doing anything, and it does not work for computing
        # the dist_info_vars of npo_snn_rewardMI.
        if latent_var is None:
            # fix to avoid feeding the latent as an input: just take the fixed one
            latent_var1 = theano.shared(np.expand_dims(self.latent_fix, axis=0))
            latent_var = TT.tile(latent_var1, [obs_var.shape[0], 1])

        # generate the generalized input (append latents to obs.)
        if self.bilinear_integration:
            extended_obs_var = TT.concatenate([obs_var, latent_var,
                                               TT.flatten(obs_var[:, :, np.newaxis] * latent_var[:, np.newaxis, :],
                                                          outdim=2)],
                                              axis=1)
        else:
            extended_obs_var = TT.concatenate([obs_var, latent_var], axis=1)

        mean_var, log_std_var = L.get_output([self._l_mean, self._l_log_std], extended_obs_var)
        if self.min_std is not None:
            log_std_var = TT.maximum(log_std_var, np.log(self.min_std))
        return dict(mean=mean_var, log_std=log_std_var)
    def train(self, tran, selected):
        self.targetNet.blobs['frames'].data[...] \
            = tran.frames[selected + 1].copy()
        netOut = self.targetNet.forward()

        target = np.tile(tran.reward[selected]
                         + pms.discount
                         * tran.n_last[selected]
                         * np.resize(netOut['value_q'].max(1),
                                     (pms.batchSize, 1)),
                         (pms.actionSize,)
                         ) * tran.action[selected]

        self.solver.net.blobs['target'].data[...] = target
        self.solver.net.blobs['frames'].data[...] = tran.frames[selected].copy()
        self.solver.net.blobs['filter'].data[...] = tran.action[selected].copy()
        self.solver.step(1)
def knn_masked_data(trX, trY, missing_data_dir, input_shape, k):
    raw_im_data = np.loadtxt(join(script_dir, missing_data_dir, 'index.txt'), delimiter=' ', dtype=str)
    raw_mask_data = np.loadtxt(join(script_dir, missing_data_dir, 'index_mask.txt'), delimiter=' ', dtype=str)

    # Using 'brute' method since we only want to do one query per classifier
    # so this will be quicker as it avoids overhead of creating a search tree
    knn_m = KNeighborsClassifier(algorithm='brute', n_neighbors=k)
    prob_Y_hat = np.zeros((raw_im_data.shape[0], int(np.max(trY) + 1)))
    total_images = raw_im_data.shape[0]
    pbar = progressbar.ProgressBar(widgets=[progressbar.FormatLabel('\rProcessed %(value)d of %(max)d Images '),
                                            progressbar.Bar()],
                                   maxval=total_images, term_width=50).start()

    for i in range(total_images):
        mask_im = load_image(join(script_dir, missing_data_dir, raw_mask_data[i][0]), input_shape, 1).reshape(np.prod(input_shape))
        mask = np.logical_not(mask_im > eps)  # since mask is 1 at missing locations
        v_im = load_image(join(script_dir, missing_data_dir, raw_im_data[i][0]), input_shape, 255).reshape(np.prod(input_shape))
        rep_mask = np.tile(mask, (trX.shape[0], 1))
        # Corrupt whole training set according to the current mask
        corr_trX = np.multiply(trX, rep_mask)
        knn_m.fit(corr_trX, trY)
        prob_Y_hat[i, :] = knn_m.predict_proba(v_im.reshape(1, -1))
        pbar.update(i)

    pbar.finish()
    return prob_Y_hat
def get_im2col_indices(x_shape, field_height, field_width, padding=1, stride=1):
    # First figure out what the size of the output should be
    C, H, W = x_shape
    assert (H + 2 * padding - field_height) % stride == 0
    assert (W + 2 * padding - field_width) % stride == 0
    out_height = (H + 2 * padding - field_height) // stride + 1
    out_width = (W + 2 * padding - field_width) // stride + 1

    i0 = np.repeat(np.arange(field_height), field_width)
    i0 = np.tile(i0, C)
    i1 = stride * np.repeat(np.arange(out_height), out_width)
    j0 = np.tile(np.arange(field_width), field_height * C)
    j1 = stride * np.tile(np.arange(out_width), out_height)
    i = i0.reshape(-1, 1) + i1.reshape(1, -1)
    j = j0.reshape(-1, 1) + j1.reshape(1, -1)
    k = np.repeat(np.arange(C), field_height * field_width).reshape(-1, 1)

    return (k, i, j)
    def __init__(self, directory, num_act, mean_path, num_threads=1, capacity=1e5, batch_size=32,
                 scale=(1.0 / 255.0), s_t_shape=[84, 84, 4], x_t_1_shape=[84, 84, 1], colorspace='gray'):
        self.scale = scale
        self.s_t_shape = s_t_shape
        self.x_t_1_shape = x_t_1_shape

        # Load image mean
        mean = np.load(os.path.join(mean_path))

        # Prepare data flow
        s_t, a_t, x_t_1 = _read_and_decode(directory,
                                           s_t_shape=s_t_shape,
                                           num_act=num_act,
                                           x_t_1_shape=x_t_1_shape)
        self.mean = mean
        self.s_t_batch, self.a_t_batch, self.x_t_1_batch = tf.train.shuffle_batch([s_t, a_t, x_t_1],
                                                                                  batch_size=batch_size, capacity=capacity,
                                                                                  min_after_dequeue=int(capacity * 0.25),
                                                                                  num_threads=num_threads)

        # Subtract image mean (according to J Oh design)
        self.mean_const = tf.constant(mean, dtype=tf.float32)
        print(self.mean_const.get_shape())
        self.s_t_batch = (self.s_t_batch - tf.tile(self.mean_const, [1, 1, 4])) * scale
        self.x_t_1_batch = (self.x_t_1_batch - self.mean_const) * scale
    def get_op(self):
        """Returns all symmetry operations (including inversions and
        subtranslations), but unlike get_symop(), they are returned as
        two ndarrays."""
        if self.centrosymmetric:
            rot = np.tile(np.vstack((self.rotations, -self.rotations)),
                          (self.nsubtrans, 1, 1))
            trans = np.tile(np.vstack((self.translations, -self.translations)),
                            (self.nsubtrans, 1))
            trans += np.repeat(self.subtrans, 2 * len(self.rotations), axis=0)
            trans = np.mod(trans, 1)
        else:
            rot = np.tile(self.rotations, (self.nsubtrans, 1, 1))
            trans = np.tile(self.translations, (self.nsubtrans, 1))
            trans += np.repeat(self.subtrans, len(self.rotations), axis=0)
            trans = np.mod(trans, 1)
        return rot, trans
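The tile-then-repeat pattern above pairs every operation with every subtranslation; a small standalone illustration with made-up 2x2 "rotations" and two subtranslations (the values are hypothetical, only the pairing pattern matters):

import numpy as np

rotations = np.array([[[1, 0], [0, 1]], [[0, -1], [1, 0]]])   # 2 made-up operations
translations = np.array([[0.0, 0.0], [0.5, 0.5]])
subtrans = np.array([[0.0, 0.0], [0.5, 0.0]])                 # 2 subtranslations

# np.tile cycles the full set of operations once per subtranslation,
# while np.repeat lines each subtranslation up with that whole cycle.
rot = np.tile(rotations, (len(subtrans), 1, 1))               # shape (4, 2, 2)
trans = np.tile(translations, (len(subtrans), 1))             # shape (4, 2)
trans = np.mod(trans + np.repeat(subtrans, len(rotations), axis=0), 1)
print(rot.shape, trans)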
model_hypothesis.py source code (project: uai2017_learning_to_acquire_information, author: evanthebouncy)
    def get_feed_dic_obs(self, obs):
        # need to create all the necessary feeds
        obs_x = []
        obs_y = []
        obs_tf = []

        for _ in range(OBS_SIZE):
            obs_x.append(np.zeros([N_BATCH, L]))
            obs_y.append(np.zeros([N_BATCH, L]))
            obs_tf.append(np.zeros([N_BATCH, 2]))

        num_obs = len(obs)
        for ob_idx in range(num_obs):
            ob_coord, ob_lab = obs[ob_idx]
            ob_x, ob_y = vectorize(ob_coord)
            obs_x[ob_idx] = np.tile(ob_x, [50, 1])
            obs_y[ob_idx] = np.tile(ob_y, [50, 1])
            obs_tf[ob_idx] = np.tile(ob_lab, [50, 1])

        feed_dic = dict(zip(self.ph_obs_x + self.ph_obs_y + self.ph_obs_tf,
                            obs_x + obs_y + obs_tf))
        return feed_dic
    def collect_trajs_for_cost(self, n_trajs, pol, env, dom, cls):
        paths = []
        # print(n_trajs)
        for iter_step in range(0, n_trajs):
            paths.append(self.cyberpunk_rollout(agent=pol, env=env, max_path_length=self.horizon,
                                                reward_extractor=None))

        data_matrix = tensor_utils.stack_tensor_list([p['im_observations'] for p in paths])
        class_matrix = np.tile(cls, (n_trajs, self.horizon, 1))
        dom_matrix = np.tile(dom, (n_trajs, self.horizon, 1))

        # data_matrix = np.zeros(shape=(n_trajs, self.horizon, self.im_height, self.im_width, self.im_channels))
        # class_matrix = np.zeros(shape=(n_trajs, self.horizon, 2))
        # dom_matrix = np.zeros(shape=(n_trajs, self.horizon, 2))
        # for path, path_step in zip(paths, range(0, len(paths))):
        #     for sub_path, time_step in zip(path['im_observations'], range(0, self.horizon)):
        #         data_matrix[path_step, time_step, :, :, :] = sub_path
        #         class_matrix[path_step, time_step, :] = path['class']
        #         dom_matrix[path_step, time_step, :] = path['dom']

        return dict(data=data_matrix, classes=class_matrix, domains=dom_matrix)
    def sample(self, path, save_samples):
        gan = self.gan
        generator = gan.uniform_sample
        z_t = gan.uniform_encoder.sample
        x_t = gan.inputs.x
        sess = gan.session
        config = gan.config
        global x_v
        global z_v
        x_v = sess.run(x_t)
        x_v = np.tile(x_v[0], [gan.batch_size(), 1, 1, 1])

        sample = sess.run(generator, {x_t: x_v})
        stacks = []
        bs = gan.batch_size()
        width = 5
        print(np.shape(x_v), np.shape(sample))
        stacks.append([x_v[1], sample[1], sample[2], sample[3], sample[4]])
        for i in range(bs // width - 1):
            stacks.append([sample[i * width + width + j] for j in range(width)])

        images = np.vstack([np.hstack(s) for s in stacks])
        self.plot(images, path, save_samples)
        return [{'images': images, 'label': 'tiled x sample'}]
    def shadow_image(self, img, pos):
        if img is None:
            return None
        weighted_img = np.ones((img.shape[0], img.shape[1]), np.uint8)
        x = int(pos.x() / self.scale)
        y = int(pos.y() / self.scale)
        weighted_img[y, x] = 0
        dist_img = cv2.distanceTransform(weighted_img, distanceType=cv2.cv.CV_DIST_L2, maskSize=5).astype(np.float32)
        dist_sigma = self.img_size / 2.0
        dist_img_f = np.exp(-dist_img / dist_sigma)
        dist_img_f = np.tile(dist_img_f[..., np.newaxis], [1, 1, 3])
        l = 0.25
        img_f = img.astype(np.float32)
        rst_f = (img_f * l + (1 - l) * (img_f * dist_img_f + (1 - dist_img_f) * 255.0))
        rst = rst_f.astype(np.uint8)
        return rst
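The np.tile call above expands the single-channel weight map to three channels so it can multiply an RGB image elementwise; a minimal NumPy illustration:

import numpy as np

weight = np.array([[0.0, 0.5],
                   [1.0, 0.25]], dtype=np.float32)        # (H, W) single-channel map
weight_rgb = np.tile(weight[..., np.newaxis], [1, 1, 3])  # (H, W, 3), same value in every channel
print(weight_rgb.shape)   # (2, 2, 3)
print(weight_rgb[1, 0])   # [1. 1. 1.]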
def CSMToBinary(D, Kappa):
    """
    Turn a cross-similarity matrix into a binary cross-similarity matrix.
    If Kappa = 0, take all neighbors.
    If Kappa < 1, it is the fraction of mutual neighbors to consider.
    Otherwise, Kappa is the number of mutual neighbors to consider.
    """
    N = D.shape[0]
    M = D.shape[1]
    if Kappa == 0:
        return np.ones((N, M))
    elif Kappa < 1:
        NNeighbs = int(np.round(Kappa * M))
    else:
        NNeighbs = Kappa
    J = np.argpartition(D, NNeighbs, 1)[:, 0:NNeighbs]
    I = np.tile(np.arange(N)[:, None], (1, NNeighbs))
    V = np.ones(I.size)
    [I, J] = [I.flatten(), J.flatten()]
    ret = sparse.coo_matrix((V, (I, J)), shape=(N, M))
    return ret.toarray()
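A quick usage sketch of CSMToBinary (it assumes numpy and scipy.sparse are already imported, as the function body requires; the input matrix here is random, so only the row sums are predictable):

import numpy as np
from scipy import sparse  # CSMToBinary expects `sparse` to be in scope

np.random.seed(0)
D = np.random.rand(6, 8)        # 6x8 cross-similarity (distance) matrix
B = CSMToBinary(D, Kappa=3)     # keep the 3 nearest neighbours per row
print(B.shape)                  # (6, 8)
print(B.sum(axis=1))            # [3. 3. 3. 3. 3. 3.] -- exactly 3 ones per row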