def xyz_at_latitude(local_xyz, lat):
"""
Rotate local XYZ coordinates into celestial XYZ coordinates. These
coordinate systems are very similar, with X pointing towards the
geographical east in both cases. However, before the rotation Z
points towards the zenith, whereas afterwards it will point towards
celestial north (parallel to the earth axis).
    :param local_xyz: Array of local XYZ coordinates
    :param lat: target latitude (radians or astropy quantity)
:return: Celestial XYZ coordinates
"""
x, y, z = numpy.hsplit(local_xyz, 3)
lat2 = numpy.pi / 2 - lat
y2 = -z * numpy.sin(lat2) + y * numpy.cos(lat2)
z2 = z * numpy.cos(lat2) + y * numpy.sin(lat2)
return numpy.hstack([x, y2, z2])
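A quick sanity check (illustrative only, assuming xyz_at_latitude is in scope): at the pole the rotation is the identity, since local Z already points along the Earth's axis, while at the equator the local zenith rotates into the celestial X-Y plane.

import numpy

local = numpy.array([[0.0, 0.0, 1.0]])       # local zenith vector
print(xyz_at_latitude(local, numpy.pi / 2))  # pole: identity -> [[0. 0. 1.]]
print(xyz_at_latitude(local, 0.0))           # equator: -> [[ 0. -1.  0.]]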
Python hsplit() usage examples
Source: coordinate_support.py (project: algorithm-reference-library, author: SKA-ScienceDataProcessor)
def paint_latents( event ):
global r, Z, output,painted_rects,MASK,USER_MASK,RECON
# Get extent of latent paintbrush
x1, y1 = ( event.x - d.get() ), ( event.y - d.get() )
x2, y2 = ( event.x + d.get() ), ( event.y + d.get() )
selected_widget = event.widget
# Paint in latent space and update Z
painted_rects.append(event.widget.create_rectangle( x1, y1, x2, y2, fill = rb(color.get()),outline = rb(color.get()) ))
    r[max((y1-bd),0):min((y2-bd),r.shape[0]),max((x1-bd),0):min((x2-bd),r.shape[1])] = color.get()/255.0
Z = np.asarray([np.mean(o) for v in [np.hsplit(h,Z.shape[0])\
for h in np.vsplit((r),Z.shape[1])]\
for o in v]).reshape(Z.shape[0],Z.shape[1])
if SAMPLE_FLAG:
update_photo(None,output)
update_canvas(w) # Remove this if you wish to see a more free-form paintbrush
else:
DELTA = model.sample_at(np.float32([Z.flatten()]))[0]-to_tanh(np.float32(RECON))
MASK=scipy.ndimage.filters.gaussian_filter(np.min([np.mean(np.abs(DELTA),axis=0),np.ones((64,64))],axis=0),0.7)
# D = dampen(to_tanh(np.float32(RECON)),MASK*DELTA+(1-MASK)*ERROR)
D = MASK*DELTA+(1-MASK)*ERROR
IM = np.uint8(from_tanh(to_tanh(RECON)+D))
update_canvas(w) # Remove this if you wish to see a more free-form paintbrush
update_photo(IM,output)
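A minimal, self-contained sketch of the vsplit/hsplit block-averaging idiom used above to downsample the painted canvas r into the latent grid Z (toy 4x4 canvas and 2x2 grid; values made up):

import numpy as np

r = np.arange(16, dtype=float).reshape(4, 4)   # stand-in for the painted canvas
Z = np.asarray([np.mean(o) for v in [np.hsplit(h, 2) for h in np.vsplit(r, 2)]
                for o in v]).reshape(2, 2)
print(Z)   # [[ 2.5  4.5] [10.5 12.5]] -- each entry is one block's mean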
# Scroll to lighten or darken an image patch
def hsplit(ary, indices_or_sections):
"""Splits an array into multiple sub arrays horizontally.
    This is equivalent to ``split`` with ``axis=0`` if ``ary`` has one
    dimension, and to ``split`` with ``axis=1`` otherwise.
.. seealso:: :func:`cupy.split` for more detail, :func:`numpy.hsplit`
"""
if ary.ndim == 0:
raise ValueError('Cannot hsplit a zero-dimensional array')
if ary.ndim == 1:
return split(ary, indices_or_sections, 0)
else:
return split(ary, indices_or_sections, 1)
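The axis switch is easy to verify with numpy, whose hsplit behaves the same way (illustrative):

import numpy as np

print(np.hsplit(np.arange(6), 2))                     # 1-D: split along axis 0
left, right = np.hsplit(np.arange(12).reshape(3, 4), 2)
print(left.shape, right.shape)                        # 2-D: split along axis 1 -> (3, 2) (3, 2)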
def apply_multiple_masked(func, data, args=(), kwargs={}):
# Data is a sequence of arrays (i.e. X, y pairs for training)
datastack = []
dims = []
flat = []
for d in data:
if d.ndim == 2:
datastack.append(d)
dims.append(d.shape[1])
flat.append(False)
elif d.ndim == 1:
datastack.append(d[:, np.newaxis])
dims.append(1)
flat.append(True)
else:
raise RuntimeError("data arrays have to be 1 or 2D arrays")
# Decorate functions to work on stacked data
    dims = np.cumsum(dims[:-1])  # don't split by the last dim
unstack = lambda catdata: [d.flatten() if f else d for d, f
in zip(np.hsplit(catdata, dims), flat)]
unstackfunc = lambda catdata, *nargs, **nkwargs: \
func(*chain(unstack(catdata), nargs), **nkwargs)
return apply_masked(unstackfunc, np.ma.hstack(datastack), args, kwargs)
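A toy illustration of the stack/unstack round trip above (names and shapes are made up): 1-D arrays ride along as single columns, and np.hsplit at the cumulative widths recovers each original array.

import numpy as np

X = np.arange(10.0).reshape(5, 2)             # 2-D data block, width 2
y = np.arange(5.0)                            # 1-D target, stacked as a column
stacked = np.hstack([X, y[:, np.newaxis]])
X2, y2 = np.hsplit(stacked, np.cumsum([2]))   # split after column 2
assert np.allclose(X2, X) and np.allclose(y2.flatten(), y)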
#
# Static module properties
#
# Add all models available to the learning pipeline here!
def testComputation(self):
batch_size = 2
hidden_size = 4
inputs = tf.placeholder(tf.float32, shape=[batch_size, hidden_size])
prev_cell = tf.placeholder(tf.float32, shape=[batch_size, hidden_size])
prev_hidden = tf.placeholder(tf.float32, shape=[batch_size, hidden_size])
lstm = snt.LSTM(hidden_size)
_, next_state = lstm(inputs, (prev_hidden, prev_cell))
next_hidden, next_cell = next_state
lstm_variables = lstm.get_variables()
param_map = {param.name.split("/")[-1].split(":")[0]:
param for param in lstm_variables}
# With random data, check the TF calculation matches the Numpy version.
input_data = np.random.randn(batch_size, hidden_size)
prev_hidden_data = np.random.randn(batch_size, hidden_size)
prev_cell_data = np.random.randn(batch_size, hidden_size)
with self.test_session() as session:
tf.global_variables_initializer().run()
fetches = [(next_hidden, next_cell),
param_map[snt.LSTM.W_GATES],
param_map[snt.LSTM.B_GATES]]
output = session.run(fetches,
{inputs: input_data,
prev_cell: prev_cell_data,
prev_hidden: prev_hidden_data})
next_state_ex, gate_weights_ex, gate_biases_ex = output
in_and_hid = np.concatenate((input_data, prev_hidden_data), axis=1)
real_gate = np.dot(in_and_hid, gate_weights_ex) + gate_biases_ex
# i = input_gate, j = next_input, f = forget_gate, o = output_gate
i, j, f, o = np.hsplit(real_gate, 4)
real_cell = (prev_cell_data / (1 + np.exp(-(f + lstm._forget_bias))) +
1 / (1 + np.exp(-i)) * np.tanh(j))
real_hidden = np.tanh(real_cell) * 1 / (1 + np.exp(-o))
self.assertAllClose(real_hidden, next_state_ex[0])
self.assertAllClose(real_cell, next_state_ex[1])
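For reference, a compact numpy-only sketch of the gate arithmetic the test verifies (shapes assumed; the sigmoid is written out as a helper rather than inline):

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

batch, hidden = 2, 4
gates = np.random.randn(batch, 4 * hidden)   # stand-in for real_gate
prev_cell = np.random.randn(batch, hidden)
i, j, f, o = np.hsplit(gates, 4)             # input, candidate, forget, output blocks
cell = prev_cell * sigmoid(f) + sigmoid(i) * np.tanh(j)
hidden_out = np.tanh(cell) * sigmoid(o)
print(hidden_out.shape)                      # (2, 4)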
Source: coordinate_support.py (project: algorithm-reference-library, author: SKA-ScienceDataProcessor)
def xyz_to_uvw(xyz, ha, dec):
"""
Rotate :math:`(x,y,z)` positions in earth coordinates to
:math:`(u,v,w)` coordinates relative to astronomical source
position :math:`(ha, dec)`. Can be used for both antenna positions
as well as for baselines.
Hour angle and declination can be given as single values or arrays
of the same length. Angles can be given as radians or astropy
quantities with a valid conversion.
:param xyz: :math:`(x,y,z)` co-ordinates of antennas in array
:param ha: hour angle of phase tracking centre (:math:`ha = ra - lst`)
:param dec: declination of phase tracking centre.
"""
x, y, z = numpy.hsplit(xyz, 3)
# Two rotations:
# 1. by 'ha' along the z axis
# 2. by '90-dec' along the u axis
u = x * numpy.cos(ha) - y * numpy.sin(ha)
v0 = x * numpy.sin(ha) + y * numpy.cos(ha)
w = z * numpy.sin(dec) - v0 * numpy.cos(dec)
v = z * numpy.cos(dec) + v0 * numpy.sin(dec)
return numpy.hstack([u, v, w])
Source: coordinate_support.py (project: algorithm-reference-library, author: SKA-ScienceDataProcessor)
def uvw_to_xyz(uvw, ha, dec):
"""
Rotate :math:`(x,y,z)` positions relative to a sky position at
:math:`(ha, dec)` to earth coordinates. Can be used for both
antenna positions as well as for baselines.
Hour angle and declination can be given as single values or arrays
of the same length. Angles can be given as radians or astropy
quantities with a valid conversion.
:param uvw: :math:`(u,v,w)` co-ordinates of antennas in array
:param ha: hour angle of phase tracking centre (:math:`ha = ra - lst`)
:param dec: declination of phase tracking centre
"""
u, v, w = numpy.hsplit(uvw, 3)
# Two rotations:
# 1. by 'dec-90' along the u axis
# 2. by '-ha' along the z axis
v0 = v * numpy.sin(dec) - w * numpy.cos(dec)
z = v * numpy.cos(dec) + w * numpy.sin(dec)
x = u * numpy.cos(ha) + v0 * numpy.sin(ha)
y = -u * numpy.sin(ha) + v0 * numpy.cos(ha)
return numpy.hstack([x, y, z])
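Since uvw_to_xyz applies the inverse rotations of xyz_to_uvw, a round trip should reproduce the input. A quick check (illustrative, assuming both functions are in scope):

import numpy

xyz = numpy.array([[100.0, 50.0, 25.0]])
ha, dec = 0.3, -0.5
assert numpy.allclose(uvw_to_xyz(xyz_to_uvw(xyz, ha, dec), ha, dec), xyz)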
Source: convolutional_gridding.py (project: algorithm-reference-library, author: SKA-ScienceDataProcessor)
def visibility_recentre(uvw, dl, dm):
""" Compensate for kernel re-centering - see `w_kernel_function`.
:param uvw: Visibility coordinates
:param dl: Horizontal shift to compensate for
:param dm: Vertical shift to compensate for
    :returns: Visibility coordinates re-centred on the peak of their w-kernel
"""
u, v, w = numpy.hsplit(uvw, 3)
return numpy.hstack([u - w * dl, v - w * dm, w])
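A worked example with made-up numbers: each baseline is shifted in u and v in proportion to its w term.

import numpy

uvw = numpy.array([[1.0, 2.0, 4.0]])
print(visibility_recentre(uvw, dl=0.1, dm=-0.2))   # [[0.6 2.8 4. ]]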
def test_nested_model_void(self):
from l1l2py import tools
data, test_data = np.vsplit(self.X, 2)
labels, test_labels = np.hsplit(self.Y, 2)
tau_opt, lambda_opt = (50.0, 0.1)
mu_range = np.linspace(0.1, 1.0, 10)
assert_raises(
ValueError, nested_models,
data, labels, test_data, test_labels,
mu_range, tau_opt, lambda_opt,
error_function=tools.regression_error,
data_normalizer=tools.standardize,
labels_normalizer=tools.center)
def convert_cmvn_to_numpy(inputs_cmvn, labels_cmvn):
"""Convert global binary ark cmvn to numpy format."""
tf.logging.info("Convert %s and %s to numpy format" % (
inputs_cmvn, labels_cmvn))
inputs_filename = os.path.join(FLAGS.data_dir, inputs_cmvn + '.cmvn')
labels_filename = os.path.join(FLAGS.data_dir, labels_cmvn + '.cmvn')
inputs = read_binary_file(inputs_filename, 0)
labels = read_binary_file(labels_filename, 0)
inputs_frame = inputs[0][-1]
labels_frame = labels[0][-1]
assert inputs_frame == labels_frame
cmvn_inputs = np.hsplit(inputs, [inputs.shape[1]-1])[0]
cmvn_labels = np.hsplit(labels, [labels.shape[1]-1])[0]
mean_inputs = cmvn_inputs[0] / inputs_frame
stddev_inputs = np.sqrt(cmvn_inputs[1] / inputs_frame - mean_inputs ** 2)
mean_labels = cmvn_labels[0] / labels_frame
stddev_labels = np.sqrt(cmvn_labels[1] / labels_frame - mean_labels ** 2)
cmvn_name = os.path.join(FLAGS.output_dir, "train_cmvn.npz")
np.savez(cmvn_name,
mean_inputs=mean_inputs,
stddev_inputs=stddev_inputs,
mean_labels=mean_labels,
stddev_labels=stddev_labels)
tf.logging.info("Write to %s" % cmvn_name)
def _generate_train_test_sets(self, samples, ratio_train):
num_samples_train = int(len(samples) * ratio_train)
data, labels = np.hsplit(samples, [-1])
X_train = np.array(data[:num_samples_train])
_labels = np.array(labels[:num_samples_train])
X_train_label = _labels.ravel()
X_test = np.array(data[num_samples_train:])
_labels = np.array(labels[num_samples_train:])
X_test_label = _labels.ravel()
return X_train, X_train_label, X_test, X_test_label
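np.hsplit(samples, [-1]) splits the feature columns from a trailing label column; for example, with toy data:

import numpy as np

samples = np.array([[0.1, 0.2, 0.0],
                    [0.3, 0.4, 1.0],
                    [0.5, 0.6, 1.0]])
data, labels = np.hsplit(samples, [-1])
print(data.shape, labels.ravel())   # (3, 2) [0. 1. 1.]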
Source: policy_gradient_configurer.py (project: reinforcement-learning-policy-gradients, author: DarkElement75)
def run_config(self, config_i, config_n, run_count, mb_n, initial_epsilon, initial_learning_rate, epsilon_decay_rate, learning_rate_decay_rate, discount_factor):
    # Ensure mb_n is an integer of at least 1
mb_n = int(mb_n)
if mb_n == 0:
mb_n = 1
    # Scale the decay rates by the epoch count so the optimized values still work for a different number of epochs
    # Negated so the hyperparameter class can search a positive range without going out of bounds
epsilon_decay_rate = -epsilon_decay_rate/self.epochs
learning_rate_decay_rate = -float(learning_rate_decay_rate)/self.epochs
cartpole_agent = PolicyGradientLearner(self.epochs, mb_n, self.timestep_n, initial_epsilon, initial_learning_rate, discount_factor, epsilon_decay_rate, learning_rate_decay_rate)
#For results, becomes 3d array of shape runs, epochs, values
results = []
for run_i in range(run_count):
#Reset environment
cartpole_agent.init_env('CartPole-v0')
#Gain more results for mean
results.append(cartpole_agent.train_env(config_i, config_n, run_i, run_count))
#Now we have 2d array of shape epochs, values
average_results = np.mean(results, axis=0)
    # This one is a bit fiddly:
    # We need the split points [1, 2, ..., n-1], where n is the number of recorded values, hence
    # i+1 for i in range(len(average_results[0])-1)
    # Then hsplit splits the matrix at those column indices,
    # giving an independent column of average values for each one
    average_values = np.hsplit(average_results, np.array([i + 1 for i in range(len(average_results[0]) - 1)]))
#So we can transpose the column vector back to a row one for each of these
average_values = [average_value.flatten() for average_value in average_values]
#Yay, assign values
average_costs, average_avg_timesteps, average_max_timesteps = average_values
return average_avg_timesteps
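The column-splitting idiom above, in miniature (synthetic results array):

import numpy as np

average_results = np.arange(12.0).reshape(4, 3)   # epochs x values
cols = np.hsplit(average_results, np.array([i + 1 for i in range(len(average_results[0]) - 1)]))
cols = [c.flatten() for c in cols]
print([c.tolist() for c in cols])   # one flat per-epoch series for each recorded value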
def hack(self, img):
test_img_array = np.asarray(bytearray(img), dtype=np.uint8)
test_img = cv2.imdecode(test_img_array, -1)
test_gray = cv2.cvtColor(test_img, cv2.COLOR_BGR2GRAY)
test_final = cv2.threshold(test_gray, 100, 255, cv2.THRESH_BINARY)[1]
test_cells = np.array([i.reshape(-1).astype(np.float32)
for i in np.hsplit(test_final, 4)])
ret, result, neighbours, dist = self.knn.find_nearest(test_cells, k=1)
result = result.reshape(-1)
letter = []
for i in result:
letter.append(chr(i))
return ''.join(letter)
def cut(filename):
img = cv2.imread(filename)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
final = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY)[1]
cells = np.hsplit(final, 4)
for i in range(4):
cv2.imwrite(filename.split('.')[0] + str(i) + '.jpg', cells[i])
def downpic(filename):
r = requests.get('http://mis.teach.ustc.edu.cn/randomImage.do')
img_array = np.asarray(bytearray(r.content), dtype=np.uint8)
img = cv2.imdecode(img_array, -1)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
final = cv2.threshold(gray, 100, 255, cv2.THRESH_BINARY)[1]
cells = np.hsplit(final, 4)
for i in range(4):
cv2.imwrite(str(filename+i)+'.jpg', cells[i])
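All three helpers above rely on the same step: a thresholded CAPTCHA of four equally wide characters hsplits cleanly into four cells. Illustratively, with a synthetic array (no cv2 needed):

import numpy as np

final = np.zeros((20, 80), dtype=np.uint8)   # stand-in for the thresholded image
cells = np.hsplit(final, 4)
print([c.shape for c in cells])              # [(20, 20), (20, 20), (20, 20), (20, 20)]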
Source: cyber_attack_classification.py (project: AppsOfDataAnalysis, author: nhanloukiala)
def Encoding(data, general_matrix=None):
encoder = LabelBinarizer()
count = 0
# encoding
for i in range(data.shape[1]):
if type(data[0, i]) == str:
count += 1
col = data[:, i]
unique = np.unique(col if general_matrix is None else general_matrix[:, i])
try:
encoder.fit(unique)
except:
pass
new_col = encoder.transform(col)
# split at i and i + 1
before, removed, after = np.hsplit(data, [i, i + 1])
# concatenate
data = np.concatenate((before, new_col, after), axis=1)
before, removed, after = np.hsplit(general_matrix, [i, i + 1])
general_matrix = np.concatenate((before, encoder.transform(general_matrix[:, i]), after), axis=1)
print "count : %d" % count
# return data
return data
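The three-way split at [i, i + 1] isolates column i so it can be replaced by its one-hot encoding; schematically, with a toy matrix:

import numpy as np

data = np.arange(12).reshape(3, 4)
before, removed, after = np.hsplit(data, [1, 2])
print(before.shape, removed.shape, after.shape)   # (3, 1) (3, 1) (3, 2)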
def extract_data(filenames):
    # Check that all of the input files exist
    for f in filenames:
        if not tf.gfile.Exists(f):
            raise ValueError('Failed to find file: ' + f)
    # Read the data
    labels = None
    images = None
    for f in filenames:
        bytestream = open(f, 'rb')
        # Read the raw bytes
        buf = bytestream.read(TRAIN_NUM * (IMAGE_SIZE * IMAGE_SIZE * NUM_CHANNELS + LABEL_SIZE))
        # Convert the buffer into a numpy array
        data = np.frombuffer(buf, dtype=np.uint8)
        # Reshape to one record (label + image) per row
        data = data.reshape(TRAIN_NUM, LABEL_SIZE + IMAGE_SIZE * IMAGE_SIZE * NUM_CHANNELS)
        # Split the labels from the images
        labels_images = np.hsplit(data, [LABEL_SIZE])
        label = labels_images[0].reshape(TRAIN_NUM)
        image = labels_images[1].reshape(TRAIN_NUM, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS)
        if labels is None:  # was `labels == None`, which breaks once labels is an ndarray
            labels = label
            images = image
        else:
            # Concatenate with the records read so far
            labels = np.concatenate((labels, label))
            images = np.concatenate((images, image))
    images = (images - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH
    return labels, images
def __recall(self, data, gridSize = 10):
assert self.__optimized == True
assert type(data) == ndarray
assert min(data.shape) > 2
y, X = hsplit(data, [1])
ny = len(y)
assert gridSize < ny
    assert set(unique(y)).issubset({-1, 0, 1})  # membership check; the original `unique(y).all() in [-1, 0, 1]` always passed
assert X.shape[1] == self._m
from math import ceil
grid = linspace(0, ny - 1, gridSize, True)
orderedLabels = y[argsort(X.dot(self.w), axis=0).flatten()[::-1]] == 1
proportions = cumsum(orderedLabels)/sum(orderedLabels)
recall = list(map(lambda tick: proportions[ceil(tick)], grid))
recall.insert(0, 0.)
grid = list((grid+1)/ny)
grid.insert(0, 0.)
return (grid, recall)
def _split_array(image):
"""Splits an image into 16x16 sized tiles.
Returns a list of arrays.
"""
tiles = []
dims = image.shape
split_image = np.vsplit(image, int(dims[0]/16))
for tile in split_image:
tiles.extend(np.hsplit(tile, int(dims[1]/16)))
return tiles
#Currently for 16x16 tiles only
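For a 32x32 input this yields a 2x2 grid of tiles, e.g. (assuming _split_array is in scope):

import numpy as np

tiles = _split_array(np.zeros((32, 32)))
print(len(tiles), tiles[0].shape)   # 4 (16, 16)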
def __call__(self, coords):
assert isinstance(coords, np.ndarray)
try:
d0, d1 = coords.shape
        assert d1 == 2
except (ValueError, AssertionError):
raise NotImplementedError("input coords must be [N x 2] dimension numpy array")
if d1 != 2:
raise NotImplementedError("input coords must be [N x 2] dimension numpy array")
xarr, yarr = np.hsplit(coords, 2)
res = self.fwd(xarr, yarr)
return res
def determine_sweeps(self):
"""
Determine if input interferogram is single-sweep or
double-sweep (Forward-Backward).
"""
# Just testing 1st row for now
# assuming all in a group were collected the same way
data = self.data.X[0]
zpd = irfft.peak_search(data)
middle = data.shape[0] // 2
# Allow variation of +/- 0.4 % in zpd location
var = middle // 250
if zpd >= middle - var and zpd <= middle + var:
# single, symmetric
self.sweeps = 0
else:
try:
data = np.hsplit(data, 2)
except ValueError:
# odd number of data points, probably single
self.sweeps = 0
return
zpd1 = irfft.peak_search(data[0])
zpd2 = irfft.peak_search(data[1][::-1])
# Forward / backward zpds never perfectly match
if zpd1 >= zpd2 - var and zpd1 <= zpd2 + var:
# forward-backward, symmetric and asymmetric
self.sweeps = 1
else:
            # single, asymmetric
self.sweeps = 0
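The try/except above hinges on np.hsplit refusing an uneven split; illustratively:

import numpy as np

print([half.shape for half in np.hsplit(np.arange(8), 2)])   # [(4,), (4,)]
try:
    np.hsplit(np.arange(9), 2)
except ValueError:
    print("odd number of points: cannot split into two sweeps")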
def EM_GMM_GMM_clustering(instance_array_amps, n_clusters=9, sin_cos = 0, number_of_starts = 10, show_covariances = 0, clim=None, covariance_type='diag', n_iter = 50):
'''
Cluster using a Gaussian for the real and imag part of the ratio of the complex value between adjacent channels
Supposed to be for imaging diagnostics
SRH: 18May2014
'''
    print('starting EM-GMM-GMM algorithm from scikit-learn, clusters=%d, retries : %d' % (n_clusters, number_of_starts))
#tmp = np.zeros((instance_array_amps.shape[0], instance_array_amps.shape[1]-1),dtype=complex)
#for i in range(1,instance_array_amps.shape[1]):
# tmp[:,i-1] = instance_array_amps[:,i]/instance_array_amps[:,i-1]
#print 'ratio :', np.sum(np.abs(np.imag(instance_array_amps)))/np.sum(np.abs(np.real(instance_array_amps)))
data_complex = instance_array_amps/np.sum(instance_array_amps,axis = 1)[:,np.newaxis]
#data_complex = instance_array_amps/(instance_array_amps[:,2])[:,np.newaxis]
#print 'hello..', instance_array_amps.shape
    input_data = np.hstack((np.real(data_complex), np.imag(data_complex)))  # imaginary part in the second half, matching the re/im split of the fitted means below (was np.real twice)
#k_means_cluster_assignments, k_means_cluster_details = k_means_clustering(input_data, n_clusters=n_clusters, sin_cos = 1, number_of_starts = 3,)
#print k_means_cluster_assignments
#input_data = np.hstack((np.abs(data_complex),(np.abs(data_complex))))
n_dim = data_complex.shape[1]
#print n_clusters
gmm = mixture.GMM(n_components = n_clusters, covariance_type = covariance_type, n_init = number_of_starts, n_iter = n_iter,)
gmm.fit(input_data)
cluster_assignments = gmm.predict(input_data)
bic_value = gmm.bic(input_data)
LL = np.sum(gmm.score(input_data))
#Extract the means, variances and covariances
gmm_covars = np.array(gmm._get_covars())
gmm_vars = np.array([np.diagonal(i) for i in gmm._get_covars()])
gmm_vars_re, gmm_vars_im = np.hsplit(gmm_vars,2)
gmm_covars_re = np.array([i[0:n_dim,0:n_dim] for i in gmm._get_covars()])
gmm_covars_im = np.array([i[n_dim:,n_dim:] for i in gmm._get_covars()])
gmm_means = gmm.means_
gmm_means_re, gmm_means_im = np.hsplit(gmm_means, 2)
#Bundle up the answer
cluster_details = {'EM_GMM_means':gmm_means, 'EM_GMM_variances':gmm_vars, 'EM_GMM_covariances':gmm_covars, 'EM_GMM_means_re':gmm_means_re, 'EM_GMM_variances_re':gmm_vars_re, 'EM_GMM_covariances_re':gmm_covars_re,'EM_GMM_means_im':gmm_means_im, 'EM_GMM_variances_im':gmm_vars_im, 'EM_GMM_covariances_im':gmm_covars_im,'BIC':bic_value,'LL':LL}
    print('EM_GMM_GMM Converged: %s' % gmm.converged_)
return cluster_assignments, cluster_details
def colsAsList(A):
if len(A.shape) < 2:
return A
return np.hsplit(A, A.shape[1])
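Example with a toy matrix: each column comes back as an (N, 1) array.

import numpy as np

A = np.arange(6).reshape(2, 3)
for col in colsAsList(A):
    print(col.ravel())   # [0 3], then [1 4], then [2 5]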
def rotateVec(vec, rotaxis, theta):
"""
Given a 3-vector vec, rotate about rotaxis by $\theta$
Also accepts iterable input for vec and rotaxis if the arguments are
compatible lengths.
"""
assert not (np.any(np.isnan(vec)) or
np.any(np.isnan(rotaxis)) or
np.any(np.isnan(theta))), "Inputs must not be NaN"
if np.shape(vec) == (3, ):
R = rotationMatrix3D(rotaxis, theta)
norm = np.linalg.norm(vec)
res = np.dot(R, vec)
assert np.isclose(np.linalg.norm(res), norm), "Rotation changed vector norm"
return np.dot(R, vec)
else:
assert np.shape(vec)[0] == np.shape(rotaxis)[0] == np.shape(theta)[0], "Dimension mismatch in rotateVec()"
# Unfortunately, seems that np.dot can't be coerced into doing this operation all at once
        # Tried to build a tensor of rotation matrices and use np.einsum, but couldn't get good results.
# If this becomes slow at any point, it's a good target for optimization.
res = np.zeros(shape=(np.shape(vec)[0], 3))
for i, (v, r, t) in enumerate(zip(vec, rotaxis, theta)):
# print("In rotateVec(): r={}, t={}".format(r, t))
norm = np.linalg.norm(v)
R = rotationMatrix3D(r, t)
res[i] = np.dot(R, v)
assert np.isclose(np.linalg.norm(res[i]), norm), "Rotation changed vector norm: v={}, r={}, t={}, R={}".format(v, r, t, R)
return np.hsplit(res, 3)
def multiple_vector_test():
vecs = [(1, 0, 0),
(0, 1, 0),
(0, 0, 1)]
rotaxes = [(0, 0, 1),
(0, 0, 1),
(0, 0, 1)]
thetas = [np.pi/4,
np.pi/4,
np.pi/4]
expected = np.hsplit(np.array([(np.sqrt(2)/2., np.sqrt(2)/2., 0),
(-np.sqrt(2)/2., np.sqrt(2)/2., 0),
(0, 0, 1)]), 3)
res = rotateVec(vecs, rotaxes, thetas)
assert np.allclose(res, expected)