def compute_image_property(self, property_name, min_contact_area=None, sub_factor=8.):
"""
"""
computable_properties = ['barycenter','volume','neighborhood_size','layer','mean_curvature','gaussian_curvature']
    if property_name not in computable_properties:
        print("Property \"" + property_name + "\" cannot be computed on the image")
        print("Try one of the following:")
        for p in computable_properties:
            print("  * " + p)
    else:
if self._image is not None:
if property_name in ['barycenter','volume']:
graph = graph_from_image(self._image,background=self.background,spatio_temporal_properties=[property_name],ignore_cells_at_stack_margins=self.ignore_cells_at_stack_margins)
property_dict = graph.vertex_property(property_name)
elif property_name == 'neighborhood_size':
                neighbors = [self.image_graph.neighbors(l) for l in self.labels]
                property_dict = dict(zip(self.labels, [len(n) for n in neighbors]))
elif property_name == 'layer':
if min_contact_area is None:
min_contact_area = self.min_contact_area
graph = graph_from_image(self._image,background=self.background,spatio_temporal_properties=['L1'],ignore_cells_at_stack_margins=self.ignore_cells_at_stack_margins, min_contact_area=min_contact_area)
first_layer = graph.vertex_property('L1')
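                # Second layer = cells not in L1 that touch at least one L1 cell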
second_layer_cells = [v for v in graph.vertices() if np.any([first_layer[n] for n in graph.neighbors(v)]) and not first_layer[v]]
second_layer = dict(zip(list(graph.vertices()),[v in second_layer_cells for v in graph.vertices()]))
property_dict = dict(zip(self.labels,[1 if first_layer[l] else 2 if second_layer[l] else 3 for l in self.labels]))
elif property_name in ['mean_curvature','gaussian_curvature']:
if not self.has_image_property('barycenter'):
self.compute_image_property('barycenter')
if not self.has_image_property('layer'):
print "--> Computing layer property"
self.compute_image_property('layer')
                cell_centers = self.image_property('barycenter')
                # image_property returns an array_dict; its values() ndarray is
                # aligned with self.labels, so boolean masking selects the L1 cells
                L1_cells = self.labels[self.image_property('layer').values() == 1]
from openalea.cellcomplex.property_topomesh.utils.implicit_surfaces import implicit_surface_topomesh
from openalea.cellcomplex.property_topomesh.property_topomesh_analysis import compute_topomesh_property, compute_topomesh_vertex_property_from_faces
from openalea.cellcomplex.property_topomesh.property_topomesh_optimization import property_topomesh_vertices_deformation, topomesh_triangle_split
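                # Curvature pipeline: mesh an implicit surface of the (subsampled)
                # tissue mask, smooth it, compute face normals and curvatures,
                # transfer them to vertices, then pool the vertex values per L1
                # cell by matching each surface vertex to its nearest barycenter.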
                sub_binary_image = (self._image != self.background).astype(float)[::int(sub_factor), ::int(sub_factor), ::int(sub_factor)]
surface_topomesh = implicit_surface_topomesh(sub_binary_image,np.array(sub_binary_image.shape),sub_factor*np.array(self._image.voxelsize),center=False)
property_topomesh_vertices_deformation(surface_topomesh,iterations=10)
compute_topomesh_property(surface_topomesh,'barycenter',2)
compute_topomesh_property(surface_topomesh,'normal',2,normal_method='orientation')
compute_topomesh_vertex_property_from_faces(surface_topomesh,'normal',adjacency_sigma=2,neighborhood=5)
compute_topomesh_property(surface_topomesh,'mean_curvature',2)
compute_topomesh_vertex_property_from_faces(surface_topomesh,property_name,adjacency_sigma=2,neighborhood=5)
surface_cells = L1_cells[vq(surface_topomesh.wisp_property('barycenter',0).values(),cell_centers.values(L1_cells))[0]]
surface_topomesh.update_wisp_property('label',0,array_dict(surface_cells,list(surface_topomesh.wisps(0))))
L1_cell_property = nd.sum(surface_topomesh.wisp_property(property_name,0).values(),surface_cells,index=L1_cells)/nd.sum(np.ones_like(surface_cells),surface_cells,index=L1_cells)
L1_cell_property = array_dict(L1_cell_property,L1_cells)
property_dict = array_dict([L1_cell_property[l] if (l in L1_cells) and (not np.isnan(L1_cell_property[l])) else 0 for l in self.labels],self.labels)
property_data = [property_dict[l] for l in self.labels]
self.update_image_property(property_name,property_data)
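# A minimal usage sketch (hypothetical: `analysis` stands for an instance of the
# class this method belongs to, with a labelled segmentation already loaded).
def demo_compute_layer(analysis):
    analysis.compute_image_property('layer')
    # array_dict mapping each cell label to 1 (L1), 2 (L2) or 3 (inner)
    return analysis.image_property('layer')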
def kmeans_clustering(vectorLayer, attributesList, normalize, clusterNumber, outputFieldName):
    from scipy.cluster.vq import kmeans, vq
    from numpy import array
    from qgis.core import QgsField
    from PyQt4.QtCore import QVariant  # QGIS 2.x API, matching fieldNameIndex below
fullObjectsList = []
features = vectorLayer.getFeatures()
for feature in features:
fullObjectsList.append([])
        for attribute in attributesList:
            if feature[attribute[0]]:
                fullObjectsList[-1].append(feature[attribute[0]])
            else:
                # NULL or falsy attribute values are stored as 0
                fullObjectsList[-1].append(0)
    # Normalize each attribute by its maximum absolute value
    if normalize:
        maxValues = []
        for i in range(len(attributesList)):
            maxValues.append(max(abs(item[i]) for item in fullObjectsList))
        for obj in fullObjectsList:
            for i in range(len(obj)):
                obj[i] = float(obj[i]) / maxValues[i]
data = array(fullObjectsList)
    centroids, _ = kmeans(data, clusterNumber, 25)  # run k-means 25 times, keep the best codebook
    idx, _ = vq(data, centroids)  # assign each feature to its nearest centroid
idx = idx.tolist()
vectorLayerDataProvider = vectorLayer.dataProvider()
    # Create the output field if it does not exist
if vectorLayer.fieldNameIndex(outputFieldName) == -1:
vectorLayerDataProvider.addAttributes([QgsField(outputFieldName, QVariant.Int)])
vectorLayer.updateFields()
vectorLayer.startEditing()
attrIdx = vectorLayer.fieldNameIndex(outputFieldName)
features = vectorLayer.getFeatures()
i = 0
for feature in features:
vectorLayer.changeAttributeValue(feature.id(), attrIdx, int(idx[i]))
i += 1
vectorLayer.updateFields()
vectorLayer.commitChanges()
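# Hypothetical usage from the QGIS Python console: cluster the active layer's
# features on two numeric fields into 5 groups, writing labels to "CLUSTER".
# (`iface` is provided by QGIS; the field names here are placeholders.)
def demo_kmeans_clustering(iface):
    layer = iface.activeLayer()
    kmeans_clustering(layer, [["AREA"], ["PERIMETER"]], True, 5, "CLUSTER")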
def run_phase_vq_example():
def _pre(list_of_data):
        # The temporal window setting is crucial! 512 seems OK for music; 256
        # for the fruit speech data, perhaps due to the different sample rates
n_fft = 256
step = 32
f_r = np.vstack([np.abs(stft(dd, n_fft, step=step, real=False,
compute_onesided=False))
for dd in list_of_data])
return f_r, n_fft, step
def preprocess_train(list_of_data, random_state):
f_r, n_fft, step = _pre(list_of_data)
clusters = copy.deepcopy(f_r)
return clusters
def apply_preprocess(list_of_data, clusters):
f_r, n_fft, step = _pre(list_of_data)
f_clust = f_r
# Nondeterministic ?
memberships, distances = vq(f_clust, clusters)
vq_r = clusters[memberships]
d_k = iterate_invert_spectrogram(vq_r, n_fft, step, verbose=True)
return d_k
random_state = np.random.RandomState(1999)
fs, d = fetch_sample_speech_fruit()
d1 = d[::9]
d2 = d[7::8][:5]
# make sure d1 and d2 aren't the same!
assert [len(di) for di in d1] != [len(di) for di in d2]
clusters = preprocess_train(d1, random_state)
fix_d1 = np.concatenate(d1)
fix_d2 = np.concatenate(d2)
vq_d2 = apply_preprocess(d2, clusters)
wavfile.write("phase_train_no_agc.wav", fs, soundsc(fix_d1))
wavfile.write("phase_vq_test_no_agc.wav", fs, soundsc(vq_d2))
agc_d1, freq_d1, energy_d1 = time_attack_agc(fix_d1, fs, .5, 5)
agc_d2, freq_d2, energy_d2 = time_attack_agc(fix_d2, fs, .5, 5)
agc_vq_d2, freq_vq_d2, energy_vq_d2 = time_attack_agc(vq_d2, fs, .5, 5)
"""
import matplotlib.pyplot as plt
plt.specgram(agc_vq_d2, cmap="gray")
#plt.title("Fake")
plt.figure()
plt.specgram(agc_d2, cmap="gray")
#plt.title("Real")
plt.show()
"""
wavfile.write("phase_train_agc.wav", fs, soundsc(agc_d1))
wavfile.write("phase_test_agc.wav", fs, soundsc(agc_d2))
wavfile.write("phase_vq_test_agc.wav", fs, soundsc(agc_vq_d2))
def tetrahedra_topomesh(tetrahedra, positions, **kwargs):
tetrahedra = np.array(tetrahedra)
positions = array_dict(positions)
tetrahedra_triangles = array_unique(np.concatenate(np.sort(tetrahedra[:,tetra_triangle_list])))
tetrahedra_triangle_edges = tetrahedra_triangles[:,triangle_edge_list]
tetrahedra_triangle_vectors = positions.values(tetrahedra_triangle_edges[...,1]) - positions.values(tetrahedra_triangle_edges[...,0])
tetrahedra_triangle_lengths = np.linalg.norm(tetrahedra_triangle_vectors,axis=2)
tetrahedra_triangle_perimeters = tetrahedra_triangle_lengths.sum(axis=1)
tetrahedra_edges = array_unique(np.concatenate(tetrahedra_triangles[:,triangle_edge_list],axis=0))
start_time = time()
print "--> Generating tetrahedra topomesh"
triangle_edges = np.concatenate(tetrahedra_triangles[:,triangle_edge_list],axis=0)
triangle_edge_matching = vq(triangle_edges,tetrahedra_edges)[0]
tetrahedra_faces = np.concatenate(np.sort(tetrahedra[:,tetra_triangle_list]))
tetrahedra_triangle_matching = vq(tetrahedra_faces,tetrahedra_triangles)[0]
tetrahedra_topomesh = PropertyTopomesh(3)
for c in np.unique(tetrahedra_triangles):
tetrahedra_topomesh.add_wisp(0,c)
for e in tetrahedra_edges:
eid = tetrahedra_topomesh.add_wisp(1)
for pid in e:
tetrahedra_topomesh.link(1,eid,pid)
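    # NB: both loops below assume add_wisp assigns sequential ids starting at 0,
    # so that fid/cid can index into the flattened matching arrays.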
for t in tetrahedra_triangles:
fid = tetrahedra_topomesh.add_wisp(2)
for eid in triangle_edge_matching[3*fid:3*fid+3]:
tetrahedra_topomesh.link(2,fid,eid)
for t in tetrahedra:
cid = tetrahedra_topomesh.add_wisp(3)
for fid in tetrahedra_triangle_matching[4*cid:4*cid+4]:
tetrahedra_topomesh.link(3,cid,fid)
tetrahedra_topomesh.update_wisp_property('barycenter',0,positions.values(np.unique(tetrahedra_triangles)),keys=np.unique(tetrahedra_triangles))
end_time = time()
print "<-- Generating tetrahedra topomesh [",end_time-start_time,"s]"
return tetrahedra_topomesh
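# Hypothetical example: the topomesh of a single unit tetrahedron. Element
# counts per dimension should be 4 vertices, 6 edges, 4 triangles and 1 cell.
def demo_single_tetrahedron():
    points = {0: [0., 0., 0.], 1: [1., 0., 0.], 2: [0., 1., 0.], 3: [0., 0., 1.]}
    topomesh = tetrahedra_topomesh([[0, 1, 2, 3]], points)
    return [len(list(topomesh.wisps(d))) for d in range(4)]  # [4, 6, 4, 1]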
def poly_topomesh(polys, positions, faces_as_cells=False, **kwargs):
polys = np.array(polys)
positions = array_dict(positions)
    poly_lengths = np.array([len(p) for p in polys])
poly_edge_list = [np.transpose([np.arange(l),(np.arange(l)+1)%l]) for l in poly_lengths]
edges = array_unique(np.sort(np.concatenate([np.array(p)[l] for p,l in zip(polys,poly_edge_list)],axis=0)))
poly_edges = np.sort(np.concatenate([np.array(p)[l] for p,l in zip(polys,poly_edge_list)],axis=0))
start_time = time()
print "--> Generating poly topomesh"
poly_edge_matching = vq(poly_edges,edges)[0]
poly_topomesh = PropertyTopomesh(3)
for c in np.unique(polys):
poly_topomesh.add_wisp(0,c)
for e in edges:
eid = poly_topomesh.add_wisp(1)
for pid in e:
poly_topomesh.link(1,eid,pid)
total_poly_length = 0
for q,l in zip(polys,poly_lengths):
fid = poly_topomesh.add_wisp(2)
for eid in poly_edge_matching[total_poly_length:total_poly_length+l]:
poly_topomesh.link(2,fid,eid)
total_poly_length += l
if not faces_as_cells:
poly_topomesh.add_wisp(3,0)
for fid in poly_topomesh.wisps(2):
poly_topomesh.link(3,0,fid)
else:
for fid in poly_topomesh.wisps(2):
poly_topomesh.add_wisp(3,fid)
poly_topomesh.link(3,fid,fid)
poly_topomesh.update_wisp_property('barycenter',0,positions.values(np.unique(polys)),keys=np.unique(polys))
end_time = time()
print "<-- Generating poly topomesh [",end_time-start_time,"s]"
return poly_topomesh
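# Hypothetical example: a single square face, promoted to its own cell.
def demo_square_poly():
    points = {0: [0., 0., 0.], 1: [1., 0., 0.], 2: [1., 1., 0.], 3: [0., 1., 0.]}
    topomesh = poly_topomesh([[0, 1, 2, 3]], points, faces_as_cells=True)
    return [len(list(topomesh.wisps(d))) for d in range(4)]  # [4, 4, 1, 1]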
def minibatch_kmedians(X, M=None, n_components=10, n_iter=100,
minibatch_size=100, random_state=None):
n_clusters = n_components
if M is not None:
assert M.shape[0] == n_components
assert M.shape[1] == X.shape[1]
if random_state is None:
random_state = np.random.RandomState(random_state)
elif not hasattr(random_state, 'shuffle'):
# Assume integer passed
random_state = np.random.RandomState(int(random_state))
if M is None:
ind = np.arange(len(X)).astype('int32')
random_state.shuffle(ind)
M = X[ind[:n_clusters]]
center_counts = np.zeros(n_clusters)
    # Minibatch boundaries: 0, mb, 2*mb, ..., len(X)
    pts = list(np.arange(0, len(X), minibatch_size)) + [len(X)]
if len(pts) == 1:
# minibatch size > dataset size case
pts = [0, None]
    # Materialize as a list so it can be re-used across all n_iter epochs
    minibatch_indices = list(zip(pts[:-1], pts[1:]))
for i in range(n_iter):
for mb_s, mb_e in minibatch_indices:
Xi = X[mb_s:mb_e]
# Broadcasted Manhattan distance
# Could be made faster with einsum perhaps
centers = np.abs(Xi[:, None, :] - M[None]).sum(
axis=-1).argmin(axis=1)
            # Count how many times each center was chosen (repeats accumulate)
            np.add.at(center_counts, centers, 1)
scaled_lr = 1. / center_counts[centers]
Mi = M[centers]
scaled_lr = scaled_lr[:, None]
# Gradient of abs
Mi = Mi - scaled_lr * ((Xi - Mi) / np.sqrt((Xi - Mi) ** 2 + 1E-9))
M[centers] = Mi
# Reassign centers to nearest datapoint
mem, _ = vq(M, X)
M = X[mem]
return M
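# A short usage sketch on toy data (assumes numpy as np and scipy's vq, as above):
# recover 3 cluster medians, then assign every point to its nearest median.
def demo_minibatch_kmedians():
    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(50, 2) + c for c in ([0., 0.], [5., 5.], [-5., 5.])])
    M = minibatch_kmedians(X, n_components=3, n_iter=20, random_state=0)
    assignments, _ = vq(X, M)
    return M, assignments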