def test_zip(self):
    a = (1, 2, 3)
    b = (4, 5, 6)
    t = [(1, 4), (2, 5), (3, 6)]
    self.assertEqual(zip(a, b), t)
    b = [4, 5, 6]
    self.assertEqual(zip(a, b), t)
    b = (4, 5, 6, 7)
    self.assertEqual(zip(a, b), t)

    class I:
        def __getitem__(self, i):
            if i < 0 or i > 2:
                raise IndexError
            return i + 4

    self.assertEqual(zip(a, I()), t)
    self.assertEqual(zip(), [])
    self.assertEqual(zip(*[]), [])
    self.assertRaises(TypeError, zip, None)

    class G:
        pass

    self.assertRaises(TypeError, zip, a, G())

    # Make sure zip doesn't try to allocate a billion elements for the
    # result list when one of its arguments doesn't say how long it is.
    # A MemoryError is the most likely failure mode.
    class SequenceWithoutALength:
        def __getitem__(self, i):
            if i == 5:
                raise IndexError
            else:
                return i

    self.assertEqual(
        zip(SequenceWithoutALength(), xrange(2**30)),
        list(enumerate(range(5)))
    )

    class BadSeq:
        def __getitem__(self, i):
            if i == 5:
                raise ValueError
            else:
                return i

    self.assertRaises(ValueError, zip, BadSeq(), BadSeq())
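The test above relies on Python 2 semantics: zip() returns a list and xrange() yields values lazily, so the huge xrange argument is never materialized. The following is a minimal standalone sketch of the same idea outside unittest; it assumes a Python 2 interpreter.

# Standalone sketch of the zip/xrange behavior exercised above (Python 2 only:
# zip() returns a list there and xrange() is lazy).
class SequenceWithoutALength:
    def __getitem__(self, i):
        if i == 5:
            raise IndexError
        return i

if __name__ == '__main__':
    assert zip((1, 2, 3), (4, 5, 6)) == [(1, 4), (2, 5), (3, 6)]
    # zip stops as soon as the unsized sequence raises IndexError, so the
    # xrange(2**30) argument never allocates a billion-element result.
    assert zip(SequenceWithoutALength(), xrange(2**30)) == list(enumerate(range(5)))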
def computeNormals(vertices, faces):
    numVertices = len(vertices)
    numFaces = len(faces)
    normalsPerFace = [None] * numFaces
    areasPerFace = [0.0] * numFaces
    normalsPerVertex = np.zeros(vertices.shape, dtype=vertices.dtype)
    for i in xrange(0, numFaces):
        face = faces[i]
        v0 = vertices[face[0]]
        v1 = vertices[face[1]]
        v2 = vertices[face[2]]
        ctrd = centroid(v0, v1, v2)
        v1A = c3d.subtract(v1, v0)
        v2A = c3d.subtract(v2, v0)
        normalA = np.cross(v1A, v2A)
        viewPointA = c3d.add(ctrd, normalA)
        normalB = np.cross(v2A, v1A)
        viewPointB = c3d.add(ctrd, normalB)
        area = triangleArea(v0, v1)
        areasPerFace[i] = area
        squaredDistanceA = c3d.magnitudeSquared(viewPointA)
        squaredDistanceB = c3d.magnitudeSquared(viewPointB)
        # Always take the furthest point
        if squaredDistanceA > squaredDistanceB:
            normalsPerFace[i] = normalA
        else:
            normalsPerFace[i] = normalB
    for i in xrange(0, numFaces):
        face = faces[i]
        weightedNormal = [c * areasPerFace[i] for c in normalsPerFace[i]]
        for j in face:
            normalsPerVertex[j] = c3d.add(normalsPerVertex[j], weightedNormal)
    for i in xrange(0, numVertices):
        normalsPerVertex[i] = c3d.normalize(normalsPerVertex[i])
    return normalsPerVertex
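computeNormals depends on helpers that are not shown here (centroid, triangleArea and the c3d vector module). Below is a hedged, self-contained numpy-only sketch of the same area-weighted vertex-normal idea; it omits the viewpoint-based orientation check and the helper names are illustrative.

# Self-contained numpy sketch of area-weighted vertex normals. The c3d
# helpers, centroid() and triangleArea() used above are assumed and not
# reproduced; this version recomputes everything with numpy.
import numpy as np

def compute_normals_np(vertices, faces):
    vertices = np.asarray(vertices, dtype=float)   # (N, 3) vertex positions
    faces = np.asarray(faces, dtype=int)           # (M, 3) vertex indices
    v0 = vertices[faces[:, 0]]
    v1 = vertices[faces[:, 1]]
    v2 = vertices[faces[:, 2]]
    face_normals = np.cross(v1 - v0, v2 - v0)      # length equals 2 * triangle area
    vertex_normals = np.zeros_like(vertices)
    for face, normal in zip(faces, face_normals):
        # the un-normalized cross product already carries the area weighting
        vertex_normals[face] += normal
    lengths = np.linalg.norm(vertex_normals, axis=1, keepdims=True)
    lengths[lengths == 0] = 1.0                    # avoid division by zero
    return vertex_normals / lengths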
def process(self, block):
    """
    Main processing function: convolve one input block and return the
    left/right output blocks.

    :param block: current block of input samples
    :return: (outputLeft, outputRight)
    """
    # print("Convolver: process")
    # First: Fill buffer and FDLs with current block
    if not self.processStereo:
        # print('Convolver Mono Processing')
        self.fill_buffer_mono(block)
    else:
        # print('Convolver Stereo Processing')
        self.fill_buffer_stereo(block)
    # Second: Multiplication with the IR blocks and accumulation with previous data
    for irBlockCount in xrange(0, self.IR_blocks):
        # Always convolve with the current filter
        self.multiply_and_add(irBlockCount)
        # Also convolve with the old filter if interpolation is needed
        if self.interpolate:
            self.multiply_and_add_previous(irBlockCount)
    # Third: Transformation back to time domain
    if self.interpolate:
        # fade over full block size
        # print('do block interpolation')
        self.outputLeft = np.multiply(
            self.resultLeftPreviousIFFTPlan(self.resultLeftFreqPrevious).real[self.block_size:self.block_size * 2],
            self.crossFadeOut
        ) + np.multiply(
            self.resultLeftIFFTPlan(self.resultLeftFreq).real[self.block_size:self.block_size * 2],
            self.crossFadeIn
        )
        self.outputRight = np.multiply(
            self.resultRightPreviousIFFTPlan(self.resultRightFreqPrevious).real[self.block_size:self.block_size * 2],
            self.crossFadeOut
        ) + np.multiply(
            self.resultRightIFFTPlan(self.resultRightFreq).real[self.block_size:self.block_size * 2],
            self.crossFadeIn
        )
    else:
        self.outputLeft = self.resultLeftIFFTPlan(self.resultLeftFreq).real[self.block_size:self.block_size * 2]
        self.outputRight = self.resultRightIFFTPlan(self.resultRightFreq).real[self.block_size:self.block_size * 2]
    self.processCounter += 1
    self.interpolate = False
    return self.outputLeft, self.outputRight
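When the filter has changed, process() crossfades the block rendered with the previous filter into the block rendered with the current one. The following is a hedged numpy-only sketch of just that crossfade step, assuming linear fade ramps of one block length; the names are illustrative, not part of the original class.

# Minimal sketch of the crossfade used above: fade the old-filter output out
# while fading the new-filter output in over one block.
import numpy as np

def crossfade_blocks(previous_block, current_block):
    n = len(current_block)
    fade_in = np.linspace(0.0, 1.0, n)     # raised-cosine ramps are also common
    fade_out = 1.0 - fade_in
    return previous_block * fade_out + current_block * fade_in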
def to_association_matrix(self, bias='none', progress_callback=None):
    """Return a table with Markov associativities between columns
    (cf. Bavaud & Xanthos 2005, Deneulin et al. 2014)
    """
    freq = self.to_numpy()
    total_freq = freq.sum()
    sum_col = freq.sum(axis=0)
    sum_row = freq.sum(axis=1)
    exchange = np.dot(
        np.transpose(freq),
        np.dot(
            np.diag(1 / sum_row),
            freq
        )
    ) / total_freq
    if bias == 'frequent':
        output_matrix = exchange
    elif bias == 'none':
        sqrt_pi_inv = np.diag(1 / np.sqrt(sum_col / total_freq))
        output_matrix = np.dot(sqrt_pi_inv, np.dot(exchange, sqrt_pi_inv))
    else:
        pi_inv = np.diag(1 / (sum_col / total_freq))
        output_matrix = np.dot(pi_inv, np.dot(exchange, pi_inv))
    col_ids = self.col_ids
    values = dict()
    for col_id_idx1 in xrange(len(col_ids)):
        col_id1 = col_ids[col_id_idx1]
        values.update(
            dict(
                (
                    (col_id1, col_ids[i]),
                    output_matrix[col_id_idx1, i]
                )
                for i in xrange(len(col_ids))
            )
        )
        if progress_callback:
            progress_callback()
    new_header_row_id = (
        self.header_row_id[:-2]
        + "2"
        + self.header_row_id[-2:]
    )
    return (
        PivotCrosstab(
            self.col_ids[:],
            self.col_ids[:],
            values,
            new_header_row_id,
            self.header_row_type,
            self.header_row_id,
            self.header_row_type,
            col_type=self.col_type.copy(),
        )
    )
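The numerical core of to_association_matrix is the exchange matrix F' diag(1/row_sums) F / total with an optional column-mass normalization. Here is a hedged standalone numpy sketch of that computation, without the PivotCrosstab bookkeeping; the function name and example data are illustrative.

# Standalone sketch of the exchange / associativity computation above.
# `freq` is a small row-by-column frequency table; the bias handling mirrors
# the three branches of the method.
import numpy as np

def association_matrix(freq, bias='none'):
    freq = np.asarray(freq, dtype=float)
    total = freq.sum()
    sum_col = freq.sum(axis=0)
    sum_row = freq.sum(axis=1)
    exchange = freq.T.dot(np.diag(1.0 / sum_row)).dot(freq) / total
    if bias == 'frequent':
        return exchange
    if bias == 'none':
        d = np.diag(1.0 / np.sqrt(sum_col / total))
        return d.dot(exchange).dot(d)
    d = np.diag(1.0 / (sum_col / total))
    return d.dot(exchange).dot(d)

# Example: a 3x2 contingency table of counts.
print(association_matrix([[4, 1], [2, 3], [0, 5]], bias='none'))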
def to_flat(self, progress_callback=None):
    """Return a copy of the crosstab in 'flat' format"""
    new_header_col_id = '__id__'
    new_header_col_type = 'string'
    new_col_ids = [self.header_row_id or '__column__']
    num_row_ids = len(self.row_ids)
    if num_row_ids > 1:
        new_col_ids.append(self.header_col_id or '__row__')
        new_cached_row_id = None
        second_col_id = new_col_ids[1]
    else:
        new_cached_row_id = self.row_ids[0]
    new_col_type = dict([(col_id, 'discrete') for col_id in new_col_ids])
    row_counter = 1
    new_values = dict()
    new_row_ids = list()
    get_count = self.values.get
    first_col_id = new_col_ids[0]
    for row_id in self.row_ids:
        for col_id in self.col_ids:
            count = get_count((row_id, col_id), 0)
            for i in xrange(count):
                new_row_id = text(row_counter)
                new_row_ids.append(new_row_id)
                new_values[(new_row_id, first_col_id)] = col_id
                if num_row_ids > 1:
                    new_values[(new_row_id, second_col_id)] = row_id
                row_counter += 1
        if progress_callback:
            progress_callback()
    return (
        FlatCrosstab(
            new_row_ids,
            new_col_ids,
            new_values,
            header_col_id=new_header_col_id,
            header_col_type=new_header_col_type,
            col_type=new_col_type,
            class_col_id=None,
            missing=self.missing,
            _cached_row_id=new_cached_row_id,
        )
    )
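The core of this method is expanding each (row, column) count of the crosstab into that many individual rows of the flat table. A minimal dictionary-based sketch of that expansion, independent of the PivotCrosstab / FlatCrosstab containers, might look like this (the '__id__', '__row__' and '__column__' keys mirror the defaults above).

# Minimal sketch of the count-expansion idea behind to_flat(): every count in
# the crosstab becomes that many flat rows.
def expand_counts(counts):
    flat_rows = []
    for (row_id, col_id), count in sorted(counts.items()):
        for _ in range(count):
            flat_rows.append({'__id__': str(len(flat_rows) + 1),
                              '__row__': row_id,
                              '__column__': col_id})
    return flat_rows

# Example: two texts with word counts give three flat rows.
flat = expand_counts({('text1', 'cat'): 2, ('text2', 'dog'): 1})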
def to_flat(self, progress_callback=None):
    """Return a copy of the crosstab in 'flat' format"""
    new_col_ids = list([c for c in self.col_ids if c != '__weight__'])
    new_col_type = dict(self.col_type)
    del new_col_type['__weight__']
    row_counter = 1
    new_values = dict()
    new_row_ids = list()
    if len(self.col_ids) > 1:
        first_col_id = self.col_ids[0]
        second_col_id = self.col_ids[1]
        for row_id in self.row_ids:
            count = self.values[(row_id, '__weight__')]
            first_col_value = self.values[row_id, first_col_id]
            second_col_value = self.values[row_id, second_col_id]
            for i in xrange(count):
                new_row_id = text(row_counter)
                new_row_ids.append(new_row_id)
                new_values[(new_row_id, first_col_id)] = first_col_value
                new_values[(new_row_id, second_col_id)] = second_col_value
                row_counter += 1
            if progress_callback:
                progress_callback()
    else:
        col_id = self.col_ids[0]
        for row_id in self.row_ids:
            count = self.values[(row_id, '__weight__')]
            col_value = self.values[row_id, col_id]
            for i in xrange(count):
                new_row_id = text(row_counter)
                new_row_ids.append(new_row_id)
                new_values[(new_row_id, col_id)] = col_value
                row_counter += 1
            if progress_callback:
                progress_callback()
    return (
        FlatCrosstab(
            new_row_ids,
            new_col_ids,
            new_values,
            self.header_row_id,
            self.header_row_type,
            self.header_col_id,
            self.header_col_type,
            new_col_type,
            None,
            self.missing,
            self._cached_row_id,
        )
    )
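This second variant expands weighted rows rather than crosstab cells: each row carrying a '__weight__' count is repeated that many times and the weight column is dropped. A hedged, container-free sketch of that step, with plain dicts standing in for the crosstab classes and illustrative key names:

# Sketch of the weighted-row expansion used above.
def expand_weighted_rows(rows):
    flat_rows = []
    for row in rows:
        weight = row['__weight__']
        payload = {k: v for k, v in row.items() if k != '__weight__'}
        for _ in range(weight):
            flat_rows.append(dict(payload, __id__=str(len(flat_rows) + 1)))
    return flat_rows

# Example: two weighted rows become three flat rows.
flat = expand_weighted_rows([
    {'word': 'cat', '__weight__': 2},
    {'word': 'dog', '__weight__': 1},
])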