def majorize(values):
"""Filter sequence to return only major considered numbers"""
sorted_values = sorted(values)
if len(values) <= 3 or (
abs(2 * sorted_values[1] - sorted_values[0] - sorted_values[2]) >
abs(1.5 * (sorted_values[1] - sorted_values[0]))):
return []
values_step = sorted_values[1] - sorted_values[0]
full_range = sorted_values[-1] - sorted_values[0]
step = 10 ** int(log10(full_range))
if step == values_step:
step *= 10
step_factor = 10 ** (int(log10(step)) + 1)
if round(step * step_factor) % (round(values_step * step_factor) or 1):
# TODO: Find lower common multiple instead
step *= values_step
if full_range <= 2 * step:
step *= .5
elif full_range >= 5 * step:
step *= 5
major_values = [
value for value in values if value / step == round(value / step)]
return [value for value in sorted_values if value in major_values]
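A quick worked call, added for illustration (the snippet itself assumes "from math import log10" is in scope):

from math import log10  # required by majorize

# full_range is 10, so step starts at 10 ** int(log10(10)) == 10 and is then
# halved to 5 because full_range <= 2 * step, leaving the multiples of 5:
print(majorize(list(range(11))))   # [0, 5, 10]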
def compute_logarithmic_scale(min_, max_, min_scale, max_scale):
"""Compute an optimal scale for logarithmic"""
if max_ <= 0 or min_ <= 0:
return []
min_order = int(floor(log10(min_)))
max_order = int(ceil(log10(max_)))
positions = []
amplitude = max_order - min_order
if amplitude <= 1:
return []
detail = 10.
while amplitude * detail < min_scale * 5:
detail *= 2
while amplitude * detail > max_scale * 3:
detail /= 2
for order in range(min_order, max_order + 1):
for i in range(int(detail)):
tick = (10 * i / detail or 1) * 10 ** order
tick = round_to_scale(tick, tick)
if min_ <= tick <= max_ and tick not in positions:
positions.append(tick)
return positions
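round_to_scale is not part of this snippet; assuming it leaves the already-round ticks generated here unchanged, a call walks whole decades (hand-traced numbers, added for illustration):

# compute_logarithmic_scale(1, 1000, 4, 8): min_order == 0, max_order == 3,
# amplitude == 3, and detail settles at 5, so the ticks come out as
# [1, 2, 4, 6, 8, 10, 20, 40, 60, 80, 100, 200, 400, 600, 800, 1000].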
def get_audio_levels(self):
"""
    Returns a tuple with left and right audio levels, or (None, None) if the frame is not valid
"""
if not self.version_is_valid():
return (None, None)
else:
int16_max = 0x7FFF
if self.audiolevel_left:
dB_l = int(20*math.log10(float(self.audiolevel_left) / int16_max))
else:
dB_l = -90
if self.audiolevel_right:
dB_r = int(20*math.log10(float(self.audiolevel_right) / int16_max))
else:
dB_r = -90
return (dB_l, dB_r)
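As a sanity check of the 20 * log10 conversion (added sketch, not from the original source): a full-scale 16-bit sample maps to 0 dB, a half-scale one to about -6 dB:

import math

int16_max = 0x7FFF
for sample in (int16_max, int16_max // 2, 328):
    print(int(20 * math.log10(sample / int16_max)))   # 0, -6, -39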
def test_calculate_SNR_positive_1(self):
source_array = [89, -89] * 6000 + [502, -502] * 8000 + [89, -89] * 7000
source_data = reduce(
lambda a, b: a + struct.pack('>h', b), source_array[1:], struct.pack('>h', source_array[0])
)
sampling_frequency = 8000
bounds_of_speech = [(2.0 * 6000.0 / sampling_frequency, 2.0 * (6000.0 + 8000.0) / sampling_frequency)]
silence_energy = reduce(
lambda a, b: a + b * b,
source_array[0:(2 * 6000)] + source_array[(2 * (6000 + 8000)):],
vad.EPS
) / (2.0 * (6000.0 + 7000.0))
speech_energy = reduce(
lambda a, b: a + b * b,
source_array[(2 * 6000):(2 * (6000 + 8000))],
vad.EPS
) / (2.0 * 8000.0)
target_snr = 20.0 * math.log10(speech_energy / silence_energy)
self.assertAlmostEqual(target_snr, vad.calculate_SNR(source_data, sampling_frequency, bounds_of_speech))
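With these synthetic amplitudes the expected value can be checked by hand (added arithmetic, using the same 20 * log10 convention as the code under test):

import math

silence_energy = 89 ** 2    # mean square of the +/-89 samples
speech_energy = 502 ** 2    # mean square of the +/-502 samples
print(20.0 * math.log10(speech_energy / silence_energy))   # ~30.05 dB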
def calculate_features_for_VAD(sound_frames, frequencies_axis, spectrogram):
features = numpy.empty((spectrogram.shape[0], 3))
    # smoothed_spectrogram, smoothed_frequencies_axis = smooth_spectrogram(spectrogram, frequencies_axis, 24)
for time_ind in range(spectrogram.shape[0]):
mean_spectrum = spectrogram[time_ind].mean()
if mean_spectrum > 0.0:
sfm = -10.0 * math.log10(stats.gmean(spectrogram[time_ind]) / mean_spectrum)
else:
sfm = 0.0
        # max_freq = smoothed_frequencies_axis[smoothed_spectrogram[time_ind].argmax()]
max_freq = frequencies_axis[spectrogram[time_ind].argmax()]
features[time_ind][0] = numpy.square(sound_frames[time_ind]).mean()
features[time_ind][1] = sfm
features[time_ind][2] = max_freq
"""medfilt_order = 3
for feature_ind in range(features.shape[0]):
features[feature_ind] = signal.medfilt(features[feature_ind], medfilt_order)"""
return features
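The second feature is the spectral flatness measure, sfm = -10 * log10(geometric mean / arithmetic mean): a noise-like frame scores near 0 dB, a tonal one scores high. A small illustration of that behaviour (added sketch):

import math
from scipy import stats

flat = [1.0, 1.0, 1.0, 1.0]       # white-noise-like spectrum
peaky = [0.01, 0.01, 0.01, 4.0]   # tonal spectrum
for spectrum in (flat, peaky):
    am = sum(spectrum) / len(spectrum)
    print(-10.0 * math.log10(stats.gmean(spectrum) / am))   # 0.0, then ~13.5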
def to_data(self, x, y):
'''Convert window coords to data coords.
:Parameters:
`x, y`:
The coordinates to convert (in window coords).
'''
adj_x = float(x - self._plot_area.pos[0])
adj_y = float(y - self._plot_area.pos[1])
norm_x = adj_x / self._plot_area.size[0]
norm_y = adj_y / self._plot_area.size[1]
if self.xlog:
xmin, xmax = log10(self.xmin), log10(self.xmax)
conv_x = 10. ** (norm_x * (xmax - xmin) + xmin)
else:
conv_x = norm_x * (self.xmax - self.xmin) + self.xmin
if self.ylog:
ymin, ymax = log10(self.ymin), log10(self.ymax)
conv_y = 10. ** (norm_y * (ymax - ymin) + ymin)
else:
conv_y = norm_y * (self.ymax - self.ymin) + self.ymin
return [conv_x, conv_y]
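On a log axis the inverse mapping is exponential, so (hand-checked numbers, added for illustration) the horizontal midpoint of a 1..100 x-range maps back to 10, not 50.5:

from math import log10

xmin, xmax, norm_x = 1.0, 100.0, 0.5
print(10. ** (norm_x * (log10(xmax) - log10(xmin)) + log10(xmin)))   # 10.0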
def draw(self, *args):
super(MeshLinePlot, self).draw(*args)
points = self.points
mesh = self._mesh
vert = mesh.vertices
ind = mesh.indices
params = self._params
funcx = log10 if params['xlog'] else lambda x: x
funcy = log10 if params['ylog'] else lambda x: x
xmin = funcx(params['xmin'])
ymin = funcy(params['ymin'])
diff = len(points) - len(vert) // 4
size = params['size']
ratiox = (size[2] - size[0]) / float(funcx(params['xmax']) - xmin)
ratioy = (size[3] - size[1]) / float(funcy(params['ymax']) - ymin)
if diff < 0:
del vert[4 * len(points):]
del ind[len(points):]
elif diff > 0:
ind.extend(range(len(ind), len(ind) + diff))
vert.extend([0] * (diff * 4))
for k in range(len(points)):
vert[k * 4] = (funcx(points[k][0]) - xmin) * ratiox + size[0]
vert[k * 4 + 1] = (funcy(points[k][1]) - ymin) * ratioy + size[1]
mesh.vertices = vert
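A note added for context: a Kivy Mesh vertex is four floats (x, y, u, v) by default, which explains the indexing above.

# point k writes its x at offset 4 * k and its y at offset 4 * k + 1, and the
# vertex list grows by diff * 4 entries per added point:
k = 2
print(k * 4, k * 4 + 1)   # 8 9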
def __scale_coefficient(self, result, result_index, t, sum_log=False):
"""
?????
:param result:????
:param result_index:??????
:param t: ??????
:param sum_log: ??c_coefficient???
:return:
"""
sum_column = np.sum(result[result_index][:, t], axis=0)
if sum_column == 0.:
result[result_index][:, t] = 1. / len(self.__states)
sum_column = 1.
result[result_index][:, t] /= sum_column
if sum_log:
self.__c_coefficient += math.log10(sum_column)
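Scaling each column like this prevents underflow over long sequences; the total log10-likelihood is then just the accumulated coefficients. A toy check of that identity (added sketch with hypothetical numbers):

import math

column_sums = [0.2, 0.05, 0.01]   # hypothetical per-step scaling coefficients
print(sum(math.log10(c) for c in column_sums))   # log10(0.2 * 0.05 * 0.01) == -4.0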
def _compute_divisions(self, xi, xf):
assert xf > xi
dx = xf - xi
size = dx
ndiv = 5
text_width = dx/ndiv/2
def rint(x):
return math.floor(x+0.5)
dx_over_ndiv = dx / ndiv
    for n in range(5):  # iterate 5 times to find optimum division size
        # div: length of each division
        tbe = math.log10(dx_over_ndiv)  # looking for approx. 'ndiv' divisions in a length 'dx'
        div = pow(10, rint(tbe))  # div: power of 10 closest to dx/ndiv
        if math.fabs(div / 2 - dx_over_ndiv) < math.fabs(div - dx_over_ndiv):  # is div/2 closer to dx/ndiv?
            div /= 2
        elif math.fabs(div * 2 - dx_over_ndiv) < math.fabs(div - dx_over_ndiv):  # is div*2 closer to dx/ndiv?
            div *= 2
x0 = div*math.ceil(xi / div) - div
if n > 1:
ndiv = rint(size / text_width)
return x0, div
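A worked pass (added numbers): with dx = 7 and ndiv = 5, dx_over_ndiv is 1.4, the power of 10 closest to it is 1, and neither halving nor doubling gets closer, so divisions land every 1 unit:

import math

dx_over_ndiv = 7 / 5
tbe = math.log10(dx_over_ndiv)         # ~0.146
div = pow(10, math.floor(tbe + 0.5))   # rint(0.146) == 0, so div == 1
print(div)                             # 1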
def _round_up_max(max_val):
"Rounds up a maximum value."
# Prevent zero values raising an error. Rounds up to 10 at a minimum.
max_val = max(10, max_val)
e = int(math.log10(max_val))
if e >= 2:
e -= 1
m = 10**e
return math.ceil(float(max_val)/m)*m
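Two worked calls (added examples): rounding happens one order of magnitude below the value's own, with a floor of 10:

# _round_up_max(347): e = int(log10(347)) == 2, reduced to 1, so m == 10
#                     and ceil(34.7) * 10 == 350
# _round_up_max(7):   clamped up to 10 first; e == 1 stays, m == 10, result 10
print(_round_up_max(347), _round_up_max(7))   # 350 10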
# Copied from Anki with the following changes:
# - Set tickDecimals to 0.
# - Update tickFormatter to show 1 decimal unless whole number
# TODO pull request to Anki to include these changes
def proba_to_quality_sanger(pe):
    """Return the Sanger quality score, a value between 0 and 93.
    :param pe: the probability of error.
    :return: Q, the quality score.
    - a high probability of error (0.99) gives Q = 0
    - a low probability of error (0.05) gives Q = 13
    - a low probability of error (0.01) gives Q = 20
    """
if pe > 1:
pe = 1
if pe < 1e-90:
pe = 1e-90
Qs = -10 * log10(pe)
if Qs > 93:
Qs = 93
return Qs
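This is the standard Phred formula Q = -10 * log10(pe); a few reference points (added check):

from math import log10

for pe in (0.1, 0.01, 0.001):
    print(round(-10 * log10(pe)))   # 10, 20, 30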
def proba_to_quality_solexa(pe):
    """Solexa quality score, used prior to FASTQ v1.3 (ref:
    https://en.wikipedia.org/wiki/FASTQ_format)
    """
    if pe >= 1:
        # pe == 1 would make pe / (1 - pe) divide by zero; -5 is the scale's floor
        return -5
    if pe < 1e-90:
        pe = 1e-90
Qs = -10 * log10(pe/(1-pe))
if Qs > 62:
Qs = 62
if Qs < -5:
Qs = -5
return Qs
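The Solexa variant scores the odds pe / (1 - pe) rather than pe itself, so it converges to the Sanger score as the error probability shrinks (added comparison):

from math import log10

for pe in (0.25, 0.01, 1e-4):
    sanger = -10 * log10(pe)
    solexa = -10 * log10(pe / (1 - pe))
    print(round(sanger, 2), round(solexa, 2))
# 6.02 4.77, then 20.0 19.96, then 40.0 40.0: the scales agree as pe -> 0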
def rep_log10(rep):
def log10(string):
leading_digits = int(string[0:4])
log = math.log10(leading_digits) + 0.00000001
num = len(string) - 1
return num + (log - int(log))
rep = str(rep)
if rep == "0":
return 25
sign = -1 if rep[0] == '-' else 1
if sign < 0:
rep = rep[1:]
out = log10(rep)
out = max(out - 9, 0) * sign # @ -9, $1 earned is approx magnitude 1
out = (out * 9) + 25 # 9 points per magnitude. center at 25
return round(out, 2)
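A worked trace (added): a raw reputation of 10 ** 12 has 13 digits, the inner helper returns roughly 12.0, and the final score is (12 - 9) * 9 + 25 = 52:

print(rep_log10(10 ** 12))   # 52.0
print(rep_log10("0"))        # 25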
def classify_naive_bayes(X_test, prior, likelihood, num):
p_not = math.log10(prior[0])
p_free = math.log10(prior[1])
not_dict = likelihood[0]
free_dict = likelihood[1]
not_num = num[0]
free_num = num[1]
voc_num = num[2]
for word in X_test:
# not free
if word in not_dict:
p_not += math.log10(1.0 * not_dict[word])
else:
p_not += math.log10(1.0 / (not_num + voc_num))
# free
if word in free_dict:
p_free += math.log10(1.0 * free_dict[word])
else:
p_free += math.log10(1.0 / (free_num + voc_num))
    return p_free >= p_not
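Working in log space is what keeps this numerically sane: a raw product of many small word likelihoods underflows to 0.0, while the log10 sum does not (added sketch):

import math

print(math.prod(1e-4 for _ in range(100)))        # 0.0, floats bottom out near 1e-308
print(sum(math.log10(1e-4) for _ in range(100)))  # -400.0, i.e. a probability of 1e-400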
def find(self, query, cutoff, limit=None):
"""Find similar fragments to query.
Args:
query (str): Query fragment identifier
cutoff (float): Cutoff, similarity scores below cutoff are discarded.
limit (int): Maximum number of hits. Default is None for no limit.
Returns:
list[tuple[str,float]]: Hit fragment identifier and similarity score
"""
precision = float(self.score_precision)
precision10 = float(10**(floor(log10(precision))))
scutoff = int(cutoff * precision)
query_id = self.cache_l2i[query]
subjects = self.h5file.root.scores[query_id, ...]
filled_subjects_ids = subjects.nonzero()[0]
filled_subjects = [(i, subjects[i]) for i in filled_subjects_ids]
    hits = [(self.cache_i2l[k], ceil(precision10 * v / precision) / precision10)
            for k, v in filled_subjects if v >= scutoff]
sorted_hits = sorted(hits, key=lambda r: r[1], reverse=True)
if limit is not None:
sorted_hits = sorted_hits[:limit]
return sorted_hits
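The ceil-based decompression rounds scores up at the stored precision; for example (added numbers) with score_precision == 1000, precision10 is also 1000 and an integer score of 755 maps back to 0.755:

from math import ceil, floor, log10

precision = 1000.0
precision10 = float(10 ** floor(log10(precision)))          # 1000.0
print(ceil(precision10 * 755 / precision) / precision10)    # 0.755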
def __getitem__(self, item):
"""Get all similarities of fragment.
Self is excluded.
Args:
        item (str): Label of a fragment
Returns:
list[tuple[str, float]]: list of (fragment_label, score)
"""
precision = float(self.score_precision)
precision10 = float(10**(floor(log10(precision))))
query_id = self.cache_l2i[item]
subjects = self.h5file.root.scores[query_id, ...]
    hits = [(self.cache_i2l[k], ceil(precision10 * v / precision) / precision10)
            for k, v in enumerate(subjects) if k != query_id]
return hits
def get_conf_int(nvar):
slices = []
    for x in range(int(math.ceil(math.log(nvar, 2)))):
        slices.append(2**x)
    slices.append(nvar - 1)
slices.reverse()
points = []
    for k in slices:
        rv = scipy.stats.beta(k, nvar - k)
        points.append((
            round(-math.log10((k - 0.5) / nvar), 2),
            round(-math.log10(rv.ppf(0.05 / 2)), 2),
            round(-math.log10(rv.ppf(1 - (0.05 / 2))), 2)
        ))
return points
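Under the null hypothesis the k-th smallest of n uniform p-values follows roughly a Beta(k, n - k) distribution, which is what the band is built from; each tuple holds -log10 of the expected p-value and of the two 95% band edges. A usage sketch (added):

# suitable for drawing a confidence band on a QQ plot of -log10 p-values:
for expected, band_hi, band_lo in get_conf_int(1000):
    print(expected, band_hi, band_lo)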
def write_languages(self, file_path='', date=None):
    """
    Updates the languages.csv file with current data.
    """
    # a default of str(datetime.date.today()) would be evaluated only once, at
    # function definition time, freezing the date; resolve it per call instead
    if date is None:
        date = str(datetime.date.today())
    self.remove_date(file_path=file_path, date=date)
languages_exists = os.path.isfile(file_path)
with open(file_path, 'a') as out_languages:
if not languages_exists:
out_languages.write('date,language,count,size,size_log\n')
languages_sorted = sorted(self.languages_size)
#self.delete_last_line(date=date, file_path=file_path)
for language in languages_sorted:
try:
out_languages.write(date + ',' + language + ','
+ str(self.languages[language]) + ','
+ str(self.languages_size[language]) + ','
+ str(math.log10(int(self.languages_size[language])))
+ '\n')
except (TypeError, KeyError) as e:
out_languages.write(date + ',' + language + ','
+ str(0) + ','
+ str(self.languages_size[language]) + ','
+ str(math.log10(int(self.languages_size[language])))
+ '\n')
def select_tweets(timeline, allow_rts=True, allow_replies=False, popular_only=True):
texts = []
for t in timeline:
        if 'retweeted_status' not in t:
if not allow_replies and t['in_reply_to_status_id_str']:
continue
            # score with log10, matching the retweeted branch below
            t['tweet_score'] = log10(t['retweet_count'] + 1.0) + log10(t['favorite_count'] + 1.0)
t['__is_rt__'] = False
texts.append(t)
else:
if allow_rts:
t['retweeted_status']['tweet_score'] = log10(t['retweet_count'] + 1.0) + log10(t['favorite_count'] + 1.0)
t['retweeted_status']['source_created_at'] = t['retweeted_status']['created_at']
t['retweeted_status']['created_at'] = t['created_at']
t['retweeted_status']['__is_rt__'] = True
texts.append(t['retweeted_status'])
#texts = sorted(texts, key=lambda x: x['tweet_score'], reverse=True)[0:100]
if popular_only:
texts = list(filter(lambda x: x['tweet_score'] > 0, texts))
return texts
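On this log10 scale (added worked numbers), 9 retweets plus 9 favourites score log10(10) + log10(10) == 2.0, while a tweet with no engagement scores 0 and is dropped when popular_only is set:

from math import log10

print(log10(9 + 1.0) + log10(9 + 1.0))   # 2.0
print(log10(0 + 1.0) + log10(0 + 1.0))   # 0.0 -> filtered out by popular_only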
def get_bon_thresh(normalized, power): # same
"""
Calculate the bonferroni correction threshold.
Divide the power by the sum of all finite values (all non-nan values).
:param normalized: an array of all normalized p-values. Normalized p-values are -log10(p) where p is the p-value.
:param power: the threshold power being used (usually 0.05)
:type normalized: numpy array
:type power: float
:returns: The bonferroni correction
:rtype: float
"""
return power / sum(np.isfinite(normalized))
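A quick worked call (added example): NaNs are not finite, so they drop out of the denominator:

import numpy as np

normalized = np.array([1.2, 3.4, np.nan, 0.7])   # -log10 p-values, one missing
print(0.05 / np.sum(np.isfinite(normalized)))    # 0.05 / 3 ~= 0.0167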
def test_logs(self):
LOG10E = math.log10(math.e)
for exp in list(range(10)) + [100, 1000, 10000]:
value = 10 ** exp
log10 = math.log10(value)
self.assertAlmostEqual(log10, exp)
# log10(value) == exp, so log(value) == log10(value)/log10(e) ==
# exp/LOG10E
expected = exp / LOG10E
log = math.log(value)
self.assertAlmostEqual(log, expected)
for bad in -(1 << 10000), -2, 0:
self.assertRaises(ValueError, math.log, bad)
self.assertRaises(ValueError, math.log10, bad)
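The identity the comment refers to, checked directly (added sketch):

import math

x = 10 ** 5
assert math.isclose(math.log(x), math.log10(x) / math.log10(math.e))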
def get_histogram(data, n=20, log=False):
    """ Groups data in N steps """
    import math
    mn = logfloor(min(data))
    mx = logroof(max(data))
    print('data=[%e:%e],ranges=[%e:%e]' % (min(data), max(data), mn, mx))
    if log:
        mn, mx = math.log10(mn), math.log10(mx)
    step = float(mx - mn) / n
    print('mn,mx,step = %s, %s, %s' % (mn, mx, step))
    ranges = []
    for i in range(n):
        r0 = mn + i * step
        r1 = mn + (i + 1) * step
        if log:
            r0, r1 = 10**r0, 10**r1
        ranges.append((r0, len([d for d in data if r0 <= d < r1])))
    return ranges
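logfloor and logroof are not shown in this snippet; a plausible reading, given how they are used, is rounding down and up to the nearest power of ten (hypothetical helpers, an assumption):

import math

def logfloor(x):
    """Hypothetical helper: largest power of 10 not exceeding x."""
    return 10 ** math.floor(math.log10(x))

def logroof(x):
    """Hypothetical helper: smallest power of 10 not below x."""
    return 10 ** math.ceil(math.log10(x))

print(get_histogram([3, 30, 300], n=3, log=True))
# one decade per bin: [(1.0, 1), (10.0, 1), (100.0, 1)]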
def get_channel(self, previous_value, new_value):
""" Prepares signal value depending on the previous one and algorithm. """
if self.stereo_algorithm == STEREO_ALGORITHM_NEW:
channel_value = new_value
    elif self.stereo_algorithm == STEREO_ALGORITHM_LOGARITHM:
        if previous_value == 0.0 or new_value == 0.0:
            # log10 of zero (or division by zero) is undefined; fall back to 0
            channel_value = 0.0
        else:
            channel_value = 20 * math.log10(new_value / previous_value)
        if channel_value < -20:
            channel_value = -20
        if channel_value > 3:
            channel_value = 3
        channel_value = (channel_value + 20) * (100 / 23)
elif self.stereo_algorithm == STEREO_ALGORITHM_AVERAGE:
channel_value = statistics.mean([previous_value, new_value])
return channel_value
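A worked pass through the logarithm branch (added numbers): halving the signal gives 20 * log10(0.5) ~ -6 dB, which the clamp-and-rescale step maps into the 0..100 output range:

import math

db = 20 * math.log10(0.5)       # ~ -6.02, inside the [-20, 3] clamp
print((db + 20) * (100 / 23))   # ~ 60.8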
def visualizeCrossValidation(results):
# Visualize the cross-validation results
x_scatter = [math.log10(x[0]) for x in results]
y_scatter = [math.log10(x[1]) for x in results]
# plot training accuracy
marker_size = 100
colors = [results[x][0] for x in results]
plt.subplot(2, 1, 1)
plt.scatter(x_scatter, y_scatter, marker_size, c=colors)
plt.colorbar()
plt.xlabel('log learning rate')
plt.ylabel('log regularization strength')
plt.title('CIFAR-10 training accuracy')
# plot validation accuracy
colors = [results[x][1] for x in results] # default size of markers is 20
plt.subplot(2, 1, 2)
plt.scatter(x_scatter, y_scatter, marker_size, c=colors)
plt.colorbar()
plt.xlabel('log learning rate')
plt.ylabel('log regularization strength')
plt.title('CIFAR-10 validation accuracy')
plt.show()
def _set_numticks(self):
self.numticks = 11 # todo; be smart here; this is just for dev
# def view_limits(self, vmin, vmax):
# 'Try to choose the view limits intelligently'
# if vmax<vmin:
# vmin, vmax = vmax, vmin
# if vmin==vmax:
# vmin-=1
# vmax+=1
# exponent, remainder = divmod(math.log10(vmax - vmin), 1)
# if remainder < 0.5:
# exponent -= 1
# scale = 10**(-exponent)
# vmin = math.floor(scale*vmin)/scale
# vmax = math.ceil(scale*vmax)/scale
# return mtransforms.nonsingular(vmin, vmax)