def stft(sig, frameSize, overlapFac=0.75, window=np.hanning):
""" short time fourier transform of audio signal """
win = window(frameSize)
hopSize = int(frameSize - np.floor(overlapFac * frameSize))
# zeros at beginning (thus center of 1st window should be for sample nr. 0)
# samples = np.append(np.zeros(np.floor(frameSize / 2.0)), sig)
samples = np.array(sig, dtype='float64')
# cols for windowing
cols = int(np.ceil((len(samples) - frameSize) / float(hopSize))) + 1  # must be an int for the strided shape below
# zeros at end (thus samples can be fully covered by frames)
# samples = np.append(samples, np.zeros(frameSize))
frames = stride_tricks.as_strided(
samples,
shape=(cols, frameSize),
strides=(samples.strides[0] * hopSize, samples.strides[0])).copy()
frames *= win
return np.fft.rfft(frames)
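A minimal usage sketch (assuming the module-level imports the snippet relies on, numpy as np and stride_tricks from numpy.lib): compute the spectrogram of a short test tone.

import numpy as np
from numpy.lib import stride_tricks

fs = 16000
t = np.arange(fs) / float(fs)            # one second of audio
sig = np.sin(2 * np.pi * 440.0 * t)      # 440 Hz test tone

spec = stft(sig, frameSize=512, overlapFac=0.75)
print(spec.shape)                        # (122, 257): frames x rfft bins
magnitude = np.abs(spec)                 # magnitude spectrogram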
# all the definitions of the following variables can be found in train_net.py
audio_eval.py (project: Multi-channel-speech-extraction-using-DNN, author: zhr1201)
def fftfilt(b, x, *n):
N_x = len(x)
N_b = len(b)
N = 2**np.arange(np.ceil(np.log2(N_b)),np.floor(np.log2(N_x)))
cost = np.ceil(N_x / (N - N_b + 1)) * N * (np.log2(N) + 1)
N_fft = int(N[np.argmin(cost)])
# Compute the block length:
L = int(N_fft - N_b + 1)
# Compute the transform of the filter:
H = np.fft.fft(b,N_fft)
y = np.zeros(N_x, x.dtype)
i = 0
while i <= N_x:
il = np.min([i+L,N_x])
k = np.min([i+N_fft,N_x])
yt = np.fft.ifft(np.fft.fft(x[i:il],N_fft)*H,N_fft) # Overlap..
y[i:k] = y[i:k] + yt[:k-i] # and add
i += L
return y
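A quick check of the overlap-add filtering against direct convolution (a sketch; the 32-tap moving-average FIR is a hypothetical choice):

import numpy as np

rng = np.random.RandomState(0)
x = rng.randn(10000)
b = np.ones(32) / 32.0                       # 32-tap moving-average FIR

y_fft = fftfilt(b, x)
y_ref = np.convolve(b, x)[:len(x)]           # full linear convolution, truncated
print(np.allclose(y_fft, y_ref))             # expected: True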
def __call__(self, batch):
images, labels = zip(*batch)
imgH = self.imgH
imgW = self.imgW
if self.keep_ratio:
ratios = []
for image in images:
w, h = image.size
ratios.append(w / float(h))
ratios.sort()
max_ratio = ratios[-1]
imgW = int(np.floor(max_ratio * imgH))
imgW = max(imgH * self.min_ratio, imgW)  # ensure imgW is at least imgH * min_ratio
transform = resizeNormalize((imgW, imgH))
images = [transform(image) for image in images]
images = torch.cat([t.unsqueeze(0) for t in images], 0)
return images, labels
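The keep_ratio branch only depends on the widest image in the batch; a standalone check of that width computation (hypothetical sizes, with min_ratio taken as 1):

import numpy as np

imgH, min_ratio = 32, 1                      # hypothetical collate settings
sizes = [(100, 32), (180, 32), (60, 32)]     # (w, h) of the images in a batch

max_ratio = max(w / float(h) for w, h in sizes)
imgW = max(imgH * min_ratio, int(np.floor(max_ratio * imgH)))
print(imgW)     # 180: every image is resized to (180, 32), so none is squashed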
def split_episodes(self, episode_paths, n_train, n_valid, n_test, seed=None, use_all=True):
"""Split episodes between training, validation and test sets.
seed: random seed (so the split is performed consistently every time)"""
if seed is not None:
random_state = np.random.get_state()
np.random.seed(seed)
np.random.shuffle(episode_paths)
np.random.set_state(random_state)
else:
np.random.shuffle(episode_paths)
if use_all:
multiplier = float(len(episode_paths)) / float(n_train + n_valid + n_test)
n_train = int(math.floor(multiplier * n_train))
n_valid = int(math.floor(multiplier * n_valid))
n_test = int(math.floor(multiplier * n_test))
assert n_train + n_valid + n_test <= len(episode_paths)
return (episode_paths[:n_train], episode_paths[n_train:n_train + n_valid],
episode_paths[n_train + n_valid:n_train + n_valid + n_test])
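Since self is unused, the split can be sketched by calling the function directly with hypothetical episode files (with the test slice fixed as above, the three sets are disjoint):

import math
import numpy as np

paths = ['ep_{:02d}.npz'.format(i) for i in range(12)]   # fake episode files
train, valid, test = split_episodes(None, paths, n_train=6,
                                    n_valid=3, n_test=1, seed=0)
# use_all=True rescales the requested sizes by 12 / 10, giving a 7 / 3 / 1 split
print(len(train), len(valid), len(test))                 # 7 3 1
assert not set(train) & set(valid) and not set(valid) & set(test)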
def earn_dividend(self, dividend):
"""
Register the number of shares we held at this dividend's ex date so
that we can pay out the correct amount on the dividend's pay date.
"""
assert dividend['sid'] == self.sid
out = {'id': dividend['id']}
# stock dividend
if dividend['payment_sid']:
out['payment_sid'] = dividend['payment_sid']
out['share_count'] = np.floor(self.amount
* float(dividend['ratio']))
# cash dividend
if dividend['net_amount']:
out['cash_amount'] = self.amount * dividend['net_amount']
elif dividend['gross_amount']:
out['cash_amount'] = self.amount * dividend['gross_amount']
payment_owed = zp.dividend_payment(out)
return payment_owed
def handle_data(self, data):
if self.target_shares == 0:
assert 0 not in self.portfolio.positions
self.order(self.sid(0), 10)
self.target_shares = 10
return
else:
assert self.portfolio.positions[0]['amount'] == \
self.target_shares, "Orders not filled immediately."
assert self.portfolio.positions[0]['last_sale_price'] == \
data[0].price, "Orders not filled at current price."
self.order_percent(self.sid(0), .001)
if isinstance(self.sid(0), Equity):
self.target_shares += np.floor(
(.001 * self.portfolio.portfolio_value) / data[0].price
)
if isinstance(self.sid(0), Future):
self.target_shares += np.floor(
(.001 * self.portfolio.portfolio_value) /
(data[0].price * self.sid(0).multiplier)
)
def int_bilin_MT(f, x, y):
# assume x, y are given in pixel coordinates
fint = np.zeros(len(x))
for i in range(len(x)):
t = y[i] - np.floor(y[i])
u = x[i] - np.floor(x[i])
y0 = f[int(np.floor(y[i])), int(np.floor(x[i]))]
y1 = f[int(np.floor(y[i])) + 1, int(np.floor(x[i]))]
y2 = f[int(np.floor(y[i])) + 1, int(np.floor(x[i])) + 1]
y3 = f[int(np.floor(y[i])), int(np.floor(x[i])) + 1]
fint[i] = t * u * (y0 - y1 + y2 - y3)
fint[i] += t * (y1 - y0)
fint[i] += u * (y3 - y0)
fint[i] += y0
return fint
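With np.int swapped for the builtin int as above, bilinear interpolation on a plane is exact, which makes a quick self-check easy (a sketch):

import numpy as np

yy, xx = np.mgrid[0:10, 0:10]
f = 2.0 * yy + 3.0 * xx                  # a plane: bilinear interpolation is exact

x = np.array([1.25, 4.5, 7.9])
y = np.array([0.5, 3.75, 6.1])
print(int_bilin_MT(f, x, y))             # matches 2*y + 3*x
print(2.0 * y + 3.0 * x)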
def expand_to_chunk_size(self, chunk_size, offset=Vec(0,0,0, dtype=int)):
"""
Align a potentially non-axis aligned bbox to the grid by growing it
to the nearest grid lines.
Required:
chunk_size: arraylike (x,y,z), the size of chunks in the
dataset e.g. (64,64,64)
Optional:
offset: arraylike (x,y,z), the starting coordinate of the dataset
"""
chunk_size = np.array(chunk_size, dtype=np.float32)
result = self.clone()
result = result - offset
result.minpt = np.floor(result.minpt / chunk_size) * chunk_size
result.maxpt = np.ceil(result.maxpt / chunk_size) * chunk_size
return result + offset
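The alignment itself is just a floor/ceil of the box corners in chunk units; a standalone sketch with plain arrays in place of the bounding-box class:

import numpy as np

chunk_size = np.array([64, 64, 64], dtype=np.float32)
minpt = np.array([10, 70, 100], dtype=np.float32)
maxpt = np.array([90, 130, 160], dtype=np.float32)

expanded_min = np.floor(minpt / chunk_size) * chunk_size
expanded_max = np.ceil(maxpt / chunk_size) * chunk_size
print(expanded_min, expanded_max)    # [ 0. 64. 64.] [128. 192. 192.]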
def shrink_to_chunk_size(self, chunk_size, offset=Vec(0,0,0, dtype=int)):
"""
Align a potentially non-axis aligned bbox to the grid by shrinking it
to the nearest grid lines.
Required:
chunk_size: arraylike (x,y,z), the size of chunks in the
dataset e.g. (64,64,64)
Optional:
offset: arraylike (x,y,z), the starting coordinate of the dataset
"""
chunk_size = np.array(chunk_size, dtype=np.float32)
result = self.clone()
result = result - offset
result.minpt = np.ceil(result.minpt / chunk_size) * chunk_size
result.maxpt = np.floor(result.maxpt / chunk_size) * chunk_size
return result + offset
def resize_image(image, target_shape, pad_value=0):
assert isinstance(target_shape, list) or isinstance(target_shape, tuple)
add_shape, subs_shape = [], []
image_shape = image.shape
shape_difference = np.asarray(target_shape, dtype=int) - np.asarray(image_shape,dtype=int)
for diff in shape_difference:
if diff < 0:
subs_shape.append(np.s_[int(np.abs(np.ceil(diff/2))):int(np.floor(diff/2))])
add_shape.append((0, 0))
else:
subs_shape.append(np.s_[:])
add_shape.append((int(np.ceil(1.0*diff/2)),int(np.floor(1.0*diff/2))))
output = np.pad(image, tuple(add_shape), 'constant', constant_values=(pad_value, pad_value))
output = output[tuple(subs_shape)]  # index with a tuple of slices, not a list
return output
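A small usage sketch (with the tuple-indexing fix above): padding is applied on axes that are too small and cropping on axes that are too large.

import numpy as np

image = np.arange(5 * 7, dtype=float).reshape(5, 7)
out = resize_image(image, target_shape=(8, 4), pad_value=-1)
print(out.shape)     # (8, 4): padded with -1 along axis 0, cropped along axis 1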
def get_mask_boundaries(self,image_shape,mask_shape,ROI_mask):
half_segment_dimensions = np.zeros((len(image_shape), 2), dtype='int32')
for index, dim in enumerate(image_shape):
if dim % 2 == 0:
half_segment_dimensions[index, :] = [dim / 2 - 1, dim / 2]
else:
half_segment_dimensions[index, :] = [np.floor(dim / 2)] * 2
mask_boundaries = np.zeros(mask_shape, dtype='int32')
mask_boundaries[half_segment_dimensions[0][0]:-half_segment_dimensions[0][1],
half_segment_dimensions[1][0]:-half_segment_dimensions[1][1],
half_segment_dimensions[2][0]:-half_segment_dimensions[2][1]] = 1
if ROI_mask is None:
return mask_boundaries
else:
return mask_boundaries * ROI_mask
def logTickValues(self, minVal, maxVal, size, stdTicks):
## start with the tick spacing given by tickValues().
## Any level whose spacing is < 1 needs to be converted to log scale
ticks = []
for (spacing, t) in stdTicks:
if spacing >= 1.0:
ticks.append((spacing, t))
if len(ticks) < 3:
v1 = int(np.floor(minVal))
v2 = int(np.ceil(maxVal))
#major = list(range(v1+1, v2))
minor = []
for v in range(v1, v2):
minor.extend(v + np.log10(np.arange(1, 10)))
minor = [x for x in minor if x>minVal and x<maxVal]
ticks.append((None, minor))
return ticks
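Outside the pyqtgraph AxisItem class, the minor-tick construction can be previewed on its own; for a log axis spanning 10**0.5 to 10**2.5 it yields the subdivisions of each visible decade (a sketch):

import numpy as np

minVal, maxVal = 0.5, 2.5                  # axis range in log10 units
v1, v2 = int(np.floor(minVal)), int(np.ceil(maxVal))
minor = []
for v in range(v1, v2):
    minor.extend(v + np.log10(np.arange(1, 10)))
minor = [x for x in minor if minVal < x < maxVal]
print(np.round(10 ** np.array(minor), 1))  # 4..9, 10, 20, ..., 90, 100, 200, 300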
def get_batch_idx(self, idx, **kwargs):
if self.mode == 'train':
new_idx = []
# self.log.info('Label IDX: {}'.format(idx))
if self.stats_provider is None:
label_ids = [ii % self._real_size for ii in idx]
else:
# print idx, self.stats_provider.get_size()
stats_batch = self.stats_provider.get_batch_idx(idx)
label_ids = []
for ii in xrange(len(idx)):
label_ids.append(np.argmax(stats_batch['y_gt'][ii]))
for ii in label_ids:
data_group = self.data_provider.label_idx[ii]
num_ids = len(data_group)
kk = int(np.floor(self.rnd.uniform(0, num_ids)))
new_idx.append(data_group[kk])
else:
new_idx = idx
return self.data_provider.get_batch_idx(new_idx)
def transform(self, images):
if self._aug_flag:
transformed_images =\
np.zeros([images.shape[0], self._imsize, self._imsize, 3])
ori_size = images.shape[1]
for i in range(images.shape[0]):
h1 = int(np.floor((ori_size - self._imsize) * np.random.random()))
w1 = int(np.floor((ori_size - self._imsize) * np.random.random()))
cropped_image =\
images[i][w1: w1 + self._imsize, h1: h1 + self._imsize, :]
if random.random() > 0.5:
transformed_images[i] = np.fliplr(cropped_image)
else:
transformed_images[i] = cropped_image
return transformed_images
else:
return images
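With the crop offsets cast to int as above, the augmentation can be exercised with a lightweight stand-in for self (SimpleNamespace is purely for illustration):

import random
import numpy as np
from types import SimpleNamespace

cfg = SimpleNamespace(_aug_flag=True, _imsize=64)   # stand-in for `self`
images = np.random.rand(8, 76, 76, 3)               # batch of 76x76 RGB images

out = transform(cfg, images)
print(out.shape)                                    # (8, 64, 64, 3)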
def gaussian_kernel(kernel_shape, sigma=None):
"""
Get 2D Gaussian kernel
:param kernel_shape: kernel size
:param sigma: sigma of Gaussian distribution
:return: 2D Gaussian kernel
"""
kern = numpy.zeros((kernel_shape, kernel_shape), dtype='float32')
# get sigma from kernel size
if sigma is None:
sigma = 0.3*((kernel_shape-1.)*0.5 - 1.) + 0.8
def gauss(x, y, s):
Z = 2. * numpy.pi * s ** 2.
return 1. / Z * numpy.exp(-(x ** 2. + y ** 2.) / (2. * s ** 2.))
mid = numpy.floor(kernel_shape / 2.)
for i in xrange(0, kernel_shape):
for j in xrange(0, kernel_shape):
kern[i, j] = gauss(i - mid, j - mid, sigma)
return kern / kern.sum()
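A quick sanity check of the kernel (note the helper is written in Python 2 style; on Python 3, xrange would need to be range):

import numpy

kern = gaussian_kernel(5)
print(kern.shape)                   # (5, 5)
print(round(float(kern.sum()), 3))  # 1.0: the kernel is normalized
print(kern[2, 2] == kern.max())     # True: the peak sits at the centre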
def _hpd_interval(self, x, width):
"""
Code adapted from pymc3.stats.calc_min_interval:
https://github.com/pymc-devs/pymc3/blob/master/pymc3/stats.py
"""
x = np.sort(x)
n = len(x)
interval_idx_inc = int(np.floor(width * n))
n_intervals = n - interval_idx_inc
interval_width = x[interval_idx_inc:] - x[:n_intervals]
if len(interval_width) == 0:
raise ValueError('Too few elements for interval calculation')
min_idx = np.argmin(interval_width)
hdi_min = x[min_idx]
hdi_max = x[min_idx + interval_idx_inc]
index = ['hpd{}_{}'.format(width, x) for x in ['lower', 'upper']]
return pd.Series([hdi_min, hdi_max], index=index)
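Since self is never touched, the helper can be called directly; on draws from a standard normal the 95% HPD interval should come out near plus/minus 1.96 (a sketch):

import numpy as np
import pandas as pd

rng = np.random.RandomState(42)
draws = rng.normal(loc=0.0, scale=1.0, size=20000)

interval = _hpd_interval(None, draws, width=0.95)
print(interval)   # hpd0.95_lower ~ -1.96, hpd0.95_upper ~ 1.96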
def SLdshear(inputArray, k, axis):
"""
Computes the discretized shearing operator for a given inputArray, shear
number k and axis.
This version is adapted so that MATLAB-style 1-based axis indexing can be used in this Python implementation.
"""
axis = axis - 1
if k==0:
return inputArray
rows = np.asarray(inputArray.shape)[0]
cols = np.asarray(inputArray.shape)[1]
shearedArray = np.zeros((rows, cols), dtype=inputArray.dtype)
if axis == 0:
for col in range(cols):
shearedArray[:,col] = np.roll(inputArray[:,col], int(k * np.floor(cols/2-col)))
else:
for row in range(rows):
shearedArray[row,:] = np.roll(inputArray[row,:], int(k * np.floor(rows/2-row)))
return shearedArray
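A small illustration: shearing a horizontal line turns it into a diagonal, with each column rolled by k * floor(cols/2 - col) samples (axis follows the 1-based MATLAB convention):

import numpy as np

A = np.zeros((7, 7))
A[3, :] = 1.0                       # horizontal line through the centre row

sheared = SLdshear(A, k=1, axis=1)  # axis=1 shears along the columns
print(sheared.astype(int))          # the line becomes a diagonal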
def value_to_bin_index(val, **kwargs):
"""Convert value to bin index
Convert a numeric or timestamp column to an integer bin index.
:param bin_width: bin_width value needed to convert column to an integer bin index
:param bin_offset: bin_offset value needed to convert column to an integer bin index
"""
try:
# NOTE this notation also works for timestamps
bin_width = kwargs.get('bin_width', 1)
bin_offset = kwargs.get('bin_offset', 0)
bin_index = int(np.floor((val - bin_offset) / bin_width))
return bin_index
except BaseException:
pass
return val
def value_to_bin_center(val, **kwargs):
"""Convert value to bin center
Convert a numeric or timestamp column to a common bin center value.
:param bin_width: bin_width value needed to convert column to a common bin center value
:param bin_offset: bin_offset value needed to convert column to a common bin center value
"""
try:
# NOTE this notation also works for timestamps, and does not change the
# unit
bin_width = kwargs.get('bin_width', 1)
bin_offset = kwargs.get('bin_offset', 0)
bin_index = int(np.floor((val - bin_offset) / bin_width))
obj_type = type(bin_width)
return bin_offset + obj_type((bin_index + 0.5) * bin_width)
except BaseException:
pass
return val
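A short usage sketch for both helpers; note that the centre is cast back to type(bin_width), so an integer bin_width yields an integer centre (which, per the comment in the code, also keeps timestamp units intact):

import numpy as np

print(value_to_bin_index(23.7, bin_width=5, bin_offset=0))     # 4
print(value_to_bin_center(23.7, bin_width=5.0, bin_offset=0))  # 22.5
print(value_to_bin_center(23.7, bin_width=5, bin_offset=0))    # 22 (int bin_width)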
def save_fft(fil,audio_in):
samples = len(audio_in)
fft_size = 2**int(floor(log(samples)/log(2.0)))
freq = fft(audio_in[0:fft_size])
s_data = numpy.zeros(fft_size/2)
x_data = numpy.zeros(fft_size/2)
peak = 0
for j in xrange(fft_size/2):
if (abs(freq[j]) > peak):
peak = abs(freq[j])
for j in xrange(fft_size/2):
x_data[j] = log(2.0*(j+1.0)/fft_size)
if (x_data[j] < -10):
x_data[j] = -10
s_data[j] = 10.0*log(abs(freq[j])/peak)/log(10.0)
plt.ylim([-50,0])
plt.plot(x_data,s_data)
plt.title('fft log power')
plt.grid()
fields = fil.split('.')
plt.savefig(fields[0]+'_fft.png', bbox_inches="tight")
plt.clf()
plt.close()
def _gene_embed_space(self,vec):
shape = vec.shape
vec = vec.flatten()
combo_neg_idx = np.array([1 if vec[i]<0 else 0 for i in range(len(vec))])
vec_pos = np.abs(vec)
int_part = np.floor(vec_pos)
frac_part = np.round(vec_pos - int_part,2)
bi_int_part = []  # 16-bit binary encoding of each integer part; bit 10 is extracted below as the signature
for i in range(len(int_part)):
bi=list(bin(int(int_part[i]))[2:])
bie = [0] * (16 - len(bi))
bie.extend(bi)
bi_int_part.append(np.array(bie,dtype=np.uint16))
bi_int_part = np.array(bi_int_part)
sig = []
for i in range(len(bi_int_part)):
sig.append(bi_int_part[i][10])
sig = np.array(sig).reshape(shape)
return np.array(bi_int_part),frac_part.reshape(shape),combo_neg_idx.reshape(shape),sig
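Since self is unused, the decomposition can be exercised directly (a sketch with a small hypothetical coefficient matrix; None stands in for self):

import numpy as np

vec = np.array([[3.75, -1.20],
                [0.00,  9.10]])
bits, frac, neg, sig = _gene_embed_space(None, vec)
print(bits.shape)   # (4, 16): each integer part as a 16-bit binary vector
print(frac)         # fractional parts rounded to 2 decimals, same shape as vec
print(neg)          # [[0 1] [0 0]]: 1 marks originally negative entries
print(sig)          # bit 10 of each 16-bit code (all zero for these small values)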
def M(self):
"""Returns the :math:`M` matrix of integers that determine points at which the
functions are sampled in the unit cell.
Examples:
For `S = [2, 2, 1]`, the returned matrix is:
.. code-block:: python
np.array([[0,0,0],
[1,0,0],
[0,1,0],
[1,1,0]], dtype=int)
"""
if self._M is None:
ms = np.arange(np.prod(self.S, dtype=int))
m1 = np.fmod(ms, self.S[0])
m2 = np.fmod(np.floor(ms/self.S[0]), self.S[1])
m3 = np.fmod(np.floor(ms/(self.S[0]*self.S[1])), self.S[2])
#Make sure we explicitly use an integer array; it's faster.
self._M = np.asarray(np.vstack((m1, m2, m3)).T, dtype=int)
return self._M
def _latvec_plot(self, R=True, withpts=False, legend=False):
"""Plots the lattice vectors (for real or reciprocal space).
"""
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(projection='3d')  # fig.gca(projection=...) no longer works in recent Matplotlib
vecs = self.R if R else self.K
for i in range(3):
steps = np.linspace(0, 1, int(np.floor(10 * np.linalg.norm(vecs[:, i]))))
Ri = vecs[:,i]
Ri.shape = (1, 3)
steps.shape = (len(steps), 1)
line = np.dot(steps, Ri)
ax.plot(line[:,0], line[:,1], line[:,2], label="R{0:d}".format(i+1))
if withpts:
pts = self.r if R else self.G
ax.scatter(pts[:,0], pts[:,1], pts[:,2], color='k')
if legend:
ax.legend()
return (fig, ax)
def cumultativesumstest(binin):
''' The focus of this test is the maximal excursion (from zero) of the random walk defined by the cumulative sum of adjusted (-1, +1) digits in the sequence. The purpose of the test is to determine whether the cumulative sum of the partial sequences occurring in the tested sequence is too large or too small relative to the expected behavior of that cumulative sum for random sequences. This cumulative sum may be considered as a random walk. For a random sequence, the random walk should be near zero. For non-random sequences, the excursions of this random walk away from zero will be too large.'''
n = len(binin)
ss = [int(el) for el in binin]
sc = map(sumi, ss)
cs = np.cumsum(sc)
z = max(abs(cs))
ra = 0
start = int(np.floor(0.25 * np.floor(-n / z) + 1))
stop = int(np.floor(0.25 * np.floor(n / z) - 1))
pv1 = []
for k in xrange(start, stop + 1):
pv1.append(sst.norm.cdf((4 * k + 1) * z / np.sqrt(n)) - sst.norm.cdf((4 * k - 1) * z / np.sqrt(n)))
start = int(np.floor(0.25 * np.floor(-n / z - 3)))
stop = int(np.floor(0.25 * np.floor(n / z) - 1))
pv2 = []
for k in xrange(start, stop + 1):
pv2.append(sst.norm.cdf((4 * k + 3) * z / np.sqrt(n)) - sst.norm.cdf((4 * k + 1) * z / np.sqrt(n)))
pval = 1
pval -= reduce(su, pv1)
pval += reduce(su, pv2)
return pval
b3_data_iter.py (project: kaggle-dstl-satellite-imagery-feature-detection, author: u1234x1234)
def rel_crop(im, rel_cx, rel_cy, crop_size):
map_size = im.shape[1]
r = crop_size / 2
abs_cx = rel_cx * map_size
abs_cy = rel_cy * map_size
na = np.floor([abs_cy-r, abs_cy+r, abs_cx-r, abs_cx+r]).astype(np.int32)
a = np.clip(na, 0, map_size)
px0 = a[2] - na[2]
px1 = na[3] - a[3]
py0 = a[0] - na[0]
py1 = na[1] - a[1]
crop = im[a[0]:a[1], a[2]:a[3]]
crop = np.pad(crop, ((py0, py1), (px0, px1), (0, 0)),
mode='reflect')
assert crop.shape == (crop_size, crop_size, im.shape[2])
return crop
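A usage sketch: cropping near the left edge shows the reflect padding in action (random data, hypothetical sizes):

import numpy as np

im = np.random.rand(256, 256, 3)                        # square map, channels last
patch = rel_crop(im, rel_cx=0.02, rel_cy=0.5, crop_size=64)
print(patch.shape)   # (64, 64, 3); the left side is reflect-padded because the
                     # requested window extends past the image border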