def binarize_predictions(array, task='binary.classification'):
    ''' Turn predictions into decisions {0,1} by selecting the class with the largest
    score for multiclass problems and thresholding at 0.5 for other cases.'''
    # Optional tie breaker (kept disabled): add a tiny random value to every score,
    # seeded from the array shape so repeated calls break ties identically.
#eps = 1e-15
#np.random.seed(sum(array.shape))
#array = array + eps*np.random.rand(array.shape[0],array.shape[1])
bin_array = np.zeros(array.shape)
if (task != 'multiclass.classification') or (array.shape[1]==1):
bin_array[array>=0.5] = 1
else:
sample_num=array.shape[0]
for i in range(sample_num):
j = np.argmax(array[i,:])
bin_array[i,j] = 1
return bin_array
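# Hedged usage sketch for binarize_predictions (assumes numpy is imported as np,
# as the function above requires; the score arrays are made up for illustration).
scores_multi = np.array([[0.2, 0.7, 0.1],
                         [0.4, 0.3, 0.3]])
print(binarize_predictions(scores_multi, task='multiclass.classification'))
# one-hot rows: class 1 for the first sample, class 0 for the second
print(binarize_predictions(np.array([[0.8], [0.2]])))
# thresholded at 0.5: [[1.], [0.]]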
def baseline(output_dir, basename, valid_num, test_num, target_num):
preds_valid = np.zeros([valid_num , target_num])
preds_test = np.zeros([test_num , target_num])
cycle = 0
filename_valid = basename + '_valid_' + str(cycle).zfill(3) + '.predict'
data_io.write(os.path.join(output_dir,filename_valid), preds_valid)
filename_test = basename + '_test_' + str(cycle).zfill(3) + '.predict'
data_io.write(os.path.join(output_dir,filename_test), preds_test)
def __generate_tensor(self, is_review, reverse=False):
"""
:param is_review:
:param reverse:
:return:
"""
seq_length = self.review_max_words if is_review else self.summary_max_words
total_rev_summary_pairs = self.rev_sum_pair.shape[0]
data_tensor = np.zeros([total_rev_summary_pairs,seq_length])
sample = self.rev_sum_pair[0::, 0] if is_review else self.rev_sum_pair[0::, 1]
for index, entry in enumerate(sample.tolist()):
index_lst = np.array([self.map[word.lower()] for word in wordpunct_tokenize(entry)])
# reverse if want to get backward form
if reverse:
index_lst = index_lst[::-1]
# Pad the list
if len(index_lst) <= seq_length:
index_lst = np.lib.pad(index_lst, (0,seq_length - index_lst.size), 'constant', constant_values=(0, 0))
else:
index_lst = index_lst[0:seq_length]
data_tensor[index] = index_lst
return data_tensor
def dexp(v):
r = v[0:3]
dexpr = quat.dexp(r)
MT = mt(v)
return np.bmat([[dexpr,MT],[np.zeros((3,3)),dexpr]])
def dlog(v):
r = v[0:3]
dlogr = quat.dlog(r)
MT = mt(v)
return np.bmat([[dlogr, -dlogr*MT*dlogr],[np.zeros((3,3)),dlogr]])
def get_id_set(lang_codes):
feature_database = np.load("family_features.npz")
lang_codes = [ get_language_code(l, feature_database) for l in lang_codes ]
all_languages = list(feature_database["langs"])
feature_names = [ "ID_" + l.upper() for l in all_languages ]
values = np.zeros((len(lang_codes), len(feature_names)))
for i, lang_code in enumerate(lang_codes):
feature_index = get_language_index(lang_code, feature_database)
values[i, feature_index] = 1.0
return feature_names, values
def plotFields(layer,fieldShape=None,channel=None,figOffset=1,cmap=None,padding=0.01):
# Receptive Fields Summary
try:
W = layer.W
except:
W = layer
wp = W.eval().transpose();
if len(np.shape(wp)) < 4: # Fully connected layer, has no shape
fields = np.reshape(wp,list(wp.shape[0:-1])+fieldShape)
else: # Convolutional layer already has shape
features, channels, iy, ix = np.shape(wp)
if channel is not None:
fields = wp[:,channel,:,:]
else:
fields = np.reshape(wp,[features*channels,iy,ix])
perRow = int(math.floor(math.sqrt(fields.shape[0])))
perColumn = int(math.ceil(fields.shape[0]/float(perRow)))
fig = mpl.figure(figOffset); mpl.clf()
# Using image grid
from mpl_toolkits.axes_grid1 import ImageGrid
grid = ImageGrid(fig,111,nrows_ncols=(perRow,perColumn),axes_pad=padding,cbar_mode='single')
for i in range(0,np.shape(fields)[0]):
im = grid[i].imshow(fields[i],cmap=cmap);
grid.cbar_axes[0].colorbar(im)
mpl.title('%s Receptive Fields' % layer.name)
# old way
# fields2 = np.vstack([fields,np.zeros([perRow*perColumn-fields.shape[0]] + list(fields.shape[1:]))])
# tiled = []
# for i in range(0,perColumn*perRow,perColumn):
# tiled.append(np.hstack(fields2[i:i+perColumn]))
#
# tiled = np.vstack(tiled)
# mpl.figure(figOffset); mpl.clf(); mpl.imshow(tiled,cmap=cmap); mpl.title('%s Receptive Fields' % layer.name); mpl.colorbar();
mpl.figure(figOffset+1); mpl.clf(); mpl.imshow(np.sum(np.abs(fields),0),cmap=cmap); mpl.title('%s Total Absolute Input Dependency' % layer.name); mpl.colorbar()
def plotOutput(layer,feed_dict,fieldShape=None,channel=None,figOffset=1,cmap=None):
# Output summary
try:
W = layer.output
except:
W = layer
wp = W.eval(feed_dict=feed_dict);
if len(np.shape(wp)) < 4: # Fully connected layer, has no shape
temp = np.zeros(np.product(fieldShape)); temp[0:np.shape(wp.ravel())[0]] = wp.ravel()
fields = np.reshape(temp,[1]+fieldShape)
else: # Convolutional layer already has shape
wp = np.rollaxis(wp,3,0)
features, channels, iy,ix = np.shape(wp)
if channel is not None:
fields = wp[:,channel,:,:]
else:
fields = np.reshape(wp,[features*channels,iy,ix])
perRow = int(math.floor(math.sqrt(fields.shape[0])))
perColumn = int(math.ceil(fields.shape[0]/float(perRow)))
fields2 = np.vstack([fields,np.zeros([perRow*perColumn-fields.shape[0]] + list(fields.shape[1:]))])
tiled = []
for i in range(0,perColumn*perRow,perColumn):
tiled.append(np.hstack(fields2[i:i+perColumn]))
tiled = np.vstack(tiled)
if figOffset is not None:
mpl.figure(figOffset); mpl.clf();
mpl.imshow(tiled,cmap=cmap); mpl.title('%s Output' % layer.name); mpl.colorbar();
def dense_to_one_hot(labels_dense, num_classes=10):
"""Convert class labels from scalars to one-hot vectors."""
num_labels = labels_dense.shape[0]
index_offset = numpy.arange(num_labels) * num_classes
labels_one_hot = numpy.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
return labels_one_hot
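# Hedged usage sketch for dense_to_one_hot (assumes "import numpy" as used by the
# function above; the label values are made up).
labels = numpy.array([0, 2, 1])
print(dense_to_one_hot(labels, num_classes=3))
# row i has a single 1 in column labels[i]:
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]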
def plotFields(layer,fieldShape=None,channel=None,maxFields=25,figName='ReceptiveFields',cmap=None,padding=0.01):
# Receptive Fields Summary
W = layer.W
wp = W.eval().transpose();
if len(np.shape(wp)) < 4: # Fully connected layer, has no shape
fields = np.reshape(wp,list(wp.shape[0:-1])+fieldShape)
else: # Convolutional layer already has shape
features, channels, iy, ix = np.shape(wp)
if channel is not None:
fields = wp[:,channel,:,:]
else:
fields = np.reshape(wp,[features*channels,iy,ix])
fieldsN = min(fields.shape[0],maxFields)
perRow = int(math.floor(math.sqrt(fieldsN)))
perColumn = int(math.ceil(fieldsN/float(perRow)))
fig = mpl.figure(figName); mpl.clf()
# Using image grid
from mpl_toolkits.axes_grid1 import ImageGrid
grid = ImageGrid(fig,111,nrows_ncols=(perRow,perColumn),axes_pad=padding,cbar_mode='single')
for i in range(0,fieldsN):
im = grid[i].imshow(fields[i],cmap=cmap);
grid.cbar_axes[0].colorbar(im)
mpl.title('%s Receptive Fields' % layer.name)
# old way
# fields2 = np.vstack([fields,np.zeros([perRow*perColumn-fields.shape[0]] + list(fields.shape[1:]))])
# tiled = []
# for i in range(0,perColumn*perRow,perColumn):
# tiled.append(np.hstack(fields2[i:i+perColumn]))
#
# tiled = np.vstack(tiled)
# mpl.figure(figOffset); mpl.clf(); mpl.imshow(tiled,cmap=cmap); mpl.title('%s Receptive Fields' % layer.name); mpl.colorbar();
mpl.figure(figName+' Total'); mpl.clf(); mpl.imshow(np.sum(np.abs(fields),0),cmap=cmap); mpl.title('%s Total Absolute Input Dependency' % layer.name); mpl.colorbar()
def plotOutput(layer,feed_dict,fieldShape=None,channel=None,figOffset=1,cmap=None):
# Output summary
W = layer.output
wp = W.eval(feed_dict=feed_dict);
if len(np.shape(wp)) < 4: # Fully connected layer, has no shape
temp = np.zeros(np.product(fieldShape)); temp[0:np.shape(wp.ravel())[0]] = wp.ravel()
fields = np.reshape(temp,[1]+fieldShape)
else: # Convolutional layer already has shape
wp = np.rollaxis(wp,3,0)
features, channels, iy,ix = np.shape(wp)
if channel is not None:
fields = wp[:,channel,:,:]
else:
fields = np.reshape(wp,[features*channels,iy,ix])
perRow = int(math.floor(math.sqrt(fields.shape[0])))
perColumn = int(math.ceil(fields.shape[0]/float(perRow)))
fields2 = np.vstack([fields,np.zeros([perRow*perColumn-fields.shape[0]] + list(fields.shape[1:]))])
tiled = []
for i in range(0,perColumn*perRow,perColumn):
tiled.append(np.hstack(fields2[i:i+perColumn]))
tiled = np.vstack(tiled)
if figOffset is not None:
mpl.figure(figOffset); mpl.clf();
mpl.imshow(tiled,cmap=cmap); mpl.title('%s Output' % layer.name); mpl.colorbar();
def build_2D_cov_matrix(sigmax,sigmay,angle,verbose=True):
"""
Build a covariance matrix for a 2D multivariate Gaussian
--- INPUT ---
    sigmax          Standard deviation of the x-component of the multivariate Gaussian
    sigmay          Standard deviation of the y-component of the multivariate Gaussian
angle Angle to rotate matrix by in degrees (clockwise) to populate covariance cross terms
verbose Toggle verbosity
--- EXAMPLE OF USE ---
import tdose_utilities as tu
covmatrix = tu.build_2D_cov_matrix(3,1,35)
"""
    if verbose: print ' - Build 2D covariance matrix with variances (x,y)=('+str(sigmax)+','+str(sigmay)+\
                      ') and then rotated '+str(angle)+' degrees'
cov_orig = np.zeros([2,2])
cov_orig[0,0] = sigmay**2.0
cov_orig[1,1] = sigmax**2.0
    angle_rad = (180.0-angle) * np.pi/180.0 # The (180-angle) ensures the same rotation convention as DS9 is used
c, s = np.cos(angle_rad), np.sin(angle_rad)
rotmatrix = np.matrix([[c, -s], [s, c]])
cov_rot = np.dot(np.dot(rotmatrix,cov_orig),np.transpose(rotmatrix)) # performing rot * cov * rot^T
return cov_rot
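# Hedged usage sketch: build the rotated covariance from the docstring example and
# draw samples from the corresponding 2D Gaussian (assumes numpy is imported as np).
covmatrix = build_2D_cov_matrix(3, 1, 35, verbose=False)
samples = np.random.multivariate_normal([0.0, 0.0], np.asarray(covmatrix), size=1000)
print(samples.shape)  # (1000, 2)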
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def gen_noisy_cube(cube,type='poisson',gauss_std=0.5,verbose=True):
"""
Generate noisy cube based on input cube.
--- INPUT ---
cube Data cube to be smoothed
type Type of noise to generate
                        poisson    Generates Poissonian (integer) noise
                        gauss      Generates Gaussian noise with standard deviation gauss_std
gauss_std Standard deviation of noise if type='gauss'
verbose Toggle verbosity
--- EXAMPLE OF USE ---
import tdose_utilities as tu
datacube = np.ones(([3,3,3])); datacube[0,1,1]=5; datacube[1,1,1]=6; datacube[2,1,1]=8
    cube_with_noise = tu.gen_noisy_cube(datacube,type='gauss',gauss_std=0.5)
"""
if verbose: print ' - Generating "'+type+'" noise on data cube'
if type == 'poisson':
cube_with_noise = np.random.poisson(lam=cube, size=None)
elif type == 'gauss':
cube_with_noise = cube + np.random.normal(loc=np.zeros(cube.shape),scale=gauss_std, size=None)
else:
        sys.exit(' ---> type="'+type+'" is not valid in call to gen_noisy_cube() ')
return cube_with_noise
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def perform_2Dconvolution(cube,kernels,use_fftconvolution=False,verbose=True):
"""
Perform 2D convolution in data cube layer by layer
--- INPUT ---
cube Data cube to convolve
    kernels              List of (astropy) kernels to apply on each (z/wavelength) layer of the cube
use_fftconvolution To convolve in FFT space set this keyword to True
verbose Toggle verbosity
--- EXAMPLE OF USE ---
# see tdose_utilities.gen_psfed_cube()
"""
csh = cube.shape
cube_convolved = np.zeros(csh)
for zz in xrange(csh[0]): # looping over wavelength layers of cube
layer = cube[zz,:,:]
if use_fftconvolution:
layer_convolved = ac.convolve_fft(layer, kernels[zz], boundary='fill')
else:
layer_convolved = ac.convolve(layer, kernels[zz], boundary='fill')
cube_convolved[zz,:,:] = layer_convolved
return cube_convolved
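# Hedged usage sketch (assumes "ac" is astropy.convolution, as the function above
# implies, and numpy is imported as np): convolve every layer of a small synthetic
# cube with the same Gaussian kernel.
cube = np.random.rand(5, 30, 30)
kernels = [ac.Gaussian2DKernel(2.0) for _ in range(cube.shape[0])]
smoothed = perform_2Dconvolution(cube, kernels, use_fftconvolution=False)
print(smoothed.shape)  # (5, 30, 30)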
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def gen_aperture(imgsize,ypos,xpos,radius,pixval=1,showaperture=False,verbose=True):
"""
Generating an aperture image
--- INPUT ---
    imgsize       The dimensions of the array to return. Expects [y-size,x-size].
                  The aperture is drawn at pixel position (ypos,xpos) within this array.
ypos Pixel position in the y direction
xpos Pixel position in the x direction
radius Radius of aperture in pixels
showaperture Display image of generated aperture
verbose Toggle verbosity
--- EXAMPLE OF USE ---
import tdose_utilities as tu
apertureimg = tu.gen_aperture([20,40],10,5,10,showaperture=True)
apertureimg = tu.gen_aperture([2000,4000],900,1700,150,showaperture=True)
"""
if verbose: print ' - Generating aperture in image (2D array)'
y , x = np.ogrid[-ypos:imgsize[0]-ypos, -xpos:imgsize[1]-xpos]
mask = x*x + y*y <= radius**2.
aperture = np.zeros(imgsize)
if verbose: print ' - Assigning pixel value '+str(pixval)+' to aperture'
aperture[mask] = pixval
if showaperture:
if verbose: print ' - Displaying resulting image of aperture'
plt.imshow(aperture,interpolation='none')
plt.title('Generated aperture')
plt.show()
return aperture
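# Hedged usage sketch: sum the flux of a synthetic image inside an aperture
# generated above (assumes numpy is imported as np).
image = np.ones([20, 40])
aperture = gen_aperture([20, 40], 10, 5, 3, showaperture=False, verbose=False)
aperture_flux = np.sum(image * aperture)
print(aperture_flux)  # number of pixels within a radius of 3 around (y,x)=(10,5)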
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def build_data_auto_encoder(data, step, win_size):
    count = int(np.ceil(data.shape[1] / float(step)))  # number of windows; cast to int so np.zeros gets a valid shape
docX = np.zeros((count, 3, win_size))
for i in range(0, data.shape[1] - win_size, step):
        c = i // step  # integer window index (explicit floor division)
docX[c][0] = np.abs(data[0, i:i + win_size] - data[1, i:i + win_size])
docX[c][1] = np.power(data[0, i:i + win_size] - data[1, i:i + win_size], 2)
docX[c][2] = np.pad(
(data[0, i:i + win_size - 1] - data[0, i + 1:i + win_size]) * (data[1, i:i + win_size - 1] - data[1, i + 1:i + win_size]),
(0, 1), 'constant', constant_values=0)
data = np.dstack((docX[:, 0], docX[:, 1], docX[:, 2])).reshape(docX.shape[0], docX.shape[1]*docX.shape[2])
return data
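# Hedged usage sketch: two synthetic 1000-sample signals turned into windowed
# difference features (assumes numpy is imported as np).
signals = np.random.rand(2, 1000)
features = build_data_auto_encoder(signals, step=50, win_size=100)
print(features.shape)  # (20, 300): 1000/50 windows, each flattened to 3 rows of length 100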
def get_face_mask(img, img_l):
img = np.zeros(img.shape[:2], dtype = np.float64)
for idx in OVERLAY_POINTS_IDX:
cv2.fillConvexPoly(img, cv2.convexHull(img_l[idx]), color = 1)
img = np.array([img, img, img]).transpose((1, 2, 0))
img = (cv2.GaussianBlur(img, (BLUR_AMOUNT, BLUR_AMOUNT), 0) > 0) * 1.0
img = cv2.GaussianBlur(img, (BLUR_AMOUNT, BLUR_AMOUNT), 0)
return img
def warp_image(img, tM, shape):
out = np.zeros(shape, dtype=img.dtype)
# cv2.warpAffine(img,
# tM[:2],
# (shape[1], shape[0]),
# dst=out,
# borderMode=cv2.BORDER_TRANSPARENT,
# flags=cv2.WARP_INVERSE_MAP)
cv2.warpPerspective(img, tM, (shape[1], shape[0]), dst=out,
borderMode=cv2.BORDER_TRANSPARENT,
flags=cv2.WARP_INVERSE_MAP)
return out
# TODO: Modify this method to get a better face contour mask
def get_contour_mask(dshape, img_fl):
mask = np.zeros(dshape)
hull = cv2.convexHull(img_fl)
cv2.drawContours(mask, [hull], 0, (1, 1, 1) , -1)
return np.uint8(mask)
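# Hedged usage sketch: convex-hull mask from a few made-up landmark points
# (assumes cv2 and numpy are imported, as the function above requires).
landmarks = np.array([[10, 10], [60, 15], [70, 60], [20, 70]], dtype=np.int32)
mask = get_contour_mask((100, 100, 3), landmarks)
print(mask.shape)  # (100, 100, 3); 1s inside the hull, 0s outside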
# Orients input_ mask onto tmpl_ face
def get_earnings_per_round(self, username):
r = requests.get('{0}/{1}'.format(self._users_url, username))
if r.status_code!=200:
return (None, r.status_code)
rj = r.json()
rewards = rj['rewards']
earnings = np.zeros(len(rewards))
for i in range(len(rewards)):
earnings[i] = rewards[i]['amount']
return (earnings, r.status_code)