def check_numeric_input(self, input_name, input_value):
    # Coerce a scalar, length-1, or length-P input into a float array of length self.P.
    if isinstance(input_value, np.ndarray):
        if input_value.size == self.P:
            setattr(self, input_name, input_value)
        elif input_value.size == 1:
            setattr(self, input_name, input_value * np.ones(self.P))
        else:
            raise ValueError("length of %s is %d; should be %d" % (input_name, input_value.size, self.P))
    elif isinstance(input_value, (float, int)):
        setattr(self, input_name, float(input_value) * np.ones(self.P))
    elif isinstance(input_value, list):
        if len(input_value) == self.P:
            setattr(self, input_name, np.array([float(x) for x in input_value]))
        elif len(input_value) == 1:
            setattr(self, input_name, float(input_value[0]) * np.ones(self.P))
        else:
            raise ValueError("length of %s is %d; should be %d" % (input_name, len(input_value), self.P))
    else:
        raise ValueError("user provided %s with an unsupported type" % input_name)
Example source: cpm_utils.py from project convolutional-pose-machines-tensorflow (author: timctho)
def make_heatmaps_from_joints(input_size, heatmap_size, gaussian_variance, batch_joints):
# Generate ground-truth heatmaps from ground-truth 2d joints
scale_factor = input_size // heatmap_size
batch_gt_heatmap_np = []
for i in range(batch_joints.shape[0]):
gt_heatmap_np = []
invert_heatmap_np = np.ones(shape=(heatmap_size, heatmap_size))
for j in range(batch_joints.shape[1]):
cur_joint_heatmap = make_gaussian(heatmap_size,
gaussian_variance,
center=(batch_joints[i][j] // scale_factor))
gt_heatmap_np.append(cur_joint_heatmap)
invert_heatmap_np -= cur_joint_heatmap
gt_heatmap_np.append(invert_heatmap_np)
batch_gt_heatmap_np.append(gt_heatmap_np)
batch_gt_heatmap_np = np.asarray(batch_gt_heatmap_np)
batch_gt_heatmap_np = np.transpose(batch_gt_heatmap_np, (0, 2, 3, 1))
return batch_gt_heatmap_np
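# The snippet relies on a make_gaussian helper that is not shown above. A plausible
# minimal sketch, with the signature inferred from the call site (treat the exact
# normalization as an assumption, not the project's implementation):
import numpy as np

def make_gaussian(size, variance, center=None):
    x = np.arange(size, dtype=float)
    y = x[:, np.newaxis]
    x0, y0 = (size // 2, size // 2) if center is None else center
    # size x size map peaking at (x0, y0)
    return np.exp(-((x - x0) ** 2 + (y - y0) ** 2) / (2.0 * variance ** 2))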
def primes_2_to_n(n):
"""
Efficient algorithm to find and list primes from
2 to `n'.
Args:
n (int): highest number from which to search for primes
Returns:
np array of all primes from 2 to n
References:
Robert William Hanks,
https://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n/
"""
    sieve = np.ones(int(n / 3 + (n % 6 == 2)), dtype=bool)  # np.bool was removed in NumPy 1.24
for i in range(1, int((n ** 0.5) / 3 + 1)):
if sieve[i]:
k = 3 * i + 1 | 1
sieve[int(k * k / 3)::2 * k] = False
sieve[int(k * (k - 2 * (i & 1) + 4) / 3)::2 * k] = False
return np.r_[2, 3, ((3 * np.nonzero(sieve)[0][1:] + 1) | 1)]
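# Quick check of the sieve:
print(primes_2_to_n(30))   # [ 2  3  5  7 11 13 17 19 23 29]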
def get_interv_table(model,intrv=True):
n_batches=25
table_outputs=[]
d_vals=np.linspace(TINY,0.6,n_batches)
for name in model.cc.node_names:
outputs=[]
for d_val in d_vals:
do_dict={model.cc.node_dict[name].label_logit : d_val*np.ones((model.batch_size,1))}
outputs.append(model.sess.run(model.fake_labels,do_dict))
out=np.vstack(outputs)
table_outputs.append(out)
table=np.stack(table_outputs,axis=2)
    np.mean(np.round(table), axis=0)  # computed but unused; see the commented-out summary below
return table
#dT=pd.DataFrame(index=p_names, data=T, columns=do_names)
#T=np.mean(np.round(table),axis=0)
#table=get_interv_table(model)
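# The commented-out lines above hint at the intended summary: average the rounded
# fake labels over the batch and label the axes. A standalone sketch with synthetic
# data, assuming `table` has shape (batch, labels, nodes) as built by np.stack above:
import numpy as np
import pandas as pd

table = np.random.rand(16, 3, 5)        # stand-in for get_interv_table output
T = np.mean(np.round(table), axis=0)    # labels x nodes
dT = pd.DataFrame(T, index=['label%d' % i for i in range(3)],
                  columns=['node%d' % j for j in range(5)])
print(dT)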
def load_ROI_mask(self):
proxy = nib.load(self.FLAIR_FILE)
image_array = np.asarray(proxy.dataobj)
mask = np.ones_like(image_array)
mask[np.where(image_array < 90)] = 0
# img = nib.Nifti1Image(mask, proxy.affine)
# nib.save(img, join(modalities_path,'mask.nii.gz'))
struct_element_size = (20, 20, 20)
mask_augmented = np.pad(mask, [(21, 21), (21, 21), (21, 21)], 'constant', constant_values=(0, 0))
    mask_augmented = binary_closing(mask_augmented,
                                    structure=np.ones(struct_element_size, dtype=bool)).astype(int)  # np.int was removed in NumPy 1.24
return mask_augmented[21:-21, 21:-21, 21:-21].astype('bool')
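# The pad -> binary_closing -> crop pattern keeps the 20-voxel structuring element
# from eating into the volume border. A standalone demonstration on a synthetic
# volume (binary_closing is scipy.ndimage's, as used above):
import numpy as np
from scipy.ndimage import binary_closing

vol = np.zeros((40, 40, 40))
vol[10:30, 10:30, 10:30] = 100      # bright block...
vol[18:22, 18:22, 18:22] = 0        # ...with a hole punched in it
mask = vol >= 90
padded = np.pad(mask, 5, 'constant', constant_values=0)
closed = binary_closing(padded, structure=np.ones((5, 5, 5), dtype=bool))
print(mask.sum(), closed[5:-5, 5:-5, 5:-5].sum())   # the closing fills the hole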
def __init__(self, pos, color, mode=None):
"""
=============== ==============================================================
**Arguments:**
pos Array of positions where each color is defined
color Array of RGBA colors.
Integer data types are interpreted as 0-255; float data types
are interpreted as 0.0-1.0
mode Array of color modes (ColorMap.RGB, HSV_POS, or HSV_NEG)
indicating the color space that should be used when
interpolating between stops. Note that the last mode value is
ignored. By default, the mode is entirely RGB.
=============== ==============================================================
"""
self.pos = np.array(pos)
order = np.argsort(self.pos)
self.pos = self.pos[order]
self.color = np.array(color)[order]
if mode is None:
mode = np.ones(len(pos))
self.mode = mode
self.stopsCache = {}
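# This constructor matches pyqtgraph's ColorMap: stops may arrive unsorted and are
# ordered by position. Typical use (assuming pyqtgraph) builds a lookup table:
import pyqtgraph as pg

pos = [1.0, 0.0, 0.5]                                   # deliberately unsorted
color = [[255, 255, 255, 255], [0, 0, 0, 255], [255, 128, 0, 255]]
cmap = pg.ColorMap(pos, color)                          # __init__ sorts the stops
lut = cmap.getLookupTable(0.0, 1.0, 256)                # 256-entry uint8 table
print(lut[0], lut[-1])                                  # black ... white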
def build_test_data(self, variable='v'):
metadata = {
'size': NCELLS,
'first_index': 0,
'first_id': 0,
'n': 505,
'variable': variable,
'last_id': NCELLS - 1,
'last_index': NCELLS - 1,
'dt': 0.1,
'label': "population0",
}
if variable == 'v':
metadata['units'] = 'mV'
elif variable == 'spikes':
metadata['units'] = 'ms'
    data = np.empty((505, 2))  # 505 = NCELLS * 101, i.e. this assumes NCELLS == 5
for i in range(NCELLS):
# signal
data[i*101:(i+1)*101, 0] = np.arange(i, i+101, dtype=float)
# index
data[i*101:(i+1)*101, 1] = i*np.ones((101,), dtype=float)
return data, metadata
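# Shape check: the hard-coded 505 implies NCELLS == 5 (5 cells x 101 samples each).
# self is unused, so the method can be exercised as a free function for illustration:
NCELLS = 5
data, metadata = build_test_data(None, 'v')
print(data.shape, metadata['n'], metadata['units'])   # (505, 2) 505 mV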
def ONES(n):
return np.ones((n, n), np.uint8)
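# A square all-ones uint8 array is the standard OpenCV morphology kernel; for
# example (the cv2 usage is assumed from context, not shown in the source):
import cv2
import numpy as np

img = np.zeros((9, 9), np.uint8)
img[4, 4] = 255
dilated = cv2.dilate(img, ONES(3))   # grows the single pixel to a 3x3 block
print(cv2.countNonZero(dilated))     # 9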
def gen_noisy_cube(cube,type='poisson',gauss_std=0.5,verbose=True):
"""
Generate noisy cube based on input cube.
--- INPUT ---
cube Data cube to be smoothed
type Type of noise to generate
poisson Generates poissonian (integer) noise
gauss Generates gaussian noise for a gaussian with standard deviation gauss_std=0.5
gauss_std Standard deviation of noise if type='gauss'
verbose Toggle verbosity
--- EXAMPLE OF USE ---
import tdose_utilities as tu
    datacube = np.ones((3,3,3)); datacube[0,1,1]=5; datacube[1,1,1]=6; datacube[2,1,1]=8
    cube_with_noise = tu.gen_noisy_cube(datacube,type='gauss',gauss_std=0.5)
"""
    if verbose: print(' - Generating "'+type+'" noise on data cube')
if type == 'poisson':
cube_with_noise = np.random.poisson(lam=cube, size=None)
elif type == 'gauss':
cube_with_noise = cube + np.random.normal(loc=np.zeros(cube.shape),scale=gauss_std, size=None)
else:
sys.exit(' ---> type="'+type+'" is not valid in call to mock_cube_sources.generate_cube_noise() ')
return cube_with_noise
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
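# Note that type='poisson' replaces each voxel with an integer draw whose mean is
# the input value, rather than adding noise on top of the signal. A quick check:
import numpy as np

cube = 100.0 * np.ones((2, 2, 2))
noisy = np.random.poisson(lam=cube)   # integer counts, mean ~100 per voxel
print(noisy.dtype, noisy.mean())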
def __init__(self, env, shape, clip=10.0, update_freq=100):
self.env = env
self.clip = clip
self.update_freq = update_freq
self.count = 0
self.sum = 0.0
self.sum_sqr = 0.0
self.mean = np.zeros(shape, dtype=np.double)
self.std = np.ones(shape, dtype=np.double)
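# Only the constructor is shown; the rest of the class presumably accumulates
# running statistics and clips normalized observations. A hypothetical sketch of
# those methods (names and the naive update rule are assumptions, not source code):
import numpy as np

def update(self, obs):
    self.count += 1
    self.sum += obs
    self.sum_sqr += obs ** 2
    if self.count % self.update_freq == 0:
        self.mean = self.sum / self.count
        self.std = np.sqrt(np.maximum(self.sum_sqr / self.count - self.mean ** 2, 1e-8))

def normalize(self, obs):
    return np.clip((obs - self.mean) / self.std, -self.clip, self.clip)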
def expectation(self, dataSplit, coefficients, variances):
assignment_weights = np.ones(
(len(dataSplit), self.num_components), dtype=float)
self.Q = len(self.endoVar)
for k in range(self.num_components):
coef_ = coefficients[k]
        Beta = coef_.loc[self.endoVar, self.endoVar]   # .ix was removed in modern pandas
        Gamma = coef_.loc[self.endoVar, self.exoVar]
a_ = (np.dot(Beta, self.fscores[
self.endoVar].T) + np.dot(Gamma, self.fscores[self.exoVar].T))
invert_ = np.linalg.inv(np.array(variances[k]))
exponential = np.exp(-0.5 * np.dot(np.dot(a_.T, invert_), a_))
den = (((2 * np.pi)**(self.Q / 2)) *
np.sqrt(np.linalg.det(variances[k])))
probabilities = exponential / den
probabilities = probabilities[0]
assignment_weights[:, k] = probabilities
assignment_weights /= assignment_weights.sum(axis=1)[:, np.newaxis]
# print(assignment_weights)
return assignment_weights
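# The per-observation weight is the zero-mean multivariate normal density
# exp(-0.5 * a' inv(Sigma) a) / ((2*pi)**(Q/2) * sqrt(det(Sigma))). A standalone
# sanity check of that formula against scipy:
import numpy as np
from scipy.stats import multivariate_normal

Q = 3
Sigma = np.diag([1.0, 2.0, 0.5])
a = np.array([0.3, -1.2, 0.7])
by_hand = (np.exp(-0.5 * a @ np.linalg.inv(Sigma) @ a)
           / (((2 * np.pi) ** (Q / 2)) * np.sqrt(np.linalg.det(Sigma))))
print(np.isclose(by_hand, multivariate_normal(np.zeros(Q), Sigma).pdf(a)))   # True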
def create_matrices(self):
"""Creates the a_* matrices required for simulation."""
self.a_d_v = self.d_x(factors=(self.t.increment / self.x.increment *
np.ones(self.x.samples)))
self.a_v_p = self.d_x(factors=(self.t.increment / self.x.increment) *
np.ones(self.x.samples), variant='backward')
self.a_v_v = self.d_x2(factors=(self.t.increment / self.x.increment ** 2 *
self.material_vector('absorption_coef')))
self.a_v_v2 = self.d_x(factors=(self.t.increment / self.x.increment / 2) *
np.ones(self.x.samples), variant='central')
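# d_x and d_x2 are not shown; they presumably assemble difference operators scaled
# row-wise by `factors`. A minimal dense sketch of the assumed d_x contract (an
# illustration only, not the source's implementation):
import numpy as np

def d_x(n, factors, variant='forward'):
    op = np.zeros((n, n))
    idx = np.arange(n - 1)
    if variant == 'forward':
        op[idx, idx], op[idx, idx + 1] = -1.0, 1.0
    else:  # 'backward'
        op[idx + 1, idx], op[idx + 1, idx + 1] = -1.0, 1.0
    return np.asarray(factors)[:, np.newaxis] * op

print(d_x(4, np.ones(4)) @ np.array([0.0, 1.0, 4.0, 9.0]))   # [1. 3. 5. 0.]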
def test_field_component_boundary_2():
fc = fls.FieldComponent(100)
fc.values = np.ones(100)
fc.boundaries = [reg.Boundary(reg.LineRegion([5, 6, 7], [0, 0.2], 'test boundary'))]
fc.boundaries[0].value = [23, 42, 23]
fc.boundaries[0].additive = True
fc.apply_bounds(step=0)
assert np.allclose(fc.values[[5, 6, 7]], [24, 43, 24])
def serve_files(model_path, config_path, num_samples):
"""INTERNAL Serve from pickled model, config."""
from treecat.serving import TreeCatServer
import numpy as np
model = pickle_load(model_path)
config = pickle_load(config_path)
model['config'] = config
server = TreeCatServer(model)
counts = np.ones(model['tree'].num_vertices, np.int8)
samples = server.sample(int(num_samples), counts)
server.logprob(samples)
server.median(counts, samples)
server.latent_correlation()
def validate_gof(N, V, C, M, server, conditional):
# Generate samples.
expected = C**V
num_samples = 1000 * expected
ones = np.ones(V, dtype=np.int8)
if conditional:
cond_data = server.sample(1, ones)[0, :]
else:
cond_data = server.make_zero_row()
samples = server.sample(num_samples, ones, cond_data)
logprobs = server.logprob(samples + cond_data[np.newaxis, :])
counts = {}
probs = {}
for sample, logprob in zip(samples, logprobs):
key = tuple(sample)
if key in counts:
counts[key] += 1
else:
counts[key] = 1
probs[key] = np.exp(logprob)
assert len(counts) == expected
# Check accuracy using Pearson's chi-squared test.
keys = sorted(counts.keys(), key=lambda key: -probs[key])
counts = np.array([counts[k] for k in keys], dtype=np.int32)
probs = np.array([probs[k] for k in keys])
probs /= probs.sum()
    # Truncate tail bins whose expected counts are too low for a reliable chi-squared test.
truncated = False
valid = (probs * num_samples > 20)
if not valid.all():
T = valid.argmin()
T = max(8, T) # Avoid truncating too much
probs = probs[:T]
counts = counts[:T]
truncated = True
gof = multinomial_goodness_of_fit(
probs, counts, num_samples, plot=True, truncated=truncated)
assert 1e-2 < gof
def sample_tree(self, num_samples):
size = len(self._ensemble)
pvals = np.ones(size, dtype=np.float32) / size
sub_nums = np.random.multinomial(num_samples, pvals)
samples = []
for server, sub_num in zip(self._ensemble, sub_nums):
samples += server.sample_tree(sub_num)
np.random.shuffle(samples)
assert len(samples) == num_samples
return samples
def sample(self, N, counts, data=None):
size = len(self._ensemble)
pvals = np.ones(size, dtype=np.float32) / size
sub_Ns = np.random.multinomial(N, pvals)
samples = np.concatenate([
server.sample(sub_N, counts, data)
for server, sub_N in zip(self._ensemble, sub_Ns)
])
np.random.shuffle(samples)
assert samples.shape[0] == N
return samples
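# Both methods use the same pattern: split the requested draws across ensemble
# members with a uniform multinomial, then shuffle so the ordering carries no
# member information. The split step in isolation:
import numpy as np

size = 4                                      # ensemble members
pvals = np.ones(size, dtype=np.float32) / size
sub_Ns = np.random.multinomial(1000, pvals)   # e.g. [262 240 251 247]
print(sub_Ns, sub_Ns.sum())                   # always sums to 1000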
def arrangementToRasterMask( arrangement ):
rows = np.array(arrangement['rows'])
width = np.max(rows)
if arrangement['hex'] is True:
width+=1
height = len(rows)
mask = np.ones((height,width),dtype=int)
    for row in range(len(rows)):
        c = rows[row]
        # zero out c centered cells; (width - c) >> 1 is the left margin
        mask[row, (width - c) >> 1:((width - c) >> 1) + c] = 0
return {'width':width,'height':height,'mask':mask, 'count':np.sum(rows),'hex':arrangement['hex'],'type':arrangement['type']}
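# Worked call (the dict keys are the ones the function itself reads):
arr = {'rows': [3, 4, 3], 'hex': True, 'type': 'demo'}
m = arrangementToRasterMask(arr)
print(m['width'], m['height'], m['count'])   # 5 3 10
print(m['mask'])
# [[1 0 0 0 1]
#  [0 0 0 0 1]
#  [1 0 0 0 1]]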