def _get_glyph(gnum, height, width, shift_prob, shift_size):
    # requires: from numpy import array, row_stack
    #           from numpy.random import random, randint
    # random_points_in_circle and _spatial_sort are project helpers.
    if isinstance(gnum, list):
        n = randint(*gnum)
    else:
        n = gnum
    # Scatter n points in a unit circle and scale to the glyph box.
    glyph = random_points_in_circle(
        n, 0, 0, 0.5
    )*array((width, height), 'float')
    _spatial_sort(glyph)
    # Occasionally shift the glyph up or down by a fraction of its height.
    if random() < shift_prob:
        shift = ((-1)**randint(0, 2))*shift_size*height
        glyph[:, 1] += shift
    # Half of the time, duplicate one random vertex.
    if random() < 0.5:
        ii = randint(0, n-1, size=(1))
        xy = glyph[ii, :]
        glyph = row_stack((glyph, xy))
    return glyph
Python row_stack() usage examples (source code)
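Before the project examples, a minimal sketch of what numpy.row_stack does: it is an alias for numpy.vstack (newer NumPy releases deprecate the alias in favor of vstack), stacking arrays row-wise into a 2-D array.

import numpy as np

a = np.array([1.0, 2.0])
b = np.array([3.0, 4.0])

stacked = np.row_stack((a, b))  # equivalent to np.vstack((a, b))
print(stacked)        # [[1. 2.]
                      #  [3. 4.]]
print(stacked.shape)  # (2, 2)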
def find_stable_a(self, A, cond_h, cond_s):
    # requires: import numpy
    # Build the different combinations of condition equations.
    condeq = []
    if cond_h.size > 0:
        condeq.append(cond_h)
    condeq.append(cond_s)
    if cond_h.size > 0:
        condeq.append(numpy.row_stack((cond_s, cond_h)))
    # Pick the combination that best conditions A (or none at all).
    condnum = []
    condnum.append(numpy.linalg.cond(A))
    for cond in condeq:
        condnum.append(numpy.linalg.cond(numpy.row_stack((A, cond))))
    i = numpy.argmin(numpy.array(condnum))
    if i == 0:
        return numpy.array([])
    else:
        return condeq[i-1]
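A minimal sketch of the idea behind find_stable_a, with a made-up near-singular matrix: appending a condition equation as an extra row can dramatically lower the condition number that numpy.linalg.cond reports.

import numpy as np

A = np.array([[1.0, 1.0],
              [1.0, 1.0 + 1e-12]])   # nearly singular
cond_s = np.array([[1.0, -1.0]])     # hypothetical condition equation

print(np.linalg.cond(A))                          # huge, ~4e12
print(np.linalg.cond(np.row_stack((A, cond_s))))  # ~1.4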
def getallweights(model):
    # Collect every "weight" tensor in the model into one tall column vector.
    # requires: import numpy as np; MD.model2list is project-specific.
    model_list = MD.model2list(model)
    weight_np = None
    for i in range(len(model_list)):
        model_state_dict = model_list[i].state_dict()
        for k, d in model_state_dict.items():
            k_split = k.split(".")
            if k_split[-1] == "weight":
                d_np = d.cpu().numpy()
                d_np = d_np.reshape(d_np.size, 1)
                if weight_np is None:
                    weight_np = d_np
                else:
                    weight_np = np.row_stack((weight_np, d_np))
    return weight_np
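A minimal sketch of the flattening pattern used by getallweights, with a plain dict of numpy arrays standing in for a PyTorch state_dict (MD.model2list is project-specific):

import numpy as np

# Hypothetical stand-in for a state_dict: weight tensors plus a bias.
state_dict = {
    'fc1.weight': np.arange(6.0).reshape(2, 3),
    'fc1.bias':   np.zeros(2),
    'fc2.weight': np.arange(4.0).reshape(2, 2),
}

weight_np = None
for k, d in state_dict.items():
    if k.split('.')[-1] == 'weight':
        col = d.reshape(d.size, 1)  # flatten each tensor into a column
        weight_np = col if weight_np is None else np.row_stack((weight_np, col))

print(weight_np.shape)  # (10, 1): 6 + 4 weight entries, biases skipped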
def get_training_data():
    # Load CIFAR-10 training batches 1-4 (40000 images of 3072 bytes each).
    # requires: import numpy as np; unpickle and cwd are defined elsewhere.
    batch = unpickle(cwd + '/cifar10/cifar10-batches-py/data_batch_' + str(1))
    images = batch[b'data']
    labels = batch[b'labels']
    filenames = batch[b'filenames']
    for i in range(2, 5):
        ibatch = unpickle(cwd + '/cifar10/cifar10-batches-py/data_batch_' + str(i))
        images = np.row_stack((images, ibatch[b'data']))
        labels = np.concatenate((labels, ibatch[b'labels']))            # keep labels 1-D
        filenames = np.concatenate((filenames, ibatch[b'filenames']))  # keep filenames 1-D
    return {b'batch_label': 'training batch,40000*3072', b'data': images,
            b'labels': labels, b'filenames': filenames}
def main(args, **argv):
    from numpy import row_stack
    fn = args.fn
    out = args.out
    paths = get_lines_from_svg(fn, out)
    mi, ma, move = get_mid(row_stack(paths))
    paths, _ = spatial_sort(paths)
    paths = spatial_concat(paths)
    paths = align_left(paths, mi)
    w, h = ma - mi
    if args.svgwrite:
        export_svg_svgwrite(out, paths, w, h, line_width=1)
    else:
        export_svg(out, paths, w, h, line_width=1)
def prepare_inputs(*inputs, **kwinputs):
    """Prepare the inputs for the simulator.

    The signature follows that given in `elfi.tools.external_operation`. This function
    appends kwinputs with unique and descriptive filenames and writes an input file for
    the bdm executable.
    """
    # requires: import numpy as np
    alpha, delta, tau, N = inputs
    meta = kwinputs['meta']
    # Organize the parameters into an array. Broadcasting handles constant
    # arguments nicely.
    param_array = np.row_stack(np.broadcast(alpha, delta, tau, N))
    # Prepare a unique filename for parallel settings.
    filename = '{model_name}_{batch_index}_{submission_index}.txt'.format(**meta)
    np.savetxt(filename, param_array, fmt='%.4f %.4f %.4f %d')
    # Add the filenames to kwinputs.
    kwinputs['filename'] = filename
    kwinputs['output_filename'] = filename[:-4] + '_out.txt'
    # Return the new inputs that the command will receive.
    return inputs, kwinputs
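The np.broadcast call above pairs scalar and per-batch parameters elementwise before stacking; a minimal sketch with made-up values:

import numpy as np

alpha = 0.2                # constant across the batch
delta = [0.5, 0.6, 0.7]    # one value per batch member
tau, N = 1.0, 10

param_array = np.row_stack(np.broadcast(alpha, delta, tau, N))
print(param_array)
# [[ 0.2  0.5  1.  10. ]
#  [ 0.2  0.6  1.  10. ]
#  [ 0.2  0.7  1.  10. ]]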
graph_matplot.py — project: starcraft-stacked-graph-service, author: ibm-dev-incubator
def draw_matplot_graph(unit_supplies, replay):
    # requires: import numpy as NP, matplotlib.pyplot as PLT,
    #           matplotlib.patches as mpatches, hashlib; `times` is module-level.
    units = sorted(list(unit_supplies.keys()))
    y = NP.row_stack([unit_supplies[i] for i in units])
    # This call to 'cumsum' (cumulative sum) on the y data avoids having to
    # manually order the datasets: each row becomes the top of a stacked band.
    x = times
    y_stack = NP.cumsum(y, axis=0)
    fig = PLT.figure()
    ax1 = fig.add_subplot(111)
    patches = []
    ax1.fill_between(x, 0, y_stack[0, :], facecolor="#CC6666", alpha=.7)
    patches.append(mpatches.Patch(color="#CC6666", label=units[0], alpha=.7))
    for index, key in enumerate(units[1:]):
        # Derive a stable per-unit color from a hash of its name.
        color = "#" + hashlib.sha224(bytes(key, 'utf-8')).hexdigest()[:6]
        patches.append(mpatches.Patch(color=color, label=key, alpha=.7))
        ax1.fill_between(x, y_stack[index, :], y_stack[index+1, :], facecolor=color, alpha=.7)
    PLT.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
               ncol=2, mode="expand", borderaxespad=0., handles=patches)
    PLT.show()
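A minimal sketch of the row_stack + cumsum idiom used above: cumulative summing along axis 0 turns per-series values into the running band tops that fill_between needs for a stacked area chart.

import numpy as np

series = {'a': [1, 2, 3], 'b': [2, 1, 0]}
y = np.row_stack([series[k] for k in sorted(series)])
y_stack = np.cumsum(y, axis=0)
print(y_stack)
# [[1 2 3]     top of band 'a'
#  [3 3 3]]    top of band 'a' + 'b'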
def fill_in_missing_dates(df, date_col_name, other_col):
    # requires: import numpy as np, pandas as pd
    startd = df[date_col_name].values[0]
    endd = df[date_col_name].values[-1]
    print(startd, endd)
    idx = pd.date_range(startd, endd)
    lookup = {}
    for index, row in df.iterrows():
        lookup[row[date_col_name]] = row[other_col]
    new_data = []
    for d in idx:
        pydate = d.to_pydatetime()
        daskey = pydate.strftime('%Y-%m-%d')
        new_data.append([daskey, lookup.get(daskey)])  # None for missing days
    return np.row_stack(new_data)
def fill_in_missing_dates(df, date_col_name, other_col):
    # requires: import numpy as np, pandas as pd
    startd = df[date_col_name].values[0]
    endd = df[date_col_name].values[-1]
    print(startd, endd)
    idx = pd.date_range(startd, endd)
    lookup = {}
    for index, row in df.iterrows():
        lookup[row[date_col_name]] = row[other_col]
    new_data = []
    for d in idx:
        pydate = d.to_pydatetime()
        daskey = pydate.strftime('%Y-%m-%d')
        new_data.append([daskey, lookup.get(daskey, 0)])  # 0 for missing days
    return np.row_stack(new_data)
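A minimal usage sketch for the version above (missing days filled with 0); note that np.row_stack unifies the dtype, so the numbers come back as strings:

import pandas as pd

df = pd.DataFrame({'date': ['2020-01-01', '2020-01-03'],
                   'value': [10, 30]})
print(fill_in_missing_dates(df, 'date', 'value'))
# [['2020-01-01' '10']
#  ['2020-01-02' '0']
#  ['2020-01-03' '30']]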
def link_export(self):
    from numpy import row_stack
    num = self.num
    links = self.links[:num*10, 0]
    edges = set()
    for i, c in enumerate(self.link_counts[:num, 0]):
        for k in range(c):
            j = links[10*i+k]
            # Store each undirected edge once, as (low, high).
            if i < j:
                lnk = (i, j)
            else:
                lnk = (j, i)
            if lnk not in edges:
                edges.add(lnk)
    return self.xy[:num, :], row_stack(list(edges))
def lookAtTransform(pos, target, up, square=False, camera=False):
    # requires: import numpy as np; rotationFromVectors is project-specific.
    pos = np.array(pos, np.float32)
    target = np.array(target, np.float32)
    up = np.array(up, np.float32)
    dir = target - pos
    R = rotationFromVectors(dir, up, camera=camera)
    pos = np.matrix(pos).T
    V = np.column_stack((R, -R*pos))  # 3x4 block [R | -R*pos]
    if square:
        # Append the homogeneous row to get a square 4x4 matrix.
        V = np.row_stack((
            V,
            np.array([0, 0, 0, 1], np.float32)
        ))
    return V
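A minimal sketch of the block structure assembled above, with an identity matrix standing in for the project-specific rotationFromVectors result:

import numpy as np

R = np.matrix(np.eye(3, dtype=np.float32))  # stand-in rotation
pos = np.matrix([1.0, 2.0, 3.0]).T

V = np.column_stack((R, -R*pos))            # 3x4 block [R | -R*pos]
V = np.row_stack((V, np.array([0, 0, 0, 1], np.float32)))  # homogeneous row
print(V.shape)  # (4, 4)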
def unprojectOpenGL(self, u):
    # Invert the full OpenGL projection matrix to map a homogeneous screen
    # point back into 3-D space.
    proj = self.getOpenGlCameraMatrix()
    invProj = np.linalg.inv(proj)
    x = invProj*np.row_stack([np.mat(u).T, [1]])
    x = x[:3] / x[3]
    return x
def kronecker_weight_vpn(dims):
    """
    Return VectorPartitionFunction for computing weight multiplicities in
    the symmetric algebra Sym(C^prod(dims)) with respect to the maximal torus
    of GL(dims[1]) x ... x GL(dims[n]).
    """
    # requires: import itertools, numpy as np
    # Build the list of multi-indices.
    multi_indices = list(itertools.product(*map(range, dims)))
    # Build a matrix whose r-th row collects the r-th entries of all weights.
    As = []
    for i, dim in enumerate(dims):
        A = np.zeros(shape=(dim, len(multi_indices)), dtype=object)
        for j, midx in enumerate(multi_indices):
            A[midx[i], j] = 1
        As.append(A)
    A = np.row_stack(As)
    return VectorPartitionFunction(A)
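A minimal sketch of the indicator matrix this builds, for dims = (2, 2): each column is one multi-index, and each block of rows one-hot encodes that multi-index's entry for one tensor factor.

import itertools
import numpy as np

dims = (2, 2)
multi_indices = list(itertools.product(*map(range, dims)))

As = []
for i, dim in enumerate(dims):
    A = np.zeros((dim, len(multi_indices)), dtype=int)
    for j, midx in enumerate(multi_indices):
        A[midx[i], j] = 1
    As.append(A)

print(np.row_stack(As))
# [[1 1 0 0]     factor 1, entry 0
#  [0 0 1 1]     factor 1, entry 1
#  [1 0 1 0]     factor 2, entry 0
#  [0 1 0 1]]    factor 2, entry 1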
def callback(itr):
    # requires: import numpy as np, tensorflow as tf; X, zdim, sess, encode,
    # decode, viz, test_lb_fun and Ntest are defined in the surrounding script.
    def samplefun(num_samps):
        z = np.array(np.random.randn(num_samps, zdim), dtype=np.float32)
        return decode(z).eval(session=sess)
    viz.plot_samples(itr, samplefun, savedir='vae_mnist_samples')

    def sample_z(mu, log_sigmasq, M=5):
        eps = tf.random_normal((M, zdim), dtype=tf.float32)
        return mu + tf.exp(0.5 * log_sigmasq) * eps

    def recons(num_samps):
        # Reconstruct a random subset of the data.
        subset = X[np.random.choice(X.shape[0], 1)]
        mu, log_sigmasq = encode(subset)
        imgs = decode(sample_z(mu, log_sigmasq, M=24)).eval(session=sess)
        return np.row_stack([subset, imgs])
    viz.plot_samples(itr, recons, savedir='vae_mnist_samples', stub='recon')

    test_lb = test_lb_fun.eval(session=sess) * Ntest
    print("test data VLB: ", np.mean(test_lb))
##########################################
# Make gradient descent fitting function #
##########################################
def _estimate_current_anticlockwise_degrees_using_minarearect(self, spot_xy) -> float:
    # Find the minimum-area rectangle around the number.
    nearby_contour_groups = contour_tools.extract_contour_groups_close_to(
        self.contour_groups, target_point_xy=spot_xy, delta=self._min_pixels_between_contour_groups)
    nearby_contours = [c for grp in nearby_contour_groups for c in grp]
    box = cv2.minAreaRect(np.row_stack(nearby_contours))
    corners_xy = cv2.boxPoints(box).astype(np.int32)
    self._log_contours_on_current_image([corners_xy], name="Minimum area rectangle")

    # Construct a vector which, once correctly rotated, goes from the bottom-right
    # corner up and to the left at 135 degrees.
    sorted_corners = sorted(corners_xy, key=lambda pt: np.linalg.norm(spot_xy - pt))
    bottom_right_corner = sorted_corners[0]  # the closest corner to the spot
    adjacent_corners = sorted_corners[1:3]   # the next two closest corners
    unit_vectors_along_box_edge = misc.normalised(adjacent_corners - bottom_right_corner)
    up_left_diagonal = unit_vectors_along_box_edge.sum(axis=0)
    degrees_of_up_left_diagonal = np.rad2deg(np.arctan2(-up_left_diagonal[1], up_left_diagonal[0]))
    return degrees_of_up_left_diagonal - 135
def get_fractures(self):
    # requires: from collections import defaultdict; from numpy import row_stack
    res = defaultdict(list)
    for fid, node in self.fid_node[:self.fnum, :]:
        res[fid].append(self.xy[node, :])
    return [row_stack(v) for k, v in res.items()]
def write(sand):
    from modules.writer import Writer
    # requires: from numpy import linspace, arange, row_stack
    lines = []
    vertices = []
    vnum = 0
    W = Writer(
        GLYPH_HEIGHT,
        GLYPH_WIDTH,
        WORD_SPACE,
        SHIFT_PROB,
        SHIFT_SIZE,
        EDGE
    )
    i = 0
    for y in linspace(EDGE, 1.0-EDGE, ROW_NUM):
        print(y)
        for a in W.export(
                get_word_generator(),
                y,
                gnum=GNUM,
                inum=INUM
        ):
            sand.paint_dots(a)
            i += 1
            vertices.append(a)
            lines.append(arange(len(a)).astype('int')+vnum)
            vnum += len(a)
    return row_stack(vertices), lines
def _interpolate_write_with_cursive(glyphs, inum, theta, noise, offset_size):
    # requires: from numpy import row_stack, column_stack, cumsum, cos, sin, array
    #           from numpy.random import random
    stack = row_stack(glyphs)
    ig = _rnd_interpolate(stack, len(glyphs)*inum, ordered=True)
    # Random-walk the pen angle, then offset the interpolated line both ways.
    gamma = theta + cumsum((1.0-2.0*random(len(ig)))*noise)
    dd = column_stack((cos(gamma), sin(gamma)))*offset_size
    a = ig + dd
    b = ig + dd[:, ::-1]*array((1, -1))
    return a, b
def _export(self, glyphs, inum):
    stack = row_stack(glyphs)
    ig = _rnd_interpolate(stack, len(glyphs)*inum, ordered=True)
    return ig
def load(fn):
    from codecs import open
    from numpy import row_stack
    vertices = []
    faces = []
    lines = []
    with open(fn, 'r', encoding='utf8') as f:
        for l in f:
            if l.startswith('#'):
                continue
            values = l.split()
            if not values:
                continue
            if values[0] == 'v':
                vertices.append([float(v) for v in values[1:]])
            if values[0] == 'f':
                face = [int(v.split('//')[0])-1 for v in values[1:]]
                faces.append(face)
            if values[0] == 'l':
                line = [int(v.split('//')[0])-1 for v in values[1:]]
                lines.append(line)
    try:
        faces = row_stack(faces)
    except ValueError:
        faces = None
    return {
        'faces': faces,
        'vertices': row_stack(vertices),
        'lines': lines
    }
def spatial_concat_2d(paths, eps=1.e-9):
    from numpy.linalg import norm
    from numpy import row_stack
    res = []
    curr = paths[0]
    concats = 0
    for p in paths[1:]:
        if p.shape[0] < 2:
            print('WARNING: path with only one vertex.')
            continue
        if norm(p[0, :]-curr[-1, :]) < eps:
            # This path starts where the current one ends: merge them.
            curr = row_stack([curr, p[1:, :]])
            concats += 1
        else:
            res.append(curr)
            curr = p
    res.append(curr)
    print('concats: ', concats)
    print('original paths: ', len(paths))
    print('number after concatenation: ', len(res))
    print()
    return res
def spatial_concat(paths, eps=1.e-9):
    from numpy.linalg import norm
    from numpy import row_stack
    res = []
    curr = paths[0]
    concats = 0
    for p in paths[1:]:
        if p.shape[0] < 2:
            print('WARNING: path with only one vertex.')
            continue
        if norm(p[0, :]-curr[-1, :]) < eps:
            # This path starts where the current one ends: merge them.
            curr = row_stack([curr, p[1:, :]])
            concats += 1
        else:
            res.append(curr)
            curr = p
    res.append(curr)
    print('concats: ', concats)
    print('original paths: ', len(paths))
    print('number after concatenation: ', len(res))
    print()
    return res
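A minimal usage sketch for spatial_concat: two paths that share an endpoint are merged into one, while a disjoint path stays separate.

from numpy import array

paths = [array([[0.0, 0.0], [1.0, 0.0]]),
         array([[1.0, 0.0], [2.0, 0.0]]),  # starts where the previous ends
         array([[5.0, 5.0], [6.0, 5.0]])]  # disjoint

res = spatial_concat(paths)
print([p.shape for p in res])  # [(3, 2), (2, 2)]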
def optimize_glove(glove_path, vocab):
    """Trim down GloVe embeddings to use only words in the data."""
    # requires: import numpy as np
    vocab_set = frozenset(vocab)
    seen_vocab = []
    X = []
    with open(glove_path) as f:
        for line in f:
            line = line.strip().split(' ')  # split() without an argument fails on ". . ."
            word, embed = line[0], line[1:]
            if word in vocab_set:
                X.append(np.array(embed, dtype=np.float32))
                seen_vocab.append(word)
    return seen_vocab, np.row_stack(X)
def dietrich_baseline(bands, intensities, half_window=16, num_erosions=10):
    '''
    Fast and precise automatic baseline correction of ... NMR spectra, 1991.
    http://www.sciencedirect.com/science/article/pii/002223649190402F
    http://www.inmr.net/articles/AutomaticBaseline.html
    '''
    # requires: import numpy as np; convolve (e.g. scipy.signal), binary_erosion
    # (scipy.ndimage) and iterative_threshold are imported elsewhere.
    # Step 1: moving-window smoothing.
    w = half_window * 2 + 1
    window = np.ones(w) / float(w)
    Y = intensities.copy()
    if Y.ndim == 2:
        window = window[None]
    Y[..., half_window:-half_window] = convolve(Y, window, mode='valid')
    # Step 2: derivative.
    dY = np.diff(Y) ** 2
    # Step 3: iterative thresholding.
    is_baseline = np.ones(Y.shape, dtype=bool)
    is_baseline[..., 1:] = iterative_threshold(dY)
    # Step 4: binary erosion, to get rid of peak tops.
    mask = np.zeros_like(is_baseline)
    mask[..., half_window:-half_window] = True
    s = np.ones(3, dtype=bool)
    if Y.ndim == 2:
        s = s[None]
    is_baseline = binary_erosion(is_baseline, structure=s,
                                 iterations=num_erosions, mask=mask)
    # Step 5: reconstruct the baseline via interpolation.
    if Y.ndim == 2:
        return np.row_stack([np.interp(bands, bands[m], y[m])
                             for y, m in zip(intensities, is_baseline)])
    return np.interp(bands, bands[is_baseline], intensities[is_baseline])
def summarise(skelimage):
    # requires: import numpy as np, pandas as pd, networkx as nx;
    # skeleton_to_nx and branch_statistics are defined elsewhere.
    ndim = skelimage.ndim
    g, counts, skelimage_labeled = skeleton_to_nx(skelimage)
    coords = np.nonzero(skelimage)
    ids = skelimage_labeled[coords]
    sorted_coords = np.transpose(coords)[np.argsort(ids)]
    tables = []
    for i, cc in enumerate(nx.connected_component_subgraphs(g)):
        stats = branch_statistics(cc)
        if stats.size == 0:
            continue
        coords0 = sorted_coords[stats[:, 0].astype(int) - 1]
        coords1 = sorted_coords[stats[:, 1].astype(int) - 1]
        distances = np.sqrt(np.sum((coords0 - coords1)**2, axis=1))
        skeleton_id = np.full(distances.shape, i, dtype=float)
        tables.append(np.column_stack((skeleton_id, stats,
                                       coords0, coords1, distances)))
    columns = (['skeleton-id', 'node-id-0', 'node-id-1', 'branch-distance',
                'branch-type'] +
               ['coord-0-%i' % i for i in range(ndim)] +
               ['coord-1-%i' % i for i in range(ndim)] +
               ['euclidean-distance'])
    column_types = [int, int, int, float, int] + 2*ndim*[int] + [float]
    arr = np.row_stack(tables).T
    data_dict = {col: dat.astype(dtype)
                 for col, dat, dtype in zip(columns, arr, column_types)}
    df = pd.DataFrame(data_dict)
    return df
def __init__(self, file_path):
    # requires: import numpy as np, word2vec
    self.model = word2vec.load(file_path)
    if 'unknown' not in self.model.vocab_hash:
        # Append a random embedding for out-of-vocabulary words.
        unknown_vec = np.random.uniform(-0.1, 0.1, size=128)
        self.model.vocab_hash['unknown'] = len(self.model.vocab)
        self.model.vectors = np.row_stack((self.model.vectors, unknown_vec))
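A minimal sketch of the pattern above, appending one new embedding row to an existing matrix:

import numpy as np

vectors = np.zeros((3, 128))                    # 3 known words
unknown_vec = np.random.uniform(-0.1, 0.1, size=128)

vectors = np.row_stack((vectors, unknown_vec))  # 'unknown' lands at index 3
print(vectors.shape)  # (4, 128)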
def read_merged_qdp(infile):
    """
    Read a merged QDP file with multiple groups of data separated by "no no no".
    """
    # requires: import re, numpy as np; isplit is defined elsewhere.
    lines = map(lambda line: re.sub(r"^\s*no\s+no\s+no.*$", "X",
                                    line.strip(), flags=re.I),
                open(infile).readlines())
    lines = isplit(lines, ("X",))
    data_groups = []
    for block in lines:
        data = [list(map(float, l.split())) for l in block]
        data_groups.append(np.row_stack(data))
    return data_groups
def sliding_window_padded(a, ws, ss=(1, 1), flatten=True):
    # requires: import numpy as np; sliding_window is from the blog post below.
    colpad = ws[0] // 2  # integer division, so this also works on Python 3
    col_a = np.empty((a.shape[0], colpad))
    col_a[:] = np.nan
    a = np.column_stack([col_a, a, col_a])
    rowpad = ws[1] // 2
    row_a = np.empty((rowpad, a.shape[1]))
    row_a[:] = np.nan
    a = np.row_stack([row_a, a, row_a])
    return sliding_window(a, ws, ss, flatten)

# From http://www.johnvinyard.com/blog/?p=268
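sliding_window itself comes from the blog post linked above; a minimal sketch of just the NaN-padding step, assuming a 2x2 window:

import numpy as np

a = np.arange(9.0).reshape(3, 3)
ws = (2, 2)

colpad, rowpad = ws[0] // 2, ws[1] // 2
col = np.full((a.shape[0], colpad), np.nan)
a = np.column_stack([col, a, col])
row = np.full((rowpad, a.shape[1]), np.nan)
a = np.row_stack([row, a, row])
print(a.shape)  # (5, 5)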