import cv2
import numpy as np


def run(im):
    im_disp = im.copy()
    window_name = "Draw line here."
    cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)
    cv2.moveWindow(window_name, 910, 0)
    print(" Drag across the screen to set lines.\n Do it twice")
    print(" After drawing the lines press 'r' to resume\n")
    l1 = np.empty((2, 2), np.uint32)
    l2 = np.empty((2, 2), np.uint32)
    lines = [l1, l2]  # renamed from `list` to avoid shadowing the built-in
    trigger = 0
    mouse_down = False

    def callback(event, x, y, flags, param):
        nonlocal trigger, mouse_down
        if trigger < 2:
            if event == cv2.EVENT_LBUTTONDOWN:
                mouse_down = True
                lines[trigger][0] = (x, y)
            if event == cv2.EVENT_LBUTTONUP and mouse_down:
                mouse_down = False
                lines[trigger][1] = (x, y)
                cv2.line(im_disp, (lines[trigger][0][0], lines[trigger][0][1]),
                         (lines[trigger][1][0], lines[trigger][1][1]), (255, 0, 0), 2)
                trigger += 1

    cv2.setMouseCallback(window_name, callback)
    while True:
        cv2.imshow(window_name, im_disp)
        key = cv2.waitKey(10) & 0xFF
        if key == ord('r'):
            # Press 'r' once both lines are drawn to resume and return them.
            return lines
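
A minimal, hypothetical driver for the snippet above; the image file name is an assumption, and run() blocks until both lines are drawn and 'r' is pressed.

# Hypothetical usage sketch for run() (the file name is made up).
import cv2

frame = cv2.imread("frame.png")   # any BGR image works here
line_pair = run(frame)            # returns the two 2x2 endpoint arrays
print("line endpoints:", line_pair)
cv2.destroyAllWindows()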

Python np.empty() usage examples (collected source snippets)

# This generator task runs on a remote dispycos process: it maintains a moving
# average over the last `window_size` values and notifies `trend_task` whenever
# the average crosses +/- `threshold`.
def rtask_avg_proc(threshold, trend_task, window_size, task=None):
    import numpy as np  # imported inside so the import happens on the remote node
    data = np.empty(window_size, dtype=float)
    data.fill(0.0)
    cumsum = 0.0
    while True:
        i, n = yield task.receive()
        if n is None:
            break
        cumsum += (n - data[0])
        avg = cumsum / window_size
        if avg > threshold:
            trend_task.send((i, 'high', float(avg)))
        elif avg < -threshold:
            trend_task.send((i, 'low', float(avg)))
        data = np.roll(data, -1)
        data[-1] = n
    return 0  # generator return value (raises StopIteration(0) under the hood)
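
The np.roll ring-buffer pattern above is independent of dispycos; a self-contained sketch of the same moving-average logic:

# Standalone sketch of the moving-average window used above (no dispycos).
import numpy as np

def moving_average(stream, window_size):
    data = np.zeros(window_size)
    cumsum = 0.0
    for n in stream:
        cumsum += n - data[0]      # drop the oldest value, add the newest
        data = np.roll(data, -1)   # shift the window left by one slot
        data[-1] = n
        yield cumsum / window_size

print(list(moving_average([1, 2, 3, 4], 2)))  # [0.5, 1.5, 2.5, 3.5]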

def __init__(self, N, V, tree_prior, config):
    """Initialize a model with an empty subsample.

    Args:
        N (int): Number of rows in the dataset.
        V (int): Number of columns (features) in the dataset.
        tree_prior: A [K]-shaped numpy array of prior edge log odds, where
            K is the number of edges in the complete graph on V vertices.
        config: A global config dict.
    """
    assert isinstance(N, int)
    assert isinstance(V, int)
    assert isinstance(tree_prior, np.ndarray)
    assert isinstance(config, dict)
    K = V * (V - 1) // 2  # Number of edges in complete graph.
    assert V <= 32768, 'Invalid # features > 32768: {}'.format(V)
    assert tree_prior.shape == (K, )
    assert tree_prior.dtype == np.float32
    self._config = config.copy()
    self._num_rows = N
    self._tree_prior = tree_prior
    self._tree = TreeStructure(V)
    assert self._tree.num_vertices == V
    self._program = make_propagation_program(self._tree.tree_grid)
    self._added_rows = set()

def __init__(self, data, tree_prior, config):
    """Initialize a model with an empty subsample.

    Args:
        data: An [N, V]-shaped numpy array of real-valued data.
        tree_prior: A [K]-shaped numpy array of prior edge log odds, where
            K is the number of edges in the complete graph on V vertices.
        config: A global config dict.
    """
    assert isinstance(data, np.ndarray)
    data = np.asarray(data, np.float32)
    assert len(data.shape) == 2
    N, V = data.shape
    D = config['model_latent_dim']
    E = V - 1  # Number of edges in the tree.
    TreeTrainer.__init__(self, N, V, tree_prior, config)
    self._data = data
    self._latent = np.zeros([N, V, D], np.float32)
    # This is symmetric positive definite.
    self._vert_ss = np.zeros([V, D, D], np.float32)
    # This is arbitrary (not necessarily symmetric).
    self._edge_ss = np.zeros([E, D, D], np.float32)
    # This represents (count, mean, covariance).
    self._feat_ss = np.zeros([V, D, 1 + 1 + D], np.float32)

def observed_perplexity(self, counts):
    """Compute perplexity = exp(entropy) of observed variables.

    Perplexity is an information theoretic measure of the number of
    clusters or latent classes. Perplexity is a real number in the range
    [1, M], where M is model_num_clusters.

    Args:
        counts: A [V]-shaped array of multinomial counts.

    Returns:
        A [V]-shaped numpy array of perplexity.
    """
    V, E, M, R = self._VEMR
    if counts is None:
        # Default to a single observation per feature; a passed-in array
        # must not be overwritten here.
        counts = np.ones(V, dtype=np.int8)
    assert counts.shape == (V, )
    assert counts.dtype == np.int8
    assert np.all(counts > 0)
    observed_entropy = np.empty(V, dtype=np.float32)
    for v in range(V):
        beg, end = self._ragged_index[v:v + 2]
        probs = np.dot(self._feat_cond[beg:end, :], self._vert_probs[v, :])
        observed_entropy[v] = multinomial_entropy(probs, counts[v])
    return np.exp(observed_entropy)
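
For intuition, the perplexity of a single categorical distribution can be computed directly; a minimal sketch (a simplification of multinomial_entropy that ignores the multinomial count):

# Minimal sketch: perplexity = exp(entropy) of one categorical distribution.
import numpy as np

def perplexity(probs):
    p = np.asarray(probs, dtype=float)
    p = p[p > 0]  # 0 * log(0) contributes nothing
    return np.exp(-np.sum(p * np.log(p)))

print(perplexity([0.25, 0.25, 0.25, 0.25]))  # 4.0, all four classes "in play"
print(perplexity([1.0, 0.0, 0.0]))           # 1.0, effectively one class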

def generate_batch(seq_length, batch_size, min_val, max_val):
    """
    Generates batch of examples.
    :param seq_length: length of the sequence to be generated
    :param batch_size: number of samples in the batch
    :param min_val: minimum value for a
    :param max_val: maximum value for a
    :return x: batch of examples
    :return y: batch of ground truth values
    """
    n_elems = 2
    x = np.empty((batch_size, seq_length, n_elems))
    y = np.empty((batch_size, 1))
    for i in range(batch_size):
        sample, ground_truth = generate_example(seq_length, min_val, max_val)
        x[i, :, :] = sample
        y[i, 0] = ground_truth
    return x, y
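
The preallocate-then-fill pattern above avoids growing arrays inside the loop. A self-contained sketch with a stand-in generate_example (the real one lives elsewhere in that project; this stand-in implements a plausible "sum two marked values" task):

# Self-contained sketch of the preallocate-and-fill batching pattern.
import numpy as np

def generate_example(seq_length, min_val, max_val):
    # Stand-in for the project's real generate_example.
    values = np.random.uniform(min_val, max_val, (seq_length, 1))
    markers = np.zeros((seq_length, 1))
    markers[np.random.choice(seq_length, 2, replace=False)] = 1
    return np.hstack([values, markers]), float(np.sum(values * markers))

x = np.empty((4, 10, 2))  # batch_size=4, seq_length=10, n_elems=2
y = np.empty((4, 1))
for i in range(4):
    x[i], y[i, 0] = generate_example(10, 0.0, 1.0)
print(x.shape, y.shape)   # (4, 10, 2) (4, 1)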

def frame_from_bardata(self, data, algo_dt):
    """
    Create a DataFrame from the given BarData and algo dt.
    """
    data = data._data
    frame_data = np.empty((len(self.fields), len(self.sids))) * np.nan
    for j, sid in enumerate(self.sids):
        sid_data = data.get(sid)
        if not sid_data:
            continue
        if algo_dt != sid_data['dt']:
            continue
        for i, field in enumerate(self.fields):
            frame_data[i, j] = sid_data.get(field, np.nan)
    return pd.DataFrame(
        frame_data,
        index=self.fields.copy(),
        columns=self.sids.copy(),
    )
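
The np.empty(shape) * np.nan expression above works because any arithmetic with NaN yields NaN, regardless of the uninitialized contents; np.full states the intent more directly:

# Two ways to build an all-NaN float array; np.full is the more explicit idiom.
import numpy as np

a = np.empty((3, 2)) * np.nan   # NaN propagates through the multiply
b = np.full((3, 2), np.nan)     # direct fill
print(np.isnan(a).all(), np.isnan(b).all())  # True True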

def last_date_in_output_for_sid(self, sid):
    """
    Parameters
    ----------
    sid : int
        Asset identifier.

    Returns
    -------
    out : pd.Timestamp
        The midnight of the last date written in to the output for the
        given sid.
    """
    sizes_path = "{0}/close/meta/sizes".format(self.sidpath(sid))
    if not os.path.exists(sizes_path):
        return pd.NaT
    with open(sizes_path, mode='r') as f:
        sizes = f.read()
    data = json.loads(sizes)
    # Floor division keeps num_days an int for the indexing below.
    num_days = data['shape'][0] // self._minutes_per_day
    if num_days == 0:
        # empty container
        return pd.NaT
    return self._trading_days[num_days - 1]
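
The floor division in num_days matters under Python 3, where / is true division even between ints:

# Python 3 division: `/` yields a float (unusable as an index), `//` an int.
print(1170 / 390)    # 3.0
print(1170 // 390)   # 3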

def forward(self, inputs):
    # todo: This is only compatible with NumPy, not yet with CuPy.
    x = inputs[0]
    W = inputs[1]
    # Note: to be compatible with the "static graph" feature, all output
    # arrays of this forward function must be allocated explicitly:
    y = np.empty((x.shape[0], W.shape[0])).astype(x.dtype)
    # This is required because the "static_*()" functions use the convention
    # that output arrays are supplied as input arguments: a "static_*()"
    # function is not allowed to return anything other than `None`. The
    # reason is to prevent dynamic allocation of output arrays during
    # execution of the static schedule, which would break the model.
    if len(inputs) == 3:
        bias = inputs[2]
        # Note: `y` is the output array.
        self.static_linear(x, W, bias, y)
    else:
        # Note: `y` is the output array.
        self.static_linear_no_bias(x, W, y)
    return y,
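
The "outputs are passed in, nothing is returned" convention can be followed with plain NumPy via out= arguments; a sketch of the linear step in that style (static_linear itself is internal to the framework above):

# Sketch: a linear layer written in the "output array is passed in" style.
import numpy as np

def linear_into(x, W, b, y):
    np.dot(x, W.T, out=y)  # write the matrix product into preallocated y
    y += b                 # add bias in place; nothing is returned

x = np.random.rand(4, 3).astype(np.float32)
W = np.random.rand(5, 3).astype(np.float32)
b = np.zeros(5, dtype=np.float32)
y = np.empty((x.shape[0], W.shape[0]), dtype=x.dtype)
linear_into(x, W, b, y)
print(np.allclose(y, x @ W.T + b))  # True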

def gl_init(self):
    self.gl_vertex_shader_factory = functools.lru_cache(maxsize=None)(functools.partial(gl.Shader, GL_VERTEX_SHADER))
    self.gl_fragment_shader_factory = functools.lru_cache(maxsize=None)(functools.partial(gl.Shader, GL_FRAGMENT_SHADER))
    self.gl_program_factory = functools.lru_cache(maxsize=None)(GLProgram)
    self.gl_texture_factory = functools.lru_cache(maxsize=None)(gx.texture.GLTexture)

    array_table = {gx.VA_PTNMTXIDX: GLMatrixIndexArray()}
    array_table.update((attribute, array.gl_convert()) for attribute, array in self.array_table.items())

    for shape in self.shapes:
        shape.gl_init(array_table)
    for material in self.materials:
        material.gl_init()
    for texture in self.textures:
        texture.gl_init(self.gl_texture_factory)

    self.gl_joints = [copy.copy(joint) for joint in self.joints]
    self.gl_joint_matrices = numpy.empty((len(self.joints), 3, 4), numpy.float32)
    self.gl_matrix_table = gl.TextureBuffer(GL_DYNAMIC_DRAW, GL_RGBA32F, (len(self.matrix_descriptors), 3, 4), numpy.float32)
    self.gl_update_matrix_table()

    self.gl_draw_objects = list(self.gl_generate_draw_objects(self.scene_graph))
    self.gl_draw_objects.sort(key=lambda draw_object: draw_object.material.unknown0)

def gl_init(self, array_table):
    self.gl_hide = False

    self.gl_vertex_array = gl.VertexArray()
    glBindVertexArray(self.gl_vertex_array)

    self.gl_vertex_buffer = gl.Buffer()
    glBindBuffer(GL_ARRAY_BUFFER, self.gl_vertex_buffer)

    self.gl_element_count = 3 * gl_count_triangles(self)
    self.gl_element_buffer = gl.Buffer()
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.gl_element_buffer)

    vertex_type = numpy.dtype([array_table[attribute].field() for attribute in self.attributes])
    vertex_count = sum(len(primitive.vertices) for primitive in self.primitives)
    vertex_array = numpy.empty(vertex_count, vertex_type)

    for attribute in self.attributes:
        array_table[attribute].load(self, vertex_array)

    vertex_array, element_map = numpy.unique(vertex_array, return_inverse=True)
    element_array = gl_create_element_array(self, element_map, self.gl_element_count)

    glBufferData(GL_ARRAY_BUFFER, vertex_array.nbytes, vertex_array, GL_STATIC_DRAW)
    glBufferData(GL_ELEMENT_ARRAY_BUFFER, element_array.nbytes, element_array, GL_STATIC_DRAW)
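
The numpy.unique(..., return_inverse=True) call is what turns the flat per-primitive vertex list into an indexed mesh; in isolation (on a recent NumPy):

# Sketch: deduplicating structured vertices with np.unique(return_inverse=True).
import numpy as np

vertex_type = np.dtype([('position', np.float32, 3)])
flat = np.zeros(4, vertex_type)
flat['position'] = [(0, 0, 0), (1, 0, 0), (0, 0, 0), (0, 1, 0)]  # one duplicate

unique, element_map = np.unique(flat, return_inverse=True)
print(len(unique))   # 3 unique vertices remain
print(element_map)   # [0 2 0 1], index into `unique` for each original vertex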

def make2d(array, cols=None, dtype=None):
    '''
    Make a 2D array from an array of arrays. The `cols' and `dtype'
    arguments can be omitted if the array is not empty.
    '''
    if (cols is None or dtype is None) and not len(array):
        raise RuntimeError("cols and dtype must be specified for empty "
                           "array")
    if cols is None:
        cols = len(array[0])
    if dtype is None:
        dtype = array[0].dtype
    return _np.fromiter(array, [('_', dtype, (cols,))],
                        count=len(array))['_']
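
The fromiter trick packs each row into a single structured field and then strips the field away; a usage sketch (np standing in for the module's _np alias):

# Usage sketch for make2d's fromiter packing trick.
import numpy as np

rows = [np.array([1.0, 2.0]), np.array([3.0, 4.0]), np.array([5.0, 6.0])]
packed = np.fromiter(rows, [('_', np.float64, (2,))], count=len(rows))['_']
print(packed.shape)  # (3, 2)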

def _read_bin(self, stream, byte_order):
    '''
    Load a PLY element from a binary PLY file. The element may
    contain list properties.
    '''
    self._data = _np.empty(self.count, dtype=self.dtype(byte_order))
    for k in _range(self.count):
        for prop in self.properties:
            try:
                self._data[prop.name][k] = \
                    prop._read_bin(stream, byte_order)
            except StopIteration:
                raise PlyParseError("early end-of-file",
                                    self, k, prop)

def parse_fasta(self):
    self.ref_id = dict()
    self.ref_inf = dict()
    i = 1
    N = 0
    ref_inf = np.empty(shape=[0, 3])
    for seqs in SeqIO.parse(self.ref, 'fasta'):
        seq_id = seqs.id
        self.ref_id[i] = seq_id
        seq = str(seqs.seq.upper())
        seq_len = len(seq)
        self.ref_inf[seq_id] = seq_len
        N += seq.count('N')
        ref_inf = np.append(ref_inf, [[i, seq_id, seq_len]], axis=0)
        i += 1
    self.ref_detail = pd.DataFrame(ref_inf, columns=['Index', 'Contig', 'Length(bp)'])
    self.N = N
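
Note that np.append copies the whole array on every call, so the loop above is quadratic in the number of contigs; accumulating rows in a list and converting once is the usual alternative:

# Sketch: list accumulation instead of repeated np.append.
import numpy as np

rows = []
for i, (seq_id, seq_len) in enumerate([('chr1', 1000), ('chr2', 800)], start=1):
    rows.append([i, seq_id, seq_len])
ref_inf = np.array(rows, dtype=object)  # one allocation at the end
print(ref_inf.shape)  # (2, 3)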

def qualification_filter(self):
    """
    Provide information on the unqualified and qualified contigs from the
    original fasta file, with the criterion: >20kb and >=5 restriction
    sites inside.
    """
    unqualified = np.empty(shape=[0, 3])
    qualified = np.empty(shape=[0, 4])
    rm_dup = self.RcmapTable[['CMapId', 'ContigLength', 'NumSites']].drop_duplicates()
    for i in self.ref_id.keys():
        index = i
        name = self.ref_id[i]
        length = self.ref_inf[name]
        if i not in self.RcmapTable['CMapId'].unique():
            unqualified = np.append(unqualified, [[index, name, length]], axis=0)
        else:
            Id = rm_dup[rm_dup['CMapId'] == i].index[0]
            sites = rm_dup['NumSites'][Id]
            qualified = np.append(qualified, [[index, name, length, sites]], axis=0)
    self.unqualified = pd.DataFrame(unqualified, columns=['index', 'contig', 'length(bp)'])
    self.qualified = pd.DataFrame(qualified, columns=['index', 'contig', 'length(bp)', 'numSites'])

def get_train_batch(self, batch_size, seq_len):
    """
    Gets a batch of sequences for training.
    @param batch_size: The number of sequences in the batch.
    @param seq_len: The number of words in a sequence.
    @return: A tuple of arrays of shape [batch_size, seq_len].
    """
    inputs = np.empty([batch_size, seq_len], dtype=int)
    targets = np.empty([batch_size, seq_len], dtype=int)
    for i in range(batch_size):
        inp, target = self.get_seq(seq_len)
        inputs[i] = inp
        targets[i] = target
    return inputs, targets

def test(self, input_path, output_path):
    if not self.load()[0]:
        raise Exception("No model is found, please train first")
    mean, std = self.sess.run([self.mean, self.std])

    images = np.empty((1, self.im_size[0], self.im_size[1], self.im_size[2], 1), dtype=np.float32)
    #labels = np.empty((1, self.im_size[0], self.im_size[1], self.im_size[2], self.nclass), dtype=np.float32)
    for f in input_path:
        images[0, ..., 0], read_info = read_testing_inputs(f, self.roi[0], self.im_size, output_path)
        probs = self.sess.run(self.probs, feed_dict={self.images: (images - mean) / std,
                                                     self.is_training: True,
                                                     self.keep_prob: 1})
        #print(self.roi[1] + os.path.basename(f) + ":" + str(dice))
        output_file = os.path.join(output_path, self.roi[1] + '_' + os.path.basename(f))
        f_h5 = h5py.File(output_file, 'w')
        if self.roi[0] < 0:
            f_h5['predictions'] = restore_labels(np.argmax(probs[0], 3), self.roi[0], read_info)
        else:
            f_h5['probs'] = restore_labels(probs[0, ..., 1], self.roi[0], read_info)
        f_h5.close()

def save_to(self, nameprefix, switch=False):
    """saves logger data to a different set of files, for
    ``switch=True`` also the logger's name prefix is switched to
    the new value
    """
    if not nameprefix or not utils.is_str(nameprefix):
        raise ValueError('filename prefix must be a non-empty string')
    if nameprefix == self.default_prefix:
        raise ValueError('cannot save to default name "' + nameprefix + '...", choose another name')
    if nameprefix == self.name_prefix:
        return
    for name in self.file_names:
        open(nameprefix + name + '.dat', 'w').write(open(self.name_prefix + name + '.dat').read())
    if switch:
        self.name_prefix = nameprefix

def predict(self, model_path, x_test):
    """
    Uses the model to create a prediction for the given data

    :param model_path: path to the model checkpoint to restore
    :param x_test: Data to predict on. Shape [n, nx, ny, channels]
    :returns prediction: The unet prediction Shape [n, px, py, labels] (px=nx-self.offset/2)
    """
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        # Initialize variables
        sess.run(init)
        # Restore model weights from previously saved model
        self.restore(sess, model_path)
        y_dummy = np.empty((x_test.shape[0], x_test.shape[1], x_test.shape[2], self.n_class))
        prediction = sess.run(self.predicter, feed_dict={self.x: x_test, self.y: y_dummy, self.keep_prob: 1.})
    return prediction

def draw_bs_pairs_linreg(x, y, size=1):
    """Perform pairs bootstrap for linear regression."""
    # Set up array of indices to sample from: inds
    inds = np.arange(len(x))
    # Initialize replicates: bs_slope_reps, bs_intercept_reps
    bs_slope_reps = np.empty(size)
    bs_intercept_reps = np.empty(size)
    # Generate replicates
    for i in range(size):
        bs_inds = np.random.choice(inds, size=len(inds))
        bs_x, bs_y = x[bs_inds], y[bs_inds]
        # noinspection PyTupleAssignmentBalance
        bs_slope_reps[i], bs_intercept_reps[i] = np.polyfit(bs_x, bs_y, 1)
    return bs_slope_reps, bs_intercept_reps

def draw_bs_pairs(x, y, func, size=1):
    """Perform pairs bootstrap for single statistic."""
    # Set up array of indices to sample from
    inds = np.arange(len(x))
    # Initialize replicates
    bs_replicates = np.empty(size)
    # Generate replicates
    for i in range(size):
        bs_inds = np.random.choice(inds, len(inds))
        bs_x, bs_y = x[bs_inds], y[bs_inds]
        bs_replicates[i] = func(bs_x, bs_y)
    return bs_replicates
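
A usage sketch for the two bootstrap helpers above, estimating 95% confidence intervals on synthetic linear data (true slope 2):

# Usage sketch (assumes the two bootstrap helpers defined above).
import numpy as np

x = np.linspace(0, 10, 50)
y = 2.0 * x + 1.0 + np.random.normal(0, 1.0, 50)

slopes, intercepts = draw_bs_pairs_linreg(x, y, size=1000)
print("slope 95% CI:", np.percentile(slopes, [2.5, 97.5]))

corr_reps = draw_bs_pairs(x, y, lambda a, b: np.corrcoef(a, b)[0, 1], size=1000)
print("corr 95% CI:", np.percentile(corr_reps, [2.5, 97.5]))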

def testMerge(self, dtype=dtype):
    # `dtype` is bound as a default argument, presumably from an enclosing
    # test-generation loop in the original module.
    testarray1 = range(1, 101)
    testarray2 = range(5, 106)
    a = numpy.empty((100, 2), dtype=dtype)
    b = numpy.empty((100, 2), dtype=dtype)
    merged = numpy.empty((200, 2), dtype=dtype)
    incompatible1 = numpy.empty((200, 3), dtype=dtype)
    incompatible2 = numpy.empty(200, dtype=dtype)
    a[:, 0] = numpy.arange(1, 101)
    a[:, 1] = numpy.arange(2, 102)
    b[:, 0] = numpy.arange(5, 105)
    b[:, 1] = numpy.arange(6, 106)
    ref = numpy.concatenate([a, b])
    ref = ref[numpy.argsort(ref[:, 0])]
    self.assertEqual(mapped_struct.index_merge(a, b, merged), 200)
    self.assertTrue((merged == ref).all())
    self.assertRaises(ValueError, mapped_struct.index_merge, a, b, incompatible1)
    self.assertRaises(ValueError, mapped_struct.index_merge, a, incompatible1, merged)
    self.assertRaises(ValueError, mapped_struct.index_merge, a, b, incompatible2)
    self.assertRaises(ValueError, mapped_struct.index_merge, a, incompatible2, merged)

From pos-tagging-explore.py, project Deep-Learning-with-Keras (PacktPublishing):

def build_tensor(filename, numrecs, word2index, maxlen,
                 make_categorical=False):
    data = np.empty((numrecs, ), dtype=list)
    fin = open(filename, "r")  # text mode, so lines are str like the word2index keys
    i = 0
    for line in fin:
        wids = []
        for word in line.strip().split():
            if word in word2index:
                wids.append(word2index[word])
            else:
                wids.append(word2index["UNK"])
        if make_categorical:
            data[i] = np_utils.to_categorical(
                wids, num_classes=len(word2index))
        else:
            data[i] = wids
        i += 1
    fin.close()
    pdata = sequence.pad_sequences(data, maxlen=maxlen)
    return pdata

def generatePath(self, x, y):
    if self.opts['stepMode']:
        ## each value in the x/y arrays generates 2 points.
        x2 = np.empty((len(x), 2), dtype=x.dtype)
        x2[:] = x[:, np.newaxis]
        if self.opts['fillLevel'] is None:
            x = x2.reshape(x2.size)[1:-1]
            y2 = np.empty((len(y), 2), dtype=y.dtype)
            y2[:] = y[:, np.newaxis]
            y = y2.reshape(y2.size)
        else:
            ## If we have a fill level, add two extra points at either end
            x = x2.reshape(x2.size)
            y2 = np.empty((len(y) + 2, 2), dtype=y.dtype)
            y2[1:-1] = y[:, np.newaxis]
            y = y2.reshape(y2.size)[1:-1]
            y[0] = self.opts['fillLevel']
            y[-1] = self.opts['fillLevel']
    path = fn.arrayToQPath(x, y, connect=self.opts['connect'])
    return path
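
The broadcast-assign-then-reshape trick above duplicates every sample, which is what gives a step plot its flat segments; standalone:

# Sketch: doubling each value for step plotting via broadcast + reshape.
import numpy as np

x = np.array([0., 1., 2., 3.])
x2 = np.empty((len(x), 2), dtype=x.dtype)
x2[:] = x[:, np.newaxis]          # copy each value into both columns
print(x2.reshape(x2.size))        # [0. 0. 1. 1. 2. 2. 3. 3.]
print(x2.reshape(x2.size)[1:-1])  # [0. 1. 1. 2. 2. 3.], pairs x edges with y values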

def dataType(obj):
    if hasattr(obj, '__len__') and len(obj) == 0:
        return 'empty'
    if isinstance(obj, dict):
        return 'dictOfLists'
    elif isSequence(obj):
        first = obj[0]
        if (hasattr(obj, 'implements') and obj.implements('MetaArray')):
            return 'MetaArray'
        elif isinstance(obj, np.ndarray):
            if obj.ndim == 1:
                if obj.dtype.names is None:
                    return 'listOfValues'
                else:
                    return 'recarray'
            elif obj.ndim == 2 and obj.dtype.names is None and obj.shape[1] == 2:
                return 'Nx2array'
            else:
                raise Exception('array shape must be (N,) or (N,2); got %s instead' % str(obj.shape))
        elif isinstance(first, dict):
            return 'listOfDicts'
        else:
            return 'listOfValues'

def getLookupTable(self, nPts, alpha=None):
    """
    Return an RGB(A) lookup table (ndarray).

    ============== ============================================================================
    **Arguments:**
    nPts           The number of points in the returned lookup table.
    alpha          True, False, or None - Specifies whether or not alpha values are included
                   in the table. If alpha is None, alpha will be automatically determined.
    ============== ============================================================================
    """
    if alpha is None:
        alpha = self.usesAlpha()
    if alpha:
        table = np.empty((nPts, 4), dtype=np.ubyte)
    else:
        table = np.empty((nPts, 3), dtype=np.ubyte)
    for i in range(nPts):
        x = float(i) / (nPts - 1)
        color = self.getColor(x, toQColor=False)
        table[i] = color[:table.shape[1]]
    return table
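
The returned table is just an (nPts, channels) uint8 array; a gradient-free sketch that builds a grayscale ramp with the same layout:

# Sketch: a grayscale lookup table shaped like the one above.
import numpy as np

nPts = 256
table = np.empty((nPts, 3), dtype=np.ubyte)
for i in range(nPts):
    x = float(i) / (nPts - 1)        # position in [0, 1]
    table[i] = (int(255 * x),) * 3   # gray: R == G == B
print(table.shape, table[0], table[-1])  # (256, 3) [0 0 0] [255 255 255]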

def renderShapeMask(self, width, height):
    """Return an array of 0.0-1.0 into which the shape of the item has been drawn.

    This can be used to mask array selections.
    """
    if width == 0 or height == 0:
        return np.empty((width, height), dtype=float)

    # QImage(width, height, format)
    im = QtGui.QImage(width, height, QtGui.QImage.Format_ARGB32)
    im.fill(0x0)
    p = QtGui.QPainter(im)
    p.setPen(fn.mkPen(None))
    p.setBrush(fn.mkBrush('w'))
    shape = self.shape()
    bounds = shape.boundingRect()
    p.scale(im.width() / bounds.width(), im.height() / bounds.height())
    p.translate(-bounds.topLeft())
    p.drawPath(shape)
    p.end()
    mask = fn.imageToArray(im, transpose=True)[:, :, 0].astype(float) / 255.
    return mask

def faceNormals(self, indexed=None):
    """
    Return an array (Nf, 3) of normal vectors for each face.

    If indexed='faces', then instead return an indexed array
    (Nf, 3, 3) (this is just the same array with each vector
    copied three times).
    """
    if self._faceNormals is None:
        v = self.vertexes(indexed='faces')
        self._faceNormals = np.cross(v[:, 1] - v[:, 0], v[:, 2] - v[:, 0])
    if indexed is None:
        return self._faceNormals
    elif indexed == 'faces':
        if self._faceNormalsIndexedByFaces is None:
            norms = np.empty((self._faceNormals.shape[0], 3, 3))
            norms[:] = self._faceNormals[:, np.newaxis, :]
            self._faceNormalsIndexedByFaces = norms
        return self._faceNormalsIndexedByFaces
    else:
        raise Exception("Invalid indexing mode. Accepts: None, 'faces'")