def consideronlylabels(self, list2consider, verbose=False):
    """
    Keep only the labels in `list2consider`: every other label found in the
    image is added to the `_ignoredlabels` set, and the `self._labels`
    cache is refreshed.
    """
    if isinstance(list2consider, int):
        list2consider = [list2consider]
    toignore = set(np.unique(self.image)) - set(list2consider)
    # Convert numpy scalar types to plain Python ints.
    integers = np.vectorize(int)
    toignore = integers(list(toignore)).tolist()
    if verbose:
        print('Adding labels', toignore, 'to the list of labels to ignore...')
    self._ignoredlabels.update(toignore)
    if verbose:
        print('Updating labels list...')
    self._labels = self.__labels()
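A minimal standalone sketch of the filtering step above, with illustrative names (`image`, `keep`):

import numpy as np

image = np.array([[0, 1, 2], [2, 3, 3]])
keep = {1, 2}
to_ignore = sorted(int(x) for x in set(np.unique(image)) - keep)
print(to_ignore)  # [0, 3]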
def __init__(self, num_hidden_nodes, data_matrix, data_labels, training_indices, use_file=True):
    # Vectorized sigmoid function
    self.sigmoid = np.vectorize(self._sigmoid_scalar)
    # Vectorized derivative of the sigmoid
    self.sigmoid_prime = np.vectorize(self._sigmoid_prime_scalar)
    # Whether to persist the network to a file
    self._use_file = use_file
    # Training data
    self.data_matrix = data_matrix
    self.data_labels = data_labels
    if not os.path.isfile(OCRNeuralNetwork.NN_FILE_PATH) or not use_file:
        # Randomly initialize the weights and biases
        self.theta1 = self._rand_initialize_weights(400, num_hidden_nodes)
        self.theta2 = self._rand_initialize_weights(num_hidden_nodes, 10)
        self.input_layer_bias = self._rand_initialize_weights(1, num_hidden_nodes)
        self.hidden_layer_bias = self._rand_initialize_weights(1, 10)
        # Train the network, then save it
        TrainData = namedtuple('TrainData', ['y0', 'label'])
        self.train([TrainData(self.data_matrix[i], int(self.data_labels[i])) for i in training_indices])
        self.save()
    else:
        # Load a previously trained network from nn.json
        self._load()
def test_keywords2_ticket_2100(self):
# Test kwarg support: enhancement ticket 2100
def foo(a, b=1):
return a + b
f = vectorize(foo)
args = np.array([1, 2, 3])
r1 = f(a=args)
r2 = np.array([2, 3, 4])
assert_array_equal(r1, r2)
r1 = f(b=1, a=args)
assert_array_equal(r1, r2)
r1 = f(args, b=2)
r2 = np.array([3, 4, 5])
assert_array_equal(r1, r2)
def setUp(self):
"""Setup script for each test
"""
# Initial estimate of prior functional form
init_prior = np.vectorize(lambda v: 2.56e9 / v**3)
# Create the model and *true* EOS
self.eos_model = EOSModel(init_prior, name="Default EOS Model")
self.eos_true = EOSBump()
# Create the objects to generate simulations and
# pseudo experimental data
self.exp1 = GunExperiment(model=self.eos_true)
self.sim1 = Gun(name="Default Gun Simulation")
self.exp2 = StickExperiment(model=self.eos_true)
self.sim2 = Stick()
# end
def test_shot_plot(self):
"""tests the plotting function
"""
init_prior = np.vectorize(lambda v: 2.56e9 / v**3)
# Create the model and *true* EOS
eos = EOSModel(init_prior)
gun = Gun()
data0 = gun({'eos': eos})
old_dof = eos.get_c()
old_dof[0] *= 1.02
eos.update_dof(old_dof)
data1 = gun({'eos': eos})
gun.plot(level=3, data=[data0, data1])
gun.plot(level=1, data=[data0, data1])
plt.show()
# end
def test_sigmoid():
'''
Test using a numerically stable reference sigmoid implementation
'''
def ref_sigmoid(x):
if x >= 0:
return 1 / (1 + np.exp(-x))
else:
z = np.exp(x)
return z / (1 + z)
sigmoid = np.vectorize(ref_sigmoid)
x = K.placeholder(ndim=2)
f = K.function([x], [activations.sigmoid(x)])
test_values = get_standard_values()
result = f([test_values])[0]
expected = sigmoid(test_values)
assert_allclose(result, expected, rtol=1e-05)
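The two-branch form above is what makes the reference numerically stable: for large negative x, computing np.exp(-x) directly would overflow. A quick standalone check with plain NumPy:

import numpy as np

def ref_sigmoid(x):
    if x >= 0:
        return 1 / (1 + np.exp(-x))
    z = np.exp(x)
    return z / (1 + z)

sig = np.vectorize(ref_sigmoid)
print(sig(np.array([-1000.0, 0.0, 1000.0])))  # [0.  0.5 1. ], no overflow warning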
def test_hard_sigmoid():
'''
Test using a reference hard sigmoid implementation
'''
def ref_hard_sigmoid(x):
'''
Reference hard sigmoid with slope and shift values from theano, see
https://github.com/Theano/Theano/blob/master/theano/tensor/nnet/sigm.py
'''
x = (x * 0.2) + 0.5
z = 0.0 if x <= 0 else (1.0 if x >= 1 else x)
return z
hard_sigmoid = np.vectorize(ref_hard_sigmoid)
x = K.placeholder(ndim=2)
f = K.function([x], [activations.hard_sigmoid(x)])
test_values = get_standard_values()
result = f([test_values])[0]
expected = hard_sigmoid(test_values)
assert_allclose(result, expected, rtol=1e-05)
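The reference above is equivalent to clipping the affine transform; a one-line check with plain NumPy:

import numpy as np

x = np.array([-5.0, -1.0, 0.0, 1.0, 5.0])
print(np.clip(x * 0.2 + 0.5, 0.0, 1.0))  # [0.  0.3 0.5 0.7 1. ]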
def _get_new_id_seq(pos, numbers):
    """
    A helper function to produce the new sequence of the transformed
    structure. The algorithm sorts the positions back to their initial
    order and uses the resulting indices to sort `numbers`.
    """
    # Map the atom positions into the fractional range [0, 1).
    pos = np.around(pos, decimals=3)
    func_tofrac = np.vectorize(lambda x: round((x % 1), 3))
    o_pos = func_tofrac(pos)
    z, y, x = o_pos[:, 2], o_pos[:, 1], o_pos[:, 0]
    inds = np.lexsort((z, y, x))
    return inds
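A tiny standalone demonstration of the fractional-wrap and lexsort steps, assuming three illustrative atom positions (columns are x, y, z):

import numpy as np

pos = np.array([[0.5, 0.0, 0.0],
                [1.0, 0.5, 0.0],
                [1.0, 0.0, 0.5]])
frac = np.vectorize(lambda v: round(v % 1, 3))(pos)
z, y, x = frac[:, 2], frac[:, 1], frac[:, 0]
print(np.lexsort((z, y, x)))  # [2 1 0]: sorted by x first, then y, then z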
def _ingest_pairs(self, pairs, oid2nid, frame_size, limit, single_sided):
oid2nid_v = np.vectorize(oid2nid.get)
# whole pairs set does not fit in memory, so split it in frames with `frame_size` number of pairs.
for start in range(0, limit, frame_size):
stop = frame_size + start
t1 = process_time()
six.print_('Fetching pairs {0}:{1} of {2} ... '.format(start, stop, limit), end='', flush=True)
raw_frame = pairs.read(start=start, stop=stop)
t2 = process_time()
six.print_('{0}s, Parsing ... '.format(int(t2 - t1)), flush=True)
frame = self._translate_frame(raw_frame, oid2nid_v, single_sided)
t3 = process_time()
            six.print_('{0}s, Writing ... '.format(int(t3 - t2)), flush=True)
# alternate direction, to make use of cached chunks of prev frame
self._ingest_pairs_frame(frame)
del frame
t4 = process_time()
six.print_('{0}s, Done with {1}:{2} in {3}s'.format(int(t4 - t3), start, stop, int(t4 - t1)), flush=True)
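The vectorized dict lookup above is a handy idiom for translating whole id arrays at once; a minimal sketch with illustrative data:

import numpy as np

oid2nid = {'a': 0, 'b': 1, 'c': 2}
translate = np.vectorize(oid2nid.get)
print(translate(np.array(['b', 'c', 'a'])))  # [1 2 0]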
def _plot_mpl(scheme):
# pylint: disable=relative-import, unused-variable
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.set_aspect('equal')
flt = numpy.vectorize(float)
pts = flt(scheme.points)
wgs = flt(scheme.weights)
for p, w in zip(pts, wgs):
# <https://en.wikipedia.org/wiki/Spherical_cap>
w *= 4 * numpy.pi
theta = numpy.arccos(1.0 - abs(w) / (2*numpy.pi))
color = '#1f77b4' if w >= 0 else '#d62728'
_plot_spherical_cap_mpl(ax, p, theta, color)
ax.set_axis_off()
return
def __init__(self, n, a=0.0, b=0.0):
# The general scheme is:
# Get the Jacobi recurrence coefficients, get the Kronrod vectors alpha
# and beta, and hand those off to orthopy.line.schemes.custom. There,
# the eigenproblem for a tridiagonal matrix with alpha and beta is
# solved to retrieve the points and weights.
# TODO replace math.ceil by -(-k//n)
length = int(math.ceil(3*n/2.0)) + 1
self.degree = 2*length + 1
_, _, alpha, beta = \
orthopy.line.recurrence_coefficients.jacobi(length, a, b, 'monic')
flt = numpy.vectorize(float)
alpha = flt(alpha)
beta = flt(beta)
a, b = self.r_kronrod(n, alpha, beta)
x, w = orthopy.line.schemes.custom(a, b, mode='numpy')
# sort by x
i = numpy.argsort(x)
self.points = x[i]
self.weights = w[i]
return
# pylint: disable=no-self-use
def map_predicate(self, f):
"""
Map a function from str -> bool element-wise over ``self``.
``f`` will be applied exactly once to each non-missing unique value in
``self``. Missing values will always return False.
"""
# Functions passed to this are of type str -> bool. Don't ever call
# them on None, which is the only non-str value we ever store in
# categories.
if self.missing_value is None:
def f_to_use(x):
return False if x is None else f(x)
else:
f_to_use = f
# Call f on each unique value in our categories.
results = np.vectorize(f_to_use, otypes=[bool_dtype])(self.categories)
# missing_value should produce False no matter what
results[self.reverse_categories[self.missing_value]] = False
        # unpack the results from each unique value into their corresponding
# locations in our indices.
return results[self.as_int_array()]
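The `otypes=[bool_dtype]` argument matters here: without it, np.vectorize infers the output dtype from the first result, which can misfire on object arrays. A minimal sketch of the same pattern with illustrative data:

import numpy as np

categories = np.array(['apple', 'banana', None], dtype=object)
f = np.vectorize(lambda s: False if s is None else s.startswith('a'),
                 otypes=[bool])
print(f(categories))  # [ True False False]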
def test_map_shrinks_code_storage_if_possible(self):
arr = LabelArray(
# Drop the last value so we fit in a uint16 with None as a missing
# value.
self.create_categories(16, plus_one=False)[:-1],
missing_value=None,
)
self.assertEqual(arr.itemsize, 2)
def either_A_or_B(s):
return ('A', 'B')[sum(ord(c) for c in s) % 2]
result = arr.map(either_A_or_B)
self.assertEqual(set(result.categories), {'A', 'B', None})
self.assertEqual(result.itemsize, 1)
assert_equal(
np.vectorize(either_A_or_B)(arr.as_string_array()),
result.as_string_array(),
)
def coords_edges(self, edges):
    '''
    Return the head- and tail-point coordinates for every edge in `edges`.
    '''
    res = np.empty((len(edges)), dtype=object)
    for r, e in zip(range(len(edges)), edges):
        if e[0] is None:
            e[0] = 0
        res[r] = self.coords_edge(e)
        if len(res[r][0]) != 2:
            print('there is an error with the edges')
            import pdb
            pdb.set_trace()
    # v = np.vectorize(self.coords_edge, otypes=[np.object])
    # res = v(edges)
    return res
def _get_alpha_data(data, kwargs):
    """Get alpha values for all data points.
    Parameters
    ----------
    data : array_like
        The data points to compute alpha values for.
    kwargs : dict
        May contain "alpha", either a fixed float or a callable applied
        element-wise to the data. Defaults to 1.
    Returns
    -------
    array_like or float
    """
    alpha = kwargs.pop("alpha", 1)
    if callable(alpha):
        return np.vectorize(alpha)(data)
    return alpha
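A usage sketch for the helper above, with illustrative data:

import numpy as np

data = np.array([0.0, 0.5, 1.0])
print(_get_alpha_data(data, {"alpha": 0.3}))              # 0.3 (fixed value)
print(_get_alpha_data(data, {"alpha": lambda v: v / 2}))  # [0.   0.25 0.5 ]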
def __init__(self, scale, pre=10):
"""
This class holds a queue of times drawn from an exponential
distribution with a specified scale.
Arguments:
- scale: The scale parameter for the exponential distribution.
- pre: Predefined size of the queue. Default=10
"""
self.scale = scale
self.pre = pre
self.queue = SimpleQueue(maxsize=pre + 1)
self.v_put = vectorize(self.queue.put_nowait)
        # The exponential distribution is not defined for a rate of 0,
        # so if the rate is 0 (scale is None) huge times are set instead.
if self.scale in [None, 0]:
self.scale = 0
self.draw_fct = no_mut
else:
self.draw_fct = random.exponential
        # Fill up the queue.
self.fillup()
        # New version, compatible with pickling (see __setstate__ below):
self.v_get = vectorize(self.get_val)
def __setstate__(self, d):
if 'simple_queue_list' in d:
event_queue_list = d.pop('simple_queue_list')
d['queue'] = SimpleQueue(maxsize=d['pre'] + 1)
while len(event_queue_list):
d['queue'].put_nowait(event_queue_list.pop())
self.__dict__.update(d)
        self.__dict__['v_put'] = vectorize(self.queue.put_nowait)
        self.__dict__['v_get'] = vectorize(self.get_val)
if self.scale is None:
self.scale = 0
self.queue = SimpleQueue(maxsize=self.pre + 1)
self.v_put = vectorize(self.queue.put_nowait) # this is specific to the queue, thus reinit here
self.draw_fct = no_mut
self.fillup()
def __init__(self, source, **params):
    #_Graph.__init__(self)
    self.is_static = False
    if isinstance(source, str):  # it is a file
        self._load(source, **params)
    else:  # source must be an EventQueue then
        # to do: read from event queue
        # should also get self.starts, ...
        pass
    self.t_start = params.get('t_start', np.min(self.starts))
    self.t_stop = params.get('t_stop', np.max(self.stops))
    # ToDo: Ideally only use self.all_nodes
    self.all_nodes = list(np.union1d(self.node1s, self.node2s))
    all_nodes = self.all_nodes
    n = len(self.all_nodes)
    # Remap the original node ids to consecutive integer ids.
    def get_id(an_id):
        return all_nodes.index(an_id)
    v_get_id = np.vectorize(get_id)
    self.node1s = v_get_id(self.node1s)
    self.node2s = v_get_id(self.node2s)
    _Graph.__init__(self, n=n)
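A minimal sketch of the id-remapping idiom used above, with illustrative node labels:

import numpy as np

all_nodes = ['u', 'v', 'w']
v_get_id = np.vectorize(all_nodes.index)
print(v_get_id(np.array(['w', 'u', 'w'])))  # [2 0 2]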
def _init_genotypes(self):
"""Construct an array of genotype vectors, one per variant.
If it is found in cache, use the cached version,
otherwise recompute it and cache the result.
Either way, store a copy in local process memory.
"""
if self.genotypes_key in self.cache:
# Read cache, store in local memory
self._gt_types_bit = self._get_genotypes()
else:
# Regenerate, cache, and store in local memory
gt_types = extract_genotypes(db=self.db)
f = np.vectorize(variant_build_gt_type_bit, otypes=[np.uint8]) # apply to all array elements
self._gt_types_bit = f(gt_types)
self._gt_types_bit.flags.writeable = False # make it immutable
self._save_genotypes(self._gt_types_bit)
# From minesweeper.py (project: solving-minesweeper-by-tensorflow, author: staytime)
def game(self, mask=False):
    q = list()
    if mask is True:
        q.append(self.layers.flags)
        q.append(self.layers.masks)
    q.append(self.layers.mines)
    q.append(self.layers.hints)
    __ = self.addLayers(q)
    # Element-wise comparison with None marks the empty cells.
    __[__ == None] = __TOKEN_EMPTY__
    f = np.vectorize(str)
    return f(__).T.tolist()
def _postcompute_biases(self):
""" Post-computed biases for non-boundary training examples (support vectors) when training is done.
This is for estimating sample mean and sample std of biases.
For a good learning result, sample std of biases should be small.
"""
def _b(i):
if self.enable_kernel_cache:
return self.train_y[i] - np.dot(self.alpha*self.train_y, self.kernel_cache[i])
else:
return self.train_y[i] - self._f(self.train_X[i])
        I_non_boundary = np.where(np.logical_and(self.alpha > 0, self.alpha < self.C))[0].tolist()
if len(I_non_boundary):
biases = np.vectorize(_b)(I_non_boundary)
self.b_mean = np.mean(biases)
self.b_std = np.sqrt(np.sum((biases - self.b_mean)**2) / (len(biases) - 1))
self.postcomputed_biases[I_non_boundary] = biases
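For reference, the manual sample-std computation above matches NumPy's ddof=1 estimator; a quick check with illustrative values:

import numpy as np

biases = np.array([0.1, 0.2, 0.15])
manual = np.sqrt(np.sum((biases - biases.mean())**2) / (len(biases) - 1))
assert np.isclose(manual, np.std(biases, ddof=1))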
def calcr2s(lc1, lc2, reltimeshifts, spline, trace=False):
"""
    I calculate the r2 for an array of relative time shifts.
    To be compared to calcd2 of pycs.pelt.twospec!
"""
lc2abstimeshifts = reltimeshifts + lc2.timeshift
def r2(lc2abstimeshift):
# We work with copies at every trial time delay, to always start from the same position.
mylc1 = lc1.copy()
mylc2 = lc2.copy()
mylc2.timeshift = lc2abstimeshift
myspline = spline.copy()
return pycs.spl.multiopt.opt_source([mylc1, mylc2], myspline, verbose=False, trace=trace)
# We vectorize this before applying it to our abstimeshifts
vecr2 = np.vectorize(r2, otypes=[np.ndarray])
r2s = vecr2(lc2abstimeshifts)
return r2s
# From Stock_Prediction_Data_Processing.py (project: StockRecommendSystem, author: doncat99)
def preprocessing_train_data(paras, df, LabelColumnName, ticker, train_tickers_dict, one_hot_label_proc, array_format=True):
    day_list = train_tickers_dict[ticker]
    # Reindex by date strings so we can intersect with the per-ticker day list.
    index_df = np.vectorize(lambda s: s.strftime('%Y-%m-%d'))(df.index.to_pydatetime())
    df.index = index_df
    common_day = list(set(day_list).intersection(set(index_df)))
    df = df.loc[common_day]
    X = df.drop(LabelColumnName, axis=1)
    y = np.array(df[LabelColumnName])
    if one_hot_label_proc:
        # Generate one-hot output.
        y_normalized_T = one_hot_processing(y, paras.n_out_class)
    else:
        y_normalized_T = y.astype(int)
    if array_format:
        return X.values, y_normalized_T
    return X, y_normalized_T
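A minimal sketch of the date-formatting idiom used above, with an illustrative index:

import numpy as np
import pandas as pd

idx = pd.date_range('2020-01-01', periods=3)
print(np.vectorize(lambda s: s.strftime('%Y-%m-%d'))(idx.to_pydatetime()))
# ['2020-01-01' '2020-01-02' '2020-01-03']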