# Python uint() usage examples

import numpy as np


def saveTxt(filename, ndarray):
    with open(filename, 'w') as f:
        # One-hot label strings: labels[i] is the i-th row of a 10x10 identity matrix.
        labels = list(map(' '.join, np.eye(10, dtype=np.uint).astype(str)))
        for row in ndarray:
            row_str = row.astype(str)
            label_str = labels[row[-1]]           # last column holds the class label
            feature_str = ' '.join(row_str[:-1])  # remaining columns are features
            f.write('|labels {} |features {}\n'.format(label_str, feature_str))
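# A minimal usage sketch, assuming MNIST-style rows where the leading columns
# are pixel features and the last column is a digit label in 0..9; the output
# is CNTK's CTF text format.
data = np.array([[0, 255, 128, 64, 3],
                 [12, 34, 56, 78, 7]], dtype=np.uint)
saveTxt('sample.txt', data)
# Each written line looks like:
# |labels 0 0 0 1 0 0 0 0 0 0 |features 0 255 128 64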
def test_dtype_keyerrors_(self):
    # Ticket #1106.
    dt = np.dtype([('f1', np.uint)])
    assert_raises(KeyError, dt.__getitem__, "f2")
    assert_raises(IndexError, dt.__getitem__, 1)
    assert_raises(ValueError, dt.__getitem__, 0.0)
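# For context, the indexing behavior this regression test pins down: structured
# dtypes accept field names and integer positions, and reject anything else.
dt = np.dtype([('f1', np.uint)])
print(dt['f1'])  # the field's dtype (uint64 on most 64-bit platforms)
print(dt[0])     # same field, accessed by position
# dt['f2'] -> KeyError (unknown field name)
# dt[1]    -> IndexError (only one field exists)
# dt[0.0]  -> ValueError (floats are not valid field indices)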
def make_stack(series):
    stack_size = compute_stack_size(series)
    # One structured record per (document, word) pair.
    # Note: np.float was an alias for the builtin float and was removed in NumPy 1.24.
    new = np.empty(stack_size, dtype=[('doc_index', np.uint), ('word', "S30"), ('value', float)])
    counter = 0
    # Series.iteritems() was removed in pandas 2.0; items() is the replacement.
    for row in series.items():
        for word in row[1]:
            new[counter] = (row[0], word, row[1][word])
            counter += 1
    return new
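# A hedged usage sketch: compute_stack_size is not shown in the source, so the
# helper below is an assumption (total number of (doc, word) pairs); the Series
# is assumed to map document indices to word -> value dicts.
import pandas as pd

def compute_stack_size(series):
    return sum(len(counts) for counts in series)

series = pd.Series([{'cat': 1.0, 'dog': 2.0}, {'fish': 3.0}])
stack = make_stack(series)
print(stack['word'])  # [b'cat' b'dog' b'fish']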
def get_articles_by_distance(article, corpus):
    # `article` is the row from the articles DataFrame.
    article = corpus[article['index'], :]
    iterable = ((x, cosine_distance(article, corpus[x, :])) for x in range(corpus.shape[0]))
    articles_by_distance = np.fromiter(iterable, dtype='uint,float', count=corpus.shape[0])
    articles_by_distance = (pd.DataFrame(articles_by_distance)
                            .rename(columns={'f0': 'index', 'f1': 'cosine_distance'})
                            .sort_values(by='cosine_distance'))
    return articles_by_distance[0:25]
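# cosine_distance is an external helper here; a minimal sketch consistent with
# its use on two 1-D dense vectors might be:
def cosine_distance(u, v):
    # 1 - cosine similarity; assumes non-zero vectors.
    return 1.0 - np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))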
def backproject_depth(self, depth):
    constant_x = 1.0 / self.focal_x
    constant_y = 1.0 / self.focal_y
    row, col = depth.shape
    # Build a (row*col, 2) array of (row, col) pixel coordinates.
    coords = np.zeros((row, col, 2), dtype=np.uint)
    coords[..., 0] = np.arange(row)[:, None]
    coords[..., 1] = np.arange(col)
    coords = coords.reshape((-1, 2))
    output = np.zeros((len(coords), 3))
    values = depth[coords[:, 0], coords[:, 1]]
    output[:, 0] = (coords[:, 1] - self.center_x) * values * constant_x
    output[:, 1] = (coords[:, 0] - self.center_y) * values * constant_y
    output[:, 2] = values
    return output
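# This is the standard pinhole-camera back-projection: a pixel at column u,
# row v with depth z maps to X = (u - cx) * z / fx, Y = (v - cy) * z / fy,
# Z = z. A usage sketch with hypothetical intrinsics (real values come from
# calibration):
from types import SimpleNamespace

cam = SimpleNamespace(focal_x=525.0, focal_y=525.0, center_x=319.5, center_y=239.5)
depth = np.ones((480, 640))             # a flat plane one unit from the camera
points = backproject_depth(cam, depth)  # call the unbound function directly
print(points.shape)                     # (307200, 3)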
# Source: test_ujson.py, project PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda (author: SignalMedia)
def testIntArray(self):
    # np.int was an alias for the builtin int, deprecated in NumPy 1.20 and
    # removed in 1.24; np.int_ preserves the original intent.
    arr = np.arange(100, dtype=np.int_)
    dtypes = (np.int_, np.int8, np.int16, np.int32, np.int64,
              np.uint, np.uint8, np.uint16, np.uint32, np.uint64)
    for dtype in dtypes:
        inpt = arr.astype(dtype)
        # Round-trip through ujson and compare element-wise.
        outp = np.array(ujson.decode(ujson.encode(inpt)), dtype=dtype)
        tm.assert_numpy_array_equal(inpt, outp)
def transNK(self, d, N, problem_arg=0):
    # return np.arange(0, N), np.arange(0, N)
    # Each ind has 2^|ind|_0 samples.
    indSet = setutil.GenTDSet(d, N, base=0)
    N_per_ind = 2**np.sum(indSet != 0, axis=1)
    if problem_arg == 1:
        # Integer array: use floor division (plain /= raises a casting error
        # on integer dtypes under Python 3).
        N_per_ind[1:] //= 2
    _, k_ind = np.unique(np.sum(indSet, axis=1), return_inverse=True)
    # np.int was removed in NumPy 1.24; np.int64 keeps the original intent.
    k_of_N = np.repeat(k_ind, N_per_ind.astype(np.int64))[:N]
    # N_of_k = [j + np.arange(0, i, dtype=np.uint) for i, j in
    #           zip(N_per_ind, np.hstack((np.array([0], dtype=np.uint),
    #                                     np.cumsum(N_per_ind)[:np.max(k_of_N)])))]
    return k_of_N
def test_json_numpy_encoder_int(self):
    assert (json.dumps(np.uint(10), cls=utils.JSONNumpyEncoder)
            == json.dumps(10))

def test_json_numpy_encoder_int_array(self):
    array = np.arange(10, dtype=np.uint).reshape(2, 5)
    assert (json.dumps(array, cls=utils.JSONNumpyEncoder)
            == json.dumps(array.tolist()))

def test_serialize_json(self):
    array = np.arange(10, dtype=np.uint).reshape(2, 5)
    assert (utils.serialize_json(array)
            == json.dumps(array.tolist()))
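# The tests above reference utils.JSONNumpyEncoder and utils.serialize_json,
# which are not shown. A minimal sketch consistent with the assertions,
# assuming the encoder simply unwraps NumPy scalars and arrays:
import json

class JSONNumpyEncoder(json.JSONEncoder):
    """Encode NumPy scalars and arrays as their plain-Python equivalents."""
    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return super().default(obj)

def serialize_json(data):
    return json.dumps(data, cls=JSONNumpyEncoder)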
def is_integer(test_value):
    """ Check all available integer representations.

    @return: bool, True if the passed value is an integer, otherwise False.
    """
    # np.int was an alias for the builtin int and was removed in NumPy 1.24.
    return type(test_value) in [int, np.int8, np.int16, np.int32, np.int64,
                                np.uint, np.uint8, np.uint16, np.uint32,
                                np.uint64]
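# A more future-proof variant uses NumPy's abstract scalar hierarchy:
# np.integer covers every fixed-width integer type at once. Note that, unlike
# the type() check above, isinstance also accepts subclasses such as bool.
def is_integer_v2(test_value):
    return isinstance(test_value, (int, np.integer))

print(is_integer_v2(np.uint8(3)))  # True
print(is_integer_v2(3.0))          # False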
import cv2

def mask_od_vessels(skel, od_center):
    # Create optic disk mask. Fill the circle with 255 so the inverted mask is
    # exactly 0 inside the disk (the original drew value 1, which only zeroed
    # out value-1 skeletons via uint8 truncation).
    od_mask = np.zeros_like(skel, dtype=np.uint8)
    cv2.circle(od_mask, od_center, 30, 255, -1)
    od_mask_inv = np.invert(od_mask) / 255.
    # np.float was removed in NumPy 1.24; the builtin float is equivalent here.
    skel = skel.astype(float)
    masked_skel = skel * od_mask_inv
    return masked_skel.astype(np.uint8)
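# A hedged usage sketch on synthetic data: a one-pixel "vessel" crossing a
# disk centered mid-image should be zeroed out within the 30-pixel radius.
skel = np.zeros((128, 128), dtype=np.uint8)
skel[64, :] = 1                        # a horizontal one-pixel vessel
masked = mask_od_vessels(skel, (64, 64))
print(masked[64, 64], masked[64, 0])   # 0 inside the disk, 1 outside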
# def line_diameters(edt, lines):
#
#     diameters = []
#
#     for line in lines:
#
#         p0, p1 = [np.asarray(pt) for pt in line]
#         vec = p1 - p0  # vector between segment end points
#         vec_len = np.linalg.norm(vec)
#
#         pts_along_line = np.uint(np.asarray([p0 + (i * vec) for i in np.arange(0., 1., 1. / vec_len)]))
#
#         for pt in pts_along_line:
#
#             try:
#                 diameters.append(edt[pt[0], pt[1]])
#             except IndexError:
#                 pass
#
#     return diameters
def train(self, X: np.ndarray, Y: np.ndarray, **kwargs):
    """Trains the EPM on X and Y.

    Parameters
    ----------
    X : np.ndarray [n_samples, n_features (config + instance features)]
        Input data points.
    Y : np.ndarray [n_samples, n_objectives]
        The corresponding target values. n_objectives must match the
        number of target names specified in the constructor.

    Returns
    -------
    self : AbstractEPM
    """
    self.n_params = X.shape[1] - self.n_feats

    # Reduce the dimensionality of the instance features if there are more
    # than PCA_DIM of them.
    if self.pca and X.shape[0] > 1:
        X_feats = X[:, -self.n_feats:]
        # Scale features.
        X_feats = self.scaler.fit_transform(X_feats)
        X_feats = np.nan_to_num(X_feats)  # in case a feature has max == min
        # PCA
        X_feats = self.pca.fit_transform(X_feats)
        X = np.hstack((X[:, :self.n_params], X_feats))
        if hasattr(self, "types"):
            # For RF, adapt the types list: with fewer samples than PCA
            # components, X_feats.shape[1] == X_feats.shape[0].
            self.types = np.array(np.hstack((self.types[:self.n_params],
                                             np.zeros(X_feats.shape[1]))),
                                  dtype=np.uint)
    return self._train(X, Y)
def test_predict(self):
    rs = np.random.RandomState(1)
    X = rs.rand(20, 10)
    Y = rs.rand(10, 1)
    model = RandomForestWithInstances(np.zeros((10,), dtype=np.uint),
                                      bounds=np.array(list(map(lambda x: (0, 10), range(10))),
                                                      dtype=object))
    model.train(X[:10], Y[:10])
    m_hat, v_hat = model.predict(X[10:])
    self.assertEqual(m_hat.shape, (10, 1))
    self.assertEqual(v_hat.shape, (10, 1))
def test_train_with_pca(self):
    rs = np.random.RandomState(1)
    X = rs.rand(20, 20)
    F = rs.rand(10, 10)
    Y = rs.rand(20, 1)
    model = RandomForestWithInstances(np.zeros((20,), dtype=np.uint),
                                      np.array(list(map(lambda x: (0, 10), range(10))),
                                               dtype=object),
                                      pca_components=2,
                                      instance_features=F)
    model.train(X, Y)
    self.assertEqual(model.n_params, 10)
    self.assertEqual(model.n_feats, 10)
    self.assertIsNotNone(model.pca)
    self.assertIsNotNone(model.scaler)