def label_and_build_mask(self, episode):
    # Note: the comprehension drops frames whose action is None; this assumes
    # only trailing frames lack actions, so indices stay aligned below.
    is_catastrophe_array = np.array(
        [is_catastrophe(frame.image) for frame in episode.frames if frame.action is not None])
    # should_block_array = np.array([should_block(frame.image, frame.action) for frame in episode.frames])
    # np.bool was removed in NumPy 1.24; the builtin bool works as a dtype.
    labels = np.full(len(episode.frames), fill_value=False, dtype=bool)
    mask = np.full(len(episode.frames), fill_value=True, dtype=bool)
    for i in range(len(episode.frames)):
        # Exclude frames whose lookahead window runs past the episode's end.
        if i + self.block_radius + 1 >= len(episode.frames):
            mask[i] = False
            continue
        # Exclude frames that are already catastrophes.
        if is_catastrophe_array[i]:
            mask[i] = False
            continue
        # Label a frame positive if a catastrophe occurs within the next
        # block_radius + 1 frames.
        for j in range(self.block_radius + 1):
            if is_catastrophe_array[i + j + 1]:
                labels[i] = True
                break
    return labels, mask
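# Usage sketch (editor's example, not from the original project): the pattern
# above preallocates boolean label/mask arrays with np.full and flips single
# entries. A self-contained toy version of the same lookahead labeling,
# assuming a plain boolean array of catastrophe flags instead of frames:
import numpy as np

def label_with_lookahead(is_catastrophe, block_radius):
    n = len(is_catastrophe)
    labels = np.full(n, False, dtype=bool)
    mask = np.full(n, True, dtype=bool)
    for i in range(n):
        # Mask frames without a full lookahead window or already in catastrophe.
        if i + block_radius + 1 >= n or is_catastrophe[i]:
            mask[i] = False
            continue
        labels[i] = is_catastrophe[i + 1:i + block_radius + 2].any()
    return labels, mask

flags = np.array([False] * 5 + [True] + [False] * 4)
labels, mask = label_with_lookahead(flags, block_radius=2)
print(labels.astype(int))  # 1s at indices 2-4, just before the catastrophe at 5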
def setUp(self):
    super(BridgeTest, self).setUp()
    self.batch_size = 4
    self.encoder_cell = tf.contrib.rnn.MultiRNNCell(
        [tf.contrib.rnn.GRUCell(4), tf.contrib.rnn.GRUCell(8)])
    self.decoder_cell = tf.contrib.rnn.MultiRNNCell(
        [tf.contrib.rnn.LSTMCell(16), tf.contrib.rnn.GRUCell(8)])
    final_encoder_state = nest.map_structure(
        lambda x: tf.convert_to_tensor(
            value=np.random.randn(self.batch_size, x),
            dtype=tf.float32),
        self.encoder_cell.state_size)
    self.encoder_outputs = EncoderOutput(
        outputs=tf.convert_to_tensor(
            value=np.random.randn(self.batch_size, 10, 16), dtype=tf.float32),
        attention_values=tf.convert_to_tensor(
            value=np.random.randn(self.batch_size, 10, 16), dtype=tf.float32),
        attention_values_length=np.full([self.batch_size], 10),
        final_state=final_encoder_state)
def _normalise_data(self):
    self.train_x_mean = np.zeros(self.input_dim)
    self.train_x_std = np.ones(self.input_dim)
    self.train_y_mean = np.zeros(self.output_dim)
    self.train_y_std = np.ones(self.output_dim)
    if self.normalise_data:
        self.train_x_mean = np.mean(self.train_x, axis=0)
        self.train_x_std = np.std(self.train_x, axis=0)
        self.train_x_std[self.train_x_std == 0] = 1.
        self.train_x = (self.train_x - np.full(self.train_x.shape, self.train_x_mean, dtype=np.float32)) / \
            np.full(self.train_x.shape, self.train_x_std, dtype=np.float32)
        self.test_x = (self.test_x - np.full(self.test_x.shape, self.train_x_mean, dtype=np.float32)) / \
            np.full(self.test_x.shape, self.train_x_std, dtype=np.float32)
        self.train_y_mean = np.mean(self.train_y, axis=0)
        self.train_y_std = np.std(self.train_y, axis=0)
        # Guard against zero variance. Comparing the std array to 0 with a bare
        # `if` only works for scalars, so apply the element-wise guard directly.
        self.train_y_std[self.train_y_std == 0] = 1.
        self.train_y = (self.train_y - self.train_y_mean) / self.train_y_std
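# Note (editor's sketch): np.full(shape, stat) above just materializes the
# broadcast of a per-feature statistic to the data's shape; plain NumPy
# broadcasting yields identical values without the intermediate copies.
import numpy as np

x = np.random.randn(100, 3).astype(np.float32)
mean, std = x.mean(axis=0), x.std(axis=0)
std[std == 0] = 1.0
via_full = (x - np.full(x.shape, mean, dtype=np.float32)) / np.full(x.shape, std, dtype=np.float32)
via_broadcast = (x - mean) / std
print(np.allclose(via_full, via_broadcast))  # True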
def fit_predict(self, ts):
    """
    Unsupervised training of TSBitMaps.

    :param ts: 1-D numpy array or pandas.Series
    :return labels: `+1` for normal observations and `-1` for abnormal observations
    """
    assert self._lag_window_size > self._feature_window_size, \
        'lag_window_size must be greater than feature_window_size'
    self._ref_ts = ts
    scores = self._slide_chunks(ts)
    self._ref_bitmap_scores = scores
    thres = np.percentile(scores[self._lag_window_size: -self._lead_window_size + 1], self._q)
    labels = np.full(len(ts), 1)
    for idx, score in enumerate(scores):
        if score > thres:
            labels[idx] = -1
    return labels
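# Usage sketch (synthetic scores, not bitmap distances): the labeling step is
# just np.full(len(ts), 1) followed by flipping entries above a percentile
# threshold, which can also be done without the explicit loop:
import numpy as np

rng = np.random.default_rng(0)
scores = rng.random(200)
scores[50] = 5.0  # one obvious outlier

thres = np.percentile(scores, 99)
labels = np.full(len(scores), 1)
labels[scores > thres] = -1  # vectorized equivalent of the loop above
print(np.where(labels == -1)[0])  # includes index 50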
def create_bitmap_grid(bitmap, n, num_bins, level_size):
    """
    Arranges a time-series bitmap into a 2-D grid for heatmap visualization.
    """
    assert num_bins % n == 0, 'num_bins has to be a multiple of n'
    m = num_bins // n
    row_count = int(math.pow(m, level_size))
    col_count = int(math.pow(n, level_size))
    grid = np.full((row_count, col_count), 0.0)
    for feat, count in bitmap.items():
        i, j = symbols2index(m, n, feat)
        grid[i, j] = count
    return grid
def get_data(setname):
    dataset = CorporaDataSet(setname)
    # topic_word_array = dataset.getWordsInTopicMatrix()
    # topic_doc_array = dataset.getDocsInTopicMatrix()
    topic_word_array = dataset.getDocsInTopicMatrix()
    topic_doc_array = dataset.getWordsInTopicMatrix().T
    doc_length_array = numpy.full([topic_doc_array.shape[0]], 1)
    vocabulary = dataset.loadVocabulary()[0].keys()
    # Python 2 print statements converted to print() calls.
    print("topic word array shape: ", topic_word_array.shape)
    print("topic doc shape: ", topic_doc_array.shape)
    print("vocabulary: ", len(vocabulary))
    wordfreqs = mmread(setname + ".mtx").sum(1)
    word_freq_array = numpy.array(wordfreqs)[:, 0]
    return {topic_word_key: topic_word_array,
            topic_doc_key: topic_doc_array,
            doc_length_key: doc_length_array,
            vocabulary_key: vocabulary,
            word_freq_key: word_freq_array}
def sphankel1(n, kr):
    """Spherical Hankel (first kind) of order n at kr

    Parameters
    ----------
    n : array_like
        Order
    kr : array_like
        Argument

    Returns
    -------
    hn1 : complex float
        Spherical Hankel function hn (first kind)
    """
    n, kr = scalar_broadcast_match(n, kr)
    # np.complex_ was removed in NumPy 2.0; use the explicit dtype.
    hn1 = _np.full(n.shape, _np.nan, dtype=_np.complex128)
    kr_nonzero = kr != 0
    hn1[kr_nonzero] = _np.sqrt(_np.pi / 2) / _np.lib.scimath.sqrt(kr[kr_nonzero]) * hankel1(n[kr_nonzero] + 0.5, kr[kr_nonzero])
    return hn1
def sphankel2(n, kr):
    """Spherical Hankel (second kind) of order n at kr

    Parameters
    ----------
    n : array_like
        Order
    kr : array_like
        Argument

    Returns
    -------
    hn2 : complex float
        Spherical Hankel function hn (second kind)
    """
    n, kr = scalar_broadcast_match(n, kr)
    hn2 = _np.full(n.shape, _np.nan, dtype=_np.complex128)
    kr_nonzero = kr != 0
    hn2[kr_nonzero] = _np.sqrt(_np.pi / 2) / _np.lib.scimath.sqrt(kr[kr_nonzero]) * hankel2(n[kr_nonzero] + 0.5, kr[kr_nonzero])
    return hn2
def dsphankel1(n, kr):
    """Derivative spherical Hankel (first kind) of order n at kr

    Parameters
    ----------
    n : array_like
        Order
    kr : array_like
        Argument

    Returns
    -------
    dhn1 : complex float
        Derivative of spherical Hankel function hn' (first kind)
    """
    n, kr = scalar_broadcast_match(n, kr)
    dhn1 = _np.full(n.shape, _np.nan, dtype=_np.complex128)
    kr_nonzero = kr != 0
    dhn1[kr_nonzero] = 0.5 * (sphankel1(n[kr_nonzero] - 1, kr[kr_nonzero]) - sphankel1(n[kr_nonzero] + 1, kr[kr_nonzero]) - sphankel1(n[kr_nonzero], kr[kr_nonzero]) / kr[kr_nonzero])
    return dhn1
def dsphankel2(n, kr):
    """Derivative spherical Hankel (second kind) of order n at kr

    Parameters
    ----------
    n : array_like
        Order
    kr : array_like
        Argument

    Returns
    -------
    dhn2 : complex float
        Derivative of spherical Hankel function hn' (second kind)
    """
    n, kr = scalar_broadcast_match(n, kr)
    dhn2 = _np.full(n.shape, _np.nan, dtype=_np.complex128)
    kr_nonzero = kr != 0
    dhn2[kr_nonzero] = 0.5 * (sphankel2(n[kr_nonzero] - 1, kr[kr_nonzero]) - sphankel2(n[kr_nonzero] + 1, kr[kr_nonzero]) - sphankel2(n[kr_nonzero], kr[kr_nonzero]) / kr[kr_nonzero])
    return dhn2
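# Sanity check (editor's sketch): all four helpers rely on the half-integer
# relation h_n(x) = sqrt(pi / (2 x)) * H_{n + 1/2}(x). It can be verified
# against SciPy's real spherical Bessel functions via h1_n = j_n + i * y_n:
import numpy as np
from scipy.special import hankel1, spherical_jn, spherical_yn

n, x = 2, 1.5
via_hankel = np.sqrt(np.pi / 2) / np.sqrt(x) * hankel1(n + 0.5, x)
via_bessel = spherical_jn(n, x) + 1j * spherical_yn(n, x)
print(np.allclose(via_hankel, via_bessel))  # True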
def test_find_multiple_noisy(self):
    """ Test finding multiple particles (noisy) """
    self.atol = 5
    radius = np.random.random() * 15 + 15
    generated_image = self.generate_image(radius, 10, noise=0.2)
    actual_number = len(generated_image.coords)
    fits = find_disks(generated_image.image, (radius / 2.0, radius * 2.0),
                      maximum=actual_number)
    _, coords = sort_positions(generated_image.coords,
                               np.array([fits['y'].values,
                                         fits['x'].values]).T)
    if len(fits) == 0:  # Nothing found
        actual = np.repeat([[np.nan, np.nan, np.nan]], actual_number, axis=0)
    else:
        actual = fits[['r', 'y', 'x']].values.astype(np.float64)
    expected = np.array([np.full(actual_number, radius, np.float64),
                         coords[:, 0], coords[:, 1]]).T
    return np.sqrt(((actual - expected) ** 2).mean(0)), [0] * 3
def test_fit(self):
    '''
    Tests the fit to samples.
    '''
    # Generate random variates
    size = 100
    samples = self.vine.rvs(size)
    # Fit mixed vine to samples
    is_continuous = np.full(self.dim, True, dtype=bool)
    is_continuous[1] = False
    vine_est = MixedVine.fit(samples, is_continuous)
    assert_approx_equal(vine_est.root.copulas[0].theta, 0.77490,
                        significant=5)
    assert_approx_equal(vine_est.root.input_layer.copulas[0].theta,
                        4.01646, significant=5)
    assert_approx_equal(vine_est.root.input_layer.copulas[1].theta,
                        4.56877, significant=5)
def _logcdf(self, samples):
    lower = np.full(2, -np.inf)
    upper = norm.ppf(samples)
    limit_flags = np.zeros(2)
    if upper.shape[0] > 0:
        def func1d(upper1d):
            '''
            Calculates the multivariate normal cumulative distribution
            function of a single sample.
            '''
            return mvn.mvndst(lower, upper1d, limit_flags, self.theta)[1]
        vals = np.apply_along_axis(func1d, -1, upper)
    else:
        vals = np.empty((0, ))
    old_settings = np.seterr(divide='ignore')
    vals = np.log(vals)
    np.seterr(**old_settings)
    vals[np.any(samples == 0.0, axis=1)] = -np.inf
    vals[samples[:, 0] == 1.0] = np.log(samples[samples[:, 0] == 1.0, 1])
    vals[samples[:, 1] == 1.0] = np.log(samples[samples[:, 1] == 1.0, 0])
    return vals
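# Rough equivalent (editor's sketch, assuming a scalar correlation theta) using
# only scipy.stats instead of the Fortran mvn.mvndst routine; samples at
# exactly 0 or 1 would still need the special-casing above:
import numpy as np
from scipy.stats import norm, multivariate_normal

def gaussian_copula_logcdf(samples, theta):
    upper = norm.ppf(samples)  # map uniforms to normal quantiles
    cov = np.array([[1.0, theta], [theta, 1.0]])
    return np.log(multivariate_normal(mean=np.zeros(2), cov=cov).cdf(upper))

u = np.array([[0.3, 0.7], [0.5, 0.5]])
print(gaussian_copula_logcdf(u, theta=0.5))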
def test_aggregate_variance(self):
    result = self.raster_rdd.aggregate_by_cell(Operation.VARIANCE)
    band = np.array([[
        [1, 1.5, 2, 2.5, 3],
        [1.5, 2, 2.5, 3, 3.5],
        [2, 2.5, 3, 3.5, 4],
        [2.5, 3, 3.5, 4, 4.5],
        [3, 3.5, 4, 4.5, 5]]])
    expected = np.array([
        ((self.first - band) ** 2) + ((self.second - band) ** 2),
        ((self.first - band) ** 2) + ((self.second - band) ** 2)
    ])
    expected_2 = np.full((5, 5), -1.0)
    self.assertTrue((result.lookup(1, 0)[0].cells == expected).all())
    self.assertTrue((result.lookup(0, 0)[0].cells == expected_2).all())
def test_aggregate_std(self):
    result = self.raster_rdd.aggregate_by_cell(Operation.STANDARD_DEVIATION)
    band = np.array([[
        [1, 1.5, 2, 2.5, 3],
        [1.5, 2, 2.5, 3, 3.5],
        [2, 2.5, 3, 3.5, 4],
        [2.5, 3, 3.5, 4, 4.5],
        [3, 3.5, 4, 4.5, 5]]])
    expected = np.array([
        (((self.first - band) ** 2) + ((self.second - band) ** 2)) ** (1/2),
        (((self.first - band) ** 2) + ((self.second - band) ** 2)) ** (1/2)
    ])
    expected_2 = np.full((5, 5), -1.0)
    self.assertTrue((result.lookup(1, 0)[0].cells == expected).all())
    self.assertTrue((result.lookup(0, 0)[0].cells == expected_2).all())
def show_weather_bydate(self):
    self.weathdf['gap'] = self.weathdf['time_slotid'].apply(self.find_gap_by_timeslot)
    by_date = self.weathdf.groupby('time_date')
    size = len(by_date)
    col_len = row_len = math.ceil(math.sqrt(size))
    count = 1
    for name, group in by_date:
        ax = plt.subplot(row_len, col_len, count)
        # temp = np.empty(group['time_id'].shape[0])
        # temp.fill(2)
        # ax.plot(group['time_id'], group['gap']/group['gap'].max(), 'r', alpha=0.75)
        # ax.plot(group['time_id'], group['weather']/group['weather'].max())
        ax.bar(group['time_id'], group['weather'], width=1)
        ax.set_title(name)
        count = count + 1
    # plt.bar(group['time_id'], np.full(group['time_id'].shape[0], 5), width=1)
    plt.show()
    return
def _retrieve_sample(self, annotation):
    epsilon = 0.05
    high_val = 1 - epsilon
    low_val = 0 + epsilon
    coco_image = self._coco.loadImgs(annotation['image_id'])[0]
    image_path = os.path.join(self._config.data_dir['images'], coco_image['file_name'])
    image = utils.load_image(image_path)
    ann_mask = self._coco.annToMask(annotation)
    mask_categorical = np.full((ann_mask.shape[0], ann_mask.shape[1], self.num_classes()),
                               low_val, dtype=np.float32)
    mask_categorical[:, :, 0] = high_val  # every pixel begins as background
    class_index = self._cid_to_id[annotation['category_id']]
    mask_categorical[ann_mask > 0, class_index] = high_val
    mask_categorical[ann_mask > 0, 0] = low_val  # remove background label from pixels of this (non-bg) category
    return image, mask_categorical
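# Toy reproduction (editor's sketch) of the smoothed one-hot mask: np.full
# seeds every channel with low_val, then the background and annotated class
# channels are raised/lowered per pixel. A 3x3 mask with 3 classes:
import numpy as np

epsilon = 0.05
high_val, low_val = 1 - epsilon, 0 + epsilon
ann_mask = np.array([[0, 0, 1],
                     [0, 1, 1],
                     [0, 0, 0]])
num_classes, class_index = 3, 2

mask = np.full((3, 3, num_classes), low_val, dtype=np.float32)
mask[:, :, 0] = high_val                    # every pixel starts as background
mask[ann_mask > 0, class_index] = high_val  # annotated pixels get their class
mask[ann_mask > 0, 0] = low_val             # ...and lose the background label
print(mask[1, 2])  # [0.05 0.05 0.95] -> class 2 wins at an annotated pixel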
def round(self, decimals=0, out=None):
    """
    Return an array with each element rounded to the given number of decimals.

    Refer to `numpy.around` for full documentation.

    See Also
    --------
    numpy.around : equivalent function
    """
    result = self._data.round(decimals=decimals, out=out).view(type(self))
    if result.ndim > 0:
        result._mask = self._mask
        result._update_from(self)
    elif self._mask:
        # Return masked when the scalar is masked
        result = masked
    # No explicit output: we're done
    if out is None:
        return result
    if isinstance(out, MaskedArray):
        out.__setmask__(self._mask)
    return out
def reshape(a, new_shape, order='C'):
    """
    Returns an array containing the same data with a new shape.

    Refer to `MaskedArray.reshape` for full documentation.

    See Also
    --------
    MaskedArray.reshape : equivalent function
    """
    # We can't use 'frommethod': it whines about some of the parameters.
    try:
        return a.reshape(new_shape, order=order)
    except AttributeError:
        _tmp = narray(a, copy=False).reshape(new_shape, order=order)
        return _tmp.view(MaskedArray)
def dump(a, F):
    """
    Pickle a masked array to a file.

    This is a wrapper around ``pickle.dump``.

    Parameters
    ----------
    a : MaskedArray
        The array to be pickled.
    F : str or file-like object
        The file to pickle `a` to. If a string, the full path to the file.
    """
    if not hasattr(F, 'readline'):
        # Pickle streams are binary, so the file must be opened in 'wb' mode.
        F = open(F, 'wb')
    return pickle.dump(a, F)
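# Round-trip sketch (editor's example) using an in-memory buffer instead of a
# path. Masked arrays pickle like any other ndarray subclass, so plain pickle
# suffices:
import pickle
from io import BytesIO
import numpy as np

a = np.ma.array([1.0, 2.0, 3.0], mask=[False, True, False])
buf = BytesIO()
pickle.dump(a, buf)
buf.seek(0)
b = pickle.load(buf)
print(b)                # [1.0 -- 3.0]
print(b.mask.tolist())  # [False, True, False]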
def create_merge_multiple(save_path, creators, shuffle=True):
    n_sample_total = 0
    creator_indices = []
    for i, creator in enumerate(creators):
        creator._read_list()
        n_sample_total += creator.n_samples
        # np.int is a removed alias; the builtin int works as a dtype.
        creator_indices.append(np.full(creator.n_samples, i, dtype=int))
    creator_indices = np.concatenate(creator_indices)
    if shuffle:
        np.random.shuffle(creator_indices)
    print('Start creating dataset with {} examples. Output path: {}'.format(
        n_sample_total, save_path))
    writer = tf.python_io.TFRecordWriter(save_path)
    count = 0
    for i in range(n_sample_total):
        creator = creators[creator_indices[i]]
        example = creator._create_next_sample()
        if example is not None:
            writer.write(example.SerializeToString())
            count += 1
        if i > 0 and i % 100 == 0:
            print('Progress %d / %d' % (i, n_sample_total))
    print('Done creating %d samples' % count)
def list_to_padded_tokens(dialogues, tokenizer):
    # Compute the length of each dialogue
    seq_length = [len(d) for d in dialogues]
    # Get the batch dimensions
    batch_size = len(dialogues)
    max_seq_length = max(seq_length)
    # Initialize the numpy array with the padding token
    padded_tokens = np.full((batch_size, max_seq_length), tokenizer.padding_token, dtype=np.int32)
    # Fill the padded array with word ids
    for i, (one_path, l) in enumerate(zip(dialogues, seq_length)):
        padded_tokens[i, 0:l] = one_path
    return padded_tokens, seq_length
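# Usage sketch, assuming list_to_padded_tokens is in scope; the tokenizer here
# is a made-up stand-in for whatever object supplies padding_token:
import numpy as np

class DummyTokenizer:
    padding_token = 0  # hypothetical pad id

dialogues = [[4, 8, 15], [16, 23], [42]]
padded, lengths = list_to_padded_tokens(dialogues, DummyTokenizer())
print(padded)
# [[ 4  8 15]
#  [16 23  0]
#  [42  0  0]]
print(lengths)  # [3, 2, 1]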
def __parse_pairs__(self, filepath, delimiter=',', target_col=2, column_names=list(), sequence_length=None):
    assert("target" in column_names)
    with open(filepath, "r") as f:
        lines = f.readlines()
    try:
        if sequence_length is None:
            dataframe = pd.read_csv(filepath, sep=delimiter, skip_blank_lines=True,
                                    header=None, names=column_names, index_col=False)
            sequence_length = np.asarray(dataframe[["i", "j"]]).max()
    except ValueError:
        return None
    data = np.full((sequence_length, sequence_length), np.nan, dtype=np.double)
    np.fill_diagonal(data, Params.DISTANCE_WITH_ITSELF)
    for line in lines:
        elements = line.rstrip("\r\n").split(delimiter)
        i, j, k = int(elements[0]) - 1, int(elements[1]) - 1, float(elements[target_col])
        data[i, j] = data[j, i] = k
    if np.isnan(data).any():
        # sequence_length is wrong or the input file has missing pairs
        warnings.warn("Warning: Pairs of residues are missing from the contacts text file")
        warnings.warn("Number of missing pairs: %i " % np.isnan(data).sum())
    return data
def extended_2d_fancy_indexing(arr, sl1, sl2, value_of_nan):
    new_shape = tuple([sl1.stop - sl1.start, sl2.stop - sl2.start] + list(arr.shape[2:]))
    result = np.full(new_shape, value_of_nan, dtype=arr.dtype)
    # Clip the requested window to the bounds of the source array.
    x_lower = 0 if sl1.start < 0 else sl1.start
    x_upper = arr.shape[0] if sl1.stop > arr.shape[0] else sl1.stop
    y_lower = 0 if sl2.start < 0 else sl2.start
    y_upper = arr.shape[1] if sl2.stop > arr.shape[1] else sl2.stop
    # Destination offsets within the padded result (the original
    # `-sl.stop + (sl.stop - sl.start)` simplifies to `-sl.start`).
    new_x_lower = max(0, -sl1.start)
    new_x_upper = new_x_lower + (x_upper - x_lower)
    new_y_lower = max(0, -sl2.start)
    new_y_upper = new_y_lower + (y_upper - y_lower)
    if len(result.shape) == 2:
        result[new_x_lower:new_x_upper, new_y_lower:new_y_upper] = arr[x_lower:x_upper, y_lower:y_upper]
    elif len(result.shape) == 3:
        result[new_x_lower:new_x_upper, new_y_lower:new_y_upper, :] = arr[x_lower:x_upper, y_lower:y_upper, :]
    else:
        raise WrongTensorShapeError()
    return result
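# Usage sketch, assuming the function above is in scope: a window hanging off
# the edge of the source array comes back padded with the fill value.
import numpy as np

arr = np.arange(9, dtype=float).reshape(3, 3)
# 3x3 window whose top-left corner sits one row and one column outside arr.
window = extended_2d_fancy_indexing(arr, slice(-1, 2), slice(-1, 2), np.nan)
print(window)
# [[nan nan nan]
#  [nan  0.  1.]
#  [nan  3.  4.]]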
def select_action(self, t, greedy_action_func, action_value=None):
    a = greedy_action_func()
    if self.ou_state is None:
        if self.start_with_mu:
            self.ou_state = np.full(a.shape, self.mu, dtype=np.float32)
        else:
            # Draw the initial state from the process's stationary distribution.
            sigma_stable = (self.sigma /
                            np.sqrt(2 * self.theta - self.theta ** 2))
            self.ou_state = np.random.normal(
                size=a.shape,
                loc=self.mu, scale=sigma_stable).astype(np.float32)
    else:
        self.evolve()
    noise = self.ou_state
    self.logger.debug('t:%s noise:%s', t, noise)
    return a + noise
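# The evolve() step is not shown above; a common discrete-time
# Ornstein-Uhlenbeck update consistent with the stationary-distribution
# initialization would be the following (editor's sketch, not the library's
# actual implementation; parameter names mirror the snippet):
import numpy as np

class OUNoise:
    def __init__(self, shape, mu=0.0, theta=0.15, sigma=0.3):
        self.mu, self.theta, self.sigma = mu, theta, sigma
        self.state = np.full(shape, mu, dtype=np.float32)

    def evolve(self):
        # x_{t+1} = x_t + theta * (mu - x_t) + sigma * N(0, 1)
        self.state += (self.theta * (self.mu - self.state)
                       + self.sigma * np.random.normal(size=self.state.shape)
                       ).astype(np.float32)
        return self.state

noise = OUNoise(shape=(2,))
samples = np.array([noise.evolve().copy() for _ in range(10000)])
# The long-run std approaches sigma / sqrt(2 * theta - theta**2), i.e. the
# sigma_stable used in the snippet above.
print(samples.mean(), samples.std())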
def test_soft_copy_param(self):
    a = L.Linear(1, 5)
    b = L.Linear(1, 5)
    a.W.data[:] = 0.5
    b.W.data[:] = 1
    # a <- (1 - tau) * a + tau * b
    copy_param.soft_copy_param(target_link=a, source_link=b, tau=0.1)
    np.testing.assert_almost_equal(a.W.data, np.full(a.W.data.shape, 0.55))
    np.testing.assert_almost_equal(b.W.data, np.full(b.W.data.shape, 1.0))
    copy_param.soft_copy_param(target_link=a, source_link=b, tau=0.1)
    np.testing.assert_almost_equal(
        a.W.data, np.full(a.W.data.shape, 0.595))
    np.testing.assert_almost_equal(b.W.data, np.full(b.W.data.shape, 1.0))
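# The arithmetic being checked: 0.9 * 0.5 + 0.1 * 1 = 0.55 after one soft
# update, then 0.9 * 0.55 + 0.1 * 1 = 0.595 after a second. A framework-free
# sketch of the same Polyak averaging (editor's example, not the library's code):
import numpy as np

def soft_copy(target, source, tau):
    # target <- (1 - tau) * target + tau * source, in place
    target *= 1.0 - tau
    target += tau * source

a = np.full((5, 1), 0.5)
b = np.full((5, 1), 1.0)
soft_copy(a, b, tau=0.1)
print(np.allclose(a, 0.55))   # True
soft_copy(a, b, tau=0.1)
print(np.allclose(a, 0.595))  # True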