def get_next_note_from_note(self, note):
"""Given a note, uses the model to predict the most probable next note.
Args:
note: A one-hot encoding of the note.
Returns:
Next note in the same format.
"""
with self.graph.as_default():
with tf.variable_scope(self.scope, reuse=True):
singleton_lengths = np.full(self.batch_size, 1, dtype=int)
input_batch = np.reshape(note,
(self.batch_size, 1, rl_tuner_ops.NUM_CLASSES))
softmax, self.state_value = self.session.run(
[self.softmax, self.state_tensor],
{self.melody_sequence: input_batch,
self.initial_state: self.state_value,
self.lengths: singleton_lengths})
return self.get_note_from_softmax(softmax)
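# A minimal usage sketch for the method above, assuming a model built with batch_size == 1;
# NUM_CLASSES and `model` are hypothetical stand-ins for what the surrounding project defines
# (the real class count comes from rl_tuner_ops.NUM_CLASSES).
import numpy as np
NUM_CLASSES = 38                                      # hypothetical vocabulary size
note = np.zeros(NUM_CLASSES, dtype=np.float32)
note[5] = 1.0                                         # one-hot encoding of note index 5
# next_note = model.get_next_note_from_note(note)    # `model` is a hypothetical instance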
# finite_difference.py (project: house-of-enlightenment)
def set_pixels(self, pixels):
hsv = np.full((self.X_MAX, self.Y_MAX, 3), 0xFF, dtype=np.uint8)
hsv[:, :, self.wave_type] = self.pixels[2] / 0xFFFF * 0xFF
if self.wave_type == self.VALUE:
hsv[:, :, 1] = 0
if self.darken_mids:
hsv[:, :, 2] = np.abs(self.pixels[2] - (0xFFFF >> 1)) / 0xFFFF * 0xFF
rgb = color_utils.hsv2rgb(hsv)
pixels[:self.X_MAX, :self.Y_MAX] = rgb
self.pixels.pop(0)
##
# Calculate next frame of explicit finite difference wave
#
def basic_check(self):
# TODO Ghi: check the microstructure model is compatible.
# if we want to be strict, only IndependentSphere should be valid, but in practice any
# sphere model that has a radius will work
if not hasattr(self.layer.microstructure, "radius"):
raise SMRTError("Only a microstructure_model that defines a `radius` can be used with Rayleigh scattering")
# The phase function is inherited from Rayleigh // Don't remove the commented code
# def phase(self, m, mhu):
# The ke function is inherited from Rayleigh // Don't remove the commented code
# def ke(self, mhu):
# return np.full(2*len(mhu), self.ks+self.ka)
# The effective_permittivity is inherited from Rayleigh // Don't remove the commented code
# def effective_permittivity(self):
# return self._effective_permittivity
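# The commented-out `ke` above documents what the inherited Rayleigh implementation returns:
# a constant extinction coefficient (scattering + absorption) repeated over 2 * len(mhu) entries.
# A standalone sketch of that pattern, with illustrative values for ks, ka and the cosines mhu:
import numpy as np
ks, ka = 0.3, 0.05                  # illustrative scattering / absorption coefficients
mhu = np.linspace(0.1, 1.0, 8)      # illustrative stream cosines
ke = np.full(2 * len(mhu), ks + ka)
print(ke.shape)                     # (16,) -- the constant ks + ka repeated 2 * len(mhu) times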
def test_allreduce_hint(hetr_device, config):
if hetr_device == 'gpu':
if 'gpu' not in ngt.transformer_choices():
pytest.skip("GPUTransformer not available")
input = config['input']
device_id = config['device_id']
axis_A = ng.make_axis(length=4, name='axis_A')
parallel_axis = ng.make_axis(name='axis_parallel', length=16)
with ng.metadata(device=hetr_device,
device_id=device_id,
parallel=parallel_axis):
var_A = ng.variable(axes=[axis_A], initial_value=UniformInit(1, 1))
var_B = ng.variable(axes=[axis_A], initial_value=UniformInit(input, input))
var_B.metadata['reduce_func'] = 'sum'
var_B_mean = var_B / len(device_id)
var_minus = (var_A - var_B_mean)
with closing(ngt.make_transformer_factory('hetr', device=hetr_device)()) as hetr:
out_comp = hetr.computation(var_minus)
result = out_comp()
np_result = np.full((axis_A.length), config['expected_result'], np.float32)
np.testing.assert_array_equal(result, np_result)
def test_fixed_lr(iter_buf, max_iter, base_lr):
# set up
name = 'fixed'
params = {'name': name,
'max_iter': max_iter,
'base_lr': base_lr}
# execute
naive_lr = np.full(max_iter, base_lr)
lr_op = lr_policies[name]['obj'](params)(iter_buf)
with ExecutorFactory() as ex:
compute_lr = ex.executor(lr_op, iter_buf)
ng_lr = [compute_lr(i).item(0) for i in range(max_iter)]
# compare
ng.testing.assert_allclose(ng_lr, naive_lr, atol=1e-4, rtol=1e-3)
def plot_spikepattern(spike_trains, sim_time):
"""Plot set of spike trains (spike pattern)"""
plt.ioff()
plt.figure()
for i in xrange(len(spike_trains)):
spike_times = spike_trains[i].value
plt.plot(spike_times, np.full(len(spike_times), i,
dtype=np.int), 'k.')
plt.xlim((0.0, sim_time))
plt.ylim((0, len(spike_trains)))
plt.xlabel('Time (ms)')
plt.ylabel('Neuron index')
plt.show()
plt.ion()
def plot_spiker(record, spike_trains_target, neuron_index=0):
"""Plot spikeraster and target timings for given neuron index"""
plt.ioff()
spike_trains = [np.array(i.spiketrains[neuron_index])
for i in record.segments]
n_segments = record.size['segments']
plt.figure()
for i in xrange(len(spike_trains)):
plt.plot(spike_trains[i], np.full(len(spike_trains[i]), i + 1,
dtype=np.int), 'k.')
target_timings = spike_trains_target[neuron_index].value
plt.plot(target_timings, np.full(len(target_timings), 1.025 * n_segments),
'kx', markersize=8, markeredgewidth=2)
plt.xlim((0., np.float(record.segments[0].t_stop)))
plt.ylim((0, np.int(1.05 * n_segments)))
plt.xlabel('Time (ms)')
plt.ylabel('Trials')
plt.title('Output neuron {}'.format(neuron_index))
plt.show()
plt.ion()
def __init__(self, index):
self.name = 'Walkington(tetrahedron, {})'.format(index)
if index == 'p5':
self.degree = 5
self.weights = 6 * numpy.concatenate([
numpy.full(4, 0.018781320953002641800),
numpy.full(4, 0.012248840519393658257),
numpy.full(6, 0.0070910034628469110730),
])
self.bary = numpy.concatenate([
_xi1(0.31088591926330060980),
_xi1(0.092735250310891226402),
_xi11(0.045503704125649649492),
])
self.points = self.bary[:, 1:]
return
# Default: scheme from general simplex
w = walkington.Walkington(3, index)
self.weights = w.weights
self.bary = w.bary
self.points = w.points
self.degree = w.degree
return
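# A quick sanity check on the hard-coded 'p5' branch above: the three numpy.full weight
# blocks (4 + 4 + 6 orbit members) sum to 1 after the factor of 6, consistent with the
# weights being normalised over the reference tetrahedron.
import numpy
weights = 6 * numpy.concatenate([
numpy.full(4, 0.018781320953002641800),
numpy.full(4, 0.012248840519393658257),
numpy.full(6, 0.0070910034628469110730),
])
print(weights.sum())   # ~1.0 up to floating-point rounding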
def _gen5_3(n):
'''Spherical product Lobatto formula.
'''
data = []
s = sqrt(n+3)
for k in range(1, n+1):
rk = sqrt((k+2) * (n+3))
Bk = fr(2**(k-n) * (n+1), (k+1) * (k+2) * (n+3))
arr = [rk] + (n-k) * [s]
data += [
(Bk, pm_array0(n, arr, range(k-1, n)))
]
B0 = 1 - sum([item[0]*len(item[1]) for item in data])
data += [
(B0, numpy.full((1, n), 0))
]
return 5, data
def setup_rw(params):
pore = get_pore(**params)
rw = RandomWalk(pore, **params)
rw.add_wall_binding(t=params.t_bind, p=params.p_bind, eps=params.eps_bind)
# define non-standard stopping criteria
Tmax = params.Tmax
Rmax = params.Rmax
def success(self, r, z):
return self.in_channel(r, z) & (z <= params.zstop)
def fail(self, r, z):
if self.t > Tmax:
return np.full(r.shape, True, dtype=bool)
toolong = (self.times[self.alive] + self.bind_times[self.alive]) > 5e6
toofar = r**2 + z**2 > Rmax**2
return toolong | toofar
rw.set_stopping_criteria(success, fail)
return rw
########### STREAMLINE PLOT ###########
def move_ellipses(self, coll, cyl=False):
xz = self.x[:, ::2] if not cyl else np.column_stack(
[np.sqrt(np.sum(self.x[:, :2]**2, 1)), self.x[:, 2]])
coll.set_offsets(xz)
#inside = self.inside_wall()
#margin = np.nonzero(self.alive)[0][self.inside_wall(2.)]
colors = np.full((self.N,), "b", dtype=str)
#colors[margin] = "r"
colors[self.success] = "k"
colors[self.fail] = "k"
colors[self.alive & ~self.can_bind] = "r"
#colors = [("r" if inside[i] else "g") if margin[i] else "b" for i in range(self.N)]
coll.set_facecolors(colors)
#y = self.x[:, 1]
#d = 50.
#sizes = self.params.rMolecule*(1. + y/d)
#coll.set(widths=sizes, heights=sizes)
def sample_scalar(self, shape, a):
AMAX = 30
if a > AMAX:
return np.random.poisson(a, shape)
k = 1
K = np.full(shape, k)
s = a/np.expm1(a)
S = s
U = np.random.random(shape)
new = S < U
while np.any(new):
k += 1
K[new] = k
s = s*a/float(k)
S = S + s
new = S < U
return K
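# The method above reads as inverse-CDF sampling of a zero-truncated Poisson: s starts at
# a / (e^a - 1) = P(K = 1), each loop iteration multiplies by a / k to get P(K = k), and S
# accumulates the CDF until it exceeds the uniform draw U. For a > AMAX a plain Poisson is
# used instead, since P(K = 0) is then negligible. A self-contained sketch of the same idea
# (an interpretation, not the project's code) that checks the empirical mean against the
# closed form a / (1 - exp(-a)):
import numpy as np

def truncated_poisson_sketch(shape, a):
    k = 1
    K = np.full(shape, k)
    s = a / np.expm1(a)          # P(K = 1) for the zero-truncated Poisson
    S = s                        # running CDF
    U = np.random.random(shape)
    new = S < U
    while np.any(new):
        k += 1
        K[new] = k
        s = s * a / float(k)     # P(K = k) = P(K = k - 1) * a / k
        S = S + s
        new = S < U
    return K

samples = truncated_poisson_sketch((100000,), 2.0)
print(samples.min())                                   # >= 1 by construction
print(samples.mean(), 2.0 / (1.0 - np.exp(-2.0)))      # empirical vs. analytic mean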
def values(cls, dataset, dimension, expanded, flat):
dimension = dataset.get_dimension(dimension)
idx = dataset.get_dimension_index(dimension)
data = dataset.data
if idx not in [0, 1] and not expanded:
return data[dimension.name].values
values = []
columns = list(data.columns)
arr = geom_to_array(data.geometry.iloc[0])
ds = dataset.clone(arr, datatype=cls.subtypes, vdims=[])
for i, d in enumerate(data.geometry):
arr = geom_to_array(d)
if idx in [0, 1]:
ds.data = arr
values.append(ds.interface.values(ds, dimension))
else:
arr = np.full(len(arr), data.iloc[i, columns.index(dimension.name)])
values.append(arr)
values.append([np.NaN])
return np.concatenate(values[:-1]) if values else np.array([])
def create_validTest_data(self):
for i in range(len(self.validTestQ)):
qId = self.validTestQ[i]
item = self.corpus.QAnswers[qId].itemId
question = self.corpus.QAnswers[qId].qFeature
answer_list = [qId, self.validTestNa[i]]
Pairwise = self.create_dense_pairwise(item, qId)
Question = self.create_sparse_one(qFeature = question)
Answer = self.create_sparse_one(answer_list = answer_list)
Review = self.Review[item]
TermtoTermR = self.create_sparse_two(item, qFeature = question)
TermtoTermP = self.create_sparse_two(item, answer_list = answer_list)
Question_I = (Question[0],
              Question[1] if Question[1].size == 1 and Question[1][0] == 0
              else np.full((Question[1].size), 1.0 / np.sqrt(Question[1].size)),
              Question[2])
Answer_I = (Answer[0],
            Answer[1] if Answer[1].size == 1 and Answer[1][0] == 0
            else np.full((Answer[1].size), 1.0 / np.sqrt(Answer[1].size)),
            Answer[2])
Review_I = (Review[0], np.full((Review[1].size), 1.0/np.sqrt(Review[1].size)), Review[2])
self.validTestM.append((Pairwise, Question, Answer, Review, TermtoTermR, TermtoTermP, Question_I, Answer_I, Review_I))
def setUp(self):
super(BridgeTest, self).setUp()
self.batch_size = 4
self.encoder_cell = tf.contrib.rnn.MultiRNNCell(
[tf.contrib.rnn.GRUCell(4), tf.contrib.rnn.GRUCell(8)])
self.decoder_cell = tf.contrib.rnn.MultiRNNCell(
[tf.contrib.rnn.LSTMCell(16), tf.contrib.rnn.GRUCell(8)])
final_encoder_state = nest.map_structure(
lambda x: tf.convert_to_tensor(
value=np.random.randn(self.batch_size, x),
dtype=tf.float32),
self.encoder_cell.state_size)
self.encoder_outputs = EncoderOutput(
outputs=tf.convert_to_tensor(
value=np.random.randn(self.batch_size, 10, 16), dtype=tf.float32),
attention_values=tf.convert_to_tensor(
value=np.random.randn(self.batch_size, 10, 16), dtype=tf.float32),
attention_values_length=np.full([self.batch_size], 10),
final_state=final_encoder_state)
def dominant_sets(graph_mat, max_k=0, tol=1e-5, max_iter=1000):
graph_cardinality = graph_mat.shape[0]
if max_k == 0:
max_k = graph_cardinality
clusters = np.zeros(graph_cardinality)
already_clustered = np.full(graph_cardinality, False, dtype=np.bool)
for k in range(max_k):
if graph_cardinality - already_clustered.sum() <= ceil(0.05 * graph_cardinality):
break
# 1000 is added to obtain more similar values when x is normalized
# x = np.random.random_sample(graph_cardinality) + 1000.0
x = np.full(graph_cardinality, 1.0)
x[already_clustered] = 0.0
x /= x.sum()
y = replicator(graph_mat, x, np.where(~already_clustered)[0], tol, max_iter)
cluster = np.where(y >= 1.0 / (graph_cardinality * 1.5))[0]
already_clustered[cluster] = True
clusters[cluster] = k
clusters[~already_clustered] = k
return clusters
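# The loop above relies on a `replicator` helper that is not shown in this snippet. In
# dominant-set clustering (Pavan & Pelillo) that step is usually the discrete replicator
# dynamics iterated to convergence; the sketch below is only a guess at what the missing
# helper does, written with the same call signature.
import numpy as np

def replicator_sketch(A, x, support, tol=1e-5, max_iter=1000):
    # Discrete replicator dynamics on the affinity matrix A. `support` mirrors the original
    # call (indices still unclustered); entries outside it start at zero and the
    # multiplicative update keeps them at zero, so it is not used explicitly here.
    for _ in range(max_iter):
        Ax = A.dot(x)
        denom = x.dot(Ax)
        if denom == 0.0:
            return x                     # no affinity mass left on the support
        x_new = x * Ax / denom           # x_i <- x_i * (A x)_i / (x^T A x)
        if np.linalg.norm(x_new - x) < tol:
            return x_new
        x = x_new
    return x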
def _search_ann(self, search_keys, dnd_keys, update_LRU_order):
batch_indices = []
for act, ann in self.anns.items():
# These are the indices we get back from ANN search
indices = ann.query(search_keys)
log.debug("ANN indices for action {}: {}".format(act, indices))
# Create a numpy array filled with the index of the corresponding action vector
action_indices = np.full(indices.shape, self.action_vector.index(act))
log.debug("Action indices for action {}: {}".format(act, action_indices))
# Riffle (interleave) the two arrays
tf_indices = self._riffle_arrays(action_indices, indices)
batch_indices.append(tf_indices)
# Very important part: Modify LRU Order here
# Doesn't work without tabular update of course!
if update_LRU_order == 1:
_ = [self.tf_index__state_hash[act][i] for i in indices.ravel()]
np_batch = np.asarray(batch_indices)
log.debug("Batch update indices: {}".format(np_batch))
# Reshaping to gather_nd compatible format
final_indices = np.asarray([np_batch[:, j, :, :] for j in range(np_batch.shape[1])], dtype=np.int32)
return final_indices
def contains(self, other):
if isinstance(other, Point):
x = other._x
elif isinstance(other, np.ndarray):
x = other
elif isinstance(other, Polygon):
x = _points_to_array(other.vertices)
return np.all(self.contains(x))
else:
raise TypeError("P must be point or ndarray")
# keep track of whether each point is contained in a face
bools = np.full(x.shape[0], False, dtype=bool)
for f in self.faces:
bools = np.logical_or(bools, f.contains(x))
return bools