def load_h5_namedtuple(group, namedtuple):
    """Load a single namedtuple from an h5 group."""
    args = []
    for field in namedtuple._fields:
        try:
            field_value = getattr(group, field).read()
            if field_value.shape == ():
                field_value = np.asscalar(field_value)
        except tables.NoSuchNodeError:
            try:
                field_value = getattr(group._v_attrs, field)
            except AttributeError:
                field_value = None
        args.append(field_value)
    return namedtuple(*args)
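A minimal usage sketch, assuming an HDF5 file whose group holds nodes or attributes matching the namedtuple fields (the file name and group path below are hypothetical):

import collections
import tables  # PyTables, as used by the loader above

ModelState = collections.namedtuple('ModelState', ['weights', 'step'])

# hypothetical file and group layout
with tables.open_file('model.h5', mode='r') as h5:
    state = load_h5_namedtuple(h5.root.state, ModelState)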
Python np.asscalar() usage examples
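Note: np.asscalar, used throughout these examples, was deprecated in NumPy 1.16 and removed in NumPy 1.23. On current NumPy the equivalent is the ndarray .item() method, as in this small sketch:

import numpy as np

x = np.array(3.5)   # 0-d array
value = x.item()    # plain Python float; replaces np.asscalar(x)
assert isinstance(value, float)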
def json_sanitize(data):
    # This really doesn't make me happy. How many cases do we have to test?
    if (type(data) == float) or (type(data) == numpy.float64):
        # Handle floats specially
        if math.isnan(data):
            return "NaN"
        if data == float("+Inf"):
            return "inf"
        if data == float("-Inf"):
            return "-inf"
        return data
    elif hasattr(data, 'keys'):
        # Dictionary case (works for both Python 2 and Python 3 dicts)
        new_data = {}
        for k in data.keys():
            new_data[k] = json_sanitize(data[k])
        return new_data
    elif isinstance(data, str):
        # Strings define __iter__ in Python 3; pass them through unchanged
        return data
    elif hasattr(data, 'shape') and data.shape == ():
        # Numpy 0-d array; check this before the iterable case, since
        # ndarray defines __iter__ but iterating a 0-d array raises TypeError
        return np.asscalar(data)
    elif hasattr(data, '__iter__'):
        # Anything else that looks like a list
        new_data = []
        for d in data:
            new_data.append(json_sanitize(d))
        return new_data
    else:
        return data
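A quick sanity check of json_sanitize (assumes a NumPy version that still provides asscalar):

import math
import numpy
import numpy as np

payload = {
    'loss': float('nan'),             # -> the string "NaN"
    'score': numpy.float64('inf'),    # -> "inf"
    'step': np.array(7),              # 0-d array -> plain int 7
    'history': [1.0, float('-inf')],  # handled element-wise -> [1.0, '-inf']
}
print(json_sanitize(payload))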
def sin(Vo, Va, Freq=None, Td=0, Df=0, Phase=0, t=None):
    """
    SIN provides a damped sinusoidal waveform of the form
    Vo + Va * np.sin(2 * np.pi * Freq * t + Phase * (np.pi / 180))
    The waveform is:
        * t <= Td ==> Vo + Va * np.sin(Phase * (np.pi / 180))
        * t > Td  ==> Vo + Va * np.sin(2 * np.pi * Freq * (t - Td) + Phase * (np.pi / 180)) * np.exp(-(t - Td) * Df)
    :param Vo: offset
    :param Va: amplitude (peak) of the waveform
    :param Freq: frequency (Hz)
    :param Td: delay time (s)
    :param Df: damping factor (1/s)
    :param Phase: voltage phase (deg)
    :param t: array with times where the function has to be evaluated
    :return: the function values at times defined in t
    """
    # check presence of time array
    if t is None:
        raise TypeError('Missing time array')
    # check if t is scalar
    if isinstance(t, (int, float)):
        t = np.array([t])
    # check presence of Freq
    if Freq is None:
        Freq = 1 / t[-1]
    out = np.zeros_like(t)
    out[t <= Td] = Vo + Va * np.sin(Phase * (np.pi / 180))
    out[t > Td] = Vo + Va * np.sin(2 * np.pi * Freq * (t[t > Td] - Td) + Phase * (np.pi / 180)) * np.exp(-(t[t > Td] - Td) * Df)
    # if input is scalar convert out to scalar too
    if out.size == 1:
        out = np.asscalar(out)
    return out
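A short evaluation sketch of the SIN source (the parameter values are arbitrary):

import numpy as np

t = np.linspace(0.0, 5e-3, 500)  # 5 ms time base
v = sin(Vo=0.0, Va=1.0, Freq=1e3, Td=1e-3, Df=200.0, t=t)
print(v.shape)                               # (500,)
print(sin(Vo=0.0, Va=1.0, Freq=1e3, t=0.0))  # scalar in, scalar out: 0.0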
def validate(self):
    if np.asscalar(self.x_size.value) is None:
        self.x_size.value = 5
def __read_str(self, numchars=1, utf=None):
    """
    Read a string of a specific length.
    This is compatible with python 2 and python 3.
    """
    rawstr = np.asscalar(np.fromfile(self._fsrc,
                                     dtype='S%s' % numchars, count=1))
    if utf or (utf is None and PY_VER == 3):
        return rawstr.decode('utf-8')
    return rawstr
def __read_comment(self):
    """
    Read a single comment.
    The comment is stored as an Event in Segment 0, which is
    specifically for comments.
    ----------------------
    Returns an empty list.
    The returned object is already added to the Block.
    No ID number: always called from another method
    """
    # float64 -- timestamp (number of days since Dec 30th 1899)
    time = np.fromfile(self._fsrc, dtype=np.double, count=1)[0]
    # int16 -- length of next string
    numchars1 = np.asscalar(np.fromfile(self._fsrc,
                                        dtype=np.int16, count=1))
    # char * numchars -- the one who sent the comment
    sender = self.__read_str(numchars1)
    # int16 -- length of next string
    numchars2 = np.asscalar(np.fromfile(self._fsrc,
                                        dtype=np.int16, count=1))
    # char * numchars -- comment text
    text = self.__read_str(numchars2, utf=False)
    comment = Event(times=pq.Quantity(time, units=pq.d), labels=text,
                    sender=sender, file_origin=self._file_origin)
    self._seg0.events.append(comment)
    return []
def observe(self, y):
    self.lastobservation = y
    xa = self.xa[0:2:1, 0:1:1]
    Pa = self.Pa[0:2:1, 0:2:1]
    try:
        X, Wm, Wc = sigmaPoints(xa, Pa)
    except Exception:
        warnings.warn('Encountered a matrix that is not positive definite in the sigma points calculation at the observe step')
        Pa = nearpd(Pa)
        X, Wm, Wc = sigmaPoints(xa, Pa)
    hX, self.predictedobservation, Pyy = \
        unscentedTransform(X, Wm, Wc, self.ha)
    self.predictedobservation = np.asscalar(self.predictedobservation)
    Pyy = np.asscalar(Pyy)
    self.innovcov = Pyy
    x = self.xa[0, 0]
    Pxy = 0.
    Pvy = 0.
    M = np.shape(X)[1]
    for j in range(0, M):
        haImage = self.ha(X[:, j])
        Pxy += Wc[j] * (X[0, j] - x) * (haImage - self.predictedobservation)
        Pvy += Wc[j] * X[1, j] * haImage
    Pa = np.array(((Pxy,), (Pvy,), (0.,), (0.,)))
    K = Pa * (1. / Pyy)
    self.gain = K[0, 0]
    self.innov = y - self.predictedobservation
    self.xa += K * self.innov
    self.Pa -= np.dot(K, Pa.T)
    self.loglikelihood += UnscentedKalmanFilter.MINUS_HALF_LN_2PI - \
        .5 * (np.log(self.innovcov) + self.innov * self.innov / self.innovcov)
def format_time(ts):
    res = []
    for each in ts:
        res.append(std_time.strftime("%H:%M.%S", std_time.localtime(np.asscalar(np.int32(each)))))
    return res
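For example (output depends on the local timezone, so the values shown are only indicative):

import time as std_time  # the alias the function uses
import numpy as np

print(format_time([0, 90]))  # e.g. ['00:00.00', '00:01.30'] when running in UTC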
def get_date_range(df, timestamp_colkey):
    max_time = df[timestamp_colkey].max()
    min_time = df[timestamp_colkey].min()
    t_to = std_time.strftime("%d-%b-%Y", std_time.localtime(np.asscalar(np.int32(max_time))))
    t_from = std_time.strftime("%d-%b-%Y", std_time.localtime(np.asscalar(np.int32(min_time))))
    if t_to == t_from:
        return t_to
    return "{} - {}".format(t_from, t_to)
Simulate_Poisson.py (from the project Wasserstein-Learning-For-Point-Process by xiaoshuai09)
def generate_samples_marked(intensity, T, n):
    U = intensity.dim
    Sequences = []
    inds = np.arange(U)
    for i in range(n):
        seq = []
        t = 0
        while True:
            # draw a candidate inter-event time from the upper-bounding rate
            intens1 = intensity.getUpperBound(t, T, inds)
            dt = np.random.exponential(1 / sum(intens1))
            new_t = t + dt
            if new_t > T:
                break
            # accept the candidate with probability intensity(new_t) / bound
            intens2 = intensity.getValue(new_t, inds)
            u = np.random.uniform()
            if sum(intens2) / sum(intens1) > u:
                # sample the mark proportionally to the per-mark intensities
                x_sum = sum(intens2)
                norm_i = [x / x_sum for x in intens2]
                dim = np.nonzero(np.random.multinomial(1, norm_i))
                seq.append([new_t, np.asscalar(dim[0])])
            t = new_t
        if len(seq) > 1:
            Sequences.append(seq)
    return Sequences
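This is Ogata-style thinning: candidate event times are drawn from an upper-bounding rate and accepted with probability intensity(t) / bound, then a mark is sampled proportionally to the per-mark intensities. Below is a toy intensity object with the interface the sampler expects (the names dim, getUpperBound, and getValue are taken from the calls above; the class itself is a hypothetical stand-in for a homogeneous marked Poisson process):

import numpy as np

class ConstantIntensity:
    # homogeneous marked Poisson process: every mark fires at constant rate mu
    def __init__(self, dim, mu):
        self.dim = dim
        self.mu = mu

    def getUpperBound(self, t, T, inds):
        # a constant rate is its own upper bound on [t, T]
        return np.full(len(inds), self.mu)

    def getValue(self, t, inds):
        return np.full(len(inds), self.mu)

seqs = generate_samples_marked(ConstantIntensity(dim=3, mu=2.0), T=10.0, n=5)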
def _send_output(self, output, timestamp, name):
    """Send pipeline outputs through the LSL or OSC stream.
    NOT PER CHANNEL
    Args:
        output (scalar): output of the pipeline
        timestamp (float): timestamp
    """
    for out in self._output_threads:
        if isinstance(out, str):  # LSL outlet
            raise NotImplementedError
            # self._outlet.push_sample([output], timestamp=timestamp)
        else:  # OSC output stream
            if USE_LIBLO:
                if np.array(output).size == 1:
                    new_output = [('f', np.asscalar(output))]
                else:
                    new_output = [('f', x) for x in output[:]]
                message = Message('/{}'.format(name), *new_output)
                # send(out, Bundle(timestamp, message))
                send(out, message)
            else:
                raise NotImplementedError
                # self._client.send_message('/{}'.format(name), output[:])
    if self.verbose:
        print('Output: {}'.format(output))
def label_to_idx(labels, label):
    center_idx_bool = labels == label
    return np.asscalar(np.where(center_idx_bool)[0]), center_idx_bool
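For instance, with a flat label array (np.asscalar requires the label to occur exactly once):

import numpy as np

labels = np.array([3, 1, 7, 4])
idx, mask = label_to_idx(labels, 7)
print(idx)   # 2
print(mask)  # [False False  True False]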
def _process_video(filename, coder):
    """
    Process a single video file using FFmpeg
    Args:
        filename: path to the video file
        coder: instance of ImageCoder to provide TensorFlow image coding utils.
    Returns:
        video_buffer: numpy array with the video frames
        mask_buffer: activity mask of the video frames
        frame_h: integer, video height in pixels.
        frame_w: integer, video width in pixels.
        seq_length: sequence length (non-zero frames)
    """
    video, raw_h, raw_w, seq_length = coder.decode_video(filename)
    video = video.astype(np.uint8)
    assert len(video.shape) == 4
    assert video.shape[3] == 3
    frame_h, frame_w = video.shape[1], video.shape[2]
    # generate mask from annotations
    groups = filename.split('/')
    annot_file_name = groups[-1].split('.')[0] + '.xgtf'
    annot_file_path = os.path.join(FLAGS.annotation_directory, groups[-2], annot_file_name)
    parsed_bbx = _parse_annotation_xml(annot_file_path)
    if FLAGS.resize_h != -1:
        parsed_bbx = _resize_bbx(parsed_bbx, raw_h, raw_w)
    masks = _bbx_to_mask(parsed_bbx, seq_length, FLAGS.resize_h, FLAGS.resize_w)
    encoded_frames_seq = []
    encoded_masks_seq = []
    for idx in range(seq_length):
        encoded_frames_seq.append(coder.encode_frame(video[idx, :, :, :]))
        encoded_masks_seq.append(coder.encode_mask(masks[idx, :, :, :]))
    return encoded_frames_seq, encoded_masks_seq, frame_h, frame_w, np.asscalar(seq_length)
def _get_trial(root, combo):
    path = get_path(root, combo)
    mat = spio.loadmat(path)
    data = mat['data'].astype(np.float32)
    gesture = np.repeat(label_to_gesture(np.asscalar(mat['label'].astype(np.int))), len(data))
    subject = np.repeat(np.asscalar(mat['subject'].astype(np.int)), len(data))
    return Trial(data=data, gesture=gesture, subject=subject)
def assign_scalar(message, value):
    """
    Adds the appropriate scalar type of value to the protobuf message
    """
    if value is None:
        message.null_val = True
    elif isinstance(value, np.generic):
        assign_scalar(message, np.asscalar(value))
    elif isinstance(value, (str, six.text_type)):
        message.string_val = value
    elif isinstance(value, np.dtype):
        message.dtype_val = dtype_to_protobuf(value)
    elif isinstance(value, float):
        message.double_val = value
    elif isinstance(value, bool):
        message.bool_val = value
    elif isinstance(value, six.integer_types):
        message.int_val = value
    elif isinstance(value, slice):
        slice_val = ops_pb.Slice()
        if value.start is not None:
            slice_val.start.value = value.start
        if value.step is not None:
            slice_val.step.value = value.step
        if value.stop is not None:
            slice_val.stop.value = value.stop
        message.slice_val.CopyFrom(slice_val)
    elif isinstance(value, dict):
        for key in value:
            assign_scalar(message.map_val.map[key], value[key])
        # This encodes an empty dict for deserialization
        assign_scalar(message.map_val.map['_ngraph_map_sentinel_'], '')
    elif isinstance(value, Axis):
        message.axis.CopyFrom(axis_to_protobuf(value))
    elif isinstance(value, AxesMap):
        message.axes_map.CopyFrom(axes_map_to_protobuf(value))
    else:
        raise unhandled_scalar_value(value)
def f(self, X, W):
    """Function value.
    """
    X_ = list(X)
    n = len(X_)
    K = W[0].shape[1]  # The number of components
    f = 0.0
    for k in range(K):
        for i in range(n):
            wik = W[i][:, [k]]
            for j in range(n):
                if self.pred_comp[i][j] > 0:
                    wjk = W[j][:, [k]]
                    ti = np.dot(X_[i], wik)
                    tj = np.dot(X_[j], wjk)
                    f += np.asscalar(np.dot(ti.T, tj))
        # Deflate for next component
        if k < K - 1:  # Do not deflate for last component
            for i in range(n):
                wi = W[i][:, k]
                ti = np.dot(X_[i], wi)
                titi = np.asscalar(np.dot(ti.T, ti))
                if titi > consts.TOLERANCE:
                    pi = np.dot(X_[i].T, ti) / titi
                    X_[i] = X_[i] - np.dot(ti, pi.T)  # Deflate
                # else:
                #     pi = np.zeros_like(wi)
    return f
def compute_error(A_in, Ag_in):
    A = A_in
    Ag = Ag_in
    # realign
    D = A.shape[1]
    inner = np.zeros((D, D))
    for i in range(D):
        for j in range(D):
            inner[i, j] = np.asscalar(A[:, i].transpose() * Ag[:, j]) / (norm(A[:, i]) * norm(Ag[:, j]))
    max_idx = np.argmax(inner, axis=0)
    P = np.asmatrix(np.zeros((D, D)))
    for i in range(D):
        P[i, max_idx[i]] = 1
    # normalize the columns of A and A^*
    inv_norm_A = np.asarray(1.0 / np.apply_along_axis(norm, 0, A))
    A = A * np.diag(inv_norm_A)
    inv_norm_Ag = np.asarray(1.0 / np.apply_along_axis(norm, 0, Ag))
    Ag = Ag * np.diag(inv_norm_Ag)
    u = np.asmatrix(np.ones((1, D)))
    # for each A_i^* we try to find the A_i that is closest to A_i^*
    error = 0
    for i in range(D):
        Ag_i = Ag[:, i]
        inner_product = np.asmatrix(Ag_i.transpose() * A)
        norm_A = np.asmatrix(np.diag(A.transpose() * A))
        z = np.divide(inner_product, norm_A).transpose()
        z = np.asarray(z).flatten().transpose()
        scalar = np.diag(z)
        As = A * scalar
        diff = np.apply_along_axis(norm, 0, As - Ag_i * u)
        # min_idx = np.argmin(diff)
        difmin = np.amin(diff)
        difmin = difmin * difmin
        error = error + difmin
    return error
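The function relies on np.matrix semantics (so * is matrix multiplication), and identical input dictionaries should give an error of numerically zero, assuming norm is numpy.linalg.norm as the calls above suggest:

import numpy as np
from numpy.linalg import norm

A = np.asmatrix(np.random.randn(8, 4))
print(compute_error(A, A))  # ~0.0 up to floating-point error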
def mean(self, *, axis=1):
    """Returns the mean of each signal in AnalogSignalArray."""
    try:
        means = np.mean(self._ydata, axis=axis).squeeze()
        if means.size == 1:
            return np.asscalar(means)
        return means
    except IndexError:
        raise IndexError("Empty AnalogSignalArray cannot calculate mean")