def __init__(self):
    # Initialise the timer.Timer base class; this subclass adds no extra state here.
    timer.Timer.__init__(self)
Example source code using the Python Timer() class
def setRefreshTimer(self, tocall):
    """Install the daily EPG-refresh timer.

    tocall: callable invoked when the refresh should run; it is also fired
        immediately when a refresh was missed (see below).
    """
    # Build today's begin time from the configured hour/minute.
    now = localtime()
    begin = mktime(
        (now.tm_year, now.tm_mon, now.tm_mday,
         config.plugins.epgrefresh.begin.value[0],
         config.plugins.epgrefresh.begin.value[1],
         0, now.tm_wday, now.tm_yday, now.tm_isdst)
    )
    # If the last scan was finished before our timespan begins/began and
    # timespan began in the past fire the timer once (timer wouldn't do so
    # by itself)
    if config.plugins.epgrefresh.lastscan.value < begin and begin < time():
        tocall()
    refreshTimer = EPGRefreshTimerEntry(begin, tocall, nocheck = True)
    # repeat on every day of the week
    for weekday in range(7):
        refreshTimer.setRepeated(weekday)
    # We can be sure that whenever this function is called the timer list
    # was wiped, so just add a new timer
    self.addTimerEntry(refreshTimer)
def add(self, entry):
    """Recalculate the entry's schedule, log it, and enqueue it."""
    entry.timeChanged()
    message = "[EPGRefresh] Timer added " + str(entry)
    print(message)
    self.addTimerEntry(entry)
def make_td_probability_image(td, skip_steps=0, is_normalize=False):
    """Generate an image from Temporal Difference (td) events where each pixel
    holds the probability of a spike within a 1 millisecond time step
    (0.0 = never, 1.0 = every step).

    td is read from a binary file (refer to eventvision.Readxxx functions)
    td: eventvision.Events
    skip_steps: number of time steps to skip (to allow tracker to init to a
        more correct position)
    is_normalize: True to make the image more obvious (scales the maximum
        probability up to 1.0)
    """
    assert isinstance(td, ev.Events)
    combined_image = np.zeros((td.height, td.width), np.float32)
    offset_ts = td.data[0].ts + (skip_steps * 1000)
    num_time_steps = math.floor((td.data[-1].ts - offset_ts) / 1000)
    current_frame = np.zeros((td.height, td.width), np.uint8)
    # accumulate one binary (spiked / not spiked) frame per 1 ms window
    for start_ts in range(int(offset_ts), td.data[-1].ts, 1000):
        end_ts = start_ts + 1000
        frame_data = td.data[(td.data.ts >= start_ts) & (td.data.ts < end_ts)]
        current_frame.fill(0)
        current_frame[frame_data.y, frame_data.x] = 1
        combined_image = combined_image + current_frame
    if is_normalize:
        peak = np.max(combined_image)
        if peak > 0:  # guard: an all-zero image would otherwise produce NaNs
            combined_image = combined_image / peak
    elif num_time_steps > 0:  # guard: recording shorter than one time step
        combined_image = combined_image / num_time_steps
    return combined_image
def show_em(self):
    """Displays the EM events (grayscale ATIS events)"""
    frame_length = 24e3  # timestamp units covered by one displayed frame
    t_max = self.data.ts[-1]
    frame_start = self.data[0].ts
    frame_end = self.data[0].ts + frame_length
    # bounds used to map measured exposure durations to gray levels
    # (presumably in the same units as ts — TODO confirm)
    max_val = 1.16e5
    min_val = 1.74e3
    val_range = max_val - min_val
    # per-pixel state: 'low' holds the timestamp of the last p==0 event,
    # 'high' the duration between the event pair, 'valid' whether 'low' is armed
    thr = np.rec.array(None, dtype=[('valid', np.bool_), ('low', np.uint64), ('high', np.uint64)], shape=(self.height, self.width))
    thr.valid.fill(False)
    thr.low.fill(frame_start)
    thr.high.fill(0)
    def show_em_frame(frame_data):
        """Prepare and show a single frame of em data to be shown"""
        for datum in np.nditer(frame_data):
            ts_val = datum['ts'].item(0)
            thr_data = thr[datum['y'].item(0), datum['x'].item(0)]
            if datum['p'].item(0) == 0:
                # first event of the pair: arm the pixel and remember its time
                thr_data.valid = 1
                thr_data.low = ts_val
            elif thr_data.valid == 1:
                # second event: record the elapsed duration for this pixel
                thr_data.valid = 0
                thr_data.high = ts_val - thr_data.low
        # map duration to brightness: shorter duration -> brighter pixel
        img = 255 * (1 - (thr.high - min_val) / (val_range))
        #thr_h = cv2.adaptiveThreshold(thr_h, 255,
        #cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 3, 0)
        # clamp to the displayable 0..255 range before casting
        img = np.piecewise(img, [img <= 0, (img > 0) & (img < 255), img >= 255], [0, lambda x: x, 255])
        img = img.astype('uint8')
        cv2.imshow('img', img)
        cv2.waitKey(1)
    # walk the recording in fixed-length windows and display each one
    while frame_start < t_max:
        #with timer.Timer() as em_playback_timer:
        frame_data = self.data[(self.data.ts >= frame_start) & (self.data.ts < frame_end)]
        show_em_frame(frame_data)
        frame_start = frame_end + 1
        frame_end += frame_length + 1
        #print 'showing em frame took %s seconds' %em_playback_timer.secs
    cv2.destroyAllWindows()
    return
def show_td(self, wait_delay=1):
    """Displays the TD events (change detection ATIS or DVS events)
    waitDelay: milliseconds
    """
    window_us = 24e3
    last_ts = self.data.ts[-1]
    window_start = self.data[0].ts
    window_end = self.data[0].ts + window_us
    td_img = np.ones((self.height, self.width), dtype=np.uint8)
    while window_start < last_ts:
        in_window = (self.data.ts >= window_start) & (self.data.ts < window_end)
        frame_data = self.data[in_window]
        if frame_data.size > 0:
            # background gray, then paint each event with its polarity value
            td_img.fill(128)
            for datum in np.nditer(frame_data):
                td_img[datum['y'].item(0), datum['x'].item(0)] = datum['p'].item(0)
            # polarity 0 -> black, polarity 1 -> white, untouched -> gray
            td_img = np.piecewise(td_img, [td_img == 0, td_img == 1, td_img == 128], [0, 255, 128])
            cv2.imshow('img', td_img)
            cv2.waitKey(wait_delay)
        window_start = window_end + 1
        window_end = window_end + window_us + 1
    cv2.destroyAllWindows()
    return
def __init__(self):
    """Initialise the record timer and load persisted timers from timers.xml."""
    timer.Timer.__init__(self)
    self.Filename = Directories.resolveFilename(Directories.SCOPE_CONFIG, "timers.xml")
    try:
        self.loadTimer()
    except IOError:
        # best-effort load: a missing/unreadable file is not fatal
        # (fix: the original Python 2 print statement is a py3 syntax error)
        print("unable to load timers from file!")
def loadTimer(self):
    """Parse timers.xml and register every timer found in it.

    A corrupt file is renamed to timers.xml_old and a popup is shown.
    Timers that conflict with already-registered ones trigger a second
    popup listing the entries that were disabled.
    """
    # NOTE(review): xml.etree.cElementTree was removed in Python 3.9 —
    # migrate the file-level import to xml.etree.ElementTree; verify.
    try:
        doc = xml.etree.cElementTree.parse(self.Filename)
    except SyntaxError:
        from Tools.Notifications import AddPopup
        from Screens.MessageBox import MessageBox
        AddPopup(_("The timer file (timers.xml) is corrupt and could not be loaded."), type = MessageBox.TYPE_ERROR, timeout = 0, id = "TimerLoadFailed")
        print("timers.xml failed to load!")
        # keep the broken file around for post-mortem instead of deleting it
        try:
            import os
            os.rename(self.Filename, self.Filename + "_old")
        except (IOError, OSError):
            print("renaming broken timer failed")
        return
    except IOError:
        print("timers.xml not found!")
        return
    root = doc.getroot()
    checkit = False
    timer_text = ""
    for timer in root.findall("timer"):
        newTimer = createTimer(timer)
        conflict_list = self.record(newTimer, ignoreTSC=True, dosave=False, loadtimer=True)
        if conflict_list:
            checkit = True
            if newTimer in conflict_list:
                timer_text += _("\nTimer '%s' disabled!") % newTimer.name
    if checkit:
        from Tools.Notifications import AddPopup
        from Screens.MessageBox import MessageBox
        AddPopup(_("Timer overlap in timers.xml detected!\nPlease recheck it!") + timer_text, type = MessageBox.TYPE_ERROR, timeout = 0, id = "TimerLoadFailed")
def record(self, entry, ignoreTSC=False, dosave=True, loadtimer=False):
    """Register a record timer entry after a sanity check.

    entry: the timer entry to add
    ignoreTSC: proceed even when the sanity check reports a conflict
    dosave: persist the timer list afterwards
    loadtimer: True when called while loading timers.xml; conflicting
        entries are then disabled instead of rejected
    Returns the conflicting timer list when a conflict is detected and not
    ignored, None for a detected duplicate, otherwise the (possibly None)
    conflict list gathered while loading.
    (fix: converted Python 2 print statements to py3 print() calls)
    """
    check_timer_list = self.timer_list[:]
    timersanitycheck = TimerSanityCheck(check_timer_list, entry)
    answer = None
    if not timersanitycheck.check():
        if not ignoreTSC:
            print("[RecordTimer] timer conflict detected!")
            print(timersanitycheck.getSimulTimerList())
            return timersanitycheck.getSimulTimerList()
        else:
            print("[RecordTimer] ignore timer conflict...")
            if not dosave and loadtimer:
                # while loading, disable the new entry instead of rejecting it
                simulTimerList = timersanitycheck.getSimulTimerList()
                if entry in simulTimerList:
                    entry.disabled = True
                    if entry in check_timer_list:
                        check_timer_list.remove(entry)
                answer = simulTimerList
    elif timersanitycheck.doubleCheck():
        print("[RecordTimer] ignore double timer...")
        return None
    entry.timeChanged()
    print("[Timer] Record " + str(entry))
    entry.Timer = self
    self.addTimerEntry(entry)
    if dosave:
        self.saveTimer()
    return answer
def removeEntry(self, entry):
    """Abort a timer entry and drop it from this timer's bookkeeping.

    entry: the timer entry to remove; it is aborted and, if already
        processed, removed from processed_timers. The timer file is saved.
    (fix: converted Python 2 print statements to py3 print() calls)
    """
    print("[Timer] Remove " + str(entry))
    # avoid re-enqueuing
    entry.repeated = False
    # abort timer.
    # this sets the end time to current time, so timer will be stopped.
    entry.autoincrease = False
    entry.abort()
    if entry.state != entry.StateEnded:
        self.timeChanged(entry)
    print("state: ", entry.state)
    print("in processed: ", entry in self.processed_timers)
    print("in running: ", entry in self.timer_list)
    # autoincrease instanttimer if possible
    if not entry.dontSave:
        for x in self.timer_list:
            if x.setAutoincreaseEnd():
                self.timeChanged(x)
    if entry in self.processed_timers:
        # now the timer should be in the processed_timers list. remove it from there.
        self.processed_timers.remove(entry)
    self.saveTimer()
def run(path, debug, max_cycles):
    """Load a ROM and drive the emulator main loop.

    path: filesystem path of the ROM file
    debug: debug channel selector ("TITLE", "HEADER", "MEMORY",
        "INSTRUCTIONS", "REGISTERS" or "ALL")
    max_cycles: stop after this many emulated cycles; negative runs until quit
    """
    with open(path, "rb") as rom_file:
        debug_title = debug == "TITLE"
        debug_header = debug in ("HEADER", "ALL")
        debug_mem = debug in ("MEMORY", "ALL")
        debug_instructions = debug in ("INSTRUCTIONS", "ALL")
        debug_registers = debug in ("REGISTERS", "ALL")
        rom = list(rom_file.read())  # bytes object -> list of int byte values
        header = Header(rom, debug_header)
        mem = Memory(rom, header)
        if debug_title:
            print("Title: " + header.name)
        if debug_instructions:
            print("PC: Operation")
        interrupts = Interrupts()
        cpu = CPU(mem, interrupts, debug_instructions, debug_registers)
        timer = Timer(interrupts)
        sound = Sound()
        link = Link()
        joypad = Joypad()
        lcdc = LCDC(mem, interrupts)
        mem.setupIO(lcdc, interrupts, timer, sound, link, joypad)
        total_cycles = 0
        try:
            pygame.init()
            while cpu.run_state != "QUIT":
                # pump window/input events before each CPU step
                for event in pygame.event.get():
                    if event.type == pygame.QUIT:
                        cpu.run_state = "QUIT"
                    if event.type == pygame.KEYDOWN or event.type == pygame.KEYUP:
                        joypad.keyEvent(event)
                interrupts.update()
                if cpu.run_state == "RUN":
                    cpu.run()
                else:
                    # not running: still burn a cycle so peripherals advance
                    cpu.cycles += 1
                timer.update(cpu.cycles)
                lcdc.update(cpu.cycles)
                total_cycles += cpu.popCycles()
                if max_cycles >= 0 and total_cycles > max_cycles:
                    cpu.run_state = "QUIT"
        except AssertionError as e:
            if debug_mem:
                mem.display()
            traceback.print_tb(e.__traceback__)
        except KeyboardInterrupt:
            if debug_mem:
                mem.display()
        else:
            if debug_mem:
                mem.display()
graph_kernels_labeled.py file source
Project: TextAsGraphClassification
Author: NightmareNyx
Project source
File source
Views: 29
Favorites: 0
Likes: 0
Comments: 0
def sp_kernel(g1, g2=None):
    """Compute the labeled shortest-path kernel matrix.

    g1: list of networkx graphs with a 'label' attribute on every node
    g2: optional second list; when given, the returned matrix is the
        cross-kernel between g1 (rows) and g2 (columns)
    Returns a dense kernel matrix.
    """
    with Timer("SP kernel"):
        # pool both collections so feature columns are indexed consistently
        if g2 is not None:
            graphs = list(g1) + list(g2)
        else:
            graphs = g1
        # NOTE(review): assumes shortest_path_length returns a dict-of-dicts
        # (networkx 1.x API); in 2.x it returns an iterator — confirm version
        sp_lengths = [nx.shortest_path_length(graph) for graph in graphs]
        N = len(graphs)
        all_paths = {}  # (min_label, max_label, distance) -> column index
        sp_counts = {}
        for i in range(N):
            sp_counts[i] = {}
            nodes = graphs[i].nodes()
            for v1 in nodes:
                for v2 in nodes:
                    if v2 in sp_lengths[i][v1]:
                        # sort endpoint labels so (a, b) and (b, a) coincide
                        label = tuple(
                            sorted([graphs[i].node[v1]['label'], graphs[i].node[v2]['label']]) + [
                                sp_lengths[i][v1][v2]])
                        if label in sp_counts[i]:
                            sp_counts[i][label] += 1
                        else:
                            sp_counts[i][label] = 1
                        if label not in all_paths:
                            all_paths[label] = len(all_paths)
        # build the sparse feature matrix: one row per graph
        phi = lil_matrix((N, len(all_paths)))
        for i in range(N):
            for label in sp_counts[i]:
                phi[i, all_paths[label]] = sp_counts[i][label]
        if g2 is not None:
            K = np.dot(phi[:len(g1), :], phi[len(g1):, :].T)
        else:
            K = np.dot(phi, phi.T)
        return K.todense()
graph_kernels_labeled.py file source
Project: TextAsGraphClassification
Author: NightmareNyx
Project source
File source
Views: 22
Favorites: 0
Likes: 0
Comments: 0
def graphlet_kernel(g1, g2=None):
    """Compute the labeled connected 3-node graphlet kernel.

    g1: list of networkx graphs with a 'label' attribute on every node
    g2: optional second list; when given, the returned matrix is the
        cross-kernel between g1 (rows) and g2 (columns)
    Returns a dense numpy array of kernel values.
    (fix: removed the unused local 'ind')
    """
    with Timer("Graphlet Kernel"):
        # pool both collections so feature columns are indexed consistently
        if g2 is not None:
            graphs = list(g1) + list(g2)
        else:
            graphs = g1
        N = len(graphs)
        graphlet_counts = []
        graphlets = {}  # graphlet signature -> column index
        for g in graphs:
            d = {}
            # enumerate paths node1-node2-node3 through every edge pair
            for node1 in g.nodes():
                for node2 in g.neighbors(node1):
                    for node3 in g.neighbors(node2):
                        if node1 != node3:
                            # NOTE(review): assumes neighbors() supports 'in'
                            # membership (networkx 1.x list API) — confirm
                            if node3 not in g.neighbors(node1):
                                # open path; weight 1/2 compensates for the two
                                # directions enumerating the same graphlet
                                graphlet = (1, min(g.node[node1]['label'], g.node[node3]['label']),
                                            g.node[node2]['label'],
                                            max(g.node[node1]['label'], g.node[node3]['label']))
                                if graphlet not in graphlets:
                                    graphlets[graphlet] = len(graphlets)
                                if graphlets[graphlet] in d:
                                    d[graphlets[graphlet]] += 1.0 / 2.0
                                else:
                                    d[graphlets[graphlet]] = 1.0 / 2.0
                            else:
                                # triangle; weight 1/6 compensates for the six
                                # orderings enumerating the same graphlet
                                labs = sorted([g.node[node1]['label'], g.node[node2]['label'],
                                               g.node[node3]['label']])
                                graphlet = (2, labs[0], labs[1], labs[2])
                                if graphlet not in graphlets:
                                    graphlets[graphlet] = len(graphlets)
                                if graphlets[graphlet] in d:
                                    d[graphlets[graphlet]] += 1.0 / 6.0
                                else:
                                    d[graphlets[graphlet]] = 1.0 / 6.0
            graphlet_counts.append(d)
        # build the sparse feature matrix: one row per graph
        phi = lil_matrix((N, len(graphlets)))
        for i, counts in enumerate(graphlet_counts):
            for col in counts:
                phi[i, col] = counts[col]
        if g2 is not None:
            K = np.dot(phi[:len(g1), :], phi[len(g1):, :].T)
        else:
            K = np.dot(phi, phi.T)
        K = np.asarray(K.todense())
        return K
# Compute Weisfeiler-Lehman subtree kernel
def apply_tracking1(td, alpha=0.98, threshold=-1):
    """Alternative to stabilization. Compensate for motion of a single "object" by tracking its movement
    The concept is fairly simple:
    0: The tracker starts at the center of the event recording
    1: For each incoming event, calculate its distance to the tracker.
    2: If the distance is less than a threshold then update the tracker location using
    3: tracker_location = tracker_location*alpha + event_location*(1-alpha)
    You may find the tracker is quite erratic because it moves with every incoming event. It may be a good idea to smooth the motion somewhat which would be another step.
    td: eventvision.Events
    alpha: a number between 0 and 1. Typically quite high. Default 0.98
    threshold: distance in pixels for the tracker to be updated.
        Non-positive values (the default) use 0.5 * height of td.
    Returns a copy of td.data with events re-centered on the tracker and
    out-of-bounds events removed.
    """
    assert(alpha >= 0)
    assert(alpha <= 1)
    mix = 1 - alpha
    track_x = center_x = td.width / 2
    track_y = center_y = td.height / 2
    # compare squared distances to avoid a sqrt per event
    threshold_sq = math.floor(center_y**2)
    if (threshold > 0):
        threshold_sq = math.floor(threshold**2)
    copy = np.copy(td.data).view(np.recarray)
    for i in range(copy.size):
        datum = copy[i]  # record view: field writes hit the copied array
        y_val = datum.y
        x_val = datum.x
        distance = (track_x - x_val)**2 + (track_y - y_val)**2
        if (distance <= threshold_sq):
            track_x = track_x * alpha + x_val * mix
            track_y = track_y * alpha + y_val * mix
        # shift the event so the tracked object stays centred
        datum.y = round(y_val - track_y + center_y)
        datum.x = round(x_val - track_x + center_x)
    # remove the events that are out of bounds
    return copy[(copy.x >= 0) & (copy.y >= 0) & (copy.x < td.width) & (copy.y < td.height)]
def make_td_images(td, num_spikes, step_factor=1):
    """Generate set of images from the Temporal Difference (td) events by reading a number of unique spikes
    td is read from a binary file (refer to eventvision.Readxxx functions)
    td: eventvision.Events
    num_spikes: number of unique spikes to accumulate before generating an image
    step_factor: proportional amount to shift before generating the next image.
    1 would result in no overlapping events between images
    0.6 would result in the next image overlapping with 40% of the previous image
    returns array of images
    """
    assert isinstance(td, ev.Events)
    # fix: the original "(int, long)" check raised NameError on Python 3
    # because the long type was removed
    assert isinstance(num_spikes, int)
    assert num_spikes > 0
    assert step_factor > 0
    event_offset = 0
    images = []
    while event_offset + num_spikes < td.data.size:
        image = np.zeros((td.height, td.width), dtype=np.uint8)
        unique_spike_count = 0
        index_ptr = event_offset
        # accumulate events until enough distinct pixels have fired
        while unique_spike_count < num_spikes and index_ptr < td.data.size:
            event = td.data[index_ptr]
            y = event.y
            x = event.x
            if image[y, x] == 0:
                image[y, x] = 255
                unique_spike_count += 1
            index_ptr += 1
        # ran out of events before completing the image
        if unique_spike_count < num_spikes:
            break
        images.append(image)
        # offset next image by a proportion of the spikes just traversed
        total_spikes_traversed = index_ptr - event_offset
        event_offset += math.floor(total_spikes_traversed * step_factor) + 1
    return images