def init_logger(self, args):
    """Configure root logging: a console handler plus a rotating file handler.

    :param args: parsed CLI namespace; reads the boolean ``verbose`` and
        ``debug`` attributes to pick the log level.
    """
    # BUG FIX: the stdlib has no ``logging.VERBOSE`` attribute, so the original
    # raised AttributeError whenever --verbose was given.  Register a custom
    # VERBOSE level between DEBUG (10) and INFO (20) to preserve the intent.
    VERBOSE = 15
    logging.addLevelName(VERBOSE, 'VERBOSE')
    level = logging.INFO
    if args.verbose:
        level = VERBOSE
    if args.debug:
        level = logging.DEBUG
    logging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s',
                        level=level)
    # Mirror everything to a size-capped rotating log file (10 x 100 MiB).
    Rthandler = RotatingFileHandler('arbitrage.log', maxBytes=100*1024*1024,
                                    backupCount=10)
    Rthandler.setLevel(level)
    formatter = logging.Formatter('%(asctime)-12s [%(levelname)s] %(message)s')
    Rthandler.setFormatter(formatter)
    logging.getLogger('').addHandler(Rthandler)
    # Quiet the chatty HTTP client libraries.
    logging.getLogger("requests").setLevel(logging.WARNING)
    logging.getLogger("urllib3").setLevel(logging.WARNING)
# Example source code of Python debug() usage
async def check(self, first_check=False):
    """Check GitHub for a newer PyPlanet release and announce it in chat.

    BUG FIX: the body uses ``async with``/``await``, which is a SyntaxError
    inside a plain ``def`` — the coroutine must be declared ``async def``.

    :param first_check: when True and an update is available, also notify
        the server chat (not just the log).
    """
    from pyplanet import __version__ as current_version
    logging.debug('Checking for new versions...')
    async with aiohttp.ClientSession() as session:
        async with session.get(self.url) as resp:
            # Releases are newest-first; take the first stable one.
            for release in await resp.json():
                if not release['draft'] and not release['prerelease']:
                    self.latest = release['tag_name']
                    break
    self.current = current_version
    logging.debug('Version check, your version: {}, online version: {}'.format(self.current, self.latest))
    if first_check and self.update_available:
        logging.info('New version of PyPlanet available, consider updating: {}'.format(self.latest))
        await self.instance.chat(
            '\uf1e6 $FD4$oPy$369Planet$z$s$fff \uf0e7 new version available: v{}. Consider updating!'.format(self.latest)
        )
def update_locallist(listb, refresh_lock):
    """Background worker: keep *listb* populated with locally-discovered servers.

    Repeatedly polls ``zeroconf_info()`` and ages out entries that have not
    been re-announced within ``durable_duration`` polling cycles.  Runs until
    the module-level ``UPDATE_LOCALSERVERS`` flag is cleared.

    :param listb: list widget exposing ``update_items(iterable)``
    :param refresh_lock: lock guarding UI refreshes
    """
    global UPDATE_LOCALSERVERS
    cached = dict()  # server -> number of polling cycles since last seen
    durable_duration = 5
    while UPDATE_LOCALSERVERS:
        # Age every cached entry; drop the ones that have gone stale.
        stale = [srv for srv, age in cached.items() if age > durable_duration]
        for srv in cached:
            cached[srv] += 1
        for srv in stale:
            del cached[srv]
        # Refresh ages for everything currently announced.
        info = zeroconf_info()
        for item in info:
            cached[item] = 0
        if not UPDATE_LOCALSERVERS:
            break
        logging.debug('Acquiring refresh lock for updating list of local servers.')
        # ``with`` guarantees the lock is released even if update_items raises.
        with refresh_lock:
            listb.update_items(cached.keys())
        logging.debug('Releasing refresh lock after updating local server list')
def grad_sparsity(self):
    """Update the exponential moving average of gradient sparsity.

    Sparsity here is the fraction of non-zero entries across all parameter
    gradients in ``self._optimizer``.  The EMA is stored in
    ``self._global_state["sparsity_avg"]`` and the zero-debiased value in
    ``self._sparsity_avg``.
    """
    global_state = self._global_state
    if self._iter == 0:
        global_state["sparsity_avg"] = 0.0
    non_zero_cnt = 0.0
    all_entry_cnt = 0.0
    for group in self._optimizer.param_groups:
        for p in group['params']:
            if p.grad is None:
                continue
            grad = p.grad.data
            grad_non_zero = grad.nonzero()
            if grad_non_zero.dim() > 0:
                non_zero_cnt += grad_non_zero.size()[0]
            all_entry_cnt += torch.numel(grad)
    beta = self._beta
    # BUG FIX: if no parameter had a gradient this step, all_entry_cnt is 0
    # and the original divided by zero.  Treat that step's sparsity as 0.
    sparsity = non_zero_cnt / float(all_entry_cnt) if all_entry_cnt > 0 else 0.0
    global_state["sparsity_avg"] = beta * global_state["sparsity_avg"] \
        + (1 - beta) * sparsity
    self._sparsity_avg = \
        global_state["sparsity_avg"] / self.zero_debias_factor()
    if self._verbose:
        logging.debug("sparsity %f, sparsity avg %f", sparsity, self._sparsity_avg)
    return
def grad_sparsity(self):
    """Fold the current gradient sparsity into its exponential moving average.

    Counts non-zero gradient entries over all optimizer parameters, updates
    ``self._global_state["sparsity_avg"]`` with EMA factor ``self._beta``,
    and stores the zero-debiased estimate in ``self._sparsity_avg``.
    """
    state = self._global_state
    if self._iter == 0:
        state["sparsity_avg"] = 0.0
    nnz = 0.0
    total = 0.0
    all_params = (p for grp in self._optimizer.param_groups for p in grp['params'])
    for p in all_params:
        if p.grad is None:
            continue
        g = p.grad.data
        nz = g.nonzero()
        if nz.dim() > 0:
            nnz += nz.size()[0]
        total += torch.numel(g)
    b = self._beta
    state["sparsity_avg"] = b * state["sparsity_avg"] + (1 - b) * nnz / float(total)
    self._sparsity_avg = state["sparsity_avg"] / self.zero_debias_factor()
    if DEBUG:
        logging.debug("sparsity %f, sparsity avg %f", nnz / float(total), self._sparsity_avg)
    return
def get_cubic_root(self):
    """Solve the cubic arising in the momentum tuner and return x = sqrt(mu).

    ``eps`` and ``DEBUG`` are module-level constants defined elsewhere in the
    file (eps is a small positive regularizer — TODO confirm its value).
    """
    # We have the equation x^2 D^2 + (1-x)^4 * C / h_min^2
    # where x = sqrt(mu).
    # We substitute x, which is sqrt(mu), with x = y + 1.
    # It gives y^3 + py = q
    # where p = (D^2 h_min^2)/(2*C) and q = -p.
    # We use the Vieta's substution to compute the root.
    # There is only one real solution y (which is in [0, 1] ).
    # http://mathworld.wolfram.com/VietasSubstitution.html
    # eps in the numerator is to prevent momentum = 1 in case of zero gradient
    p = (self._dist_to_opt + eps)**2 * (self._h_min + eps)**2 / 2 / (self._grad_var + eps)
    # w^3 term of the Vieta substitution; copysign/pow take the real cube root.
    w3 = (-math.sqrt(p**2 + 4.0 / 27.0 * p**3) - p) / 2.0
    w = math.copysign(1.0, w3) * math.pow(math.fabs(w3), 1.0/3.0)
    # eps in the denominator guards the w -> 0 limit.
    y = w - p / 3.0 / (w + eps)
    x = y + 1
    if DEBUG:
        logging.debug("p %f, den %f", p, self._grad_var + eps)
        logging.debug("w3 %f ", w3)
        logging.debug("y %f, den %f", y, w + eps)
    return x
def run(self):
    """Resolve ``self.hostname`` in this background thread and post the result.

    Sends a 'resolved' message through ``self.result_pipe`` (empty address
    tuple on failure), decrements the global background-activity counter, and
    pokes the local server socket to wake the long-polling loop.
    """
    global lock, background_activity_count
    try:
        logging.debug('[{0:d}] AsyncDNS - calling getaddrinfo for {1}:{2:d}'.format(self.client_id, self.hostname, self.port))
        addresses = socket.getaddrinfo(self.hostname, self.port)
        logging.info('[{0:d}] Resolving {1}:{2:d} Completed'.format(self.client_id, self.hostname, self.port))
    except Exception:
        # BUG FIX: was a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit; resolution failure yields no addresses.
        addresses = ()
        logging.info('[{0:d}] Resolving {1}:{2:d} Failed'.format(self.client_id, self.hostname, self.port))
    message = {'message': 'resolved', 'connection': self.client_id, 'addresses': addresses, 'localhost': self.is_localhost}
    self.result_pipe.SendMessage(message, False)
    # ``with`` releases the lock even if the decrement raises.
    with lock:
        if background_activity_count > 0:
            background_activity_count -= 1
    # open and close a local socket which will interrupt the long polling loop to process the message
    s = socket.socket()
    s.connect((server.ipaddr, server.port))
    s.close()
########################################################################################################################
# TCP Client
########################################################################################################################
def close(self, *args, **kwargs):
    """
    Engine closed; copy the local SQLite database file up to S3.

    Best-effort: upload failures are printed and swallowed, as before.
    """
    super(DatabaseWrapper, self).close(*args, **kwargs)
    signature_version = self.settings_dict.get("SIGNATURE_VERSION", "s3v4")
    s3 = boto3.resource(
        's3',
        config=botocore.client.Config(signature_version=signature_version))
    try:
        with open(self.settings_dict['NAME'], 'rb') as f:
            body = BytesIO(f.read())
        s3_object = s3.Object(self.settings_dict['BUCKET'], self.settings_dict['REMOTE_NAME'])
        # BUG FIX: the original passed a bogus positional 'rb' argument to
        # Object.put(), which only accepts keyword arguments (TypeError).
        s3_object.put(Body=body)
    except Exception as e:
        print(e)
    else:
        # BUG FIX: only report success when the upload actually succeeded.
        logging.debug("Saved to remote DB!")
def function_definition(self, ast):
    """Semantic action for a function definition node.

    Records position info, copies the AST into a plain dict (tagged
    ``type='function'``), strips comment characters from its documentation,
    collects it, and returns the dict.
    """
    logging.debug("Found function definition %s", ast.name)
    self.add_position_info(ast)
    # Copy into a plain dict so the grako buffer referenced through the
    # parseinfo can be garbage-collected; keeping the raw AST alive pins the
    # whole input buffer in memory, which is costly across many files.
    node = dict(ast)
    node['type'] = 'function'
    docs = node['documentation']
    # Drop the leading comment characters from the doc block, if present.
    node['documentation'] = strip_leading_comments(docs) if docs is not None else None
    self.collected_elements.append(node)
    return node
def name_definition(self, ast):
    """Semantic action for a name definition node.

    Mirrors ``function_definition`` but tags the collected dict with
    ``type='name'``.
    """
    logging.debug('Found name definition %s', ast.name)
    self.add_position_info(ast)
    # Detach from the grako parse buffer: a plain dict lets the buffer be
    # freed instead of being retained for the lifetime of this AST.
    record = dict(ast)
    record['type'] = 'name'
    documentation = record['documentation']
    if documentation is not None:
        # Remove the leading comment characters from the doc text.
        record['documentation'] = strip_leading_comments(documentation)
    self.collected_elements.append(record)
    return record
def stop(self):
    """Stop the managed child process.

    Walks ``self.STOP_SIGNALS`` — (signal, timeout) pairs of escalating
    severity — sending each signal and waiting up to *timeout* seconds
    (``None`` waits indefinitely) for the child to exit.
    """
    self._log.debug("%s.stop()", self.naming_service_name)
    # Guard clause replaces the original ``== True`` comparison.
    if not self.query_commandAlive():
        return
    for sig, timeout in self.STOP_SIGNALS:
        try:
            os.kill(self._pid, sig)
        except OSError:
            # Process already gone; nothing left to signal or reap.
            self._pid = None
            return
        if timeout is not None:
            giveup_time = time.time() + timeout
            # Poll non-blockingly until the child exits or the timeout lapses;
            # on timeout, fall through to the next (stronger) signal.
            while os.waitpid(self._pid, os.WNOHANG) == (0, 0):
                time.sleep(0.1)
                if time.time() > giveup_time:
                    break
        else:
            # Wait until there is a response
            os.waitpid(self._pid, 0)
    self._pid = None
def query(self, configProperties):
    """CF PropertySet query.

    An empty request returns every property; otherwise the given properties
    are filled in-place.  Raises ``CF.UnknownProperties`` if any requested
    id is not known.
    """
    self._log.debug("BasicUsesDevice.query(%s)", configProperties)
    if configProperties == []:
        # Empty list means "return everything we have".
        rv = [CF.DataType(id=key, value=self.props[key].value)
              for key in self.props.keys()]
    else:
        unknownProperties = []
        for prop in configProperties:
            if prop.id in self.props:
                prop.value = self.props[prop.id].value
            else:
                unknownProperties.append(prop)
        if unknownProperties:
            raise CF.UnknownProperties(unknownProperties)
        rv = configProperties
    self._log.debug("BasicUsesDevice.query() -> %s", rv)
    return rv
def _allocateCapacity(self, propname, value):
"""Override this if you want if you don't want magic dispatch"""
self._log.debug("_allocateCapacity(%s, %s)", propname, value)
if self._allocationCallbacks.has_key(propname):
return self._allocationCallbacks[propname]._allocate(value)
modified_propname = ''
for ch in propname:
if ch.isalnum():
modified_propname += ch
else:
modified_propname += '_'
allocate = _getCallback(self, "allocate_%s" % modified_propname)
if allocate:
self._log.debug("using callback for _allocateCapacity()", )
return allocate(value)
else:
self._log.debug("no callback for _allocateCapacity()", )
return False
def safe_shutdown(self, timeout):
    """Gracefully stop the HTTP server, then stop the IOLoop.

    The loop is stopped as soon as no callbacks remain pending, or after
    *timeout* seconds at the latest.  Idempotent: repeat calls are no-ops.
    """
    if self._shutting_down:
        return
    self._shutting_down = True
    logging.debug('Stopping http server.')
    self.stop()
    logging.debug('Will be shutdown within %s seconds ...', timeout)
    io_loop = tornado.ioloop.IOLoop.instance()
    deadline = time.time() + timeout
    def safe_stop_loop():
        # Re-check once per second until the deadline passes or the pending
        # callbacks drain.  NOTE(review): reads the private IOLoop._callbacks
        # attribute — verify against the Tornado version in use.
        now = time.time()
        if now < deadline and io_loop._callbacks:
            io_loop.add_timeout(now + 1, safe_stop_loop)
        else:
            io_loop.stop()
            logging.debug('Http server has been shutdown.')
    safe_stop_loop()
def __init__(self, servo_group, event, belt_speed, frequency,
             mqtt_client, master_shadow, args=(), kwargs={}):
    """Thread driving the conveyor belt and reacting to shadow deltas.

    :param servo_group: servo group controlling the belt motors
    :param event: event used to signal commands to the thread
    :param belt_speed: speed applied when rolling
    :param frequency: control-loop frequency
    :param mqtt_client: MQTT client for telemetry
    :param master_shadow: device shadow; delta callbacks go to shadow_mgr
    """
    super(BeltControlThread, self).__init__(
        name="belt_control_thread", args=args, kwargs=kwargs
    )
    # Hardware and comms handles.
    self.sg = servo_group
    self.mqttc = mqtt_client
    self.master_shadow = master_shadow
    # Motion parameters and flags.
    self.belt_speed = belt_speed
    self.frequency = frequency
    self.reversed = False
    self.rolling = False
    # Command/state machine bookkeeping.
    self.cmd_event = event
    self.active_state = 'initialized'
    self.last_state = 'initialized'
    self.control_stages = collections.OrderedDict()
    self.control_stages['roll'] = self.roll
    # Route shadow delta updates through shadow_mgr.
    self.master_shadow.shadowRegisterDeltaCallback(self.shadow_mgr)
    log.debug("[bct.__init__] shadowRegisterDeltaCallback()")
def shadow_mgr(self, payload, status, token):
    """Shadow delta callback: apply convey_cmd / convey_reverse updates."""
    if payload == "REQUEST TIME OUT":
        log.error(
            "[bct.shadow_mgr] shadow 'REQUEST TIME OUT' tk:{0}".format(
                token))
        return
    shady_values = json.loads(payload)
    log.debug("[bct.shadow_mgr] shadow payload:{0}".format(
        json.dumps(shady_values, sort_keys=True)))
    state = shady_values['state']
    # A delta may carry either or both of the two keys we act on.
    if 'convey_cmd' in state:
        cmd = state['convey_cmd']
        if cmd in commands:
            self._activate_command(cmd)
        else:
            log.debug("[bct.shadow_mgr] unknown command:{0}".format(cmd))
    if 'convey_reverse' in state:
        reverse = state['convey_reverse']
        log.info("[bct.shadow_mgr] convey_reverse val:{0}".format(reverse))
        self._reverse_roll(reverse)
def topic_update(client, userdata, message):
    """MQTT message callback.

    Caches the raw payload per topic, updates the per-second telemetry (Hz)
    counters for messages carrying ``data``, and appends the decoded message
    to the history.
    """
    log.debug('[topic_update] received topic:{0} ts:{1}'.format(
        message.topic, dt.datetime.utcnow()))
    topic_cache[message.topic] = message.payload
    msg = json.loads(message.payload)
    if 'data' in msg:
        global last_hz, current_hz, current_hz_time
        count_telemetry(msg['data'])
        elapsed = dt.datetime.utcnow() - current_hz_time
        # A full second has passed: roll the counter over under the lock.
        if elapsed > second:
            with rollover_lock:
                last_hz = current_hz
                current_hz_time = dt.datetime.utcnow()
                current_hz = 0
    history(msg)
def get_core_definition(self, config):
    """
    Get the Master Group Type's core definition
    :param config: gg_group_setup.GroupConfigFile used with the Group Type
    :return: the core definition used to provision the group
    """
    core = config['core']
    definition = [dict(
        ThingArn=core['thing_arn'],
        CertificateArn=core['cert_arn'],
        Id="{0}_00".format(self.type_name),  # arbitrary unique Id string
        SyncShadow=True,
    )]
    logging.debug('[master.get_core_definition] definition:{0}'.format(
        definition)
    )
    return definition
def get_core_definition(self, config):
    """
    Get the Arm Group Type's core definition
    :param config: gg_group_setup.GroupConfigFile used with the Group Type
    :return: the core definition used to provision the group
    """
    core_cfg = config['core']
    # Single-core definition; the Id only has to be unique within the group.
    definition = [{
        "ThingArn": core_cfg['thing_arn'],
        "CertificateArn": core_cfg['cert_arn'],
        "Id": "{0}_00".format(self.type_name),
        "SyncShadow": True
    }]
    logging.debug(
        '[arm.get_core_definition] definition:{0}'.format(
            definition)
    )
    return definition
def launch(self, cfg, path, flags):
    """Launch the RPC agent binary matching this host's OS and architecture.

    :param cfg: launcher.Config with the supported OS/arch combinations
    :param path: base path used when the WPW_HOME env var is not set
    :param flags: extra command-line flags passed through to the agent
    :return: the spawned ``subprocess.Popen``, or None when the host
        OS/architecture combination is not supported.
    """
    logging.debug("Determine the OS and Architecture this application is currently running on")
    hostOS = platform.system().lower()
    logging.debug("hostOS: " + str(hostOS))
    # Portable 64-bit interpreter check.
    is_64bits = sys.maxsize > 2 ** 32
    hostArchitecture = 'x64' if is_64bits else 'ia32'
    logging.debug("hostArchitecture: " + str(hostArchitecture))
    if not self.validateConfig(cfg, hostOS, hostArchitecture):
        logging.debug("Invalid OS/Architecture combination detected")
        return None  # was an implicit None; made explicit
    # Reuse hostOS instead of recomputing platform.system().lower().
    binary = 'rpc-agent-' + hostOS + '-' + self.detectHostArchitecture()
    if os.environ.get("WPW_HOME") is not None:
        cmd = [os.environ["WPW_HOME"] + '/bin/' + binary]
    else:
        cmd = [path + '/wpwithinpy/iot-core-component/bin/' + binary]
    cmd.extend(flags)
    # BUG FIX: the devnull handle was leaked; close it once the child (which
    # inherits the fd) has been spawned.
    with open(os.devnull, 'w') as fnull:
        proc = subprocess.Popen(cmd, stdin=None, stdout=fnull, stderr=subprocess.STDOUT)
    return proc
def startRPC(self, port, eventListenerPort):
    """Configure logging and launch the WorldpayWithin RPC agent.

    :param port: port the RPC agent listens on
    :param eventListenerPort: callback port; values <= 0 disable the
        event listener
    :return: the launched agent process (from ``launcher.launch``)
    """
    logging.basicConfig(filename='worldpay-within-wrapper.log', level=logging.DEBUG)
    reqOS = ["darwin", "win32", "windows", "linux"]
    reqArch = ["x64", "ia32"]
    cfg = launcher.Config(reqOS, reqArch)
    launcherLocal = launcher.launcher()
    # define log file name for rpc agent, so e.g
    # for "runConsumerOWP.py" it will be: "rpc-wpwithin-runConsumerOWP.log"
    logfilename = os.path.basename(sys.argv[0])
    logfilename = "rpc-wpwithin-" + logfilename.rsplit(".", 1)[0] + ".log"
    # Build the argument list once; the callback port is optional.  (The
    # original duplicated both the args and a malformed log line per branch.)
    args = ['-port', str(port), '-logfile', logfilename,
            '-loglevel', 'debug,warn,error,fatal,info']
    if eventListenerPort > 0:
        args.extend(['-callbackport', str(eventListenerPort)])
    logging.debug("{0} {1}".format(os.getcwd(), " ".join(args)))
    process = launcherLocal.launch(cfg, os.getcwd() + "", args)
    return process
def _send_message(self, message, update_status=True):
"""
Sends a message to the VMU931 device, with 5ms delay between each character.
:param message: Message to send to device
:param update_status: Update sensor status after message send (defaults to True)
"""
byte_message = message.encode('ascii')
# bytes must be sent with 1ms+ interval to be recognised by device.
for c in byte_message:
bs = bytes([c])
self.ser.write(bs)
logging.debug("Sent {}".format(bs))
time.sleep(0.01)
time.sleep(0.05)
if update_status:
self.request_status()
time.sleep(0.100)
def get_org_id(org_name):
    """
    Return the Organisation ID for a given Org Name
    """
    # Look the organisation up by name via the Satellite API.
    org = get_json(SAT_API + "organizations/" + org_name)
    # A payload carrying an 'error' key means the organisation is unknown.
    if org.get('error', None):
        log_msg("Organization '%s' does not exist." % org_name, 'ERROR')
        sys.exit(1)
    # Found: record the ID and emit a debug trace.
    org_id = org['id']
    log_msg("Organisation '" + org_name + "' found with ID " + str(org['id']), 'DEBUG')
    return org_id
def log_msg(msg, level):
    """Write *msg* to the log, the transcript file and stdout per *level*.

    NOTE(review): this is Python 2 code (print statements).  ``DEBUG`` is a
    module-level flag; ``tf`` is presumably a transcript file handle and
    BOLD/ERROR/WARNING/ENDC presumably ANSI colour constants — all defined
    elsewhere in the file; confirm before reuse.
    """
    # If we are NOT in debug mode, only write non-debug messages to the log
    if level == 'DEBUG':
        if DEBUG:
            logging.debug(msg)
            print BOLD + "DEBUG: " + msg + ENDC
    elif level == 'ERROR':
        logging.error(msg)
        tf.write('ERROR:' + msg + '\n')
        print ERROR + "ERROR: " + msg + ENDC
    elif level == 'WARNING':
        logging.warning(msg)
        tf.write('WARNING:' + msg + '\n')
        print WARNING + "WARNING: " + msg + ENDC
    # Otherwise if we ARE in debug, write everything to the log AND stdout
    else:
        logging.info(msg)
        tf.write(msg + '\n')
def SendAndAccount(self, binary_data):
    """Send raw bytes to the peer (UDP or TCP) and update the TX counters."""
    # Keep this check! Hexlify is expensive; only pay for it when DEBUG is on.
    if self._logger.isEnabledFor(logging.DEBUG):
        logging.debug("!! Sending BIN data: {0}".format(binascii.hexlify(binary_data)))
    datalen = len(binary_data)
    if self._is_udp:
        self._swarm.SendData(self.ip_address, self.udp_port, binary_data)
    elif self._proto is not None:
        # TCP path; the connection may already be gone while sends are pending.
        self._proto.send_data(binary_data)
        self._swarm._all_data_tx += datalen
    else:
        return  # connection torn down -- nothing was sent, count nothing
    self._total_data_tx += datalen
def HandleRequest(self, msg_request):
    """Handle an incoming REQUEST message by queueing the requested chunks."""
    first, last = msg_request.start_chunk, msg_request.end_chunk
    for chunk_id in range(first, last + 1):
        # Ignore requests for discarded chunks
        if chunk_id <= self._swarm._last_discarded_id:
            continue
        self.set_requested.add(chunk_id)
        # TODO: We might want a more intelligent ACK mechanism than this, but this works well for now
        self.set_sent.discard(chunk_id)
    if self._logger.isEnabledFor(logging.DEBUG):
        logging.debug("FROM > {0} > REQUEST: {1}".format(self._peer_num, msg_request))
    # Kick off sending unless a send is already scheduled.
    if self._sending_handle is None:
        self._sending_handle = asyncio.get_event_loop().call_soon(self.SendRequestedChunks)
def index():
    """Primary index / search view.

    When ``q`` is present in the query string a search is performed; when the
    request path is ``search`` the navigation menu is suppressed
    (``standalone``).  If the backend has no shows at all,
    ``fe.do_first_time_setup`` is triggered and the first-run page rendered.

    Returns:
        A rendered template: ``first_time.html`` on first run, otherwise
        ``default.html``.
    """
    log.debug("Entering index, attempting to get shows.")
    query = request.args.get('q', None)
    watching, airing, specials, movies = fe.get_shows_for_display(query)
    standalone = request.path.strip('/') == 'search'
    logged_in = fe.check_login_id(escape(session['logged_in'])) if 'logged_in' in session else False
    if watching or airing or specials or movies:
        return render_template('default.html', watching=watching, airing=airing,
                               specials=specials, movies=movies,
                               standalone=standalone, logged_in=logged_in)
    # Backend is completely empty: scrape the source before first render.
    log.debug("No shows found in any category. Starting first time startup.")
    fe.do_first_time_setup()
    return render_template('first_time.html', logged_in=logged_in)
def login():
    """Login POST handler.

    Only runs when ``/login`` is hit with a POST method. There is no GET
    equivalent, as that is handled by the navigation template.  Responds
    with HTTP 401 on authentication failure.

    Returns:
        JSON formatted output describing success or failure.
    """
    log.debug("Entering login, attempting to authenticate user.")
    username = request.form['signin_username']
    password = request.form['signin_password']
    log.debug("Username: {0}".format(username))
    if fe.check_auth(username, password):
        log.debug("User authenticated. Trying to set session.")
        session_id = fe.set_login_id()
        session['logged_in'] = session_id
        log.debug("Session ID: {0}, returning to user".format(session_id))
        return jsonify({ "login": "success" })
    log.debug("Username or password not recognized, sending 401.")
    # BUG FIX: `response.status = 401` is a Bottle idiom; Flask has no global
    # `response`, so the failure path raised NameError.  Return the status
    # code alongside the body instead.
    return jsonify({ "login": "failed" }), 401
def star():
    """Starring/Highlighting handler.

    Attempts to toggle a star/highlight on a particular show. The show ID
    must be passed in the ``id`` query string. Unauthenticated requests are
    aborted with a ``404`` to hide the page.

    Returns:
        JSON formatted output describing success and the ID of the show starred.
    """
    log.debug("Entering star, trying to toggle star.")
    # BUG FIX: an anonymous visitor has no 'logged_in' session key, so the
    # bare session['logged_in'] access raised KeyError instead of returning
    # the intended 404.
    if 'logged_in' in session and fe.check_login_id(escape(session['logged_in'])):
        log.debug("Sending show ID {0} to function".format(request.args['id']))
        fe.star_show(request.args['id'])
        log.debug("Returning to user.")
        return jsonify({ "star": "success", "id": request.args['id'] })
    log.debug("User cannot be authenticated, send 404 to hide page.")
    abort(404)
def drop_show():
    """Show removal handler.

    Attempts to remove a show from the backend system. The show ID must be
    passed in the ``id`` query string. Unauthenticated requests are aborted
    with a ``404`` to hide the page.

    Returns:
        An HTTP redirect to the home page, to refresh.
    """
    log.debug("Entering drop_show, trying to remove show from list.")
    # BUG FIX: guard the session lookup -- without the key present,
    # session['logged_in'] raised KeyError instead of producing the 404.
    if 'logged_in' in session and fe.check_login_id(escape(session['logged_in'])):
        log.debug("Sending show ID {0} to function".format(request.args['id']))
        fe.remove_show(request.args['id'])
        log.debug("Refreshing user's page.")
        return redirect('/')
    log.debug("User cannot be authenticated, send 404 to hide page.")
    abort(404)