def handle(self, *args, **kwargs):
    """Handle an API request by invoking `runner.handle()`.

    Any exception raised by the runner is caught here and converted
    into a cherrypy `HTTPError()` with the corresponding status code
    and message.
    """
    try:
        return self._runner.handle(*args, **kwargs)
    except JobDeleteError:
        raise cherrypy.HTTPError(status.BAD_REQUEST, excinst().message)
    except JobConflictError:
        raise cherrypy.HTTPError(status.CONFLICT, excinst().message)
    except JobNotSupportedError:
        raise cherrypy.HTTPError(status.INTERNAL_SERVER_ERROR, excinst().message)
    except (JobNotExistsError, ExecutorNoMatchError):
        raise cherrypy.HTTPError(status.NOT_FOUND, excinst().message)
    except:
        cherrypy.log("error response 500", traceback=True)
        raise cherrypy.HTTPError(status.INTERNAL_SERVER_ERROR)
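`excinst()` is not defined in the snippet above; a minimal sketch of what it presumably stands for (the exception instance currently being handled), under the assumption that this is Python 2-era code relying on the legacy `.message` attribute:

# Assumption: excinst() simply returns the active exception instance.
# On Python 3, str(excinst()) would replace the legacy .message attribute.
import sys

def excinst():
    return sys.exc_info()[1]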
Python cherrypy.log() usage examples (source code)
def getListOfModels(comFolder):
    '''
    This function creates a list of all the community models that will be used
    in the analysis. It does so by listing the metabolic models in SBML format
    present in the user-specified folder of community models.

    :param comFolder: path to the folder that contains the community metabolic models.
    :return listOfModels: list of the filenames for all the models that will be
        analyzed by the function calculateGR.
    '''
    import os
    cherrypy.log('We will first get the full list of community models from the %s folder' % comFolder)
    path = comFolder
    listOfFiles = os.listdir(path)
    listOfModels = []
    for file in listOfFiles:
        if file.endswith('.sbml'):
            pathToFile = path + file
            listOfModels.append(pathToFile)
    cherrypy.log('There are %s community models that will be analyzed.' % len(listOfModels))
    return listOfModels
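A hypothetical call, assuming '/data/community_models/' is a folder of .sbml files (note the trailing slash, since the folder path and filename are concatenated directly):

# Hypothetical usage; the folder path is an assumption, not from the source.
models = getListOfModels('/data/community_models/')
for m in models:
    cherrypy.log('Queued model %s' % m)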
def replaceRxns(model, modelID):
    '''
    This function adds the tag given in modelID to the beginning of the reaction
    IDs of a particular Model object, so that we know which reactions come from
    which species. This is equivalent to assigning each species to its own
    compartment. It matters because the two species have reactions and
    metabolites in common but do not actually share those metabolites in their
    biology, since the cells are closed compartments. They only share the
    metabolites that are transported in and out of the cell, hence the creation
    of an extra external compartment.

    :param model: Model object containing the metabolic model of a particular species
    :param modelID: tag to add to the beginning of the reaction IDs of the model
    :return model: the same model, but with updated reaction IDs
    '''
    cherrypy.log('Started function to replace the reaction IDs in the species models')
    for i in range(len(model.reactions)):
        old_rxns = str(model.reactions[i])
        new_rxns = 'model' + modelID + '_' + old_rxns
        model.reactions[i].id = new_rxns
    cherrypy.log('Finished changing the reaction IDs in the species models')
    return model
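A hypothetical usage sketch, assuming the Model object is a cobrapy model and the SBML file path is illustrative:

# Hypothetical usage; assumes cobrapy and an illustrative file path.
import cobra

speciesA = cobra.io.read_sbml_model('/data/speciesA.sbml')
speciesA = replaceRxns(speciesA, 'A')  # reaction ids become 'modelA_<old id>'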
def run_server():
    # Enable WSGI access logging via Paste
    app_logged = TransLogger(app)

    # Mount the WSGI callable object (app) on the root directory
    cherrypy.tree.graft(app_logged, '/')

    # Set the configuration of the web server
    cherrypy.config.update({
        'engine.autoreload_on': True,
        'log.screen': True,
        'log.error_file': "cherrypy.log",
        'server.socket_port': 5000,
        'server.socket_host': '0.0.0.0',
        'server.thread_pool': 50,  # 10 is the default
    })

    # Start the CherryPy WSGI web server
    cherrypy.engine.start()
    cherrypy.engine.block()
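The snippet does not show where `app` comes from; a minimal sketch assuming it is a Flask application wrapped by Paste's TransLogger:

# Assumed definition of `app`; the original snippet does not show it.
from flask import Flask
from paste.translogger import TransLogger

app = Flask(__name__)

@app.route('/')
def index():
    return 'hello'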
def _attempt(filename, content_types, debug=False):
    if debug:
        cherrypy.log('Attempting %r (content_types %r)' %
                     (filename, content_types), 'TOOLS.STATICDIR')
    try:
        # you can set the content types for a
        # complete directory per extension
        content_type = None
        if content_types:
            r, ext = os.path.splitext(filename)
            content_type = content_types.get(ext[1:], None)
        serve_file(filename, content_type=content_type, debug=debug)
        return True
    except cherrypy.NotFound:
        # If we didn't find the static file, continue handling the
        # request. We might find a dynamic handler instead.
        if debug:
            cherrypy.log('NotFound', 'TOOLS.STATICFILE')
        return False
def _load(self, path=None):
    assert self.locked, ('The session load without being locked. '
                         "Check your tools' priority levels.")
    if path is None:
        path = self._get_file_path()
    try:
        f = open(path, 'rb')
        try:
            return pickle.load(f)
        finally:
            f.close()
    except (IOError, EOFError):
        e = sys.exc_info()[1]
        if self.debug:
            cherrypy.log('Error loading the session pickle: %s' %
                         e, 'TOOLS.SESSIONS')
        return None
def acquire_lock(self, path=None):
    """Acquire an exclusive lock on the currently-loaded session data."""
    if path is None:
        path = self._get_file_path()
    path += self.LOCK_SUFFIX
    checker = locking.LockChecker(self.id, self.lock_timeout)
    while not checker.expired():
        try:
            self.lock = lockfile.LockFile(path)
        except lockfile.LockError:
            time.sleep(0.1)
        else:
            break
    self.locked = True
    if self.debug:
        cherrypy.log('Lock acquired.', 'TOOLS.SESSIONS')
def digest_auth(realm, users, debug=False):
    """If auth fails, raise 401 with a digest authentication header.

    realm
        A string containing the authentication realm.

    users
        A dict of the form: {username: password} or a callable returning
        a dict.
    """
    if check_auth(users, realm=realm):
        if debug:
            cherrypy.log('Auth successful', 'TOOLS.DIGEST_AUTH')
        return

    # inform the user-agent this path is protected
    cherrypy.serving.response.headers[
        'www-authenticate'] = httpauth.digestAuth(realm)

    raise cherrypy.HTTPError(
        401, 'You are not authorized to access that resource')
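This is the body of CherryPy's classic digest_auth tool; a minimal sketch of enabling it through config, assuming the older `tools.digest_auth` registration (newer CherryPy releases ship `tools.auth_digest` instead):

# Sketch: enable the classic digest_auth tool for one path via app config.
conf = {
    '/protected': {
        'tools.digest_auth.on': True,
        'tools.digest_auth.realm': 'localhost',
        'tools.digest_auth.users': {'admin': 'secret'},  # example credentials only
    }
}
cherrypy.quickstart(Root(), '/', conf)  # Root() is an assumed handler class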
def log_hooks(debug=False):
    """Write request.hooks to the cherrypy error log."""
    request = cherrypy.serving.request

    msg = []
    # Sort by the standard points if possible.
    from cherrypy import _cprequest
    points = _cprequest.hookpoints
    for k in request.hooks.keys():
        if k not in points:
            points.append(k)

    for k in points:
        msg.append('    %s:' % k)
        v = request.hooks.get(k, [])
        v.sort()
        for h in v:
            msg.append('        %r' % h)
    cherrypy.log('\nRequest Hooks for ' + cherrypy.url() +
                 ':\n' + '\n'.join(msg), 'HTTP')
def flatten(debug=False):
    """Wrap response.body in a generator that recursively iterates over body.

    This allows cherrypy.response.body to consist of 'nested generators';
    that is, a set of generators that yield generators.
    """
    def flattener(input):
        numchunks = 0
        for x in input:
            if not is_iterator(x):
                numchunks += 1
                yield x
            else:
                for y in flattener(x):
                    numchunks += 1
                    yield y
        if debug:
            cherrypy.log('Flattened %d chunks' % numchunks, 'TOOLS.FLATTEN')
    response = cherrypy.serving.response
    response.body = flattener(response.body)
def autovary(ignore=None, debug=False):
    """Auto-populate the Vary response header based on request.header access."""
    request = cherrypy.serving.request

    req_h = request.headers
    request.headers = MonitoredHeaderMap()
    request.headers.update(req_h)
    if ignore is None:
        ignore = set(['Content-Disposition', 'Content-Length', 'Content-Type'])

    def set_response_header():
        resp_h = cherrypy.serving.response.headers
        v = set([e.value for e in resp_h.elements('Vary')])
        if debug:
            cherrypy.log(
                'Accessed headers: %s' % request.headers.accessed_headers,
                'TOOLS.AUTOVARY')
        v = v.union(request.headers.accessed_headers)
        v = v.difference(ignore)
        v = list(v)
        v.sort()
        resp_h['Vary'] = ', '.join(v)
    request.hooks.attach('before_finalize', set_response_header, 95)
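A minimal sketch of turning this on for a handler, assuming it is registered as `cherrypy.tools.autovary` (as in stock CherryPy):

# Sketch: any request header read inside the handler is added to Vary.
class Root(object):
    @cherrypy.expose
    @cherrypy.tools.autovary()
    def index(self):
        lang = cherrypy.request.headers.get('Accept-Language', '')
        return 'lang: %s' % lang  # response carries Vary: Accept-Language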
def run(self, point):
    """Execute all registered Hooks (callbacks) for the given point."""
    exc = None
    hooks = self[point]
    hooks.sort()
    for hook in hooks:
        # Some hooks are guaranteed to run even if others at
        # the same hookpoint fail. We will still log the failure,
        # but proceed on to the next hook. The only way
        # to stop all processing from one of these hooks is
        # to raise SystemExit and stop the whole server.
        if exc is None or hook.failsafe:
            try:
                hook()
            except (KeyboardInterrupt, SystemExit):
                raise
            except (cherrypy.HTTPError, cherrypy.HTTPRedirect,
                    cherrypy.InternalRedirect):
                exc = sys.exc_info()[1]
            except:
                exc = sys.exc_info()[1]
                cherrypy.log(traceback=True, severity=40)
    if exc:
        raise exc
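For illustration, a hedged sketch of attaching a failsafe hook, which this loop will still run after an earlier hook at the same point has raised (`failsafe` is a keyword accepted by `request.hooks.attach`):

# Sketch: a cleanup callback that must run even if earlier hooks failed.
def release_resources():
    cherrypy.log('releasing per-request resources', 'APP')

cherrypy.serving.request.hooks.attach(
    'on_end_request', release_resources, failsafe=True)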
def _populate_known_types(self):
    b = [x for x in vars(builtins).values()
         if type(x) is type(str)]

    def traverse(obj, namespace):
        for name in dir(obj):
            # Hack for 3.2's warning about body_params
            if name == 'body_params':
                continue
            vtype = type(getattr(obj, name, None))
            if vtype in b:
                self.known_config_types[namespace + '.' + name] = vtype

    traverse(cherrypy.request, 'request')
    traverse(cherrypy.response, 'response')
    traverse(cherrypy.server, 'server')
    traverse(cherrypy.engine, 'engine')
    traverse(cherrypy.log, 'log')
def catalog_1(self, *tokens):
    """Outputs the contents of the specified catalog file, using the
    name in the request path, directly to the client."""

    try:
        name = tokens[0]
    except IndexError:
        raise cherrypy.HTTPError(http_client.FORBIDDEN,
                                 _("Directory listing not allowed."))

    try:
        fpath = self.repo.catalog_1(name,
                                    pub=self._get_req_pub())
    except srepo.RepositoryError as e:
        # Treat any remaining repository error as a 404, but
        # log the error and include the real failure
        # information.
        cherrypy.log("Request failed: {0}".format(str(e)))
        raise cherrypy.HTTPError(http_client.NOT_FOUND, str(e))

    self.__set_response_expires("catalog", 86400, 86400)
    return serve_file(fpath, "text/plain; charset=utf-8")
def _tar_stream_close(**kwargs):
    """This is a special function to finish a tar_stream-based
    request in the event of an exception."""

    tar_stream = cherrypy.request.tar_stream
    if tar_stream:
        try:
            # Attempt to close the tar_stream now that we
            # are done processing the request.
            tar_stream.close()
        except Exception:
            # All exceptions are intentionally caught as
            # this is a failsafe function and must happen.

            # tarfile most likely failed trying to flush
            # its internal buffer. To prevent tarfile from
            # causing further exceptions during __del__,
            # we have to lie and say the fileobj has been
            # closed.
            tar_stream.fileobj.closed = True
            cherrypy.log("Request aborted: ",
                         traceback=True)
    cherrypy.request.tar_stream = None
def file_0(self, *tokens):
    """Outputs the contents of the file, named by the SHA-1 hash
    name in the request path, directly to the client."""

    try:
        fhash = tokens[0]
    except IndexError:
        fhash = None

    try:
        fpath = self.repo.file(fhash, pub=self._get_req_pub())
    except srepo.RepositoryFileNotFoundError as e:
        raise cherrypy.HTTPError(http_client.NOT_FOUND, str(e))
    except srepo.RepositoryError as e:
        # Treat any remaining repository error as a 404, but
        # log the error and include the real failure
        # information.
        cherrypy.log("Request failed: {0}".format(str(e)))
        raise cherrypy.HTTPError(http_client.NOT_FOUND, str(e))

    self.__set_response_expires("file", 86400*365, 86400*365)
    return serve_file(fpath, "application/data")
def run(self):
    """Run any background task scheduled for execution."""
    while self.__running:
        try:
            try:
                # A brief timeout here is necessary
                # to reduce CPU usage and to ensure
                # that shutdown doesn't wait forever
                # for a new task to appear.
                task, args, kwargs = \
                    self.__q.get(timeout=.5)
            except queue.Empty:
                continue
            task(*args, **kwargs)
            if hasattr(self.__q, "task_done"):
                # Task is done; mark it so.
                self.__q.task_done()
        except:
            self.bus.log("Failure encountered executing "
                         "background task {0!r}.".format(self),
                         traceback=True)
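For context, a hedged sketch of feeding work to this loop, assuming the underlying queue is a standard queue.Queue holding the (task, args, kwargs) tuples that the get() call above unpacks:

# Illustrative only: mirrors the (task, args, kwargs) tuples the loop expects.
import queue

work_q = queue.Queue()

def rebuild_index(repo_path):
    cherrypy.log("rebuilding search index for {0}".format(repo_path))

work_q.put((rebuild_index, ("/var/pkgrepo",), {}))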
def __init__(self, spotty):
    self.__root = Root(spotty)

    log = cherrypy.log
    log.access_file = ''
    log.error_file = ''
    log.screen = False

    cherrypy.config.update({
        'server.socket_host': '0.0.0.0',
        'server.socket_port': PROXY_PORT,
        'engine.timeout_monitor.frequency': 5,
        'server.shutdown_timeout': 1
    })
    self.__server = cherrypy.server.httpserver = CPHTTPServer(cherrypy.server)
    threading.Thread.__init__(self)
From cherrypyserver.py (project: arduino-ciao-meteor-ddp-connector, author: andrea689):
def start(self):
    self.bus.log("Starting WebSocket processing")
    self.bus.subscribe('stop', self.cleanup)
    self.bus.subscribe('handle-websocket', self.handle)
    self.bus.subscribe('websocket-broadcast', self.broadcast)
    self.manager.start()
From cherrypyserver.py (project: arduino-ciao-meteor-ddp-connector, author: andrea689):
def stop(self):
    self.bus.log("Terminating WebSocket processing")
    self.bus.unsubscribe('stop', self.cleanup)
    self.bus.unsubscribe('handle-websocket', self.handle)
    self.bus.unsubscribe('websocket-broadcast', self.broadcast)
From cherrypyserver.py (project: arduino-ciao-meteor-ddp-connector, author: andrea689):
def index(self):
    cherrypy.log("Handler created: %s" % repr(cherrypy.request.ws_handler))