def handle_request(self):
    parsed_request = self.parse_request(self.request.body)
    unpacked_requests = self.unpack_batch(parsed_request)
    context.main_logger.debug("Received an event batch of {num_requests} requests in batch handler {handler}"
                              .format(num_requests=len(unpacked_requests),
                                      handler=self.__class__.__name__))
    if not context.request_processor.submit(self.create_request(unpacked_requests)):
        context.main_logger.warning("RequestProcessor queue size limit reached, sending back off response...")
        self.set_status(httplib.SERVICE_UNAVAILABLE)
    else:
        self.set_status(httplib.OK)
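context.request_processor is not shown here; a minimal sketch of a submit() that produces this backpressure behavior, assuming a bounded queue (all names below are illustrative, not from the original source):

import Queue  # Python 2 stdlib, consistent with the httplib usage above

class RequestProcessor(object):
    """Illustrative only: a bounded work queue whose submit() reports
    whether the item was accepted, so the handler can answer 503."""

    def __init__(self, max_queue_size=1000):
        self._queue = Queue.Queue(maxsize=max_queue_size)

    def submit(self, request):
        try:
            self._queue.put_nowait(request)
            return True
        except Queue.Full:
            # The caller is expected to respond with SERVICE_UNAVAILABLE.
            return False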
def validate_steam_ticket():
    """Validate steam ticket from /auth call."""
    ob = request.get_json()
    check_schema(ob, steam_provider_schema, "Error in request body.")
    provider_details = ob['provider_details']
    auth_config = current_app.config.get('authentication')
    # Get Steam authentication config
    steam_config = auth_config.get('steam')
    if not steam_config:
        abort(httplib.SERVICE_UNAVAILABLE, description="Steam authentication not configured for current tenant")
    # Find configuration for the requested Steam app id.
    appid = provider_details.get('appid')
    for steam_app in steam_config:
        if steam_app['appid'] == int(appid):  # Cast to int is a temporary hack
            break
    else:
        abort(httplib.SERVICE_UNAVAILABLE, description="Steam authentication not configured for app %s." % appid)
    # Look up our secret key or key url
    key_url = steam_app.get('key_url')
    key = steam_app.get('key')
    if not key_url and not key:
        log.error("Steam tickets cannot be validated. AUTH_STEAM_KEY_URL or AUTH_STEAM_KEY missing from config.")
        abort(httplib.SERVICE_UNAVAILABLE, description="Steam tickets cannot be validated at the moment.")
    # Call validation and authenticate if ticket is good
    identity_id = run_ticket_validation(provider_details, key_url=key_url, key=key, appid=appid)
    return identity_id
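The exact payload shape is defined by steam_provider_schema, which is not shown here. Judging from the lookups above, a passing /auth request body presumably looks something like the following sketch (only 'provider_details' and 'appid' are evidenced by the code; the other fields are assumptions):

# Hypothetical /auth payload for illustration only.
example_request_body = {
    "provider": "steam",
    "provider_details": {
        "appid": "480",
        "ticket": "<hex-encoded Steam session ticket>",  # assumed field
    },
}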
def should_retry(self, error, retries_attempted):
    """Return true if the http client should retry the request.

    :param error: the caught error.
    :type error: Exception

    :param retries_attempted: the number of retries attempted so far.
    :type retries_attempted: int

    :return: true if the http client should retry the request.
    :rtype: bool
    """
    # stop retrying when the maximum number of retries is reached
    if retries_attempted >= self.max_error_retry:
        return False
    # always retry on IOError
    if isinstance(error, IOError):
        _logger.debug('Retry for IOError.')
        return True
    # Only retry on a subset of service exceptions
    if isinstance(error, BceServerError):
        if error.status_code == httplib.INTERNAL_SERVER_ERROR:
            _logger.debug('Retry for internal server error.')
            return True
        if error.status_code == httplib.SERVICE_UNAVAILABLE:
            _logger.debug('Retry for service unavailable.')
            return True
        if error.code == BceServerError.REQUEST_EXPIRED:
            _logger.debug('Retry for request expired.')
            return True
    return False
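The SDK drives this predicate from its request loop; a hedged sketch of such a loop follows (send_once and the driver function are illustrative, only should_retry's contract comes from the code above):

def send_with_retries(retry_policy, send_once):
    # Illustrative driver: send_once() performs one HTTP attempt and
    # raises on failure; the policy decides whether to try again.
    retries_attempted = 0
    while True:
        try:
            return send_once()
        except Exception as error:
            if not retry_policy.should_retry(error, retries_attempted):
                raise
            retries_attempted += 1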
def _post_request(self, body, headers):
    retry_count = self.max_retries
    while True:
        logger.debug("POST %s %r %r", self.url.path, body, headers)
        try:
            self.connection.request('POST', self.url.path, body=body, headers=headers)
            response = self.connection.getresponse()
        except httplib.HTTPException as e:
            if retry_count > 0:
                delay = math.exp(-retry_count)
                logger.debug("HTTP protocol error, will retry in %s seconds...", delay, exc_info=True)
                self.close()
                self.connect()
                time.sleep(delay)
                retry_count -= 1
                continue
            raise errors.InterfaceError('RPC request failed', cause=e)
        else:
            if response.status == httplib.SERVICE_UNAVAILABLE:
                if retry_count > 0:
                    delay = math.exp(-retry_count)
                    logger.debug("Service unavailable, will retry in %s seconds...", delay, exc_info=True)
                    time.sleep(delay)
                    retry_count -= 1
                    continue
            return response
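A note on the backoff: math.exp(-retry_count) yields sub-second delays that grow as the retry budget is spent. A quick check of the schedule, assuming max_retries = 3:

import math

# First retry sleeps exp(-3) ~= 0.050s, then exp(-2) ~= 0.135s,
# and the last one exp(-1) ~= 0.368s: a cheap increasing backoff.
delays = [math.exp(-n) for n in (3, 2, 1)]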
def _handle_request(self, scheme, netloc, path, headers, body=None, method="GET"):
"""
Run the actual request
"""
backend_url = "{}://{}{}".format(scheme, netloc, path)
try:
response = self.http_request.request(backend_url, method=method, body=body, headers=dict(headers))
self._return_response(response)
except Exception as e:
body = "Invalid response from backend: '{}' Server might be busy".format(e.message)
logging.debug(body)
self.send_error(httplib.SERVICE_UNAVAILABLE, body)
def retry_task(self):
    """Ask taskqueue to retry this task.

    Even though raising an exception can cause a task retry, it
    will flood logs with highly visible ERROR entries. Handlers should use
    this method to perform controlled task retries, and only raise exceptions
    for failures that deserve ERROR log entries.
    """
    self.response.set_status(httplib.SERVICE_UNAVAILABLE, "Retry task")
    self.response.clear()
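A hedged sketch of how a handler might call retry_task (the handler class, readiness check, and work function are all hypothetical, not from the source above):

import webapp2  # assumed App Engine runtime, matching the self.response usage above

class ExampleTaskHandler(webapp2.RequestHandler):  # hypothetical, mixes in retry_task
    def post(self):
        if not backend_is_ready():  # hypothetical readiness check
            # Controlled retry: a 503 makes taskqueue re-deliver the task
            # without the ERROR log entry a raised exception would produce.
            self.retry_task()
            return
        process_task(self.request.body)  # hypothetical work function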
def check_resp_status_and_retry(resp, image_id, url):
    # Note(Jesse): This branch sorts errors into those that are permanent,
    # those that are ephemeral, and those that are unexpected.
    if resp.status in (httplib.BAD_REQUEST,                      # 400
                       httplib.UNAUTHORIZED,                     # 401
                       httplib.PAYMENT_REQUIRED,                 # 402
                       httplib.FORBIDDEN,                        # 403
                       httplib.METHOD_NOT_ALLOWED,               # 405
                       httplib.NOT_ACCEPTABLE,                   # 406
                       httplib.PROXY_AUTHENTICATION_REQUIRED,    # 407
                       httplib.CONFLICT,                         # 409
                       httplib.GONE,                             # 410
                       httplib.LENGTH_REQUIRED,                  # 411
                       httplib.PRECONDITION_FAILED,              # 412
                       httplib.REQUEST_ENTITY_TOO_LARGE,         # 413
                       httplib.REQUEST_URI_TOO_LONG,             # 414
                       httplib.UNSUPPORTED_MEDIA_TYPE,           # 415
                       httplib.REQUESTED_RANGE_NOT_SATISFIABLE,  # 416
                       httplib.EXPECTATION_FAILED,               # 417
                       httplib.UNPROCESSABLE_ENTITY,             # 422
                       httplib.LOCKED,                           # 423
                       httplib.FAILED_DEPENDENCY,                # 424
                       httplib.UPGRADE_REQUIRED,                 # 426
                       httplib.NOT_IMPLEMENTED,                  # 501
                       httplib.HTTP_VERSION_NOT_SUPPORTED,       # 505
                       httplib.NOT_EXTENDED,                     # 510
                       ):
        raise PluginError("Got Permanent Error response [%i] while "
                          "uploading image [%s] to glance [%s]"
                          % (resp.status, image_id, url))
    # Nova service would process the exception
    elif resp.status == httplib.NOT_FOUND:  # 404
        exc = XenAPI.Failure('ImageNotFound')
        raise exc
    # NOTE(nikhil): Only a sub-set of the 500 errors are retryable. We
    # optimistically retry on 500 errors below.
    elif resp.status in (httplib.REQUEST_TIMEOUT,        # 408
                         httplib.INTERNAL_SERVER_ERROR,  # 500
                         httplib.BAD_GATEWAY,            # 502
                         httplib.SERVICE_UNAVAILABLE,    # 503
                         httplib.GATEWAY_TIMEOUT,        # 504
                         httplib.INSUFFICIENT_STORAGE,   # 507
                         ):
        raise RetryableError("Got Ephemeral Error response [%i] while "
                             "uploading image [%s] to glance [%s]"
                             % (resp.status, image_id, url))
    else:
        # Note(Jesse): Assume unexpected errors are retryable. If you are
        # seeing this error message, the error should probably be added
        # to either the ephemeral or permanent error list.
        raise RetryableError("Got Unexpected Error response [%i] while "
                             "uploading image [%s] to glance [%s]"
                             % (resp.status, image_id, url))
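check_resp_status_and_retry raises on every path, so a caller presumably checks for success first and wraps the rest in a loop that re-attempts on RetryableError. A hedged sketch (the driver function and do_upload callable are illustrative; only check_resp_status_and_retry and RetryableError come from the code above):

def upload_with_retries(do_upload, image_id, url, max_attempts=5):
    # Illustrative driver: re-attempt while the response is classified
    # as ephemeral, give up after max_attempts tries.
    for attempt in range(max_attempts):
        resp = do_upload()
        if resp.status in (httplib.OK, httplib.CREATED):  # 200, 201
            return resp
        try:
            check_resp_status_and_retry(resp, image_id, url)
        except RetryableError:
            if attempt == max_attempts - 1:
                raise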