def _setup_subscription(self):
    """Creates a subscription if it does not exist."""
    subscription_name = pubsub_utils.get_full_subscription_name()
    try:
        self.client.projects().subscriptions().get(
            subscription=subscription_name).execute()
    except errors.HttpError as e:
        if e.resp.status == 404:
            # subscription is missing: create it, pushing to the app endpoint
            body = {
                'topic': pubsub_utils.get_full_topic_name(),
                'pushConfig': {
                    'pushEndpoint': pubsub_utils.get_app_endpoint_url()
                }
            }
            self.client.projects().subscriptions().create(
                name=subscription_name, body=body).execute()
        else:
            logging.exception(e)
            raise

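# Note: the snippets on this page all demonstrate logging.exception(), which
# logs at ERROR level and automatically appends the active traceback. It is
# only meaningful when called from inside an exception handler. A minimal,
# self-contained illustration (not taken from any of the projects below):
import logging

def _demo_logging_exception():
    try:
        1 / 0
    except ZeroDivisionError:
        logging.exception('division failed')  # logs the message plus the full traceback
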
def main():
    print("see log scrape.log")
    if os.path.isfile("scrape.log"):
        os.remove("scrape.log")
    log.basicConfig(filename="scrape.log",
                    format='%(asctime)s %(levelname)s %(message)s',
                    level=log.DEBUG)
    try:
        log.debug("main() full scrape will take 5-10 minutes")
        cards, tokens = loadJsonCards()
        saveCardsAsJson("data/cards.json", loadSets(allcards=cards))
        # a lot of token names are not unique;
        # a static, handmade list of ids is more reliable
        if os.path.isfile('data/tokenlist.json'):
            with open('data/tokenlist.json', 'r', encoding='utf8') as f:
                saveCardsAsJson("data/tokens.json", loadTokens(tokens, json.load(f)))
    except Exception as e:
        log.exception("main() error %s", e)

def _resume_compute(session, compute_ref, compute_uuid):
    """Resume compute node on slave host after pool join.

    This has to happen regardless of the success or failure of the join
    operation.
    """
    try:
        # session is valid if the join operation has failed
        session.xenapi.VM.start(compute_ref, False, True)
    except XenAPI.Failure:
        # if session is invalid, e.g. xapi has restarted, then the pool
        # join has been successful; wait for xapi to become alive again
        for c in range(0, DEFAULT_TRIES):
            try:
                _run_command(["xe", "vm-start", "uuid=%s" % compute_uuid])
                return
            except pluginlib.PluginError:
                logging.exception('Waited %d seconds for the slave to '
                                  'become available.', c * DEFAULT_SLEEP)
                time.sleep(DEFAULT_SLEEP)
        raise pluginlib.PluginError('Unrecoverable error: the host has '
                                    'not come back for more than %d seconds'
                                    % (DEFAULT_SLEEP * (DEFAULT_TRIES + 1)))

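# The retry loop above is a bounded poll: attempt, log the failure with its
# traceback, sleep, repeat. A generic sketch of the same pattern (the helper
# name is an assumption, not part of the plugin above):
import logging
import time

def _retry(func, tries, sleep_seconds):
    for attempt in range(tries):
        try:
            return func()
        except Exception:
            logging.exception('attempt %d/%d failed; retrying in %ds',
                              attempt + 1, tries, sleep_seconds)
            time.sleep(sleep_seconds)
    raise RuntimeError('gave up after %d attempts' % tries)
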
def process_tag(self, seqid, iprot, oprot):
    args = tag_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = tag_result()
    try:
        result.success = self._handler.tag(args.sentence)
        msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        raise
    except Exception as ex:
        msg_type = TMessageType.EXCEPTION
        logging.exception(ex)
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("tag", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()

# HELPER FUNCTIONS AND STRUCTURES
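# The generated Thrift processors on this page (process_tag above, and the
# process_ping / process_get_node_info / process_register_node /
# process_phase_1_message methods below) share one error-handling policy:
# transport errors and shutdown signals propagate, everything else is logged
# with its traceback and returned to the caller as a generic
# TApplicationException. A distilled sketch of that policy (guarded_call is
# an assumed helper, not part of the generated code):
import logging
from thrift.Thrift import TApplicationException, TMessageType
from thrift.transport import TTransport

def guarded_call(handler, result):
    try:
        result.success = handler()
        return result, TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        raise  # let the server's transport/shutdown machinery handle these
    except Exception:
        logging.exception('handler failed')
        exc = TApplicationException(TApplicationException.INTERNAL_ERROR,
                                    'Internal error')
        return exc, TMessageType.EXCEPTION
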
def __init__(self, app_name, elastic_url, headers=None, auth=None):
    """Initialize the ElasticLogger class.

    Args:
        app_name (str): Name of the application using this logger.
        elastic_url (str): URL of the elastic-http-input to push logs to
            (e.g. 'http://localhost:3332').
        headers (dict): Headers for the POST request; defaults to
            {'content-type': 'application/json'}.
        auth (tuple): Username and password, e.g. ('myuser', 'mypassword').
    """
    self.elastic_url = elastic_url
    self.auth = auth
    self.headers = headers
    if not self.headers:
        self.headers = {'content-type': 'application/json'}
    self.debug_method = partial(self.__log, level='debug')
    self.error_method = partial(self.__log, level='error')
    self.info_method = partial(self.__log, level='info')
    self.exception_method = partial(self.__log, level='exception')
    self.session = FuturesSession(max_workers=10)
    self.app_name = app_name

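# Hypothetical usage of the logger above. The class name and the private
# __log method are not shown here, so the exact payload POSTed to
# elastic_url is an assumption:
logger = ElasticLogger('my-app', 'http://localhost:3332',
                       auth=('myuser', 'mypassword'))
logger.info_method('service started')
try:
    raise ValueError('boom')  # stand-in for real work
except ValueError:
    logger.exception_method('operation failed')
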
def update_charts(q, event, size):
    try:
        os.nice(10)
    except AttributeError:
        # os.nice does not exist on Windows
        logging.warning("can't be nice to windows")
    q.put((CRAWL_MESSAGE, 4, 'Chart engine starting...'))
    base_map = graphics.create_map()
    last_qso_timestamp = 0
    q.put((CRAWL_MESSAGE, 4, ''))
    try:
        while not event.is_set():
            t0 = time.time()
            last_qso_timestamp = load_data(size, q, base_map, last_qso_timestamp)
            t1 = time.time()
            delta = t1 - t0
            update_delay = config.DATA_DWELL_TIME - delta
            if update_delay < 0:
                update_delay = config.DATA_DWELL_TIME
            logging.debug('Next data update in %f seconds', update_delay)
            event.wait(update_delay)
    except Exception as e:
        logging.exception('Exception in update_charts', exc_info=e)
        q.put((CRAWL_MESSAGE, 4, 'Chart engine failed.', graphics.YELLOW, graphics.RED))

def delete_contact(db, cursor, timestamp, station, callsign):
    """Delete the results of a delete in N1MM."""
    # station_id = stations.lookup_station_id(station)
    logging.info('DELETEQSO: %s, timestamp = %s' % (callsign, calendar.timegm(timestamp)))
    try:
        cursor.execute(
            "delete from qso_log where callsign = ? and timestamp = ?",
            (callsign, calendar.timegm(timestamp),))
        db.commit()
    except Exception:
        logging.exception('Exception deleting contact from db.')
    return ''

def open_resource(name):
    """Load the object from the datastore."""
    import logging
    from cStringIO import StringIO
    try:
        data = ndb.Key('Zoneinfo', name, namespace=NDB_NAMESPACE).get().data
    except AttributeError:
        # Missing zone info; test for GMT - which would be there if the
        # Zoneinfo has been initialized.
        if ndb.Key('Zoneinfo', 'GMT', namespace=NDB_NAMESPACE).get():
            # the user asked for a zone that doesn't seem to exist
            logging.exception("Requested zone '%s' is not in the database.",
                              name)
            raise
        # we need to initialize the database
        init_zoneinfo()
        return open_resource(name)
    return StringIO(data)

def select_proxy(self, table_name, **kwargs):
    filters = {}  # renamed from `filter` to avoid shadowing the builtin
    for k, v in kwargs.items():
        if v != '':
            filters[k] = v
    try:
        command = ("SELECT * FROM {name} WHERE anonymity LIKE '{anonymity}' "
                   "AND https LIKE '{https}' ORDER BY {order} {sort} "
                   "LIMIT {count}").format(
            name=table_name, anonymity=filters.get('anonymity', '%'),
            https=filters.get('https', '%'), order=filters.get('order', 'save_time'),
            sort=filters.get('sort', 'desc'), count=filters.get('count', 100))
        result = self.query(command)
        data = [{
            'ip': item[1], 'port': item[2], 'anonymity': item[4], 'https': item[5],
            'speed': item[6], 'save_time': str(item[8])
        } for item in result]
        return data
    except Exception as e:
        logging.exception('mysql select_proxy exception msg: %s', e)
        return []

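# The query above interpolates caller-supplied values straight into the SQL
# string, which is injection-prone. Where the driver supports it, values
# (though not identifiers such as the table name) can be bound as parameters
# instead. A sketch, assuming self.query accepts a params tuple in the
# MySQLdb/PyMySQL '%s' paramstyle:
def select_proxy_safe(self, table_name, anonymity='%', https='%', count=100):
    command = ("SELECT * FROM {name} WHERE anonymity LIKE %s AND https LIKE %s "
               "ORDER BY save_time DESC LIMIT %s").format(name=table_name)
    return self.query(command, (anonymity, https, count))
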
def refund_stripe_charge(self, charge_id):
    all_methods = PROP.getProperty(self.request, PROP.PAYMENT_METHODS)
    method = [x for x in all_methods if x.__name__ == "stripe"]
    method = method[0] if len(method) > 0 else None
    if method is None:
        # logging.error, not logging.exception: there is no active exception here
        logging.error(self.user.username + ": Could not refund %s because could not find stripe payment method" % charge_id)
        return False
    priv_api_key = method.settings[PROP.STRIPE_API_KEY].value
    try:
        stripe.api_key = priv_api_key
        refund = stripe.Refund.create(
            charge=charge_id,
            reason="duplicate"
        )
        if refund is not None and "id" in refund:
            logging.info(self.user.username + ": Refunded charge %s with refund id %s" % (charge_id, refund["id"]))
            return True
        else:
            logging.error(self.user.username + ": Refund of charge %s may have failed" % charge_id)
            return False
    except Exception as e:
        logging.error(self.user.username + ": Exception was thrown during refund: %s" % e)
        return False

def commit(self, preserve_cache=False):
    if not preserve_cache:
        self._clear_cache()
    if not self._change_batch:
        return
    try:
        self._client.change_resource_record_sets(
            HostedZoneId=self.id,
            ChangeBatch={'Changes': self._change_batch}
        )
    except ClientError as excp:
        if excp.response['Error']['Code'] == 'InvalidInput':
            logging.exception("failed to process batch %r", self._change_batch)
        raise
    self._reset_change_batch()

def _reconcile_policy_records(self):
    """
    Reconcile policy records for this zone.
    """
    with self.db_zone.lock_dirty_policy_records() as dirty_policy_records:
        dirty_policies = set()
        for policy_record in dirty_policy_records:
            if not policy_record.deleted:
                dirty_policies.add(policy_record.policy)
        for policy in dirty_policies:
            r53_policy = Policy(policy=policy, zone=self)
            r53_policy.reconcile()
            self.commit(preserve_cache=True)
        for policy_record in dirty_policy_records:
            try:
                with transaction.atomic():
                    policy_record.r53_policy_record.reconcile()
                    self.commit(preserve_cache=True)
            except ClientError:
                logger.exception("failed to reconcile record %r", policy_record)
                self._reset_change_batch()
        self._delete_orphaned_managed_records()
        self.commit()

def run(self, *args, **kwargs):
    try:
        self.spectate = VRSpectate()
        self.spectate.run()
    except ProcessException as e:
        logging.exception(e)
        if "get pid from name" in str(e):
            self.error.emit("LoL client was not found")
        elif "ReadProcessMemory" in str(e):
            self.error.emit("Either the LoL client or this program is outdated")
        else:
            self.error.emit(str(e))
    except pywintypes.error as e:
        logging.exception(e)
        if "SetSecurityInfo" in str(e):
            self.error.emit("Unable to access the League of Legends client, you have to run LoLVRSpectate at the "
                            "same privilege level as the LoL client. \nEX. if the LoL client is running as admin "
                            "LoLVRSpectate also has to run as admin")
    except Exception as e:
        logging.exception(e)
        self.error.emit("Unknown error, please submit a bug report at https://github.com/Fire-Proof/LoLVRSpectate "
                        "(please include the LoLVRSpectate.log file)")

def main_loop(self, concurrency=4, worker_class=UploadWorker):
    chunk_index = 0
    self._begin_upload()
    self._workers = self._start_workers(concurrency, worker_class=worker_class)
    while self._pending_chunks or not self.stream_handler.finished:
        # raise an exception and stop everything if any worker has crashed
        self._check_workers()
        # consume results first as this is a quick operation
        self._handle_results()
        chunk = self.stream_handler.get_chunk()
        if chunk:
            # s3 multipart index is 1-based, increment before sending
            chunk_index += 1
            self._send_chunk(chunk_index, chunk)
    self._finish_upload()
    self.results.sort()
    return multipart_etag(r[1] for r in self.results)

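# multipart_etag is not shown above. For reference, S3 multipart ETags are
# derived as the MD5 of the concatenated binary part digests, suffixed with
# the part count. A sketch of such a helper (an assumption, not the project's
# actual implementation; assumes hex-encoded per-part MD5 digests):
import hashlib

def _multipart_etag(hex_digests):
    digests = [bytes.fromhex(d) for d in hex_digests]
    combined = hashlib.md5(b''.join(digests)).hexdigest()
    return '%s-%d' % (combined, len(digests))
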
def wait(self):
    try:
        while self._process.stderr:
            poll = select([self._process.stderr.fileno()], [], [])
            if len(poll) >= 1:
                for fd in poll[0]:
                    read = self._process.stderr.readline()
                    line = self.parse_mongodump_line(read)
                    if not line:
                        continue
                    elif self.is_password_prompt(read):
                        self.handle_password_prompt()
                    elif self.is_failed_line(read):
                        self.handle_failure(read)
                        break
                    else:
                        logging.info(line)
            if self._process.poll() is not None:
                break
    except Exception as e:
        logging.exception("Error reading mongodump output: %s" % e)
    finally:
        self._process.communicate()

def process_ping(self, seqid, iprot, oprot):
    args = ping_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = ping_result()
    try:
        self._handler.ping()
        msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        raise
    except Exception as ex:
        msg_type = TMessageType.EXCEPTION
        logging.exception(ex)
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("ping", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()

def process_get_node_info(self, seqid, iprot, oprot):
    args = get_node_info_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = get_node_info_result()
    try:
        result.success = self._handler.get_node_info()
        msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        raise
    except UnauthorizedException as unauthorized:
        msg_type = TMessageType.REPLY
        result.unauthorized = unauthorized
    except Exception as ex:
        msg_type = TMessageType.EXCEPTION
        logging.exception(ex)
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("get_node_info", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()

def process_register_node(self, seqid, iprot, oprot):
    args = register_node_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = register_node_result()
    try:
        result.success = self._handler.register_node(args.node, args.pass_phrase)
        msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        raise
    except UnauthorizedException as unauthorized:
        msg_type = TMessageType.REPLY
        result.unauthorized = unauthorized
    except Exception as ex:
        msg_type = TMessageType.EXCEPTION
        logging.exception(ex)
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("register_node", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()

def process_phase_1_message(self, seqid, iprot, oprot):
    args = phase_1_message_args()
    args.read(iprot)
    iprot.readMessageEnd()
    result = phase_1_message_result()
    try:
        result.success = self._handler.phase_1_message(args.p1)
        msg_type = TMessageType.REPLY
    except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
        raise
    except Exception as ex:
        msg_type = TMessageType.EXCEPTION
        logging.exception(ex)
        result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
    oprot.writeMessageBegin("phase_1_message", msg_type, seqid)
    result.write(oprot)
    oprot.writeMessageEnd()
    oprot.trans.flush()