async def open_connection(self, identity):
    logger.debug('Opening connection to backend')
assert not self._websocket, "Connection to backend already opened"
try:
self._websocket = await websockets.connect(self.url)
# Handle handshake
raw = await self._websocket.recv()
challenge = ejson_loads(raw)
answer = identity.private_key.sign(challenge['challenge'].encode())
await self._websocket.send(ejson_dumps({
'handshake': 'answer',
'identity': identity.id,
'answer': to_jsonb64(answer)
}))
resp = ejson_loads(await self._websocket.recv())
if resp['status'] != 'ok':
await self.close_connection()
raise exception_from_status(resp['status'])(resp['label'])
self._ws_recv_handler_task = asyncio.ensure_future(
self._ws_recv_handler(), loop=self.loop)
if self.watchdog_time:
self._watchdog_task = asyncio.ensure_future(self._watchdog(), loop=self.loop)
except (ConnectionRefusedError, websockets.exceptions.ConnectionClosed) as exc:
raise BackendConnectionError('Cannot connect to backend (%s)' % exc)
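# A minimal usage sketch (hypothetical names: `conn` is an instance of the class
# owning open_connection, `identity` exposes .id and .private_key.sign()):
async def demo(conn, identity):
    await conn.open_connection(identity)  # performs the signed-challenge handshake
    try:
        ...  # issue commands over the authenticated websocket
    finally:
        await conn.close_connection()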
async def _ws_recv_handler(self):
    # Command responses and notifications are all sent through the same
    # websocket, so demultiplex them here: command responses are routed
    # to a queue, events are dispatched on the signal namespace.
while True:
raw = await self._websocket.recv()
try:
if isinstance(raw, bytes):
raw = raw.decode()
recv = ejson_loads(raw)
if 'status' in recv:
# Message response
self._resp_queue.put_nowait(recv)
else:
# Event
self._signal_ns.signal(recv['event']).send(recv['sender'])
except (KeyError, TypeError, json.JSONDecodeError):
            # Malformed message from the backend: log it and keep listening.
            logger.warning('Backend server sent invalid message: %s', raw)
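# Sketch of the sending side this handler pairs with (the method name is an
# assumption, not the project's actual API): the command is written to the
# websocket and the matching response is taken from the queue that
# _ws_recv_handler feeds.
async def _send_cmd(self, msg):
    await self._websocket.send(ejson_dumps(msg))
    return await self._resp_queue.get()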
async def perform_vlob_read(self, intent):
async with self.connection.acquire() as conn:
async with conn.cursor() as cur:
if intent.version:
await cur.execute("SELECT * FROM vlobs WHERE "
"id=%s AND version=%s;", (intent.id, intent.version))
else:
await cur.execute("SELECT * FROM vlobs WHERE "
"id=%s ORDER BY version DESC;", (intent.id, ))
ret = await cur.fetchone()
if not ret:
raise VlobNotFound('Vlob not found.')
_, version, rts, wts, blob = ret
if rts != intent.trust_seed:
raise TrustSeedError('Invalid read trust seed.')
return VlobAtom(id=intent.id, version=version, read_trust_seed=rts,
write_trust_seed=wts, blob=bytes(blob))
async def _perform_vlob_update(self, intent):
async with self.connection.acquire() as conn:
async with conn.cursor() as cur:
await cur.execute("SELECT version, read_trust_seed, write_trust_seed FROM "
"vlobs WHERE id=%s ORDER BY version DESC;", (intent.id, ))
ret = await cur.fetchone()
if ret is None:
raise VlobNotFound('Vlob not found.')
last_version, rts, wts = ret
if wts != intent.trust_seed:
raise TrustSeedError('Invalid write trust seed.')
if intent.version != last_version + 1:
raise VlobNotFound('Wrong blob version.')
            # TODO: the insertion doesn't atomically check the version
            # (see the sketch after this method)
await cur.execute("INSERT INTO vlobs VALUES (%s, %s, %s, %s, %s);",
(intent.id, intent.version, rts, wts, intent.blob))
await cur.execute("NOTIFY vlob_updated, %s", (intent.id, ))
@property
def backend_layer(self):
if self._backend_layer is None:
cache_key = (self.backend_type, dict_to_hashable(self.backend_layer_kwargs))
if cache_key not in self._backend_layer_cache:
with self._get_timer('backend.initialize'):
backend_layer_kwargs = deepcopy(self.backend_layer_kwargs)
if self.backend_type == REDIS_BACKEND_TYPE_SENTINEL:
self._backend_layer_cache[cache_key] = SentinelRedisClient(**backend_layer_kwargs)
else:
self._backend_layer_cache[cache_key] = StandardRedisClient(**backend_layer_kwargs)
self._backend_layer = self._backend_layer_cache[cache_key]
# Each time the backend layer is accessed, use _this_ transport's metrics recorder for the backend layer
self._backend_layer.metrics_counter_getter = lambda name: self._get_counter(name)
return self._backend_layer
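# The cache key above needs a hashable view of the kwargs dict. dict_to_hashable
# is the project's helper; a minimal sketch of what it might do (an assumption,
# not the actual implementation):
def dict_to_hashable(d):
    # Recursively freeze a dict into a sorted tuple of (key, value) pairs.
    return tuple(sorted(
        (k, dict_to_hashable(v) if isinstance(v, dict) else v)
        for k, v in d.items()
    ))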
def test_fallback(cl_and_vals):
"""The fallback case works."""
cl, vals = cl_and_vals
assume(attr.fields(cl)) # At least one field.
@attr.s
class A(object):
pass
fn = create_uniq_field_dis_func(A, cl)
assert fn({}) is A
assert fn(attr.asdict(cl(*vals))) is cl
attr_names = {a.name for a in attr.fields(cl)}
if 'xyz' not in attr_names:
        assert fn({'xyz': 1}) is A  # Uses the fallback.
def test_optional_field_roundtrip(converter, cl_and_vals):
"""
Classes with optional fields can be unstructured and structured.
"""
cl, vals = cl_and_vals
@attr.s
class C(object):
a = attr.ib(type=Optional[cl])
inst = C(a=cl(*vals))
assert inst == converter.structure(converter.unstructure(inst), C)
inst = C(a=None)
unstructured = converter.unstructure(inst)
assert inst == converter.structure(unstructured, C)
def retry_pattern(backoff_type, exception, **wait_gen_kwargs):
def log_retry_attempt(details):
_, exception, _ = sys.exc_info()
LOGGER.info(exception)
LOGGER.info('Caught retryable error after %s tries. Waiting %s more seconds then retrying...',
details["tries"],
details["wait"])
def should_retry_api_error(exception):
if isinstance(exception, FacebookRequestError):
return exception.api_transient_error()
elif isinstance(exception, InsightsJobTimeout):
return True
return False
return backoff.on_exception(
backoff_type,
exception,
jitter=None,
on_backoff=log_retry_attempt,
giveup=lambda exc: not should_retry_api_error(exc),
**wait_gen_kwargs
)
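# Hypothetical usage as a decorator factory: max_tries is consumed by
# backoff.on_exception, while factor is forwarded to the wait generator
# (backoff.expo) via **wait_gen_kwargs.
@retry_pattern(backoff.expo, (FacebookRequestError, InsightsJobTimeout),
               max_tries=5, factor=5)
def call_api():
    ...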
def advance_bookmark(stream, bookmark_key, date):
tap_stream_id = stream.name
state = stream.state or {}
LOGGER.info('advance(%s, %s)', tap_stream_id, date)
date = pendulum.parse(date) if date else None
current_bookmark = get_start(stream, bookmark_key)
if date is None:
        LOGGER.info('Did not get a date for stream %s; '
                    'not advancing bookmark',
                    tap_stream_id)
elif not current_bookmark or date > current_bookmark:
        LOGGER.info('Bookmark for stream %s is currently %s, '
                    'advancing to %s',
                    tap_stream_id, current_bookmark, date)
state = singer.write_bookmark(state, tap_stream_id, bookmark_key, str(date))
else:
        LOGGER.info('Bookmark for stream %s is currently %s; '
                    'not changing to %s',
                    tap_stream_id, current_bookmark, date)
return state
def get_time_for_session(task, time):
cd = datetime.date.today()
if task.cutoff == "week":
cutoff_time = datetime.datetime(cd.year, cd.month, cd.day)
        cutoff_delta = datetime.timedelta(days=cutoff_time.weekday())
        cutoff_time = (cutoff_time - cutoff_delta).timestamp()
    elif task.cutoff == "month":
        cutoff_time = datetime.datetime(cd.year, cd.month, 1).timestamp()
    else:
        # No recognized cutoff: count all sessions.
        cutoff_time = 0
qualifiers = filter(lambda t: t.started > cutoff_time, time)
time_spent_this_per = sum(map(
lambda s: s.finished - s.started, qualifiers))
return time_spent_this_per
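# A minimal usage sketch; Task and Session are hypothetical stand-ins for the
# real objects (only .cutoff, .started and .finished are used, as epoch seconds).
from collections import namedtuple

Task = namedtuple('Task', 'cutoff')
Session = namedtuple('Session', 'started finished')

sessions = [Session(started=1700000000.0, finished=1700003600.0)]
get_time_for_session(Task(cutoff="month"), sessions)  # -> seconds spent this month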
def _reads_exist(self, attribute, value):
if isinstance(value, str):
if not os.path.exists(value):
raise FileNotFoundError('Reads file {} does not exist!'.format(value))
if not os.path.isfile(value):
raise OSError('{} is not a valid reads file'.format(value))
elif isinstance(value, list):
for x in value:
            if not isinstance(x, str):
                raise TypeError(
                    'Reads file not specified as a string or list of strings: type={} "{}"'.format(type(x), x))
if not os.path.exists(x):
raise FileNotFoundError('Reads file {} does not exist!'.format(x))
if not os.path.isfile(x):
raise OSError('{} is not a valid reads file'.format(x))
    else:
        raise TypeError(
            'Reads file(s) not specified as a string or list of strings: type={} "{}"'.format(type(value), value))
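# _reads_exist has the attrs validator signature (instance, attribute, value);
# a sketch of how it might be wired up (class and field names are assumptions):
@attr.s
class ReadsConfig(object):
    reads = attr.ib(validator=_reads_exist)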
def _create_tmp_folder(self):
count = 1
tmp_dir = self.tmp_dir
while True:
try:
logging.info('Trying to create analysis directory at: %s', tmp_dir)
os.makedirs(tmp_dir)
break
except OSError as e:
logging.warning('Error on creation of tmp analysis directory "{}"! {}'.format(
tmp_dir,
e
))
tmp_dir = '{}_{}'.format(self.tmp_dir, count)
count += 1
self.tmp_dir = tmp_dir
return self.tmp_dir
def _create_tmp_folder(self):
count = 1
tmp_dir = self.tmp_work_dir
while True:
try:
logging.info('Trying to create analysis directory at: %s', tmp_dir)
os.makedirs(tmp_dir)
break
except OSError as e:
logging.warning('Error on creation of tmp analysis directory "{}"! {}'.format(
tmp_dir,
e
))
tmp_dir = '{}_{}'.format(self.tmp_work_dir, count)
count += 1
self.tmp_work_dir = tmp_dir
return self.tmp_work_dir
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--silent', action='store_true',
help="don't print time logs")
args = parser.parse_args()
Notify.init('Tomato')
gbulb.install()
loop = asyncio.get_event_loop()
tomato = Tomato(silent=args.silent)
try:
loop.run_until_complete(tomato.run())
except KeyboardInterrupt:
pass
finally:
tomato.close()
def test_secret_str_no_repr(self):
"""
Outside of reprs, _SecretStr behaves normally.
"""
s = _SecretStr("abc")
assert "'abc'" == repr(s)
def test_secret_str_censors(self):
"""
    _SecretStr censors its __repr__ if it's called from another __repr__.
"""
s = _SecretStr("abc")
@attr.s
class Cfg(object):
s = attr.ib()
assert "Cfg(s=<SECRET>)" == repr(Cfg(s))
def config(maybe_cls=None, prefix="APP"):
def wrap(cls):
cls._prefix = prefix
return attr.s(cls, slots=True)
if maybe_cls is None:
return wrap
else:
return wrap(maybe_cls)
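# Hypothetical usage: both the bare and the parametrized form work, since wrap
# is returned when maybe_cls is None and applied directly otherwise.
@config
class AppCfg(object):
    host = attr.ib(default="localhost")

@config(prefix="MYAPP")
class OtherCfg(object):
    port = attr.ib(default=8080)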
def _env_to_bool(val):
"""
Convert *val* to a bool if it's not a bool in the first place.
"""
if isinstance(val, bool):
return val
val = val.strip().lower()
if val in ("1", "true", "yes"):
return True
return False
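# Behaviour sketch: bools pass through, strings are stripped and lowercased.
assert _env_to_bool(True) is True
assert _env_to_bool(" Yes ") is True
assert _env_to_bool("0") is False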
def _print_prop_summary(prop, outcome, outfile=sys.stdout):
name = prop.name
failed_impl = prop.failed_implications
depth = outcome.state['depth']
n = outcome.state['calls']
if failed_impl:
outfile.write('After {} call(s) ({} did not meet implication)\n'.format(n, failed_impl))
else:
outfile.write('After {} call(s)\n'.format(n))
outfile.write('To depth {}\n'.format(depth))
outfile.write('In property `{}`\n'.format(name))
outfile.write('\n')
def passed(self):
return all(s.passed for s in self.pipelines)