async def steal_emoji(self, ctx: DogbotContext, emoji: EmojiStealer, name=None):
    """
    Imports an external emoji into this server.

    You can specify an emoji ID, the custom emoji itself, or "recent" to make the bot scan recent messages
    for a custom emoji that isn't already in this server. If you provide a name, the bot will use it when
    uploading the emoji instead of the name it finds. The name parameter is mandatory if you specify an emoji ID.
    """
    emoji_url = f'https://cdn.discordapp.com/emojis/{emoji[0]}.png'

    if not emoji.name and not name:
        return await ctx.send('You must provide the name for the stolen emoji.')

    # strip colons from the name if they are there
    name = None if not name else name.strip(':')

    msg = await ctx.send('Downloading...')

    try:
        async with ctx.bot.session.get(emoji_url) as emoji_resp:
            emoji_bytes = await emoji_resp.read()
            if emoji_resp.status != 200 or not emoji_bytes:
                return await ctx.send('Failed to download the emoji.')

        # steal
        emoji = await ctx.guild.create_custom_emoji(name=name or emoji.name, image=emoji_bytes)

        # as confirmation, attempt to react to the command message with it, and fall back to ok
        try:
            await msg.edit(content=str(emoji))
            await msg.add_reaction(f'{emoji.name}:{emoji.id}')
        except discord.HTTPException:
            await ctx.ok()
    except aiohttp.ClientError:
        await msg.edit(content='Failed to download the emoji.')
    except discord.HTTPException:
        await msg.edit(content='Failed to upload the emoji.')
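The EmojiStealer converter used above is not defined on this page. A minimal sketch of what such a converter might look like, assuming discord.py's converter protocol; every name below is hypothetical:

from collections import namedtuple
import re

from discord.ext import commands

StolenEmoji = namedtuple('StolenEmoji', 'id name')  # hypothetical (id, name) pair

class EmojiStealer(commands.Converter):
    # Hypothetical converter: accepts a raw emoji ID (the command then
    # requires an explicit name) or a full custom emoji like <:thonk:1234>.
    EMOJI_RE = re.compile(r'<a?:(\w+):(\d+)>')

    async def convert(self, ctx, argument):
        if argument.isdigit():
            return StolenEmoji(int(argument), None)
        match = self.EMOJI_RE.match(argument)
        if match:
            return StolenEmoji(int(match.group(2)), match.group(1))
        raise commands.BadArgument('Cannot resolve that emoji.')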
async def urban(self, ctx: DogbotContext, *, word):
    """Finds UrbanDictionary definitions."""
    async with ctx.channel.typing():
        try:
            result = await UrbanDefinition.query(self.bot.session, word)
        except ClientError:
            return await ctx.send('Failed to look up that word!')

        if not result:
            return await ctx.send('No results.')

        await ctx.send(embed=result.embed)
async def shibe(self, ctx: DogbotContext):
    """Posts a random Shiba Inu picture."""
    async with ctx.typing():
        try:
            resp = await utils.get_json(ctx.bot.session, SHIBE_ENDPOINT)
        except aiohttp.ClientError:
            return await ctx.send('Failed to contact the Shibe API. Please try again later.')

        await ctx.send(embed=discord.Embed().set_image(url=resp[0]))
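SHIBE_ENDPOINT and utils.get_json are not shown on this page. A plausible sketch, assuming the shibe.online API (which returns a JSON array of image URLs) and a thin aiohttp wrapper; both definitions are assumptions:

import aiohttp

SHIBE_ENDPOINT = 'https://shibe.online/api/shibes?count=1'  # assumed endpoint

async def get_json(session: aiohttp.ClientSession, url: str):
    # Assumed helper: GET the URL and decode the body as JSON, letting
    # aiohttp.ClientError subclasses propagate to the caller.
    async with session.get(url) as resp:
        resp.raise_for_status()
        return await resp.json()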
async def __post(self, data):
    with aiohttp.Timeout(self.timeout, loop=self.loop):
        try:
            response = await self.__session.post(str(self.__url), data=data, headers=self.__headers)
        except aiohttp.ClientError as e:
            log.debug('Caught during POST request: %r', e)
            raise ConnectionError(str(self.url))
        else:
            if response.status == CSRF_ERROR_CODE:
                # Send the request again with the CSRF header set
                self.__headers[CSRF_HEADER] = response.headers[CSRF_HEADER]
                log.debug('Setting CSRF header: %s = %s',
                          CSRF_HEADER, response.headers[CSRF_HEADER])
                await response.release()
                return await self.__post(data)

            elif response.status == AUTH_ERROR_CODE:
                await response.release()
                log.debug('Authentication failed')
                raise AuthError(str(self.url))

            else:
                try:
                    answer = await response.json()
                except aiohttp.ClientResponseError:
                    text = textwrap.shorten(await response.text(),
                                            50, placeholder='...')
                    raise RPCError('Server sent malformed JSON: {}'.format(text))
                else:
                    return answer
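The CSRF_ERROR_CODE, AUTH_ERROR_CODE, and CSRF_HEADER constants are not shown in this snippet. For Transmission's RPC protocol they would plausibly be defined as follows (Transmission answers 409 when its session id header is missing and expects the id echoed back); a sketch based on the protocol, not taken from this source:

AUTH_ERROR_CODE = 401  # HTTP Unauthorized: bad credentials
CSRF_ERROR_CODE = 409  # HTTP Conflict: session id missing or stale
CSRF_HEADER = 'X-Transmission-Session-Id'  # header Transmission expects echoed back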
async def __send_request(self, post_data):
    """Send RPC POST request to daemon

    post_data: Any valid RPC request as JSON string

    If applicable, returns response['arguments']['torrents'] or
    response['arguments'], otherwise response.

    Raises ClientError.
    """
    try:
        answer = await self.__post(post_data)
    except OSError as e:
        log.debug('Caught OSError: %r', e)
        raise ConnectionError(str(self.url))
    except asyncio.TimeoutError as e:
        log.debug('Caught TimeoutError: %r', e)
        raise ConnectionError('Timeout after {}s: {}'.format(self.timeout, self.url))
    else:
        if answer['result'] != 'success':
            raise RPCError(answer['result'].capitalize())
        else:
            if 'arguments' in answer:
                if 'torrents' in answer['arguments']:
                    return answer['arguments']['torrents']
                else:
                    return answer['arguments']
            return answer
async def fetch_listing(session, url):
    try:
        data = await fetch_json(session, url)
        return data['prefixes'], data['files']
    except (aiohttp.ClientError, KeyError, ValueError) as e:
        raise ValueError("Could not fetch '{}': {}".format(url, e))
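fetch_json is called throughout the archive-scraping snippets on this page but never defined here; a minimal sketch, assuming it mirrors the get_json helper above and lets aiohttp errors propagate to callers such as fetch_listing:

import aiohttp

async def fetch_json(session: aiohttp.ClientSession, url: str):
    # Assumed helper: 4xx/5xx responses surface as aiohttp.ClientResponseError,
    # a subclass of aiohttp.ClientError, which the callers above catch.
    async with session.get(url) as response:
        response.raise_for_status()
        return await response.json()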
async def fetch_release_candidate_metadata(session, record):
    """A JSON file containing build info is published along the release candidate archive."""
    global _rc_metadata

    url = record['download']['url']

    # Make sure the RC URL is turned into an en-US one.
    rc_url = localize_release_candidate_url(url)

    if rc_url in _rc_metadata:
        return _rc_metadata[rc_url]

    product = record['source']['product']
    if product == 'devedition':
        product = 'firefox'

    if product == 'fennec':
        metadata_url = re.sub(r'\.({})$'.format(FILE_EXTENSIONS), '.json', rc_url)
    else:
        major_version = record['target']['version'].split('rc')[0]
        parts = rc_url.split('/')
        parts[-1] = '{}-{}.json'.format(product, major_version)
        metadata_url = '/'.join(parts)

    try:
        metadata = await fetch_json(session, metadata_url)
    except aiohttp.ClientError:
        # Old RCs like https://archive.mozilla.org/pub/firefox/releases/1.0rc1/
        # don't have metadata.
        logger.warning("Could not fetch metadata for '%s' from '%s'" % (record['id'],
                                                                        metadata_url))
        _rc_metadata[rc_url] = None  # Don't try it anymore.
        return None

    m = re.search(r'/build(\d+)/', url)
    metadata['buildnumber'] = int(m.group(1))

    _rc_metadata[rc_url] = metadata
    return metadata
async def _do_http_request(self, url, body, headers):
    try:
        return await self._do_http_request_impl(url, body, headers)
    except asyncio.TimeoutError:
        raise PyVLXException("Request timeout when talking to VELUX API")
    except aiohttp.ClientError:
        raise PyVLXException("HTTP error when talking to VELUX API")
    except OSError:
        raise PyVLXException("OS error when talking to VELUX API")
async def booru(self, ctx, booru, tags):
    # channel topic can be None, so fall back to an empty string
    if '[jose:no_nsfw]' in (ctx.channel.topic or ''):
        return

    # taxxx
    await self.jcoin.pricing(ctx, self.prices['API'])

    try:
        # grab posts
        posts = await booru.get_posts(ctx.bot, tags)

        if not posts:
            return await ctx.send('Found nothing.')

        # grab a random post
        post = random.choice(posts)
        post_id = post.get('id')
        post_author = booru.get_author(post)

        log.info('%d posts from %s, chose %d', len(posts),
                 booru.__name__, post_id)

        # escape markdown in tags and cap their length
        tags = (post['tags'].replace('_', '\\_'))[:500]

        # add stuffs
        embed = discord.Embed(title=f'Posted by {post_author}')
        embed.set_image(url=post['file_url'])
        embed.add_field(name='Tags', value=tags)
        embed.add_field(name='URL', value=booru.url_post.format(post_id))

        # hypnohub doesn't have this
        if 'fav_count' in post and 'score' in post:
            embed.add_field(name='Votes/Favorites',
                            value=f"{post['score']} votes, {post['fav_count']} favorites")

        # send
        await ctx.send(embed=embed)
    except BooruError as err:
        raise self.SayException(f'Error while fetching posts: `{err!r}`')
    except aiohttp.ClientError as err:
        log.exception('client error')
        raise self.SayException(f'Something went wrong. Sorry! `{err!r}`')
async def fetch(url, session, params, method='GET', t=10):
    # The request itself must sit inside the try block, otherwise the
    # TimeoutError/ClientError handler below can never fire.
    try:
        with timeout(t):
            return await session.request(method.lower(), url, params=params)
    except (asyncio.TimeoutError, aiohttp.ClientError) as err:
        LOG.exception(err)
async def process(self, task):
    status, tasks, items = 0, set(), set()

    async with self.manager.semaphore:
        try:
            request = Request(url=task.url)
            request = await self.pipeline.requests.process(request)
            request = await self.middleware.http.before(request)
            response = await self.downloader.process(request)
            response = await self.middleware.http.after(response)
            response = await self.pipeline.responses.process(response)

            if response.status in constants.HTTP_FAILED:
                status = response.status
            else:
                tasks, items = await self.spider.process(task=task, response=response)
                if tasks:
                    tasks = await self.pipeline.tasks.process(tasks)
                if items:
                    items = await self.pipeline.items.process(items)
                await self.pipeline.stats.process(stats=await self.stats())
        except aiohttp.ClientError as e:
            log.exception(e)
            status = constants.status.RETRIAL
            self.session.close()
        except Exception as e:
            log.exception(e)
            status = constants.status.FAILED

    result = Result(status=status, task=task, tasks=tasks, items=items)
    await self.manager.process(result=result)
    return result
async def request(self):
    """Refresh drawing plan data."""
    current_time = int(time.time())
    url = DRAWING_DATA_URL.format(current_time)
    try:
        async with self.session.get(url) as resp:
            data = await resp.json(content_type=None)

            self.start_x = data['startX']
            self.start_y = data['startY']
            self.colours = data['colors']
            self.kill = data['kill']
            self.version = data['newVersion']

            self.height = len(self.colours)
            if self.height > 0:
                self.width = max(len(row) for row in self.colours)
            else:
                self.width = 0

            logger.debug("Successfully updated drawing plan.")
            logger.debug("Start X: %d, start y: %d, kill: %s",
                         self.start_x, self.start_y, self.kill)
            return True
    except (aiohttp.ClientError, KeyError) as e:
        logger.exception(e)
        return False
async def run_loop(context, creds_key="credentials"):
    """Split this out of the async_main while loop for easier testing.

    Args:
        context (scriptworker.context.Context): the scriptworker context.
        creds_key (str, optional): when reading the creds file, this dict key
            corresponds to the credentials value we want to use. Defaults to
            "credentials".

    Returns:
        int: status
        None: if no task run.
    """
    loop = asyncio.get_event_loop()
    tasks = await claim_work(context)
    status = None
    if not tasks:
        await asyncio.sleep(context.config['poll_interval'])
        return status

    # Assume only a single task, but should more than one fall through,
    # run them sequentially.  A side effect is our return status will
    # be the status of the final task run.
    for task_defn in tasks.get('tasks', []):
        status = 0
        prepare_to_run_task(context, task_defn)
        loop.create_task(reclaim_task(context, context.task))
        try:
            if context.config['verify_chain_of_trust']:
                chain = ChainOfTrust(context, context.config['cot_job_type'])
                await verify_chain_of_trust(chain)
            status = await run_task(context)
            generate_cot(context)
        except ScriptWorkerException as e:
            status = worst_level(status, e.exit_code)
            log.error("Hit ScriptWorkerException: {}".format(e))
        try:
            await upload_artifacts(context)
        except ScriptWorkerException as e:
            status = worst_level(status, e.exit_code)
            log.error("Hit ScriptWorkerException: {}".format(e))
        except aiohttp.ClientError as e:
            status = worst_level(status, STATUSES['intermittent-task'])
            log.error("Hit aiohttp error: {}".format(e))
        await complete_task(context, status)
        cleanup(context)
    return status
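The docstring above says run_loop was split out of the async_main while loop; a minimal sketch of that caller, assuming nothing beyond what the docstring states:

async def async_main(context):
    # Hypothetical surrounding loop: poll for work and run it, one
    # run_loop() iteration at a time.
    while True:
        await run_loop(context)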
def __getattr__(self, method):
    """Return asyncio coroutine that sends RPC request and returns response

    method: Any method from the RPC specs with every '-' replaced with '_'.
            For arguments see the RPC specs.

    Example:
        >>> stats = await client.session_stats()
        >>> torrents = await client.torrent_get(ids=(1,2,3), fields=('status','name'))

    Raises RPCError, ConnectionError, AuthError
    """
    async def request(arguments={}, autoconnect=True, **kwargs):
        async with self.__request_lock:
            if not self.connected:
                if autoconnect:
                    log.debug('Autoconnecting for %r', method)
                    await self.connect()
                else:
                    log.debug('Not connected and autoconnect=%r - %r returns None',
                              autoconnect, method)
                    return None

            # Copy before merging so the shared default dict isn't mutated.
            arguments = {**arguments, **kwargs}
            rpc_request = json.dumps({'method': method.replace('_', '-'),
                                      'arguments': arguments})

            try:
                return await self.__send_request(rpc_request)
            except ClientError as e:
                log.debug('Caught ClientError in %r request: %r', method, e)

                # RPCError does not mean the host is unreachable, there was just
                # a misunderstanding, so we're still connected.
                if not isinstance(e, RPCError) and self.connected:
                    await self.disconnect(str(e))

                self.__on_error.send(self.__url, error=e)
                raise

    request.__name__ = method
    request.__qualname__ = method
    return request
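A hedged usage sketch of the dynamic proxy above: attribute access manufactures a coroutine, and the underscored attribute name is mapped back to the dashed RPC method on the wire. The Client constructor and URL are assumptions:

import asyncio

async def main():
    client = Client('http://localhost:9091/transmission/rpc')  # assumed constructor
    # 'torrent_get' is sent over the wire as the RPC method 'torrent-get'.
    torrents = await client.torrent_get(fields=('id', 'name', 'status'))
    for torrent in torrents:
        print(torrent['id'], torrent['name'])

asyncio.get_event_loop().run_until_complete(main())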
async def fetch_nightly_metadata(session, record):
    """A JSON file containing build info is published along the nightly build archive."""
    global _nightly_metadata

    url = record['download']['url']

    # Make sure the nightly URL is turned into an en-US one.
    nightly_url = localize_nightly_url(url)

    if nightly_url in _nightly_metadata:
        return _nightly_metadata[nightly_url]

    try:
        metadata_url = re.sub(r'\.({})$'.format(FILE_EXTENSIONS), '.json', nightly_url)
        metadata = await fetch_json(session, metadata_url)
        _nightly_metadata[nightly_url] = metadata
        return metadata
    except aiohttp.ClientError:
        # Very old nightly metadata is published as .txt files.
        try:
            # e.g. https://archive.mozilla.org/pub/firefox/nightly/2011/05/
            # 2011-05-05-03-mozilla-central/firefox-6.0a1.en-US.mac.txt
            old_metadata_url = re.sub(r'\.({})$'.format(FILE_EXTENSIONS), '.txt', nightly_url)
            async with session.get(old_metadata_url) as response:
                old_metadata = await response.text()
                m = re.search(r'^(\d+)\n(http.+)/rev/(.+)$', old_metadata)
                if m:
                    metadata = {
                        'buildid': m.group(1),
                        'moz_source_repo': m.group(2),
                        'moz_source_stamp': m.group(3),
                    }
                    _nightly_metadata[nightly_url] = metadata
                    return metadata
                # e.g. https://archive.mozilla.org/pub/firefox/nightly/2010/07/2010-07-04-05
                # -mozilla-central/firefox-4.0b2pre.en-US.win64-x86_64.txt
                m = re.search(r'^(\d+) (.+)$', old_metadata)
                if m:
                    metadata = {
                        'buildid': m.group(1),
                        'moz_source_stamp': m.group(2),
                        'moz_source_repo': 'http://hg.mozilla.org/mozilla-central',
                    }
                    _nightly_metadata[nightly_url] = metadata
                    return metadata
        except aiohttp.ClientError:
            pass

        logger.warning("Could not fetch metadata for '%s' from '%s'" % (record['id'],
                                                                        metadata_url))
        _nightly_metadata[url] = None  # Don't try it anymore.
        return None
async def fetch_release_metadata(session, record):
    """The `candidates` folder contains build info about recent released versions."""
    global _candidates_build_folder

    product = record['source']['product']
    version = record['target']['version']
    platform = record['target']['platform']
    locale = 'en-US'

    try:
        latest_build_folder = _candidates_build_folder[product][version]
    except KeyError:
        # Version is not listed in candidates. Give up.
        return None

    build_number = int(latest_build_folder.strip('/')[-1])  # build3 -> 3

    # Metadata for EME-free and sha1 repacks is the same as for the original release.
    platform = re.sub('-(eme-free|sha1)', '', platform, flags=re.I)

    url = archive_url(product, version, platform, locale, candidate='/' + latest_build_folder)

    # We already have the metadata for this platform and version.
    if url in _release_metadata:
        return _release_metadata[url]

    try:
        _, files = await fetch_listing(session, url)
    except ValueError:
        # Some partial updates don't have metadata. e.g. /47.0.1-candidates/
        _release_metadata[url] = None
        return None

    for f in files:
        filename = f['name']
        if is_release_build_metadata(product, version, filename):
            try:
                metadata = await fetch_json(session, url + filename)
                metadata['buildnumber'] = build_number
                _release_metadata[url] = metadata
                return metadata
            except aiohttp.ClientError:
                # Sometimes, some XML comes out \o/ (see #259)
                pass

    # The version exists in candidates but has no metadata!
    _release_metadata[url] = None  # Don't try it anymore.
    raise ValueError('Missing metadata for candidate {}'.format(url))
async def fetch(self, url, max_redirect):
    tries = 0
    exception = None
    while tries < self.max_tries:
        try:
            response = await self.session.get(
                url, allow_redirects=False)
            break
        except aiohttp.ClientError as client_error:
            exception = client_error
        tries += 1
    else:
        # All attempts failed; record the failure and give up on this URL.
        self.record_statistic(FetchStatistic(url=url,
                                             next_url=None,
                                             status=None,
                                             exception=exception,
                                             size=0,
                                             content_type=None,
                                             encoding=None,
                                             num_urls=0,
                                             num_new_urls=0))
        return

    try:
        if is_redirect(response):
            location = response.headers['location']
            next_url = urllib.parse.urljoin(url, location)
            self.record_statistic(FetchStatistic(url=url,
                                                 next_url=next_url,
                                                 status=response.status,
                                                 exception=None,
                                                 size=0,
                                                 content_type=None,
                                                 encoding=None,
                                                 num_urls=0,
                                                 num_new_urls=0))

            if next_url in self.seen_urls:
                return
            if max_redirect > 0:
                self.add_url(next_url, max_redirect - 1)
            else:
                print('redirect limit reached for %r from %r' % (next_url, url))
        else:
            stat, links = await self.parse_links(response)
            self.record_statistic(stat)
            for link in links.difference(self.seen_urls):
                self.q.put_nowait((link, self.max_redirect))
            self.seen_urls.update(links)
    finally:
        await response.release()
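is_redirect is referenced above but not defined on this page; a minimal sketch, assuming the conventional Location-bearing status codes:

def is_redirect(response):
    # Assumed helper: statuses that carry a Location header to follow.
    return response.status in (300, 301, 302, 303, 307)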