def use_store(self):
    """
    Opens a database to save data

    Connects to the Elasticsearch host taken from ``self.settings``
    (defaulting to ``localhost:9200``), ensures the ``mcp-watch`` index
    exists, stores the client on ``self.db`` and returns it.
    """
    logging.info('Using Elasticsearch database')
    host = self.settings.get('host', 'localhost:9200')
    self.db = Elasticsearch([host])
    try:
        # ignore=400 tolerates "index already exists" responses
        self.db.indices.create(index='mcp-watch', ignore=400)  # may exist
    except ConnectionError:
        logging.error('- unable to connect')
        raise
    return self.db
# Python examples using the ConnectionError class (scraped listing header)
def reset_store(self):
    """
    Opens a database for points

    Reconnects to the Elasticsearch host taken from ``self.settings``
    (defaulting to ``localhost:9200``), ensures the ``mcp-watch`` index
    exists, stores the client on ``self.db`` and returns it.
    """
    logging.info('Resetting Elasticsearch database')
    host = self.settings.get('host', 'localhost:9200')
    self.db = Elasticsearch([host])
    try:
        # ignore=400 tolerates "index already exists" responses
        self.db.indices.create(index='mcp-watch', ignore=400)  # may exist
    except ConnectionError:
        logging.error('- unable to connect')
        raise
    return self.db
def on_bond(self, bot):
    """
    Creates index on space bonding

    Connects to the host reported by ``self.get_host()`` and makes sure
    the index named by ``self.index`` exists. Raises on connection
    failure after logging the error.
    """
    self.db = Elasticsearch([self.get_host()])
    try:
        # ignore=400 tolerates "index already exists" responses
        self.db.indices.create(index=self.index, ignore=400)  # may exist
    except ConnectionError:
        logging.error('- unable to connect')
        raise
def main_loop(self, method, url, params, body, headers=None, ignore=(), timeout=None):
    """
    Performs a request through a pooled connection, retrying on failure.

    Generator-based coroutine (pre-async/await ``yield from`` style).
    Up to ``self.max_retries`` retries are attempted; a connection that
    fails a retried request is marked dead, while a successful one is
    confirmed live in the pool. For ``HEAD`` requests a boolean success
    flag is returned instead of a deserialized body. A ``HEAD`` that
    raises a 404 ``TransportError`` returns ``False``.
    """
    for attempt in range(self.max_retries + 1):
        connection = self.get_connection()
        try:
            status, headers, data = yield from connection.perform_request(
                method, url, params, body, headers=headers, ignore=ignore, timeout=timeout)
        except TransportError as e:
            if method == 'HEAD' and e.status_code == 404:
                return False
            # decide whether this failure is worth another attempt
            if isinstance(e, ConnectionTimeout):
                should_retry = self.retry_on_timeout
            elif isinstance(e, ConnectionError):
                should_retry = True
            else:
                should_retry = e.status_code in self.retry_on_status
            if not should_retry:
                raise
            # only mark as dead if we are retrying
            self.mark_dead(connection)
            # raise exception on last retry
            if attempt == self.max_retries:
                raise
        else:
            if method == 'HEAD':
                return 200 <= status < 300
            # connection didn't fail, confirm it's live status
            self.connection_pool.mark_live(connection)
            if data:
                data = self.deserializer.loads(data, headers.get('content-type'))
            return data
# Source file: test_elasticsearch_driver.py
# Project: Image-search-engine
# Author: praveenKumar88
# (scraped listing metadata: 21 reads, 0 bookmarks, 0 likes, 0 comments)
def test_elasticsearch_running(es):
i = 0
while i < 5:
try:
es.ping()
assert True
return
except ConnectionError:
i += 1
sleep(2)
pytest.fail('Elasticsearch not running (failed to connect after {} tries)'
.format(str(i)))
def index(self, index, doc_type, body):
    """Store a document in Elasticsearch.

    Returns the client's response on success; on a connection error the
    failure is logged with its traceback and an empty dict is returned
    so callers are not interrupted.
    """
    try:
        response = self.es.index(index=index, doc_type=doc_type, body=body)
    except elasticsearch.ConnectionError as e:
        self.logger.exception(
            'Failed to log to elasticsearch: %s', e.error)
        return {}
    else:
        return response
def __init__(self, name, host, port, doc_type, index):
    """
    Configures an ElasticSearch-backed database interface.

    Records the connection coordinates (host, port) and the target
    document type and index, then delegates the remaining setup to
    ``DatabaseInterface.__init__`` with the ElasticSearch-specific
    exception classes and type converter.
    """
    self.host = host
    self.port = port
    self.doc_type = doc_type
    self.index = index
    DatabaseInterface.__init__(
        self,
        name=name,
        db_type='ElasticSearch',
        conn_exception=elasticsearch.ConnectionError,
        execution_exception=elasticsearch.ElasticsearchException,
        type_converter=self.TYPE_CONVERTER,
        deserialize_query=True,
    )
def _get_sniff_data(self, initial=False):
    """
    Queries cluster nodes for the sniffing endpoint and returns node info.

    Fires ``GET /_nodes/_all/http`` at every pooled connection (plus any
    seed connections not already in the pool) in parallel and returns the
    ``nodes`` payload of the first request that both completes and
    deserializes cleanly. Updates ``self.last_sniff`` up front; on any
    error the previous timestamp is restored and the error re-raised.

    :param initial: when True, no timeout is applied to the sniff
        requests (used for the first sniff at startup)
    :raises TransportError: when every sniff request fails
    """
    previous_sniff = self.last_sniff
    # reset last_sniff timestamp
    self.last_sniff = time.time()
    # use small timeout for the sniffing request, should be a fast api call
    timeout = self.sniff_timeout if not initial else None
    tasks = [
        c.perform_request('GET', '/_nodes/_all/http', timeout=timeout)
        # go through all current connections as well as the
        # seed_connections for good measure
        for c in chain(self.connection_pool.connections, (c for c in self.seed_connections if c not in self.connection_pool.connections))
    ]
    done = ()
    try:
        while tasks:
            # execute sniff requests in parallel, wait for first to return
            done, tasks = yield from asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED, loop=self.loop)
            # go through all the finished tasks
            for t in done:
                try:
                    _, headers, node_info = t.result()
                    node_info = self.deserializer.loads(node_info, headers.get('content-type'))
                except (ConnectionError, SerializationError) as e:
                    # fixed: logger.warn is a deprecated alias of logger.warning
                    logger.warning('Sniffing request failed with %r', e)
                    continue
                node_info = list(node_info['nodes'].values())
                return node_info
        else:
            # no task has finished completely
            raise TransportError("N/A", "Unable to sniff hosts.")
    except:
        # bare except is deliberate: restore the previous timestamp on
        # ANY failure (including cancellation) before re-raising
        # keep the previous value on error
        self.last_sniff = previous_sniff
        raise
    finally:
        # clean up pending futures
        for t in chain(done, tasks):
            t.cancel()