def read(fd, n):
    """read(fd, buffersize) -> string
    Read a file descriptor."""
    while True:
        try:
            return __original_read__(fd, n)
        except (OSError, IOError) as e:
            if get_errno(e) != errno.EAGAIN:
                raise
        except socket.error as e:
            if get_errno(e) == errno.EPIPE:
                return ''
            raise
        try:
            hubs.trampoline(fd, read=True)
        except hubs.IOClosed:
            return ''
Python errno.EPIPE usage examples (source code)
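Before the individual project snippets, here is a minimal, self-contained sketch (not taken from any of the projects below) of the pattern they share: on POSIX, writing to a pipe or socket whose reader has gone away fails with errno.EPIPE, and the snippets on this page treat that as end-of-stream rather than a fatal error. CPython ignores SIGPIPE at startup, so the failed write surfaces as an OSError (BrokenPipeError on Python 3).

import errno
import os

# Illustrative only: trigger and tolerate EPIPE on a plain OS pipe.
r, w = os.pipe()
os.close(r)                      # the reader goes away
try:
    os.write(w, b"payload")      # write to a pipe with no reader
except OSError as e:
    if e.errno == errno.EPIPE:
        pass                     # peer is gone; treat as end-of-stream
    else:
        raise
finally:
    os.close(w)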
def get_resp(self, timeout=180.0):
    rl = select((self.sock, ), (), (), timeout)[0]
    if not rl:
        raise RobotError("get resp timeout")
    bml = msg_waitall(self.sock, 2, timeout)
    if not bml:
        logger.error("Message payload recv error")
        raise socket.error(EPIPE, "Broken pipe")
    message_length = struct.unpack("<H", bml)[0]
    message = b""
    while len(message) != message_length:
        buf = self.sock.recv(message_length - len(message))
        if not buf:
            logger.error("Recv empty message")
            raise socket.error(EPIPE, "Broken pipe")
        message += buf
    return message
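get_resp relies on a msg_waitall helper that is not shown here; judging only from how it is called, it receives exactly N bytes within a timeout and returns something falsy on failure. A hedged sketch of such a helper follows; this is an assumption, not the project's actual implementation.

import select

def msg_waitall(sock, length, timeout):
    # Assumed behaviour: collect exactly `length` bytes, or return b'' if
    # the peer closes the connection or the timeout expires.
    buf = b""
    while len(buf) < length:
        if not select.select((sock, ), (), (), timeout)[0]:
            return b""
        chunk = sock.recv(length - len(buf))
        if not chunk:
            return b""
        buf += chunk
    return buf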
def print_history(self):
    """
    Print database information to STDOUT
    """
    try:
        print("History of Nodes")
        print("=========")
        self.cur.execute("SELECT * FROM NodeStates ORDER BY datetime(Time) DESC")
        rows = self.cur.fetchall()
        for row in rows:
            print("%s | %s | %s | '%s'" % (row[0], row[3], TrackNodes.decode_state(row[1]), row[2]))
        print("")
    except IOError as e:
        if e.errno == errno.EPIPE:
            # Perhaps output was piped to less and was quit prior to EOF
            return
def _publish_status(self, status_socket, status_info):
    """Publish service status on the incoming connection on socket
    """
    with contextlib.closing(status_socket.accept()[0]) as clt:
        clt_stream = clt.makefile(mode='w')
        try:
            yaml.dump(status_info,
                      explicit_start=True, explicit_end=True,
                      default_flow_style=False,
                      stream=clt_stream)
            clt_stream.flush()
        except socket.error as err:
            if err.errno == errno.EPIPE:
                pass
            else:
                raise
def do_GET(self):
    query = self.path.split("?", 1)[-1]
    filepath = urllib.unquote_plus(query)
    self.suppress_socket_error_report = None
    self.send_headers(filepath)
    print "sending data"
    try:
        self.write_response(filepath)
    except socket.error, e:
        if isinstance(e.args, tuple):
            if e[0] in (errno.EPIPE, errno.ECONNRESET):
                print "disconnected"
                self.suppress_socket_error_report = True
                return
        raise
def _async_recv_msg(self):
    """Internal use only; use 'recv_msg' with 'yield' instead.
    Message is tagged with length of the payload (data). This method
    receives length of payload, then the payload and returns the payload.
    """
    n = AsyncSocket._MsgLengthSize
    try:
        data = yield self.recvall(n)
    except socket.error as err:
        if err.args[0] == 'hangup':
            # raise socket.error(errno.EPIPE, 'Insufficient data')
            raise StopIteration('')
        else:
            raise
    if len(data) != n:
        # raise socket.error(errno.EPIPE, 'Insufficient data: %s / %s' % (len(data), n))
        raise StopIteration('')
    n = struct.unpack('>L', data)[0]
    # assert n >= 0
    if n:
        try:
            data = yield self.recvall(n)
        except socket.error as err:
            if err.args[0] == 'hangup':
                # raise socket.error(errno.EPIPE, 'Insufficient data')
                raise StopIteration('')
            else:
                raise
        if len(data) != n:
            # raise socket.error(errno.EPIPE, 'Insufficient data: %s / %s' % (len(data), n))
            raise StopIteration('')
        raise StopIteration(data)
    else:
        raise StopIteration('')
def _sync_recv_msg(self):
    """Internal use only; use 'recv_msg' instead.
    Synchronous version of async_recv_msg.
    """
    n = AsyncSocket._MsgLengthSize
    try:
        data = self._sync_recvall(n)
    except socket.error as err:
        if err.args[0] == 'hangup':
            # raise socket.error(errno.EPIPE, 'Insufficient data')
            return ''
        else:
            raise
    if len(data) != n:
        # raise socket.error(errno.EPIPE, 'Insufficient data: %s / %s' % (len(data), n))
        return ''
    n = struct.unpack('>L', data)[0]
    # assert n >= 0
    if n:
        try:
            data = self._sync_recvall(n)
        except socket.error as err:
            if err.args[0] == 'hangup':
                # raise socket.error(errno.EPIPE, 'Insufficient data')
                return ''
            else:
                raise
        if len(data) != n:
            # raise socket.error(errno.EPIPE, 'Insufficient data: %s / %s' % (len(data), n))
            return ''
        return data
    else:
        return ''
def _async_recv_msg(self):
    """Internal use only; use 'recv_msg' with 'yield' instead.
    Message is tagged with length of the payload (data). This method
    receives length of payload, then the payload and returns the payload.
    """
    n = AsyncSocket._MsgLengthSize
    try:
        data = yield self.recvall(n)
    except socket.error as err:
        if err.args[0] == 'hangup':
            # raise socket.error(errno.EPIPE, 'Insufficient data')
            raise StopIteration(b'')
        else:
            raise
    if len(data) != n:
        # raise socket.error(errno.EPIPE, 'Insufficient data: %s / %s' % (len(data), n))
        raise StopIteration(b'')
    n = struct.unpack('>L', data)[0]
    # assert n >= 0
    if n:
        try:
            data = yield self.recvall(n)
        except socket.error as err:
            if err.args[0] == 'hangup':
                # raise socket.error(errno.EPIPE, 'Insufficient data')
                raise StopIteration(b'')
            else:
                raise
        if len(data) != n:
            # raise socket.error(errno.EPIPE, 'Insufficient data: %s / %s' % (len(data), n))
            raise StopIteration(b'')
        raise StopIteration(data)
    else:
        raise StopIteration(b'')
def _sync_recv_msg(self):
    """Internal use only; use 'recv_msg' instead.
    Synchronous version of async_recv_msg.
    """
    n = AsyncSocket._MsgLengthSize
    try:
        data = self._sync_recvall(n)
    except socket.error as err:
        if err.args[0] == 'hangup':
            # raise socket.error(errno.EPIPE, 'Insufficient data')
            return b''
        else:
            raise
    if len(data) != n:
        # raise socket.error(errno.EPIPE, 'Insufficient data: %s / %s' % (len(data), n))
        return b''
    n = struct.unpack('>L', data)[0]
    # assert n >= 0
    if n:
        try:
            data = self._sync_recvall(n)
        except socket.error as err:
            if err.args[0] == 'hangup':
                # raise socket.error(errno.EPIPE, 'Insufficient data')
                return b''
            else:
                raise
        if len(data) != n:
            # raise socket.error(errno.EPIPE, 'Insufficient data: %s / %s' % (len(data), n))
            return b''
        return data
    else:
        return b''
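The four recv helpers above all assume the same length-prefixed framing: a fixed-size big-endian unsigned 32-bit length header (struct format '>L', so AsyncSocket._MsgLengthSize is presumably struct.calcsize('>L') == 4) followed by the payload. For orientation, a minimal sketch of the matching sender side; send_msg here is illustrative and not taken from the library's source.

import struct

def send_msg(sock, payload):
    # Prepend a 4-byte big-endian length header, then send the payload bytes.
    header = struct.pack('>L', len(payload))
    sock.sendall(header + payload)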
sam-discard-dups.py (project: personal-identification-pipeline, author: TeamErlich)
def get_dup_read_ids(filename):
    """
    Reads a SAM file, returns a set() of duplicated reads
    (reads which are listed more than once)
    """
    try:
        seen_ids = set()
        dup_ids = set()
        sam = file(filename, 'r')
        for linenum, line in enumerate(sam):
            err = "input error in '%s' line %d: " % (filename, linenum+1)
            line = line.strip()
            if line[:1] == '@':
                continue
            flds = line.split('\t')
            read_id = flds[0]
            if read_id in seen_ids:
                dup_ids.add(read_id)
            seen_ids.add(read_id)
        return dup_ids
    except IOError as e:
        if e.errno == errno.EPIPE:
            sys.exit(0)  # exit silently
        # TODO: this is a cop-out, hard to tell what's the exact error
        # and give informative, useful error message to the user.
        sys.exit("I/O error: %s" % (str(e)))
sam-discard-dups.py (project: personal-identification-pipeline, author: TeamErlich)
def filter_sam_dups(filename, ids_to_discard):
    """
    Reads a SAM file and prints (to stdout) the header lines and every
    alignment line whose read ID is not in ids_to_discard.
    """
    try:
        sam = file(filename, 'r')
        for linenum, line in enumerate(sam):
            err = "input error in '%s' line %d: " % (filename, linenum+1)
            line = line.strip()
            if line[:1] == '@':
                print line
                continue
            flds = line.split('\t')
            read_id = flds[0]
            if not (read_id in ids_to_discard):
                print line
    except IOError as e:
        if e.errno == errno.EPIPE:
            sys.exit(0)  # exit silently
        # TODO: this is a cop-out, hard to tell what's the exact error
        # and give informative, useful error message to the user.
        sys.exit("I/O error: %s" % (str(e)))
poretools-basenames.py (project: personal-identification-pipeline, author: TeamErlich)
def process_times(filename):
    """
    Remove the path (dirname) from the second field of the 'times' file.
    """
    try:
        f = open(filename, 'r')
        for linenum, line in enumerate(f):
            err = "input error in '%s' line %d: " % (filename, linenum+1)
            line = line.strip()
            flds = line.split('\t')
            if len(flds) != 11:
                sys.exit(err + "expecting 11 fields, found %d - is this a "
                         "'poretools times' file?" % (len(flds)))
            # First line: validate, then print as-is
            if linenum == 0:
                if flds[0] != "channel":
                    sys.exit(err + "expecting header line (first word: "
                             "'channel') - is this a 'poretools times' file?")
                print '\t'.join(flds)
                continue
            # other lines - remove path from second field.
            flds[1] = basename(flds[1])
            print '\t'.join(flds)
    except IOError as e:
        if e.errno == errno.EPIPE:
            sys.exit(0)  # exit silently
        # TODO: this is a cop-out, hard to tell what's the exact error
        # and give informative, useful error message to the user.
        sys.exit("I/O error: %s" % (str(e)))
def main(args):
    """
    Parse IgBLAST output
    """
    n = 0
    if args.vdatabase:
        with SequenceReader(args.vdatabase) as fr:
            v_database = {record.name: record.sequence.upper() for record in fr}
    else:
        v_database = None
    detected_cdr3s = 0
    writer = TableWriter(sys.stdout)
    with SequenceReader(args.fasta) as sequences, xopen(args.igblast) as igblast:
        parser = ExtendedIgBlastParser(sequences, igblast, v_database)
        for record in parser:
            n += 1
            d = record.asdict()
            if d['CDR3_aa']:
                detected_cdr3s += 1
            if args.rename is not None:
                d['name'] = "{}seq{}".format(args.rename, n)
            try:
                writer.write(d)
            except IOError as e:
                if e.errno == errno.EPIPE:
                    sys.exit(1)
                raise
    logger.info('%d IgBLAST assignments parsed and written', n)
    logger.info('CDR3s detected in %.1f%% of all sequences', detected_cdr3s / n * 100)
    if args.stats:
        stats = {'total': n, 'detected_cdr3s': detected_cdr3s}
        with open(args.stats, 'w') as f:
            json.dump(stats, f)
            print(file=f)
def run(self):
    try:
        self._compress_streaming_json()
    except IOError as e:
        if e.errno == errno.EPIPE:
            # just stop if the pipe breaks
            pass
        else:
            raise
    finally:
        self._close_file()
def run(self):
    try:
        self._parse_binlog()
    except IOError as e:
        if e.errno == errno.EPIPE:
            # just stop if the pipe breaks
            pass
        else:
            raise
def communicate(self, input=None):
    """Interact with process: Send data to stdin. Read data from
    stdout and stderr, until end-of-file is reached. Wait for
    process to terminate. The optional input argument should be a
    string to be sent to the child process, or None, if no data
    should be sent to the child.
    communicate() returns a tuple (stdout, stderr)."""
    # Optimization: If we are only using one pipe, or no pipe at
    # all, using select() or threads is unnecessary.
    if [self.stdin, self.stdout, self.stderr].count(None) >= 2:
        stdout = None
        stderr = None
        if self.stdin:
            if input:
                try:
                    self.stdin.write(input)
                except IOError as e:
                    if e.errno != errno.EPIPE and e.errno != errno.EINVAL:
                        raise
            self.stdin.close()
        elif self.stdout:
            stdout = self.stdout.read()
            self.stdout.close()
        elif self.stderr:
            stderr = self.stderr.read()
            self.stderr.close()
        self.wait()
        return (stdout, stderr)
    return self._communicate(input)
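The EPIPE/EINVAL tolerance in the stdin branch above matters when the child exits before consuming all of its input; the remaining writes then hit a closed pipe and should not be treated as an error. A minimal usage sketch of that situation with the standard subprocess module, where the command and the input are purely illustrative:

import subprocess

# `head -n 1` reads one line and exits, so later writes to its stdin hit a
# closed pipe; communicate() tolerates the resulting EPIPE and still returns.
proc = subprocess.Popen(['head', '-n', '1'],
                        stdin=subprocess.PIPE, stdout=subprocess.PIPE)
out, _ = proc.communicate(b'first line\n' + b'x' * (1 << 20))
print(out)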
def send(self, input):
    if not self.stdin:
        return None
    if not select.select([], [self.stdin], [], 0)[1]:
        return 0
    try:
        written = os.write(self.stdin.fileno(), input)
    except OSError:
        if geterror()[0] == errno.EPIPE:  # broken pipe
            return self._close('stdin')
        raise
    return written
def _notifyParent(self, parent, msg):
    """Send message to parent, ignoring EPIPE and retrying on EAGAIN"""
    while True:
        try:
            parent.send(msg)
            return True
        except socket.error, e:
            if e[0] == errno.EPIPE:
                return False  # Parent is gone
            if e[0] == errno.EAGAIN:
                # Wait for socket change before sending again
                select.select([], [parent], [])
            else:
                raise
def run(self):
    events = []
    while not self._stopping:
        asap = False
        try:
            events = self.poll(TIMEOUT_PRECISION)
        except (OSError, IOError) as e:
            if errno_from_exception(e) in (errno.EPIPE, errno.EINTR):
                # EPIPE: happens when the client closes the connection
                # EINTR: happens when a signal is received
                # handle them as soon as possible
                asap = True
                logging.debug('poll:%s', e)
            else:
                logging.error('poll:%s', e)
                import traceback
                traceback.print_exc()
                continue
        handle = False
        for sock, fd, event in events:
            handler = self._fdmap.get(fd, None)
            if handler is not None:
                handler = handler[1]
                try:
                    handle = handler.handle_event(sock, fd, event) or handle
                except (OSError, IOError) as e:
                    shell.print_exception(e)
        now = time.time()
        if asap or now - self._last_time >= TIMEOUT_PRECISION:
            for callback in self._periodic_callbacks:
                callback()
            self._last_time = now
        if events and not handle:
            time.sleep(0.001)
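run() above depends on an errno_from_exception helper that is not shown. A common way to write it (this sketch is an assumption, not necessarily the surrounding project's code) is to prefer the exception's errno attribute and fall back to its first argument:

def errno_from_exception(e):
    # Best-effort extraction of an errno from OSError/IOError/socket.error.
    if hasattr(e, 'errno'):
        return e.errno
    if e.args:
        return e.args[0]
    return None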
def _on_local_error(self):
    if self._local_sock:
        err = eventloop.get_sock_error(self._local_sock)
        if err.errno not in [errno.ECONNRESET, errno.EPIPE]:
            logging.error(err)
            logging.error("local error, exception from %s:%d" %
                          (self._client_address[0], self._client_address[1]))
    self.destroy()