def seek(self, position, whence=io.SEEK_SET):
    """Seek to a position in the file.

    The resulting position is clamped into ``[0, self.size]`` and
    returned.
    """
    if whence == io.SEEK_SET:
        # absolute offset: clamp into [0, size]
        target = min(max(position, 0), self.size)
    elif whence == io.SEEK_CUR:
        moved = self.position + position
        # backward moves clamp at the start, forward moves at the end
        target = max(moved, 0) if position < 0 else min(moved, self.size)
    elif whence == io.SEEK_END:
        # offset relative to the end, clamped into [0, size]
        target = max(min(self.size + position, self.size), 0)
    else:
        raise ValueError("Invalid argument")
    self.position = target
    return self.position
# Python examples using io.SEEK_END (collected snippets)
def testTruncate(self):
    # Verifies that truncate() changes the file's size without moving
    # the current file position, whether shrinking or extending.
    f = _FileIO(TESTFN, 'w')
    f.write(bytes(bytearray(range(10))))
    self.assertEqual(f.tell(), 10)
    f.truncate(5)
    # position is still past the new end-of-file
    self.assertEqual(f.tell(), 10)
    self.assertEqual(f.seek(0, io.SEEK_END), 5)
    f.truncate(15)
    # extending the file also leaves the position untouched
    self.assertEqual(f.tell(), 5)
    self.assertEqual(f.seek(0, io.SEEK_END), 15)
    f.close()
def recognizes(cls, file):
    """Return True if *file* appears to contain a coreboot CBFS image.

    First tries the relative header offset stored in the image's last
    4 bytes; failing that, falls back to scanning the whole image for
    a valid header.
    """
    size = os.stat(file.path).st_size
    # Too small to hold a header, or larger than the maximum supported
    # image: cannot be CBFS.
    if size < CBFS_HEADER_SIZE or size > CBFS_MAXIMUM_FILE_SIZE:
        return False
    with open(file.path, 'rb') as f:
        # pick at the latest byte as it should contain the relative offset of the header
        f.seek(-4, io.SEEK_END)
        # <pgeorgi> given the hardware we support so far, it looks like
        # that field is now bound to be little endian
        # -- #coreboot, 2015-10-14
        rel_offset = struct.unpack('<i', f.read(4))[0]
        # A plausible pointer is negative (relative to end-of-file) and
        # lands inside the image with room for a full header.
        if rel_offset < 0 and -rel_offset > CBFS_HEADER_SIZE and -rel_offset < size:
            f.seek(rel_offset, io.SEEK_END)
            logger.debug('looking for header at offset: %x', f.tell())
            if is_header_valid(f.read(CBFS_HEADER_SIZE), size):
                return True
            elif not file.name.endswith('.rom'):
                # header invalid and name doesn't suggest a ROM: give up
                return False
        else:
            logger.debug('CBFS relative offset seems wrong, scanning whole image')
        # Fallback: brute-force scan, sliding one byte at a time through
        # 32 KiB chunks.
        f.seek(0, io.SEEK_SET)
        offset = 0
        buf = f.read(CBFS_HEADER_SIZE)
        while len(buf) >= CBFS_HEADER_SIZE:
            if is_header_valid(buf, size, offset):
                return True
            if len(buf) - offset <= CBFS_HEADER_SIZE:
                # NOTE(review): refilling resets the window to the next
                # chunk; a header straddling a chunk boundary would
                # presumably be missed — confirm this is acceptable.
                buf = f.read(32768)
                offset = 0
            else:
                offset += 1
        return False
def seek(self, position, whence=io.SEEK_SET):
    """Reposition the logical cursor.

    Interprets *position* relative to *whence* (start, current, or
    end), keeps the result within ``[0, self.size]``, and returns it.
    """
    if whence == io.SEEK_SET:
        self.position = max(0, min(position, self.size))
    elif whence == io.SEEK_CUR:
        candidate = self.position + position
        if position < 0:
            # moving backwards: don't go before the start
            self.position = max(candidate, 0)
        else:
            # moving forwards: don't go past the end
            self.position = min(candidate, self.size)
    elif whence == io.SEEK_END:
        self.position = max(min(self.size + position, self.size), 0)
    else:
        raise ValueError("Invalid argument")
    return self.position
def seek(self, offset, whence):
    """
    Seek within the part. This is similar to the standard seek, except
    that io.SEEK_SET means the start of the part and io.SEEK_END means
    the end of the part.

    :param offset: Offset in bytes from the location selected by whence
    :param whence: io.SEEK_SET and io.SEEK_END are supported
    :raises RuntimeError: for any other whence value
    """
    if whence == io.SEEK_SET:
        # translate part-relative offset to an absolute file offset
        self.file.seek(self.start + offset, io.SEEK_SET)
    elif whence == io.SEEK_END:
        self.file.seek(self.start + self.size + offset, io.SEEK_SET)
    else:
        raise RuntimeError("Unhandled whence value: {}".format(whence))
def add_parts_from_file(self, file_path):
    """
    Splits a file into parts and adds all parts to an internal list of
    parts to upload. The parts will not be uploaded to the server until
    upload is called.

    :param string file_path: Path of file to upload in parts
    """
    with io.open(file_path, mode='rb') as handle:
        # measure the file once; each part is self.part_size bytes
        handle.seek(0, io.SEEK_END)
        total_bytes = handle.tell()
        part_offset = 0
        while part_offset < total_bytes:
            self.add_part_from_file(file_path, offset=part_offset, size=self.part_size)
            part_offset += self.part_size
def _reverse_read_lines(fp, buf_size=8192): # pylint: disable=invalid-name
"""
Async generator that returns the lines of a file in reverse order.
ref: https://stackoverflow.com/a/23646049/8776239
and: https://stackoverflow.com/questions/2301789/read-a-file-in-reverse-order-using-python
"""
segment = None # holds possible incomplete segment at the beginning of the buffer
offset = 0
await fp.seek(0, io.SEEK_END)
file_size = remaining_size = await fp.tell()
while remaining_size > 0:
offset = min(file_size, offset + buf_size)
await fp.seek(file_size - offset)
buffer = await fp.read(min(remaining_size, buf_size))
remaining_size -= buf_size
lines = buffer.splitlines(True)
# the first line of the buffer is probably not a complete line so
# we'll save it and append it to the last line of the next buffer
# we read
if segment is not None:
# if the previous chunk starts right from the beginning of line
# do not concat the segment to the last line of new chunk
# instead, yield the segment first
if buffer[-1] == '\n':
# print 'buffer ends with newline'
yield segment
else:
lines[-1] += segment
# print 'enlarged last line to >{}<, len {}'.format(lines[-1], len(lines))
segment = lines[0]
for index in range(len(lines) - 1, 0, -1):
l = lines[index]
if l:
yield l
# Don't yield None if the file was empty
if segment is not None:
yield segment
def seek(self, position, whence=io.SEEK_SET):
    """Seek to a position in the file.

    :param position: byte offset, interpreted relative to *whence*
    :param whence: io.SEEK_SET, io.SEEK_CUR, or io.SEEK_END
    :returns: the new position, clamped into ``[0, self.size]``
    :raises ValueError: if *whence* is not one of the three constants

    Fix: the original clamped SEEK_CUR only on one side depending on
    the sign of *position*; all modes now clamp uniformly into
    ``[0, self.size]``, and the error message names the bad value.
    """
    if whence == io.SEEK_SET:
        target = position
    elif whence == io.SEEK_CUR:
        target = self.position + position
    elif whence == io.SEEK_END:
        target = self.size + position
    else:
        raise ValueError("Invalid whence value: {!r}".format(whence))
    # single, consistent clamp for every mode
    self.position = min(max(target, 0), self.size)
    return self.position
def available(self):
    """
    Number of bytes between the current position and the end of the
    stream.

    This is duplicate functionality if we have a HighPerformanceStreamIO.
    But we also want to support those that aren't. TODO: Better solution?
    """
    here = self._stream.tell()
    self._stream.seek(0, SEEK_END)
    end = self._stream.tell()
    # restore the caller's position before reporting
    self._stream.seek(here)
    return end - here
def _rawStreamSize(self):
curPos = self._stream.tell()
self._stream.seek(0, SEEK_END)
endPos = self._stream.tell()
self._stream.seek(curPos)
return endPos - self._prefixStart
def _rawAvailable(self):
"some underlying streams may not support 'available'"
curPos = self._stream.tell()
self._stream.seek(0, SEEK_END)
endPos = self._stream.tell()
self._stream.seek(curPos)
return endPos - curPos
def available(self):
    """
    Get the available bytes, with the suffix cut off once it has been
    accounted for.
    """
    # Not enough raw bytes to hold even prefix + suffix: nothing usable.
    if self._rawStreamSize() < (self.PREFIX_SIZE + self.SUFFIX_SIZE):
        return 0
    saved = self._stream.tell()
    # even if the suffix hasn't been received yet, we calculate our offsets as if it had.
    # why? because if it hasn't been received yet, we don't want to finish! The whole packet
    # isn't framed (verified) until the final bytes are received.
    self._stream.seek(-self.SUFFIX_SIZE, SEEK_END)
    limit = self._stream.tell()
    self._stream.seek(saved)
    return limit - saved
def seek(self, offset, whence=SEEK_SET):
    "Adjust seek from prefix start and, if present, from prefix"
    # Until prefix + suffix could exist there is nothing to seek within
    # (implicitly returns None, matching the original contract).
    if self._rawStreamSize() < (self.PREFIX_SIZE + self.SUFFIX_SIZE):
        return
    if whence == SEEK_CUR:
        return self._stream.seek(offset, whence)
    if whence == SEEK_SET:
        # absolute offsets are relative to the first payload byte
        return self._stream.seek(self._prefixStart + self.PREFIX_SIZE + offset, whence)
    if whence == SEEK_END:
        # even if the suffix hasn't been received yet, we calculate our offsets as if it had.
        # why? because if it hasn't been received yet, we don't want to finish! The whole packet
        # isn't framed (verified) until the final bytes are received.
        return self._stream.seek(offset - self.SUFFIX_SIZE, whence)
def update(self, newData):
    """Append *newData* at the end of the stream, then restore the
    position the caller was at."""
    mark = self.tell()
    self.seek(0, io.SEEK_END)
    self.write(newData)
    self.seek(mark)
def available(self):
    """Bytes remaining between the current position and end of stream."""
    mark = self.tell()
    # seek() returns the new (end) position
    total = self.seek(0, io.SEEK_END)
    self.seek(mark)
    return total - mark
def ended(self) -> bool:
    """Return True when ``self.offset`` has reached the end of the
    underlying code stream."""
    try:
        # fstat is cheap and does not disturb the stream position
        end = os.fstat(self._codeio.fileno()).st_size
    except io.UnsupportedOperation:
        # in-memory streams have no fileno; measure by seeking instead
        saved = self.offset
        self._codeio.seek(0, io.SEEK_END)
        end = self._codeio.tell()
        self._codeio.seek(saved)
    return self.offset == end
def _reset_frame(self):
self._iobuf = io.BytesIO(self._iobuf.read())
self._iobuf.seek(0, 2) # io.SEEK_END == 2 (constant not present in 2.6)
self._current_frame = None
def _play_run(self, f):
    """Replay recorded sensor data from *f* in (approximately) real time.

    Pushes each record's values into the application's sensor models and
    schedules progress-bar updates via GLib.idle_add; NOTE(review): the
    idle_add/lock pattern suggests this runs off the main loop — confirm.
    Closes *f* when done and forwards any exception to the finish handler.
    """
    err = None
    try:
        # Calculate how many records are in the file; we'll use this later
        # when updating the progress bar
        rec_total = (f.seek(0, io.SEEK_END) - HEADER_REC.size) // DATA_REC.size
        f.seek(0)
        skipped = 0
        for rec, data in enumerate(self._play_source(f)):
            now = time()
            if data.timestamp < now:
                # record is already in the past; skip it to keep pace
                skipped += 1
                continue
            else:
                # sleep until the record is due; a set event means "stop"
                if self._play_event.wait(data.timestamp - now):
                    break
            self.props.application.pressure.set_values(data.pressure, data.ptemp)
            self.props.application.humidity.set_values(data.humidity, data.htemp)
            self.props.application.imu.set_imu_values(
                (data.ax, data.ay, data.az),
                (data.gx, data.gy, data.gz),
                (data.cx, data.cy, data.cz),
                (data.ox, data.oy, data.oz),
            )
            # Again, would be better to use custom signals here but
            # attempting to do so just results in seemingly random
            # segfaults during playback
            with self._play_update_lock:
                # only one pending progress update at a time
                if self._play_update_id == 0:
                    self._play_update_id = GLib.idle_add(self._play_update_controls, rec / rec_total)
    except Exception as e:
        err = e
    finally:
        f.close()
        # Must ensure that controls are only re-enabled *after* all pending
        # control updates have run
        with self._play_update_lock:
            if self._play_update_id:
                GLib.source_remove(self._play_update_id)
                self._play_update_id = 0
        # Get the main thread to re-enable the controls at the end of
        # playback
        GLib.idle_add(self._play_controls_finish, err)