def dumpsect(self, sector, firstindex=0):
"Displays a sector in a human-readable form, for debugging purpose."
if not DEBUG_MODE:
return
VPL = 8 # number of values per line (8+1 * 8+1 = 81)
tab = array.array(UINT32, sector)
if sys.byteorder == 'big':
tab.byteswap()
nbsect = len(tab)
nlines = (nbsect+VPL-1)//VPL
print("index", end=" ")
for i in range(VPL):
print("%8X" % i, end=" ")
print()
for l in range(nlines):
index = l*VPL
print("%8X:" % (firstindex+index), end=" ")
for i in range(index, index+VPL):
if i >= nbsect:
break
sect = tab[i]
name = "%8X" % sect
print(name, end=" ")
print()
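The method above is olefile's debug dump of a FAT sector: OLE2 stores 32-bit sector indexes little-endian on disk, so the values only need byteswapping when the host is big-endian. A minimal standalone sketch of that normalization pattern, assuming a 4-byte "I" type code (olefile resolves its UINT32 type code at import time because the width of "I" is platform dependent); the sample bytes are made up:

import array
import struct
import sys

def load_le_uint32(raw):
    """Interpret raw bytes as little-endian uint32 values on any host."""
    values = array.array("I", raw)     # native-endian view of the raw bytes
    if sys.byteorder == "big":
        values.byteswap()              # on-disk data is little-endian; swap to native
    return list(values)

# Four little-endian uint32 values as sample input
sample = struct.pack("<4I", 1, 2, 0xFFFFFFFE, 0xFFFFFFFF)
print(load_le_uint32(sample))          # [1, 2, 4294967294, 4294967295]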
def decompile(self, data, ttFont):
longFormat = ttFont['head'].indexToLocFormat
if longFormat:
format = "I"
else:
format = "H"
locations = array.array(format)
locations.fromstring(data)
if sys.byteorder != "big":
locations.byteswap()
if not longFormat:
l = array.array("I")
for i in range(len(locations)):
l.append(locations[i] * 2)
locations = l
if len(locations) < (ttFont['maxp'].numGlyphs + 1):
log.warning("corrupt 'loca' table, or wrong numGlyphs in 'maxp': %d %d",
len(locations) - 1, ttFont['maxp'].numGlyphs)
self.locations = locations
def compile(self, ttFont):
try:
max_location = max(self.locations)
except AttributeError:
self.set([])
max_location = 0
if max_location < 0x20000 and all(l % 2 == 0 for l in self.locations):
locations = array.array("H")
for i in range(len(self.locations)):
locations.append(self.locations[i] // 2)
ttFont['head'].indexToLocFormat = 0
else:
locations = array.array("I", self.locations)
ttFont['head'].indexToLocFormat = 1
if sys.byteorder != "big":
locations.byteswap()
return locations.tostring()
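The compile() method above chooses between the two 'loca' encodings: if every offset is even and below 0x20000, each value is halved and stored as an unsigned 16-bit integer (indexToLocFormat 0), otherwise full 32-bit offsets are written (format 1); either way the data is emitted big-endian. A rough standalone illustration of that decision, not the fontTools API:

import array
import sys

def pack_loca(offsets):
    """Pack glyph offsets; returns (data, indexToLocFormat). Illustrative only."""
    if max(offsets) < 0x20000 and all(off % 2 == 0 for off in offsets):
        packed = array.array("H", [off // 2 for off in offsets])   # short format
        fmt = 0
    else:
        packed = array.array("I", offsets)                         # long format
        fmt = 1
    if sys.byteorder != "big":
        packed.byteswap()                                          # font tables are big-endian
    return packed.tobytes(), fmt

data, fmt = pack_loca([0, 100, 250, 600])
print(fmt, data.hex())     # 0 00000032007d012c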
def decompileDeltas_(numDeltas, data, offset):
"""(numDeltas, data, offset) --> ([delta, delta, ...], newOffset)"""
result = []
pos = offset
while len(result) < numDeltas:
runHeader = byteord(data[pos])
pos += 1
numDeltasInRun = (runHeader & DELTA_RUN_COUNT_MASK) + 1
if (runHeader & DELTAS_ARE_ZERO) != 0:
result.extend([0] * numDeltasInRun)
else:
if (runHeader & DELTAS_ARE_WORDS) != 0:
deltas = array.array("h")
deltasSize = numDeltasInRun * 2
else:
deltas = array.array("b")
deltasSize = numDeltasInRun
deltas.fromstring(data[pos:pos+deltasSize])
if sys.byteorder != "big":
deltas.byteswap()
assert len(deltas) == numDeltasInRun
pos += deltasSize
result.extend(deltas)
assert len(result) == numDeltas
return (result, pos)
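decompileDeltas_ walks the gvar packed-delta encoding: each run starts with a header byte whose low six bits give the run length minus one, while the top bits flag an all-zero run or 16-bit (rather than 8-bit) values. A hedged standalone sketch of the same loop; the flag values below are taken from the OpenType packed-deltas description and mirror the DELTAS_ARE_ZERO / DELTAS_ARE_WORDS / DELTA_RUN_COUNT_MASK constants assumed above:

import array
import struct
import sys

DELTAS_ARE_ZERO = 0x80        # run contains only zeros, no payload bytes
DELTAS_ARE_WORDS = 0x40       # payload is signed 16-bit values instead of bytes
DELTA_RUN_COUNT_MASK = 0x3F   # low bits: run length minus one

def unpack_deltas(data, num_deltas):
    """Decode packed deltas; standalone version of the run loop above."""
    result, pos = [], 0
    while len(result) < num_deltas:
        header = data[pos]
        pos += 1
        run_len = (header & DELTA_RUN_COUNT_MASK) + 1
        if header & DELTAS_ARE_ZERO:
            result.extend([0] * run_len)
            continue
        typecode, width = ("h", 2) if header & DELTAS_ARE_WORDS else ("b", 1)
        run = array.array(typecode)
        run.frombytes(data[pos:pos + run_len * width])
        if sys.byteorder != "big":
            run.byteswap()                 # packed deltas are stored big-endian
        result.extend(run)
        pos += run_len * width
    return result, pos

# Two runs: three zero deltas, then the signed bytes 5 and -3
packed = bytes([0x82, 0x01]) + struct.pack(">bb", 5, -3)
print(unpack_deltas(packed, 5))            # ([0, 0, 0, 5, -3], 4)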
def compile(self, ttFont):
if self.data:
return struct.pack(">HHH", self.format, self.length, self.language) + self.data
cmap = self.cmap
codes = sorted(cmap.keys())
if codes: # yes, there are empty cmap tables.
codes = list(range(codes[0], codes[-1] + 1))
firstCode = codes[0]
valueList = [cmap.get(code, ".notdef") for code in codes]
valueList = map(ttFont.getGlyphID, valueList)
gids = array.array("H", valueList)
if sys.byteorder != "big":
gids.byteswap()
data = gids.tostring()
else:
data = b""
firstCode = 0
header = struct.pack(">HHHHH",
6, len(data) + 10, self.language, firstCode, len(codes))
return header + data
def decode_format_4_0(self, data, ttFont):
from fontTools import agl
numGlyphs = ttFont['maxp'].numGlyphs
indices = array.array("H")
indices.fromstring(data)
if sys.byteorder != "big":
indices.byteswap()
# In some older fonts, the size of the post table doesn't match
# the number of glyphs. Sometimes it's bigger, sometimes smaller.
self.glyphOrder = glyphOrder = [''] * int(numGlyphs)
for i in range(min(len(indices),numGlyphs)):
if indices[i] == 0xFFFF:
self.glyphOrder[i] = ''
elif indices[i] in agl.UV2AGL:
self.glyphOrder[i] = agl.UV2AGL[indices[i]]
else:
self.glyphOrder[i] = "uni%04X" % indices[i]
self.build_psNameMapping(ttFont)
def decompileOffsets_(data, tableFormat, glyphCount):
if tableFormat == 0:
# Short format: array of UInt16
offsets = array.array("H")
offsetsSize = (glyphCount + 1) * 2
else:
# Long format: array of UInt32
offsets = array.array("I")
offsetsSize = (glyphCount + 1) * 4
offsets.fromstring(data[0 : offsetsSize])
if sys.byteorder != "big":
offsets.byteswap()
# In the short format, offsets need to be multiplied by 2.
# This is not documented in Apple's TrueType specification,
# but can be inferred from the FreeType implementation, and
# we could verify it with two sample GX fonts.
if tableFormat == 0:
offsets = [off * 2 for off in offsets]
return offsets
def compileOffsets_(offsets):
"""Packs a list of offsets into a 'gvar' offset table.
Returns a pair (bytestring, tableFormat). Bytestring is the
packed offset table. Format indicates whether the table
uses short (tableFormat=0) or long (tableFormat=1) integers.
The returned tableFormat should get packed into the flags field
of the 'gvar' header.
"""
assert len(offsets) >= 2
for i in range(1, len(offsets)):
assert offsets[i - 1] <= offsets[i]
if max(offsets) <= 0xffff * 2:
packed = array.array("H", [n >> 1 for n in offsets])
tableFormat = 0
else:
packed = array.array("I", offsets)
tableFormat = 1
if sys.byteorder != "big":
packed.byteswap()
return (packed.tostring(), tableFormat)
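compileOffsets_ and decompileOffsets_ are intended to be inverses (short-format values are halved on write and doubled on read). A hedged round-trip check, assuming fontTools is installed and that these two helpers are the static methods on the 'gvar' table class in fontTools.ttLib.tables._g_v_a_r; the offsets list is made up:

from fontTools.ttLib.tables._g_v_a_r import table__g_v_a_r

offsets = [0, 8, 8, 24, 100]                 # even values, so the short format applies
data, fmt = table__g_v_a_r.compileOffsets_(offsets)
roundtrip = table__g_v_a_r.decompileOffsets_(data, fmt, len(offsets) - 1)
assert fmt == 0 and list(roundtrip) == offsets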
def decompile(self, data, ttFont):
dummy, newData = sstruct.unpack2(GPKGFormat, data, self)
GMAPoffsets = array.array("I")
endPos = (self.numGMAPs+1) * 4
GMAPoffsets.fromstring(newData[:endPos])
if sys.byteorder != "big":
GMAPoffsets.byteswap()
self.GMAPs = []
for i in range(self.numGMAPs):
start = GMAPoffsets[i]
end = GMAPoffsets[i+1]
self.GMAPs.append(data[start:end])
pos = endPos
endPos = pos + (self.numGlyplets + 1)*4
glyphletOffsets = array.array("I")
glyphletOffsets.fromstring(newData[pos:endPos])
if sys.byteorder != "big":
glyphletOffsets.byteswap()
self.glyphlets = []
for i in range(self.numGlyplets):
start = glyphletOffsets[i]
end = glyphletOffsets[i+1]
self.glyphlets.append(data[start:end])
def __init__(self, shape, typechar, itemsize):
import ctypes
ndim = len(shape)
self.ndim = ndim
self.shape = tuple(shape)
array_len = 1
for d in shape:
array_len *= d
self.size = itemsize * array_len
self.parent = ctypes.create_string_buffer(self.size)
self.itemsize = itemsize
strides = [itemsize] * ndim
for i in range(ndim - 1, 0, -1):
strides[i - 1] = strides[i] * shape[i]
self.strides = tuple(strides)
self.data = ctypes.addressof(self.parent), False
if self.itemsize == 1:
byteorder = '|'
elif sys.byteorder == 'big':
byteorder = '>'
else:
byteorder = '<'
self.typestr = byteorder + typechar + str(self.itemsize)
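The typestr assembled at the end of __init__ follows the __array_interface__ convention: a byte-order character ('<' little-endian, '>' big-endian, '|' not applicable for single-byte items), a type-kind character, and the item size in bytes. A quick standalone illustration:

import sys

def typestr(typechar, itemsize):
    """Build an __array_interface__ type string for a native-endian buffer."""
    if itemsize == 1:
        order = "|"                         # byte order is irrelevant for 1-byte items
    else:
        order = ">" if sys.byteorder == "big" else "<"
    return order + typechar + str(itemsize)

print(typestr("u", 1))   # '|u1'
print(typestr("i", 4))   # '<i4' on little-endian hosts, '>i4' on big-endian
print(typestr("f", 8))   # '<f8' or '>f8'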
def writeframesraw(self, data):
self._ensure_header_written(len(data))
nframes = len(data) // (self._sampwidth * self._nchannels)
if self._convert:
data = self._convert(data)
if self._sampwidth in (2, 4) and sys.byteorder == 'big':
import array
a = array.array(_array_fmts[self._sampwidth])
a.fromstring(data)
data = a
assert data.itemsize == self._sampwidth
data.byteswap()
data.tofile(self._file)
self._datawritten = self._datawritten + len(data) * self._sampwidth
else:
if self._sampwidth == 3 and sys.byteorder == 'big':
data = _byteswap3(data)
self._file.write(data)
self._datawritten = self._datawritten + len(data)
self._nframeswritten = self._nframeswritten + nframes
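writeframesraw works because WAV sample data is little-endian on disk: on a big-endian host the 2- and 4-byte samples are loaded into an array and byteswapped before writing (3-byte samples need the separate _byteswap3 helper). A standalone sketch of the same conversion, producing little-endian bytes regardless of the host order; the function name is made up:

import array
import sys

def samples_to_le_bytes(samples, sampwidth=2):
    """Convert native-endian integer samples to little-endian bytes, as WAV expects."""
    typecode = {2: "h", 4: "i"}[sampwidth]
    a = array.array(typecode, samples)
    if sys.byteorder == "big":
        a.byteswap()                        # WAV sample data is little-endian on disk
    return a.tobytes()

print(samples_to_le_bytes([1, -1, 256]).hex())   # '0100ffff0001'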
def test_endian_float(self):
if sys.byteorder == "little":
self.assertIs(c_float.__ctype_le__, c_float)
self.assertIs(c_float.__ctype_be__.__ctype_le__, c_float)
else:
self.assertIs(c_float.__ctype_be__, c_float)
self.assertIs(c_float.__ctype_le__.__ctype_be__, c_float)
s = c_float(math.pi)
self.assertEqual(bin(struct.pack("f", math.pi)), bin(s))
# Hm, what's the precision of a float compared to a double?
self.assertAlmostEqual(s.value, math.pi, 6)
s = c_float.__ctype_le__(math.pi)
self.assertAlmostEqual(s.value, math.pi, 6)
self.assertEqual(bin(struct.pack("<f", math.pi)), bin(s))
s = c_float.__ctype_be__(math.pi)
self.assertAlmostEqual(s.value, math.pi, 6)
self.assertEqual(bin(struct.pack(">f", math.pi)), bin(s))
def test_endian_double(self):
if sys.byteorder == "little":
self.assertIs(c_double.__ctype_le__, c_double)
self.assertIs(c_double.__ctype_be__.__ctype_le__, c_double)
else:
self.assertIs(c_double.__ctype_be__, c_double)
self.assertIs(c_double.__ctype_le__.__ctype_be__, c_double)
s = c_double(math.pi)
self.assertEqual(s.value, math.pi)
self.assertEqual(bin(struct.pack("d", math.pi)), bin(s))
s = c_double.__ctype_le__(math.pi)
self.assertEqual(s.value, math.pi)
self.assertEqual(bin(struct.pack("<d", math.pi)), bin(s))
s = c_double.__ctype_be__(math.pi)
self.assertEqual(s.value, math.pi)
self.assertEqual(bin(struct.pack(">d", math.pi)), bin(s))
def test_struct_fields_2(self):
# standard packing in struct uses no alignment.
# So, we have to align using pad bytes.
#
# Unaligned accesses will crash Python (on those platforms that
# don't allow it, like sparc solaris).
if sys.byteorder == "little":
base = BigEndianStructure
fmt = ">bxhid"
else:
base = LittleEndianStructure
fmt = "<bxhid"
class S(base):
_fields_ = [("b", c_byte),
("h", c_short),
("i", c_int),
("d", c_double)]
s1 = S(0x12, 0x1234, 0x12345678, 3.14)
s2 = struct.pack(fmt, 0x12, 0x1234, 0x12345678, 3.14)
self.assertEqual(bin(s1), bin(s2))
def test_memmove():
Short = new_primitive_type("short")
ShortA = new_array_type(new_pointer_type(Short), None)
Char = new_primitive_type("char")
CharA = new_array_type(new_pointer_type(Char), None)
p = newp(ShortA, [-1234, -2345, -3456, -4567, -5678])
memmove(p, p + 1, 4)
assert list(p) == [-2345, -3456, -3456, -4567, -5678]
p[2] = 999
memmove(p + 2, p, 6)
assert list(p) == [-2345, -3456, -2345, -3456, 999]
memmove(p + 4, newp(CharA, b"\x71\x72"), 2)
if sys.byteorder == 'little':
assert list(p) == [-2345, -3456, -2345, -3456, 0x7271]
else:
assert list(p) == [-2345, -3456, -2345, -3456, 0x7172]
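The last assertion pair exists because memmove copies raw bytes: after writing 0x71 0x72 into one 16-bit slot, the short reads back as 0x7271 on a little-endian host and 0x7172 on a big-endian one. int.from_bytes shows the same effect without cffi:

import sys

print(int.from_bytes(b"\x71\x72", "little"))        # 29297 == 0x7271
print(int.from_bytes(b"\x71\x72", "big"))           # 29042 == 0x7172
print(int.from_bytes(b"\x71\x72", sys.byteorder))   # whichever value this host's C short sees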
def test_ffi_buffer_ptr(self):
a = ffi.new("short *", 100)
try:
b = ffi.buffer(a)
except NotImplementedError as e:
py.test.skip(str(e))
content = b[:]
assert len(content) == len(b) == 2
if sys.byteorder == 'little':
assert content == b'\x64\x00'
assert b[0] == b'\x64'
b[0] = b'\x65'
else:
assert content == b'\x00\x64'
assert b[1] == b'\x64'
b[1] = b'\x65'
assert a[0] == 101
def test_ffi_buffer_ptr_size(self):
a = ffi.new("short *", 0x4243)
try:
b = ffi.buffer(a, 1)
except NotImplementedError as e:
py.test.skip(str(e))
content = b[:]
assert len(content) == 1
if sys.byteorder == 'little':
assert content == b'\x43'
b[0] = b'\x62'
assert a[0] == 0x4262
else:
assert content == b'\x42'
b[0] = b'\x63'
assert a[0] == 0x6343
def test_ffi_buffer_array(self):
ffi = FFI(backend=self.Backend())
a = ffi.new("int[]", list(range(100, 110)))
try:
b = ffi.buffer(a)
except NotImplementedError as e:
py.test.skip(str(e))
content = b[:]
if sys.byteorder == 'little':
assert content.startswith(b'\x64\x00\x00\x00\x65\x00\x00\x00')
b[4] = b'\x45'
else:
assert content.startswith(b'\x00\x00\x00\x64\x00\x00\x00\x65')
b[7] = b'\x45'
assert len(content) == 4 * 10
assert a[1] == 0x45
def test_ffi_buffer_ptr_size(self):
ffi = FFI(backend=self.Backend())
a = ffi.new("short *", 0x4243)
try:
b = ffi.buffer(a, 1)
except NotImplementedError as e:
py.test.skip(str(e))
content = b[:]
assert len(content) == 1
if sys.byteorder == 'little':
assert content == b'\x43'
b[0] = b'\x62'
assert a[0] == 0x4262
else:
assert content == b'\x42'
b[0] = b'\x63'
assert a[0] == 0x6343
def read_uic1tag(fh, byteorder, dtype, count, plane_count=None):
"""Read MetaMorph STK UIC1Tag from file and return as dictionary.
Return empty dictionary if plane_count is unknown.
"""
assert dtype in ('2I', '1I') and byteorder == '<'
result = {}
if dtype == '2I':
# pre MetaMorph 2.5 (not tested)
values = fh.read_array('<u4', 2*count).reshape(count, 2)
result = {'z_distance': values[:, 0] / values[:, 1]}
elif plane_count:
for i in range(count):
tagid = struct.unpack('<I', fh.read(4))[0]
if tagid in (28, 29, 37, 40, 41):
# silently skip unexpected tags
fh.read(4)
continue
name, value = read_uic_tag(fh, tagid, plane_count, offset=True)
result[name] = value
return result
def read_cz_lsm_info(fh, byteorder, dtype, count):
"""Read CS_LSM_INFO tag from file and return as numpy.rec.array."""
assert byteorder == '<'
magic_number, structure_size = struct.unpack('<II', fh.read(8))
if magic_number not in (50350412, 67127628):
raise ValueError("not a valid CS_LSM_INFO structure")
fh.seek(-8, 1)
if structure_size < numpy.dtype(CZ_LSM_INFO).itemsize:
# adjust structure according to structure_size
cz_lsm_info = []
size = 0
for name, dtype in CZ_LSM_INFO:
size += numpy.dtype(dtype).itemsize
if size > structure_size:
break
cz_lsm_info.append((name, dtype))
else:
cz_lsm_info = CZ_LSM_INFO
return fh.read_record(cz_lsm_info, byteorder=byteorder)
def test_byteorder_check():
# Byte order check should pass for native order
if sys.byteorder == 'little':
native = '<'
else:
native = '>'
for dtt in (np.float32, np.float64):
arr = np.eye(4, dtype=dtt)
n_arr = arr.newbyteorder(native)
sw_arr = arr.newbyteorder('S').byteswap()
assert_equal(arr.dtype.byteorder, '=')
for routine in (linalg.inv, linalg.det, linalg.pinv):
# Normal call
res = routine(arr)
# Native but not '='
assert_array_equal(res, routine(n_arr))
# Swapped
assert_array_equal(res, routine(sw_arr))
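The test feeds linalg the same matrix under three storage conventions: native order, an explicitly marked '<'/'>' dtype, and a byte-swapped copy. A small sketch of how a byte-swapped-but-equal array can be built with dtype.newbyteorder (note that the ndarray.newbyteorder method used in the test has been removed in recent numpy releases, while the dtype method remains):

import numpy as np

arr = np.arange(4.0)                              # native-order float64
sw_arr = arr.astype(arr.dtype.newbyteorder("S"))  # 'S' swaps the byte order

assert sw_arr.dtype.byteorder in ("<", ">")       # explicitly non-native storage
assert np.array_equal(arr, sw_arr)                # values still compare equal
assert arr.tobytes() != sw_arr.tobytes()          # but the underlying bytes differ
print(np.linalg.det(arr.reshape(2, 2)), np.linalg.det(sw_arr.reshape(2, 2)))  # same value twice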
def encode(self, input, final=False):
if self.encoder is None:
result = codecs.utf_32_encode(input, self.errors)[0]
if sys.byteorder == 'little':
self.encoder = codecs.utf_32_le_encode
else:
self.encoder = codecs.utf_32_be_encode
return result
return self.encoder(input, self.errors)[0]
def setstate(self, state):
if state:
self.encoder = None
else:
if sys.byteorder == 'little':
self.encoder = codecs.utf_32_le_encode
else:
self.encoder = codecs.utf_32_be_encode
def _buffer_decode(self, input, errors, final):
if self.decoder is None:
(output, consumed, byteorder) = \
codecs.utf_32_ex_decode(input, errors, 0, final)
if byteorder == -1:
self.decoder = codecs.utf_32_le_decode
elif byteorder == 1:
self.decoder = codecs.utf_32_be_decode
elif consumed >= 4:
raise UnicodeError("UTF-32 stream does not start with BOM")
return (output, consumed)
return self.decoder(input, self.errors, final)
def getstate(self):
# additional state info from the base class must be None here,
# as it isn't passed along to the caller
state = codecs.BufferedIncrementalDecoder.getstate(self)[0]
# additional state info we pass to the caller:
# 0: stream is in natural order for this platform
# 1: stream is in unnatural order
# 2: endianness hasn't been determined yet
if self.decoder is None:
return (state, 2)
addstate = int((sys.byteorder == "big") !=
(self.decoder is codecs.utf_32_be_decode))
return (state, addstate)
def setstate(self, state):
# state[1] will be ignored by BufferedIncrementalDecoder.setstate()
codecs.BufferedIncrementalDecoder.setstate(self, state)
state = state[1]
if state == 0:
self.decoder = (codecs.utf_32_be_decode
if sys.byteorder == "big"
else codecs.utf_32_le_decode)
elif state == 1:
self.decoder = (codecs.utf_32_le_decode
if sys.byteorder == "big"
else codecs.utf_32_be_decode)
else:
self.decoder = None
def decode(self, input, errors='strict'):
(object, consumed, byteorder) = \
codecs.utf_32_ex_decode(input, errors, 0, False)
if byteorder == -1:
self.decode = codecs.utf_32_le_decode
elif byteorder == 1:
self.decode = codecs.utf_32_be_decode
elif consumed>=4:
raise UnicodeError("UTF-32 stream does not start with BOM")
return (object, consumed)
### encodings module API
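Both the incremental _buffer_decode and the stateless decode above delegate BOM sniffing to codecs.utf_32_ex_decode, which returns the detected byte order alongside the text: -1 for little-endian, +1 for big-endian, 0 when it could not be determined. A short demonstration with the standard codecs module:

import codecs

le = "hi".encode("utf-32-le")
be = "hi".encode("utf-32-be")

print(codecs.utf_32_ex_decode(codecs.BOM_UTF32_LE + le, "strict", 0, True))
# ('hi', 12, -1)  little-endian BOM found and consumed
print(codecs.utf_32_ex_decode(codecs.BOM_UTF32_BE + be, "strict", 0, True))
# ('hi', 12, 1)   big-endian BOM found and consumed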