def init(self, update=None, lang=None):
    '''
    Initialize the pool.
    Set update to True to update the modules.
    lang is a list of language codes to be updated.
    '''
with self._lock:
if not self._started:
self.start()
with self._locks[self.database_name]:
# Don't reset pool if already init and not to update
if not update and self._pool.get(self.database_name):
return
logger.info('init pool for "%s"', self.database_name)
self._pool.setdefault(self.database_name, {})
# Clean the _pool before loading modules
for type in self.classes.keys():
self._pool[self.database_name][type] = {}
restart = not load_modules(self.database_name, self, update=update,
lang=lang)
if restart:
self.init()
def get(self, name, type='model'):
'''
Get an object from the pool
:param name: the object name
:param type: the type
:return: the instance
'''
if type == '*':
for type in self.classes.keys():
if name in self._pool[self.database_name][type]:
break
try:
return self._pool[self.database_name][type][name]
except KeyError:
if type == 'report':
from trytond.report import Report
# Keyword argument 'type' conflicts with builtin function
cls = __builtin__.type(str(name), (Report,), {})
cls.__setup__()
self.add(cls, type)
return cls
raise
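# A minimal, self-contained sketch of the idiom used above (not part of
# trytond): the three-argument builtin type(name, bases, namespace) builds a
# class at runtime, exactly like a class statement would.  Base and
# make_report_class are illustrative names only; trytond's Report.__setup__
# does considerably more.
class Base(object):
    @classmethod
    def __setup__(cls):
        cls.ready = True

def make_report_class(name):
    cls = type(str(name), (Base,), {})
    cls.__setup__()
    return cls

DynamicReport = make_report_class('report.dynamic')
assert DynamicReport.ready and DynamicReport.__name__ == 'report.dynamic'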
def new(self, name, bits, idaname=None, **kwargs):
'''Add a register to the architecture's cache.'''
# older
if idaapi.__version__ < 7.0:
dtype_by_size = utils.compose(idaapi.get_dtyp_by_size, ord)
# newer
else:
dtype_by_size = idaapi.get_dtyp_by_size
dtyp = kwargs.get('dtyp', idaapi.dt_bitfild if bits == 1 else dtype_by_size(bits//8))
namespace = dict(register_t.__dict__)
namespace.update({'__name__':name, 'parent':None, 'children':{}, 'dtyp':dtyp, 'offset':0, 'size':bits})
namespace['realname'] = idaname
namespace['alias'] = kwargs.get('alias', set())
res = type(name, (register_t,), namespace)()
self.__register__.__state__[name] = res
self.__cache__[idaname or name,dtyp] = name
return res
def child(self, parent, name, offset, bits, idaname=None, **kwargs):
'''Add a child-register to the architecture's cache.'''
# older
if idaapi.__version__ < 7.0:
dtype_by_size = utils.compose(idaapi.get_dtyp_by_size, ord)
# newer
else:
dtype_by_size = idaapi.get_dtyp_by_size
dtyp = kwargs.get('dtyp', idaapi.dt_bitfild if bits == 1 else dtype_by_size(bits//8))
namespace = dict(register_t.__dict__)
namespace.update({'__name__':name, 'parent':parent, 'children':{}, 'dtyp':dtyp, 'offset':offset, 'size':bits})
namespace['realname'] = idaname
namespace['alias'] = kwargs.get('alias', set())
res = type(name, (register_t,), namespace)()
self.__register__.__state__[name] = res
self.__cache__[idaname or name,dtyp] = name
parent.children[offset] = res
return res
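# Both helpers above rely on the same idiom: copy the base class' __dict__
# into a fresh namespace, patch a few attributes, then build and instantiate
# the class with type() in one expression.  A hedged, idaapi-free sketch of
# that idiom (BaseReg and make_register are illustrative, not part of the
# library):
class BaseReg(object):
    __slots__ = ()
    def __repr__(self):
        return "<register {:s} : {:d} bits>".format(self.__name__, self.size)

def make_register(name, bits):
    namespace = dict(BaseReg.__dict__)
    namespace.update({'__name__': name, 'size': bits})
    # type(...)() builds the subclass and immediately returns its sole instance
    return type(name, (BaseReg,), namespace)()

eax = make_register('eax', 32)
assert eax.size == 32 and repr(eax) == '<register eax : 32 bits>'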
def __init__(self, maxlen=132, indent=4, method='smart', prefix='&',
suffix='&'):
    # Line length should be long enough that continuation lines can host at
    # least one character apart from the indentation and the two continuation signs
minmaxlen = indent + len(prefix) + len(suffix) + 1
if maxlen < minmaxlen:
msg = 'Maximal line length less than {0} when using an indentation'\
' of {1}'.format(minmaxlen, indent)
raise FyppFatalError(msg)
self._maxlen = maxlen
self._indent = indent
self._prefix = ' ' * self._indent + prefix
self._suffix = suffix
if method not in ['brute', 'smart', 'simple']:
raise FyppFatalError('invalid folding type')
if method == 'brute':
self._inherit_indent = False
self._fold_position_finder = self._get_maximal_fold_pos
elif method == 'simple':
self._inherit_indent = True
self._fold_position_finder = self._get_maximal_fold_pos
elif method == 'smart':
self._inherit_indent = True
self._fold_position_finder = self._get_smart_fold_pos
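# Worked example of the minmaxlen check above: with the defaults indent=4,
# prefix='&' and suffix='&', a continuation line must hold the indentation,
# both ampersands and at least one payload character, so
# minmaxlen = 4 + 1 + 1 + 1 = 7 and any maxlen below 7 raises FyppFatalError.
assert 4 + len('&') + len('&') + 1 == 7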
def _get_callable_argspec_py3(func):
sig = inspect.signature(func)
args = []
defaults = {}
vararg = None
for param in sig.parameters.values():
if param.kind == param.POSITIONAL_OR_KEYWORD:
args.append(param.name)
if param.default != param.empty:
defaults[param.name] = param.default
elif param.kind == param.VAR_POSITIONAL:
vararg = param.name
else:
msg = "argument '{0}' has invalid argument type".format(param.name)
raise FyppFatalError(msg)
return args, defaults, vararg
# Signature objects are available from Python 3.3 (the older inspect.getargspec() is deprecated in their favour)
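# A quick illustration of what _get_callable_argspec_py3() extracts (Python 3
# only; the sample function below is made up for this example and assumes the
# surrounding module already imports inspect):
def _sample(a, b=2, *rest):
    return a, b, rest

args, defaults, vararg = _get_callable_argspec_py3(_sample)
assert args == ['a', 'b']
assert defaults == {'b': 2}
assert vararg == 'rest'
# a keyword-only or **kwargs parameter would raise FyppFatalError instead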
def traverse(self, edges, filter=lambda node:True, **kwds):
"""Will walk the elements returned by the generator ``edges -> node -> ptype.type``
This will iterate in a top-down approach.
"""
for self in edges(self, **kwds):
if not isinstance(self, generic):
continue
if filter(self):
yield self
for y in self.traverse(edges=edges, filter=filter, **kwds):
yield y
continue
return
def alloc(self, **attrs):
"""Will zero the ptype.container instance with the provided ``attrs``.
This can be overloaded in order to allocate physical space for the new ptype.
"""
    # If there's a custom .blocksize changing the way this instance gets loaded,
    # then restore the original blocksize temporarily so that .alloc will actually
    # allocate the entire object.
    if getattr(self.blocksize, 'im_func', None) is not container.blocksize.im_func:
        # (Python 2) rebuild a bound method from the original im_func
        instancemethod = type(container.blocksize)
func = container.blocksize.im_func
method = instancemethod(func, self, self.__class__)
attrs.setdefault('blocksize', method)
return super(container, self).alloc(**attrs)
def append(self, object):
"""Add ``object`` to the ptype.container ``self``. Return the element's index.
When adding ``object`` to ``self``, none of the offsets are updated and
thus will need to be manually updated before committing to a provider.
"""
# if we're uninitialized, then create an empty value and try again
if self.value is None:
self.value = []
return self.append(object)
# if object is not an instance, then try to resolve it to one and try again
if not isinstance(object, generic):
res = self.new(object)
return self.append(res)
# assume that object is now a ptype instance
assert isinstance(object, generic), "container.append : {:s} : Tried to append unknown type to container : {:s}".format(self.instance(), object.__class__)
object.parent,object.source = self,None
current = len(self.value)
self.value.append(object if object.initializedQ() else object.a)
return current
def define(cls, *definition, **attributes):
"""Add a definition to the cache keyed by the .type attribute of the definition. Return the original definition.
If any ``attributes`` are defined, the definition is duplicated with the specified attributes before being added to the cache.
"""
def clone(definition):
res = dict(definition.__dict__)
res.update(attributes)
#res = __builtin__.type(res.pop('__name__',definition.__name__), definition.__bases__, res)
res = __builtin__.type(res.pop('__name__',definition.__name__), (definition,), res)
cls.add(getattr(res,cls.attribute),res)
return definition
if attributes:
assert len(definition) == 0, 'Unexpected positional arguments'
return clone
res, = definition
cls.add(getattr(res,cls.attribute),res)
return res
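# The clone() helper above duplicates a class by rebuilding it with type(),
# layering the extra attributes on top and deriving from the original so the
# subclass relationship is kept.  A generic sketch of that idiom (clone_class
# and packet are illustrative names, not part of ptypes):
def clone_class(definition, **attributes):
    namespace = dict(definition.__dict__)
    namespace.update(attributes)
    name = namespace.pop('__name__', definition.__name__)
    return type(name, (definition,), namespace)

class packet(object):
    type = 0x10

fast_packet = clone_class(packet, type=0x20)
assert issubclass(fast_packet, packet) and fast_packet.type == 0x20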
def reference(self, object, **attrs):
"""Reference ``object`` and encode it into self"""
object = self.__hook(object)
# take object, and encode it to an encoded type
enc = self.encode(object, **attrs)
# take encoded type and cast it to self's wrapped type in order to preserve length
res = enc.cast(self._value_, **attrs)
# now that the length is correct, write it to the wrapper_t
res.commit(offset=0, source=provider.proxy(self))
# assign some default attributes to object
object.__name__ = '*'+self.name()
self._object_ = object.__class__
return self
def test_container_copy():
class leaf_sr(ptype.type):
length = 4
class leaf_jr(ptype.type):
length = 2
class branch(ptype.container): pass
a = branch(source=prov.empty())
a.set(leaf_sr, leaf_jr, branch().set(leaf_jr,leaf_jr,leaf_jr))
b = a.copy()
if b.v[2].v[1].size() == leaf_jr.length:
raise Success
# XXX: test casting between block types and stream types (szstring) as this
# might've been broken at some point...
def test_decompression_block():
from ptypes import dynamic,pint,pstruct,ptype
class cblock(pstruct.type):
class _zlibblock(ptype.encoded_t):
_object_ = ptype.block
def encode(self, object, **attrs):
data = object.serialize().encode('zlib')
return super(cblock._zlibblock,self).encode(ptype.block().set(data), length=len(data))
def decode(self, object, **attrs):
data = object.serialize().decode('zlib')
return super(cblock._zlibblock,self).decode(ptype.block().set(data), length=len(data))
def __zlibblock(self):
return ptype.clone(self._zlibblock, _value_=dynamic.block(self['size'].l.int()))
_fields_ = [
(pint.uint32_t, 'size'),
(__zlibblock, 'data'),
]
message = 'hi there.'
cmessage = message.encode('zlib')
data = pint.uint32_t().set(len(cmessage)).serialize()+cmessage
a = cblock(source=prov.string(data)).l
if a['data'].d.l.serialize() == message:
raise Success
def __init__(self, *args, **kwds):
super(enum, self).__init__(*args, **kwds)
# invert ._values_ if they're defined backwards
if len(self._values_):
name, value = self._values_[0]
if isinstance(value, basestring):
            Log.warning("pint.enum : {:s} : {:s}._values_ is defined backwards. Inverting its values.".format(self.classname(), self.typename()))
self._values_ = [(k,v) for v,k in self._values_]
# verify the types are correct for ._values_
if any(not isinstance(k, basestring) or not isinstance(v, six.integer_types) for k,v in self._values_):
raise TypeError(self, 'enum.__init__', "{:s}._values_ is of an incorrect format. Should be [({:s}, {:s}), ...]".format(self.classname(), basestring, int))
# FIXME: fix constants within ._values_ by checking to see if they're out of bounds of our type
return
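# The expected ._values_ layout is a list of (name, value) pairs; a reversed
# [(value, name), ...] list is detected above and flipped with a warning.  A
# small hypothetical subclass for illustration (in ptypes this enum mixin is
# normally combined with a concrete pint integer type):
class ethertype(enum):
    _values_ = [
        ('IPv4', 0x0800),
        ('ARP', 0x0806),
        ('IPv6', 0x86dd),
    ]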
def test_load(cls, integer, expected):
if cls.length == 4:
expected, = struct.unpack('f', struct.pack('f', expected))
i,_ = bitmap.join(bitmap.new(ord(x),8) for x in reversed(struct.pack('f',expected)))
elif cls.length == 8:
expected, = struct.unpack('d', struct.pack('d', expected))
i,_ = bitmap.join(bitmap.new(ord(x),8) for x in reversed(struct.pack('d',expected)))
else:
i = 0
a = cls()
super(type,a).set(integer)
n = a.getf()
if n == expected:
raise Success
raise Failure('getf: 0x%x == %s? %s (%s) %x'%( integer, expected, a.num(), n, i))
## tests for floating-point
def __init__(self, *args, **kwargs):
self.data = {}
# Initialise from sequence object
if len(args) == 1 and type(args[0]) == list:
for item in args[0]:
self[item[0]] = item[1]
# Initialise from mapping object
if len(args) == 1 and type(args[0]) == dict:
self.update(args[0])
# Initialise from NocaseDict
if len(args) == 1 and isinstance(args[0], NocaseDict):
self.data = args[0].data.copy()
# Initialise from keyword args
self.update(kwargs)
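# The constructor above accepts several input shapes; a short usage sketch
# (assumes the rest of pywbem's NocaseDict, i.e. its __setitem__/update and
# case-insensitive lookup):
a = NocaseDict([('Name', 'Foo'), ('Chunky', True)])   # sequence of pairs
b = NocaseDict({'Name': 'Foo', 'Chunky': True})       # plain mapping
c = NocaseDict(Name='Foo', Chunky=True)               # keyword arguments
d = NocaseDict(b)                                     # copy of another NocaseDict
assert a['name'] == b['NAME'] == c['Name'] == d['name'] == 'Foo'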
# Basic accessor and setter methods
def __str__(self):
s = ''
if self.host is not None:
s += '//%s/' % self.host
if self.namespace is not None:
s += '%s:' % self.namespace
s += '%s.' % self.classname
for key, value in self.keybindings.items():
s +='%s=' % key
if type(value) == int or type(value) == long:
s += str(value)
else:
s += '"%s"' % value
s += ','
return s[:-1]
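# Roughly what the rendering above yields for a fully-populated path (hedged;
# assumes pywbem's CIMInstanceName signature and that the keybindings happen
# to iterate in this order):
path = CIMInstanceName('CIM_Foo', {'Name': 'Foo', 'Number': 42},
                       host='srv01', namespace='root/cimv2')
# str(path) -> '//srv01/root/cimv2:CIM_Foo.Name="Foo",Number=42'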
def __setitem__(self, key, value):
# Don't let anyone set integer or float values. You must use
# a subclass from the cim_type module.
if type(value) == int or type(value) == float or type(value) == long:
raise TypeError('Must use a CIM type assigning numeric values.')
if self.property_list is not None and key.lower() not in \
self.property_list:
if self.path is not None and key not in self.path.keybindings:
return
# Convert value to appropriate PyWBEM type
if isinstance(value, CIMProperty):
v = value
else:
v = CIMProperty(key, value)
self.properties[key] = v
if self.path is not None and key in self.path.keybindings:
self.path[key] = v.value
def tocimxml(self):
props = []
for key, value in self.properties.items():
# Value has already been converted into a CIM object
# property type (e.g for creating null property values).
if isinstance(value, CIMProperty):
props.append(value)
continue
props.append(CIMProperty(key, value))
instance_xml = cim_xml.INSTANCE(
self.classname,
properties = [p.tocimxml() for p in props],
qualifiers = [q.tocimxml() for q in self.qualifiers.values()])
if self.path is None:
return instance_xml
return cim_xml.VALUE_NAMEDINSTANCE(self.path.tocimxml(),
instance_xml)
def tomof(self):
def _prop2mof(_type, value):
if value is None:
val = 'NULL'
elif isinstance(value, list):
val = '{'
for i,x in enumerate(value):
if i > 0:
val += ', '
val += _prop2mof(_type, x)
val += '}'
elif _type == 'string':
val = '"' + value + '"'
else:
val = str(value)
return val
s = 'instance of %s {\n' % self.classname
for p in self.properties.values():
s+= '\t%s = %s;\n' % (p.name, _prop2mof(p.type, p.value))
s+= '};\n'
return s
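# A sketch of what tomof() renders for a simple instance (assumes pywbem's
# CIMInstance/CIMProperty plus the Uint32 CIM type wrapper):
inst = CIMInstance('CIM_Foo')
inst['Name'] = 'Foo'
inst['Number'] = Uint32(42)   # numeric values must be wrapped in a CIM type
# inst.tomof() then yields something like:
#
#   instance of CIM_Foo {
#       Name = "Foo";
#       Number = 42;
#   };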
def tocimxml(self):
value = None
if type(self.value) == list:
value = VALUE_ARRAY([VALUE(v) for v in self.value])
elif self.value is not None:
value = VALUE(self.value)
return QUALIFIER(self.name,
self.type,
value,
propagated = self.propagated,
overridable = self.overridable,
tosubclass = self.tosubclass,
toinstance = self.toinstance,
translatable = self.translatable)
def __cmp__(self, other):
if self is other:
return 0
elif not isinstance(other, CIMQualifierDeclaration):
return 1
return (cmpname(self.name, other.name) or
cmp(self.type, other.type) or
cmp(self.value, other.value) or
cmp(self.is_array, other.is_array) or
cmp(self.array_size, other.array_size) or
cmp(self.scopes, other.scopes) or
cmp(self.overridable, other.overridable) or
cmp(self.tosubclass, other.tosubclass) or
cmp(self.toinstance, other.toinstance) or
cmp(self.translatable, other.translatable))
def __new__(cls, name, bases, dct):
new = type.__new__(cls, name, bases, dct)
if '__name__' in dct:
try:
new.__name__ = dct['__name__']
except TypeError:
new.__name__ = dct['__name__'].encode('utf-8')
return new
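# A self-contained illustration of the metaclass above (NamedMeta is an
# illustrative stand-in; the real metaclass name is not shown here, and the
# Python 2 utf-8 fallback branch is omitted).  Calling the metaclass directly
# avoids the Python 2/3 metaclass syntax split:
class NamedMeta(type):
    def __new__(cls, name, bases, dct):
        new = type.__new__(cls, name, bases, dct)
        if '__name__' in dct:
            new.__name__ = dct['__name__']
        return new

Report = NamedMeta('Report', (object,), {'__name__': 'account.invoice.report'})
assert Report.__name__ == 'account.invoice.report'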
def add(self, cls, type='model'):
'''
    Add a class to the pool
'''
with self._locks[self.database_name]:
self._pool[self.database_name][type][cls.__name__] = cls
def iterobject(self, type='model'):
'''
Return an iterator over object name, object
:param type: the type
:return: an iterator
'''
return self._pool[self.database_name][type].iteritems()
def type(self):
'''Returns the IDA dtype of the register.'''
return self.dtyp
def define(cls, processor, type):
def decorator(fn):
res = processor, type
return cls.cache.setdefault(res, fn)
return decorator
def lookup(cls, type, processor=None):
try: return cls.cache[processor or idaapi.ph.id, type]
except KeyError: return cls.cache[0, type]
def type(cls, op, processor=None):
res = cls.lookup(op.type, processor=processor)
return res.__name__
def operand(ea, none):
'''Return all the op_t's of the instruction at ``ea``.'''
insn = at(ea)
res = itertools.takewhile(utils.compose(operator.attrgetter('type'), functools.partial(operator.ne, idaapi.o_void)), insn.Operands)
if idaapi.__version__ < 7.0:
return tuple(op.copy() for op in res)
res = ((idaapi.op_t(), op) for op in res)
return tuple([n.assign(op), n][1] for n, op in res)