def test_doubly_broadcasted_and(self):
    # Verify elementwise & (operator.and_) when BOTH operands need broadcasting;
    # the shared helper does the actual shape setup and comparison.
    self.check_array_doubly_broadcasted_op(operator.and_)
# Example usages of operator.and_() collected from various projects.
def follow_the_sun():
    """
    Set tickets to alarm when user is away.

    A user counts as away when their last login is more than 24 hours old.
    The bot user is skipped. Only tickets whose status is not
    Open/Reopened/Paused/Closed are updated.
    """
    now = int(time())
    where = [~Q(status='Open'), ~Q(status='Reopened'), ~Q(status='Paused'), ~Q(status='Closed')]
    where = reduce(operator.and_, where)
    for user in User.objects.filter(~Q(username=common.BOT_USER.username)):
        if now > mktime((user.last_login + timedelta(hours=24)).timetuple()):
            # The original wrapped one branch's message in unicode() and the
            # other in str(); both wrappers are redundant (the %-formatting
            # already yields a string) and str() could raise on a non-ASCII
            # unicode username under Python 2, so they are dropped.
            Logger.debug(
                'user %s logged out, set alarm to True' % (user.username),
                extra={
                    'user': user.username,
                }
            )
            user.ticketUser.filter(where).update(alarm=True)
        else:
            Logger.debug(
                'user %s logged in, set alarm to False' % (user.username),
                extra={
                    'user': user.username,
                }
            )
            user.ticketUser.filter(where).update(alarm=False)
def count(self, **kwargs):
    """Count open, non-escalated TODO tickets, excluding rejected defendants.

    An optional ``where`` Q object further restricts the queryset.
    """
    custom_where = kwargs.get('where')
    if custom_where:
        rejected = get_defendant_to_reject(where=custom_where)
        conditions = [~Q(defendant__in=rejected), custom_where]
    else:
        rejected = get_defendant_to_reject()
        conditions = [~Q(defendant__in=rejected)]
    combined = reduce(operator.and_, conditions)
    return Ticket.objects.filter(
        combined,
        escalated=False,
        status='Open',
        priority__in=TODO_TICKET_PRIORITY_FILTERS
    ).order_by('id').distinct().count()
def toolbar(**kwargs):
    """Get reports/tickets stats for the toolbar.

    Raises Forbidden when the user has no AbusePermission at all.
    """
    user = kwargs['user']
    if not AbusePermission.objects.filter(user=user.id).count():
        raise Forbidden('You are not allowed to see any category')
    # OR together the per-category restrictions for this user, then AND the
    # result with an empty Q() (the original seeded the filter list with Q()).
    per_user = reduce(operator.or_, _get_user_specific_where(user))
    combined = reduce(operator.and_, [Q(), per_user])
    return _get_toolbar_count(combined, user)
def generate_request_filter(filters):
    """Generate a combined Django Q filter from a filter query dict.

    Supports an ``{'where': {'in': [{field: [values...]}, ...]}}`` structure:
    values for one field are OR'ed together, separate clauses are AND'ed.

    :raises BadRequest: when the filter structure is malformed.
    """
    where = [Q()]
    if 'where' in filters and len(filters['where']):
        try:
            keys = set(k for k in filters['where'])
            if 'in' in keys:
                for clause in filters['where']['in']:
                    # The original used `i` both for this loop AND inside the
                    # comprehension below, shadowing the outer variable;
                    # distinct names avoid that trap.
                    for key, values in clause.iteritems():
                        where.append(reduce(operator.or_, [Q(**{key: value}) for value in values]))
            where = reduce(operator.and_, where)
        except (AttributeError, KeyError, FieldError, SyntaxError, ValueError) as ex:
            raise BadRequest(str(ex.message))
    else:
        where = reduce(operator.and_, where)
    return where
def _build_logical_expression(self, grammar, terminal_component_names):
    """Translate a grammar tree into a simplified sympy boolean expression.

    Terminal components become sympy symbols; compound nodes are folded with
    the symbolic operation mapped from their 'operation' field.
    """
    # sympy.symbols() accepts a space-separated name string directly;
    # the original built the same call via eval(), which was needless and unsafe.
    terminal_component_symbols = symbols(' '.join(terminal_component_names))
    if isinstance(terminal_component_symbols, Symbol):
        # symbols() returns a bare Symbol for a single name; normalize to a list.
        terminal_component_symbols = [terminal_component_symbols]
    name_to_symbol = {terminal_component_names[i]: symbol
                      for i, symbol in enumerate(terminal_component_symbols)}
    terminal_component_names = set(terminal_component_names)
    op_to_symbolic_operation = {
        'not': operator.invert,
        'concat': operator.and_,
        'gap': operator.and_,
        'union': operator.or_,
        'intersect': operator.and_,
    }

    def logical_expression_builder(component):
        # Leaf: look up its symbol; otherwise fold the children recursively.
        if component['id'] in terminal_component_names:
            return name_to_symbol[component['id']]
        children = component['components']
        return reduce(op_to_symbolic_operation[component['operation']],
                      [logical_expression_builder(child) for child in children])

    return simplify(logical_expression_builder(grammar))
def interpret_multi_sents(self, inputs, discourse_ids=None, question=False, verbose=False):
    """
    Use Boxer to give a first order representation.

    :param inputs: list of list of str Input discourses to parse
    :param discourse_ids: list of str Identifiers to be inserted to each occurrence-indexed predicate.
    :param question: bool Whether the input should be treated as a question
    :param verbose: bool Passed through to the candc/boxer calls
    :return: list of ``drt.DrtExpression`` (None entries for unparseable discourses)
    """
    if discourse_ids is not None:
        assert len(inputs) == len(discourse_ids)
        # all() is safe on an empty sequence, unlike reduce(operator.and_, ...)
        # which raises TypeError when both lists are empty; it also avoids
        # shadowing the builtin name `id`.
        assert all(disc_id is not None for disc_id in discourse_ids)
        use_disc_id = True
    else:
        discourse_ids = list(map(str, range(len(inputs))))
        use_disc_id = False

    candc_out = self._call_candc(inputs, discourse_ids, question, verbose=verbose)
    boxer_out = self._call_boxer(candc_out, verbose=verbose)

    # if 'ERROR: input file contains no ccg/2 terms.' in boxer_out:
    #     raise UnparseableInputException('Could not parse with candc: "%s"' % input_str)

    drs_dict = self._parse_to_drs_dict(boxer_out, use_disc_id)
    return [drs_dict.get(disc_id, None) for disc_id in discourse_ids]
def __init__(self, code, objects=None):
    # Interpreter state for a snippet of JavaScript-like code.
    # `code` is the source text; `objects` is an optional mapping of
    # pre-defined object names available to the interpreted code.
    #
    # NOTE(review): this table's order presumably mirrors operator
    # precedence used by the expression parser — preserve it as-is.
    self._OPERATORS = [
        ('|', operator.or_),
        ('^', operator.xor),
        ('&', operator.and_),
        ('>>', operator.rshift),
        ('<<', operator.lshift),
        ('-', operator.sub),
        ('+', operator.add),
        ('%', operator.mod),
        ('/', operator.truediv),
        ('*', operator.mul),
    ]
    # Each binary operator gets a compound-assignment form ('|=', '+=', ...);
    # plain '=' just replaces the current value with the right-hand side.
    self._ASSIGN_OPERATORS = [(op + '=', opfunc)
                              for op, opfunc in self._OPERATORS]
    self._ASSIGN_OPERATORS.append(('=', lambda cur, right: right))
    # Regex fragment matching a JavaScript identifier.
    self._VARNAME_PATTERN = r'[a-zA-Z_$][a-zA-Z_$0-9]*'
    if objects is None:
        objects = {}
    self.code = code
    self._functions = {}  # cache of parsed function objects
    self._objects = objects
def number_of_args(fn):
    """Return the number of positional arguments for a function, or None if the number is variable.

    Looks inside any decorated functions (via ``__wrapped__``).

    :raises NotImplementedError: for built-in operators that have no signature
        and are not in the known unary/binary/ternary lists.
    """
    try:
        if hasattr(fn, '__wrapped__'):
            return number_of_args(fn.__wrapped__)
        # Compute the signature once (the original called signature(fn) twice).
        params = signature(fn).parameters.values()
        if any(p.kind == p.VAR_POSITIONAL for p in params):
            return None
        return sum(p.kind in (p.POSITIONAL_ONLY, p.POSITIONAL_OR_KEYWORD) for p in params)
    except ValueError:
        # signatures don't work for built-in operators, so check for a few explicitly
        UNARY_OPS = [len, op.not_, op.truth, op.abs, op.index, op.inv, op.invert, op.neg, op.pos]
        BINARY_OPS = [op.lt, op.le, op.gt, op.ge, op.eq, op.ne, op.is_, op.is_not, op.add, op.and_, op.floordiv, op.lshift, op.mod, op.mul, op.or_, op.pow, op.rshift, op.sub, op.truediv, op.xor, op.concat, op.contains, op.countOf, op.delitem, op.getitem, op.indexOf]
        TERNARY_OPS = [op.setitem]
        if fn in UNARY_OPS:
            return 1
        elif fn in BINARY_OPS:
            return 2
        elif fn in TERNARY_OPS:
            return 3
        else:
            # Typo fixed: "Bult-in" -> "Built-in".
            raise NotImplementedError("Built-in operator {} not supported".format(fn))
def get_filters(self):
    """Build a combined Q filter from the form's cleaned data.

    Returns a single AND-ed Q object, or None when no filter field is set.
    """
    data = self.cleaned_data
    # (field name, Q builder) pairs, evaluated in the original order.
    field_builders = (
        ('type', lambda v: Q(type=v)),
        ('name', lambda v: Q(name__icontains=v)),
        ('content', lambda v: Q(content__icontains=v)),
        ('changed_since', lambda v: Q(change_date__gte=v.strftime('%s'))),
    )
    conditions = []
    for field, build in field_builders:
        value = data.get(field)
        if value:
            conditions.append(build(value))
    if not conditions:
        return None
    return reduce(and_, conditions)
def _translate_query(self, query):
    """Recursively translate an internal query object into an elasticsearch-dsl filter."""
    if isinstance(query, queries.CompoundQuery):
        # Map the compound node type to its boolean combinator, then fold
        # the translated sub-queries with it.
        combine = {
            queries.Or: operator.or_,
            queries.And: operator.and_,
        }[query.__class__]
        translated = [self._translate_query(sub) for sub in query.queries]
        return functools.reduce(combine, translated)
    key = query.key
    # String values under 'data.' hit the non-analyzed '.raw' sub-field.
    if key.startswith('data.') and isinstance(query.value, str):
        key += '.raw'
    filter_name = {queries.Equal: 'term'}[query.__class__]
    return elasticsearch_dsl.F(filter_name, **{key: query.value})
def filter(self):
    """Parse ``filter[...]`` query arguments into one AND-ed jam query.

    Returns None when no filter arguments are present. The 'ref' field is
    kept as-is; every other field is namespaced under 'data.'.
    """
    pattern = re.compile(r'filter\[(.+)\]')
    parsed = {}
    for arg_name in self.request.query_arguments:
        match = pattern.match(arg_name)
        if not match:
            continue
        field = match.groups()[-1]
        # Only the last value supplied for an argument is used.
        raw_value = self.request.query_arguments[arg_name][-1].decode()
        if field in {'ref'}:
            parsed[field] = raw_value
        else:
            parsed['data.{}'.format(field)] = raw_value
    if not parsed:
        return None
    clauses = [jam.Q(key, 'eq', parse_value(value))
               for key, value in parsed.items()]
    return functools.reduce(operator.and_, clauses)
# boxer.py — source from project PyDataLondon29-EmbarrassinglyParallelDAWithAWSLambda (author: SignalMedia).
def interpret_multi_sents(self, inputs, discourse_ids=None, question=False, verbose=False):
    """
    Use Boxer to give a first order representation.

    :param inputs: list of list of str Input discourses to parse
    :param discourse_ids: list of str Identifiers to be inserted to each occurrence-indexed predicate.
    :param question: bool Whether the input should be treated as a question
    :param verbose: bool Passed through to the candc/boxer calls
    :return: list of ``drt.DrtExpression`` (None entries for unparseable discourses)
    """
    if discourse_ids is not None:
        assert len(inputs) == len(discourse_ids)
        # all() is safe on an empty sequence, unlike reduce(operator.and_, ...)
        # which raises TypeError when both lists are empty; it also avoids
        # shadowing the builtin name `id`.
        assert all(disc_id is not None for disc_id in discourse_ids)
        use_disc_id = True
    else:
        discourse_ids = list(map(str, range(len(inputs))))
        use_disc_id = False

    candc_out = self._call_candc(inputs, discourse_ids, question, verbose=verbose)
    boxer_out = self._call_boxer(candc_out, verbose=verbose)

    # if 'ERROR: input file contains no ccg/2 terms.' in boxer_out:
    #     raise UnparseableInputException('Could not parse with candc: "%s"' % input_str)

    drs_dict = self._parse_to_drs_dict(boxer_out, use_disc_id)
    return [drs_dict.get(disc_id, None) for disc_id in discourse_ids]
def interpret_multi_sents(self, inputs, discourse_ids=None, question=False, verbose=False):
    """
    Use Boxer to give a first order representation.

    :param inputs: list of list of str Input discourses to parse
    :param discourse_ids: list of str Identifiers to be inserted to each occurrence-indexed predicate.
    :param question: bool Whether the input should be treated as a question
    :param verbose: bool Passed through to the candc/boxer calls
    :return: list of ``drt.DrtExpression`` (None entries for unparseable discourses)
    """
    if discourse_ids is not None:
        assert len(inputs) == len(discourse_ids)
        # all() is safe on an empty sequence, unlike reduce(operator.and_, ...)
        # which raises TypeError when both lists are empty; it also avoids
        # shadowing the builtin name `id`.
        assert all(disc_id is not None for disc_id in discourse_ids)
        use_disc_id = True
    else:
        discourse_ids = list(map(str, range(len(inputs))))
        use_disc_id = False

    candc_out = self._call_candc(inputs, discourse_ids, question, verbose=verbose)
    boxer_out = self._call_boxer(candc_out, verbose=verbose)

    # if 'ERROR: input file contains no ccg/2 terms.' in boxer_out:
    #     raise UnparseableInputException('Could not parse with candc: "%s"' % input_str)

    drs_dict = self._parse_to_drs_dict(boxer_out, use_disc_id)
    return [drs_dict.get(disc_id, None) for disc_id in discourse_ids]
def check_and_update_hash_and_deps(self, bn):
    """Return True when `bn` and all of its (transitive) dependencies are
    unchanged since the last recorded hash.

    Updates the cached hash/dependency info in ``self.srcmd5`` along the way.
    """
    # Get latest computed hash and deps
    oldhash, deps = self.srcmd5.get(bn, (None, []))
    # Read the source; the original leaked the open file object, so use a
    # context manager to close it deterministically.
    # NOTE(review): hashlib.md5() requires bytes on Python 3 — open in 'rb'
    # if this is ever ported.
    with open(os.path.join(self.buildpath, bn)) as src_file:
        src = src_file.read()
    # compute new hash
    newhash = hashlib.md5(src).hexdigest()
    # compare
    match = (oldhash == newhash)
    if not match:
        # file has changed: recompute direct dependencies
        deps = []
        self.append_cfile_deps(src, deps)
        # store that hash and deps
        self.srcmd5[bn] = (newhash, deps)
    # recurse through deps
    # TODO detect circular deps.
    return reduce(operator.and_, map(self.check_and_update_hash_and_deps, deps), match)
def _filter_q(self, q):
    """
    filter by Q object.
    Split it up into recursive _filter_q and _filter_kwarg calls and combine them again.
    :return: new Q object and set of matched existing pks
    """
    if not q.children:
        # An empty Q matches everything: return it untouched together with
        # the full initial pk set.
        return q, self._get_initial_created_pks()
    # Translate each child: nested Q nodes recurse, (key, value) pairs go to
    # _filter_kwarg; zip(*...) separates the resulting (Q, pks) pairs.
    filters, created_pks = zip(*((self._filter_q(c) if isinstance(c, Q) else self._filter_kwarg(*c))
                                 for c in q.children))
    result = Q(*filters)
    # Preserve the original node's AND/OR connector and its negation flag.
    result.connector = q.connector
    result.negated = q.negated
    # Combine the child pk sets to mirror the Q semantics:
    # set intersection for AND, union for OR.
    created_pks = reduce(operator.and_ if q.connector == 'AND' else operator.or_, created_pks)
    if q.negated:
        # Negation: complement against the initial pk set.
        created_pks = self._get_initial_created_pks()-created_pks
    return result, created_pks
def interpret_multi_sents(self, inputs, discourse_ids=None, question=False, verbose=False):
    """
    Use Boxer to give a first order representation.

    :param inputs: list of list of str Input discourses to parse
    :param discourse_ids: list of str Identifiers to be inserted to each occurrence-indexed predicate.
    :param question: bool Whether the input should be treated as a question
    :param verbose: bool Passed through to the candc/boxer calls
    :return: list of ``drt.DrtExpression`` (None entries for unparseable discourses)
    """
    if discourse_ids is not None:
        assert len(inputs) == len(discourse_ids)
        # all() is safe on an empty sequence, unlike reduce(operator.and_, ...)
        # which raises TypeError when both lists are empty; it also avoids
        # shadowing the builtin name `id`.
        assert all(disc_id is not None for disc_id in discourse_ids)
        use_disc_id = True
    else:
        discourse_ids = list(map(str, range(len(inputs))))
        use_disc_id = False

    candc_out = self._call_candc(inputs, discourse_ids, question, verbose=verbose)
    boxer_out = self._call_boxer(candc_out, verbose=verbose)

    # if 'ERROR: input file contains no ccg/2 terms.' in boxer_out:
    #     raise UnparseableInputException('Could not parse with candc: "%s"' % input_str)

    drs_dict = self._parse_to_drs_dict(boxer_out, use_disc_id)
    return [drs_dict.get(disc_id, None) for disc_id in discourse_ids]
def pack(cls, **kwargs):
    """Pack a bitmask from explicit bit values.

    Parameters
    ----------
    kwargs
        The names of the fields and their status. Any fields not explicitly
        passed will be set to False.

    Returns
    -------
    bitmask : int
        The packed bitmask.

    Raises
    ------
    TypeError
        If a keyword does not name a member of this bitmask.
    """
    members = cls.__members__
    try:
        # Bits must be combined with OR — the original used op.and_, which
        # zeroes out any mask containing two distinct bits. The initial
        # value 0 also makes pack() with no kwargs return an empty mask
        # instead of raising TypeError from reduce() on an empty sequence.
        return reduce(
            op.or_,
            (members[k] * bool(v) for k, v in kwargs.items()),
            0,
        )
    except KeyError as e:
        # The original raised a literal '{e} ...' string without formatting it.
        raise TypeError(
            '{e} is not a member of {name}'.format(e=e, name=cls.__qualname__)
        )
def interpret_multi_sents(self, inputs, discourse_ids=None, question=False, verbose=False):
    """
    Use Boxer to give a first order representation.

    :param inputs: list of list of str Input discourses to parse
    :param discourse_ids: list of str Identifiers to be inserted to each occurrence-indexed predicate.
    :param question: bool Whether the input should be treated as a question
    :param verbose: bool Passed through to the candc/boxer calls
    :return: list of ``drt.DrtExpression`` (None entries for unparseable discourses)
    """
    if discourse_ids is not None:
        assert len(inputs) == len(discourse_ids)
        # all() is safe on an empty sequence, unlike reduce(operator.and_, ...)
        # which raises TypeError when both lists are empty; it also avoids
        # shadowing the builtin name `id`.
        assert all(disc_id is not None for disc_id in discourse_ids)
        use_disc_id = True
    else:
        discourse_ids = list(map(str, range(len(inputs))))
        use_disc_id = False

    candc_out = self._call_candc(inputs, discourse_ids, question, verbose=verbose)
    boxer_out = self._call_boxer(candc_out, verbose=verbose)

    # if 'ERROR: input file contains no ccg/2 terms.' in boxer_out:
    #     raise UnparseableInputException('Could not parse with candc: "%s"' % input_str)

    drs_dict = self._parse_to_drs_dict(boxer_out, use_disc_id)
    return [drs_dict.get(disc_id, None) for disc_id in discourse_ids]
def interpret_multi_sents(self, inputs, discourse_ids=None, question=False, verbose=False):
    """
    Use Boxer to give a first order representation.

    :param inputs: list of list of str Input discourses to parse
    :param discourse_ids: list of str Identifiers to be inserted to each occurrence-indexed predicate.
    :param question: bool Whether the input should be treated as a question
    :param verbose: bool Passed through to the candc/boxer calls
    :return: list of ``drt.DrtExpression`` (None entries for unparseable discourses)
    """
    if discourse_ids is not None:
        assert len(inputs) == len(discourse_ids)
        # all() is safe on an empty sequence, unlike reduce(operator.and_, ...)
        # which raises TypeError when both lists are empty; it also avoids
        # shadowing the builtin name `id`.
        assert all(disc_id is not None for disc_id in discourse_ids)
        use_disc_id = True
    else:
        discourse_ids = list(map(str, range(len(inputs))))
        use_disc_id = False

    candc_out = self._call_candc(inputs, discourse_ids, question, verbose=verbose)
    boxer_out = self._call_boxer(candc_out, verbose=verbose)

    # if 'ERROR: input file contains no ccg/2 terms.' in boxer_out:
    #     raise UnparseableInputException('Could not parse with candc: "%s"' % input_str)

    drs_dict = self._parse_to_drs_dict(boxer_out, use_disc_id)
    return [drs_dict.get(disc_id, None) for disc_id in discourse_ids]
def __and__(self, trc):
    # Delegate `self & trc` to the generic binary-operation helper,
    # using bitwise AND as the combining function.
    return self.apply_op2(trc, operator.and_)
def has_result_of_status(self, status, results):
    """Search the db for entries on this inbound endpoint with the given
    status id whose result matches ANY of *results*."""
    inbound_str = self.items["operation"]["inbound"]
    query = Query()
    # OR together one equality test per acceptable result...
    any_result = reduce(or_, (query.result == a_result for a_result in results))
    # ...then AND it with the inbound name and status-id constraints.
    combined_query = reduce(and_, [
        query.inbound == inbound_str,
        query.inbound_status_id == status.get_status_id(),
        any_result,
    ])
    return self.search_db(combined_query)
def has_required_sections(self):
    """Return True when every required non-endpoint section is present in
    the config.

    Uses all() instead of reduce(and_, [...]): equivalent for booleans, but
    returns True for an empty requirement list rather than raising TypeError.
    """
    rq_sections = [a_section for a_section in self.required_params
                   if not a_section.startswith("endpoint ")]
    return all(a_section in self.items.sections()
               for a_section in rq_sections)
def has_required_endpoint_sections(self):
    """Return True when the endpoint sections named by the operation's
    inbound and outbound settings both exist in the config.

    all() replaces reduce(and_, [...]): equivalent for booleans, and cannot
    raise TypeError on an empty list.
    """
    rq_endpoint_sections = [
        "endpoint {0}".format(self.items["operation"][direction])
        for direction in ["inbound", "outbound"]]
    return all(a_section in self.items.sections()
               for a_section in rq_endpoint_sections)
def test_percentile_nasty_partitions(self):
    # Test percentile with nasty partitions: divide up 5 assets into
    # quartiles.
    # There isn't a nice mathematical definition of correct behavior here,
    # so for now we guarantee the behavior of numpy.nanpercentile. This is
    # mostly for regression testing in case we write our own specialized
    # percentile calculation at some point in the future.
    data = arange(25, dtype=float).reshape(5, 5) % 4
    quartiles = range(4)
    filter_names = ['pct_' + str(q) for q in quartiles]
    # Per-filter (lower, upper) percentile bounds, keyed by filter name.
    bounds = {name: (q * 25.0, (q + 1) * 25.0)
              for name, q in zip(filter_names, quartiles)}
    graph = TermGraph(
        {name: self.f.percentile_between(lo, hi)
         for name, (lo, hi) in bounds.items()}
    )
    results = self.run_graph(
        graph,
        initial_workspace={self.f: data},
        mask=self.build_mask(ones((5, 5))),
    )
    for name, (lower, upper) in bounds.items():
        # Expected mask: values between the row-wise lower and upper
        # nanpercentiles, inclusive on both sides.
        expected = and_(
            nanpercentile(data, lower, axis=1, keepdims=True) <= data,
            data <= nanpercentile(data, upper, axis=1, keepdims=True),
        )
        check_arrays(results[name], expected)
def exec(self, proc: Processor):
    """Execute this ALU instruction on `proc`: fold the operands with the
    instruction's operator, store the result, update carry, advance the pc."""
    self.proc = proc
    self.args = map(self.expand, self.o_args)  # load register values
    val = reduce(self.operator, self.args)  # apply operator
    # Add/subtract-with-carry fold the external carry flag into the result.
    # BUG FIX: the original compared the `operator` MODULE (`if operator is
    # addc`), which is always False; the instruction's operator lives in
    # self.operator.
    if self.operator is addc or self.operator is subc:
        val += int(self.proc.external.carry)
    proc.memory.set_register(self.register, val)  # set result
    # Logical operations clear the carry flag (same module-vs-attribute fix).
    if self.operator is operator.and_ or self.operator is operator.or_ or self.operator is operator.xor:
        self.proc.set_carry(False)
    # increment pc
    self.proc.manager.next()
def bitwise_and(lst):
    """Fold *lst* left-to-right with the & operator and return the result.

    Like reduce(), raises TypeError when *lst* is empty.
    """
    items = iter(lst)
    try:
        acc = next(items)
    except StopIteration:
        # Match reduce()'s behavior on an empty sequence.
        raise TypeError('bitwise_and() of empty sequence with no initial value')
    for item in items:
        acc = acc & item
    return acc
def and_test(cls, nodelist):
    # MUST NOT short-circuit evaluation, or invalid syntax can be skipped!
    # Interpret every operand (odd indices) eagerly, then fold with &.
    operands = []
    for idx in range(1, len(nodelist), 2):
        operands.append(cls.interpret(nodelist[idx]))
    return functools.reduce(operator.and_, operands)
def and_test(cls, nodelist):
    # MUST NOT short-circuit evaluation, or invalid syntax can be skipped!
    # Every odd-indexed child is interpreted before any folding happens.
    values = list(map(cls.interpret,
                      (nodelist[i] for i in range(1, len(nodelist), 2))))
    return functools.reduce(operator.and_, values)
def and_test(cls, nodelist):
    # MUST NOT short-circuit evaluation, or invalid syntax can be skipped!
    # Collect the interpreted operands first, then fold them with bitwise &.
    interpreted = [cls.interpret(nodelist[idx])
                   for idx in range(1, len(nodelist), 2)]
    return functools.reduce(operator.and_, interpreted)