def image_reading(path: str, resized_size: Tuple[int, int] = None, data_augmentation: bool = False,
                  padding: bool = False) -> Tuple[tf.Tensor, tf.Tensor]:
    # Read image
    image_content = tf.read_file(path, name='image_reader')
    image = tf.cond(tf.equal(tf.string_split([path], '.').values[1], tf.constant('jpg', dtype=tf.string)),
                    true_fn=lambda: tf.image.decode_jpeg(image_content, channels=1, try_recover_truncated=True),  # TODO channels = 3 ?
                    false_fn=lambda: tf.image.decode_png(image_content, channels=1), name='image_decoding')

    # Data augmentation
    if data_augmentation:
        image = augment_data(image)

    # Padding
    if padding:
        with tf.name_scope('padding'):
            image, img_width = padding_inputs_width(image, resized_size,
                                                    increment=CONST.DIMENSION_REDUCTION_W_POOLING)
    # Resize
    else:
        image = tf.image.resize_images(image, size=resized_size)
        img_width = tf.shape(image)[1]

    with tf.control_dependencies([tf.assert_equal(image.shape[:2], resized_size)]):
        return image, img_width
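A minimal usage sketch for the reader above, assuming TensorFlow 1.x and that `augment_data`, `padding_inputs_width` and `CONST` come from the surrounding project; the paths and target size are illustrative only.

import tensorflow as tf

paths = ['data/img_000.png', 'data/img_001.jpg']  # hypothetical sample paths
dataset = (tf.data.Dataset.from_tensor_slices(tf.constant(paths))
           .map(lambda p: image_reading(p, resized_size=(32, 100)))
           .batch(2))
images, widths = dataset.make_one_shot_iterator().get_next()
with tf.Session() as sess:
    batch_images, batch_widths = sess.run([images, widths])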
def process_(child) -> Tuple[str, datetime]:
    name, text = child.name, child.get_text()
    try:
        # Try converting text to an integer
        text = int(text)
    except ValueError:
        # Ignore values that cannot be cast to int
        pass
    if name == "my_last_updated":
        text = datetime.fromtimestamp(float(text))
    if name in ("my_finish_date", "my_start_date", "series_end", "series_start"):
        try:
            text = datetime.strptime(text, "%Y-%m-%d")
        except ValueError:
            text = datetime.fromtimestamp(0)
    # Return name and text as a tuple
    return name, text
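A quick illustration of the field handling above, assuming the `child` argument is a BeautifulSoup tag; the XML fragment is made up.

from bs4 import BeautifulSoup

soup = BeautifulSoup('<my_start_date>2017-01-15</my_start_date>', 'html.parser')
name, value = process_(soup.find('my_start_date'))
# name == 'my_start_date'; value == datetime(2017, 1, 15, 0, 0)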
def _sort_dataset_by_padding(dataset: Dataset,
                             sorting_keys: List[Tuple[str, str]],  # pylint: disable=invalid-sequence-index
                             padding_noise: float = 0.0) -> Dataset:
    """
    Sorts the ``Instances`` in this ``Dataset`` by their padding lengths, using the keys in
    ``sorting_keys`` (in the order in which they are provided). ``sorting_keys`` is a list of
    ``(field_name, padding_key)`` tuples.
    """
    instances_with_lengths = []
    for instance in dataset.instances:
        padding_lengths = cast(Dict[str, Dict[str, float]], instance.get_padding_lengths())
        if padding_noise > 0.0:
            noisy_lengths = {}
            for field_name, field_lengths in padding_lengths.items():
                noisy_lengths[field_name] = add_noise_to_dict_values(field_lengths, padding_noise)
            padding_lengths = noisy_lengths
        instance_with_lengths = ([padding_lengths[field_name][padding_key]
                                  for (field_name, padding_key) in sorting_keys],
                                 instance)
        instances_with_lengths.append(instance_with_lengths)
    instances_with_lengths.sort(key=lambda x: x[0])
    return Dataset([instance_with_lengths[-1] for instance_with_lengths in instances_with_lengths])
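The helper `add_noise_to_dict_values` is not shown above; a plausible sketch (perturbing each length by up to ±`noise_param` of its value, so that sort ties break randomly) would be:

import random
from typing import Dict

def add_noise_to_dict_values(dictionary: Dict, noise_param: float) -> Dict:
    # Perturb each value by a uniform fraction of its own magnitude.
    new_dict = {}
    for key, value in dictionary.items():
        noise = value * noise_param * (random.random() * 2 - 1)
        new_dict[key] = value + noise
    return new_dict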
def text_to_instance(self,  # type: ignore
                     question_text: str,
                     passage_text: str,
                     token_spans: List[Tuple[int, int]] = None,
                     answer_texts: List[str] = None,
                     question_tokens: List[Token] = None,
                     passage_tokens: List[Token] = None) -> Instance:
    # pylint: disable=arguments-differ
    if not question_tokens:
        question_tokens = self._tokenizer.tokenize(question_text)
    if not passage_tokens:
        passage_tokens = self._tokenizer.tokenize(passage_text)
    return util.make_reading_comprehension_instance(question_tokens,
                                                    passage_tokens,
                                                    self._token_indexers,
                                                    passage_text,
                                                    token_spans,
                                                    answer_texts)
def evaluate_result(result: dict, group_order: list) -> Tuple[dict, OrderedDict]:
    """
    Evaluate and describe a complete result dictionary.

    Returns a dictionary of the groups. Each group maps to another dictionary
    specifying the number of good, bad and neutral results, as well as the
    overall group rating and the ratio of good results.
    """
    if 'reachable' in result and not result['reachable']:
        return UnrateableSiteEvaluation(), {}
    evaluated_groups = {}
    described_groups = OrderedDict()
    for group in group_order:
        if group not in CHECKS:
            continue
        evaluated_groups[group], described_groups[group] = evaluate_group(group, result)
    return SiteEvaluation(evaluated_groups, group_order), described_groups
def _parse_new_results(previous_results: List[Tuple[list, dict]]) -> tuple:
    """
    Parse previous results, split them into raw data, results and errors, and
    merge data from multiple test suites.
    """
    raw = []
    result = {}
    errors = []
    for entry in previous_results:
        if isinstance(entry, (list, tuple)):
            scan_host = entry[0]
            test = entry[1]
            if isinstance(entry[2], dict):
                # Add the test specifier to each raw data element
                for identifier, raw_elem in entry[2].items():
                    raw.append(dict(
                        identifier=identifier,
                        scan_host=scan_host,
                        test=test,
                        **raw_elem))
            if isinstance(entry[3], dict):
                result.update(entry[3])
        else:
            errors.append(entry)
    return raw, result, errors
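An illustrative call, with the tuple layout `(scan_host, test, raw_dict, result_dict)` inferred from the indexing above; all values are made up.

previous = [
    ('example.com', 'header_check',
     {'hsts': {'value': 'max-age=0'}},   # raw data, keyed by identifier
     {'hsts_rating': 'bad'}),            # merged into `result`
    RuntimeError('scan timed out'),      # non-tuple entries become errors
]
raw, result, errors = _parse_new_results(previous)
# raw    -> [{'identifier': 'hsts', 'scan_host': 'example.com',
#             'test': 'header_check', 'value': 'max-age=0'}]
# result -> {'hsts_rating': 'bad'}
# errors -> [RuntimeError('scan timed out')]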
def get_form_and_formset(
        request: HttpRequest = None, extra: int = 1, initial_form: SessionBaseForm = None,
        initial_formset=None
) -> Tuple[SessionBaseForm, Any]:
    """Build the session form and item-movement formset: bound to POST data if
    a request is given, pre-filled if initial values are given, else empty."""
    ItemMovementFormSet = forms.formset_factory(ItemMovementForm, extra=extra)
    if request:
        form = SessionBaseForm(request.POST, prefix='session')
        formset = ItemMovementFormSet(request.POST, prefix='items')
    elif initial_form or initial_formset:
        form = SessionBaseForm(initial=initial_form, prefix='session')
        formset = ItemMovementFormSet(initial=initial_formset, prefix='items')
    else:
        form = SessionBaseForm(prefix='session')
        formset = ItemMovementFormSet(prefix='items')
    return form, formset
def _split_sample(
        split: Callable[[object], bool], X: np.ndarray, y: np.ndarray
) -> Tuple[Tuple[np.ndarray, np.ndarray], Tuple[np.ndarray, np.ndarray]]:
    """
    Split X, y sample set in two with a split function
    :return: ((X_left, y_left), (X_right, y_right))
    """
    if split.type == 'numerical':
        left_indexes = X[:, split.attribute] < split.criteria
        right_indexes = ~left_indexes
    else:
        Z = (
            pd.Index(pd.unique(split.criteria))
            .get_indexer(X[:, split.attribute]))
        left_indexes = np.where(Z >= 0)[0]
        right_indexes = np.where(Z < 0)[0]
    left = X[left_indexes], y[left_indexes]
    right = X[right_indexes], y[right_indexes]
    return left, right
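A numerical-split example; the `split` object here is a stand-in (the project presumably has its own class exposing `.type`, `.attribute` and `.criteria`).

import numpy as np
from types import SimpleNamespace

split = SimpleNamespace(type='numerical', attribute=0, criteria=2.5)
X = np.array([[1.0, 10.0], [3.0, 20.0], [2.0, 30.0]])
y = np.array([0, 1, 0])
(X_left, y_left), (X_right, y_right) = _split_sample(split, X, y)
# X_left holds the rows where column 0 < 2.5, X_right the rest.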
def linearize(path: List, obstacles: List[Tuple]) -> List:
    """
    Fill in the gap between two non-consecutive cells.
    :param path: list of coordinates making up the path
    :param obstacles: list of obstacle coordinates
    :return: a linearized list
    """
    y_dir = 1 if path[0][1] < path[-1][1] else -1
    x_dir = 1 if path[0][0] < path[-1][0] else -1
    list2 = []
    for i in range(1, len(path) + 1):
        try:
            list2.append(path[i - 1])
            if path[i - 1][0] != path[i][0] and path[i - 1][1] != path[i][1]:
                if (path[i - 1][0], path[i - 1][1] + y_dir) not in obstacles:
                    list2.append((path[i - 1][0], path[i - 1][1] + y_dir))
                elif (path[i - 1][0] + x_dir, path[i - 1][1]) not in obstacles:
                    list2.append((path[i - 1][0] + x_dir, path[i - 1][1]))
        except IndexError:
            continue
    return list(remove_duplicates(list2))
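A small worked example; `remove_duplicates` is not shown above, so an order-preserving stand-in is assumed here.

def remove_duplicates(seq):
    # Yield each coordinate once, keeping first-seen order.
    seen = set()
    for item in seq:
        if item not in seen:
            seen.add(item)
            yield item

print(linearize([(0, 0), (1, 1), (2, 2)], obstacles=[]))
# -> [(0, 0), (0, 1), (1, 1), (1, 2), (2, 2)]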
def verif_conditions(self, entitee: Entitee, cible: Tuple[int, int]) -> bool:
    """Determine whether the spell cast is valid."""
    if entitee.var_attributs.ap >= self.cost:
        if entitee.combat_coords[0] == cible[0]:
            if not (self.max_range >= abs(entitee.combat_coords[1] - cible[1]) >= self.min_range):
                return False
        elif entitee.combat_coords[1] == cible[1]:
            if not (self.max_range >= abs(entitee.combat_coords[0] - cible[0]) >= self.min_range):
                return False
        else:
            return False
        cases_traversee = bresenham(entitee.combat_coords, cible)
        for i in cases_traversee:
            if i in entitee.combat.map.fullobs:
                return False
        entitee.var_attributs.ap -= self.cost
        return True
    return False
def config_per_platform(config: ConfigType,
                        domain: str) -> Iterable[Tuple[Any, Any]]:
    """Generator to break a component config into different platforms.

    For example, will find 'switch', 'switch 2', 'switch 3', etc.
    """
    for config_key in extract_domain_configs(config, domain):
        platform_config = config[config_key]
        if not platform_config:
            continue
        elif not isinstance(platform_config, list):
            platform_config = [platform_config]
        for item in platform_config:
            try:
                platform = item.get(CONF_PLATFORM)
            except AttributeError:
                platform = None
            yield platform, item
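Roughly how the generator behaves on a Home Assistant-style config, assuming `extract_domain_configs` matches keys of the form 'switch', 'switch 2', ... and `CONF_PLATFORM` is 'platform'; the config dict is invented.

config = {
    'switch': {'platform': 'mqtt', 'name': 'lamp'},
    'switch 2': [{'platform': 'rest'}, {'platform': 'template'}],
}
for platform, item in config_per_platform(config, 'switch'):
    print(platform, item)
# mqtt {'platform': 'mqtt', 'name': 'lamp'}
# rest {'platform': 'rest'}
# template {'platform': 'template'}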
def metrics_from_counts(counts: List[int]) -> Tuple[float, float, float, float]:
    """
    Computes classifier metrics given counts of correct, incorrect, missing and spurious
    :param counts: A (4,) vector of (correct, incorrect, missing, spurious)
    :return: acc, recall, precision and f1
    """
    eps = 1e-16
    correct, incorrect, missing, spurious = counts
    acc = correct / (correct + incorrect + missing + spurious + eps)
    recall = correct / (correct + incorrect + missing + eps)
    precision = correct / (correct + incorrect + spurious + eps)
    f1 = 2 * (precision * recall) / (recall + precision + eps)
    return acc, recall, precision, f1
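A worked example with invented counts:

acc, recall, precision, f1 = metrics_from_counts([90, 5, 3, 2])
# acc       = 90 / 100 = 0.90
# recall    = 90 / 98  ~ 0.918
# precision = 90 / 97  ~ 0.928
# f1        = 2 * p * r / (p + r) ~ 0.923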
def _batch_questions(self, questions: List[Tuple[QASetting, List[Answer]]], batch_size, is_eval: bool):
    """Optionally shuffles and batches annotations.

    By default, all annotations are shuffled (if self._shuffle(is_eval)) and
    then batched. Override this method if you want to customize the batching,
    e.g., to do stratified sampling, sampling with replacement, etc.

    Args:
        questions: List of annotations to shuffle & batch.
        batch_size: Number of annotations per batch.
        is_eval: Whether batches are generated for evaluation.

    Returns: Batch iterator
    """
    rng = _rng if self._shuffle(is_eval) else None
    return shuffle_and_batch(questions, batch_size, rng)
def train(self, optimizer,
          training_set: Iterable[Tuple[QASetting, List[Answer]]],
          batch_size: int, max_epochs=10, hooks=tuple(),
          l2=0.0, clip=None, clip_op=tf.clip_by_value, summary_writer=None, **kwargs):
    """
    This method trains the reader (and changes its state).

    Args:
        optimizer: TF optimizer
        training_set: the training instances.
        batch_size: size of training batches
        max_epochs: maximum number of epochs
        hooks: TrainingHook implementations that are called after epochs and batches
        l2: l2 regularization strength (0.0 disables it)
        clip: whether to apply gradient clipping and at which value
        clip_op: operation to perform for clipping
    """
    batches, loss, min_op, summaries = self._setup_training(
        batch_size, clip, optimizer, training_set, summary_writer, l2, clip_op, **kwargs)
    self._train_loop(min_op, loss, batches, hooks, max_epochs, summaries, summary_writer, **kwargs)
def __init__(self, text: str, span: Tuple[int, int] = None, doc_idx: int = 0, score: float = 1.0):
    """
    Create a new answer.

    Args:
        text: The text string of the answer.
        span: For extractive QA, a span in the support documents. The pair `(start, end)`
            represents a span in the support document with index `doc_idx` in the ordered
            sequence of support documents. The span starts at `start` and ends at `end`
            (exclusive).
        doc_idx: Index of the document where the answer was found.
        score: The score a model associates with this answer.
    """
    assert span is None or len(span) == 2, "span should be (char_start, char_end) tuple"
    self.score = score
    self.span = span
    self.doc_idx = doc_idx
    self.text = text
import cmath
from typing import Tuple

def quadratic(a: float, b: float, c: float) -> Tuple[complex, complex]:
    '''Compute the roots of the quadratic equation:

        ax^2 + bx + c = 0

    Written in Python as:

        a*x**2 + b*x + c == 0.0

    For example:

        >>> x1, x2 = quadratic(a=8, b=22, c=15)
        >>> x1
        (-1.25+0j)
        >>> x2
        (-1.5+0j)
        >>> 8*x1**2 + 22*x1 + 15
        0j
        >>> 8*x2**2 + 22*x2 + 15
        0j
    '''
    discriminant = cmath.sqrt(b**2.0 - 4.0*a*c)
    x1 = (-b + discriminant) / (2.0 * a)
    x2 = (-b - discriminant) / (2.0 * a)
    return x1, x2
def tokenize(sentences: List[str]) -> Tuple[List[int], List[str]]:
    tokenizer = Tokenizer()
    lengths = []
    texts = []
    for s in sentences:
        # Split the sentence into tokens and keep their surface forms
        result = tokenizer.tokenize(s)
        surfaces = [t.surface for t in result]
        lengths.append(len(surfaces))
        # Re-join the surfaces into a whitespace-separated string
        text = ' '.join(surfaces)
        texts.append(text)
    return lengths, texts
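A minimal usage sketch, assuming `Tokenizer` is janome's (`from janome.tokenizer import Tokenizer`):

lengths, texts = tokenize(['すもももももももものうち'])
# With janome's default dictionary this typically yields
# lengths == [7] and texts == ['すもも も もも も もも の うち'].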
def option_make(optionname,      # type: AnyStr
                optiontype,      # type: AnyStr
                configs,         # type: Iterable[OptionValue]
                nestedopts=None  # type: Optional[StyleDef]
                ):
    # type: (...) -> Tuple[str, str, List[OptionValue], Optional[StyleDef]]
    configs = [typeconv(c) for c in configs]
    return unistr(optionname), unistr(optiontype), configs, nestedopts
def style_make(options=None):
    # type: (Union[dict, List[Tuple[str, OptionValue]], None]) -> Style
    if options is None:
        return Style()
    if isinstance(options, dict):
        s = style_make()
        for k, v in sorted(options.items()):
            if isinstance(v, dict):
                v = style_make(v)
            set_option(s, k, v)
        return s
    raise TypeError('options must be a dict or None')
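An illustrative call; `Style` and `set_option` are project internals, assumed here to behave like a dict-backed style container. Nested dicts become nested styles, matching the recursion above.

style = style_make({'IndentWidth': 4, 'BraceWrapping': {'AfterClass': True}})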