def issue_to_changelog(issue):
    """Summarize a JIRA issue as ``{'key': ..., 'changelog': ...}``.

    The changelog is a list of (status, timestamp) pairs: first an
    (u'Created', creation time) entry, then one pair for every 'status'
    change found in the issue's change histories.
    """
    creation = (u'Created', parse_date(issue.fields.created))
    status_changes = flatten([
        [(item.toString, parse_date(history.created))
         for item in history.items if item.field == 'status']
        for history in issue.changelog.histories
    ])
    return {'key': issue.key, 'changelog': [creation] + status_changes}
# Python flatten() example source code
def __init__(self, past, future, features = None):
    """Create a training pattern.
    Parameters:
    past -- past feature vectors as a tensor of shape [P, V]
            where P is past days and V is the vectors/day
    future -- future feature vectors as a tensor of [F, V]
              where F is future days and V is the vectors/day
    features -- a sequence of feature names to use
                where None means use all features
    """
    # Training input: the selected sub-features of every past vector,
    # flattened into one row per day.
    selected = [[self._subfeatures(vec, features) for vec in day]
                for day in past]
    self._input = numpy.array(
        [list(util.flatten(day)) for day in selected])
    # Training output: sample std-dev (ddof=1) of future log returns taken
    # across days, scaled by sqrt(252) (presumably trading-day annualization).
    future_log_returns = numpy.log1p(
        [[vec.ret for vec in day] for day in future])
    self._output = numpy.std(future_log_returns, axis = 0, ddof = 1) \
        * numpy.sqrt(252)
    # Past log returns are retained for producing forecasts later.
    self._past_returns = numpy.log1p(
        [[vec.ret for vec in day] for day in past])
def intFloor(*args):
    """Floor every number produced by flattening *args*; return a list of ints."""
    def _ifloor(value):
        return int(math.floor(value))
    return list(map(_ifloor, flatten(args)))
def finalize(self):
    """Merge any overlapping mention clusters and return the document dict.

    Two clusters are merged when they share a mention. After merging, every
    mention must belong to exactly one cluster (asserted below).
    """
    merged = []
    for cluster in self.clusters.values():
        # Find the first already-merged group sharing a mention with this
        # cluster, scanning mentions in order (same order as the original
        # nested loops with break flags).
        target = next(
            (group for mention in cluster for group in merged
             if mention in group),
            None)
        if target is not None:
            print("Merging clusters (shouldn't happen very often.)")
            target.update(cluster)
        else:
            merged.append(set(cluster))
    merged = [list(group) for group in merged]
    all_mentions = util.flatten(merged)
    # Sanity check: no mention may appear in two clusters after merging.
    assert len(all_mentions) == len(set(all_mentions))
    return {
        "doc_key": self.doc_key,
        "sentences": self.sentences,
        "speakers": self.speakers,
        "clusters": merged
    }
def print_predictions(example):
    """Print each predicted coreference cluster as its mention word spans."""
    words = util.flatten(example["sentences"])
    for cluster in example["predicted_clusters"]:
        # Render each mention (inclusive [start, end] word indices) as text.
        rendered = []
        for m in cluster:
            rendered.append(" ".join(words[m[0]:m[1]+1]))
        print(u"Predicted cluster: {}".format(rendered))
def generate_respondents_summary(self):
    """Return a human-readable count of distinct people who responded."""
    # Each option tracks its responders by uid; dedupe across all options.
    responder_uids = set(util.flatten(
        [option.people.keys() for option in self.options]))
    count = len(responder_uids)
    if count == 0:
        return 'Nobody responded'
    if count == 1:
        return '1 person responded'
    return '{} people responded'.format(count)
def tensorize_example(self, example, is_training, oov_counts=None):
    """Convert a single example dict into the model's input tensors.

    Parameters:
    example -- dict with keys "clusters", "sentences", "speakers", "doc_key"
    is_training -- whether this example is being used for training
                   (enables truncation of over-long documents)
    oov_counts -- optional per-embedding sequence of counters, incremented
                  for every word missing from the matching embedding dict

    Returns (word_emb, char_index, text_len, speaker_ids, genre,
    is_training, gold_starts, gold_ends, cluster_ids) — possibly the
    truncated version via self.truncate_example when training.
    """
    clusters = example["clusters"]
    # Sorted, deduplicated list of all gold mention spans across clusters.
    gold_mentions = sorted(tuple(m) for m in util.flatten(clusters))
    gold_mention_map = {m:i for i,m in enumerate(gold_mentions)}
    # cluster_ids[i] = index of the cluster containing gold_mentions[i].
    cluster_ids = np.zeros(len(gold_mentions))
    for cluster_id, cluster in enumerate(clusters):
        for mention in cluster:
            cluster_ids[gold_mention_map[tuple(mention)]] = cluster_id
    sentences = example["sentences"]
    num_words = sum(len(s) for s in sentences)
    speakers = util.flatten(example["speakers"])
    # One speaker label per word across the whole document.
    assert num_words == len(speakers)
    max_sentence_length = max(len(s) for s in sentences)
    # Character tensor must be wide enough for both the longest word and the
    # widest character-CNN filter width from the config.
    max_word_length = max(max(max(len(w) for w in s) for s in sentences), max(self.config["filter_widths"]))
    word_emb = np.zeros([len(sentences), max_sentence_length, self.embedding_size])
    char_index = np.zeros([len(sentences), max_sentence_length, max_word_length])
    text_len = np.array([len(s) for s in sentences])
    for i, sentence in enumerate(sentences):
        for j, word in enumerate(sentence):
            current_dim = 0
            # Concatenate one normalized slice per embedding source along the
            # last axis; s is the slice size, l the lowercase flag.
            for k, (d, (s,l)) in enumerate(zip(self.embedding_dicts, self.embedding_info)):
                if l:
                    # This embedding is looked up case-insensitively.
                    current_word = word.lower()
                else:
                    current_word = word
                if oov_counts is not None and current_word not in d:
                    oov_counts[k] += 1
                word_emb[i, j, current_dim:current_dim + s] = util.normalize(d[current_word])
                current_dim += s
            # Unfilled character positions stay 0 (padding).
            char_index[i, j, :len(word)] = [self.char_dict[c] for c in word]
    # NOTE(review): speaker ids depend on set iteration order, so they are
    # not stable across runs — presumably only equality between ids matters.
    speaker_dict = { s:i for i,s in enumerate(set(speakers)) }
    speaker_ids = np.array([speaker_dict[s] for s in speakers])
    doc_key = example["doc_key"]
    # First two characters of the doc key select the genre — presumably a
    # corpus genre prefix; confirm against self.genres construction.
    genre = self.genres[doc_key[:2]]
    gold_starts, gold_ends = self.tensorize_mentions(gold_mentions)
    if is_training and len(sentences) > self.config["max_training_sentences"]:
        return self.truncate_example(word_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids)
    else:
        return word_emb, char_index, text_len, speaker_ids, genre, is_training, gold_starts, gold_ends, cluster_ids