def next_batch(self, batches, in_memory):
"""
Returns the next batch in some fixed-length representation.
Currently we use Panchenko et al.'s cumulative traces
@param batches an iterator with all of the batches (
if in_memory == True:
in batch-major form without padding
else:
A list of paths to the files
)
@param in_memory is a boolean value
@return if in_memory is False, returns a tuple of (dict, [paths]) where paths is a list of paths for each batch
else it returns a dict for training
"""
batch = next(batches)
data_batch = batch
if not in_memory:
data_batch = [helpers.read_cell_file(path) for path in batch]
data_batch = [self._process_trace(trace, self.layers[0]) for trace in data_batch]
min_max_scaler = MinMaxScaler()
data_batch = min_max_scaler.fit_transform(data_batch)
encoder_inputs_ = data_batch
decoder_targets_ = data_batch
train_dict = {
self.encoder_inputs: encoder_inputs_,
self.decoder_targets: decoder_targets_,
}
if not in_memory:
return (train_dict, batch)
return train_dict
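
# The _process_trace helper is not shown here. As a rough illustration of the
# Panchenko-style cumulative representation the docstring refers to, the sketch
# below takes a running sum over a trace of signed cell sizes and resamples it
# to a fixed length. The name cumulative_representation and the interpolation
# step are assumptions for illustration, not the repository's implementation.
import numpy as np

def cumulative_representation(trace, n_features):
    # trace: sequence of signed cell/packet sizes (+ outgoing, - incoming).
    cumulative = np.cumsum(trace)
    # Sample the cumulative curve at n_features equidistant positions so that
    # every trace ends up with the same dimensionality.
    positions = np.linspace(0, len(cumulative) - 1, n_features)
    return np.interp(positions, np.arange(len(cumulative)), cumulative)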
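
# Hypothetical usage sketch (model, batch_iterator, sess, and train_op are
# assumptions, not part of the original code); it only illustrates the two
# return shapes documented in next_batch.
feed = model.next_batch(batch_iterator, in_memory=True)
sess.run(train_op, feed_dict=feed)

feed, paths = model.next_batch(batch_iterator, in_memory=False)
sess.run(train_op, feed_dict=feed)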