import os
import pickle

import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
from PIL import Image
from tqdm import trange

def save_imshow_grid(images, logs_dir, filename, shape):
    """
    Plot images in a grid of a given shape; save the figure, a pickle of the
    raw images, and each image as an individual JPEG under logs_dir.
    """
    with open(os.path.join(logs_dir, "image.pk"), "wb") as f:
        pickle.dump(images, f)
    fig = plt.figure(1)
    grid = ImageGrid(fig, 111, nrows_ncols=shape, axes_pad=0.05)
    size = shape[0] * shape[1]
    for i in trange(size, desc="Saving images"):
        grid[i].axis('off')
        grid[i].imshow(images[i])
        Image.fromarray(images[i]).save(os.path.join(logs_dir, "%d.jpg" % i), "JPEG")
    plt.savefig(os.path.join(logs_dir, filename))
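# A minimal usage sketch (assumptions: `images` is an indexable collection of
# HxW(x3) uint8 arrays and the logs directory already exists):
import numpy as np
images = [np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8) for _ in range(6)]
save_imshow_grid(images, logs_dir="logs", filename="grid.png", shape=(2, 3))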
import uuid
from tqdm import trange

def uuidpool(num, tablename):  # generate 'num' UUIDs, return them as a list
    pool = []
    for _ in trange(num, desc=tablename):
        pool.append(str(uuid.uuid4()))
    return pool
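# Example (hypothetical table name): draw a pool of primary keys for a bulk load.
keys = uuidpool(1000, "weatherstation_data")
assert len(set(keys)) == len(keys)  # uuid4 collisions are practically impossible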
#------------------------------------------------------------------------------
# Source: weatherstation_data_populate.py (project: aCloudGuru-DynamoDB, author: acantril)
def p_table(stations, datapoints):  # populate table with generated readings
    # db_r is the boto3 DynamoDB service resource; batch_writer() buffers
    # put_item calls and flushes them in batches of up to 25 automatically.
    with db_r.Table('weatherstation_data').batch_writer() as batch:
        for station in trange(stations, desc='Stations'):
            for datapoint in trange(datapoints, desc='Datapoints'):
                item = item_gen(station)
                batch.put_item(Item=item)
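# p_table depends on an item_gen helper defined elsewhere in the project; a
# minimal sketch of a compatible generator (field names are assumptions, not
# the project's actual schema):
import random
import uuid

def item_gen(station):  # hypothetical item shape, for illustration only
    return {
        "station_id": str(station),
        "reading_id": str(uuid.uuid4()),
        "temperature": random.randint(-20, 45),
    }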
#------------------------------------------------------------------------------
def train(self):
    """ Run training function. Save model upon completion """
    self.print_log('Training for %d epochs' % self.flags['num_epochs'])
    tf_inputs = (self.x['TRAIN'], self.im_dims['TRAIN'], self.gt_boxes['TRAIN'])
    for self.epoch in trange(1, self.flags['num_epochs'] + 1, desc='epochs'):
        train_order = randomize_training_order(len(self.names['TRAIN']))
        for i in tqdm(train_order):
            self.step += 1  # advance the global step once per training iteration
            feed_dict = create_feed_dict(self.flags['data_directory'], self.names['TRAIN'], tf_inputs, i)
            # Run a training iteration
            if self.step % self.flags['display_step'] == 0:
                # Record training metrics every display_step interval
                summary = self._record_train_metrics(feed_dict)
            else:
                summary = self._run_train_iter(feed_dict)
            self._record_training_step(summary)
## Epoch finished
# Save model
if self.epoch % cfg.CHECKPOINT_RATE == 0:
self._save_model(section=self.epoch)
# Perform validation
if self.epoch % cfg.VALID_RATE == 0:
self.evaluate(test=False)
# # Adjust learning rate
# if self.epoch % cfg.TRAIN.LEARNING_RATE_DECAY_RATE == 0:
# self.lr = self.lr * cfg.TRAIN.LEARNING_RATE_DECAY
# self.print_log("Learning Rate: %f" % self.lr)
def process_digits(all_data, all_labels, data_directory, args):
""" Generate data and saves in the appropriate format """
for s in range(len(flags['all_names'])):
split = flags['all_names'][s]
print('Processing {0} Data'.format(split))
key = 'train' if split == 'train' else 'eval'
# Create writer (tf_records) or Image/Annotations/Names directories (PNGs)
if args[key] == 'tfrecords':
tf_writer = tf.python_io.TFRecordWriter(data_directory + 'clutteredMNIST_' + split + '.tfrecords')
elif args[key] == 'PNG':
make_Im_An_Na_directories(data_directory)
else:
raise ValueError('{0} is not a valid data format option'.format(args[key]))
# Generate data
for i in trange(flags['nums'][split]):
# Generate cluttered MNIST image
im_dims = [im_dims_generator(), im_dims_generator()]
num_digits = num_digits_generator()
img, gt_boxes = gen_nCluttered(all_data[s], all_labels[s], im_dims, num_digits)
# Save data
if args[key] == 'tfrecords':
img = np.float32(img.flatten()).tostring()
gt_boxes = np.int32(np.array(gt_boxes).flatten()).tostring()
tf_write(img, gt_boxes, [flags['im_dims'], flags['im_dims']], tf_writer)
elif args[key] == 'PNG':
fname = split + '_img' + str(i)
imsave(data_directory + 'Images/' + fname + '.png', np.float32(img))
np.savetxt(data_directory + 'Annotations/' + fname + '.txt', np.array(gt_boxes), fmt='%i')
                with open(data_directory + 'Names/' + split + '.txt', 'a') as f:
                    f.write(fname + '\n')
        if args[key] == 'tfrecords':
            tf_writer.close()  # flush and close the completed TFRecord file
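# A hypothetical invocation, assuming all_data/all_labels hold the MNIST splits
# named in flags['all_names'] and the data directory already exists:
args = {'train': 'tfrecords', 'eval': 'PNG'}
process_digits(all_data, all_labels, 'data/clutteredMNIST/', args)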
###############################################################################
# Image generation functions
###############################################################################
def sample(L, p, samples=1000, cutoff=200):
    '''Repeated single-shot corrections for the toric code with perfect measurements.
    Return an array of the number of cycles until failure for a given L and p.'''
    results = []
    for _ in trange(samples, desc='%d; %.2f' % (L, p), leave=False):
        code = ToricCode(L)
        i = 1
        while code.step_error_and_perfect_correction(p) and i < cutoff:
            i += 1
        results.append(i)
    return np.array(results, dtype=int)
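# A hedged usage sketch, assuming ToricCode is importable from this project:
# estimate the mean number of correction cycles survived at L=8, p=0.05.
cycles = sample(8, 0.05, samples=200)
print("mean cycles to failure:", cycles.mean())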
def run_benchmark(input_data):
    print("Running dry runs...")
    for _ in trange(args.dry_runs):  # warm-up iterations, not timed
        iteration(input_data)
    print("\nRunning measured runs...")
    running_time = 0
    for _ in trange(args.runs):
        start, end = iteration(input_data)
        running_time += end - start
    return running_time / float(args.runs)
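# run_benchmark reads a module-level `args` namespace and an iteration(input_data)
# function returning (start, end) timestamps; self-contained stand-ins for a quick
# check (all names here are assumptions):
import argparse
import time

args = argparse.Namespace(dry_runs=3, runs=10)

def iteration(input_data):  # hypothetical workload timed by the benchmark
    start = time.time()
    sum(x * x for x in input_data)
    end = time.time()
    return start, end

print("avg seconds per run:", run_benchmark(range(100000)))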
def _maybe_generate_and_save(self, except_list=()):  # tuple default avoids the mutable-default pitfall
self.data = {}
for name, num in self.data_num.items():
if name in except_list:
tf.logging.info("Skip creating {} because of given except_list {}".format(name, except_list))
continue
path = self.get_path(name)
if not os.path.exists(path):
tf.logging.info("Creating {} for [{}]".format(path, self.task))
x = np.zeros([num, self.max_length, 2], dtype=np.float32)
y = np.zeros([num, self.max_length], dtype=np.int32)
for idx in trange(num, desc="Create {} data".format(name)):
                n_nodes = self.rng.randint(self.min_length, self.max_length + 1)
                nodes, res = generate_one_example(n_nodes, self.rng)
                x[idx, :len(nodes)] = nodes
                y[idx, :len(res)] = res
np.savez(path, x=x, y=y)
self.data[name] = TSP(x=x, y=y, name=name)
else:
tf.logging.info("Skip creating {} for [{}]".format(path, self.task))
tmp = np.load(path)
self.data[name] = TSP(x=tmp['x'], y=tmp['y'], name=name)
def test_memory(insertions, samples, img_shape, misc_len, batch_size, capacity, img_dtype=np.float32):
    print("image shape:", img_shape)
    print("misc vector length:", misc_len)
    print("batch size:", batch_size)
    print("capacity:", capacity)
    print("image data type:", img_dtype.__name__)
memory = ReplayMemory(img_shape, misc_len, capacity, batch_size)
if img_dtype != np.float32:
s = [(np.random.random(img_shape) * 255).astype(img_dtype), np.random.random(misc_len).astype(np.float32)]
s2 = [(np.random.random(img_shape) * 255).astype(img_dtype), np.random.random(misc_len).astype(np.float32)]
else:
s = [np.random.random(img_shape).astype(img_dtype), np.random.random(misc_len).astype(np.float32)]
s2 = [np.random.random(img_shape).astype(img_dtype), np.random.random(misc_len).astype(np.float32)]
a = 0
r = 1.0
terminal = False
for _ in trange(capacity, leave=False, desc="Prefilling memory."):
memory.add_transition(s, a, s2, r, terminal)
start = time()
for _ in trange(insertions, leave=False, desc="Testing insertions speed"):
memory.add_transition(s, a, s2, r, terminal)
inserts_time = time() - start
start = time()
for _ in trange(samples, leave=False, desc="Testing sampling speed"):
sample = memory.get_sample()
sample_time = time() - start
print("\t{:0.1f} insertions/s. 1k insertions in: {:0.2f}s".format(insertions / inserts_time,
inserts_time / insertions * 1000))
print("\t{:0.1f} samples/s. 1k samples in: {:0.2f}s".format(samples / sample_time, sample_time / samples * 1000))
print()
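# A hedged invocation, assuming this project's ReplayMemory is importable:
# benchmark a small buffer of 84x84 grayscale frames stored as uint8.
test_memory(insertions=1000, samples=1000, img_shape=(84, 84, 1),
            misc_len=4, batch_size=32, capacity=10000, img_dtype=np.uint8)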
def test(self, episodes_num=None, deterministic=True):
if episodes_num is None:
episodes_num = self.test_episodes_per_epoch
test_start_time = time.time()
test_rewards = []
test_actions = []
test_frameskips = []
for _ in trange(episodes_num, desc="Testing", file=sys.stdout,
leave=False, disable=not self.enable_progress_bar):
total_reward, actions, frameskips, _ = self.run_episode(deterministic=deterministic, return_stats=True)
test_rewards.append(total_reward)
test_actions += actions
test_frameskips += frameskips
self.doom_wrapper.reset()
if self.local_network.has_state():
self.local_network.reset_state()
test_end_time = time.time()
test_duration = test_end_time - test_start_time
min_score = np.min(test_rewards)
max_score = np.max(test_rewards)
mean_score = np.mean(test_rewards)
score_std = np.std(test_rewards)
log(
"TEST: mean: {}, min: {}, max: {}, test time: {}".format(
green("{:0.3f}±{:0.2f}".format(mean_score, score_std)),
red("{:0.3f}".format(min_score)),
blue("{:0.3f}".format(max_score)),
sec_to_str(test_duration)))
return test_rewards, test_actions, test_frameskips
def train(self):
max_epoch = int(math.ceil(1. * self.max_iter / len(self.train_loader)))
for epoch in tqdm.trange(self.epoch, max_epoch,
desc='Train', ncols=80):
self.epoch = epoch
self.train_epoch()
if self.iteration >= self.max_iter:
break
def __init__(self, cache=None, **kwargs):
super(GTZAN, self).__init__(**kwargs)
if kwargs.get('conf') is not None:
conf = kwargs['conf']
cache = conf.get('cache', None)
data_set_path = osp.join(DEFAULT_IMAGEST_BASE, self.data_set)
self.data_set_path = data_set_path
self.cache = cache
X, y = parse_anno_file(data_set_path)
    if cache == 'raw':
        import librosa
        from tqdm import trange
        # Load each track and zero-pad/truncate to 661500 samples (30 s at 22050 Hz).
        X_new = np.zeros((len(X), 1, 661500, 1))
        for i in trange(len(X)):
            x, _ = librosa.load(osp.join(DEFAULT_DATA_BASE, X[i]))
            x_len = min(661500, len(x))
            X_new[i, :, :x_len, 0] = x[:x_len]
        X = X_new  # without this assignment the loaded waveforms were discarded
if cache is not None and cache != 'raw':
X = self.load_cache_X(X, cache)
if cache == 'mfcc':
X_new = np.zeros((len(X), X[0].shape[0], 1280, 1))
for i, x in enumerate(X):
x_len = min(x.shape[1], 1280)
X_new[i,:,:x_len,0] = x[:,:x_len]
X = X_new
# layout_X
if self.layout_x == 'rel_path':
self.X = X
else:
self.X = self.init_layout_X(X)
# layout_y
self.y = self.init_layout_y(y)
def export_to_tf(self):
def make_example(key_idx, subject, action, pose, plen):
ex = tf.train.Example()
ex.features.feature["key_idx"].int64_list.value.append(int(key_idx))
ex.features.feature["subject"].int64_list.value.append(int(subject))
ex.features.feature["action"].int64_list.value.append(int(action))
ex.features.feature["plen"].int64_list.value.append(int(plen))
for sublist in pose.tolist():
for subsublist in sublist:
for value in subsublist:
ex.features.feature["pose"].float_list.value.append(value)
return ex
def write_split(is_training, keys):
writer = None
shard = 0
splitname = 'train' if is_training else 'val'
print('Transforming "%s" split...' % splitname)
t = trange(len(keys), dynamic_ncols=True)
for k in t:
            if writer is None:  # start a new shard file
writer = tf.python_io.TFRecordWriter(
os.path.join(self.data_path, self.data_set + '_' + splitname + '_shard' + str(shard) + '.tf')
)
key_idx, subject, action, pose, plen = self.read_h5_data(k, is_training)
ex = make_example(key_idx, subject, action, pose, plen)
writer.write(ex.SerializeToString())
if ((k + 1) % 4096) == 0:
writer.close()
writer = None
shard += 1
        if writer is not None:
writer.close()
write_split(True, self.train_keys)
write_split(False, self.val_keys)
def main_loop(self):
# some final operations that might modify the graph
self._init_summary()
get_global_step_var() # ensure there is such var, before finalizing the graph
logger.info("Setup callbacks ...")
callbacks = self.config.callbacks
callbacks.setup_graph(self) # TODO use weakref instead?
logger.info("Initializing graph variables ...")
self.sess.run(tf.initialize_all_variables())
self.config.session_init.init(self.sess)
tf.get_default_graph().finalize()
self._start_concurrency()
with self.sess.as_default():
try:
self.global_step = get_global_step()
logger.info("Start training with global_step={}".format(self.global_step))
callbacks.before_train()
for epoch in range(self.config.starting_epoch, self.config.max_epoch+1):
with timed_operation(
'Epoch {}, global_step={}'.format(
epoch, self.global_step + self.config.step_per_epoch)):
for step in tqdm.trange(
self.config.step_per_epoch,
**get_tqdm_kwargs(leave=True)):
if self.coord.should_stop():
return
self.run_step()
#callbacks.trigger_step() # not useful?
self.global_step += 1
self.trigger_epoch()
except (KeyboardInterrupt, Exception):
raise
finally:
# Do I need to run queue.close?
callbacks.after_train()
self.coord.request_stop()
self.summary_writer.close()
self.sess.close()
def get_training_bbox(bbox_dir, imglist):
ret = []
def parse_bbox(fname):
root = ET.parse(fname).getroot()
size = root.find('size').getchildren()
size = map(int, [size[0].text, size[1].text])
box = root.find('object').find('bndbox').getchildren()
box = map(lambda x: float(x.text), box)
#box[0] /= size[0]
#box[1] /= size[1]
#box[2] /= size[0]
#box[3] /= size[1]
return np.asarray(box, dtype='float32')
with timed_operation('Loading Bounding Boxes ...'):
cnt = 0
import tqdm
for k in tqdm.trange(len(imglist)):
fname = imglist[k][0]
fname = fname[:-4] + 'xml'
fname = os.path.join(bbox_dir, fname)
try:
ret.append(parse_bbox(fname))
cnt += 1
except KeyboardInterrupt:
raise
            except Exception:
                ret.append(None)  # missing or unparsable annotation for this image
logger.info("{}/{} images have bounding box.".format(cnt, len(imglist)))
return ret
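# A hedged usage sketch: imglist entries are (filename, label) pairs as in
# tensorpack's ImageNet metadata, and bbox_dir holds matching PASCAL-style XMLs
# (the paths below are hypothetical).
imglist = [("n01440764/n01440764_10026.JPEG", 0)]
boxes = get_training_bbox("/path/to/ILSVRC/Annotations", imglist)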