def run_epoch(session, m, data, eval_op, verbose=False):
    """Runs the model on the given data."""
    epoch_size = ((len(data) // m.batch_size) - 1) // m.num_steps
    start_time = time.time()
    costs = 0.0
    iters = 0
    state = session.run(m.initial_state)
    for step, (x, y) in enumerate(reader.ptb_iterator(data, m.batch_size,
                                                      m.num_steps)):
        cost, state, _ = session.run([m.cost, m.final_state, eval_op],
                                     {m.input_data: x,
                                      m.targets: y,
                                      m.initial_state: state})
        costs += cost
        iters += m.num_steps
        if verbose and step % (epoch_size // 10) == 10:
            print("%.3f perplexity: %.3f speed: %.0f wps" %
                  (step * 1.0 / epoch_size, np.exp(costs / iters),
                   iters * m.batch_size / (time.time() - start_time)))
    return np.exp(costs / iters)
Python ptb_iterator() example source code
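All of the snippets on this page consume reader.ptb_iterator, which in the legacy TensorFlow PTB reader yields (x, y) batches of shape [batch_size, num_steps], with y shifted one token ahead of x. The following NumPy-only sketch reimplements that contract for reference; toy_ptb_iterator and the toy data are illustrative names, not part of any snippet above.

import numpy as np

def toy_ptb_iterator(raw_data, batch_size, num_steps):
    # Reshape the flat token-id stream into batch_size parallel streams,
    # then walk them num_steps tokens at a time; y is x shifted by one.
    raw_data = np.array(raw_data, dtype=np.int32)
    batch_len = len(raw_data) // batch_size
    data = raw_data[:batch_size * batch_len].reshape(batch_size, batch_len)
    epoch_size = (batch_len - 1) // num_steps
    for i in range(epoch_size):
        x = data[:, i * num_steps:(i + 1) * num_steps]
        y = data[:, i * num_steps + 1:(i + 1) * num_steps + 1]
        yield x, y

# Toy usage: 100 fake token ids, 4 sequences per batch, 5 steps per sequence.
for x, y in toy_ptb_iterator(list(range(100)), batch_size=4, num_steps=5):
    assert x.shape == (4, 5) and y.shape == (4, 5)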
def run_epoch(session, m, data, eval_op, verbose=False):
    """Runs the model on the given data."""
    epoch_size = ((len(data) // m.batch_size) - 1) // m.num_steps
    start_time = time.time()
    costs = 0.0
    iters = 0
    state = m.initial_state.eval()
    for step, (x, y) in enumerate(reader.ptb_iterator(data, m.batch_size,
                                                      m.num_steps)):
        cost, state, _ = session.run([m.cost, m.final_state, eval_op],
                                     {m.input_data: x,
                                      m.targets: y,
                                      m.initial_state: state})
        costs += cost
        iters += m.num_steps
        if verbose and step % (epoch_size // 10) == 10:
            print("%.3f perplexity: %.3f speed: %.0f wps" %
                  (step * 1.0 / epoch_size, np.exp(costs / iters),
                   iters * m.batch_size / (time.time() - start_time)))
    wps = len(data) // (time.time() - start_time)
    return np.exp(costs / iters), wps
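As a standalone sanity check of the bookkeeping in the variant above (made-up numbers, no TensorFlow): costs accumulates the per-batch cost, iters accumulates time steps, np.exp(costs / iters) is the perplexity, and the progress line's words-per-second figure divides tokens processed by elapsed wall-clock time.

import numpy as np

# Made-up numbers: 3 minibatches, num_steps=20, batch_size=4, 1.5 s elapsed,
# and an average per-token cross-entropy of 4.6 nats.
num_steps, batch_size, elapsed = 20, 4, 1.5
costs = 4.6 * num_steps * 3          # each batch contributes (per-token loss * num_steps)
iters = num_steps * 3                # total time steps processed so far
perplexity = np.exp(costs / iters)   # exp(4.6), roughly 99.5
wps = iters * batch_size / elapsed   # tokens per second, as in the progress print
print("perplexity %.1f, %.0f wps" % (perplexity, wps))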
def run_epoch(session, m, data, eval_op, verbose=False):
    """Runs the model on the given data."""
    epoch_size = ((len(data) // m.batch_size) - 1) // m.num_steps
    start_time = time.time()
    costs = 0.0
    iters = 0
    state = m.initial_state.eval()
    for step, (x, y) in enumerate(reader.ptb_iterator(data, m.batch_size,
                                                      m.num_steps)):
        cost, state, _ = session.run([m.cost, m.final_state, eval_op],
                                     {m.input_data: x,
                                      m.targets: y,
                                      m.initial_state: state})
        costs += cost
        iters += m.num_steps
        if verbose and step % (epoch_size // 10) == 10:
            print("%.3f perplexity: %.3f speed: %.0f wps" %
                  (step * 1.0 / epoch_size, np.exp(costs / iters),
                   iters * m.batch_size / (time.time() - start_time)))
    return np.exp(costs / iters)
def run_epoch(session, m, data, eval_op, verbose=False):
    """Runs the model on the given data."""
    epoch_size = ((len(data) // m.batch_size) - 1) // m.num_steps
    start_time = time.time()
    costs = 0.0
    iters = 0
    state = m.initial_state.eval()
    for step, (x, y) in enumerate(
            reader.ptb_iterator(data, m.batch_size, m.num_steps)):
        cost, state, _ = session.run([m.cost, m.final_state, eval_op],
                                     {m.input_data: x,
                                      m.targets: y,
                                      m.initial_state: state})
        costs += cost
        iters += m.num_steps
        if verbose and step % (epoch_size // 10) == 10:
            print("%.3f perplexity: %.3f speed: %.0f wps" %
                  (step * 1.0 / epoch_size, np.exp(costs / iters),
                   iters * m.batch_size / (time.time() - start_time)))
    return np.exp(costs / iters)
def run_epoch(session, model, data, is_train=False, verbose=False):
    """Runs the model on the given data."""
    epoch_size = ((len(data) // model.batch_size) - 1) // model.num_steps
    start_time = time.time()
    costs = 0.0
    iters = 0
    state = session.run(model.initial_state)
    for step, (x, y) in enumerate(reader.ptb_iterator(data, model.batch_size, model.num_steps)):
        if is_train:
            fetches = [model.cost, model.final_state, model.train_op]
        else:
            fetches = [model.cost, model.final_state]
        feed_dict = {}
        feed_dict[model.input_data] = x
        feed_dict[model.targets] = y
        for layer_num, (c, h) in enumerate(model.initial_state):
            feed_dict[c] = state[layer_num].c
            feed_dict[h] = state[layer_num].h
        if is_train:
            cost, state, _ = session.run(fetches, feed_dict)
        else:
            cost, state = session.run(fetches, feed_dict)
        costs += cost
        iters += model.num_steps
        if verbose and step % (epoch_size // 10) == 10:
            print("%.3f perplexity: %.3f speed: %.0f wps" %
                  (step * 1.0 / epoch_size, np.exp(costs / iters),
                   iters * model.batch_size / (time.time() - start_time)))
    return np.exp(costs / iters)
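The per-layer state feed in the variant above pairs each (c, h) placeholder from model.initial_state with the numpy values returned as model.final_state on the previous step. Below is a TensorFlow-free illustration of just that pairing; StateTuple and the string values are stand-ins, not real TF objects.

from collections import namedtuple

StateTuple = namedtuple("StateTuple", ["c", "h"])   # stand-in for an LSTM state tuple

# Placeholders (what model.initial_state holds) and the values from the last run.
initial_state = (StateTuple("c0_ph", "h0_ph"), StateTuple("c1_ph", "h1_ph"))
state = (StateTuple("c0_val", "h0_val"), StateTuple("c1_val", "h1_val"))

feed_dict = {}
for layer_num, (c, h) in enumerate(initial_state):
    feed_dict[c] = state[layer_num].c   # cell state carried over between batches
    feed_dict[h] = state[layer_num].h   # hidden state carried over between batches
print(feed_dict)   # {'c0_ph': 'c0_val', 'h0_ph': 'h0_val', 'c1_ph': 'c1_val', 'h1_ph': 'h1_val'}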
def run_epoch(session, m, data, eval_op, ITERS, verbose=False):
    """Runs the model on the given data."""
    epoch_size = ((len(data) // m.batch_size) - 1) // m.num_steps
    start_time = time.time()
    costs = 0.0
    iters = 0
    state = session.run(m.initial_state)
    step = 0
    for step, (x, y) in enumerate(reader.ptb_iterator(data, m.batch_size,
                                                      m.num_steps)):
        cost, state, _ = session.run([m.cost, m.final_state, eval_op],
                                     {m.input_data: x,
                                      m.targets: y,
                                      m.initial_state: state})
        costs += cost
        iters += m.num_steps
        if verbose and step % (epoch_size // 10) == 10:
            print("%.3f perplexity: %.3f speed: %.0f wps" %
                  (step * 1.0 / epoch_size, np.exp(costs / iters),
                   iters * m.batch_size / (time.time() - start_time)))
        # Few iters for profiling; remove if complete training is needed.
        if step > ITERS - 1:
            break
    print("Time for %d iterations %.4f seconds" %
          (ITERS, time.time() - start_time))
    return np.exp(costs / iters)
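The ITERS cutoff in the variant above is a profiling shortcut: run a fixed number of minibatches and report the elapsed wall-clock time. The same pattern in isolation, with a dummy workload standing in for session.run (everything below is illustrative):

import time

ITERS = 100
start_time = time.time()
for step in range(10000):
    sum(range(1000))        # dummy work standing in for one session.run training step
    if step > ITERS - 1:    # same cutoff as above: stop once step reaches ITERS
        break
print("Time for %d iterations %.4f seconds" % (ITERS, time.time() - start_time))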
def run_epoch(session, m, data, eval_op, verbose=False):
    """Runs the model on the given data."""
    epoch_size = ((len(data) // m.batch_size) - 1) // m.num_steps
    start_time = time.time()
    costs = 0.0
    iters = 0
    print('m.initial_state:', m.initial_state)
    state = session.run(m.initial_state)
    step = 0
    for step, (x, y) in enumerate(reader.ptb_iterator(data, m.batch_size,
                                                      m.num_steps)):
        cost, state, _ = session.run([m.cost, m.final_state, eval_op],
                                     {m.input_data: x,
                                      m.targets: y,
                                      m.initial_state: state})
        costs += cost
        iters += m.num_steps
        if verbose and step % (epoch_size // 10) == 10:
            print("%.3f perplexity: %.3f speed: %.0f wps" %
                  (step * 1.0 / epoch_size, np.exp(costs / iters),
                   iters * m.batch_size / (time.time() - start_time)))
    print("Time for one epoch, %d iters: %.4f seconds" %
          (step + 1, time.time() - start_time))
    average_batch_time = (time.time() - start_time) / (step + 1)
    print("Average time per minibatch in this epoch: %.4f seconds" % average_batch_time)
    return np.exp(costs / iters), average_batch_time
def run_epoch(session, model, data, eval_op, verbose=False):
    """Runs the model on the given data."""
    epoch_size = ((len(data) // model.batch_size) - 1) // model.num_steps
    start_time = time.time()
    costs = 0.0
    accs = 0.0
    iters = 0
    # Get the initial LSTM state by evaluating the model's zero_state op:
    # a tuple with one (c, h) pair per layer, each of shape [batch_size, size].
    lstm_state_value = session.run(model.initial_state)
    for step, (x, y) in enumerate(reader.ptb_iterator(data, model.batch_size, model.num_steps)):
        feed_dict = {}
        feed_dict[model.input_data] = x
        feed_dict[model.targets] = y
        # One (c, h) placeholder pair per layer (num_layers iterations).
        for i, (c, h) in enumerate(model.initial_state):
            # Each fed array has shape [batch_size=20, size=200].
            feed_dict[c] = lstm_state_value[i].c
            feed_dict[h] = lstm_state_value[i].h
        # feed_dict now holds {x, y, c1, h1, c2, h2}.
        cost, acc, lstm_state_value, _ = session.run(
            [model.cost, model.accuracy, model.final_state, eval_op], feed_dict)
        accs += acc
        costs += cost  # accumulate the cost of this batch
        iters += model.num_steps
        if verbose and step % (epoch_size // 10) == 10:
            print("%.3f perplexity: %.3f speed: %.0f wps" %
                  (step * 1.0 / epoch_size, np.exp(costs / iters),
                   iters * model.batch_size / (time.time() - start_time)))
    print("Accuracy:", accs / iters)
    return np.exp(costs / iters), accs / iters
def gen_epoch_data(num_epochs, batch_size, seq_length):
    for i in range(num_epochs):
        yield reader.ptb_iterator(data, batch_size, seq_length)
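gen_epoch_data yields one fresh batch iterator per epoch (data and reader are module-level names in the original source). Below is a self-contained sketch of how such a generator is typically consumed, with a toy iterator standing in for reader.ptb_iterator; every name in it is illustrative.

def toy_iterator(data, batch_size, seq_length):
    # Yield (x, y) chunks; y is x shifted by one token.
    span = batch_size * seq_length
    for i in range(0, len(data) - span - 1, span):
        yield data[i:i + span], data[i + 1:i + 1 + span]

def toy_gen_epoch_data(data, num_epochs, batch_size, seq_length):
    for _ in range(num_epochs):
        yield toy_iterator(data, batch_size, seq_length)

data = list(range(1000))
for epoch, batch_iter in enumerate(toy_gen_epoch_data(data, num_epochs=2,
                                                      batch_size=4, seq_length=5)):
    for step, (x, y) in enumerate(batch_iter):
        pass   # in the real code, x and y would be fed to the model here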