def get_mnist(train):
    """Get MNIST dataset loader."""
    # image pre-processing
    pre_process = transforms.Compose([transforms.ToTensor(),
                                      transforms.Normalize(
                                          mean=params.dataset_mean,
                                          std=params.dataset_std)])
    # dataset and data loader
    mnist_dataset = datasets.MNIST(root=params.data_root,
                                   train=train,
                                   transform=pre_process,
                                   download=True)
    mnist_data_loader = torch.utils.data.DataLoader(
        dataset=mnist_dataset,
        batch_size=params.batch_size,
        shuffle=True)
    return mnist_data_loader
Python batch_size() usage examples (source code)
def get_usps(train):
    """Get USPS dataset loader."""
    # image pre-processing
    pre_process = transforms.Compose([transforms.ToTensor(),
                                      transforms.Normalize(
                                          mean=params.dataset_mean,
                                          std=params.dataset_std)])
    # dataset and data loader
    usps_dataset = USPS(root=params.data_root,
                        train=train,
                        transform=pre_process,
                        download=True)
    usps_data_loader = torch.utils.data.DataLoader(
        dataset=usps_dataset,
        batch_size=params.batch_size,
        shuffle=True)
    return usps_data_loader
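A minimal usage sketch (not from the source; it assumes the `params` module supplies `data_root`, `batch_size`, `dataset_mean` and `dataset_std`): both helpers return ordinary PyTorch DataLoaders that can be iterated directly.

# Sketch only (assumed setup): iterate the loaders like any DataLoader.
src_loader = get_mnist(train=True)    # e.g. source-domain loader
tgt_loader = get_usps(train=True)     # e.g. target-domain loader
for images, labels in src_loader:
    # images has shape (params.batch_size, 1, 28, 28) for MNIST
    break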
def calc_gradient_penalty(D, real_data, fake_data):
    """Calculate gradient penalty for WGAN-GP."""
    alpha = torch.rand(params.batch_size, 1)
    alpha = alpha.expand(real_data.size())
    alpha = make_cuda(alpha)
    interpolates = make_variable(alpha * real_data + ((1 - alpha) * fake_data))
    interpolates.requires_grad = True
    disc_interpolates = D(interpolates)
    gradients = grad(outputs=disc_interpolates,
                     inputs=interpolates,
                     grad_outputs=make_cuda(
                         torch.ones(disc_interpolates.size())),
                     create_graph=True,
                     retain_graph=True,
                     only_inputs=True)[0]
    gradient_penalty = params.penalty_lambda * \
        ((gradients.norm(2, dim=1) - 1) ** 2).mean()
    return gradient_penalty
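A hedged sketch of how the penalty might enter one critic update; `D`, `optimizer_D`, `real_images` and `fake_images` are assumed names, not taken from the source.

# Sketch only (assumed names): one WGAN-GP critic step.
optimizer_D.zero_grad()
d_loss = D(fake_images).mean() - D(real_images).mean()              # Wasserstein critic loss
d_loss = d_loss + calc_gradient_penalty(D, real_images.data, fake_images.data)
d_loss.backward()
optimizer_D.step()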
def valid_generator():
    while True:
        for start in range(0, len(ids_valid_split), batch_size):
            x_batch = []
            y_batch = []
            end = min(start + batch_size, len(ids_valid_split))
            ids_valid_batch = ids_valid_split[start:end]
            for id in ids_valid_batch.values:
                img = cv2.imread('input/train/{}.jpg'.format(id))
                img = cv2.resize(img, (input_size, input_size))
                mask = cv2.imread('input/train_masks/{}_mask.png'.format(id), cv2.IMREAD_GRAYSCALE)
                mask = cv2.resize(mask, (input_size, input_size))
                mask = np.expand_dims(mask, axis=2)
                x_batch.append(img)
                y_batch.append(mask)
            x_batch = np.array(x_batch, np.float32) / 255
            y_batch = np.array(y_batch, np.float32) / 255
            yield x_batch, y_batch
def get_targets(mini_batch, target_model):
    # mini_batch format: (input_state, action, reward, output_state, tState, epsilon)
    actions = np.argmax(np.asarray([item[1] for item in mini_batch]), axis=1).astype(int)
    state_inputs = np.concatenate(tuple([exp[3] for exp in mini_batch]), axis=0)
    train_inputs = np.concatenate(tuple([exp[0] for exp in mini_batch]), axis=0)
    est_values = (target_model.predict_on_batch(state_inputs)).max(axis=1)
    target = np.zeros(shape=(len(mini_batch), 2))
    for item in range(len(mini_batch)):
        # reward plus discounted next-state value, zeroed on terminal transitions
        target[item, actions[item]] = mini_batch[item][2] + \
            p.DISCOUNT * est_values[item] * int(not mini_batch[item][-2])
    return target, train_inputs
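One plausible way the returned targets could feed a Q-network update (a sketch under assumed names: `replay_memory`, `model`, and `p.batch_size` as the sample size are not confirmed by the source).

import random

# Sketch only: sample a mini-batch from replay memory and fit the online Q-network.
mini_batch = random.sample(replay_memory, p.batch_size)
targets, train_inputs = get_targets(mini_batch, target_model)
loss = model.train_on_batch(train_inputs, targets)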
def run_pretrained(input_state, model, action_states, gameState):
    print('\n\nLoading pretrained weights onto model...')
    model.load_weights(p.PRETRAINED_PATH)
    epsilon = 1
    while True:
        print('Running pretrained model (no exploration) with weights at {}'.format(p.PRETRAINED_PATH))
        nn_out = model.predict(input_state, batch_size=1, verbose=0)
        nn_action = [[0, 1]] if np.argmax(nn_out) else [[1, 0]]
        action, rand_flag = select_action(nn_action + action_states,
                                          prob=[epsilon, (1 - epsilon) / 2, (1 - epsilon) / 2])
        rgbDisplay, reward, tState = gameState.frame_step(action)
        # rotate and flip the raw frame, convert to grayscale, reshape to (1, 1, 80, 80)
        grayDisplay = (np.dot(np.fliplr(imrotate(imresize(rgbDisplay, (80, 80), interp='bilinear'), -90))[:, :, :3],
                              [0.299, 0.587, 0.114])).reshape((1, 1, 80, 80))
        output_state = np.append(input_state[:, 1:, :, :], grayDisplay, axis=1)
test_submit_multi_gpu.py (from the project Kaggle-Carvana-Image-Masking-Challenge by petrosgk)
def data_loader(q):
    for start in tqdm(range(0, len(ids_test), batch_size)):
        x_batch = []
        end = min(start + batch_size, len(ids_test))
        ids_test_batch = ids_test[start:end]
        for id in ids_test_batch.values:
            img = cv2.imread('input/test/{}.jpg'.format(id))
            if input_size is not None:
                img = cv2.resize(img, (input_size, input_size))
            x_batch.append(img)
        x_batch = np.array(x_batch, np.float32) / 255
        q.put((ids_test_batch, x_batch))
    # one (None, None) sentinel per GPU so each consumer thread knows when to stop
    for g in gpus:
        q.put((None, None))
def train_generator():
    while True:
        for start in range(0, len(ids_train_split), batch_size):
            x_batch = []
            y_batch = []
            end = min(start + batch_size, len(ids_train_split))
            ids_train_batch = ids_train_split[start:end]
            for id in ids_train_batch.values:
                img = cv2.imread('input/train/{}.jpg'.format(id))
                img = cv2.resize(img, (input_size, input_size))
                mask = cv2.imread('input/train_masks/{}_mask.png'.format(id), cv2.IMREAD_GRAYSCALE)
                mask = cv2.resize(mask, (input_size, input_size))
                # data augmentation: colour jitter, shift/scale/rotate, horizontal flip
                img = randomHueSaturationValue(img,
                                               hue_shift_limit=(-50, 50),
                                               sat_shift_limit=(-5, 5),
                                               val_shift_limit=(-15, 15))
                img, mask = randomShiftScaleRotate(img, mask,
                                                   shift_limit=(-0.0625, 0.0625),
                                                   scale_limit=(-0.1, 0.1),
                                                   rotate_limit=(-0, 0))
                img, mask = randomHorizontalFlip(img, mask)
                mask = np.expand_dims(mask, axis=2)
                x_batch.append(img)
                y_batch.append(mask)
            x_batch = np.array(x_batch, np.float32) / 255
            y_batch = np.array(y_batch, np.float32) / 255
            yield x_batch, y_batch
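Both generators are typically handed to Keras' fit_generator; a sketch, assuming `model`, `ids_train_split`, `ids_valid_split`, `batch_size` and `np` are already in scope (the epoch count is an arbitrary placeholder).

# Sketch only: wiring the generators into a Keras training run.
model.fit_generator(generator=train_generator(),
                    steps_per_epoch=int(np.ceil(len(ids_train_split) / float(batch_size))),
                    epochs=50,
                    validation_data=valid_generator(),
                    validation_steps=int(np.ceil(len(ids_valid_split) / float(batch_size))))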
test_submit_multithreaded.py (from the project Kaggle-Carvana-Image-Masking-Challenge by petrosgk)
def data_loader(q):
    for start in range(0, len(ids_test), batch_size):
        x_batch = []
        end = min(start + batch_size, len(ids_test))
        ids_test_batch = ids_test[start:end]
        for id in ids_test_batch.values:
            img = cv2.imread('input/test/{}.jpg'.format(id))
            img = cv2.resize(img, (input_size, input_size))
            x_batch.append(img)
        x_batch = np.array(x_batch, np.float32) / 255
        q.put(x_batch)
test_submit_multithreaded.py (from the project Kaggle-Carvana-Image-Masking-Challenge by petrosgk, continued)
def predictor(q):
    for i in tqdm(range(0, len(ids_test), batch_size)):
        x_batch = q.get()
        with graph.as_default():
            preds = model.predict_on_batch(x_batch)
        preds = np.squeeze(preds, axis=3)
        for pred in preds:
            prob = cv2.resize(pred, (orig_width, orig_height))
            mask = prob > threshold
            rle = run_length_encode(mask)
            rles.append(rle)
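A sketch of how the loader and predictor might be wired together with a bounded queue and two threads; the thread setup and queue size are assumptions, not taken from the snippets above.

import threading
import queue  # on Python 2 this module is named Queue

# Sketch only: producer/consumer wiring for data_loader and predictor.
q = queue.Queue(maxsize=10)            # bounded so the loader cannot run far ahead of the GPU
t_loader = threading.Thread(target=data_loader, args=(q,))
t_pred = threading.Thread(target=predictor, args=(q,))
t_loader.start()
t_pred.start()
t_loader.join()
t_pred.join()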