def update_lr(self, max_j):
    """Advance all training schedules by one step.

    Applies cosine annealing to the learning rate of every optimizer
    param group, anneals ``self.max_budget`` (0.50 -> 1.00) and
    ``self.max_kernel`` (3 -> ``self.final_max_kernel``, odd sizes only)
    over the first half of training, then increments the step counter
    ``self.j``.

    Args:
        max_j: Total number of scheduler steps (overall training length).
    """
    # Cosine schedule: lr == self.lr at j == 0, decaying to 0 at j == max_j.
    for param_group in self.optim.param_groups:
        param_group['lr'] = (0.5 * self.lr) * (1 + np.cos(np.pi * self.j / max_j))
    # Fraction of the *first half* of training completed, clamped to [0, 1];
    # every anneal below reaches its final value at the halfway point.
    half_frac = min(self.j / (max_j * 0.5), 1.0)
    # Optionally anneal the width settings throughout training.
    # self.min_width = 0.25 + 0.25 * half_frac
    # self.max_width = 0.50 + 0.50 * half_frac
    # self.max_paths = [half_frac] * 3
    # self.min_budget = 0.25 + 0.25 * half_frac
    self.max_budget = 0.50 + 0.50 * half_frac
    # Anneal the kernel size from 3 toward final_max_kernel in steps of 2,
    # so the kernel size stays odd.
    self.max_kernel = 3 + int(((self.final_max_kernel - 3) // 2) * half_frac * 2)
    self.j += 1