def __init__(self):
    """Hyperparameter/bookkeeping container for one training run.

    Sets up: run identity (machine + timestamp), mode flags, CUDA dtype,
    model/log file paths, the logger, and (optionally) a visdom handle.
    Side effects: creates a log file via ``loggerConfig`` and, when
    ``self.visualize`` is True, opens a visdom client connection.
    """
    self.verbose = 0  # logging level: 0(warning) | 1(info) | 2(debug)
    # training signature
    self.machine = "daim"        # "machine_id"
    self.timestamp = "17080800"  # "yymmdd##"
    # training configuration
    self.mode = 1    # 1(train) | 2(test model_file)
    self.config = 1  # index into CONFIGS (agent/env/game/circuit combo)
    self.seed = 1
    self.render = False     # whether to render the window from the original envs or not
    self.visualize = True   # whether to do online plotting and stuff or not
    self.save_best = False  # save model w/ highest reward if True, otherwise always save the latest model

    self.agent_type, self.env_type, self.game, self.circuit_type = CONFIGS[self.config]

    self.use_cuda = torch.cuda.is_available()
    # reuse self.use_cuda rather than querying CUDA availability a second time
    self.dtype = torch.cuda.FloatTensor if self.use_cuda else torch.FloatTensor

    # prefix for model/log/visdom
    self.refs = self.machine + "_" + self.timestamp  # NOTE: using this as env for visdom
    self.root_dir = os.getcwd()

    # model files
    # NOTE: will save the current model to model_name
    self.model_name = os.path.join(self.root_dir, "models", self.refs + ".pth")
    # NOTE: will load pretrained model_file if not None,
    # e.g. os.path.join(self.root_dir, "models", "<PRETRAINED_MODEL>.pth")
    self.model_file = None
    if self.mode == 2:
        # NOTE: so only need to change self.mode to 2 to test the current training
        self.model_file = self.model_name
        assert self.model_file is not None, "Pre-Trained model is None, Testing aborted!!!"
        # NOTE: separate visdom env for testing, to avoid accidentally
        # redrawing on top of the training plots
        self.refs = self.refs + "_test"

    # logging configs
    self.log_name = os.path.join(self.root_dir, "logs", self.refs + ".log")
    self.logger = loggerConfig(self.log_name, self.verbose)
    self.logger.warning("<===================================>")

    if self.visualize:
        self.vis = visdom.Visdom()
        self.logger.warning("bash$: python -m visdom.server")          # activate visdom server on bash
        self.logger.warning("http://localhost:8097/env/" + self.refs)  # open this address in a browser