def __init__(self, kp, kd, kv, kw, N, u_limit=(100, 100), nn_limit=(100, 100)):
    """Initialize the NN adaptive controller's gains, limits, and weights.

    Args:
        kp: Proportional feedback gains (one per control channel).
        kd: Derivative feedback gains.
        kv: Input-side learning gain (scalar).
        kw: Output-side learning gain (scalar).
        N: Number of neurons.
        u_limit: Limit on total output effort (per channel).
        nn_limit: Limit on NN component feedforward effort (per channel).

    The user must actively set ``self.learn = True`` to have the NN
    start learning.
    """
    # Two states (e.g. position + velocity) per control channel.
    self.nstates = 2 * len(kp)
    self.ncontrols = len(kp)
    self.nsigs = N

    # Sigmoid vector with a leading constant 1 acting as the bias entry.
    self.sig = lambda x: np.concatenate(([1], np.tanh(x)))
    # Elementwise tanh derivative (sech^2), tiled to nsigs+1 rows so it
    # lines up with the bias-augmented weight matrices below.
    self.sigp = lambda x: np.tile(1 / (np.cosh(x) ** 2), (self.nsigs + 1, 1))

    self.set_gains(kp, kd, kv, kw)

    # Saturation limits. NOTE: defaults are tuples rather than lists to
    # avoid Python's shared-mutable-default pitfall; np.array copies either.
    self.u_limit = np.array(u_limit, dtype=np.float32)
    self.nn_limit = np.array(nn_limit, dtype=np.float32)

    # Input-side (V) and output-side (W) weights; the +1 row holds the bias.
    self.V = np.zeros((self.nstates + 1, self.nsigs))
    self.W = np.zeros((self.nsigs + 1, self.ncontrols))

    # Most recent NN feedforward output, one value per control channel.
    self.y = np.zeros(self.ncontrols)
    self.saturated = False
    self.learn = False  # learning is off until the user enables it
########################