def past_weight_grad_calculator(xs, es, kp_x, kd_x, kp_e, kd_e, shapes):
    """
    Do an efficient update of the weights given the two spike-trains.

    This isn't actually implemented as an efficient update, but it will produce
    the identical result as if it were.

    :param xs: An (n_samples, n_in) array of input spikes
    :param es: An (n_samples, n_out) array of error spikes
    :param kp_x: kp for the x units
    :param kd_x: kd for the x units
    :param kp_e: kp for the e units
    :param kd_e: kd for the e units
    :param shapes: (minibatch_size, n_in, n_out)
    :return: An (n_in, n_out) approximate weight gradient.
    """
    # TODO: Make this actually use sparsity, one of these days.
    kp_x, kd_x, kp_e, kd_e = [as_floatx(k) for k in (kp_x, kd_x, kp_e, kd_e)]
    n_samples, n_in, n_out = shapes
    # Per-step decay rates of the filtered x and e traces.
    rx = kd_x/(kp_x+kd_x)
    re = kd_e/(kp_e+kd_e)
    # Shared state: steps since each unit's last spike (initialized to 1), and
    # the reconstructed trace value at that last spike (initialized to 0).
    tx_last = create_shared_variable(np.zeros((n_samples, n_in))+1)
    te_last = create_shared_variable(np.zeros((n_samples, n_out))+1)
    x_last = create_shared_variable(np.zeros((n_samples, n_in)))
    e_last = create_shared_variable(np.zeros((n_samples, n_out)))
    # For each (sample, in, out) pair: the more recent of the two last-spike times.
    t_last = tt.minimum(tx_last[:, :, None], te_last[:, None, :])
    x_spikes = tt.neq(xs, 0)
    # Product of the two decayed traces, integrated as a geometric series over the
    # interval since the more recent spike.
    dw_potentials = x_last[:, :, None] * e_last[:, None, :] * \
        rx**(tx_last[:, :, None]-t_last) \
        * re**(te_last[:, None, :]-t_last) \
        * geoseries_sum(rx*re, t_end=t_last, t_start=1)
    e_spikes = tt.neq(es, 0)
    # Accumulate the potential wherever either unit spiked; the arithmetic form
    # (a + b - a*b) is a logical OR of the two spike indicators.
    dws = (x_spikes[:, :, None]+e_spikes[:, None, :]-x_spikes[:, :, None]*e_spikes[:, None, :])*dw_potentials  # (n_samples, n_in, n_out)
    add_update(x_last, tt.switch(x_spikes, x_last*rx**tx_last + xs/as_floatx(kd_x), x_last))
    # BUGFIX: the e-trace must decay with its own rate re (was mistakenly rx),
    # mirroring the x-trace update on the line above.
    add_update(e_last, tt.switch(e_spikes, e_last*re**te_last + es/as_floatx(kd_e), e_last))
    add_update(tx_last, tt.switch(x_spikes, 1, tx_last+1))
    add_update(te_last, tt.switch(e_spikes, 1, te_last+1))
    return dws.sum(axis=0)