import ctypes
import multiprocessing as mp

import numpy as np


def _activation_matrix(indices_list, weights, number_of_threads):
"""
Estimate activation for indices in weights
Memory overhead for multiprocessing is one copy of weights
plus a copy of cues for each thread.
Parameters
----------
indices_list : list[int]
events as cue indices in weights
weights : numpy.array
weight matrix with shape (outcomes, cues)
number_of_threads : int
Returns
-------
activation_matrix : numpy.array
estimated activations as matrix with shape (outcomes, events)
"""
    assert number_of_threads >= 1, "Cannot run with fewer than 1 thread"
activations_dim = (weights.shape[0], len(indices_list))
    if number_of_threads == 1:
        # Single-threaded path: sum the weight columns of each event's cues
        # directly into one column of the activation matrix.
        activations = np.empty(activations_dim, dtype=np.float64)
        for row, event_cues in enumerate(indices_list):
            activations[:, row] = weights[:, event_cues].sum(axis=1)
        return activations
    else:
        # Multiprocessing path: the activation matrix lives in shared memory,
        # so worker processes can write their columns directly instead of
        # pickling results back to the parent.
        shared_activations = mp.RawArray(ctypes.c_double, int(np.prod(activations_dim)))
        # Share a single read-only, contiguous float64 copy of the weight
        # matrix with all workers.
        weights = np.ascontiguousarray(weights, dtype=np.float64)
        shared_weights = mp.sharedctypes.copy(np.ctypeslib.as_ctypes(weights))
        initargs = (shared_weights, weights.shape, shared_activations, activations_dim)
        with mp.Pool(number_of_threads, initializer=_init_mp_activation_matrix,
                     initargs=initargs) as pool:
            # Each task is (event_index, cue_indices); every worker fills one
            # column of the shared activation matrix per event.
            pool.starmap(_run_mp_activation_matrix, enumerate(indices_list))
        activations = np.ctypeslib.as_array(shared_activations)
        activations.shape = activations_dim
        return activations
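

# The two multiprocessing helpers referenced above are not defined in this
# excerpt. Below is a minimal sketch of what they would need to do, assuming
# the shared ctypes buffers are re-wrapped as module-level NumPy arrays in
# every worker process; the names match the calls above, but the bodies here
# are illustrative, not taken from the original module.

def _init_mp_activation_matrix(weights_, weights_shape_, activations_, activations_shape_):
    # Runs once per worker process: expose the shared buffers as global
    # NumPy arrays so workers can read weights and write activations
    # without copying either one.
    global weights, activations
    weights = np.ctypeslib.as_array(weights_)
    weights.shape = weights_shape_
    activations = np.ctypeslib.as_array(activations_)
    activations.shape = activations_shape_


def _run_mp_activation_matrix(event_index, cue_indices):
    # Fill one column of the shared activation matrix: sum the weight
    # columns of this event's cues over all outcomes.
    activations[:, event_index] = weights[:, cue_indices].sum(axis=1)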
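

if __name__ == "__main__":
    # Small, hand-checkable usage example (illustrative values only): two
    # events over a weight matrix with 3 outcomes and 4 cues, single-threaded.
    example_weights = np.arange(12, dtype=np.float64).reshape(3, 4)
    example_indices = [[0, 2], [1, 2, 3]]
    example_activations = _activation_matrix(example_indices, example_weights, 1)
    # Each column holds one event's activations, e.g. column 0 equals
    # example_weights[:, [0, 2]].sum(axis=1) == [2., 10., 18.]
    print(example_activations)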