from functools import reduce

import numpy


def __compute_valid_convolution_nd(data, kernel, dimension: int):
    # Shape of the "valid" convolution output along the last `dimension` axes.
    convolution_shape = tuple(data.shape[i] - kernel.shape[i] + 1 for i in range(-dimension, 0))
    list_dimension = reduce(lambda a, b: a * b, convolution_shape)
    data_prefix = data.shape[:-dimension]
    kernel_flat = kernel.ravel()
    # im2col buffer: one row per output position, one column per kernel element.
    data_flat = numpy.zeros(data_prefix + (list_dimension, len(kernel_flat)))
    for i in range(list_dimension):
        # Unravel the flat output index i into a multi-index (C order, last axis fastest).
        tensor_slice_start = [0] * len(kernel.shape)
        tensor_slice = [slice(None)] * len(data.shape)
        tensor_slice_start[-1] = i
        for r in range(-1, -len(kernel.shape) - 1, -1):
            dimension_scale = data.shape[r] - kernel.shape[r] + 1
            if tensor_slice_start[r] >= dimension_scale:
                # Carry the overflow into the next more-significant axis.
                tensor_slice_start[r - 1] = tensor_slice_start[r] // dimension_scale
                tensor_slice_start[r] %= dimension_scale
            # Kernel-sized window starting at the current output position.
            tensor_slice[r] = slice(tensor_slice_start[r], tensor_slice_start[r] + kernel.shape[r])
        sub_convolution_index = (slice(None),) * (len(data.shape) - dimension) + (i, slice(None))
        data_flat[sub_convolution_index] = data[tuple(tensor_slice)].reshape(
            data_prefix + (reduce(lambda a, b: a * b, kernel.shape),))
    # Convolution flips the kernel; each window then reduces to a dot product.
    convolution_flat = numpy.matmul(data_flat, numpy.flip(kernel_flat, axis=0))
    convolution_nd = convolution_flat.reshape(data_prefix + convolution_shape)
    return convolution_nd
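
# A minimal sanity check, not part of the original listing, assuming the function
# above lives at module level (the double-underscore prefix only triggers name
# mangling inside a class body). It compares the 1-D case against numpy.convolve
# in "valid" mode and the 2-D case against a naive sliding-window loop.
if __name__ == "__main__":
    rng = numpy.random.default_rng(0)

    # 1-D: should agree with numpy.convolve, which also flips the kernel.
    data_1d = rng.standard_normal(10)
    kernel_1d = rng.standard_normal(3)
    ours_1d = __compute_valid_convolution_nd(data_1d, kernel_1d, dimension=1)
    assert numpy.allclose(ours_1d, numpy.convolve(data_1d, kernel_1d, mode="valid"))

    # 2-D: compare against an explicit loop over windows with a flipped kernel.
    data_2d = rng.standard_normal((5, 4))
    kernel_2d = rng.standard_normal((2, 3))
    flipped = kernel_2d[::-1, ::-1]
    expected = numpy.array([
        [numpy.sum(data_2d[i:i + 2, j:j + 3] * flipped) for j in range(4 - 3 + 1)]
        for i in range(5 - 2 + 1)
    ])
    ours_2d = __compute_valid_convolution_nd(data_2d, kernel_2d, dimension=2)
    assert numpy.allclose(ours_2d, expected)
    print("valid n-d convolution matches the reference results")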