import math

from numba import cuda, float64

# Number of histogram bins per feature vector.
BIN_COUNT = 34
# Number of stored feature vectors; one thread per vector, in a single block.
N_VECTORS = 9


@cuda.jit
def hist_comp(arry, hist, result):
    # Launched with one block of N_VECTORS threads, so the 1-D grid
    # index is also the row (feature vector) this thread works on.
    x = cuda.grid(1)

    # R collects one KL divergence per thread.
    R = cuda.shared.array(N_VECTORS, dtype=float64)
    # A caches the stored feature vectors: 9 rows of 34 bins each.
    A = cuda.shared.array(shape=(N_VECTORS, BIN_COUNT), dtype=float64)
    # B caches the query histogram to compare against (hist.shape[0] == BIN_COUNT == 34).
    B = cuda.shared.array(BIN_COUNT, dtype=float64)
    # Stage the inputs in shared memory: every thread copies the query
    # histogram (redundantly) and its own feature-vector row.
    for i in range(BIN_COUNT):
        B[i] = hist[i]
        A[x, i] = arry[x, i]
    cuda.syncthreads()

    # Kullback-Leibler divergence in bits:
    # D(B || A[x]) = sum_i B[i] * log2(B[i] / A[x, i]),
    # which assumes every bin is strictly positive.
    kl = 0.0
    for i in range(BIN_COUNT):
        a = B[i]
        b = A[x, i]
        kl += a * (math.log(a / b) / math.log(2.0))
    # R contains the per-vector KL divergences.
    R[x] = kl
    cuda.syncthreads()
    # Reduce R to a single value: here the sum of all divergences
    # (the commented line keeps the minimum divergence instead).
    # Using half = (size + 1) // 2 handles block sizes that are not
    # powers of two, so the last elements need no special-casing.
    size = cuda.blockDim.x
    while size > 1:
        half = (size + 1) // 2
        if x + half < size:
            R[x] = R[x] + R[x + half]
            # R[x] = min(R[x], R[x + half])
        size = half
        cuda.syncthreads()

    if x == 0:
        result[0] = R[0]
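
# A minimal host-side sketch of how this kernel might be launched (not part
# of the original listing): the array shapes, the random test data, and the
# reliance on implicit NumPy-array transfers are assumptions for illustration.
import numpy as np

if __name__ == "__main__":
    rng = np.random.default_rng(0)

    # Strictly positive, normalised histograms so log(a / b) stays finite.
    arry = rng.random((N_VECTORS, BIN_COUNT)) + 1e-6
    arry /= arry.sum(axis=1, keepdims=True)
    hist = rng.random(BIN_COUNT) + 1e-6
    hist /= hist.sum()

    result = np.zeros(1, dtype=np.float64)

    # One block of N_VECTORS threads, one thread per stored feature vector.
    hist_comp[1, N_VECTORS](arry, hist, result)

    print("Sum of KL divergences:", result[0])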