def minEntropyStim(self):
    """Find the stimulus intensity based on the expected information gain.

    Minimum expected Shannon entropy of the posterior is used as the
    selection criterion for the stimulus intensity in the upcoming trial.

    Reads: ``pdf`` (prior over the parameter grid), ``stimRange``,
    ``likelihood`` (conditional probability table, parameter axes plus a
    trailing stimulus axis), ``iTrial``, ``nTrials``.
    Writes: ``pdfND``, ``pSuccessGivenx``, ``pFailureGivenx``,
    ``posteriorTplus1success``, ``posteriorTplus1failure``,
    ``entropySuccess``, ``entropyFailure``, ``expectEntropy``,
    ``minEntropyInd``, ``xCurrent``; increments ``iTrial`` and sets
    ``stop = 1`` when the trial limit is reached.
    """
    self.nX = len(self.stimRange)
    self.nDims = np.ndim(self.pdf)
    # Append a trailing stimulus-intensity axis to the prior and repeat it
    # nX times, so it matches the conditional probability table (likelihood).
    self.pdfND = np.tile(np.expand_dims(self.pdf, axis=self.nDims), self.nX)
    # Joint probability of response r (success, failure) and the parameter
    # values for each candidate intensity x: p(r | theta, x) * prior(theta).
    self.pTplus1success = np.multiply(self.likelihood, self.pdfND)
    self.pTplus1failure = self.pdfND - self.pTplus1success
    # Marginal probability of each response given intensity x, p(r | x):
    # sum over every parameter axis, leaving only the stimulus axis.
    self.sumAxes = tuple(range(self.nDims))
    self.pSuccessGivenx = np.sum(self.pTplus1success, axis=self.sumAxes)
    self.pFailureGivenx = np.sum(self.pTplus1failure, axis=self.sumAxes)
    # Posterior over the parameter values given intensity x and response r,
    # p(alpha, sigma | x, r).  NOTE(review): divides by p(r | x); an
    # intensity with zero response probability would yield NaNs — assumed
    # not to occur with a proper prior/likelihood, confirm upstream.
    self.posteriorTplus1success = self.pTplus1success / self.pSuccessGivenx
    self.posteriorTplus1failure = self.pTplus1failure / self.pFailureGivenx
    # Expected posterior entropy at intensity x, averaged over responses:
    # E[H | x] = H(post|success) p(success|x) + H(post|failure) p(failure|x).
    self.entropySuccess = self.__entropy(self.posteriorTplus1success)
    self.entropyFailure = self.__entropy(self.posteriorTplus1failure)
    self.expectEntropy = (np.multiply(self.entropySuccess, self.pSuccessGivenx)
                          + np.multiply(self.entropyFailure, self.pFailureGivenx))
    self.minEntropyInd = np.argmin(self.expectEntropy)  # index of smallest expected entropy
    self.xCurrent = self.stimRange[self.minEntropyInd]  # stim intensity at minimum expected entropy
    self.iTrial += 1
    # NOTE(review): stop fires when iTrial reaches nTrials - 1 (i.e. one
    # increment before nTrials) — confirm this off-by-one is intended.
    if self.iTrial == (self.nTrials - 1):
        self.stop = 1
# (removed stray web-page navigation text — "评论列表"/"文章目录", i.e.
#  "comment list"/"article table of contents" — scrape residue, not code)