def set_inpt(self, inpt, inpt_dropout, mini_batch_size):
    """Build the symbolic input/output expressions for this softmax layer.

    Two parallel paths are constructed: `output` feeds the full input
    through weights scaled by (1 - p_dropout) — the standard inference-time
    compensation for dropout — while `output_dropout` applies dropout to
    the input and uses the unscaled weights for training.
    `y_out` holds the predicted class index per example.
    """
    self.inpt = inpt.reshape((mini_batch_size, self.n_in))
    keep_prob = 1 - self.p_dropout
    self.output = softmax(keep_prob * T.dot(self.inpt, self.w) + self.b)
    # Predicted class = index of the largest softmax activation.
    self.y_out = T.argmax(self.output, axis=1)
    flattened_dropout = inpt_dropout.reshape((mini_batch_size, self.n_in))
    self.inpt_dropout = dropout_layer(flattened_dropout, self.p_dropout)
    self.output_dropout = softmax(T.dot(self.inpt_dropout, self.w) + self.b)
Python `softmax()` usage examples — collected source snippets.
def set_inpt(self, inpt, inpt_dropout, mini_batch_size):
    """Wire this softmax layer's symbolic graph.

    `output` is the inference path (input weights scaled by
    1 - p_dropout to compensate for training-time dropout);
    `output_dropout` is the training path with dropout applied
    to the input. `y_out` is the argmax prediction per row.
    """
    reshaped = inpt.reshape((mini_batch_size, self.n_in))
    self.inpt = reshaped
    logits = (1 - self.p_dropout) * T.dot(reshaped, self.w) + self.b
    self.output = softmax(logits)
    self.y_out = T.argmax(self.output, axis=1)
    self.inpt_dropout = dropout_layer(
        inpt_dropout.reshape((mini_batch_size, self.n_in)),
        self.p_dropout,
    )
    dropout_logits = T.dot(self.inpt_dropout, self.w) + self.b
    self.output_dropout = softmax(dropout_logits)
Source: `layers3D.py` — project: reinforcement-learning-policy-gradients, author: DarkElement75 (21 reads, 0 favorites, 0 likes, 0 comments).
def set_inpt(self, inpt, mini_batch_size, timestep_n):
    """Apply softmax(Wx + b) to a 3-d (batch, time, feature) input.

    The input is flattened to 2-d so softmax normalizes each
    (example, timestep) row independently, then the result is
    reshaped back to 3-d using n_out for the feature axis.
    """
    # Collapse (batch, time) into a single row axis for the matmul.
    two_d = inpt.reshape((mini_batch_size * timestep_n, self.n_in))
    # NOTE(review): the original stores the 2-d softmax activations
    # on self.inpt (overwriting the raw input) — preserved here.
    self.inpt = softmax(T.dot(two_d, self.w) + self.b)
    # Restore the 3-d layout, now with n_out features per timestep.
    self.output = self.inpt.reshape((mini_batch_size, timestep_n, self.n_out))
Source: `layers.py` — project: reinforcement-learning-policy-gradients, author: DarkElement75 (19 reads, 0 favorites, 0 likes, 0 comments).
def set_inpt(self, inpt, mini_batch_size):
    """Set the layer's input and compute its softmax output.

    Flattens each example to n_in features, applies softmax(Wx + b),
    and records the argmax class prediction in `y_out`.
    """
    flat = inpt.reshape((mini_batch_size, self.n_in))
    self.inpt = flat
    activations = T.dot(flat, self.w) + self.b
    self.output = softmax(activations)
    # Predicted class per example = highest-probability column.
    self.y_out = T.argmax(self.output, axis=1)
Source: `layers3D.py` — project: reinforcement-learning-policy-gradients, author: DarkElement75 (19 reads, 0 favorites, 0 likes, 0 comments).
def set_inpt(self, inpt, mini_batch_size, timestep_n):
    """Softmax over a 3-d (batch, time, feature) input.

    Works in three steps: flatten to 2-d so each row is one
    (example, timestep) pair, compute softmax(Wx + b), then reshape
    back to 3-d with n_out features on the last axis.
    """
    rows = mini_batch_size * timestep_n
    flattened = inpt.reshape((rows, self.n_in))
    # NOTE(review): self.inpt ends up holding the 2-d softmax output
    # rather than the raw input — the original behaves this way too.
    self.inpt = softmax(T.dot(flattened, self.w) + self.b)
    self.output = self.inpt.reshape(
        (mini_batch_size, timestep_n, self.n_out))
def set_inpt(self, inpt, inpt_dropout, mini_batch_size):
    """Construct both the clean and the dropout forward passes.

    The clean path (`output`) scales the weighted input by
    1 - p_dropout so inference matches the expected training
    activation; the dropout path (`output_dropout`) drops input
    units instead. `y_out` is the per-example class prediction.
    """
    self.inpt = inpt.reshape((mini_batch_size, self.n_in))
    weighted = T.dot(self.inpt, self.w)
    self.output = softmax((1 - self.p_dropout) * weighted + self.b)
    self.y_out = T.argmax(self.output, axis=1)
    dropped_in = inpt_dropout.reshape((mini_batch_size, self.n_in))
    self.inpt_dropout = dropout_layer(dropped_in, self.p_dropout)
    self.output_dropout = softmax(
        T.dot(self.inpt_dropout, self.w) + self.b)
def build_prediction(self):
    """Return the elementwise sigmoid of this layer's activation.

    A softmax alternative, ``NN.softmax(self.activation)``, was left
    in the original as a commented-out line with a note that it
    exposed a slow subtensor implementation.
    """
    return NN.sigmoid(self.activation)
def set_inpt(self, inpt, inpt_dropout, mini_batch_size):
    """Define symbolic inputs/outputs for the softmax layer.

    Builds the inference-time output (weights scaled by 1 - p_dropout)
    and the training-time dropout output side by side, plus the argmax
    class prediction `y_out`.
    """
    n = self.n_in
    self.inpt = inpt.reshape((mini_batch_size, n))
    # Inference path: scale by the keep probability instead of dropping.
    self.output = softmax(
        (1 - self.p_dropout) * T.dot(self.inpt, self.w) + self.b)
    self.y_out = T.argmax(self.output, axis=1)
    # Training path: dropout applied directly to the flattened input.
    self.inpt_dropout = dropout_layer(
        inpt_dropout.reshape((mini_batch_size, n)), self.p_dropout)
    self.output_dropout = softmax(
        T.dot(self.inpt_dropout, self.w) + self.b)