Python softmax() example source code

network.py (Project: python-machine-learning, Author: sho-87)
def set_inpt(self, inpt, inpt_dropout, mini_batch_size):
        self.inpt = inpt.reshape((mini_batch_size, self.n_in))
        # Inference path: scale the weighted input by (1 - p_dropout) so its
        # expected value matches the dropout-trained path below
        self.output = softmax((1-self.p_dropout)*T.dot(self.inpt, self.w) + self.b)
        self.y_out = T.argmax(self.output, axis=1)  # predicted class index
        # Training path: drop inputs at random, then take the softmax
        self.inpt_dropout = dropout_layer(
            inpt_dropout.reshape((mini_batch_size, self.n_in)), self.p_dropout)
        self.output_dropout = softmax(T.dot(self.inpt_dropout, self.w) + self.b)
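
Most of the dropout snippets on this page descend from Michael Nielsen's network3.py and assume two helpers that are not shown here: softmax, which in that file is Theano's row-wise softmax, and dropout_layer, which masks activations at random. A minimal sketch of those assumed helpers (the RNG seeding is illustrative, not taken from the snippets):

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.nnet import softmax  # row-wise softmax over a 2D matrix
from theano.tensor.shared_randomstreams import RandomStreams

def dropout_layer(layer, p_dropout):
    # Zero each activation independently with probability p_dropout
    srng = RandomStreams(np.random.RandomState(0).randint(999999))
    mask = srng.binomial(n=1, p=1 - p_dropout, size=layer.shape)
    return layer * T.cast(mask, theano.config.floatX)
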
network3.py (Project: machine-deep_learning, Author: Charleswyt)
def set_inpt(self, inpt, inpt_dropout, mini_batch_size):
        self.inpt = inpt.reshape((mini_batch_size, self.n_in))
        self.output = softmax((1-self.p_dropout)*T.dot(self.inpt, self.w) + self.b)
        self.y_out = T.argmax(self.output, axis=1)
        self.inpt_dropout = dropout_layer(
            inpt_dropout.reshape((mini_batch_size, self.n_in)), self.p_dropout)
        self.output_dropout = softmax(T.dot(self.inpt_dropout, self.w) + self.b)
layers3D.py (Project: reinforcement-learning-policy-gradients, Author: DarkElement75)
def set_inpt(self, inpt, mini_batch_size, timestep_n):

        # Reshape the 3D input to 2D so softmax can be applied row-wise
        self.inpt = inpt.reshape((mini_batch_size*timestep_n, self.n_in))

        # The affine step wx + b maps the 2D input to the output width
        self.inpt = softmax(T.dot(self.inpt, self.w) + self.b)

        # Reshape back to 3D, using self.n_out for the last dimension,
        # since the affine step already produced the output width
        self.output = self.inpt.reshape((mini_batch_size, timestep_n, self.n_out))
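
A quick shape check of that reshape, softmax, reshape round trip. This is a minimal sketch that skips the w/b affine step (so n_out equals n_in here); the point is that Theano's softmax operates row-wise on a 2D matrix, which is why the 3D tensor is flattened first:

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.nnet import softmax

mini_batch_size, timestep_n, n_in = 4, 5, 3

inpt = T.tensor3("inpt")                                   # (batch, timestep, n_in)
flat = inpt.reshape((mini_batch_size * timestep_n, n_in))  # 3D -> 2D
probs = softmax(flat)                                      # softmax over each row
out = probs.reshape((mini_batch_size, timestep_n, n_in))   # 2D -> 3D

f = theano.function([inpt], out)
x = np.random.randn(mini_batch_size, timestep_n, n_in).astype(theano.config.floatX)
y = f(x)
assert y.shape == (4, 5, 3)
assert np.allclose(y.sum(axis=-1), 1.0)  # each timestep's distribution sums to 1
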
layers.py (Project: reinforcement-learning-policy-gradients, Author: DarkElement75)
def set_inpt(self, inpt, mini_batch_size):
        self.inpt = inpt.reshape((mini_batch_size, self.n_in))
        self.output = softmax(T.dot(self.inpt, self.w) + self.b)
        self.y_out = T.argmax(self.output, axis=1)
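
For context, a hedged sketch of how such a layer is typically driven once set_inpt has built the symbolic graph: compile y_out into a callable prediction function. The tiny layer class below is a hypothetical stand-in with zero-initialized parameters, just to keep the sketch self-contained:

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.nnet import softmax

n_in, n_out, mini_batch_size = 4, 3, 2

class TinySoftmaxLayer(object):
    # Hypothetical stand-in for the layers above; zero-initialized
    # parameters keep the example self-contained
    def __init__(self, n_in, n_out):
        self.n_in, self.n_out = n_in, n_out
        self.w = theano.shared(np.zeros((n_in, n_out), dtype=theano.config.floatX))
        self.b = theano.shared(np.zeros((n_out,), dtype=theano.config.floatX))

    def set_inpt(self, inpt, mini_batch_size):
        self.inpt = inpt.reshape((mini_batch_size, self.n_in))
        self.output = softmax(T.dot(self.inpt, self.w) + self.b)
        self.y_out = T.argmax(self.output, axis=1)

x = T.matrix("x")
layer = TinySoftmaxLayer(n_in, n_out)
layer.set_inpt(x, mini_batch_size)
predict = theano.function([x], layer.y_out)
print(predict(np.ones((mini_batch_size, n_in), dtype=theano.config.floatX)))  # [0 0]
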
network3.py (Project: neural-networks-and-deep-learning, Author: skylook)
def set_inpt(self, inpt, inpt_dropout, mini_batch_size):
        self.inpt = inpt.reshape((mini_batch_size, self.n_in))
        self.output = softmax((1-self.p_dropout)*T.dot(self.inpt, self.w) + self.b)
        self.y_out = T.argmax(self.output, axis=1)
        self.inpt_dropout = dropout_layer(
            inpt_dropout.reshape((mini_batch_size, self.n_in)), self.p_dropout)
        self.output_dropout = softmax(T.dot(self.inpt_dropout, self.w) + self.b)
regression.py (Project: Theano-Deep-learning, Author: GeekLiB)
def build_prediction(self):
        # return NN.softmax(self.activation)  # use this line to expose a slow subtensor implementation
        return NN.sigmoid(self.activation)
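
The sigmoid here is not an arbitrary swap: for a single logit, sigmoid(a) is exactly the softmax probability of that logit against a fixed zero logit, so a one-unit sigmoid output and a two-class softmax parameterize the same predictions. A small NumPy check of that identity (the helper names are local to this sketch):

import numpy as np

def sigmoid(a):
    return 1.0 / (1.0 + np.exp(-a))

def softmax(z):
    e = np.exp(z - z.max())  # shift for numerical stability
    return e / e.sum()

a = 0.7
# sigmoid(a) == softmax([a, 0])[0]: same probability, different parameterization
assert np.isclose(sigmoid(a), softmax(np.array([a, 0.0]))[0])
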
network3.py (Project: DeepLearningPython35, Author: MichalDanielDobrzanski)
def set_inpt(self, inpt, inpt_dropout, mini_batch_size):
        self.inpt = inpt.reshape((mini_batch_size, self.n_in))
        self.output = softmax((1-self.p_dropout)*T.dot(self.inpt, self.w) + self.b)
        self.y_out = T.argmax(self.output, axis=1)
        self.inpt_dropout = dropout_layer(
            inpt_dropout.reshape((mini_batch_size, self.n_in)), self.p_dropout)
        self.output_dropout = softmax(T.dot(self.inpt_dropout, self.w) + self.b)

