lucas_stokey.py file source code

python

Project: QuantEcon.lectures.code    Author: QuantEcon
def solve_time1_bellman(self):
        '''
        Solve the time 1 Bellman equation for calibration Para and initial grid mugrid0
        '''
        Para, mugrid0 = self.Para, self.mugrid
        S = len(Para.Pi)

        # First get an initial fit from the sequential allocation problem
        PP = Planners_Allocation_Sequential(Para)
        c, n, x, V = map(np.vstack, zip(*map(PP.time1_value, mugrid0)))

        # Fit splines to the time-1 value function and policies, state by state
        Vf, cf, nf, xprimef = {}, {}, {}, {}
        for s in range(S):
            cf[s] = UnivariateSpline(x[:, s], c[:, s])
            nf[s] = UnivariateSpline(x[:, s], n[:, s])
            Vf[s] = UnivariateSpline(x[:, s], V[:, s])
            for sprime in range(S):
                # initial guess for the continuation policy: x'(x) = x (the identity)
                xprimef[s, sprime] = UnivariateSpline(x[:, s], x[:, s])
        policies = [cf, nf, xprimef]


        # Build the x grid on the interval covered in every state, so the
        # state-s splines are only evaluated inside their fitted range
        xbar = [x.min(0).max(), x.max(0).min()]
        xgrid = np.linspace(xbar[0], xbar[1], len(mugrid0))
        self.xgrid = xgrid

        # Now iterate on the Bellman equation until the value function converges
        T = BellmanEquation(Para, xgrid, policies)
        diff = 1.
        while diff > 1e-5:
            PF = T(Vf)

            Vfnew, policies = self.fit_policy_function(PF)

            # relative sup-norm change of the value function on the grid
            diff = 0.
            for s in range(S):
                diff = max(diff, np.abs((Vf[s](xgrid) - Vfnew[s](xgrid)) / Vf[s](xgrid)).max())

            print(diff)
            Vf = Vfnew

        # Store the value function, the policies, and the Bellman equation operator
        self.Vf = Vf
        self.policies = policies
        self.T = T
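
One detail worth a note: the grid endpoints x.min(0).max() and x.max(0).min() take the largest per-state minimum and the smallest per-state maximum, i.e. the interval of x values that is covered in every column of x, so none of the per-state splines has to extrapolate. A small numeric illustration (made-up numbers, not from lucas_stokey.py):

import numpy as np

x = np.array([[-1.0, -0.8],
              [ 0.0,  0.1],
              [ 1.0,  0.9]])     # rows = grid points, columns = states
lo = x.min(0).max()              # largest per-state minimum  -> -0.8
hi = x.max(0).min()              # smallest per-state maximum ->  0.9
xgrid = np.linspace(lo, hi, 3)
print(lo, hi, xgrid)             # -0.8 0.9 [-0.8 0.05 0.9]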
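
The while loop is a plain fixed-point iteration: apply the Bellman operator, refit the splines, and stop once the relative change of the value function on the grid falls below 1e-5. Below is a minimal self-contained sketch of that convergence pattern with a made-up update operator; it is not part of lucas_stokey.py, and the names (xgrid, T, the log target) are illustrative assumptions only.

import numpy as np
from scipy.interpolate import UnivariateSpline

xgrid = np.linspace(0.0, 1.0, 25)                        # hypothetical grid
Vf = UnivariateSpline(xgrid, np.ones_like(xgrid), s=0)   # initial guess V = 1, s=0 for exact interpolation

def T(V, x):
    # stand-in update operator: a contraction toward log(1 + x) + 1
    return 0.9 * V(x) + 0.1 * (np.log1p(x) + 1.0)

diff = 1.
while diff > 1e-5:
    Vnew = UnivariateSpline(xgrid, T(Vf, xgrid), s=0)
    # relative sup-norm change on the grid, as in solve_time1_bellman
    diff = np.abs((Vf(xgrid) - Vnew(xgrid)) / Vf(xgrid)).max()
    Vf = Vnew

print(Vf(0.5))   # approximately log(1.5) + 1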