recursive_allocation.py source code

python

Project: QuantEcon.lectures.code    Author: QuantEcon
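# This method relies on names defined elsewhere in recursive_allocation.py:
#   import numpy as np
#   from scipy.interpolate import UnivariateSpline
# plus the SequentialAllocation and BellmanEquation classes from the same file.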
def solve_time1_bellman(self):
        '''
        Solve the time 1 Bellman equation for the calibration model and the initial grid mugrid0
        '''
        model, mugrid0 = self.model, self.mugrid
        S = len(model.pi)

        # First get initial fit
        PP = SequentialAllocation(model)
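        # Stack the time-1 values from the sequential allocation over mugrid0;
        # after np.vstack each of c, n, x, V has shape (len(mugrid0), S)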
        c, n, x, V = map(np.vstack, zip(
            *map(PP.time1_value, mugrid0)))

        Vf, cf, nf, xprimef = {}, {}, {}, {}
        for s in range(S):  # fit splines in x for each current state s
            cf[s] = UnivariateSpline(x[:, s], c[:, s])
            nf[s] = UnivariateSpline(x[:, s], n[:, s])
            Vf[s] = UnivariateSpline(x[:, s], V[:, s])
            for sprime in range(S):
                xprimef[s, sprime] = UnivariateSpline(x[:, s], x[:, s])  # initial guess: x'(x) = x
        policies = [cf, nf, xprimef]

        # Create xgrid over the range of x common to all states
        xbar = [x.min(0).max(), x.max(0).min()]
        xgrid = np.linspace(xbar[0], xbar[1], len(mugrid0))
        self.xgrid = xgrid

        # Now iterate on the Bellman equation until the value function converges
        T = BellmanEquation(model, xgrid, policies)
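        # Each pass applies the Bellman operator T to the current Vf and then
        # refits the value and policy functions via self.fit_policy_function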
        diff = 1
        while diff > 1e-5:
            PF = T(Vf)
            Vfnew, policies = self.fit_policy_function(PF)
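            # Measure convergence by the relative sup-norm change of the
            # value function on xgrid, taken across all Markov states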
            diff = 0
            for s in range(S):
                diff = max(diff, np.abs(
                    (Vf[s](xgrid) - Vfnew[s](xgrid)) / Vf[s](xgrid)).max())
            Vf = Vfnew

        # Store the value function, policies and the Bellman equation
        self.Vf = Vf
        self.policies = policies
        self.T = T
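
A minimal usage sketch follows. It assumes the surrounding class in recursive_allocation.py is named RecursiveAllocation, that its constructor takes the calibration model and the mu grid and calls solve_time1_bellman, and that the calibration object is built elsewhere; these names and signatures are assumptions for illustration, not taken from the listing above.

import numpy as np

from recursive_allocation import RecursiveAllocation  # assumed class name

# Calibration object (elided): it must provide model.pi (the Markov chain)
# and whatever preference data SequentialAllocation expects.
model = ...

mugrid = np.linspace(-0.7, 0.01, 200)         # illustrative grid of multipliers mu

planner = RecursiveAllocation(model, mugrid)  # assumed to call solve_time1_bellman
cf, nf, xprimef = planner.policies            # fitted consumption, labor and x' splines
V0_on_grid = planner.Vf[0](planner.xgrid)     # value function in state 0 on the x grid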