mcmcmc.py source code

python

Project: RD-MCL    Author: biologyguy
# Excerpted method: relies on dill, shutil, pd (pandas), norm (e.g. scipy.stats.norm), Process,
# and OrderedDict being imported at the top of mcmcmc.py
def run(self):
        """
        NOTE: Gibbs sampling is a way of selecting variables one at a time instead of all at once. This is beneficial in
        high dimensional variable space because it will increase the probability of accepting a new sample. It isn't
        implemented here, but it might be worth keeping in mind.
        """
        counter = 0
        while not self._check_convergence() and (counter <= self.steps or self.steps == 0):
            # Checkpoint every chain: write to a temp file first, then move it over the real dump
            # file so an interrupted write never leaves a truncated checkpoint behind
            tmp_dump = self.dumpfile + ".temp"
            with open(tmp_dump, "wb") as ofile:
                dump_obj = [chain._dump_obj() for chain in self.chains]
                dill.dump(dump_obj, ofile, protocol=-1)
            shutil.move(tmp_dump, self.dumpfile)
            counter += 1
            child_list = OrderedDict()
            for chain in self.chains:  # Note that this will spin off (c * w) new processes, where c=chains, w=walkers
                for walker in chain.walkers:
                    # Start new process
                    func_args = []
                    for variable in walker.variables:
                        if walker.lava:
                            variable.draw_random()
                        else:
                            variable.draw_new_value(walker.heat)
                        func_args.append(variable.draw_value)

                    # Always add a new seed for the target function
                    func_args.append(self.rand_gen.randint(1, 999999999999999))
                    p = Process(target=self.mc_step_run, args=(walker, [func_args]))
                    p.start()
                    child_list[walker.name] = p

            # wait for remaining processes to complete
            while len(child_list) > 0:
                for _name, child in child_list.items():
                    if child.is_alive():
                        continue
                    else:
                        del child_list[_name]
                        break

            for chain in self.chains:
                # Fit a normal distribution to all historical walker scores for this chain to get
                # the mean and standard deviation
                history_series = pd.Series([score for walker in chain.walkers for score in walker.score_history])
                mu, std = norm.fit(history_series)
                for walker in chain.walkers:
                    self.step_parse(walker, std)
                    if self.best["score"] is None or walker.current_score > self.best["score"]:
                        self.best["score"] = walker.current_score
                        self.best["variables"] = OrderedDict([(x.name, x.current_value) for x in walker.variables])

            for chain in self.chains:
                chain.swap_hot_cold()
                # Send output to files
                if counter % self.sample_rate == 0:
                    chain.step_counter += 1
                    chain.write_sample()
        return
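
The docstring's note about Gibbs sampling points at updating one variable at a time while holding the rest fixed, instead of redrawing the whole vector in a single proposal. Below is a minimal, self-contained sketch of that idea (Metropolis-within-Gibbs over a list of floats); the function, `score_fn`, and `proposal_scale` are hypothetical illustrations and are not part of RD-MCL.

import math
import random

def gibbs_style_sweep(values, score_fn, proposal_scale=0.1, rng=random):
    """One sweep of single-variable (Gibbs-style) Metropolis updates.

    `values` is a list of floats and `score_fn` maps the full list to a log-score.
    Each variable is perturbed and accepted or rejected on its own while the others
    stay fixed, so every proposal only has to clear a one-dimensional hurdle.
    """
    current_score = score_fn(values)
    for i in range(len(values)):
        old = values[i]
        values[i] = old + rng.gauss(0.0, proposal_scale)  # perturb variable i only
        new_score = score_fn(values)
        # Standard Metropolis acceptance test on the log scale
        if new_score >= current_score or rng.random() < math.exp(new_score - current_score):
            current_score = new_score  # keep the single-variable move
        else:
            values[i] = old  # revert variable i
    return current_score

# Example: wander around the peak of a simple quadratic log-score
values = [0.0, 0.0, 0.0]
for _ in range(1000):
    gibbs_style_sweep(values, lambda v: -sum(x * x for x in v))

In the walker loop above, the analogous change would be to evaluate the target function once per variable rather than once per full draw, at the cost of many more score evaluations per step.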