Example source code for Python's freeze_support()
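`multiprocessing.freeze_support()` adds support for child processes when a program that uses `multiprocessing` has been frozen into a Windows executable (e.g. with py2exe, PyInstaller, or cx_Freeze). Per the CPython documentation it should be called immediately after the `if __name__ == '__main__':` line of the main module, and it has no effect when the script is run normally by the interpreter. A minimal sketch of the documented pattern, before the project excerpts below:

import multiprocessing

def worker(n):
    return n * n

if __name__ == '__main__':
    # Must come first: in a frozen Windows executable each spawned child
    # re-runs this module, and freeze_support() intercepts that re-run.
    multiprocessing.freeze_support()
    with multiprocessing.Pool(2) as pool:
        print(pool.map(worker, range(4)))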

Source: pieface_gui.py (project: PIEFACE, author: jcumby)
def main():
    if sys.platform.startswith('win'):
        # Hack for multiprocessing.freeze_support() to work from a
        # setuptools-generated entry point.
        multiprocessing.freeze_support()

    root = tk.Tk()
    root.minsize(450,380)    #width,height
    root.columnconfigure(0, weight=1)
    root.rowconfigure(0, weight=1)

    root.iconbitmap(iconloc)

    app = MainWindow(root)

    h = LoggingtoGUI(app.log.console)
    h.setLevel(logging.INFO)
    f = CritFilter()
    h.addFilter(f)
    log.addHandler(h)

    #log.addFilter(f)
    root.mainloop()
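A note on the "hack" above: a setuptools-generated console script imports the package and calls main() itself, so the author has no `if __name__ == '__main__':` block of their own in which to place the call; putting `freeze_support()` at the top of main() is the closest available equivalent. Because the call is a no-op when the process is not actually frozen, the `sys.platform` guard is an extra precaution rather than a requirement.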
Source: grey_run.py (project: python-GreyTheory, author: Kalvar)
def __init__(self):
    mp.freeze_support()  # for windows
    self.cpu_count = mp.cpu_count()
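Calling freeze_support() inside a constructor, as here, departs from the documented contract, which requires the call to be the first statement after the `if __name__ == '__main__':` guard of the main module. It is harmless in practice only because the call does nothing outside a frozen Windows executable; in an actually frozen build, a call buried in `__init__` may come too late to intercept the re-executed child process.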
Source: ttpassgen.py (project: TTPassGen, author: tp7309)
def cli(mode, dictlist, rule, dict_cache, global_repeat_mode, part_size, append_mode, seperator, debug_mode, output):
    # On Windows calling this function is necessary.
    multiprocessing.freeze_support()

    if mode in _modes:
        generateCombinationDict(mode, dictlist, rule, dict_cache, global_repeat_mode, part_size, append_mode, seperator, debug_mode, output)
    else:
        click.echo(
            "unknown mode; try 'python TTDictGen.py --help' for more information.")
Source: pypdfocr.py (project: cryptoluggage, author: miguelinux314)
def main(): # pragma: no cover 
    multiprocessing.freeze_support()
    script = PyPDFOCR()
    script.go(sys.argv[1:])
Source: EventMonkey.py (project: EventMonkey, author: devgc)
def Main():
    multiprocessing.freeze_support()
    Config.Config.ClearLogs()

    ###GET OPTIONS###
    arguements = GetArguements()
    options = arguements.parse_args()

    # Check if there is geodb if frozen
    if getattr(sys,'frozen',False):
        geodb_file = os.path.join(
            'geodb',
            'GeoLite2-City.mmdb'
        )

        if not os.path.isfile(geodb_file):
            if GetYesNo(("There is no geodb found, would you like to download it? "
                        "This is required for using basic Geo IP support within the "
                        "report queries. If you choose not to use this functionality "
                        "expect errors for templates that use custom functions calling "
                        "geoip functions.")):
                InitGeoDb(geodb_file)
        else:
            SqliteCustomFunctions.GEO_MANAGER.AttachGeoDbs('geodb')

    if options.subparser_name == "process":
        options.db_name = os.path.join(
            options.output_path,
            options.evidencename+'.db'
        )
        manager = WindowsEventManager.WindowsEventManager(
            options
        )
        manager.ProcessEvents()
        CreateReports(options)
    elif options.subparser_name == "report":
        CreateReports(options)
    else:
        raise Exception("Unknown subparser: {}".format(options.subparser_name))
Source: test_pipeline.py (project: human-rl, author: gsastry)
def train_classifier(test, blocker=False):

    number_train=20
    number_valid=30
    number_test=25

    steps = 1000
    batch_size= 1024
    conv_layers = 3

    if test:
        number_train=2
        number_valid=2
        number_test=2
        steps = 50
        batch_size = 20
        conv_layers = 2

    multiprocessing.freeze_support()

    episode_paths = frame.episode_paths(input_path)
    print('Found {} episodes'.format(len(episode_paths)))
    np.random.seed(seed=42)
    np.random.shuffle(episode_paths)

    if blocker:
        common_hparams = dict(use_action=True,  expected_positive_weight=0.05)
        labeller = humanrl.pong_catastrophe.PongBlockerLabeller()
    else:
        common_hparams = dict(use_action=False)
        labeller = humanrl.pong_catastrophe.PongClassifierLabeller()

    data_loader = DataLoader(labeller, TensorflowClassifierHparams(**common_hparams))
    datasets = data_loader.split_episodes(episode_paths,
                                          number_train, number_valid, number_test, use_all=False)


    hparams_list = [
        dict(image_crop_region=((34,34+160),(0,160)), #image_shape=[42, 42, 1], 
             convolution2d_stack_args=[(4, [3, 3], [2, 2])] * conv_layers, batch_size=batch_size, multiprocess=False,
             fully_connected_stack_args=[50,10],
             use_observation=False, use_image=True,
             verbose=True
         ) 
    ]

    start_experiment = time.time()
    print('Run experiment params: ', dict(number_train=number_train, number_valid=number_valid,
                                          number_test=number_test, steps=steps, batch_size=batch_size,
                                          conv_layers=conv_layers) )
    print('hparams', common_hparams, hparams_list[0])


    logdir = save_classifier_path
    run_experiments(
        logdir, data_loader, datasets, common_hparams, hparams_list, steps=steps, log_every=int(.1*steps))

    time_experiment = time.time() - start_experiment
    print('Steps: {}. Time in mins: {}'.format(steps, (1/60)*time_experiment))

    run_classifier_metrics()
Source: FunctionMatrix.py (project: IDAPython-Scripts, author: razygon)
def _feature_paths(self, f_ea):
        '''
        Number of paths from startEA to 'ret'.
        The target cannot simply be the 'last' node: in most cases the last
        node is not the return point. It is likewise not certain that the
        first node is the start node.

        prior feature: returnpoints
        '''
        return 0  # feature disabled: this early return skips the path counting below
        paths_count = 0
        start = sorted(self.ftable["dg"].nodes())[0]
        DEBUG_PRINT('start')
        DEBUG_PRINT(start)
        cutoff = len(self.ftable["dg"])/2
        if cutoff > 70:
            return 100
        for ret in self.ftable["returnpoints"]:
            tar = None
            for (x,y) in self.ftable["dg"].nodes():
                if y == ret:
                    tar = (x,y)
                    break
            if tar != None:
                DEBUG_PRINT((start, tar, cutoff))
                paths_count = paths_count + simple_paths_count(self.ftable["dg"], start, tar, cutoff)
                if paths_count > 100:
                    break
        DEBUG_PRINT(paths_count)
        return paths_count        
#         start = sorted(self.ftable["dg"].nodes())[0]
#         print 'start' 
#         print start
#         cutoff = len(self.ftable["dg"]) -1
#         DEBUG_PRINT( 'feature_paths' + hex(f_ea))
#         for ret in self.ftable["returnpoints"]:
#             tar = None
#             for (x,y) in self.ftable["dg"].nodes():
#                 if y ==ret:
#                     tar = (x,y)
#                     break
#             #only only node will be returned
#             if tar!=None:
#                 print tar
#                 count_conn = Queue()
#                 
#                 freeze_support()
#                 PYTHON_EXE = os.path.join(sys.exec_prefix, 'pythonw.exe') #if you use python.exe you get a command window
#                 multiprocessing.set_executable(PYTHON_EXE)
#                 p = Process(target = calc_path, args = (self.ftable["dg"], start, tar, cutoff)) #,count_conn
#                 p.start()
#   
#                 p.join(5)
#                 if p.is_alive():
#                     p.terminate()
#                     count_paths = -1
# #                 else:
# #                     try:
# #                         count_paths = count_conn.get()
# #                         print 'not main_)__'
# #                     except:
# #                         count_paths = -1
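The commented-out block above works around running multiprocessing from inside an embedded interpreter such as IDA: sys.executable points at the host application rather than a Python interpreter, so child processes must be pointed at a real pythonw.exe via multiprocessing.set_executable() before spawning, and the worker is terminated if it exceeds a timeout. A cleaned-up sketch of that pattern (the calc_path worker, and its queue argument, are assumptions based on the commented code):

import os
import queue
import sys
import multiprocessing
from multiprocessing import Process, Queue

def count_paths_with_timeout(graph, start, target, cutoff, timeout=5):
    """Count paths in a child process; give up after `timeout` seconds."""
    multiprocessing.freeze_support()  # mirrors the original; a no-op unless frozen
    # Inside an embedded interpreter (e.g. IDA) sys.executable is the host
    # binary, so tell multiprocessing which interpreter to spawn instead.
    # pythonw.exe avoids flashing a console window on Windows.
    multiprocessing.set_executable(os.path.join(sys.exec_prefix, 'pythonw.exe'))

    result = Queue()  # channel for the worker's answer
    # calc_path: hypothetical worker that puts its count on `result`
    p = Process(target=calc_path, args=(graph, start, target, cutoff, result))
    p.start()
    p.join(timeout)
    if p.is_alive():          # worker exceeded the time budget
        p.terminate()
        return -1             # sentinel value, as in the original code
    try:
        return result.get_nowait()
    except queue.Empty:
        return -1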