Example source code for Python's Process() class
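
All of the snippets below are real-world uses of multiprocessing.Process() collected from open-source projects. As a reference point, here is a minimal, self-contained sketch of the pattern they all share: construct a Process with a target callable and arguments, start() it, and join() it. The worker function and its arguments are made up purely for illustration.

import multiprocessing

def worker(name, count):
    # Hypothetical worker: runs in a separate child process.
    for i in range(count):
        print('%s: %d' % (name, i))

if __name__ == '__main__':
    p = multiprocessing.Process(target=worker, args=('demo', 3))
    p.start()   # spawn the child process
    p.join()    # wait for it to finish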

index.py (project: fccforensics, author: RagtagOpen)
def run(self):
        self.total = self.get_total()
        if not self.total:
            print('error loading document total; using estimate')
            self.total = 5000000

        index_queue = multiprocessing.Queue()

        bulk_index_process = multiprocessing.Process(
            target=self.bulk_index, args=(index_queue,),
        )
        bulk_index_process.start()

        for comment in self.iter_comments():
            self.stats['fetched'] += 1
            if not self.stats['fetched'] % 500:
                print('fetched %s/%s\t%s%%\t%s' % (self.stats['fetched'], self.total,
                    int(self.stats['fetched'] / self.total * 100),
                    comment['date_disseminated']))
            index_queue.put(comment)

        index_queue.put(None)
        bulk_index_process.join()
        return self.stats['fetched']
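
The bulk_index target is not included in this snippet. Judging from run(), it is a consumer that drains index_queue until it sees the None sentinel pushed at the end. A rough sketch of such a consumer, under that assumption (the batch size and the _send_batch helper are illustrative and not part of the original project):

def bulk_index(self, index_queue, batch_size=500):
    # Hypothetical consumer: read comments until the None sentinel arrives.
    batch = []
    for comment in iter(index_queue.get, None):
        batch.append(comment)
        if len(batch) >= batch_size:
            self._send_batch(batch)  # illustrative helper, not in the original code
            batch = []
    if batch:
        self._send_batch(batch)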
pjf_server.py (project: PyJFuzz, author: mseclab)
def __init__(self, configuration):
        self.client_queue = multiprocessing.Queue(0)
        self.apply_patch()
        self.logger = self.init_logger()
        if ["debug", "html", "content_type", "notify", "ports"] not in configuration:
            raise PJFMissingArgument()
        if configuration.debug:
            print("[\033[92mINFO\033[0m] Starting HTTP ({0}) and HTTPS ({1}) built-in server...".format(
                configuration.ports["servers"]["HTTP_PORT"],
                configuration.ports["servers"]["HTTPS_PORT"]
            ))
        if not configuration.content_type:
            configuration.content_type = "application/json"
        self.config = configuration
        self.json = PJFFactory(configuration)
        self.https = SSLWSGIRefServer(host="0.0.0.0", port=self.config.ports["servers"]["HTTPS_PORT"])
        self.http = WSGIRefServer(host="0.0.0.0", port=self.config.ports["servers"]["HTTP_PORT"])
        self.httpsd = multiprocessing.Process(target=run, kwargs={"server": self.https, "quiet": True})
        self.httpd = multiprocessing.Process(target=run, kwargs={"server": self.http, "quiet": True})
        if self.config.fuzz_web:
            self.request_checker = Thread(target=self.request_pool, args=())
        self.logger.debug("[{0}] - PJFServer successfully initialized".format(time.strftime("%H:%M:%S")))
test_memento.py (project: ipwb, author: oduwsdl)
def startReplay(warcFilename):
    global p
    pathOfWARC = os.path.join(os.path.dirname(moduleLocation) +
                              '/samples/warcs/' + warcFilename)
    tempFilePath = '/tmp/' + ''.join(random.sample(
        string.ascii_uppercase + string.digits * 6, 6)) + '.cdxj'
    print('B2' + tempFilePath)
    p = Process(target=replay.start, args=[tempFilePath])
    p.start()
    sleep(5)

    cdxjList = indexer.indexFileAt(pathOfWARC, quiet=True)
    cdxj = '\n'.join(cdxjList)

    with open(tempFilePath, 'w') as f:
        f.write(cdxj)
psstream2datastore.py (project: oscars2016, author: 0x0ece)
def main(argv):
    parser = argparse.ArgumentParser(
        description='A command line interface to move tweets from pubsub to bigquery')
    parser.add_argument('project_name', help='Project name in console')
    parser.add_argument('subscription', help='subscription to read from')
    parser.add_argument('-w','--workers', help='change the number of workers',
        default = 10, type=int)

    args = parser.parse_args(argv[1:])
    pool = [Process(target = worker, args = (args,)) for i in xrange(args.workers)]
    print("Starting pool of %d worker"%args.workers)
    for i in pool:
        i.start()

    for i in pool:
        i.join()
psbatch2datastore.py (project: oscars2016, author: 0x0ece)
def main(argv):
    parser = argparse.ArgumentParser(
        description='A command line interface to move tweets from pubsub to bigquery')
    parser.add_argument('project_name', help='Project name in console')
    parser.add_argument('subscription', help='subscription to read from')
    parser.add_argument('-w','--workers', help='change the number of workers',
        default = 10, type=int)

    args = parser.parse_args(argv[1:])
    pool = [Process(target = worker, args = (args,)) for i in xrange(args.workers)]
    print("Starting pool of %d worker"%args.workers)
    for i in pool:
        i.start()

    for i in pool:
        i.join()
sphinx_.py (project: sphinxcontrib-versioning, author: Robpol86)
def build(source, target, versions, current_name, is_root):
    """Build Sphinx docs for one version. Includes Versions class instance with names/urls in the HTML context.

    :raise HandledError: If sphinx-build fails. Will be logged before raising.

    :param str source: Source directory to pass to sphinx-build.
    :param str target: Destination directory to write documentation to (passed to sphinx-build).
    :param sphinxcontrib.versioning.versions.Versions versions: Versions class instance.
    :param str current_name: The ref name of the current version being built.
    :param bool is_root: Is this build in the web root?
    """
    log = logging.getLogger(__name__)
    argv = ('sphinx-build', source, target)
    config = Config.from_context()

    log.debug('Running sphinx-build for %s with args: %s', current_name, str(argv))
    child = multiprocessing.Process(target=_build, args=(argv, config, versions, current_name, is_root))
    child.start()
    child.join()  # Block.
    if child.exitcode != 0:
        log.error('sphinx-build failed for branch/tag: %s', current_name)
        raise HandledError
data_feeder.py (project: Tensormodels, author: asheshjain399)
def _launch_pipeline(self):
    """This method creates two queues.
    filename_queue: stores the list of filesnames in data_file and label_file
    data_queue: stores the mini-batch
    """

    self.data_processes = [] # Holds process handles

    queue_size = 2 * self.num_preprocess_threads + 2 * self.num_gpu_towers
    self.data_queue = Queue(queue_size)  # This queue stores the data
    image_files = open(self.data_file, 'r').readlines()
    labels = open(self.label_file, 'r').readlines()
    print 'Size of queue: ', queue_size

    self.filename_queue = Queue(len(image_files))  # This queue stores the filenames
    p = Process(target=self._create_filename_queue, args=(self.filename_queue, image_files, labels, self.num_epochs))
    p.start()
    self.data_processes.append(p)

    print 'Data feeder started'
    for each_worker in range(self.num_preprocess_threads):
      p = Process(target=self._each_worker_process, args=(self.data_queue,))
      p.start()
      self.data_processes.append(p)
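
The _create_filename_queue target is referenced above but not shown. Based on how it is launched, it presumably pushes (image path, label) pairs onto filename_queue once per epoch; a minimal sketch under that assumption, purely illustrative:

def _create_filename_queue(self, filename_queue, image_files, labels, num_epochs):
    # Hypothetical filler: enqueue (image_path, label) pairs for each epoch.
    for _ in range(num_epochs):
        for image_path, label in zip(image_files, labels):
            filename_queue.put((image_path.strip(), label.strip()))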
node.py (project: crankycoin, author: cranklin)
def __init__(self, host, reward_address, **kwargs):
        self.host = host
        self.request_nodes_from_all()
        self.reward_address = reward_address
        self.broadcast_node(host)
        self.full_nodes.add(host)

        block_path = kwargs.get("block_path")
        if block_path is None:
            self.blockchain = Blockchain()
        else:
            self.load_blockchain(block_path)

        mining = kwargs.get("mining")
        if mining is True:
            self.NODE_TYPE = "miner"
            self.mining_process = Process(target=self.mine)
            self.mining_process.start()
            logger.debug("mining node started on %s with reward address of %s...", host, reward_address)
        logger.debug("full node server starting on %s with reward address of %s...", host, reward_address)
        self.node_process = Process(target=self.app.run, args=(host, self.FULL_NODE_PORT))
        self.node_process.start()
        logger.debug("full node server started on %s with reward address of %s...", host, reward_address)
rspet_client.py (project: RSPET, author: panagiks)
def udp_flood(self):
        """Get target ip and port from server, start UPD flood wait for 'KILL'."""
        en_data = self.receive(3) # Max ip+port+payload length 999 chars
        en_data = self.receive(int(en_data))
        en_data = en_data.split(":")
        target_ip = en_data[0]
        target_port = int(en_data[1])
        msg = en_data[2]
        proc = Process(target=udp_flood_start, args=(target_ip, target_port, msg))
        proc.start()
        killed = False
        while not killed:
            en_data = self.receive(5)
            try:
                en_data = self.comm_dict[en_data]
            except KeyError:
                continue
            if en_data == 'KILL':
                proc.terminate()
                killed = True
        return 0
rspet_client.py (project: RSPET, author: panagiks)
def udp_spoof(self):
        """Get target/spoofed ip and port from server, start UPD spoof wait for 'KILL'."""
        en_data = self.receive(3) # Max ip+port+spoofedip+spoofed port+payload length 999 chars
        en_data = self.receive(int(en_data))
        en_data = en_data.split(":")
        target_ip = en_data[0]
        target_port = int(en_data[1])
        spoofed_ip = en_data[2]
        spoofed_port = int(en_data[3])
        payload = en_data[4].encode('UTF-8')
        proc = Process(target=udp_spoof_start, args=(target_ip, target_port,
                                                     spoofed_ip, spoofed_port,
                                                     payload))
        proc.start()
        killed = False
        while not killed:
            en_data = self.receive(5)
            try:
                en_data = self.comm_dict[en_data]
            except KeyError:
                continue
            if en_data == 'KILL':
                proc.terminate()
                killed = True
        return 0
summarize_jobs.py (project: supremm, author: ubccr)
def main():
    """
    main entry point for script
    """
    opts = getoptions(False)

    setuplogger(opts['log'])

    config = Config()

    threads = opts['threads']

    if threads <= 1:
        processjobs(config, opts, None)
        return
    else:
        proclist = []
        for procid in xrange(threads):
            p = Process(target=processjobs, args=(config, opts, procid))
            p.start()
            proclist.append(p)

        for proc in proclist:
            proc.join()
realtimehandposepipeline.py (project: deep-prior, author: moberweger)
def processFilesThreaded(self, filenames):
        """
        Run detector from files
        :param filenames: filenames to load
        :return: None
        """

        allstart = time.time()
        if not isinstance(filenames, list):
            raise ValueError("Files must be list of filenames.")

        p = Process(target=self.threadProducerFiles, args=[filenames])
        p.daemon = True
        c = Process(target=self.threadConsumer, args=[])
        c.daemon = True
        p.start()
        c.start()

        c.join()
        p.join()

        print("DONE in {}s".format((time.time() - allstart)))
realtimehandposepipeline.py (project: deep-prior, author: moberweger)
def processVideoThreaded(self, device):
        """
        Use video as input
        :param device: device id
        :return: None
        """

        p = Process(target=self.threadProducerVideo, args=[device])
        p.daemon = True
        c = Process(target=self.threadConsumer, args=[])
        c.daemon = True
        p.start()
        c.start()

        c.join()
        p.join()
train_faster_rcnn_alt_opt.py (project: adversarial-frcnn, author: xiaolonw)
def get_solvers(net_name):
    # Faster R-CNN Alternating Optimization
    n = 'faster_rcnn_alt_opt'
    # Solver for each training stage
    solvers = [[net_name, n, 'stage1_rpn_solver60k80k.pt'],
               [net_name, n, 'stage1_fast_rcnn_solver30k40k.pt'],
               [net_name, n, 'stage2_rpn_solver60k80k.pt'],
               [net_name, n, 'stage2_fast_rcnn_solver30k40k.pt']]
    solvers = [os.path.join(cfg.MODELS_DIR, *s) for s in solvers]
    # Iterations for each training stage
    max_iters = [80000, 40000, 80000, 40000]
    # max_iters = [100, 100, 100, 100]
    # Test prototxt for the RPN
    rpn_test_prototxt = os.path.join(
        cfg.MODELS_DIR, net_name, n, 'rpn_test.pt')
    return solvers, max_iters, rpn_test_prototxt

# ------------------------------------------------------------------------------
# Pycaffe doesn't reliably free GPU memory when instantiated nets are discarded
# (e.g. "del net" in Python code). To work around this issue, each training
# stage is executed in a separate process using multiprocessing.Process.
# ------------------------------------------------------------------------------
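
The comment above explains the motivation: because pycaffe does not reliably release GPU memory, each training stage is run in its own child process so that the memory is reclaimed when that process exits. A generic sketch of the pattern, with a placeholder train_stage function standing in for the project's per-stage training code (the real script passes results back through a multiprocessing.Queue in a similar way):

import multiprocessing

def train_stage(cfg, queue):
    # Placeholder for one training stage; any GPU memory it allocates is
    # released when this child process exits.
    queue.put('model_path_for_%s' % cfg)

def run_stages(configs):
    results = []
    for cfg in configs:
        queue = multiprocessing.Queue()
        p = multiprocessing.Process(target=train_stage, args=(cfg, queue))
        p.start()
        results.append(queue.get())
        p.join()
    return results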
base.py (project: Tktr, author: Intuity)
def run_wsgi(cls):
        if cls.wsgi_process != None:
            cls.make_browser()
            return
        cls.wsgi_process = Process(target=cls._run_wsgi)
        cls.wsgi_process.start()
        # Wait for it to come up
        success = False
        for i in range(10):
            try:
                if urllib.urlopen("http://localhost:%i/" % cls.port_num).getcode() == 200:
                    success = True
                    break
            except Exception:
                pass
            time.sleep(2)
        # Create a second app for routing etc
        cls.app = cls._make_app()
        # If we failed to run WSGI then clean-up
        if not success:
            cls.stop_wsgi()
            cls.wsgi_process = None
            raise Exception("Couldn't bring up WSGI server")
        cls.make_browser()
buffering.py (project: dsb3, author: EliasVansteenkiste)
def buffered_gen_mp(source_gen, buffer_size=2):
    """
    Generator that runs a slow source generator in a separate process.
    buffer_size: the maximal number of items to pre-generate (length of the buffer)
    """
    if buffer_size < 2:
        raise RuntimeError("Minimal buffer size is 2!")

    buffer = mp.Queue(maxsize=buffer_size - 1)

    # the effective buffer size is one less, because the generation process
    # will generate one extra element and block until there is room in the buffer.

    def _buffered_generation_process(source_gen, buffer):
        for data in source_gen:
            buffer.put(data, block=True)
        buffer.put(None)  # sentinel: signal the end of the iterator
        buffer.close()  # unfortunately this does not suffice as a signal: if buffer.get()
        # was called and subsequently the buffer is closed, it will block forever.

    process = mp.Process(target=_buffered_generation_process, args=(source_gen, buffer))
    process.start()

    for data in iter(buffer.get, None):
        yield data
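
A usage sketch for buffered_gen_mp, assuming a deliberately slow source generator (the sleep stands in for expensive I/O or preprocessing):

import time

def slow_source():
    for i in range(10):
        time.sleep(0.5)  # simulate expensive work
        yield i

# Items are produced in a child process and buffered ahead of consumption.
for item in buffered_gen_mp(slow_source(), buffer_size=4):
    print(item)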
test_agents.py (project: oshino, author: CodersOfTheNight)
def stub_server(request):
    from multiprocessing import Process
    from stubilous.server import run
    from stubilous.builder import Builder
    builder = Builder()
    builder.server(host="localhost", port=9998)
    builder.route("GET", "/health")("Ok", 200)
    config = builder.build()
    proc = Process(target=run, args=(config,))

    def on_close():
        proc.terminate()
        proc.join()

    request.addfinalizer(on_close)
    proc.start()
    return proc
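
stub_server is a pytest fixture: it starts the stub HTTP server in a child process and registers a finalizer that tears it down. A hedged example of a test consuming it (the /health route matches the one registered above; using the requests library here is an assumption, not part of the original project):

import requests

def test_health_endpoint(stub_server):
    # The fixture has already started the stub server on localhost:9998.
    resp = requests.get("http://localhost:9998/health")
    assert resp.status_code == 200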
train_faster_rcnn_alt_opt.py (project: faster-rcnn-resnet, author: Eniac-Xie)
def get_solvers(net_name):
    # Faster R-CNN Alternating Optimization
    n = 'faster_rcnn_alt_opt'
    # Solver for each training stage
    solvers = [[net_name, n, 'stage1_rpn_solver60k80k.pt'],
               [net_name, n, 'stage1_fast_rcnn_solver30k40k.pt'],
               [net_name, n, 'stage2_rpn_solver60k80k.pt'],
               [net_name, n, 'stage2_fast_rcnn_solver30k40k.pt']]
    solvers = [os.path.join(cfg.MODELS_DIR, *s) for s in solvers]
    # Iterations for each training stage
    max_iters = [80000, 40000, 80000, 40000]
    # max_iters = [100, 100, 100, 100]
    # Test prototxt for the RPN
    rpn_test_prototxt = os.path.join(
        cfg.MODELS_DIR, net_name, n, 'rpn_test.pt')
    return solvers, max_iters, rpn_test_prototxt

# ------------------------------------------------------------------------------
# Pycaffe doesn't reliably free GPU memory when instantiated nets are discarded
# (e.g. "del net" in Python code). To work around this issue, each training
# stage is executed in a separate process using multiprocessing.Process.
# ------------------------------------------------------------------------------
upnp_reciever.py (project: Static-UPnP, author: nigelb)
def start(self):
        self.setup_sockets()
        import StaticUPnP_Settings
        permissions = Namespace(**StaticUPnP_Settings.permissions)
        print(permissions)
        if permissions.drop_permissions:
            self.drop_privileges(permissions.user, permissions.group)

        self.running = Value(ctypes.c_int, 1)
        self.queue = Queue()
        self.reciever_thread = Process(target=self.socket_handler, args=(self.queue, self.running))
        self.reciever_thread.start()
        self.schedule_thread = Process(target=self.schedule_handler, args=(self.running,))
        self.schedule_thread.start()
        self.response_thread = Process(target=self.response_handler, args=(self.queue, self.running))
        self.response_thread.start()
mute.py (project: MUTE, author: K4YT3X)
def aireplay():
    """
    Controls the attacks function
    Starts attack function in a sub-process
    """
    start_attack_proc = multiprocessing.Process(target=attack)
    start_attack_proc.start()
    while True:
        try:
            print('', end='', flush=True)
            pass
        except KeyboardInterrupt:
            os.system('clear')
            print(G + "[+] INFO: Stopping Attack..." + W)
            start_attack_proc.terminate()
            break


# --------------------------------ICON--------------------------------
downloadandsplit.py (project: histwords, author: williamleif)
def run_parallel(num_processes, out_dir, source):
    page = requests.get("http://storage.googleapis.com/books/ngrams/books/datasetsv2.html")
    pattern = re.compile('href=\'(.*%s-%s-%s-.*\.gz)' % (source, TYPE, VERSION))
    urls = pattern.findall(page.text)
    del page
    queue = Queue()
    for url in urls:
        queue.put(url)
    ioutils.mkdir(out_dir + '/' + source + '/raw')
    download_dir = out_dir + '/' + source + '/raw/'
    ioutils.mkdir(download_dir)
    procs = [Process(target=split_main, args=[i, queue, download_dir]) for i in range(num_processes)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
multiprocess.py (project: hostapd-mana, author: adde88)
def _import_mp():
    global Process, Queue, Pool, Event, Value, Array
    try:
        from multiprocessing import Manager, Process
        # Prevent the server process created by the Manager (which holds Python
        # objects and allows other processes to manipulate them via proxies)
        # from being interrupted on SIGINT (KeyboardInterrupt), so that the
        # communication channel between subprocesses and the main process is
        # still usable after Ctrl+C is received in the main process.
        old=signal.signal(signal.SIGINT, signal.SIG_IGN)
        m = Manager()
        # Reset it back so the main process will receive a KeyboardInterrupt
        # exception on Ctrl+C.
        signal.signal(signal.SIGINT, old)
        Queue, Pool, Event, Value, Array = (
                m.Queue, m.Pool, m.Event, m.Value, m.Array
        )
    except ImportError:
        warn("multiprocessing module is not available, multiprocess plugin "
             "cannot be used", RuntimeWarning)
multiprocess.py (project: hostapd-mana, author: adde88)
def startProcess(self, iworker, testQueue, resultQueue, shouldStop, result):
        currentaddr = Value('c',bytes_(''))
        currentstart = Value('d',time.time())
        keyboardCaught = Event()
        p = Process(target=runner,
                   args=(iworker, testQueue,
                         resultQueue,
                         currentaddr,
                         currentstart,
                         keyboardCaught,
                         shouldStop,
                         self.loaderClass,
                         result.__class__,
                         pickle.dumps(self.config)))
        p.currentaddr = currentaddr
        p.currentstart = currentstart
        p.keyboardCaught = keyboardCaught
        old = signal.signal(signal.SIGILL, signalhandler)
        p.start()
        signal.signal(signal.SIGILL, old)
        return p
factory.py (project: aqbot, author: aqisnotliquid)
def __init__(self):
        try:
            with open(self.AQBOT_CONF) as f:
                config = json.load(f)
        except IOError:
            exit(1)
        except:
            exit(1)

        try:
            for network in config['networks']:
                for channel in network['channels']:
                    worker = mp.Process(target=self._connect, args=(network, channel,))
                    worker.start()
        except:
            exit(1)
worker.py (project: highfive, author: abau171)
def run_worker_pool(job_handler, host="localhost", port=48484,
                      *, max_workers=None):
    """
    Runs a pool of workers which connect to a remote HighFive master and begin
    executing calls.
    """

    if max_workers is None:
        max_workers = multiprocessing.cpu_count()

    processes = []
    for _ in range(max_workers):
        p = multiprocessing.Process(target=worker_main,
                args=(job_handler, host, port))
        p.start()
        processes.append(p)

    logger.debug("workers started")

    for p in processes:
        p.join()

    logger.debug("all workers completed")
main.py (project: IPProxy, author: yutian2011)
def main():
    ip_queue = multiprocessing.Queue()
    msg_queue = multiprocessing.Queue()
    p1 = multiprocessing.Process(target=get_proxy,args=(ip_queue,msg_queue))
    p2 = multiprocessing.Process(target=test_and_verify.verify_db_data,args=(ip_queue,msg_queue))
    p3 = [multiprocessing.Process(target=test_and_verify.gevent_queue,args=(ip_queue,msg_queue)) for i in range(settings.TEST_PROCESS_NUM)]
    p4 = multiprocessing.Process(target=web_cache_run,args=(ip_queue,))
    p1.start()
    p2.start()
    for p in p3:
        p.start()
    pid_list = [os.getpid(),p1.pid,p2.pid,]
    pid_list.extend(p.pid for p in p3)
    if WEB_USE_REDIS_CACHE:
        p4.start()
        pid_list.append(p4.pid)
    with open(PID,"w") as f:
        f.write(json.dumps(pid_list))
    p1.join()
    p2.join()
    for p in p3:
        p.join()
    if WEB_USE_REDIS_CACHE:
        p4.join()
loadDistributed.py (project: zatt, author: simonacca)
def create_server(self, is_leader=True):
        def server_factory(config):
            serverSetup(config)
            loop = asyncio.get_event_loop()
            loop.run_forever()

        self.server = Process(target=server_factory, args=(self.config,))
        self.server.start()

        if is_leader and self.address == self.leader_address:
            sleep(1)
            print('Restarting Leader to increment term')
            self.server.terminate()
            self.create_server(is_leader=False)  # prevents recursion
            sleep(1)
        else:
            sleep(3)
        self.leader = DistributedDict(*self.leader_address)
        self.leader.config_cluster('add', *self.address)
run.py (project: zatt, author: simonacca)
def test_5_add_server(self):
        print('Add new server')
        d = DistributedDict('127.0.0.1', 9110)
        d['test'] = 0
        self.pool.stop(self.pool.ids)
        self.pool.start(self.pool.ids)

        self.pool.configs[10] = {'address': ('127.0.0.1', 9120),
                                 'cluster': {('127.0.0.1', 9120), },
                                 'storage': '20.persist', 'debug': False}
        self.pool.servers[10] = Process(target=self.pool._run_server,
                                        args=(self.pool.configs[10],))
        self.pool.start(10)
        sleep(1)

        d.config_cluster('add', '127.0.0.1', 9120)
        sleep(1)

        del d
        d = DistributedDict('127.0.0.1', 9120)

        self.assertEqual(d, {'test': 0})
service_scan.py (project: Reconnoitre, author: codingo)
def target_file(target_hosts, output_directory, dns_server, quiet, quick):
    targets = load_targets(target_hosts, output_directory, quiet)
    try:
        target_file = open(targets, 'r')
        print("[*] Loaded targets from: %s" % targets)
    except IOError:
        print("[!] Unable to load: %s" % targets)
        return

    for ip_address in target_file:
        ip_address = ip_address.strip()
        create_dir_structure(ip_address, output_directory)

        host_directory = output_directory + "/" + ip_address
        nmap_directory = host_directory + "/scans"

        jobs = []
        p = multiprocessing.Process(target=nmap_scan, args=(ip_address, nmap_directory, dns_server, quick))
        jobs.append(p)
        p.start()
    target_file.close()
snmp_walk.py (project: Reconnoitre, author: codingo)
def target_file(target_hosts, output_directory, quiet):
    targets = load_targets(target_hosts, output_directory, quiet)
    try:
        target_file = open(targets, 'r')
        print("[*] Loaded targets from: %s" % targets)
    except IOError:
        print("[!] Unable to load: %s" % targets)
        return

    for ip_address in target_file:
        ip_address = ip_address.strip()

        snmp_directory = output_directory + '/' + ip_address + '/scans/snmp/'
        check_directory(snmp_directory)

        jobs = []
        p = multiprocessing.Process(target=snmp_scans, args=(ip_address, snmp_directory))
        jobs.append(p)
        p.start()
    target_file.close()

