
Python multiprocessing.get_start_method Function Code Examples


This article collects typical usage examples of the Python multiprocessing.get_start_method function. If you are trying to work out what get_start_method does, how to call it, or what real-world usage looks like, the curated examples below should help.



The following presents 20 code examples of the get_start_method function, sorted by popularity by default.
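
Before the examples, here is a minimal, self-contained sketch of the API all of the snippets below revolve around. The names and signatures are from the standard library; the choice of 'spawn' as the fallback is purely illustrative:

import multiprocessing

if __name__ == '__main__':
    # get_start_method() reports the current start method ('fork', 'spawn'
    # or 'forkserver'). With allow_none=True it returns None when no method
    # has been fixed yet, instead of implicitly selecting the default.
    method = multiprocessing.get_start_method(allow_none=True)

    if method is None:
        # set_start_method() may only be called once per process;
        # pass force=True to override an earlier choice.
        multiprocessing.set_start_method('spawn')  # illustrative choice

    print(multiprocessing.get_start_method())       # e.g. 'spawn'
    print(multiprocessing.get_all_start_methods())  # methods this OS supports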

Example 1: transfer_dir_parallel

    def transfer_dir_parallel(self, source, destination, jobs):
        """Transfer a directory to the remote side in parallel mode."""
        self.log.debug("Remote transfer: %s -> %s", source, destination)
        self.rmdir(destination)
        if os.path.isdir(source):
            self.mkdir(destination)
            self.log.info("Remote transfer in {} parallel jobs".format(
                jobs))
            self.log.debug("Multiprocessing start method is {}".format(
                multiprocessing.get_start_method()))
            self.log.debug(("Starting multiprocessing Pool for parallel "
                           "remote transfer"))
            with multiprocessing.Pool(jobs) as pool:
                self.log.debug("Starting async for parallel transfer")
                commands = []
                for f in glob.glob1(source, '*'):
                    command = self._copy.split()
                    path = os.path.join(source, f)
                    command.extend([path, '{0}:{1}'.format(
                        self.target_host[0], destination)])
                    commands.append(command)
                results = [
                    pool.apply_async(self._run_command, (cmd,))
                    for cmd in commands
                ]

                self.log.debug("Waiting async results for parallel transfer")
                for r in results:
                    r.get()  # self._run_command returns None
                self.log.debug(("Multiprocessing for parallel transfer "
                               "finished"))
        else:
            raise cdist.Error("Source {} is not a directory".format(source))
Developer: jdguffey, Project: cdist, Lines: 33, Source: remote.py


Example 2: run

    def run(self):
        try:
            self._cmdline.this_module_name = self.__class__.__module__
            if multiprocessing.get_start_method() == 'spawn':
                common.set_global_options(self._cmdline)
                common.sysinit()

            signal.signal(signal.SIGTERM, self._sighandler)
            self.id = self._channel(self._dp_name, self.__class__)
            common.set_current_process(self.id)
            pattern.initialize(self.id)
            if hasattr(self._cmdline, 'clock') and \
               self._cmdline.clock == 'Lamport':
                self._logical_clock = 0
            self._log = logging.getLogger(str(self))
            self._start_comm_thread()
            self._lock = threading.Lock()
            self._lock.acquire()
            self._wait_for_go()

            if not hasattr(self, '_da_run_internal'):
                self._log.error("Process does not have entry point!")
                sys.exit(1)

            result = self._da_run_internal()
            self.report_times()

        except Exception as e:
            sys.stderr.write("Unexpected error at process %s:%r"% (str(self), e))
            traceback.print_tb(e.__traceback__)

        except KeyboardInterrupt as e:
            self._log.debug("Received KeyboardInterrupt, exiting")
            pass
Developer: cmkane01, Project: distalgo, Lines: 34, Source: sim.py


Example 3: qtloop

async def qtloop():
    qt_app = None
    qtimport = False
    try:
        import PyQt5.QtCore, PyQt5.QtWidgets
        qtimport = True
    except ImportError:
        pass
    if qtimport:
        if multiprocessing.get_start_method() != "fork":
            print("""Cannot test if Qt can be started
    This is because forking is not possible, you are probably running under Windows
    Starting Qt blindly is not supported, as it may result in segfaults
    """,
            file=sys.stderr)
        else:
            p = Process(target=test_qt)
            p.start()
            p.join()
            if not p.exitcode:
                qt_app = PyQt5.QtWidgets.QApplication(["  "])
    if qt_app is None:
        msg = "Qt could not be started. Qt widgets will not work" #TODO: some kind of env variable to disable this warning
        print(msg,file=sys.stderr)
        return

    while 1:
        qt_app.processEvents()
        await asyncio.sleep(0.01)
Developer: sjdv1982, Project: seamless, Lines: 29, Source: mainloop.py


Example 4: default_test_processes

def default_test_processes():
    """Default number of test processes when using the --parallel option."""
    # The current implementation of the parallel test runner requires
    # multiprocessing to start subprocesses with fork().
    if multiprocessing.get_start_method() != 'fork':
        return 1
    try:
        return int(os.environ['DJANGO_TEST_PROCESSES'])
    except KeyError:
        return multiprocessing.cpu_count()
Developer: Flimm, Project: django, Lines: 10, Source: runner.py


Example 5: setUpClass

    def setUpClass(cls):
        import multiprocessing as mp
        try:
            mp.set_start_method("spawn")
        except RuntimeError:
            pass
        assert mp.get_start_method() == "spawn"

        write_settings()
        cls.app = QApplication([cls.__name__])
Developer: jopohl, Project: urh, Lines: 10, Source: QtTestCase.py


Example 6: start

def start():
    parser = create_parser()
    args = parser.parse_args()
    config_type = args.type

    config = Config(config_type=config_type)
    setup(config, args)

    logger.info('Config type: %s' % (config_type))
    config.opts.piece_style = args.piece_style
    config.opts.bg_style = args.bg_style
    config.internet.distributed = args.distributed

    if args.cmd == 'self':
        if args.ucci:
            import cchess_alphazero.worker.play_with_ucci_engine as self_play
        else:
            if mp.get_start_method() == 'spawn':
                import cchess_alphazero.worker.self_play_windows as self_play
            else:
                from cchess_alphazero.worker import self_play
        return self_play.start(config)
    elif args.cmd == 'opt':
        from cchess_alphazero.worker import optimize
        return optimize.start(config)
    elif args.cmd == 'play':
        if args.cli:
            import cchess_alphazero.play_games.play_cli as play
        else:
            from cchess_alphazero.play_games import play
        config.opts.light = False
        pwhc = PlayWithHumanConfig()
        pwhc.update_play_config(config.play)
        logger.info(f"AI move first : {args.ai_move_first}")
        play.start(config, not args.ai_move_first)
    elif args.cmd == 'eval':
        if not args.elo:
            from cchess_alphazero.worker import evaluator
        else:
            import cchess_alphazero.worker.compute_elo as evaluator
        config.eval.update_play_config(config.play)
        evaluator.start(config)
    elif args.cmd == 'sl':
        if args.onegreen:
            import cchess_alphazero.worker.sl_onegreen as sl
            sl.start(config, args.skip)
        else:
            from cchess_alphazero.worker import sl
            sl.start(config)
        
    elif args.cmd == 'ob':
        from cchess_alphazero.play_games import ob_self_play
        pwhc = PlayWithHumanConfig()
        pwhc.update_play_config(config.play)
        ob_self_play.start(config, args.ucci, args.ai_move_first)
Developer: zhuzhenping, Project: ChineseChess-AlphaZero, Lines: 55, Source: manager.py


Example 7: _run_global_explorers_parallel

 def _run_global_explorers_parallel(self, out_path):
     self.log.debug("Running global explorers in {} parallel jobs".format(
         self.jobs))
     self.log.trace("Multiprocessing start method is {}".format(
         multiprocessing.get_start_method()))
     self.log.trace(("Starting multiprocessing Pool for global "
                    "explorers run"))
     args = [
         (e, out_path, ) for e in self.list_global_explorer_names()
     ]
     mp_pool_run(self._run_global_explorer, args, jobs=self.jobs)
     self.log.trace(("Multiprocessing run for global explorers "
                     "finished"))
Developer: zhaostu, Project: cdist, Lines: 13, Source: explorer.py


Example 8: initialize_runtime_options

def initialize_runtime_options(options=None):
    """Sets and sanitizes runtime options.

    'options' should be a dict-like object containing mappings from options
    names to corresponding values.

    """
    import multiprocessing

    from . import compiler

    global GlobalOptions

    if not GlobalOptions:
        GlobalOptions = dict()
    if options:
        GlobalOptions.update(options)

    # Parse '--substitute-classes' and '--substitute-modules':
    GlobalOptions['substitute_classes'] = \
                            _parse_items(GlobalOptions.get('substitute_classes'))
    GlobalOptions['substitute_modules'] = \
                            _parse_items(GlobalOptions.get('substitute_modules'))

    if GlobalOptions.get('nodename') is None:
        GlobalOptions['nodename'] = ''

    _set_hostname()

    # Configure multiprocessing package to use chosen semantics:
    startmeth = GlobalOptions.get('start_method')
    if startmeth != multiprocessing.get_start_method(allow_none=True):
        multiprocessing.set_start_method(startmeth)

    # Convert 'compiler_flags' to a namespace object that can be passed directly
    # to the compiler:
    GlobalOptions['compiler_args'] \
        = compiler.ui.parse_compiler_args(
            GlobalOptions.get('compiler_flags', '').split())

    # Make sure the directory for storing trace files exists:
    if GlobalOptions.get('record_trace'):
        if 'logdir' not in GlobalOptions:
            raise ConfigurationError(
                "'record_trace' enabled without setting 'logdir'")
        os.makedirs(GlobalOptions['logdir'], exist_ok=True)
Developer: DistAlgo, Project: distalgo, Lines: 46, Source: common.py
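
The allow_none=True guard in the example above exists because set_start_method may only be called once per process. A minimal standalone demonstration of that set-once behavior (the particular method names passed in are purely illustrative):

import multiprocessing

if __name__ == '__main__':
    multiprocessing.set_start_method('forkserver')   # first call: fine
    try:
        multiprocessing.set_start_method('spawn')    # second call, no force=True
    except RuntimeError as err:
        print(err)  # "context has already been set"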


Example 9: _run_global_explorers_parallel

    def _run_global_explorers_parallel(self, out_path):
        self.log.info("Running global explorers in {} parallel jobs".format(
            self.jobs))
        self.log.debug("Multiprocessing start method is {}".format(
            multiprocessing.get_start_method()))
        self.log.debug(("Starting multiprocessing Pool for global "
                       "explorers run"))
        with multiprocessing.Pool(self.jobs) as pool:
            self.log.debug("Starting async for global explorer run")
            results = [
                pool.apply_async(self._run_global_explorer, (e, out_path,))
                for e in self.list_global_explorer_names()
            ]

            self.log.debug("Waiting async results for global explorer runs")
            for r in results:
                r.get()  # self._run_global_explorer returns None
            self.log.debug(("Multiprocessing run for global explorers "
                           "finished"))
Developer: jdguffey, Project: cdist, Lines: 19, Source: explorer.py


Example 10: _transfer_dir_parallel

 def _transfer_dir_parallel(self, source, destination, jobs):
     """Transfer a directory to the remote side in parallel mode."""
     self.log.debug("Remote transfer in {} parallel jobs".format(
         jobs))
     self.log.trace("Multiprocessing start method is {}".format(
         multiprocessing.get_start_method()))
     self.log.trace(("Starting multiprocessing Pool for parallel "
                     "remote transfer"))
     args = [
         (command, )
         for command in self._transfer_dir_commands(source, destination)
     ]
     if len(args) == 1:
         self.log.debug("Only one dir entry, transfering sequentially")
         self._run_command(args[0])
     else:
         mp_pool_run(self._run_command, args, jobs=jobs)
     self.log.trace(("Multiprocessing for parallel transfer "
                     "finished"))
Developer: Unterstrichmoepunterstrich, Project: cdist, Lines: 19, Source: remote.py


Example 11: wrapper

    def wrapper(*args, **kwargs):
        future = ProcessFuture()
        reader, writer = Pipe(duplex=False)

        if get_start_method() != 'fork':
            target = _trampoline
            args = [function.__name__, function.__module__] + list(args)
        else:
            target = function

        worker = launch_process(
            _function_handler, target, args, kwargs, writer)

        writer.close()

        future.set_running_or_notify_cancel()

        launch_thread(_worker_handler, future, worker, reader, timeout)

        return future
Developer: noxdafox, Project: pebble, Lines: 20, Source: process.py
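
The _trampoline indirection in the example above exists because, when the start method is not 'fork', the child process re-imports the target module and the callable must be picklable by qualified name. A standalone sketch of that constraint (not pebble code; names are illustrative):

import multiprocessing as mp

def double(x):  # module-level, importable by name, therefore pickleable
    print(x * 2)

if __name__ == '__main__':
    mp.set_start_method('spawn', force=True)
    p = mp.Process(target=double, args=(21,))  # OK: target pickled by name
    p.start()
    p.join()
    # mp.Process(target=lambda: None) would work under 'fork' (no pickling),
    # but fail to pickle under 'spawn' or 'forkserver'.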


Example 12: SystemError

#......... some code omitted here .........
        # "...and you should call it again with exc=NULL to revert the effect"
        ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
        raise SystemError("PyThreadState_SetAsyncExc failed")

class KillableThread(threading.Thread):
    def kill(self, exctype=SystemError):
        if not self.is_alive():
            return
        tid = self.ident
        _async_raise(tid, exctype)
    terminate = kill

USE_PROCESSES = os.environ.get("SEAMLESS_USE_PROCESSES")
if USE_PROCESSES is None:
    USE_PROCESSES = True
    if multiprocessing.get_start_method() != "fork":
        USE_PROCESSES = False
else:
    if USE_PROCESSES == "0" or USE_PROCESSES.upper() == "FALSE":
        USE_PROCESSES = False
if USE_PROCESSES:
    from multiprocessing import JoinableQueue as Queue
    Executor = Process
else:
    from queue import Queue
    Executor = KillableThread

def return_preliminary(result_queue, value):
    #print("return_preliminary", value)
    result_queue.put((-1, value))
Developer: sjdv1982, Project: seamless, Lines: 30, Source: execute.py


Example 13: entrypoint

def entrypoint():
    GlobalOptions = common.global_options()
    if GlobalOptions.start_method != \
       multiprocessing.get_start_method(allow_none=True):
        multiprocessing.set_start_method(GlobalOptions.start_method)
    target = GlobalOptions.file
    source_dir = os.path.dirname(target)
    basename = strip_suffix(os.path.basename(target))
    if not os.access(target, os.R_OK):
        die("Can not access source file %s" % target)

    sys.path.insert(0, source_dir)
    try:
        module = import_da(basename,
                           from_dir=source_dir,
                           compiler_args=GlobalOptions.compiler_flags.split())
    except ImportError as e:
        die("ImportError: " + str(e))
    if not (hasattr(module, 'main') and
            isinstance(module.main, types.FunctionType)):
        die("'main' function not defined!")
    GlobalOptions.this_module_name = module.__name__
    GlobalOptions.main_module_name = module.__name__
    if GlobalOptions.inc_module_name is None:
        GlobalOptions.inc_module_name = module.__name__ + "_inc"
    common.sysinit()

    # Start the background statistics thread:
    RootLock.acquire()
    stat_th = threading.Thread(target=collect_statistics,
                               name="Stat Thread")
    stat_th.daemon = True
    stat_th.start()

    niters = GlobalOptions.iterations
    stats = {'sent' : 0, 'usrtime': 0, 'systime' : 0, 'time' : 0,
              'units' : 0, 'mem' : 0}
    # Start main program
    sys.argv = [target] + GlobalOptions.args
    try:
        for i in range(0, niters):
            log.info("Running iteration %d ..." % (i+1))

            walltime_start = time.perf_counter()
            module.main()

            print("Waiting for remaining child processes to terminate..."
                  "(Press \"Ctrl-C\" to force kill)")

            walltime = time.perf_counter() - walltime_start

            #log_performance_statistics(walltime)
            r = aggregate_statistics()
            for k, v in r.items():
                stats[k] += v

        for k in stats:
            stats[k] /= niters

        perffd = None
        # if GlobalOptions.perffile is not None:
        #     perffd = open(GlobalOptions.perffile, "w")
        if perffd is not None:
            print_simple_statistics(perffd)
            perffd.close()

        dumpfd = None
        # if GlobalOptions.dumpfile is not None:
        #     dumpfd = open(GlobalOptions.dumpfile, "wb")
        if dumpfd is not None:
            pickle.dump(stats, dumpfd)
            dumpfd.close()

    except KeyboardInterrupt as e:
        log.info("Received keyboard interrupt.")
    except Exception as e:
        err_info = sys.exc_info()
        log.error("Caught unexpected global exception: %r", e)
        traceback.print_tb(err_info[2])

    log.info("Terminating...")
Developer: anirajk, Project: distalgo, Lines: 81, Source: api.py


Example 14: _real_worker_func

def _real_worker_func(params):
    # In a context where `multiprocessing` is using the `spawn` forking model,
    # the new process doesn't inherit anything, so we lost all our logging
    # configuration here. Let's set it up again.
    if (hasattr(multiprocessing, 'get_start_method') and
            multiprocessing.get_start_method() == 'spawn'):
        from piecrust.main import _pre_parse_chef_args
        _pre_parse_chef_args(sys.argv[1:])

    wid = params.wid
    logger.debug("Worker %d initializing..." % wid)

    # We don't need those.
    params.inqueue._writer.close()
    params.outqueue._reader.close()

    # Initialize the underlying worker class.
    w = params.worker_class(*params.initargs)
    w.wid = wid
    try:
        w.initialize()
    except Exception as ex:
        logger.error("Working failed to initialize:")
        logger.exception(ex)
        params.outqueue.put(None)
        return

    use_threads = False
    if use_threads:
        # Create threads to read/write the jobs and results from/to the
        # main arbitrator process.
        local_job_queue = queue.Queue()
        reader_thread = threading.Thread(
                target=_job_queue_reader,
                args=(params.inqueue.get, local_job_queue),
                name="JobQueueReaderThread")
        reader_thread.start()

        local_result_queue = queue.Queue()
        writer_thread = threading.Thread(
                target=_job_results_writer,
                args=(local_result_queue, params.outqueue.put),
                name="JobResultWriterThread")
        writer_thread.start()

        get = local_job_queue.get
        put = local_result_queue.put_nowait
    else:
        get = params.inqueue.get
        put = params.outqueue.put

    # Start pumping!
    completed = 0
    time_in_get = 0
    time_in_put = 0
    while True:
        get_start_time = time.perf_counter()
        task = get()
        time_in_get += (time.perf_counter() - get_start_time)

        task_type, task_data = task
        if task_type == TASK_END:
            logger.debug("Worker %d got end task, exiting." % wid)
            wprep = {
                    'WorkerTaskGet': time_in_get,
                    'WorkerResultPut': time_in_put}
            try:
                rep = (task_type, True, wid, (wid, w.getReport(wprep)))
            except Exception as e:
                logger.debug("Error getting report: %s" % e)
                if params.wrap_exception:
                    e = multiprocessing.ExceptionWithTraceback(
                            e, e.__traceback__)
                rep = (task_type, False, wid, (wid, e))
            put(rep)
            break

        if task_type == TASK_JOB:
            task_data = (task_data,)

        for t in task_data:
            try:
                res = (TASK_JOB, True, wid, w.process(t))
            except Exception as e:
                if params.wrap_exception:
                    e = multiprocessing.ExceptionWithTraceback(
                            e, e.__traceback__)
                res = (TASK_JOB, False, wid, e)

            put_start_time = time.perf_counter()
            put(res)
            time_in_put += (time.perf_counter() - put_start_time)

            completed += 1

    if use_threads:
        logger.debug("Worker %d waiting for reader/writer threads." % wid)
        local_result_queue.put_nowait(None)
        reader_thread.join()
        writer_thread.join()
#......... some code omitted here .........
Developer: germanschnyder, Project: PieCrust2, Lines: 101, Source: workerpool.py


Example 15: print

__author__ = 'fpbatta'

import time

import numpy as np
import sys
import multiprocessing as mp
if __name__ == '__main__':
    #mp.freeze_support()

    print(mp.get_all_start_methods())
    sm = mp.get_start_method()
    print("sm: ", sm)
    mp.set_start_method('forkserver', force=True)
    print("sm 2: ", sm)

    sys.path.append('/home/fpbatta/src/GUI/Plugins')
    sys.path.append('/home/fpbatta/src/GUI/Plugins/multiprocessing_plugin')

    #sys.path.append('/Users/fpbatta/src/GUImerge/GUI/Plugins/multiprocessing_plugin')
    from multiprocessing_plugin.multiprocessing_plugin import MultiprocessingPlugin


    m = MultiprocessingPlugin()

    m.startup(20000.)
    m.bufferfunction(np.random.random((11,1000)))

    for i in range(100):
        m.bufferfunction(200. * np.random.random((11,1000)))
        time.sleep(0.05)
Developer: fpbattaglia, Project: PythonPlugin, Lines: 31, Source: test_multiprocessing.py


Example 16: __init__

    def __init__(self, model, device_ids=1, n_workers=None,
                 max_batch_size=None, max_image_size=DEFAULT_MAX_IMAGE_SIZE,
                 modder=None):
        """
        Args:
        - model (PyMjModel): MuJoCo model to use for rendering
        - device_ids (int/list): list of device ids to use for rendering.
            One or more workers will be assigned to each device, depending
            on how many workers are requested.
        - n_workers (int): number of parallel processes in the pool. Defaults
            to the number of device ids.
        - max_batch_size (int): maximum number of states that can be rendered
            in batch using .render(). Defaults to the number of workers.
        - max_image_size (int): maximum number pixels in images requested
            by .render()
        - modder (Modder): modder to use for domain randomization.
        """
        self._closed, self.pool = False, None

        if not (modder is None or inspect.isclass(modder)):
            raise ValueError("modder must be a class")

        if isinstance(device_ids, int):
            device_ids = list(range(device_ids))
        else:
            assert isinstance(device_ids, list), (
                "device_ids must be list of integer")

        n_workers = n_workers or 1
        self._max_batch_size = max_batch_size or (len(device_ids) * n_workers)
        self._max_image_size = max_image_size

        array_size = self._max_image_size * self._max_batch_size

        self._shared_rgbs = Array(ctypes.c_uint8, array_size * 3)
        self._shared_depths = Array(ctypes.c_float, array_size)

        self._shared_rgbs_array = np.frombuffer(
            self._shared_rgbs.get_obj(), dtype=ctypes.c_uint8)
        assert self._shared_rgbs_array.size == (array_size * 3), (
            "Array size is %d, expected %d" % (
                self._shared_rgbs_array.size, array_size * 3))
        self._shared_depths_array = np.frombuffer(
            self._shared_depths.get_obj(), dtype=ctypes.c_float)
        assert self._shared_depths_array.size == array_size, (
            "Array size is %d, expected %d" % (
                self._shared_depths_array.size, array_size))

        worker_id = Value(ctypes.c_int)
        worker_id.value = 0

        if get_start_method() != "spawn":
            raise RuntimeError(
                "Start method must be set to 'spawn' for the "
                "render pool to work. That is, you must add the "
                "following to the _TOP_ of your main script, "
                "before any other imports (since they might be "
                "setting it otherwise):\n"
                "  import multiprocessing as mp\n"
                "  if __name__ == '__main__':\n"
                "    mp.set_start_method('spawn')\n")

        self.pool = Pool(
            processes=len(device_ids) * n_workers,
            initializer=MjRenderPool._worker_init,
            initargs=(
                model.get_mjb(),
                worker_id,
                device_ids,
                self._shared_rgbs,
                self._shared_depths,
                modder))
Developer: m-j-mcdonald, Project: mujoco-py, Lines: 72, Source: mjrenderpool.py


Example 17: _real_worker_func

def _real_worker_func(params):
    # In a context where `multiprocessing` is using the `spawn` forking model,
    # the new process doesn't inherit anything, so we lost all our logging
    # configuration here. Let's set it up again.
    if (hasattr(multiprocessing, 'get_start_method') and
            multiprocessing.get_start_method() == 'spawn'):
        from piecrust.main import _pre_parse_chef_args
        _pre_parse_chef_args(sys.argv[1:])

    wid = params.wid
    logger.debug("Worker %d initializing..." % wid)

    params.inqueue._writer.close()
    params.outqueue._reader.close()

    w = params.worker_class(*params.initargs)
    w.wid = wid
    try:
        w.initialize()
    except Exception as ex:
        logger.error("Working failed to initialize:")
        logger.exception(ex)
        params.outqueue.put(None)
        return

    get = params.inqueue.get
    put = params.outqueue.put

    completed = 0
    while True:
        try:
            task = get()
        except (EOFError, OSError):
            logger.debug("Worker %d encountered connection problem." % wid)
            break

        task_type, task_data = task
        if task_type == TASK_END:
            logger.debug("Worker %d got end task, exiting." % wid)
            try:
                rep = (task_type, True, wid, (wid, w.getReport()))
            except Exception as e:
                if params.wrap_exception:
                    e = multiprocessing.ExceptionWithTraceback(
                            e, e.__traceback__)
                rep = (task_type, False, wid, (wid, e))
            put(rep)
            break

        if task_type == TASK_JOB:
            task_data = (task_data,)

        for t in task_data:
            try:
                res = (TASK_JOB, True, wid, w.process(t))
            except Exception as e:
                if params.wrap_exception:
                    e = multiprocessing.ExceptionWithTraceback(
                            e, e.__traceback__)
                res = (TASK_JOB, False, wid, e)
            put(res)

            completed += 1

    logger.debug("Worker %d completed %d tasks." % (wid, completed))
Developer: thhgcn, Project: PieCrust2, Lines: 65, Source: workerpool.py


Example 18: run

	def run(self, pool_size):
		"""
		this function manages the parallel processing of the url list using the python Pool class

		the function first reads the list of urls out of the page_lists directory, cleans it
			of known issues (eg common binary files) and of idna encoding problems (tricky!)

		then the page list is mapped to the process_url function and executed in parallel

		pool_size is defined in the run_webxray.py file, see details there
		"""

		# the list of url MUST be in the page_lists directory!
		try:
			url_list = open(os.path.dirname(os.path.abspath(__file__)) + '/../page_lists/' + self.pages_file_name, 'r', encoding='utf-8')
		except:
			print('File "%s" does not exist, file must be in ./page_lists directory.  Exiting.' % self.pages_file_name)
			exit()

		# set up sql connection used to determine if items are already in the db
		if self.db_engine == 'mysql':		
			from webxray.MySQLDriver import MySQLDriver
			sql_driver = MySQLDriver(self.db_name)
		elif self.db_engine == 'postgres':	
			from webxray.PostgreSQLDriver import PostgreSQLDriver
			sql_driver = PostgreSQLDriver(self.db_name)
		elif self.db_engine == 'sqlite':	
			from webxray.SQLiteDriver import SQLiteDriver
			sql_driver = SQLiteDriver(self.db_name)

		# this list gets mapped to the Pool, very important!
		urls_to_process = set()

		# simple counter used solely for updates to CLI
		count = 0
		
		print('\t------------------------')
		print('\t Building List of Pages ')
		print('\t------------------------')
				
		for url in url_list:
			# skip lines that are comments
			if "#" in url[0]: continue
		
			count += 1
		
			# only do lines starting with https?://
			if not (re.match('^https?://.+', url)):
				print("\t\t%s | %-50s Not a valid address, Skipping." % (count, url[:50]))
				continue

			# non-ascii domains will crash phantomjs, so we need to convert them to 
			# 	idna/ascii/utf-8
			# this requires splitting apart the url, converting the domain to idna,
			#	and pasting it all back together
			
			split_url = urlsplit(url.strip())
			idna_fixed_netloc = split_url.netloc.encode('idna').decode('utf-8')
			url = urlunsplit((split_url.scheme,idna_fixed_netloc,split_url.path,split_url.query,split_url.fragment))

			# if it is a m$ office or other doc, skip
			if re.match('.+(pdf|ppt|pptx|doc|docx|txt|rtf|xls|xlsx)$', url):
				print("\t\t%s | %-50s Not an HTML document, Skipping." % (count, url[:50]))
				continue

			# skip if in db already unless we are doing a timeseries
			if self.allow_timeseries == False:
				if sql_driver.page_exists(url):
					print("\t\t%s | %-50s Exists in DB, Skipping." % (count, url[:50]))
					continue
	
			# only add if not in list already
			if url not in urls_to_process:
				print("\t\t%s | %-50s Adding." % (count, url[:50]))
				urls_to_process.add(url)
			else:
				print("\t\t%s | %-50s Already queued, Skipping." % (count, url[:50]))

		# close the db connection
		sql_driver.close()

		print('\t----------------------------------')
		print('\t%s addresses will now be webXray\'d'  % len(urls_to_process))
		print('\t\tBrowser(s) are %s' % self.browser_types)
		print('\t\tBrowser wait time is %s seconds' % self.browser_wait)
		print('\t\t...you can go take a walk. ;-)')
		print('\t----------------------------------')

		# for macOS (darwin) we must specify start method as 'forkserver'
		#	this is essentially voodoo to ward off evil spirits which 
		#	appear when large pool sizes are used on macOS
		# get_start_method must be called with allow_none=True, otherwise
		#	merely checking the method implicitly sets it (!) - and a
		#	subsequent set_start_method call would then raise an error
		if sys.platform == 'darwin' and multiprocessing.get_start_method(allow_none=True) != 'forkserver':
			multiprocessing.set_start_method('forkserver')
		myPool = multiprocessing.Pool(pool_size)
		myPool.map(self.process_url, urls_to_process)

		# FYI
#......... some code omitted here .........
Developer: timlib, Project: webXray, Lines: 101, Source: Collector.py


Example 19: commandline


#......... some code omitted here .........
        cls.construct_remote_exec_copy_patterns(args)
        base_root_path = cls.create_base_root_path(args.out_path)

        hostcnt = 0

        cfg = cdist.configuration.Configuration(args)
        configuration = cfg.get_config(section='GLOBAL')

        if args.tag or args.all_tagged_hosts:
            inventory.determine_default_inventory_dir(args, configuration)
            if args.all_tagged_hosts:
                inv_list = inventory.InventoryList(
                    hosts=None, istag=True, hostfile=None,
                    db_basedir=args.inventory_dir)
            else:
                inv_list = inventory.InventoryList(
                    hosts=args.host, istag=True, hostfile=args.hostfile,
                    db_basedir=args.inventory_dir,
                    has_all_tags=args.has_all_tags)
            it = inv_list.entries()
        else:
            it = itertools.chain(cls.hosts(args.host),
                                 cls.hosts(args.hostfile))

        process_args = []
        if args.parallel:
            log.trace("Processing hosts in parallel")
        else:
            log.trace("Processing hosts sequentially")
        for entry in it:
            if isinstance(entry, tuple):
                # if configuring by specified tags
                host = entry[0]
                host_tags = entry[1]
            else:
                # if configuring by host then check inventory for tags
                host = entry
                inventory.determine_default_inventory_dir(args, configuration)
                inv_list = inventory.InventoryList(
                    hosts=(host,), db_basedir=args.inventory_dir)
                inv = tuple(inv_list.entries())
                if inv:
                    # host is present in inventory and has tags
                    host_tags = inv[0][1]
                else:
                    # host is not present in inventory or has no tags
                    host_tags = None
            host_base_path, hostdir = cls.create_host_base_dirs(
                host, base_root_path)
            log.debug("Base root path for target host \"{}\" is \"{}\"".format(
                host, host_base_path))

            hostcnt += 1
            if args.parallel:
                pargs = (host, host_tags, host_base_path, hostdir, args, True,
                         configuration)
                log.trace(("Args for multiprocessing operation "
                           "for host {}: {}".format(host, pargs)))
                process_args.append(pargs)
            else:
                try:
                    cls.onehost(host, host_tags, host_base_path, hostdir,
                                args, parallel=False,
                                configuration=configuration)
                except cdist.Error as e:
                    failed_hosts.append(host)
        if args.parallel and len(process_args) == 1:
            log.debug("Only 1 host for parallel processing, doing it "
                      "sequentially")
            try:
                cls.onehost(*process_args[0])
            except cdist.Error as e:
                failed_hosts.append(host)
        elif args.parallel:
            log.trace("Multiprocessing start method is {}".format(
                multiprocessing.get_start_method()))
            log.trace(("Starting multiprocessing Pool for {} "
                       "parallel host operation".format(args.parallel)))

            results = mp_pool_run(cls.onehost,
                                  process_args,
                                  jobs=args.parallel)
            log.trace(("Multiprocessing for parallel host operation "
                       "finished"))
            log.trace("Multiprocessing for parallel host operation "
                      "results: %s", results)

            failed_hosts = [host for host, result in results if not result]

        time_end = time.time()
        log.verbose("Total processing time for %s host(s): %s", hostcnt,
                    (time_end - time_start))

        if len(failed_hosts) > 0:
            raise cdist.Error("Failed to configure the following hosts: " +
                              " ".join(failed_hosts))
        elif not args.out_path:
            # If tmp out path created then remove it, but only if no failed
            # hosts.
            shutil.rmtree(base_root_path)
Developer: zhaostu, Project: cdist, Lines: 101, Source: config.py


Example 20: _iterate_once_parallel

    def _iterate_once_parallel(self):
        self.log.debug("Iteration in parallel mode in {} jobs".format(
            self.jobs))
        objects_changed = False

        cargo = []
        for cdist_object in self.object_list():
            if cdist_object.requirements_unfinished(cdist_object.requirements):
                """We cannot do anything for this poor object"""
                continue

            if cdist_object.state == core.CdistObject.STATE_UNDEF:
                """Prepare the virgin object"""

                # self.object_prepare(cdist_object)
                # objects_changed = True
                cargo.append(cdist_object)

        n = len(cargo)
        if n == 1:
            self.log.debug("Only one object, preparing sequentially")
            self.object_prepare(cargo[0])
            objects_changed = True
        elif cargo:
            self.log.trace("Multiprocessing start method is {}".format(
                multiprocessing.get_start_method()))

            self.log.trace("Multiprocessing cargo: %s", cargo)

            cargo_types = set()
            for c in cargo:
                cargo_types.add(c.cdist_type)
            self.log.trace("Multiprocessing cargo_types: %s", cargo_types)
            nt = len(cargo_types)
            if nt == 1:
                self.log.debug(("Only one type, transfering explorers "
                                "sequentially"))
                self.explorer.transfer_type_explorers(cargo_types.pop())
            else:
                self.log.trace(("Starting multiprocessing Pool for {} "
                                "parallel transfering types' explorers".format(
                                    nt)))
                args = [
                    (ct, ) for ct in cargo_types
                ]
                mp_pool_run(self.explorer.transfer_type_explorers, args,
                            jobs=self.jobs)
                self.log.trace(("Multiprocessing for parallel transfering "
                                "types' explorers finished"))

            self.log.trace(("Starting multiprocessi 
