
Python multiprocessing.Event class code examples


This article collects typical usage examples of Python's multiprocessing.Event. If you have been wondering what the Event class is for, how it is used, or what real-world code that uses it looks like, the curated examples below should help.



A total of 20 code examples of the Event class are shown below, sorted by popularity. A minimal warm-up sketch of the basic pattern comes first, followed by the project excerpts.
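
Before the excerpts, here is a minimal, self-contained sketch (not taken from any of the projects below) of the basic handshake the Event class provides: one process blocks on wait() until another calls set(), and the flag can later be reset with clear().

from multiprocessing import Event, Process


def wait_for_go(go_event):
    # Block until the parent calls go_event.set().
    go_event.wait()
    print("child: event is set, continuing")


if __name__ == "__main__":
    go_event = Event()
    child = Process(target=wait_for_go, args=(go_event,))
    child.start()

    # ... parent does its setup work here ...
    go_event.set()                    # wake the waiting child
    child.join()

    print("parent: is_set() ->", go_event.is_set())  # stays True until clear()
    go_event.clear()                  # reset the flag for reuse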

Example 1: __init__

In this fmanager class, the Event self.sf is cleared at start-up and shared with a pool of evaluator worker processes; __del__ sets it so the workers shut down before they are joined and terminated:

class fmanager:
    def __init__(self,data,fn):
        self.sf = Event()
        self.sf.clear()

        self.nproc=cpu_count()
        self.pipes = [Pipe() for i in xrange(self.nproc)]
        self.e = [evaluator(self.pipes[i][1],self.sf,data,fn) for i in xrange(self.nproc)]
        null = [i.start() for i in self.e]
        return

    def __del__(self):
        self.sf.set()
        null = [i.join() for i in self.e]
        null = [i.terminate() for i in self.e]

        return

    def eval(self,x):
        nd = len(x)
        for i in xrange(nd):
            self.pipes[i % self.nproc][0].send([i, x[i]])
        solns = []
        while len(solns) < nd:
            for i in xrange(self.nproc):
                if self.pipes[i][0].poll(0.005):
                    solns.append(self.pipes[i][0].recv())
        solns.sort(key=lambda i: i[0])
        return [i[1] for i in solns]
Author: markm541374 | Project: pdirect | Lines of code: 29 | Source file: multitest.py


Example 2: execute_action

Here the Event marks completion of the child process running the action; event.wait() with a timeout doubles as the heartbeat interval, so the loop keeps sending heartbeats until the action finishes or cancellation terminates it:

    def execute_action(self, action):
        event = Event()
        queue = Queue()
        proc = Process(
            target=execute_action_proc,
            args=(self.execute, action, event, queue))
        proc.start()

        # Send heartbeat.
        heartbeat_retry = 0
        while not event.is_set():
            event.wait(config.ACTIVITY_HEARTBEAT_INTERVAL)
            try:
                res = self.heartbeat(self.task_token)
                if res['cancelRequested']:
                    proc.terminate()
                    proc.join()
                    return Result('cancelled', -1, '', '', '', -1)
            except Exception as err:
                if heartbeat_retry <= config.ACTIVITY_HEARTBEAT_MAX_RETRY:
                    heartbeat_retry += 1
                    continue
                else:
                    proc.terminate()
                    proc.join()
                    raise

        # Evaluate the result.
        result = queue.get_nowait()
        proc.join()
        return result
Author: badboy99tw | Project: mass | Lines of code: 31 | Source file: __init__.py
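
The heartbeat loop above works because Event.wait(timeout) returns as soon as the flag is set and otherwise gives up after the timeout, so it doubles as an interruptible sleep. A minimal sketch of that idiom, with an illustrative long_task stand-in rather than the project's execute_action_proc:

import time
from multiprocessing import Event, Process


def long_task(done):
    time.sleep(3)      # stand-in for the real work
    done.set()         # tell the monitoring loop the task is finished


if __name__ == "__main__":
    done = Event()
    proc = Process(target=long_task, args=(done,))
    proc.start()

    while not done.is_set():
        # Wakes up at least once per second for periodic work
        # (e.g. sending a heartbeat), but returns immediately
        # once the worker calls done.set().
        done.wait(timeout=1.0)
        print("heartbeat")

    proc.join()
    print("task finished")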


Example 3: New_Process_Actor

The Event self.mevent is shared with the Channel2Process helper and set in run() once the new process has started, signalling that it is ready to receive data:

class New_Process_Actor(Actor):
    '''Create an Actor in a new process. Connected as usual with scipysim 
    channels. When this Actor is started, it launches a new process, creates
    an instance of the Actor class passed to it in a second thread, and starts
    that actor.
    '''
    def __init__(self, cls, *args, **kwargs):
        super(New_Process_Actor, self).__init__()
        self.cls = cls
        self.args = list(args)
        self.kwargs = kwargs
        self.mqueue = MQueue()
        self.mevent = MEvent()
        
        if 'input_channel' not in kwargs:
            kwargs['input_channel'] = self.args[0]
        
        chan = kwargs['input_channel']
        kwargs['input_channel'] = self.mqueue
        
        
        print 'chan: ', chan
        self.c2p = Channel2Process(chan, self.mevent, self.mqueue)
        
        self.c2p.start()


    def run(self):
        self.t = Process(target=target, args=(self.cls, self.args, self.kwargs))
        self.t.start()
        self.mevent.set() # signal that process is ready to receive
        self.c2p.join()
        self.t.join()
Author: hardbyte | Project: scipy-sim | Lines of code: 33 | Source file: plotter.py


Example 4: test_play_and_record

The recording_started Event lets the test wait (with a 100-second timeout) until the recording subprocess is running before it plays back the prerecorded events:

    def test_play_and_record(self):
        """
        Verifies that a Device and play back prerecorded events.
        """
        device = evemu.Device(self.get_device_file())
        devnode = device.devnode
        events_file = self.get_events_file()
        # device.record() calls evemu_record() and is thus missing the
        # description that the input file has
        with open(events_file) as e:
            indata = extract_events(strip_comments(e.readlines()))

        recording_started = Event()
        q = Queue()
        record_process = Process(target=record,
                                 args=(recording_started, devnode, q))
        record_process.start()
        recording_started.wait(100)
        device.play(open(events_file))

        outdata = strip_comments(q.get())
        record_process.join()

        self.assertEquals(len(indata), len(outdata))
        fuzz = re.compile("E: \d+\.\d+ (.*)")
        for i in range(len(indata)):
            lhs = fuzz.match(indata[i])
            self.assertTrue(lhs)
            rhs = fuzz.match(outdata[i])
            self.assertTrue(rhs)
            self.assertEquals(lhs.group(1), rhs.group(1))
Author: bentiss | Project: evemu | Lines of code: 31 | Source file: test_device.py


Example 5: initProcessCommunications

Several Events (new frequency, control, new scan, capture running, recording) are created alongside shared Values and Queues to coordinate the acquisition processes:

    def initProcessCommunications(self):
        # Queues and events for the acquisition processes
        self.newFreqEvent = Event()
        self.controlEvent = Event()
        self.newScanEvent = Event()
        self.captureRunningEvent = Event()
        self.recordingEvent = Event()

        self.currentVolt = Value('d',0.0)
        self.currentSamples = Value('i',0)
        self.currentFreq = Value('d',0.0)
        self.currentThick = Value('d',0.0)
        self.currentThin = Value('d',0.0)
        self.currentPower = Value('d',0.0)
        self.currentLW = Value('d',0.0)
        self.currentCycle = Value('i',0)
        self.protonsPerCycle = Value('i',0)
        self.protonsForHRS = Value('i',0)
        self.protonPulse = Value('b',False)
        self.iscool = Value('d',0.0)

        self.dataQueue = Queue()
        self.freqQueue = Queue()
        self.errorQueue = Queue()
        self.messageQueue = Queue()
        self.dataStreamQueue = Queue()
Author: rubendegroote | Project: CRISTALCLEAR | Lines of code: 26 | Source file: scanner.py


Example 6: test_sentinel

The test pre-sets the stop Event so the Sentinel shuts down immediately, then asserts that the Sentinel set the start Event and reports the STOPPED status:

def test_sentinel():
    start_event = Event()
    stop_event = Event()
    stop_event.set()
    s = Sentinel(stop_event, start_event, broker=get_broker('sentinel_test:q'))
    assert start_event.is_set()
    assert s.status() == Conf.STOPPED
Author: 277800076 | Project: django-q | Lines of code: 7 | Source file: test_cluster.py
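
Pre-setting the stop event is what keeps this test fast: the component under test starts, sees the stop flag already raised, and shuts down immediately. A hedged sketch of the same two-event idiom, using a toy service process instead of django-q's Sentinel:

from multiprocessing import Event, Process


def service(start_event, stop_event):
    start_event.set()     # announce that the service is up
    stop_event.wait()     # run until asked to stop


def test_service_starts_and_stops():
    start_event = Event()
    stop_event = Event()
    stop_event.set()      # pre-set, so the service exits right away
    p = Process(target=service, args=(start_event, stop_event))
    p.start()
    p.join(timeout=5)
    assert start_event.is_set()
    assert not p.is_alive()


if __name__ == "__main__":
    test_service_starts_and_stops()
    print("ok")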


Example 7: test_sentinel

The same test as above, written against an older django-q API that takes a list_key instead of a broker:

def test_sentinel():
    start_event = Event()
    stop_event = Event()
    stop_event.set()
    s = Sentinel(stop_event, start_event, list_key='sentinel_test:q')
    assert start_event.is_set()
    assert s.status() == Conf.STOPPED
Author: sebasmagri | Project: django-q | Lines of code: 7 | Source file: test_cluster.py


Example 8: DeviceServer

The private _stop Event makes the serve loop cooperatively stoppable: stop() sets the flag, and run() keeps calling handle_request() until stopped() returns True:

class DeviceServer(ThreadedTCPServer, Process):
	
	#causes handle_request to return
	timeout = 1
	
	def __init__(self, mux, muxdevice, server_address, RequestHandlerClass):
		Process.__init__(self)
		ThreadedTCPServer.__init__(self, server_address, RequestHandlerClass)
		self.mux = mux
		self.muxdev = muxdevice
		self._stop = Event()

	def stop(self):
		self._stop.set()
		
	def stopped(self):
		return self._stop.is_set()

	def run(self):
		if self.stopped():
			_LOGGER.warning("Thread already stopped")
		
		while not self.stopped():
			self.handle_request()
		self.socket.close()
		_LOGGER.debug("%s will exit now" % (str(self)))
Author: L45eMy | Project: Worker | Lines of code: 26 | Source file: deviceconnection.py
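
Examples 8 and 14 share an idiom worth isolating: the Event is created in the parent, stored on the object, and polled with is_set() inside the worker loop so the parent can request a clean shutdown. A stripped-down sketch of that pattern, using a hypothetical StoppableWorker rather than the DeviceServer above:

import time
from multiprocessing import Event, Process


class StoppableWorker(Process):
    def __init__(self):
        super().__init__()
        self._stop_event = Event()   # shared with the child via inheritance

    def stop(self):
        self._stop_event.set()       # called from the parent process

    def run(self):
        while not self._stop_event.is_set():
            # ... handle one unit of work per iteration ...
            time.sleep(0.1)
        print("worker exiting cleanly")


if __name__ == "__main__":
    w = StoppableWorker()
    w.start()
    time.sleep(1)
    w.stop()
    w.join()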


Example 9: Logger

The done Event tells the logger to finish: set_done() sets it from outside, and log() notices the flag, clears it, and returns True so the caller can stop logging:

class Logger(object):
    def __init__(self, filename):
        self.qtag = Queue()
        self.done = Event()
        self.tag = None
        self.filename = filename
        self.file = None
    def start(self):
        self.file = open(self.filename, 'w')
        print 'Opened',self.filename,'for writing.'
    def set_tag(self, tag):
        self.qtag.put(tag)
    def set_done(self):
        self.done.set()
    def log(self, nodeid, msgid, data):
        if not self.qtag.empty():
            self.tag = self.qtag.get()
        if self.done.is_set():
            self.done.clear()
            return True
        L = ['%f'%time.time(), '%d'%nodeid, '%d'%msgid] + map(str,data)
        if self.tag:
            L.append(self.tag)
        print >>self.file, ','.join(L)
        self.file.flush()
    def close(self):
        if self.file:
            self.file.close()
            print 'File closed.'
Author: malloch | Project: emergeData | Lines of code: 29 | Source file: gesture_recorder.py


Example 10: main

The sigexit Event is set before the loop starts, so each call to run_proc generates only one batch instead of looping indefinitely:

def main():
    # Use alphazero self-play for data generation
    agents_meta = parse_schedule() 

    # worker variable of main process
    board = Board()
    sigexit = Event()
    sigexit.set()  # pre-set signal so main proc generator will iterate only once

    # subprocess data generator
    helper = DataHelper(data_files=[])
    helper.set_agents_meta(agents_meta=agents_meta)     
    generator = helper.generate_batch(TRAINING_CONFIG["batch_size"])

    # start generating
    with h5py.File(f"{DATA_CONFIG['data_path']}/latest.train.hdf5", 'a') as hf:    
        for state_batch, value_batch, probs_batch in generator:
            for batch_name in ("state_batch", "value_batch", "probs_batch"):
                if batch_name not in hf:
                    shape = locals()[batch_name].shape
                    hf.create_dataset(batch_name, (0, *shape), maxshape=(None, *shape))
                hf[batch_name].resize(hf[batch_name].shape[0] + 1, axis=0)
                hf[batch_name][-1] = locals()[batch_name]

            # prevent main proc from generating data too quick
            # since sigexit has been set, proc will iterate only once
            run_proc(helper.buffer, helper.buffer_size, helper.lock,
                     sigexit, agents_meta, board) 
            board.reset()
Author: Vigilans | Project: GomokuAI | Lines of code: 29 | Source file: data_helper.py


Example 11: __init__

Each worker process is given its own event_restart and stop_event (plus optional per-supporter stop events) alongside the pipes and queues that coordinate the agent hierarchy:

	def __init__(self, id, config, sequence, hist_obj, results_path, log_id):
		Process.__init__(self)
		self.id = id
		self.config = config
		self.sequence = sequence
		self.hist_obj = hist_obj
		self.agent = Agent(self.id, config, sequence)
		self.results_path = results_path
		self.log_id = log_id
		self.leader_send = None
		self.leader_recv = None
		self.support_send = [None for i in range(0, self.config.num_sup)] if id * self.config.num_sup + 1 < self.config.num_agents else None
		self.support_recv = [None for i in range(0, self.config.num_sup)] if id * self.config.num_sup + 1 < self.config.num_agents else None
		self.root_div_send = None
		self.leader_div_send = None
		self.agent_div_recv = [None for i in range(1, self.config.num_agents)] if self.agent.id_leader == None else None
		self.support_div_recv = [None for i in range(1, self.config.num_sup+1)] if self.agent.id_supporters else None
		self.leader_reset_send = None
		self.leader_reset_recv = None
		self.support_reset_send = [None for i in range(0, self.config.num_sup)] if id * self.config.num_sup + 1 < self.config.num_agents else None
		self.support_reset_recv = [None for i in range(0, self.config.num_sup)] if id * self.config.num_sup + 1 < self.config.num_agents else None
		self.event_restart = Event()
		self.stop_event = Event()
		self.support_stop_event = [None for i in range(0, self.config.num_sup)] if id * self.config.num_sup + 1 < self.config.num_agents else None
		self.energy_number = Queue(1)
		self.support_energy_number = [None for i in range(0, self.config.num_sup)] if id * self.config.num_sup + 1 < self.config.num_agents else None
Author: Ryusoru | Project: DMA-3DPSP | Lines of code: 26 | Source file: workerprocess.py


Example 12: __init__

The kill_event blocks run() after the target thread has been started; once another process sets it, the wrapper stops the target and joins the thread:

class SubProcessWrapper:
	cname = __name__ + '.SubProcessWrapper'
	def __init__(self, target, name=None):
		self.target = target
		self.running = False
		self.name = name if name else target.task_name()
		self.kill_event = Event()
		self.logger = logging.getLogger(self.cname)

	def run(self):
		self.logger.info("starting SubProcessTask: {}".format(self.target.task_name()))
		th = Thread(target=self.target, name=self.target.task_name())
		th.start()		
		signal.signal(signal.SIGINT, signal.SIG_IGN)
		self.kill_event.wait()
		self.logger.info("stopping SubProcessTask: {}".format(self.target.task_name()))
		self.target.stop()
		th.join()
		self.logger.info("Stopped SubProcessTask: {}".format(self.target.task_name()))

	def __call__(self):
		self.run()

	def get_kill_event(self):
		return self.kill_event
Author: jmcotelo | Project: acequia | Lines of code: 25 | Source file: subprocess.py


Example 13: __init__

enqueue() clears the Event and returns it, and the worker sets it after executing each queued function, so callers can wait on the returned Event or pass it as a dependency of a later task:

class QueueTask:
    def __init__(self):
        self.queue = JoinableQueue()
        self.event = Event()
        atexit.register( self.queue.join )

        process = Process(target=self.work)
        process.daemon = True
        process.start()


    def work(self):
        while True:
            func, args, wait_for = self.queue.get()

            for evt in wait_for: 
                evt.wait()
            func(*args)
            self.event.set()

            self.queue.task_done()


    def enqueue(self, func, args=[], wait_for=[]):
        self.event.clear()
        self.queue.put( (func, args, wait_for) )

        return self.event 
Author: wbkifun | Project: fdtd_accelerate | Lines of code: 28 | Source file: queue_multiprocessing_test.py


Example 14: ClassifierWorkerPool

A single stop Event is shared by the queue feeder and all classifier workers; terminate() sets it and then joins everything:

class ClassifierWorkerPool(object):
    def __init__(self):
        self.queue = Queue(100)
        self.workers = []
        self.stop = Event()
        self.stop.clear()
        self.queue_feeder = QueueFeeder(self.queue, self.stop)

        row = TrainedClassifiers.objects(name=config.classifier).first()

        if not row:
            raise Exception("Classifier %s does not exists" % config.classifier)

        self.trained_classifier = row.get_classifier()

    def start(self):
        self.queue_feeder.start()

        for i in range(0, config.classifier_pool_size):
            worker = ClassifierWorker(self.trained_classifier, self.queue, self.stop)
            worker.start()
            self.workers.append(worker)

    def terminate(self):
        self.stop.set()
        self.queue_feeder.join()
        for w in self.workers:
            w.join()
Author: BastinRobin | Project: streamcrab | Lines of code: 28 | Source file: pool.py


Example 15: DataLoaderOnTheFly

The exit Event is set in __del__ to tell the daemonized worker processes filling batch_queue to stop before they are joined:

class DataLoaderOnTheFly():
    def __init__(self, config):
        default_config = Config(proc_count = 4, limit_batch_count = None)
        self.config = default_config(**config)
        self.exit = Event()
        self.batch_queue = Queue(maxsize = 10)
        if self.config.limit_batch_count is None:
            self.limited = False
        else:
            self.limited = True
            self.batch_list = []
            self.index = -1
        self.workers = []
        for _ in range(self.config.proc_count):
            self.workers.append(Process(target = config.worker, args = (self,)))
        for w in self.workers:
            w.daemon = True
            w.start()
    def next_batch(self):
        if self.limited:
            if len(self.batch_list) < self.config.limit_batch_count:
                self.batch_list.append(Config(self.batch_queue.get()))
            self.index = (self.index + 1) % self.config.limit_batch_count
            return Config(self.batch_list[self.index])
        else:
            return Config(self.batch_queue.get())
    def __del__(self):
        self.exit.set()
        for w in self.workers:
            w.join()
Author: WarBean | Project: MLUtil | Lines of code: 30 | Source file: data_loader.py


Example 16: run

After all protein ID chunks have been pushed onto the input queue, done_event is set to tell the ProteinDigestingProcess workers that no more work is coming, and they are then joined:

    def run(self):
        logger = self.ipc_logger()
        input_queue = Queue(20 * self.n_processes)
        done_event = Event()
        processes = [
            ProteinDigestingProcess(
                self.connection, self.hypothesis_id, input_queue,
                self.digestor, done_event=done_event,
                message_handler=logger.sender()) for i in range(
                self.n_processes)
        ]
        protein_ids = self.protein_ids
        i = 0
        n = len(protein_ids)
        chunk_size = 2
        interval = 30
        for process in processes:
            input_queue.put(protein_ids[i:(i + chunk_size)])
            i += chunk_size
            process.start()

        last = i
        while i < n:
            input_queue.put(protein_ids[i:(i + chunk_size)])
            i += chunk_size
            if i - last > interval:
                self.log("... Dealt Proteins %d-%d %0.2f%%" % (
                    i - chunk_size, min(i, n), (min(i, n) / float(n)) * 100))
                last = i

        done_event.set()
        for process in processes:
            process.join()
        logger.stop()
Author: mobiusklein | Project: glycan_profiling | Lines of code: 34 | Source file: peptide_permutation.py
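
The done_event here is a broadcast: one producer sets it once, and every consumer process eventually sees it and stops pulling from the queue. A minimal sketch of that producer/consumer shutdown pattern, with placeholder tasks instead of protein digestion:

import queue
from multiprocessing import Event, Process, Queue


def consumer(tasks, done_event):
    while True:
        try:
            item = tasks.get(timeout=0.5)
        except queue.Empty:
            # Exit only once the producer has finished and the queue looks drained.
            if done_event.is_set():
                break
            continue
        print("processed", item)


if __name__ == "__main__":
    tasks = Queue()
    done_event = Event()
    workers = [Process(target=consumer, args=(tasks, done_event))
               for _ in range(3)]
    for w in workers:
        w.start()

    for i in range(10):
        tasks.put(i)

    done_event.set()      # no more tasks will be enqueued
    for w in workers:
        w.join()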


Example 17: DataLoaderOnTheGround

As in example 15, an exit Event set in __del__ shuts down the task distributor and the worker processes before they are joined:

class DataLoaderOnTheGround():
    def __init__(self, config):
        default_config = Config(proc_count = 4)
        self.config = default_config(**config)
        self.exit = Event()
        self.task_list = config.task_list
        self.task_queue = Queue(maxsize = 10)
        self.batch_queue = Queue(maxsize =  10)
        self.workers = []
        self.distributor = Process(target = task_distributor, args = (self,))
        for _ in range(self.config.proc_count):
            self.workers.append(Process(target = config.worker, args = (self,)))

        self.distributor.daemon = True
        self.distributor.start()
        for w in self.workers:
            w.daemon = True
            w.start()
    def next_batch(self):
        return Config(self.batch_queue.get())
    def __del__(self):
        self.exit.set()
        self.distributor.join()
        for w in self.workers:
            w.join()
Author: WarBean | Project: MLUtil | Lines of code: 25 | Source file: data_loader.py


Example 18: single_output

The consumer loop keeps draining the output queue and printing results until the caller sets stop_event:

def single_output(stop_event: Event):
    print("single output get queue:")
    sum_limit = 1000
    counter = 0
    manager, output_q = get_queue_client(QueueManager.MachineSettingCrawler, QueueManager.Method_Whois_Input)
    while not stop_event.is_set():
        try:
            while not output_q.empty() or not stop_event.is_set():
                result = output_q.get(False, 1)
                counter += 1
                if isinstance(result, list):
                    for item in result:
                        print("server queue output:", str(item), "count:", counter)
                else:
                    # print(result)
                    pass
                if counter/sum_limit > 0 and counter % sum_limit==0:
                    print("current output count is:", counter)
                time.sleep(0.000001)
        except Exception as ex:
            pass
            # manager, output_q = get_queue_client(QueueManager.MachineSettingCrawler, QueueManager.Method_Whois_Output)
        finally:
            print("going to sleep.")
            time.sleep(1)
Author: paulnaoki | Project: DomainFinderSrcUniversal | Lines of code: 25 | Source file: SiteCheckerTest.py


Example 19: test_recycle

start_event and stop_event drive the Sentinel's life cycle; a threading.Timer sets stop_event after three seconds so the cluster shuts itself down while the test checks worker recycling and the save limit:

def test_recycle(r):
    # set up the Sentinel
    list_key = 'test_recycle_test:q'
    async('django_q.tests.tasks.multiply', 2, 2, list_key=list_key, redis=r)
    async('django_q.tests.tasks.multiply', 2, 2, list_key=list_key, redis=r)
    async('django_q.tests.tasks.multiply', 2, 2, list_key=list_key, redis=r)
    start_event = Event()
    stop_event = Event()
    # override settings
    Conf.RECYCLE = 2
    Conf.WORKERS = 1
    # set a timer to stop the Sentinel
    threading.Timer(3, stop_event.set).start()
    s = Sentinel(stop_event, start_event, list_key=list_key)
    assert start_event.is_set()
    assert s.status() == Conf.STOPPED
    assert s.reincarnations == 1
    async('django_q.tests.tasks.multiply', 2, 2, list_key=list_key, redis=r)
    async('django_q.tests.tasks.multiply', 2, 2, list_key=list_key, redis=r)
    task_queue = Queue()
    result_queue = Queue()
    # push two tasks
    pusher(task_queue, stop_event, list_key=list_key, r=r)
    pusher(task_queue, stop_event, list_key=list_key, r=r)
    # worker should exit on recycle
    worker(task_queue, result_queue, Value('f', -1))
    # check if the work has been done
    assert result_queue.qsize() == 2
    # save_limit test
    Conf.SAVE_LIMIT = 1
    result_queue.put('STOP')
    # run monitor
    monitor(result_queue)
    assert Success.objects.count() == Conf.SAVE_LIMIT
    r.delete(list_key)
Author: sebasmagri | Project: django-q | Lines of code: 35 | Source file: test_cluster.py


Example 20: testThreadChecker

The stop_event created here is handed to the single_output process from example 18 and set once the timeout expires, so the output consumer stops while the crawler is shut down:

    def testThreadChecker(self):
        stop_event = Event()
        link = "munichre.com"
        checker = SiteThreadChecker(full_link=link, thread_pool_size=3, max_page=3000, max_level=10)

        def crawl():
            checker.crawling()

        queue_server_t = Process(target=run_queue_server)
        queue_server_t.start()
        output_t = Process(target=single_output, args=(stop_event,))
        output_t.start()
        # link = "http://sweetlifebake.com/#axzz3t4Nx7b7N"
        crawl_t = Thread(target=crawl)
        crawl_t.start()
        timeout = 1000
        counter = 0
        while counter < timeout:
            time.sleep(1)
            counter += 1
        print("is going to sudden death.")
        stop_event.set()
        checker.sudden_death()
        if crawl_t.is_alive():
            crawl_t.join()
        output_t.terminate()
        queue_server_t.terminate()

        print("finished")
Author: paulnaoki | Project: DomainFinderSrcUniversal | Lines of code: 29 | Source file: SiteCheckerTest.py



Note: the multiprocessing.Event class examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their authors; copyright of the source code remains with the original authors, and redistribution and use should follow the corresponding project licenses. Do not republish without permission.

