
Python multiprocessing.JoinableQueue Class Code Examples


This article collects typical usage examples of Python's multiprocessing.JoinableQueue class. If you are looking for concrete answers to questions like "what is JoinableQueue for?" and "how is JoinableQueue used in practice?", the curated examples below should help.



The following presents 20 code examples of the JoinableQueue class, listed by popularity.
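Before the collected examples, here is a minimal, self-contained sketch of the producer/consumer pattern most of them build on (the worker body and item values are illustrative only): every get() is paired with a task_done(), a None "poison pill" tells each worker to exit, and join() blocks until all enqueued items have been marked done.

from multiprocessing import JoinableQueue, Process

def worker(queue):
    # Consume items until the poison pill (None) arrives
    while True:
        item = queue.get()
        if item is None:
            queue.task_done()
            break
        print('processing', item)
        queue.task_done()  # mark this item finished so join() can return

if __name__ == '__main__':
    queue = JoinableQueue()
    workers = [Process(target=worker, args=(queue,)) for _ in range(2)]
    for p in workers:
        p.start()
    for item in range(10):
        queue.put(item)
    for _ in workers:
        queue.put(None)  # one poison pill per worker
    queue.join()  # blocks until every put() has a matching task_done()
    for p in workers:
        p.join()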

Example 1: queue_info

def queue_info(iters=()):
    """Build a JoinableQueue pre-loaded with one work item per filename."""
    work = JoinableQueue()
    for filename in iters:
        work.put(obj=filename)
    time.sleep(1)  # give the queue's feeder thread time to flush
    return work
Author: nubeabierta | Project: turbolift | Source: executable.py


Example 2: main

def main():
    num_page = 6000
    num_processes = 60
    num_works = num_page / num_processes  # pages per worker (integer division in Python 2)
    q = JoinableQueue()
    final_set = set()

    # Start one worker process per slice of pages
    for index in xrange(1, num_processes + 1):
        p = Process(target=fetch_feature, args=(q, index, num_works))
        p.start()

    # Merge the result set produced by each worker
    for index in xrange(1, num_processes + 1):
        final_set = final_set.union(q.get())

    result_file = open('result.out', 'w')
    for feature in final_set:
        print feature
        result_file.write(feature + '\n')
    result_file.close()
    print len(final_set)
Author: bbiiggppiigg | Project: urcosme_crawler | Source: feature_collector.py


Example 3: emailSubsystem

class emailSubsystem(object):
    def __init__(self):
        ### will move to Celery eventually; with Celery, the app would be able to periodically
        # wake up and check replyQueue to see which emails were sent, which were not,
        # and what to do about them ...

        self.emailQueue = JoinableQueue()
        self.replyQueue = JoinableQueue()

        self.worker = Process(target=sendEmailWorker, args=(self.emailQueue, self.replyQueue))

    def start(self):
        # temporarily comment out starting a new process, as it seems to leave zombies
        # and causes the app not to start once the max process limit is reached.
        #self.worker.start()
        return

    def shutdown(self):
        # post the poison pill,
        # wait for the queue to be drained (i.e. join on emailQueue),
        # then wait for the worker process to exit (i.e. join on worker)

        self.emailQueue.put(None)
        self.emailQueue.join()
        self.worker.join()
Author: haribcva | Project: the_library_app | Source: library.py
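The sendEmailWorker function is not included in this snippet. A plausible sketch of it, following the poison-pill contract described in shutdown() (the body below is an assumption, not the original source):

def sendEmailWorker(emailQueue, replyQueue):
    # Hypothetical worker body inferred from shutdown()'s comments
    while True:
        email = emailQueue.get()
        if email is None:  # poison pill posted by shutdown()
            emailQueue.task_done()
            break
        try:
            # ... send the email here (SMTP call omitted) ...
            replyQueue.put(('sent', email))
        except Exception as exc:
            replyQueue.put(('failed', email, exc))
        finally:
            emailQueue.task_done()  # lets emailQueue.join() return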


Example 4: SimpleSynergeticServer

class SimpleSynergeticServer(Process):
    
    def __init__(self, authen_key):
        Process.__init__(self)
        self.__task_queue = JoinableQueue(1)
        self.__return_queue = Queue(1)
        self.serv = Listener(('', 40000), authkey=authen_key)
    
    def run(self):
        print 'Server Works'
        copy_reg.pickle(types.MethodType, _reduce_method)
        #Start the SynergeticProcess in daemon mode
        worker_p = SynergeticProcess(self.__task_queue, self.__return_queue)
        worker_p.daemon = True
        worker_p.start()          
        while True:
            print 'wait for Client'
            pool_conn = self.serv.accept()
            print 'connection Client Accepted'
            while True:
                print 'in LOOP Simple Server'
                #There is no need for task_id in this version
                try:
                    print 'Try to recv MSG'
                    unpickled_msg = pool_conn.recv()
                    print 'MSG Received'
                except Exception as e: # EOFError:
                    print 'Fail To Receive MSG:', e
                    break 
                if unpickled_msg[0] == 'MODULES':
                    self.import_mods( unpickled_msg[1] )
                    ret = 'MODULES-READY'
                else:    
                    self.__task_queue.put(unpickled_msg)
                    ret = self.__return_queue.get()
                try:
                    print 'SEND RESPONSE'
                    try:
                        pool_conn.send( ret )
                    except EOFError:
                        print 'SENT TO POOL FAILED'
                    print 'RESPONSE SENT ', ret
                except EOFError:
                    break
            pool_conn.close()
    
    def import_mods(self, mods_d):
        for mod_name, mod_bytecode in mods_d.items():
            try:
                fobj = open(mod_name + ".pyc", 'wb')
            except Exception as e:
                print("Synergeticprocessing.SimpleServer --> Module file error: %s" % e)
            else:
                # Write the bytecode and close the file only if open() succeeded
                try:
                    fobj.write( mod_bytecode )
                finally:
                    fobj.close()
        for mod in mods_d:
            __import__( mod )
            print 'imported ', mod
Author: dpritsos | Project: synergeticprocessing | Source: synergeticservers.py
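For context, a client for this server would connect with multiprocessing.connection.Client and follow the message layout seen in the recv/send loop above; the sketch below is inferred from that loop and is not part of the original project:

from multiprocessing.connection import Client

conn = Client(('localhost', 40000), authkey=authen_key)
conn.send(('MODULES', {}))           # register extra modules (none here)
print(conn.recv())                   # expect 'MODULES-READY'
conn.send(('task-name', 'payload'))  # anything else is forwarded to the task queue
print(conn.recv())                   # result taken from the return queue
conn.close()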


Example 5: evaluate

def evaluate(points,meshToBasis,kernel,quadRule,coeffs,nprocs=None):
    """Evaluate a kernel using the given coefficients"""


    if nprocs is None: nprocs = cpu_count()

    inputQueue=JoinableQueue()

    nelements=meshToBasis.nelements

    for elem in meshToBasis: inputQueue.put(elem)

    buf=sharedctypes.RawArray('b',len(points[0])*numpy.dtype(numpy.complex128).itemsize)
    result=numpy.frombuffer(buf,dtype=numpy.complex128)
    result[:]=numpy.zeros(1,dtype=numpy.complex128)

    time.sleep(.5)
    workers=[]

    for id in range(nprocs):
        worker=EvaluationWorker(points,kernel,quadRule,coeffs,inputQueue,result)
        worker.start()
        workers.append(worker)


    inputQueue.join()
    for worker in workers: worker.join()

    return result.copy()
Author: tbetcke | Project: PyBEM2D | Source: evaluation.py


Example 6: getdata_multiprocess

    def getdata_multiprocess(self, task_funcsiter=None, task_funcsiterparas={},
                             task_funcsconst=None, task_funcsconstparas={}, processnum=None,
                             threadnum=2):
        def _start_processes(taskqueue, resultqueue, taskqueue_lk, task_funcsconst,
                             task_funcsconstparas, processnum, threadnum):
            for i in range(processnum):
                p = Process(target=self.multiprocess_task,
                            args=(taskqueue, resultqueue, taskqueue_lk, threadnum,
                                  task_funcsconst, task_funcsconstparas),
                            name='P' + str(i))
                p.daemon = True
                p.start()

        processnum = processnum if processnum else multiprocessing.cpu_count()
        # queue used to hand tasks to the workers
        taskqueue = JoinableQueue()
        # condition guarding task writes and waking idle workers
        taskqueue_lk = multiprocessing.Condition(multiprocessing.Lock())
        # queue used to hand results back
        resultqueue = Queue()

        _start_processes(taskqueue, resultqueue, taskqueue_lk, task_funcsconst,
                         task_funcsconstparas, processnum, threadnum)
        # enqueue the tasks and wake the worker processes
        if task_funcsconst is None:
            self._put_tasks(zip(task_funcsiter, task_funcsiterparas), taskqueue, taskqueue_lk)
        else:
            self._put_tasks(task_funcsiterparas, taskqueue, taskqueue_lk)
        logger.info('main join!')
        taskqueue.join()
        logger.info('main end!')
        return self._get_results(resultqueue)
Author: rainwu | Project: stockdata | Source: StockDataProc1.py


Example 7: parallelPrepareImg

def parallelPrepareImg(img, info, name, idx):
  # Make Color Image
  if img.ndim == 2:
    img = np.tile(img[:, :, np.newaxis], (1, 1, 3))
  elif img.shape[2] == 4:
    img = img[:, :, :3]
  # Prepare processes
  numProcs = 3
  taskQueue = JoinableQueue()
  resultQueue = ProcQueue()
  processes = []
  for i in range(numProcs):
    t = Process(target=singleWindowProcess, args=(taskQueue, resultQueue, img))
    t.daemon = True
    t.start()
    processes.append(t)
  j = 0
  # Add tasks to the queue
  for b in info:
    idx.write(b[4])
    taskQueue.put( (b,j) )
    j += 1
  for i in range(len(processes)):
    taskQueue.put('stop')
  # Collect results
  data = np.zeros([len(info), 227, 227, 3])
  retrieved = 0
  while retrieved < len(info):
    j,win = resultQueue.get()
    data[j,:,:,:] = win
    retrieved += 1
  # Subtract mean and return
  data -= imagenet.IMAGENET_MEAN[14:241,14:241,:]
  return data.swapaxes(2, 3).swapaxes(1, 2)
Author: jccaicedo | Project: localization-agent | Source: extractCNNFeaturesFullImage.py


Example 8: task_writer

def task_writer(task: JoinableQueue):
    for n in News.objects.all()[:50].iterator():
        task.put(n)

    for i in range(PROCESS_NUM):
        task.put("end")
    print("task writer ends")
Author: aregina | Project: news_project | Source: multiprocessingTest.py


Example 9: find_vocabulary

def find_vocabulary(data_dir, stats_dir, category, min_num_images, save_description):
    print "Start find vocabulary"
    filequeue = JoinableQueue()
    photoqueue = Queue()

    init_dict = initialize_variables(None, None, False)

    # Create new processes
    num_processes = cpu_count()
    temp_dir = os.path.join(stats_dir, "database_temp", "vocab", category)
    if not os.path.exists(temp_dir):
        os.makedirs(temp_dir)
    processes = [FindVocabularyProcess(filequeue, photoqueue, init_dict, 30.0, num_processes, temp_dir, category) for i in xrange(num_processes)]
    for p in processes:
        p.start()

    #Add the files to the process queue
    add_files_to_queue(data_dir, category, filequeue)
    #Add a poison pill for each process
    for i in xrange(num_processes):
        filequeue.put("Stop")

    for p in processes:
        p.join()

    merge_vocabulary_files(data_dir, temp_dir, min_num_images, save_description)

    print "Removing temp files"
    shutil.rmtree(temp_dir)

    print "Done with find vocabulary"
Author: crmauceri | Project: VisualCommonSense | Source: get_photo_meta_multiprocess.py


Example 10: save_transaction_list

def save_transaction_list(data_dir, stats_dir, category, concept_vocabulary, save_description):
    print "Start saving transaction list"
    filequeue = JoinableQueue()

    concept_vocabulary_list, concept_vocabulary_freq = zip(*concept_vocabulary)
    init_dict = initialize_variables(concept_vocabulary_list, None, True)

    # Create new processes
    temp_dir = os.path.join(stats_dir, "transaction_list")
    if not os.path.exists(temp_dir):
        os.makedirs(temp_dir)
    else:
        print "todo"
    lock = Lock()
    num_processes = cpu_count()
    processes = [TransactionListProcess(filequeue, init_dict, 30, num_processes, temp_dir, save_description, lock) for i in xrange(num_processes)]
    for p in processes:
        p.start()

    #Add the files to the process queue
    add_files_to_queue(data_dir, category, filequeue)
    #Add a poison pill for each process
    for i in xrange(num_processes):
        filequeue.put("Stop")

    for p in processes:
        p.join()

    print "Removing temp files"
    shutil.rmtree(temp_dir)

    print "Done with saving transaction list"
Author: crmauceri | Project: VisualCommonSense | Source: get_photo_meta_multiprocess.py


Example 11: scheduler

def scheduler(db,category):
    task=JoinableQueue()
    for i in range(cpu_count()):
        pid=os.fork()
        if pid==0:
            consumer(category,task)
            os._exit(0)  # keep the child from falling through into the parent's code
            # print('this line is never reached')
        elif pid<0:
            logging.error('failed to fork a child process')

    with ThreadPoolExecutor() as executor:
        cursor = db['image_match_result_{}'.format(category)].find(
            {'$or': [{'robot_processed': False}, {'robot_processed': {'$exists': False}}]}, 
            {'_id': 1, 'b_image_url': 1, 'c_image_url': 1}
        )
        for item in cursor:
            item['mark'] = True  # mark as wrong (mismatch)
            executor.submit(producer, item, task)
        cursor = db['item_match_result_{}'.format(category)].find(
            {'$or': [{'robot_processed': False}, {'robot_processed': {'$exists': False}}]}, 
            {'_id': 1, 'b_image_url': 1, 'c_image_url': 1}
        )
        for item in cursor:
            item['mark'] = False  # mark as right (match)
            executor.submit(producer, item, task)
    task.join()
    os.kill(0,signal.SIGKILL)
Author: xxoome | Project: collector | Source: robot.py


Example 12: test_basic

def test_basic():
    in_queue = JoinableQueue()

    algolia_reader = Algoliaio("MyAppID", "MyKey", 1000)
    algolia_reader.scan_and_queue(in_queue, p_index="INT_Rubriques",p_query=None, p_connect_timeout=30, p_read_timeout=60)

    assert in_queue.qsize() > 2600
Author: GalakFayyar | Project: TabordNG | Source: test_algolia.py


Example 13: __init__

    def __init__(self,
                 network_retries=SynchronousScanner.DEFAULT_NETWORK_RETRIES,
                 network_timeout=SynchronousScanner.DEFAULT_NETWORK_TIMEOUT,
                 max_processes_nb=_DEFAULT_MAX_PROCESSES_NB,
                 max_processes_per_hostname_nb=_DEFAULT_PROCESSES_PER_HOSTNAME_NB):
        # type: (Optional[int], Optional[int], Optional[int], Optional[int]) -> None
        """Create a scanner for running scanning commands concurrently using a pool of processes.

        Args:
            network_retries (Optional[int]): How many times SSLyze should retry a connection that timed out.
            network_timeout (Optional[int]): The time until an ongoing connection times out.
            max_processes_nb (Optional[int]): The maximum number of processes to spawn for running scans concurrently.
            max_processes_per_hostname_nb (Optional[int]): The maximum number of processes that can be used for running
                scans concurrently against a single server. A lower value will reduce the chances of DOS-ing the server.
        """
        self._network_retries = network_retries
        self._network_timeout = network_timeout
        self._max_processes_nb = max_processes_nb
        self._max_processes_per_hostname_nb = max_processes_per_hostname_nb

        # Create hostname-specific queues to ensure aggressive scan commands targeting this hostname are never
        # run concurrently
        self._hostname_queues_dict = {}
        self._processes_dict = {}

        self._task_queue = JoinableQueue()  # Processes get tasks from task_queue and
        self._result_queue = JoinableQueue()  # put the result of each task in result_queue
        self._queued_tasks_nb = 0
Author: pfeiffersz | Project: sslyze | Source: concurrent_scanner.py


Example 14: __init__

    def __init__(self, p_max_items_by_queue=50000, p_forkserver=False, p_log_every=10000):
        """Class creation"""
        if p_forkserver:
            mp.set_start_method('forkserver')

        self.readers = None
        self.writer = None
        self.writer_store_args = None
        self.process = None
        self.process_args = None
        if p_max_items_by_queue is None:
            self.in_queue = JoinableQueue()
            self.out_queue = JoinableQueue()
        else:
            self.in_queue = JoinableQueue(p_max_items_by_queue)
            self.out_queue = JoinableQueue(p_max_items_by_queue)

        self.counters = {
            'nb_items_processed': Value('i', 0),
            'nb_items_error': Value('i', 0),
            'nb_items_scanned': Value('i', 0),
            'nb_items_stored': Value('i', 0),
            'whole_storage_time': Value('f', 0),
            'bulk_storage_time': Value('f', 0),
            'whole_process_time': Value('f', 0),
            'real_process_time': Value('f', 0),
            'idle_process_time': Value('f', 0),
            'scan_time': Value('f', 0),
            'log_every': p_log_every
        }
Author: GalakFayyar | Project: TabordNG | Source: Swallow.py
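The counters above use multiprocessing.Value so that reader and writer processes can update shared totals safely. A minimal illustration of that pattern (the name mirrors the snippet; the usage is a sketch, not the original code):

from multiprocessing import Value

nb_items_processed = Value('i', 0)  # shared 32-bit int, initially 0

# inside any worker process:
with nb_items_processed.get_lock():  # each Value carries its own lock
    nb_items_processed.value += 1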


Example 15: cdp_no_split_single

def cdp_no_split_single(loaded_seq_list, loaded_seq_name_list,
                        ref_file,
                        nt, cores):
    """
    Aligns a single SRNA_seq object to multiple refseq seqs in a Ref object
    at a time.  No splitting of read counts.
    """

    refs = RefSeq()
    refs.load_ref_file(ref_file)
    print(colored("------------------ALIGNING READS------------------\n", 'green'))

    workers = cores
    work_queue = JoinableQueue()
    processes = []
    mgr = Manager()
    count = 0
    counts_by_ref = mgr.dict()  # {header:[count1, count2,.......]}
    for header, seq in refs:
        work_queue.put((header, seq,))
        count += 1
        if count % 10000 == 0:
            _cdp_no_split_single_queue(counts_by_ref, loaded_seq_list, nt, processes, work_queue, workers)
    _cdp_no_split_single_queue(counts_by_ref, loaded_seq_list, nt, processes, work_queue, workers)

    _cdp_single_output(counts_by_ref.copy(), loaded_seq_name_list, ref_file, nt)
Author: Carroll-Lab | Project: scram | Source: cdp.py


Example 16: run

    def run(self):

        # Changes the process name shown by ps for instance
        setProcTitle ("agentcluster master [version: %s] [monitoring: %d seconds]" % (__version__,self.monitoring_period) );

        try:
            logger.info ( 'Agent cluster server starting' );

            logger.info ( 'Configurations will be scanned in directories:' );
            for directory in confdir.data:
                logger.info ( '  o %s', os.path.abspath(directory) );

            self.watchdog = Watchdog(self.monitoring_period)
            self.watchdog.start()

            # Generate a deliberate deadlock to enter sleep mode: the queued
            # item is never marked with task_done(), so join() blocks until
            # an external signal breaks the deadlock
            logger.info ( 'Agent cluster server started' );
            queue = JoinableQueue()
            queue.put(object());
            queue.join();

        except KeyboardInterrupt:
            logger.info ( 'Agent cluster server interrupted' );
        except Exception:
            logger.error ( 'Exception caught in main process: %s', sys.exc_info()[1] );
            logger.debug ( "", exc_info=True );
        finally:
            # First stop the monitoring to avoid restarting killed agents
            if self.watchdog is not None:
                self.watchdog.shutdown = True
                self.watchdog.join()
            logger.info ( 'Agent cluster server end' );
            logging.shutdown()
Author: GillesBouissac | Project: agentcluster | Source: agentclusterd.py


Example 17: processData

def processData(imageList,featuresDir,featuresExt,task):
  numProcs = 8
  taskQueue = JoinableQueue()
  resultQueue = Queue()
  processes = []
  for i in range(numProcs):
    t = Process(target=worker, args=(taskQueue, resultQueue, task))
    t.daemon = True
    t.start()
    processes.append(t)

  for img in imageList:
    filename = featuresDir+'/'+img+'.'+featuresExt
    idxFile = re.sub(r'\..+$',r'.idx',filename)
    content = open(filename)
    index = open(idxFile)
    taskQueue.put( (img,content.read(),index.read()) )
    #taskQueue.put( (img,filename,idxFile) )
    index.close()
    content.close()
  for i in range(len(processes)):
    taskQueue.put('stop')

  results = []
  retrieved = 0
  while retrieved < len(imageList):
    data = resultQueue.get()
    retrieved += 1
    if data != 'Ignore':
      results.append(data)
  return results
Author: jccaicedo | Project: localization-agent | Source: dataProcessor.py


Example 18: batchProcess

	def batchProcess(self, arr_to_enque, work_method, t=False):
		q = JoinableQueue()
		output = JoinableQueue()
		extra = JoinableQueue()
		third = JoinableQueue()
		if t:
			args = (q, output, extra, third)
		else:
			args = (q, output, extra)
		for obj in arr_to_enque:
			q.put(obj)
		processes = [Process(target=work_method, args=args, name=str(obj)) for obj in arr_to_enque]
		for p in processes:
			p.start()
		for p in processes: 
			p.join(30)
			if p.is_alive():
				print "ERROR JOINING PROCESS FOR: ", p.name
				p.terminate()
				raise Exception("Goal Conversion Error:", (self.account_id, self.project_id, exp_id, var_ids))
		print "end batch process"
		if t:
			return (output, extra, third)
		else:
			return (output, extra)
Author: darwishoptly | Project: ResultsExporter | Source: OptlyData.py


Example 19: aggress

def aggress(map):
    global startMap
    startMap = map

    #print "Regressing..."
    state = State()

    jobs = []

    longestSolution = Value('d', 20)
    highestScore = Value('d', 0)

    queue = JoinableQueue()

    manager = Manager()

    d = manager.dict()
    d.clear()

    l = RLock()

    if multiProc:
        queue.put((state, map, 1))

        for i in range(numProcs):
           p = Process(target = multiMain, args=(startMap, l, d, queue,highestScore))
           p.start()

        queue.join()
    else:
        a(l, highestScore, d, None, state, map, 1)
Author: aelaguiz | Project: icfp2012 | Source: aggress.py


Example 20: Mothership

class Mothership(object):

    """ Monitor of producer and consumers """

    def __init__(self, producer, consumers):
        self._queue = JoinableQueue()

        self._producer_proxy = ProducerProxy(self._queue, producer)
        self._consumer_pool = list(ConsumerProxy(self._queue, consumer) \
                                   for consumer in consumers)

    def start(self):
        """ Start working """
        logger.info('Starting Producers'.center(20, '='))
        self._producer_proxy.start()

        logger.info('Starting Consumers'.center(20, '='))
        for consumer in self._consumer_pool:
            consumer.start()

        self._producer_proxy.join()
        self._queue.join()

    def __enter__(self):
        return self

    def __exit__(self, types, value, tb):
        return
开发者ID:Kotaimen,项目名称:mason,代码行数:28,代码来源:workq.py



Note: The multiprocessing.JoinableQueue examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective authors; copyright remains with the original authors, and reuse or redistribution is subject to each project's license. Please do not repost without permission.

