
Python pool.apply_async Function Code Examples


This article collects typical usage examples of the apply_async function from Python's multiprocessing.pool module. If you are unsure what apply_async does, how to call it, or what real-world usage looks like, the curated examples below should help.



Twenty code examples of apply_async are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
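Before diving in, here is a minimal, self-contained sketch of the basic pattern (the worker function and values are illustrative, not taken from any of the projects below): submit tasks with apply_async, close and join the pool, then collect results from the returned AsyncResult objects.

import multiprocessing

def square(x):
    """Worker function; defined at module level so it can be pickled."""
    return x * x

if __name__ == '__main__':
    pool = multiprocessing.Pool(processes=4)
    # apply_async returns an AsyncResult immediately instead of blocking like apply()
    results = [pool.apply_async(square, (i,)) for i in range(10)]
    pool.close()  # no more tasks will be submitted
    pool.join()   # wait for all workers to finish
    print([r.get() for r in results])  # [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]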

Example 1: process_iteration

def process_iteration(Ns, ps, landscape, config):
    output_dir = config.output_dir + config.ext

    if config.background_image is not None:
        background_path = config.input_dir + "/" + config.background_image
    else:
        background_path = None

    # Create a point to hold the iteration
    p = Point()
    p.add_iteration()

    # draw_population(Ns[0], landscape, ps.totalK, 0, output_dir, 2.0, background_path)

    # A pool is only created when drawing is enabled
    pool = mp.Pool(config.num_processors) if config.display else None

    for t in xrange(min(ps.max_time_steps, len(Ns))):
        if config.display:
            pool.apply_async(draw_population, [Ns[t], landscape, ps.totalK, t, output_dir, 2.0, background_path])

        p.add_time_step([t] + population_statistics(ps, landscape, Ns[t]))

    # Write the iteration results to file as a trajectory containing a single point
    write_trajectories([Trajectory(points=[p])], None, ps.sentinels, output_dir + "/results.txt")

    if config.save_time_steps:
        np.savez(output_dir + "/populations.npz", *Ns)

    # The original closed and joined the pool unconditionally, which raises a
    # NameError when config.display is False; only do so if a pool was created
    if pool is not None:
        pool.close()
        pool.join()
Developer: saamrenton | Project: GMBI | Lines: 32 | Source: gmbiIO.py


Example 2: pass_data_to_search

def pass_data_to_search(symbol, path, start_time_seconds, end_time_seconds, date, time_interval, tt, code_path):
    file_name = path + 'b' + date + '.l.bz2'

    # Each record in the file is 69 bytes wide; divide the rows evenly across the workers
    size = os.path.getsize(file_name)
    total_rows = size // 69
    total_processes1 = 40
    slots = total_rows // total_processes1

    # Process the file in parallel, one chunk per worker
    pool = multiprocessing.Pool(total_processes1)
    for i in range(total_processes1):
        pool.apply_async(mapper, args=(i, slots, total_processes1, symbol, start_time_seconds, end_time_seconds, date, time_interval, file_name, tt, code_path))
    pool.close()
    pool.join()
Developer: kaush-utkarsh | Project: init-py-crawlers | Lines: 28 | Source: merged.py


Example 3: _listArtifacts

    def _listArtifacts(self, urls, gavs):
        """
        Loads maven artifacts from list of GAVs and tries to locate the artifacts in one of the
        specified repositories.

        :param urls: repository URLs where the given GAVs can be located
        :param gavs: List of GAVs
        :returns: Dictionary where the index is a MavenArtifact object and the value is its repo root URL.
        """
        def findArtifact(gav, urls, artifacts):
            artifact = MavenArtifact.createFromGAV(gav)
            for url in urls:
                if maven_repo_util.gavExists(url, artifact):
                    #Critical section?
                    artifacts[artifact] = ArtifactSpec(url, [ArtifactType(artifact.artifactType, True, set(['']))])
                    return

            logging.warning('Artifact %s not found in any url!', artifact)

        artifacts = {}
        pool = ThreadPool(maven_repo_util.MAX_THREADS)
        for gav in gavs:
            pool.apply_async(findArtifact, [gav, urls, artifacts])

        # Close the pool and wait for the workers to finish
        pool.close()
        pool.join()

        return artifacts
Developer: jboss-eap | Project: maven-repository-builder | Lines: 29 | Source: artifact_list_builder.py


Example 4: papply

def papply( f, seq, pool_size=cores, callback=None ):
    """
    Apply the given function to each element of the given sequence, optionally invoking the given
    callback with the result of each application. Do so in parallel, using a thread pool no
    larger than the given size.

    :param callable f: the function to be applied

    :param Sequence seq: the input sequence

    :param int pool_size: the desired pool size; if absent, the number of CPU cores will be used.
            The actual pool size may be smaller if the input sequence is small.

    :param callable callback: an optional function to be invoked with the return value of f

    >>> l=[]; papply( lambda a, b: a + b, [], 1, callback=l.append ); l
    []
    >>> l=[]; papply( lambda a, b: a + b, [ (1, 2) ], 1, callback=l.append); l
    [3]
    >>> l=[]; papply( lambda a, b: a + b, [ (1, 2), (3, 4) ], 1, callback=l.append ); l
    [3, 7]
    """
    if pool_size == 1:
        for args in seq:
            result = f( *args )
            if callback is not None:
                callback( result )
    else:
        with thread_pool( min( pool_size, len( seq ) ) ) as pool:
            for args in seq:
                pool.apply_async( f, args, callback=callback )
Developer: kushaldas | Project: cgcloud | Lines: 31 | Source: util.py


Example 5: main

def main():
    if len(sys.argv) < 3:
        print("Syntax:")
        print(
            "  {} [min_yeses] [out_csv_file]".format(
                sys.argv[0]
            )
        )
        sys.exit(1)

    min_yeses = eval(sys.argv[1])  # parsed as a Python expression, e.g. "[1, 2, 3]"; ast.literal_eval would be safer
    out_csv_file = sys.argv[2]

    pconfig = config.PaperworkConfig()
    pconfig.read()

    src_dir = pconfig.settings['workdir'].value
    print("Source work directory : {}".format(src_dir))
    src_dsearch = docsearch.DocSearch(src_dir)
    src_dsearch.reload_index()

    nb_threads = multiprocessing.cpu_count()
    pool = multiprocessing.pool.ThreadPool(processes=nb_threads)

    with open(out_csv_file, 'a', newline='') as csvfile:
        csvwriter = csv.writer(csvfile)
        for min_yes in min_yeses:
            pool.apply_async(
                _run_simulation,
                (src_dsearch, min_yes, csvwriter,)
            )
        pool.close()
        pool.join()
    print("All done !")
Developer: jflesch | Project: paperwork | Lines: 34 | Source: simulate-workdir_3d.py


Example 6: create_process_pool

def create_process_pool(index):
    print(index)
    li = range(3)
    pool = multiprocessing.Pool(processes=len(li))
    for sub_index in li:
        pool.apply_async(print_process_index, (index, sub_index))
    pool.close()
    pool.join()
Developer: zeekvfu | Project: MindCache-blog-links | Lines: 8 | Source: multiprocessing_test.bug.py


Example 7: update_all

def update_all(opts):
    """Updates all menus"""
    pool = NoDaemonPool(processes=5)
    pool.apply_async(update_applications, (opts,))
    pool.apply_async(update_bookmarks, (opts,))
    pool.apply_async(update_recent_files, (opts,))
    pool.apply_async(update_devices, (opts,))
    pool.apply_async(update_rootmenu, (opts,))
    pool.close()
    pool.join()
Developer: ju1ius | Project: uxdgmenu | Lines: 10 | Source: daemon.py
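NoDaemonPool in the example above is not part of the standard library. A common way to build one, and plausibly what this project does (this is a sketch of the classic recipe, not the project's actual code), is to give Pool a Process subclass that is never daemonic, so the menu-update workers may spawn children of their own; note that recent Python versions route process creation through a context object, so the recipe needs adapting there.

import multiprocessing
import multiprocessing.pool

class NoDaemonProcess(multiprocessing.Process):
    # Always report daemon=False and ignore attempts to change it, so that
    # workers of this type are allowed to spawn their own child processes
    def _get_daemon(self):
        return False
    def _set_daemon(self, value):
        pass
    daemon = property(_get_daemon, _set_daemon)

class NoDaemonPool(multiprocessing.pool.Pool):
    Process = NoDaemonProcess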


Example 8: buildList

    def buildList(self):
        """
        Build the artifact "list" from sources defined in the given configuration.

        :returns: Dictionary described above.
        """
        priority = 0
        pool_dict = {}

        for source in self.configuration.artifactSources:
            priority += 1
            pool = pool_dict.setdefault(source['type'], ThreadPool(self.MAX_THREADS_DICT[source['type']]))
            pool.apply_async(self._read_artifact_source, args=[source, priority],
                             callback=self._add_result)

        for pool in pool_dict.values():
            pool.close()

        at_least_1_runs = True
        all_keys = list(range(1, len(self.configuration.artifactSources) + 1))  # materialized so it compares to the sorted list below
        finished = False
        while at_least_1_runs:
            for i in range(30):
                time.sleep(1)

                if not self.errors.empty():
                    for pool in pool_dict.values():
                        logging.debug("Terminating pool %s", str(pool))
                        pool.terminate()
                    finished = True
                    break

            at_least_1_runs = False
            if not finished:
                self.results_lock.acquire()
                # 'finished' is reused here to hold the sorted priorities completed so far
                finished = sorted(list(self.results.keys()))
                self.results_lock.release()
                if all_keys != finished:
                    logging.debug("Still waiting for priorities %s to finish", str(list(set(all_keys) - set(finished))))
                    at_least_1_runs = True

        for pool in pool_dict.values():
            if pool._state != multiprocessing.pool.TERMINATE:
                pool.join()

        if not self.errors.empty():
            raise RuntimeError("%i error(s) occured during reading of artifact list." % self.errors.qsize())

        return self._get_artifact_list()
Developer: jboss-eap | Project: maven-repository-builder | Lines: 49 | Source: artifact_list_builder.py


Example 9: papply

def papply( f, seq, pool_size=cores, callback=None ):
    """
    Apply the given function to each element of the given sequence, optionally invoking the given
    callback with the result of each application. Do so in parallel, using a thread pool no
    larger than the given size.

    :param callable f: the function to be applied

    :param Sequence seq: the input sequence

    :param int pool_size: the desired pool size; if absent, the number of CPU cores will be used.
    The actual pool size may be smaller if the input sequence is small. A pool size of 0 will make
    this function emulate the apply() builtin, i.e. f (and the callback, if provided) will be
    invoked serially in the current thread.

    :param callable callback: an optional function to be invoked with the return value of f

    >>> l=[]; papply( lambda a, b: a + b, [], pool_size=0, callback=l.append ); l
    []
    >>> l=[]; papply( lambda a, b: a + b, [ (1, 2) ], pool_size=0, callback=l.append); l
    [3]
    >>> l=[]; papply( lambda a, b: a + b, [ (1, 2), (3, 4) ], pool_size=0, callback=l.append ); l
    [3, 7]
    >>> l=[]; papply( lambda a, b: a + b, [], pool_size=1, callback=l.append ); l
    []
    >>> l=[]; papply( lambda a, b: a + b, [ (1, 2) ], pool_size=1, callback=l.append); l
    [3]
    >>> l=[]; papply( lambda a, b: a + b, [ (1, 2), (3, 4) ], pool_size=1, callback=l.append ); l
    [3, 7]
    >>> l=[]; papply( lambda a, b: a + b, [], pool_size=2, callback=l.append ); l
    []
    >>> l=[]; papply( lambda a, b: a + b, [ (1, 2) ], pool_size=2, callback=l.append); l
    [3]
    >>> l=[]; papply( lambda a, b: a + b, [ (1, 2), (3, 4) ], pool_size=2, callback=l.append ); l
    [3, 7]
    """
    __check_pool_size( pool_size )
    n = len( seq )
    if n:
        if pool_size == 0:
            for args in seq:
                result = f( *args )
                if callback is not None:
                    callback( result )
        else:
            with thread_pool( min( pool_size, n ) ) as pool:
                for args in seq:
                    pool.apply_async( f, args, callback=callback )
Developer: arkal | Project: cgcloud | Lines: 48 | Source: util.py


Example 10: func_wrapper

 def func_wrapper(*args, **kwargs):
     """Closure for function."""
     # 'item' is the decorated function and 'max_timeout' comes from the
     # enclosing timeout decorator's scope (see the sketch below)
     pool = multiprocessing.pool.ThreadPool(processes=1)
     async_result = pool.apply_async(item, args, kwargs)
     # get() raises a TimeoutError if execution exceeds max_timeout
     return async_result.get(max_timeout)
Developer: nguyenhoang857 | Project: coursesurfer | Lines: 7 | Source: sniper_server.py
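The fragment above is only the inner closure of a timeout decorator. A minimal sketch of the surrounding pattern (the enclosing functions are reconstructed here, not taken from the original project) looks like this:

import multiprocessing.pool

def timeout(max_timeout):
    """Decorator that aborts the wrapped call after max_timeout seconds."""
    def decorator(item):
        def func_wrapper(*args, **kwargs):
            pool = multiprocessing.pool.ThreadPool(processes=1)
            async_result = pool.apply_async(item, args, kwargs)
            # get() raises multiprocessing.TimeoutError if the call takes too long
            return async_result.get(max_timeout)
        return func_wrapper
    return decorator

@timeout(5.0)
def fetch_data():
    ...  # anything that might hang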


Example 11: run_trajectory

def run_trajectory(t, ps, landscape, ptv, num_iterations, num_processors):
    # Get the points in the trajectory
    points = t.points()

    # Determine the index of each unique point (sometimes points are equal due to rounding)
    uinds = [i for i, p in enumerate(points) if i == 0 or not p.equals(points[i - 1])]

    # Create a process pool, using as many processors as are available, or
    # are required to allow each point to run concurrently
    pool = mp.Pool(processes=min(num_processors, len(points)))

    results = []
    for i in uinds:
        # Modify the parameter set to match the current point
        psm = ps.copy()
        psm.modify_for_point(points[i], ptv)
        psm.convert_to_age_classes()

        # Launch a process to run the simulation(s) for the point. This modifies the point in place
        args = [points[i], psm, landscape, num_iterations, num_processors]
        results.append(pool.apply_async(run_iterations_for_point, args))

    pool.close()
    pool.join()

    # Merge the unique and non-unique points back together
    for i, r in zip(uinds, results):
        points[i] = r.get(None)

    # Return a new trajectory containing the results for each point
    return io.Trajectory(points=points)
Developer: saamrenton | Project: GMBI | Lines: 31 | Source: GMBI.py


Example 12: func_wrapper

 def func_wrapper(self, *args, **kwargs):
     """Closure for function."""
     # 'f' and 'max_timeout' are supplied by the enclosing timeout decorator;
     # pop the per-call override before kwargs are forwarded to f, otherwise
     # the worker may see (and choke on) the extra keyword
     timeout = kwargs.pop('timeout_max_timeout', max_timeout) or max_timeout
     pool = multiprocessing.pool.ThreadPool(processes=1)
     async_result = pool.apply_async(f, (self,) + args, kwargs)
     # raises a TimeoutError if execution exceeds max_timeout
     return async_result.get(timeout)
Developer: gitter-badger | Project: DeepClassificationBot | Lines: 7 | Source: bot.py


Example 13: compute_stats

def compute_stats(client_factory, db_names=None, table_names=None,
    continue_on_error=False, parallelism=multiprocessing.cpu_count()):
  """
  Runs COMPUTE STATS over the selected tables. The target tables can be filtered by
  specifying a list of databases and/or table names. If no filters are specified this will
  run COMPUTE STATS on all tables in all databases.

  parallelism controls the size of the thread pool to which compute_stats
  is sent.
  """
  logging.info("Enumerating databases and tables for compute stats.")

  pool = multiprocessing.pool.ThreadPool(processes=parallelism)
  futures = []
  with client_factory() as impala_client:
    all_dbs = set(name.split('\t')[0].lower() for name
        in impala_client.execute("show databases").data)
    selected_dbs = all_dbs if db_names is None else set(db_names)
    for db in all_dbs.intersection(selected_dbs):
      all_tables =\
          set([t.lower() for t in impala_client.execute("show tables in %s" % db).data])
      selected_tables = all_tables if table_names is None else set(table_names)
      for table in all_tables.intersection(selected_tables):
        # Submit the command to the thread pool
        futures.append(pool.apply_async(compute_stats_table,
            (client_factory, db, table, continue_on_error,)))
    # Wait for all stats commands to finish
    for f in futures:
      f.get()
Developer: apache | Project: incubator-impala | Lines: 29 | Source: compute_table_stats.py


Example 14: parallel_reduce

def parallel_reduce(func, iterable, processes=4, args=(), kwargs={}):
    # args and kwargs are accepted for API compatibility but unused
    comp_stack = list(iterable)
    pair_list = []

    pool = multiprocessing.pool.Pool(processes)

    while len(comp_stack) > 1:
        # Pop the stack off in pairs, leaving at most one unpaired element
        while len(comp_stack) > 1:
            pair_list.append((comp_stack.pop(), comp_stack.pop()))

        results = []
        while len(pair_list) > 0:
            pair = pair_list.pop()
            results.append(pool.apply_async(func, pair))

        # Busy-wait until every async result is ready; result.wait() would avoid spinning
        while True:
            if all([result.ready() for result in results]):
                break

        # Keep any unpaired element for the next round; reassigning comp_stack to the
        # results alone (as the original did) silently drops it for odd-length inputs
        comp_stack = comp_stack + [result.get() for result in results]

    # Note: the final value is returned as a one-element list
    return comp_stack
Developer: dbgoodman | Project: chiptools | Lines: 29 | Source: pll_reduce.py
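A quick usage sketch (the operands are illustrative, and parallel_reduce is assumed to live in an importable module): because the pairs run in worker processes, func must be picklable, e.g. a module-level function or operator.add. Note that the reduced value comes back as a one-element list.

from operator import add

if __name__ == '__main__':
    # Tree-reduces the list in parallel, two elements at a time
    print(parallel_reduce(add, [1, 2, 3, 4, 5]))  # [15]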


Example 15: main

def main():
  if len(sys.argv) > 1:
    _, pkg_name, pkg_version = sys.argv
    download_package(pkg_name, pkg_version)
    return

  pool = multiprocessing.pool.ThreadPool(processes=min(multiprocessing.cpu_count(), 4))
  results = []

  for requirements_file in REQUIREMENTS_FILES:
    # If the package name and version are not specified in the command line arguments,
    # download the packages listed in requirements.txt, which follows the standard
    # pip requirements syntax.
    for line in open(requirements_file):
      # A hash symbol ("#") represents a comment that should be ignored.
      line = line.split("#")[0]
      # A semicolon (";") specifies an additional condition for when the package
      # should be installed (for example a specific OS). We can ignore it and download
      # the package anyway, because the installation script (bootstrap_virtualenv.py)
      # takes it into account.
      l = line.split(";")[0].strip()
      if not l:
        continue
      pkg_name, pkg_version = l.split('==')
      results.append(pool.apply_async(
        download_package, args=[pkg_name.strip(), pkg_version.strip()]))

    for x in results:
      x.get()
Developer: apache | Project: incubator-impala | Lines: 29 | Source: pip_download.py


Example 16: _run_tests

  def _run_tests(self):
    pool = multiprocessing.pool.ThreadPool(processes=self.suite_concurrency)
    outstanding_suites = []
    for suite in self.suite_runners:
      suite.task = pool.apply_async(suite.run)
      outstanding_suites.append(suite)

    ret = True
    try:
      while len(outstanding_suites) > 0:
        for suite in list(outstanding_suites):
          if suite.timed_out():
            msg = "Task %s not finished within timeout %s" % (suite.name,
                suite.suite.timeout_minutes,)
            logging.error(msg)
            raise Exception(msg)
          task = suite.task
          if task.ready():
            this_task_ret = task.get()
            outstanding_suites.remove(suite)
            if this_task_ret:
              logging.info("Suite %s succeeded.", suite.name)
            else:
              logging.info("Suite %s failed.", suite.name)
              ret = False
        time.sleep(5)
    except KeyboardInterrupt:
      logging.info("\n\nDetected KeyboardInterrupt; shutting down!\n\n")
      raise
    finally:
      pool.terminate()
    return ret
Developer: twmarshall | Project: Impala | Lines: 32 | Source: test-with-docker.py


Example 17: from_carrays

def from_carrays(path, format_categories='bcolz', format_codes='bcolz', format_values='bcolz', parallel=True):
    assert os.path.exists(path), 'No path {}'.format(path)
    df_columns = glob.glob(os.path.join(path, '*'))
    df = dict()
    if parallel:
        pool = multiprocessing.pool.ThreadPool()
        results = []
        for i, k in enumerate(df_columns):
            p = pool.apply_async(_from_carray, args=(k,), kwds={'format_categories': format_categories, 'format_codes': format_codes, 'format_values': format_values})
            results.append(p)
        pool.close()
        pool.join()
        for x in results:
            meta, s = x.get()
            df[meta['name']] = s
    else:
        for i, k in enumerate(df_columns):
            meta, s = _from_carray(k, format_categories=format_categories, format_codes=format_codes, format_values=format_values)
            df[meta['name']] = s

    # NOTE: this is slow when we have non-categorical series, for reasons unknown
    with log.timedlogger('constructing dataframe from %s column dict' % len(df)):
        df = pandas.DataFrame(df)  # TODO: fast DataFrame constructor

    return df
Developer: cottrell | Project: notebooks | Lines: 25 | Source: bc.py


Example 18: _queue_job

 def _queue_job(self, pool, key, data_file, data_file_size):
     pool.apply_async(
         _fetch_and_process_chunk,
         [],
         {
             "app_config": self.config,
             "debug": self.debug,
             "data_file": data_file,
             "data_file_size": data_file_size,
             "download_progress_per_file": self.download_progress_per_file,
             "site": self.site,
             "pgdata": self.pgdata,
             "tablespaces": self.tablespaces,
         },
         lambda *args: self.job_completed(key),              # callback
         lambda exception: self.job_failed(key, exception),  # error_callback
     )
Developer: ohmu | Project: pghoard | Lines: 17 | Source: restore.py
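The two trailing lambdas above fill apply_async's callback and error_callback parameters (the latter is available on Python 3 pools). A stripped-down sketch of the same pattern, with hypothetical worker and handler names:

import multiprocessing.pool

def fetch(name):
    """Placeholder for the real chunk-processing work."""
    return name.upper()

def on_done(result):
    print("completed:", result)

def on_error(exc):
    print("failed:", exc)

if __name__ == '__main__':
    pool = multiprocessing.pool.ThreadPool(processes=4)
    for name in ["a", "b", "c"]:
        # callback fires with fetch's return value; error_callback with any raised exception
        pool.apply_async(fetch, (name,), callback=on_done, error_callback=on_error)
    pool.close()
    pool.join()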


Example 19: test_multi_own_ca

 def test_multi_own_ca(self):
     pool = multiprocessing.pool.ThreadPool(processes=5)
     threads = []
     for i in range(5):
         threads.append(pool.apply_async(issue_n_certs, ("ownca", range(5))))
     vals = []
     for t in threads:
         vals.extend(t.get())
     nt.assert_equal(sorted(vals), sorted(list(set(vals))))
Developer: b-long | Project: ezbake-platform-services | Lines: 9 | Source: test_ca.py


Example 20: test_multithread

    def test_multithread(self):
        ca = ezbakeca.EzbakeCA("threadingCA")
        pool = multiprocessing.pool.ThreadPool(processes=5)

        threads = []
        for i in range(5):
            threads.append(pool.apply_async(issue_n_certs, (ca, range(5))))
        vals = []
        for t in threads:
            vals.extend(t.get())
        nt.assert_equal(sorted(vals), sorted(list(set(vals))))
Developer: b-long | Project: ezbake-platform-services | Lines: 11 | Source: test_ca.py



Note: The multiprocessing.pool.apply_async examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors. Consult each project's license before distributing or using the code, and do not republish without permission.

