
Python multiprocessing.Pool Class Code Examples


This article collects typical usage examples of Python's multiprocessing.Pool class. If you are wondering what the Pool class does, how to use it, or what real-world Pool code looks like, the curated class examples below should help.



Twenty code examples of the Pool class are shown below, sorted by popularity by default.
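As a quick orientation before the examples, here is a minimal sketch of the typical Pool workflow: create the pool, distribute work with map, then let the context manager shut it down. The worker function and inputs are placeholders for illustration.

from multiprocessing import Pool

def square(x):
    # placeholder worker: any picklable, module-level function works
    return x * x

if __name__ == '__main__':  # guard required where child processes are spawned (e.g. Windows/macOS)
    with Pool(processes=4) as pool:
        results = pool.map(square, range(10))
    print(results)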

Example 1: start_crawlers

def start_crawlers(connector_class, num_processes=1):
    """
    Starts a spider process for each spider class in the project

    :param num_processes: the number of simultaneous crawling processes
    :param connector_class: the connector class that should be used by the
    spiders
    """
    spider_classes = pyjobs_crawlers.tools.get_spiders_classes()

    if num_processes == 0:
        connector = connector_class()
        with _get_lock('ALL') as acquired:
            if acquired:
                crawl(spider_classes, connector)
            else:
                print("Crawl process of 'ALL' already running")
            return

    # Split the spider_classes list into chunks of size num_processes
    spider_classes_chunks = list()
    for x in range(0, len(spider_classes), num_processes):
        spider_classes_chunks.append(spider_classes[x:x + num_processes])

    # Run each chunk in its own pool of crawling processes
    for spider_classes_chunk in spider_classes_chunks:
        process_params_chunk = [(spider_class, connector_class)
                                for spider_class in spider_classes_chunk]
        p = Pool(len(process_params_chunk))
        p.map(start_crawl_process, process_params_chunk)
Author: BenoitEchernier, Project: crawlers, Lines: 30, Source: run.py
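A design note on Example 1: each chunk gets a brand-new Pool that is never closed or joined, so worker processes linger until they are garbage-collected. Pool.map already splits its input across workers, so a single pool is usually enough. A minimal sketch of that alternative, reusing the example's names under that assumption:

# sketch only, not the project's code: one pool, shut down deterministically
process_params = [(spider_class, connector_class)
                  for spider_class in spider_classes]
with Pool(num_processes) as p:
    p.map(start_crawl_process, process_params)  # map distributes the work itself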


Example 2: crawl_recursive_threaded

def crawl_recursive_threaded(dirpath, ext):
    from database import indexer
    from database import utils
    from multiprocessing import Pool

    # convert to our infos
    cdir = indexer.DirInfo(dirpath, ext)
    cInfos = indexer.dirs_to_info(cdir.subfolders(), ext)

    # comment if you want a silent indexing
    print(cdir.to_string())

    # recursive pooled call
    # NOTE: child calls must not be pooled
    p = Pool(utils.Settings.config['processes'])
    infos = p.map(crawl_recursive, cInfos)
    p.close()

    # remove hierarchy
    dirInfos = [d for sublist in infos for d in sublist]
    dirInfos.append(cdir)

    print('I was crawling with %d processes' %
          utils.Settings.config['processes'])

    return dirInfos
Author: TUWien, Project: Benchmarking, Lines: 26, Source: crawler.py


Example 3: rc

def rc(rf, alphabet, numOfThreads):
	tryn=0
	counterTmp = 0
	printCounter = 1000
	listBasic = []
	if rf.endswith('.rar'):
		funcChosen = unrar
	elif rf.endswith('.zip') or rf.endswith('.7z') :
		funcChosen = zipFileUnzip
	for a in range(1,len(alphabet)+1):
		for b in itertools.product(alphabet,repeat=a):
			k="".join(b)
			k=re.escape(k)
			listBasic.append(k)
			tryn+=1
			if len(listBasic) == numOfThreads:
				pool = Pool(numOfThreads)
				pool.map_async(funcChosen, listBasic, callback = exitPass)
				pool.close()
				pool.join()  # wait for the batch to finish before reading resultPass
				if resultPass:
					timeWasted = time.time()-start
					print('Found! Password is ' + resultPass)
					print("It took " + str(round(time.time()-start, 3)) + " seconds")
					print("Speed: " + str(round(tryn/float(timeWasted), 2)) + " passwords/sec")
					print("Tried " + str(tryn) + " passwords")
					exit()
				listBasic = []
			counterTmp += 1
			if counterTmp >= printCounter:
				print('Trying combination number ' + str(tryn) + ':' + str(k))
				timeWasted = round(time.time()-start, 2)
				if timeWasted > 0:
					print("It took already " + str(timeWasted) + " seconds. Speed: " + str(round(tryn/float(timeWasted), 2)) + " passwords/sec")
				counterTmp = 0
Author: jonDel, Project: pyrarcr, Lines: 34, Source: pyrarcr.py
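Example 3 relies on map_async with a callback; the pool.join() added above matters because without it the resultPass check can run before the batch has finished. The shape of the pattern, with a hypothetical worker and collector:

from multiprocessing import Pool

results = []

def collect(batch):
    # invoked once in the parent process with the whole result list
    results.extend(batch)

def work(item):
    return item * 2  # hypothetical worker

if __name__ == '__main__':
    pool = Pool(4)
    pool.map_async(work, [1, 2, 3], callback=collect)
    pool.close()  # no further tasks
    pool.join()   # blocks until workers exit and the callback has run
    print(results)  # [2, 4, 6]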


Example 4: k_rbm

def k_rbm(infile, outfile):
    #dataset
    data = sio.loadmat(infile)['data']

    # reconstruction cost
    cost_dict = {}
    p = Pool(5)
    first_arg = ["Thread-1", "Thread-2", "Thread-3", "Thread-4", "Thread-5"]
    second_arg = data
    a,b,c,d,e = p.map(rbm_star, zip(first_arg, itertools.repeat(second_arg)))
    # p.map(rbm_star, itertools.izip(first_arg, itertools.repeat(second_arg)))
    # get the costs from the tuples
    cost_1 = a[0]
    cost_2 = b[1]
    cost_3 = c[2]
    cost_4 = d[3]
    cost_5 = e[4]
    # find the cluster assignments
    for i in range(len(cost_1)):
        mincost = min(cost_1[i],cost_2[i],cost_3[i],cost_4[i],cost_5[i])
        if mincost == cost_1[i]:
            cost_dict[i+1] = 1
        elif mincost == cost_2[i]:
            cost_dict[i+1] = 2
        elif mincost == cost_3[i]:
            cost_dict[i+1] = 3
        elif mincost == cost_4[i]:
            cost_dict[i+1] = 4
        else:
            cost_dict[i+1] = 5

    # store results
    json.dump(cost_dict, open(outfile, 'w'))
Author: gupta-abhay, Project: deep-voteaggregate, Lines: 33, Source: web-cluster.py


Example 5: spawn_runpy

def spawn_runpy(cp, wait=60, cb=check_rst):
    "as decorator to run job"
    global WAITQ, RUNQ, CFG
    pool = Pool(processes=CFG['MAXJOBS'])
    while len(WAITQ) > 0 or len(RUNQ) > 0:
        if len(RUNQ) <= CFG['MAXJOBS'] and len(WAITQ) > 0:
            path, test = WAITQ.pop()
            rst = pool.apply_async(call_runpy, (cp, path, test,))
            RUNQ.append((rst, test, timeit.default_timer()))
        else:
            for r in RUNQ:
                usec = float("%.2f" % (timeit.default_timer()-r[2]))
                if r[0].ready() and r[0].successful():  # successful was missing its call parentheses
                    print("[{0}] success used {1} usec".format(r[1], usec))
                    RUNQ.remove(r)
                    if cb:
                        cb(r[1], 'pass', usec)
                else:
                    if usec > CFG['TIMEOUT']:
                        print("[{0}] unsuccess used timeout {1} usec".format(r[1], usec))
                        r[0].terminate()  # caution: AsyncResult has no terminate(); pool.terminate() would kill every job
                        if cb:
                            cb(r[1], 'fail', usec)

        time.sleep(float(wait))
Author: funningboy, Project: smtdv, Lines: 25, Source: unittest.py
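A caveat on the fix in Example 5: AsyncResult.successful() raises ValueError when the task has not finished yet (older Pythons used an assertion), which is why the poll above checks ready() first. The guarded poll in isolation, assuming a job obtained from apply_async:

# sketch: safe polling of an AsyncResult named job
if job.ready():             # finished at all?
    if job.successful():    # finished without raising?
        value = job.get()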


Example 6: compress_file

        def compress_file(self, corpus, np=4, separator=None):
                """
                Construct WLZW patterns out of a corpus; parallelism is an option.
                @param corpus - string, file path of the corpus
                @param np - number of processes; if np = 1 the algorithm is run serially
                @param separator - the separator string between doc id and document; pass None if no doc id is given
                @return set, the final set containing all frequent patterns
                """

                # if only one process, no need for parallelization
                if np == 1:
                        return set(_compress_file((corpus, 0, np, separator)))

                p = Pool(processes=np)
                l = []
                for i in range(0, np):
                        l.append((corpus, i, np, separator))
                result = p.imap_unordered(_compress_file, l, 1)

                # np > 1 here (the serial case returned above), so merge the partial sets
                final_set = _union(result)

                return final_set
Author: xiang7, Project: ForumMiner, Lines: 25, Source: WLZW.py


Example 7: get

    def get(self):
        mode = toAlpha3Code(self.get_argument('lang'))
        text = self.get_argument('q')
        if not text:
            self.send_error(400, explanation='Missing q argument')
            return

        def handleCoverage(coverage):
            if coverage is None:
                self.send_error(408, explanation='Request timed out')
            else:
                self.sendResponse([coverage])

        if mode in self.analyzers:
            pool = Pool(processes=1)
            result = pool.apply_async(getCoverage, [text, self.analyzers[mode][0], self.analyzers[mode][1]])
            pool.close()

            @run_async_thread
            def worker(callback):
                try:
                    callback(result.get(timeout=self.timeout))
                except TimeoutError:
                    pool.terminate()
                    callback(None)

            coverage = yield tornado.gen.Task(worker)
            handleCoverage(coverage)
        else:
            self.send_error(400, explanation='That mode is not installed')
Author: jatinluthra14, Project: apertium-apy, Lines: 30, Source: servlet.py
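Example 7's one-worker pool exists purely to make the coverage call interruptible: result.get(timeout=...) bounds the wait, and pool.terminate() kills the stuck worker, something a plain thread cannot offer. The core of that timeout pattern, with a hypothetical slow function:

import time
from multiprocessing import Pool, TimeoutError

def slow(seconds):
    time.sleep(seconds)  # stands in for a long-running analyzer call
    return seconds

if __name__ == '__main__':
    pool = Pool(processes=1)
    result = pool.apply_async(slow, (10,))
    pool.close()
    try:
        print(result.get(timeout=2))
    except TimeoutError:
        pool.terminate()  # the only reliable way to stop the runaway task
        print('timed out')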


Example 8: main

def main(path, out, cores):
    """
    Compute contact energies for each pdb in path and write results to 'out'.
    :param path: str
    :param out: str
    :param cores: int
    :return: None
    """
    # Find all pdbs in path
    workload = []
    for file in os.listdir(path):
        if os.path.splitext(file)[1].lower() == ".pdb":
            workload.append(file)
    # Print few newlines to prevent progressbar from messing up the shell
    print("\n\n")
    # Compute energies
    pool = Pool(processes=cores)
    results = []
    for (nr, pdb) in enumerate(workload):
        updateprogress(pdb, nr / len(workload))
        e = computecontactenergy(os.path.join(path, pdb), pool)
        results.append((pdb, e))
    pool.close()
    # Make 100% to appear
    updateprogress("Finished", 1)
    # Store output
    with open(out, "w") as handler:
        handler.write("PDB,Energy in kcal/mol\n")
        for pair in results:
            handler.write("{},{}\n".format(*pair))
Author: maxemil, Project: InteractionPotential, Lines: 30, Source: energies.py


Example 9: JobPool

class JobPool(object):

    """
    Pool container.
    """
    pool = None
    message_queue = None

    def __init__(self, max_instances=4):
        self.message_queue = Queue()
        self.pool = Pool(max_instances, execute_task, (self.message_queue,))
        atexit.register(self.clear)

    def add_analysis(self, analysis):
        """
        Add analysis to the pool.
        """
        analysis.set_started()
        self.message_queue.put(analysis)

    def clear(self):
        """
        Pool cleanup.
        """
        self.pool.terminate()
        self.pool.join()
Author: ANSSI-FR, Project: polichombr, Lines: 26, Source: jobpool.py


Example 10: YaraJobPool

class YaraJobPool(object):

    """
    Yara pool container.
    """
    pool = None
    message_queue = None

    def __init__(self, max_instances=3):
        self.message_queue = Queue()
        self.pool = Pool(max_instances, execute_yara_task,
                         (self.message_queue,))
        atexit.register(self.clear)

    def add_yara_task(self, yara_task):
        """
        Adds the yara task.
        """
        self.message_queue.put(yara_task)

    def clear(self):
        """
        Pool cleanup.
        """
        self.pool.terminate()
        self.pool.join()
Author: ANSSI-FR, Project: polichombr, Lines: 26, Source: jobpool.py
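In Examples 9 and 10 the second and third arguments to Pool are an initializer and its argument tuple, so every worker spends its life inside execute_task(message_queue) (or execute_yara_task) consuming jobs, rather than receiving tasks through map. A minimal sketch of that consumer shape, with hypothetical names; like the real classes, it shuts down via terminate():

from multiprocessing import Pool, Manager
import time

def consume(queue):
    # each worker runs this loop for its entire lifetime
    while True:
        task = queue.get()
        print('processing', task)

if __name__ == '__main__':
    queue = Manager().Queue()
    pool = Pool(2, consume, (queue,))   # initializer + initargs, as in JobPool
    for item in ('a', 'b', 'c'):
        queue.put(item)
    time.sleep(1)        # crude: give workers time to drain the queue
    pool.terminate()     # mirrors JobPool.clear()
    pool.join()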


Example 11: get_location

    def get_location(self):
        """

        Extracts the location of each pixel in the satellite image

        """
        self.ncols = self.satellite_gdal.RasterXSize // 2
        self.nrows = self.satellite_gdal.RasterYSize // 2
        self.length_df = self.nrows * self.ncols
        print('Columns, rows', self.ncols, self.nrows)
        cols_grid, rows_grid = np.meshgrid(
                    range(0, self.ncols), 
                    range(0, self.nrows))
        self.cols_grid = cols_grid.flatten()
        self.rows_grid = rows_grid.flatten()
        print('Checking the meshgrid procedure works')
        # getting a series of lat lon points for each pixel
        self.geotransform = self.satellite_gdal.GetGeoTransform()
        print('Getting locations')
        self.location_series = np.array(parmap.starmap(
                        pixel_to_coordinates, 
                        zip(self.cols_grid, self.rows_grid), 
                        self.geotransform,
                        processes = self.processes))
        print('Converting to Points')
        pool = Pool(self.processes)
        self.location_series = pool.map(
                        point_wrapper, 
                        self.location_series)
Author: patrick-dd, Project: landsat-landstats, Lines: 29, Source: data_cleaning.py


Example 12: get_fractional_errors

def get_fractional_errors(R_star, L_star, P_c, T_c):
	"""
		Pass in "guess" conditions.
		Will then calculate inward and outward errors,

		Returns:
			[Data array]
			dY - over/undershoots (+/-, going outward)
				[dx handled outside this]
	"""

	# R_star, L_star, P_c, T_c = x

	P_c_0		= modelparameters.P_c # core pressure, [dyne cm^-2]
	T_c_0 		= modelparameters.T_c # core temperature, [K]
	R_star_0 	= modelparameters.R_star
	L_star_0 	= modelparameters.L_star

	print ""
	print "R: " + str(R_star / R_star_0)
	print "L: " + str(L_star / L_star_0)
	print "P: " + str(P_c / P_c_0)
	print "T: " + str(T_c / T_c_0)


	X 		= modelparameters.X
	Y 		= modelparameters.Y
	Z 		= modelparameters.Z
	mu 		= modelparameters.mu
	params 	= (X, Y, Z, mu)

	M_star 	= modelparameters.M_star
	m_fitting_point	= modelparameters.m_fitting_point

	pool = Pool(2)
	outward_results = pool.apply_async(integrate.integrate_outwards, 
		[M_star, m_fitting_point, P_c, T_c, mu, X, Y, Z] )

	inward_results  = pool.apply_async(integrate.integrate_inwards, 
		[M_star, m_fitting_point, R_star, L_star, mu, X, Y, Z] )

	m_outward, y_outward, infodict_outward 	= outward_results.get()

	m_inward, y_inward, infodict_inward 	= inward_results.get()

	dr = y_inward[-1,0] - y_outward[-1,0]
	dl = y_inward[-1,1] - y_outward[-1,1]
	dP = y_inward[-1,2] - y_outward[-1,2]
	dT = y_inward[-1,3] - y_outward[-1,3]

	dY = np.array([dr, dl, dP, dT])

	print('')
	print('fractional errors:')
	print("dR: " + str(dr / y_inward[-1,0]))
	print("dL: " + str(dl / y_inward[-1,1]))
	print("dP: " + str(dP / y_inward[-1,2]))
	print("dT: " + str(dT / y_inward[-1,3]))

	return dY
Author: egentry, Project: stellarstructure, Lines: 60, Source: shootf.py


Example 13: score_all_genes

  def score_all_genes(self, graph, num_procs=1):
    partial_score_gene = partial(score_gene, graph=graph, top_genes=self.top_genes)
    p = Pool(num_procs)
    result = p.map(partial_score_gene, list(self.vd.gene_names()))
    p.close()

    # convert them all to percentiles
    cent_hist = numpy.array([x[1] for x in result if x[1] != -1])
    nn_hist = numpy.array([x[2] for x in result if x[2] != -1])

    batch = []

    for gene, cent_score, nn_score in result:
      # edge case: gene is a top gene
      if gene in self.top_genes:
        cent_perc = 1
        nn_perc = 1
      # edge case: gene isn't in network
      elif cent_score == -1 or \
           nn_score == -1:
        cent_perc = 0
        nn_perc = 0
      else:
        cent_perc = scipy.stats.percentileofscore(cent_hist, cent_score) / 100.0
        nn_perc = 1 - scipy.stats.percentileofscore(nn_hist, nn_score) / 100.0

        print "gene:  %s\n  c:   %s\n  c_p: %s\n  n:   %s\n  n_p: %s" % \
          (gene, cent_score, cent_perc, nn_score, nn_perc)

      batch.append((cent_score, cent_perc, nn_score, nn_perc, gene))

    self.vd._c.executemany("UPDATE genes SET cent_score = ?, cent_perc = ?, " \
      "nn_score = ?, nn_perc = ? WHERE name = ?", batch)
    self.vd._conn.commit()
Author: polyatail, Project: varprior, Lines: 34, Source: static.py


Example 14: main

def main():

    parser = ArgumentParser(description="Speed up your SHA. A different hash style.")
    parser.add_argument('-1', '--sha1', action='store_true')
    parser.add_argument('-2', '--sha224', action='store_true')
    parser.add_argument('-3', '--sha256', action='store_true')
    parser.add_argument('-4', '--sha384', action='store_true')
    parser.add_argument('-5', '--sha512', action='store_true')
    parser.add_argument('-f', '--file', type=str, help="The path to the file")

    if len(sys.argv) == 1:
        parser.print_help()
        return

    global args
    args = parser.parse_args()

    hashtree = ''

    big_file = open(args.file, 'rb')
    pool = Pool(multiprocessing.cpu_count())

    for chunk_hash in pool.imap(hashing, chunks(big_file)):
        hashtree += chunk_hash + ":hash"

    pool.terminate()

    print(str(hashing(hashtree.encode('ascii'))))
Author: Detry322, Project: blockhash, Lines: 28, Source: mains.py
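The chunks helper that Example 14 feeds to pool.imap is not shown in the excerpt. A plausible implementation (an assumption, not the project's code) yields fixed-size blocks of the open file, which imap then hashes in submission order:

def chunks(file_object, chunk_size=1024 * 1024):
    # hypothetical helper: yield fixed-size blocks until EOF
    while True:
        block = file_object.read(chunk_size)
        if not block:
            break
        yield block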


Example 15: main

def main():
    """
    ---------------------------------------------------------------------------
    AUTHOR: Kyle Hernandez
    EMAIL: [email protected]

    Calculate the distribution of polymorphic RAD loci across site classes.
    ---------------------------------------------------------------------------

    USAGE: python snp_locations.py gmatrix.tab file.gff out.tab n_threads

    ARGUMENTS:
    	gmatrix.tab - Tab-delimited genotype matrix file of variant sites
        file.gff    - GFF file
        out.tab     - Output file of counts
        n_threads   - The number of threads to run
    """

    # Load the GFF and SNP positions into dictionaries
    load_gff()
    intergenic = process_matrix()
    
    # Map:
    # Create a pool of n_threads workers and use them to process
    # scaffolds separately
    ch_vals = sorted(gff_dict.keys())
    sys.stdout.write("Counting features...\n")
    pool    = Pool(processes=n_threads)
    ct_list = pool.map(process_dicts, ch_vals)

    # Reduce:
    # Process the list of dicts
    print_counts(intergenic, ct_list)
Author: jacob-ogre, Project: chlamy-wt-var, Lines: 33, Source: snp_locations.py


Example 16: matrix_vector_iteration_by_processes

def matrix_vector_iteration_by_processes(A,x,k):
	# create a temporary directory to store the matrix and the vectors
	tmpdir = tempfile.mkdtemp()

	nvec = get_nvec(x)
	y = x.copy()

	save_matrix(tmpdir,A)
	for i in range(nvec):
		save_x(tmpdir,x,i)

	# start processes
	pool = Pool(processes=min(nvec,6))
	processes = []

	for i in range(nvec):
		processes.append( pool.apply_async(matrix_vector_iteration_process, (tmpdir,i,k)) ) 

	# fetch results (vector/matrix shape version)
	if x.ndim == 1:
		processes[0].get()
		y = load_x(tmpdir,0)
	else:
		for i in range(nvec):
			processes[i].get()
			y[:,i] = load_x(tmpdir,i)

	pool.close()

	# remove temporary directory (with all it contains)
	shutil.rmtree(tmpdir)

	return y
Author: sbordt, Project: markovmixing, Lines: 33, Source: iterate_distributions.py


Example 17: fetch_imagery

def fetch_imagery(image_locations, local_dir):
    pool = Pool(cpu_count())
    tupled = [(loc[0], loc[1], local_dir) for loc in image_locations]
    try:
        pool.map(fetch_imagery_uncurried, tupled)
    finally:
        pool.close()
Author: azavea, Project: raster-foundry, Lines: 7, Source: cog.py
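The try/finally in Example 17 guarantees the pool is closed even if map raises. Since Python 3.3 Pool is also a context manager, so an equivalent sketch is shorter; note that __exit__ calls terminate() rather than close(), which is harmless here because map has already returned:

def fetch_imagery(image_locations, local_dir):
    tupled = [(loc[0], loc[1], local_dir) for loc in image_locations]
    with Pool(cpu_count()) as pool:      # terminated automatically on exit
        pool.map(fetch_imagery_uncurried, tupled)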


Example 18: compute_tdbf

def compute_tdbf():
    conn = db_conn('bnc')
    cur = conn.cursor()
    # select keys and parsed from table
    sql = 'SELECT xmlID, divIndex, globalID, parsed FROM entropy_DEM100'
    cur.execute(sql)
    data = cur.fetchall()
    # initialize
    pool = Pool(multiprocessing.cpu_count())
    manager = Manager()
    queue = manager.Queue()
    # mp
    args = [(d, queue) for d in data]
    result = pool.map_async(compute_tdbf_worker, args, chunksize=5000)
    # manager loop
    while True:
        if result.ready():
            print('\n all rows processed')
            break
        else:
            sys.stdout.write('\r{}/{} processed'.format(queue.qsize(), len(args)))
            sys.stdout.flush()
            time.sleep(1)
    # update
    processed_results = result.get()
    for i, res in enumerate(processed_results):
        xml_id, div_idx, g_id, sub_tree, td, bf = res
        sql = 'UPDATE entropy_DEM100 SET parsedSimple = %s, td = %s, bf = %s WHERE xmlID = %s AND divIndex = %s AND globalID = %s'
        cur.execute(sql, (sub_tree, td, bf, xml_id, div_idx, g_id))
        if i % 999 == 0 and i > 0:
            sys.stdout.write('\r{}/{} updated'.format(i+1, len(processed_results)))
            sys.stdout.flush()
    conn.commit()
Author: innerfirexy, Project: acl2016short, Lines: 33, Source: bnc_tdbf.py
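Example 18 reports progress through a Manager().Queue() because a plain multiprocessing.Queue cannot be passed inside a Pool task's arguments (it fails with a RuntimeError), while the managed proxy pickles cleanly. The pattern distilled, with a hypothetical worker:

from multiprocessing import Pool, Manager

def work(args):
    item, queue = args
    queue.put(item)          # progress event via the managed proxy
    return item * 2

if __name__ == '__main__':
    queue = Manager().Queue()
    with Pool(2) as pool:
        result = pool.map_async(work, [(i, queue) for i in range(5)])
        result.wait()        # the example polls queue.qsize() here instead
    print('progress events:', queue.qsize())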


Example 19: downloadImages

	def downloadImages(self, dirName, urlData):
		child_folder = 'pictures'
		failures = 0
		dirName = os.path.join(dirName,child_folder)
		process_pool = Pool(processes=self._pool_size)
		results = []

		for ud in urlData:
			abs_img = os.path.join(dirName,urlparse(ud).path.strip('/'))
			try:
				os.makedirs(dirname(abs_img))
			except:
				pass
			results.append( process_pool.apply_async( urllib.request.urlretrieve, [ ud,  abs_img ] ) )  # urllib.request.urlretrieve in Python 3

		self.initialize_bar(max=len(results))
		for result in results:
			try:
				result.get(self._timeout)
			except Exception:
				failures += 1
			else:
				self.update_bar()

		self.finish_bar()
		if failures: print("   Completed with errors: Downloaded {0}/{1}".format(len(results) - failures, len(results)))
		self.finish_bar()
Author: erktheerk, Project: image-scraper, Lines: 27, Source: __init__.py


Example 20: get_needle_tips

def get_needle_tips(images):
    """Get sample tips from images."""
    tips = []
    results = []

    # Do not make more processes than needed for the number of images.
    if len(images) > multiprocessing.cpu_count():
        proc_count = multiprocessing.cpu_count()
    else:
        proc_count = len(images)

    pool = Pool(processes=proc_count)

    for image in images:
        results.append(pool.apply_async(_get_ellipse_point,
                                        args=(image,)))

    for result in results:
        tip = result.get()
        if tip is not None:
            tips.append(tip)

    if len(tips) == 0:
        raise ValueError("No sample tip points found.")

    return tips
Author: ludovico86, Project: concert, Lines: 26, Source: imageprocessing.py
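The worker cap in Example 20 is a common idiom; the explicit if/else can be collapsed to a single min(), shown here only as the shorthand form:

proc_count = min(len(images), multiprocessing.cpu_count())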



Note: The multiprocessing.Pool class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by their developers; copyright remains with the original authors. Consult each project's License before distributing or using the code; do not republish without permission.

