This article collects typical usage examples of Python's multiprocessing.pool.Pool class. If you are wondering what the Pool class is for, how to use it, or what real-world Pool code looks like, the curated class examples below may help.
The following presents 20 code examples of the Pool class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
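Before the examples, here is a minimal sketch of the basic workflow that most of the snippets below build on: create a pool, hand it work with map, then shut it down. (The function name double is illustrative only.)

from multiprocessing.pool import Pool

def double(x):
    return 2 * x

if __name__ == '__main__':
    pool = Pool(processes=4)             # four worker processes
    print(pool.map(double, range(10)))   # blocks until all results are ready
    pool.close()                         # no more tasks will be submitted
    pool.join()                          # wait for the workers to exit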
Example 1: parse
def parse(document, pages, parse_refs=True,
          progress_monitor=NullProgressMonitor(),
          pool_size=DEFAULT_POOL_SIZE):
    progress_monitor.start('Parsing Pages', pool_size + 1)

    # Prepare input
    pages = [(page.local_url, page.url) for page in
             pages.values() if page.local_url is not None]
    pages_chunks = chunk_it(pages, pool_size)
    inputs = []
    for pages_chunk in pages_chunks:
        inputs.append((document.parser, document.pk, parse_refs, pages_chunk))

    # Close connection to allow the new processes to create their own.
    connection.close()

    # Split work
    progress_monitor.info('Sending {0} chunks to worker pool'
                          .format(len(inputs)))
    pool = Pool(pool_size)
    for result in pool.imap_unordered(sub_process_parse, inputs, 1):
        progress_monitor.work('Parsed 1/{0} of the pages'.format(pool_size), 1)

    # Word Count
    word_count = 0
    for page in document.pages.all():
        word_count += page.word_count
    document.word_count = word_count
    document.save()
    progress_monitor.work('Counted Total Words', 1)

    pool.close()
    progress_monitor.done()
Author: bartdag, Project: recodoc2, Lines: 34, Source: generic_parser.py
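Unlike map, the imap_unordered call used above yields each result as soon as a worker finishes it, regardless of input order, which is what makes the per-chunk progress reporting possible. A standalone sketch of that behaviour (square is illustrative only):

from multiprocessing.pool import Pool

def square(x):
    return x * x

if __name__ == '__main__':
    pool = Pool(4)
    # Results arrive in completion order, not submission order.
    for result in pool.imap_unordered(square, range(8), 1):
        print(result)
    pool.close()
    pool.join()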
Example 2: _get_data
def _get_data(data_folder="data/crcm4_data", v_name="pcp",
              member_list=None, year_range=None, months=None):
    """
    returns seasonal means of each year for all members in the list
    Note!: uses caching
    """
    year_range = list(year_range)
    cache_file = "_".join(member_list) + "_" + "_".join(map(str, months)) + \
        "_{0}_from_{1}_to_{2}_cache.bin".format(v_name, year_range[0], year_range[-1])

    if os.path.isfile(cache_file):
        return pickle.load(open(cache_file))

    p = Pool(processes=len(member_list))

    # prepare input for the parallel processes
    m_folders = map(lambda x: os.path.join(data_folder, "{0}_p1{1}".format(x, v_name)), member_list)
    year_ranges = [year_range] * len(member_list)
    months_for_p = [months] * len(member_list)

    # calculate means
    result = p.map(_get_annual_means_for_year_range_p, zip(m_folders, year_ranges, months_for_p))
    result = np.concatenate(result, axis=0)  # shape = (n_members * len(year_range)) x nx x ny
    print result.shape
    pickle.dump(result, open(cache_file, "w"))
    return result
Author: guziy, Project: PlotWatrouteData, Lines: 29, Source: plot_projected_changes.py
Example 3: runexp
def runexp(filename, n_topics_range, β=.1, n_samples=1000, n_runs=8,
           seed=1337):
    kwargsin = locals()
    docs, n_words = get_imdb_docs(filename)
    random.seed(seed)
    args = []
    pool = Pool()
    for tid, (_, n_topics) in enumerate(product(range(n_runs), n_topics_range)):
        seed_i = random.randrange(2**31)
        args.append((docs, n_topics, n_words, β, n_samples, seed_i, tid,))

    data = []
    runtime = []
    res = pool.map(_expitr, args)
    for evx, rt, nt in res:
        data.append({'Log evidence': evx,
                     'Runtime': rt,
                     'Number of topics': nt})

    out = {
        'res': {
            'data': pd.DataFrame(data),
        },
        'args': kwargsin,
    }
    return out
Author: BaxterEaves, Project: ijcai-iml-2016, Lines: 27, Source: exp_model_selection.py
Example 4: extract_all_labels
def extract_all_labels(filenames, out_filepath=DATA_FOLDER + 'labels.p', chunk_size=2000):
    print "EXTRACTING ALL LABELS INTO {0}".format(out_filepath)
    all_labels = []
    label_dict = {}

    filenames_chunks = util.chunks(filenames, chunk_size)
    for i, chunk in enumerate(filenames_chunks):
        pool = Pool(processes=util.CPU_COUNT)
        chunk_labels = pool.map(extract_labels, chunk)
        pool.close()

        for filepath, labels in zip(chunk, chunk_labels):
            if labels is not None:
                file_id = util.filename_without_extension(filepath)
                label_dict[file_id] = labels
                all_labels += labels
        print i + 1, '/', len(filenames_chunks)

    # Write labels to file
    with open(out_filepath, 'w') as f:
        pickle.dump(label_dict, f)

    print '\nLabels:'
    print len(set(all_labels))
    print Counter(all_labels)
Author: gzuidhof, Project: text-mining, Lines: 27, Source: extract.py
Example 5: main_mh
def main_mh():
    samples_dir_p = Path("/RECH2/huziy/BC-MH/bc_mh_044deg/Samples")
    out_dir_root = Path("/RECH2/huziy/MH_streamflows/")

    if samples_dir_p.name.lower() == "samples":
        out_folder_name = samples_dir_p.parent.name
    else:
        out_folder_name = samples_dir_p.name

    varnames = ["STFA", ]

    # ======================================
    out_dir_p = out_dir_root.joinpath(out_folder_name)
    if not out_dir_p.is_dir():
        out_dir_p.mkdir(parents=True)

    inputs = []
    for y in range(1981, 2010):
        inputs.append(dict(year=y, varnames=varnames, samples_dir=samples_dir_p,
                           out_dir=out_dir_p, target_freq_hours=24))

    # Extract the data for each year in parallel
    pool = Pool(processes=3)
    pool.map(extract_data_for_year_in_parallel, inputs)
Author: guziy, Project: RPN, Lines: 29, Source: select_vars_from_samples_and_aggregate.py
Example 6: run
def run():
    setup_logger()
    logger.info('Started')
    queue = multiprocessing.Queue(maxsize=EVENT_QUEUE_MAX_SIZE)
    pool = Pool(processes=WORKERS,
                initializer=worker,
                initargs=(queue,))
    event_handler = EventHandler(queue)
    observer = init_observer()
    try:
        delete_all_files(FRAMES_PATH)
        observer.schedule(event_handler, path=FRAMES_PATH, recursive=True)
        signal.signal(signal.SIGINT, signal_handler)
        observer.start()

        while True:
            # Restart workers if needed. Note that _maintain_pool is a
            # private Pool API and may change between Python versions.
            pool._maintain_pool()
            time.sleep(1)
            now = datetime.datetime.now()
            if now - event_handler.last_event > datetime.timedelta(minutes=1):
                logger.warning("No events received in the last minute.")
                # Sometimes watchdog stops receiving events.
                # We exit, so the process can be restarted.
                break
    except KeyboardInterrupt as err:
        logger.warning("Keyboard interruption")
    except Exception as err:
        logger.exception(err)
    finally:
        observer.stop()
        observer.join()
        pool.terminate()
    logger.warning("Bye")
Author: jbochi, Project: live_thumb, Lines: 34, Source: broadcaster.py
Example 7: runexp
def runexp(n_topics_lst, n_vocab_lst, n_words_lst, alpha_lst, beta_lst,
           n_docs, n_runs, tol=.05):
    kwargsin = locals()
    params = it.product(n_topics_lst, n_vocab_lst, n_words_lst, alpha_lst,
                        beta_lst)
    args = []
    for i, (n_topics, n_vocab, n_words, α, β) in enumerate(params):
        args.append([n_topics, n_vocab, n_words, α, β, n_docs, tol])
    args = [tuple(arg + [random.randrange(2**31), i]) for
            i, arg in enumerate(args * n_runs)]

    pool = Pool()
    res = pool.map(_expitr, args)

    data = []
    for t, n, n_topics, n_vocab, n_words, α, β, n_docs in res:
        datum = {'time': t,
                 'Number of samples': n,
                 'Number of topics': n_topics,
                 'Vocabulary size': n_vocab,
                 'Number of words': n_words,
                 'Number of documents': n_docs,
                 'α': α,
                 'β': β, }
        data.append(datum)

    res = {
        'df': pandas.DataFrame(data),
    }
    return {'res': res, 'args': kwargsin}
Author: BaxterEaves, Project: ijcai-iml-2016, Lines: 32, Source: exp_complexity.py
Example 8: __init__
def __init__(self, processes=None, initializer=None, initargs=(), process=None):
    """
    @param process: Process subclass to use
    """
    if process is not None:
        self.Process = process
    Pool.__init__(self, processes, initializer, initargs)
Author: infinity0, Project: tag-routing, Lines: 7, Source: contextprocess.py
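A minimal sketch of how such a subclass might be used. The names ContextPool and QuietProcess are hypothetical, and assigning self.Process relies on Pool.Process being a plain class attribute, which holds on older Pythons; since Python 3.8, Pool.Process is a static method that receives a context argument, so the override would need adapting there.

from multiprocessing import Process
from multiprocessing.pool import Pool

class ContextPool(Pool):
    """Hypothetical name for the class that Example 8's __init__ belongs to."""
    def __init__(self, processes=None, initializer=None, initargs=(), process=None):
        if process is not None:
            self.Process = process
        Pool.__init__(self, processes, initializer, initargs)

class QuietProcess(Process):
    """Hypothetical worker subclass; real code would override run()."""
    pass

if __name__ == '__main__':
    pool = ContextPool(processes=2, process=QuietProcess)
    print(pool.map(abs, [-1, -2, -3]))   # [1, 2, 3]
    pool.close()
    pool.join()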
Example 9: parallel_main
def parallel_main():
    recs = sys.stdin.readlines()
    vals = [int(rec) for rec in recs]
    p = Pool()
    results = p.map(solve, vals)
    for v1, v2 in results:
        print("{} {}".format(v1, v2))
Author: chenhh, Project: Uva, Lines: 7, Source: uva_106_TLE.py
Example 10: parallel_main
def parallel_main():
    recs = iter(sys.stdin.readlines())
    cuts_list = []
    cuts_list_append = cuts_list.append
    cuts = []
    cuts_extend = cuts.extend
    cuts_append = cuts.append
    cuts_clear = cuts.clear

    while True:
        # length of the stick
        L = int(next(recs))
        if L == 0:
            break
        # number of cuts
        n_cut = int(next(recs))
        # cutting points
        cuts_clear()
        cuts_append(0)
        cuts_extend(list(map(int, next(recs).split())))
        cuts_append(L)
        cuts_list_append(cuts[:])

    p = Pool(4)
    results = p.map(min_cut, cuts_list)
    for res in results:
        print(res)
Author: chenhh, Project: Uva, Lines: 28, Source: uva_10003_TLE.py
Example 11: sum_lines
def sum_lines(self, SPEEDUP=True):
    filesname = []
    for item_dir in self.dirlist.keys():
        for item_file in self.dirlist[item_dir][1]:
            filesname.append(item_dir + '/' + item_file)

    if SPEEDUP:
        # when the python version is less than 3.3, multiprocessing.pool.Pool
        # doesn't support the context management protocol
        if sys.version_info.major == 3 and sys.version_info.minor >= 3:
            with Pool(self.MAX_RES) as res_pool:
                return reduce(self._adder, res_pool.map(self._count_filelines, filesname))
        else:
            # in python2.x (maybe python3.[0-2]),
            # multiprocessing must pickle things to sling them among processes,
            # and bound methods are not picklable.
            # the workaround (whether you consider it "easy" or not ;-) is to
            # add the infrastructure to your program to allow such methods to be pickled,
            # registering it with the copy_reg standard library method.
            # the following is a module-level fallback to make it work in python2.x
            res_pool = Pool(processes=self.MAX_RES)
            retval = res_pool.map(_filecounter, filesname)
            return reduce(self._adder, retval)
    else:
        for filename in filesname:
            with open(filename, 'rb') as filebuf:
                self.filesline += len(filebuf.readlines())
        return self.filesline
Author: edonyM, Project: toolkitem, Lines: 28, Source: filesline.py
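The version check above exists only because with-statement support for Pool arrived in Python 3.3. A minimal sketch of the modern form; note that Pool.__exit__ calls terminate(), so results should be consumed inside the block:

from functools import reduce
from multiprocessing.pool import Pool

def count_chars(s):
    return len(s)

if __name__ == '__main__':
    # Python >= 3.3: Pool supports the context-management protocol.
    with Pool(4) as res_pool:
        total = reduce(lambda a, b: a + b, res_pool.map(count_chars, ["a", "bb", "ccc"]))
    print(total)  # 6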
Example 12: check
def check(self, artdict):
    print("Checking for infobox existence")
    pool = Pool(processes=100)
    revs = []
    for a in artdict:
        rev = artdict[a]["Revision"].split('oldid=')[1].strip()
        revs.append((a, rev))
    texts = dict(pool.map(self.get_text, revs))

    for a in artdict:
        text = texts[a]
        if text is None:
            artdict[a]["MultiInfobox"] = 0
            artdict[a]["Infobox programming language"] = -1
            artdict[a]["Infobox software"] = -1
            artdict[a]["Infobox file format"] = -1
        else:
            if 'infobox programming language' in text.lower():
                artdict[a]["Infobox programming language"] = text.lower().index('infobox programming language')
            else:
                artdict[a]["Infobox programming language"] = -1
            if 'infobox software' in text.lower():
                artdict[a]["Infobox software"] = text.lower().index('infobox software')
            else:
                artdict[a]["Infobox software"] = -1
            if 'infobox file format' in text.lower():
                artdict[a]["Infobox file format"] = text.lower().index('infobox file format')
            else:
                artdict[a]["Infobox file format"] = -1
            artdict[a]["MultiInfobox"] = text.lower().count("{{infobox")
    return artdict
Author: softlang, Project: wikionto, Lines: 30, Source: infobox_position.py
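Because the hundred workers above fetch article text over the network, an I/O-bound job, the thread-based sibling multiprocessing.pool.ThreadPool offers the same interface without pickling or process start-up costs. A sketch with a stand-in fetch function (get_text here is hypothetical, standing in for the method in Example 12):

from multiprocessing.pool import ThreadPool

def get_text(pair):
    """Stand-in for the HTTP fetch in Example 12."""
    name, rev = pair
    return name, "wikitext of {} at revision {}".format(name, rev)

pool = ThreadPool(processes=100)
texts = dict(pool.map(get_text, [("Python", "123"), ("Rust", "456")]))
pool.close()
pool.join()
print(texts["Python"])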
Example 13: test_stemming
def test_stemming():
    with open("tests.txt") as file:
        pool = Pool(4)
        results = pool.map(validate, file)
        for result in results:
            if result:
                yield assert_output, result[0], result[1]
Author: brkastner, Project: stemming_dictionary, Lines: 7, Source: tests.py
Example 14: main
def main():
    global pool
    pool = Pool(POOL_SIZE)

    nseeds = 100

    # print("== generating seeds...")
    # generate_seeds(nseeds)

    # print("running const density experiments...")
    # run_constant_density(0.1, range(100, 1000, 100), nseeds)

    # print("running const size experiments...")
    # run_constant_size(50, range(100, 1000, 100), nseeds)

    print("== running aggregate interval experiments (const density)...")
    # run_aggregate_interval_constant_density(0.1, range(100, 1000, 100), nseeds, [100, 500] + list(range(1000, 4000, 1000)))
    run_aggregate_interval_constant_density(0.1, range(100, 1000, 100), nseeds, [3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000, 50000])
    reset_pool()
    run_aggregate_interval_constant_density(0.2, range(100, 1000, 100), nseeds, [100, 500, 1000, 2000, 4000, 5000, 6000, 7000, 8000, 9000, 10000, 50000])
    reset_pool()
    run_aggregate_interval_constant_density(0.3, range(100, 1000, 100), nseeds, [100, 500, 1000, 2000, 4000, 5000, 6000, 7000, 8000, 9000, 10000, 50000])
    reset_pool()
    run_aggregate_interval_constant_density(0.4, range(100, 1000, 100), nseeds, [100, 500, 1000, 2000, 4000, 5000, 6000, 7000, 8000, 9000, 10000, 50000])
    reset_pool()
    run_aggregate_interval_constant_density(0.5, range(100, 1000, 100), nseeds, [100, 500, 1000, 2000, 4000, 5000, 6000, 7000, 8000, 9000, 10000, 50000])

    pool.close()
    pool.join()
Author: Darma, Project: wiselib, Lines: 31, Source: run_shawn_experiments.py
Example 15: run_parallel
def run_parallel(num_processes, experiment_names, methods, sparsity_factors, run_ids):
    """
    Run multiple experiments in parallel.

    Parameters
    ----------
    num_processes : int
        The maximum number of processes that can run concurrently.
    experiment_names : list of str
        The names of experiments to run.
    methods : list of str
        The methods to run the experiments under (mix1, mix2, or full).
    sparsity_factors : list of float
        The sparsity of inducing points to run the experiments at.
    run_ids : list of int
        The ids of the configurations under which to run the experiments.
    """
    # Setup an array of individual experiment configurations.
    experiment_configs = []
    for experiment in experiment_names:
        for method in methods:
            for sparsity_factor in sparsity_factors:
                for run_id in run_ids:
                    experiment_configs.append({'experiment_name': experiment,
                                               'method': method,
                                               'sparsity_factor': sparsity_factor,
                                               'run_id': run_id})

    # Now run the experiments.
    pool = Pool(num_processes)
    pool.map(run_config, experiment_configs)
Author: Karl-Krauth, Project: Sparse-GP, Lines: 31, Source: run_experiment.py
Example 16: start
def start(self):
    """Starts a server that controls local workers.

    Calling this function starts a pool of `num_workers` workers used to run
    targets sent to the server. The server will run indefinitely unless shut
    down by the user.
    """
    try:
        serv = Listener((self.hostname, self.port))
        workers = Pool(
            processes=self.num_workers,
            initializer=Worker,
            initargs=(self.status, self.queue, self.waiting),
        )
        logging.info(
            "Started %s workers, listening on port %s",
            self.num_workers,
            serv.address[1],
        )
        self.wait_for_clients(serv)
    except OSError as e:
        if e.errno == 48:  # EADDRINUSE on macOS/BSD
            raise ServerError(
                (
                    "Could not start workers listening on port {}. "
                    "The port may already be in use."
                ).format(self.port)
            )
    except KeyboardInterrupt:
        logging.info("Shutting down...")
        workers.close()
        workers.join()
        self.manager.shutdown()
Author: mailund, Project: gwf, Lines: 34, Source: local.py
Example 17: get_urls1
def get_urls1():
    f2 = open('app_links.txt', 'r')
    nprocs = 100  # nprocs is the number of processes to run
    ParsePool = Pool(nprocs)
    # ParsePool.map(btl_test, url)
    ParsedURLS = ParsePool.map(urlsDeatilsExtract, f2)
Author: mounarajan, Project: google-play-apps-crawl, Lines: 7, Source: script.py
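One detail this example relies on: pool.map accepts any iterable, and iterating an open file yields its lines, so each worker call receives a single line of app_links.txt. A small sketch with a hypothetical worker function:

from multiprocessing.pool import Pool

def strip_line(line):
    """Hypothetical worker; the original uses urlsDeatilsExtract."""
    return line.strip()

if __name__ == '__main__':
    with open('app_links.txt') as f:
        pool = Pool(4)
        urls = pool.map(strip_line, f)   # one task per line of the file
        pool.close()
        pool.join()
    print(len(urls))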
Example 18: __init__
def __init__(self):
    try:
        cpus = cpu_count()
    except NotImplementedError:
        cpus = 1
    # NB: depending on the Python version, Pool.__init__ may replace
    # _taskqueue with its own unbounded queue after this assignment.
    self._taskqueue = Queue(maxsize=(2 * cpus))
    Pool.__init__(self)
Author: naviga-tor, Project: navigator, Lines: 7, Source: truncatedata.py
Example 19: run_parallel
def run_parallel(n_process):
    """
    Creates a process for each element in the array returned by ``get_configs()`` and runs the
    experiment corresponding to each element. The maximum number of processes to run in parallel
    is determined by ``n_process``.
    """
    p = Pool(n_process)
    p.map(run_config, ExperimentRunner.get_configs())
Author: jfutoma, Project: savigp, Lines: 8, Source: experiment_run.py
Example 20: parallel
def parallel(self):
    self.getInput()
    p = Pool(4)
    millis1 = int(round(time.time() * 1000))
    self.results = p.map(solve, self.input)
    millis2 = int(round(time.time() * 1000))
    print("Time in milliseconds: %d " % (millis2 - millis1))
    self.makeOutput()
Author: llchen223, Project: Codejam, Lines: 8, Source: main.py
Note: The multiprocessing.pool.Pool class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation hosting platforms. The snippets are drawn from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and distribution and use should follow the license of the corresponding project. Do not reproduce without permission.