This article collects typical usage examples of the Python multiprocessing.pool.ThreadPool class. If you are wondering what the ThreadPool class is for, how to use it, or what real-world usage looks like, the curated class examples below may help.
The following presents 20 code examples of the ThreadPool class, ordered by popularity by default.
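Before the project-level examples, here is a minimal, self-contained sketch of the two calling patterns that recur throughout this page: map for a blocking fan-out over an iterable, and apply_async for a single background call whose result is collected later. ThreadPool exposes the same interface as multiprocessing.Pool but runs tasks in threads of the calling process, which is why it suits I/O-bound work. The square worker and the pool size of 4 are illustrative placeholders, not taken from any example below.

from multiprocessing.pool import ThreadPool

def square(x):
    # Placeholder worker; real code would do I/O-bound work here.
    return x * x

pool = ThreadPool(processes=4)                   # four worker threads
print(pool.map(square, range(10)))               # blocking fan-out: [0, 1, 4, ..., 81]
async_result = pool.apply_async(square, (7,))    # schedule one call in the background
print(async_result.get())                        # block until the result is ready: 49
pool.close()                                     # no more tasks will be submitted
pool.join()                                      # wait for the workers to finish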
Example 1: _test_herd_management
def _test_herd_management(self, cache):
    globals()['call_count'] = 0

    def test_callable(v):
        global call_count
        call_count += 1
        sleep(0.1)
        return v

    pool = ThreadPool(processes=10)
    processes = []
    for _ in xrange(10):
        to_func = {
            'key': self._test_key,
            'callback': test_callable,
            'callback_params': {'v': 17},
        }
        async_result = pool.apply_async(
            cache.get_or_set, kwds=to_func
        )
        processes.append(async_result)

    results = []
    for thread in processes:
        thread.wait()
        results.append(thread.get())

    # Checking that callable method was applied only once
    self.assertEqual(globals()['call_count'], 1)
    # Checking results - they all should be the same
    self.assertEqual(results, [17] * 10)
Author: lemurchik | Project: django-extmemcachedcache | Source: tests.py
Example 2: _test_monitor_tables_locking_errors
def _test_monitor_tables_locking_errors(self):
    """Test that intensive read/write operations to the MySQL Monitor tables
    do not trigger locking errors.

    The test is successful if no error is generated by the end.
    """
    # Setting these variables will cause the Monitor to connect more
    # frequently to the backend hosts to check their health, thus increasing
    # the probability of locking errors to appear.
    self.run_query_proxysql_admin("UPDATE global_variables SET variable_value=100 WHERE variable_name='mysql-monitor_connect_interval'")
    self.run_query_proxysql_admin("UPDATE global_variables SET variable_value=100 WHERE variable_name='mysql-monitor_ping_interval'")
    self.run_query_proxysql_admin("LOAD MYSQL VARIABLES TO RUNTIME")

    queries = []
    q1 = "select * from monitor.mysql_server_connect_log ORDER BY RANDOM() LIMIT 10"
    q2 = "select * from monitor.mysql_server_ping_log ORDER BY RANDOM() LIMIT 10"
    for _ in xrange(10000):
        queries.append(random.choice([q1, q2]))

    pool = ThreadPool(processes=5)
    pool.map(self.run_query_proxysql_admin, queries)

    # If we reached this point without an error, it means that the test
    # has passed.
    self.assertEqual(1, 1)
Author: AlexeyDeyneko | Project: proxysql | Source: admin_tables_test.py
Example 3: test_threadsafe
def test_threadsafe(self):
    # Ensure that the eventdb is thread-safe by hammering on it with
    # multiple threads simultaneously. We should only get one positive.
    pool = ThreadPool(10)
    results = pool.map(self.event_db.check_event, repeat(self.event, 1000))
    self.assertEqual(results.count(True), 1)
    self.assertEqual(results.count(False), 999)
Author: jdswinbank | Project: Comet | Source: test_event_db.py
Example 4: __init__
def __init__(self):
    super(Foo, self).__init__(400, 400)
    l = pyglet.text.Label('FOOBAR', font_name="Courier Sans", font_size=20,
                          x=self.width // 2, y=self.height // 2,
                          multiline=True, width=200)
    pool = ThreadPool(processes=1)
    self.r = pool.apply_async(foo)

    @self.event
    def on_key_press(s, m):
        if s == pyglet.window.key.C:
            print("EXTERNAL")
            l.text = self.r.get()

    @self.event
    def on_draw():
        self.clear()
        l.draw()
        count = 10
        offset = (2 * pi) / 10.0
        for i in range(count):
            line((200, 200), (200 + cos(offset * i) * 100, 200 + sin(offset * i) * 100))

    pyglet.app.run()
Author: xoryouyou | Project: NetArgos | Source: pool.py
Example 5: fetch_plugins
def fetch_plugins(old_index):
    ans = {}
    pool = ThreadPool(processes=10)
    entries = tuple(parse_index())
    result = pool.map(partial(parallel_fetch, old_index), entries)
    for entry, plugin in zip(entries, result):
        if isinstance(plugin, dict):
            ans[entry.name] = plugin
        else:
            if entry.name in old_index:
                ans[entry.name] = old_index[entry.name]
            log('Failed to get plugin', entry.name, 'at', datetime.utcnow().isoformat(), 'with error:')
            log(plugin)
    # Move staged files
    for plugin in ans.itervalues():
        if plugin['file'].startswith('staging_'):
            src = plugin['file']
            plugin['file'] = src.partition('_')[-1]
            os.rename(src, plugin['file'])
    raw = bz2.compress(json.dumps(ans, sort_keys=True, indent=4, separators=(',', ': ')))
    atomic_write(raw, PLUGINS)
    # Cleanup any extra .zip files
    all_plugin_files = {p['file'] for p in ans.itervalues()}
    extra = set(glob.glob('*.zip')) - all_plugin_files
    for x in extra:
        os.unlink(x)
    return ans
Author: BatteringRam | Project: calibre | Source: plugins_mirror.py
Example 6: analyze_commits
def analyze_commits(project_name, target_repo, existing_target_branches, fork_list):
    print 'Analyzing commits'
    pool = ThreadPool(processes=10)
    existing_target_commits = []
    for fork_repo in fork_list:
        for target_branch in existing_target_branches:
            print '    Analyzing %s (branch: %s) ' % (fork_repo.full_name, target_branch),
            fork_repo_commits = fork_repo.get_commits(sha=target_branch)

            max_commits_to_analyze = 30
            analyzed_commits = 0
            fork_commits_to_analyze = []

            for fork_comm in fork_repo_commits:
                if analyzed_commits == max_commits_to_analyze:
                    break
                fork_commits_to_analyze.append(fork_comm)
                analyzed_commits += 1

            partial_c_in_root = functools.partial(commit_is_in_root,
                                                  existing_target_commits,
                                                  target_repo, fork_repo)
            pool.map(partial_c_in_root, fork_commits_to_analyze)
            print
Author: andresriancho | Project: w3af-misc | Source: fork_info.py
Example 7: _fit
def _fit(self, dataset):
    est = self.getOrDefault(self.estimator)
    epm = self.getOrDefault(self.estimatorParamMaps)
    numModels = len(epm)
    eva = self.getOrDefault(self.evaluator)
    tRatio = self.getOrDefault(self.trainRatio)
    seed = self.getOrDefault(self.seed)
    randCol = self.uid + "_rand"
    df = dataset.select("*", rand(seed).alias(randCol))
    condition = (df[randCol] >= tRatio)
    validation = df.filter(condition).cache()
    train = df.filter(~condition).cache()

    def singleTrain(paramMap):
        model = est.fit(train, paramMap)
        metric = eva.evaluate(model.transform(validation, paramMap))
        return metric

    pool = ThreadPool(processes=min(self.getParallelism(), numModels))
    metrics = pool.map(singleTrain, epm)
    train.unpersist()
    validation.unpersist()

    if eva.isLargerBetter():
        bestIndex = np.argmax(metrics)
    else:
        bestIndex = np.argmin(metrics)
    bestModel = est.fit(dataset, epm[bestIndex])
    return self._copyValues(TrainValidationSplitModel(bestModel, metrics))
Author: Altiscale | Project: spark | Source: tuning.py
Example 8: bench_compression_comparison
def bench_compression_comparison(n_chunks, df_length, append_mul, pool_size, pool_step, repeats,
                                 use_raw_lz4, use_HC):
    _str = construct_test_data(df_length, append_mul)
    chunk_size = len(_str) / 1024 ** 2.0
    _strarr = [_str] * n_chunks

    # Single threaded
    # ---------------
    measurements = bench_single(repeats, _strarr, use_HC)
    print_results(1, chunk_size, n_chunks, chunk_size * n_chunks, measurements)
    single_mean = np.mean(measurements)

    # Multi-threaded
    # --------------
    for sz in range(2, pool_size + 1, pool_step):
        if use_raw_lz4:
            pool = ThreadPool(sz)
        else:
            pool = None
            c.set_compression_pool_size(sz)
        measurements = bench_multi(repeats, _strarr, use_HC, pool=pool)
        print_results(sz, chunk_size, n_chunks, chunk_size * n_chunks, measurements, compare=single_mean)
        if pool:
            pool.close()
            pool.join()
    print("")
Author: manahl | Project: arctic | Source: benchmark_lz4.py
Example 9: _load_lyrics
def _load_lyrics(self, songdict):
    total = []
    for songlist in songdict.values():
        total += songlist
    pool = ThreadPool()
    pool.map(Song.load, total)
Author: ajm188 | Project: fugl | Source: tswizzle.py
Example 10: copytree_and_gzip
def copytree_and_gzip(self, source_dir, target_dir):
    """
    Copies the provided source directory to the provided target directory.

    Gzips JavaScript, CSS and HTML and other files along the way.
    """
    # Figure out what we're building...
    build_list = []
    # Walk through the source directory...
    for (dirpath, dirnames, filenames) in os.walk(source_dir):
        for f in filenames:
            # Figure out what is going where
            source_path = os.path.join(dirpath, f)
            rel_path = os.path.relpath(dirpath, source_dir)
            target_path = os.path.join(target_dir, rel_path, f)
            # Add it to our list to build
            build_list.append((source_path, target_path))

    logger.debug("Gzipping {} files".format(len(build_list)))

    # Build em all
    if not getattr(self, 'pooling', False):
        [self.copyfile_and_gzip(*u) for u in build_list]
    else:
        cpu_count = multiprocessing.cpu_count()
        logger.debug("Pooling build on {} CPUs".format(cpu_count))
        pool = ThreadPool(processes=cpu_count)
        pool.map(self.pooled_copyfile_and_gzip, build_list)
Author: datadesk | Project: django-bakery | Source: build.py
Example 11: run_command
def run_command(self, command, *args, **opts):
    if len(self.members) <= 0:
        raise TomcatError("Cluster has no members")
    hosts = opts.setdefault('hosts', self.members.keys())
    threads = opts.setdefault('threads',
                              min(self.member_count(), self.max_threads))
    abort_on_error = opts.setdefault('abort_on_error', False)
    if abort_on_error:
        abort = Value('b', 0)

    def run_cmd(host):
        try:
            if abort_on_error and abort.value:
                raise TomcatError('Aborted')
            self.log.debug("Performing %s%s on %s", command, args, host)
            self._run_progress_callback(event=events.CMD_START,
                                        command=command, args=args, node=host)
            rv = getattr(self.members[host], command)(*args)
            self._run_progress_callback(event=events.CMD_END,
                                        command=command, args=args, node=host)
        except Exception as e:
            if abort_on_error:
                abort.value = True
            rv = e
        return (host, rv)

    pool = ThreadPool(processes=threads)
    return ClusterCommandResults(pool.map(run_cmd, hosts))
Author: mvalenzuelaDandB | Project: pytomcat | Source: __init__.py
Example 12: resolve_playlist
def resolve_playlist(url):
    resolve_pool = ThreadPool(processes=16)
    logger.info("Resolving YouTube-Playlist '%s'", url)
    playlist = []

    page = 'first'
    while page:
        params = {
            'playlistId': url,
            'maxResults': 50,
            'key': yt_key,
            'part': 'contentDetails'
        }
        if page and page != "first":
            logger.debug("Get YouTube-Playlist '%s' page %s", url, page)
            params['pageToken'] = page

        result = session.get(yt_api_endpoint + 'playlistItems', params=params)
        data = result.json()
        page = data.get('nextPageToken')

        for item in data["items"]:
            video_id = item['contentDetails']['videoId']
            playlist.append(video_id)

    playlist = resolve_pool.map(resolve_url, playlist)
    resolve_pool.close()
    return [item for item in playlist if item]
Author: Chateaudur | Project: mopidy-youtube | Source: backend.py
Example 13: StartInfrastructure
def StartInfrastructure(inf_id, auth):
    """
    Start all virtual machines in an infrastructure previously stopped.

    Args:
    - inf_id(str): infrastructure id.
    - auth(Authentication): parsed authentication tokens.

    Return(str): error messages; empty string means all was ok.
    """
    InfrastructureManager.logger.info("Starting the infrastructure id: " + str(inf_id))

    sel_inf = InfrastructureManager.get_infrastructure(inf_id, auth)
    exceptions = []
    if Config.MAX_SIMULTANEOUS_LAUNCHES > 1:
        pool = ThreadPool(processes=Config.MAX_SIMULTANEOUS_LAUNCHES)
        pool.map(
            lambda vm: InfrastructureManager._start_vm(vm, auth, exceptions),
            reversed(sel_inf.get_vm_list())
        )
    else:
        for vm in sel_inf.get_vm_list():
            InfrastructureManager._start_vm(vm, auth, exceptions)

    if exceptions:
        msg = ""
        for e in exceptions:
            msg += str(e) + "\n"
        raise Exception("Error starting the infrastructure: %s" % msg)

    InfrastructureManager.logger.info("Infrastructure successfully restarted")
    return ""
Author: lxhiguera | Project: im | Source: InfrastructureManager.py
Example 14: monitorthread
def monitorthread(self):
    stock_codes = []
    for item in self.conn.mystock.todaydata.find():
        stock_codes.append(item['code'])
    pool = ThreadPool(40)
    pool.map(self.proxy, stock_codes)
Author: tuoxie119 | Project: stocktrade | Source: fenxing.py
Example 15: collect_logs
def collect_logs(self):
    """Collect all the microservice log files."""
    log_dir = os.path.join(self.options.log_dir, 'service_logs')
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)

    def fetch_service_log(service):
        try:
            logging.debug('Fetching logs for "%s"...', service)
            deployer = (self if service in HALYARD_SERVICES
                        else self.__spinnaker_deployer)
            deployer.do_fetch_service_log_file(service, log_dir)
        except Exception as ex:
            message = 'Error fetching log for service "{service}": {ex}'.format(
                service=service, ex=ex)
            if ex.message.find('No such file') >= 0:
                message += '\n    Perhaps the service never started.'
                # don't log since the error was already captured.
            else:
                logging.error(message)
                message += '\n{trace}'.format(
                    trace=traceback.format_exc())
            write_data_to_secure_path(
                message, os.path.join(log_dir, service + '.log'))

    logging.info('Collecting server log files into "%s"', log_dir)
    all_services = list(SPINNAKER_SERVICES)
    all_services.extend(HALYARD_SERVICES)
    thread_pool = ThreadPool(len(all_services))
    thread_pool.map(fetch_service_log, all_services)
    thread_pool.terminate()
Author: jtk54 | Project: spinnaker | Source: validate_bom__deploy.py
Example 16: run_test_case_list
def run_test_case_list(
        self, test_case_list, max_concurrent, timeout_ok=False,
        max_retries=0, retry_interval_secs=5, full_trace=False):
    """Run a list of test cases.

    Args:
      test_case_list: [list of OperationContract] Specifies the tests to run.
      max_concurrent: [int] The number of cases that can be run concurrently.
      timeout_ok: [bool] If True then individual tests can timeout and still
          be considered having a successful AgentOperationStatus.
      max_retries: [int] Number of independent retries permitted on
          individual operations if the operation status fails. A value of 0
          indicates that a test should only be given a single attempt.
      retry_interval_secs: [int] Time between retries of individual operations.
      full_trace: [bool] If True then provide detailed execution tracing.
    """
    num_threads = min(max_concurrent, len(test_case_list))
    pool = ThreadPool(processes=num_threads)

    def run_one(test_case):
        """Helper function to run individual tests."""
        self.run_test_case(
            test_case=test_case, timeout_ok=timeout_ok,
            max_retries=max_retries, retry_interval_secs=retry_interval_secs,
            full_trace=full_trace)

    self.logger.info(
        'Running %d tests across %d threads.',
        len(test_case_list), num_threads)
    pool.map(run_one, test_case_list)
    self.logger.info('Finished %d tests.', len(test_case_list))
Author: jtk54 | Project: citest | Source: agent_test_case.py
Example 17: testParallelTableUploadAndDownloadTunnel
def testParallelTableUploadAndDownloadTunnel(self):
    p = 'ds=test'

    table, data = self._gen_table(partition=p.split('=', 1)[0], partition_type='string',
                                  partition_val=p.split('=', 1)[1])
    self.assertTrue(table.exist_partition(p))
    records = [table.new_record(values=d) for d in data]

    n_blocks = 5
    blocks = list(range(n_blocks))
    n_threads = 2
    thread_pool = ThreadPool(n_threads)

    def gen_block_records(block_id):
        c = len(data)
        st = int(c / n_blocks * block_id)
        if block_id < n_blocks - 1:
            ed = int(c / n_blocks * (block_id + 1))
        else:
            ed = c
        return records[st:ed]

    def write(w):
        def inner(arg):
            idx, r = arg
            w.write(idx, r)
        return inner

    with table.open_writer(partition=p, blocks=blocks) as writer:
        thread_pool.map(write(writer), [(i, gen_block_records(i)) for i in blocks])

    for step in range(1, 4):
        reads = []
        expected = []
        with table.open_reader(partition=p) as reader:
            count = reader.count

            for i in range(n_blocks):
                start = int(count / n_blocks * i)
                if i < n_blocks - 1:
                    end = int(count / n_blocks * (i + 1))
                else:
                    end = count
                for record in reader[start:end:step]:
                    reads.append(record)
                expected.extend(data[start:end:step])

        self.assertEqual(len(expected), len(reads))
        for val1, val2 in zip(expected, [r.values for r in reads]):
            for it1, it2 in zip(val1[:-1], val2[:-1]):
                if isinstance(it1, dict):
                    self.assertEqual(len(it1), len(it2))
                    self.assertTrue(any(it1[k] == it2[k] for k in it1))
                elif isinstance(it1, list):
                    self.assertSequenceEqual(it1, it2)
                else:
                    self.assertEqual(it1, it2)

    table.drop()
Author: fivejjs | Project: aliyun-odps-python-sdk | Source: test_tunnels.py
Example 18: read
def read(self, sftppath, localPath=None, numParallelConnections=1):
    if localPath is None:
        localPath = os.getcwd()  # local path - can be changed later
    sftp = paramiko.SFTPClient.from_transport(self.transport)
    if (numParallelConnections > 1):
        pool = ThreadPool(numParallelConnections)

    def getFile(sftppath, localpath):
        pconnection = SFTPConnection(self.connectionInfo)
        pconnection.connect()
        psftp = paramiko.SFTPClient.from_transport(pconnection.transport)
        psftp.get(sftppath, localpath)
        psftp.close()
        pconnection.close()

    def recursiveRead(sftp, sftppath, localPath):
        fileattr = sftp.lstat(sftppath)
        if not stat.S_ISDIR(fileattr.st_mode):  # it is a file
            if (numParallelConnections > 1):
                pool.apply_async(getFile, args=(sftppath, os.path.join(localPath, os.path.basename(sftppath))))
            else:
                sftp.get(sftppath, os.path.join(localPath, os.path.basename(sftppath)))
        else:  # it is a directory
            try:  # creating local directory, using try-catch to handle race conditions
                os.makedirs(os.path.join(localPath, os.path.basename(sftppath)))
            except OSError as exception:
                if exception.errno != errno.EEXIST:
                    raise
            for file in sftp.listdir_attr(sftppath):
                recursiveRead(sftp, os.path.join(sftppath, file.filename),
                              os.path.join(localPath, os.path.basename(sftppath)))

    recursiveRead(sftp, sftppath, localPath)
    sftp.close()
    if (numParallelConnections > 1):
        pool.close()
        pool.join()
Author: guptarajat | Project: data-connectors | Source: sftp_connector.py
Example 19: generate_my_keys
def generate_my_keys(the_n, the_e, the_d):
    pool = ThreadPool(processes=1)
    async_result = pool.apply_async(newKey, (the_n, the_e, the_d))  # tuple of args for newKey
    (n, e, d) = async_result.get()  # get the return value from the function
    return (n, e, d)
Author: aabdulr | Project: Python-Secure-Chat | Source: RSA.py
Example 20: get_available_google_ips
def get_available_google_ips(seeds, threads=None, max=None):
    global print_progress
    threads = threads if threads else (500 if monkey else 10)
    max = max if max else 50
    print_progress = print_progress_builder(max)
    gen = random_ip_generator(seeds)
    pool = ThreadPool(processes=threads)
    available_ips = []
    ips = set()
    emptyCount = 0
    while len(available_ips) <= max:
        latent_ips = [gen.next() for _ in range(threads)]
        print '%s' % latent_ips
        latent_ips = set(latent_ips).difference(ips)
        print '%s' % latent_ips
        if not latent_ips:
            print "emptyCount:%s" % emptyCount
            if emptyCount > 1:
                break
            emptyCount += 1
            continue
        results = pool.map(ping, latent_ips)
        for ip, dt in results:
            if dt > 0:
                if ip in ips:
                    continue
                available_ips.append((ip, dt))
                ips.add(ip)
    sorted_ips = map(lambda x: x[0],
                     sorted(available_ips,
                            lambda (_, a), (__, b): bi_value(a - b)))
Author: qxo | Project: google-ip-explorer | Source: google.py
Note: The multiprocessing.pool.ThreadPool class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The code snippets are taken from open-source projects contributed by their respective authors; copyright remains with the original authors, and redistribution and use should follow the license of the corresponding project. Do not reproduce without permission.