This article collects typical usage examples of Python's multiprocessing.get_context function. If you have been wondering what get_context does, how to call it, and what real-world usage looks like, the hand-picked examples below may help.
The following presents 20 code examples of get_context, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps our system recommend better Python code samples.
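Before the examples, a minimal sketch of the core API may help. get_context() returns a context object exposing the same API as the multiprocessing module but bound to one start method; unlike set_start_method(), it does not change the process-wide default. The worker function name below is illustrative.

import multiprocessing

def greet(name):
    print('hello from', name)

if __name__ == '__main__':
    # A context is a self-contained copy of the multiprocessing API
    # bound to one start method ('fork', 'spawn', or 'forkserver').
    ctx = multiprocessing.get_context('spawn')
    p = ctx.Process(target=greet, args=('spawned child',))
    p.start()
    p.join()
    # Other code in the same process can keep using a different method,
    # since get_context() leaves the global default untouched.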
Example 1: test_get_context_using_python3_posix
def test_get_context_using_python3_posix():
    """ get_context() respects configuration.
    If default context is changed this test will need to change too.
    """
    assert get_context() is multiprocessing.get_context(None)
    with dask.config.set({"multiprocessing.context": "forkserver"}):
        assert get_context() is multiprocessing.get_context("forkserver")
    with dask.config.set({"multiprocessing.context": "spawn"}):
        assert get_context() is multiprocessing.get_context("spawn")
Developer: caseyclements, Project: dask, Lines of code: 10, Source file: test_multiprocessing.py
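The dask-side get_context() asserted above is not shown. A plausible sketch of such a config-driven helper, assuming dask.config.get(); the real dask implementation may differ:

import multiprocessing
import dask

def get_context():
    # None asks multiprocessing for the platform default method.
    method = dask.config.get("multiprocessing.context", None)
    return multiprocessing.get_context(method)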
Example 2: get_multiproc_context
def get_multiproc_context(capabilities):
    best_concurrency = capabilities.get('Process Startup Method', 'fork')
    if hasattr(multiprocessing, 'get_context'):
        for each in (best_concurrency, 'fork', 'spawn'):
            if hasattr(multiprocessing, 'get_all_start_methods'):
                if each in multiprocessing.get_all_start_methods():
                    return multiprocessing.get_context(each)
            else:
                try:
                    return multiprocessing.get_context(each)
                except ValueError:
                    pass  # invalid concurrency for this system
    return None
Developer: godaddy, Project: Thespian, Lines of code: 13, Source file: multiprocCommon.py
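A hypothetical call to the function above (the capabilities key is taken from the example itself): if the requested method is unavailable, the loop falls back to 'fork' and then 'spawn' before returning None.

# Ask for forkserver; fall back automatically if the platform lacks it.
ctx = get_multiproc_context({'Process Startup Method': 'forkserver'})
if ctx is not None:
    ctx.Process(target=print, args=('worker started',)).start()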
Example 3: main
def main():
    ctx = mp.get_context('spawn')
    q = ctx.Queue()
    p = ctx.Process(target=foo, args=(q,))
    p.start()
    print(q.get())
    p.join()
Developer: showa-yojyo, Project: bin, Lines of code: 7, Source file: mp05context.py
Example 4: loadAttributes
def loadAttributes(self, numberOfThreads=7):
    # Put the driver directories in a queue
    ctx = multiprocessing.get_context('fork')
    driversInQueue = ctx.Queue()
    driverFiles = os.listdir(self.__dir)
    numberOfDriversToProcess = 0
    for driverFile in driverFiles:
        driverId = int(driverFile.split('.')[0])
        driver = Driver(driverId)
        driversInQueue.put(driver)
        numberOfDriversToProcess += 1
    # The worker function executed in each reader process
    def readDataFunction(inputQueue, outputQueue, driverTopDir):
        while True:
            driver = inputQueue.get()
            driver.readTripsFromDirectory(self.__dir)
            numberOfTrips = driver.numberOfTrips()
            tripData = []
            for i in range(numberOfTrips):
                trip = driver.getTrip(i + 1)
                values, labels = trip.attributes()
                tripData.append((trip.id(), values, labels))
            outputQueue.put((driver.id(), tripData))
        return
    # Start the reader processes
    threads = []
    driversOutQueue = ctx.Queue()
    for i in range(numberOfThreads):
        thread = ctx.Process(target=readDataFunction, args=(driversInQueue, driversOutQueue, self.__dir))
        thread.start()
        threads.append(thread)
    # Set up the logger
    log = ProcessLogger(numberOfDriversToProcess, "Drivers processed : ")
    outputData = []
    labels = []
    for i in range(numberOfDriversToProcess):
        driverId, tripData = driversOutQueue.get()
        # Loop over the trips for this driver
        for trip in tripData:
            if len(labels) == 0:  # This is the first entry. Retrieve the header.
                labels.append('driverId')
                labels.append('tripId')
                for label in trip[2]:
                    labels.append(label)
                outputData = numpy.array([]).reshape(0, len(labels))
            tripId = trip[0]
            attributes = trip[1]
            tripRow = numpy.hstack((driverId, tripId, attributes))
            outputData = numpy.vstack((outputData, tripRow))
        log.taskEnded()
    for t in threads:
        t.terminate()
    return pandas.DataFrame(outputData, columns=labels)
Developer: hyb148, Project: Kaggle-AXA-telematics, Lines of code: 60, Source file: DriverData.py
Example 5: main
def main(center, host, port, http_port, bokeh_port, show, _bokeh, bokeh_whitelist):
    given_host = host
    host = host or get_ip()
    ip = socket.gethostbyname(host)
    loop = IOLoop.current()
    scheduler = Scheduler(center, ip=ip,
                          services={('http', http_port): HTTPScheduler})
    if center:
        loop.run_sync(scheduler.sync_center)
    scheduler.start(port)

    if _bokeh:
        try:
            import bokeh
            import distributed.bokeh
            hosts = ['%s:%d' % (h, bokeh_port) for h in
                     ['localhost', '127.0.0.1', ip, socket.gethostname(),
                      host] + list(bokeh_whitelist)]
            dirname = os.path.dirname(distributed.__file__)
            paths = [os.path.join(dirname, 'bokeh', name)
                     for name in ['status', 'tasks']]
            binname = sys.argv[0][:-len('dscheduler')] + 'bokeh'
            args = ([binname, 'serve'] + paths +
                    ['--log-level', 'warning',
                     '--check-unused-sessions=50',
                     '--unused-session-lifetime=1',
                     '--port', str(bokeh_port)] +
                    sum([['--host', host] for host in hosts], []))
            if show:
                args.append('--show')
            bokeh_options = {'host': host if given_host else '127.0.0.1',
                             'http-port': http_port,
                             'tcp-port': port,
                             'bokeh-port': bokeh_port}
            with open('.dask-web-ui.json', 'w') as f:
                json.dump(bokeh_options, f, indent=2)
            if sys.version_info[0] >= 3:
                from bokeh.command.bootstrap import main
                ctx = multiprocessing.get_context('spawn')
                bokeh_proc = ctx.Process(target=main, args=(args,))
                bokeh_proc.daemon = True
                bokeh_proc.start()
            else:
                bokeh_proc = subprocess.Popen(args)
            logger.info(" Bokeh UI at: http://%s:%d/status/"
                        % (ip, bokeh_port))
        except ImportError:
            logger.info("Please install Bokeh to get Web UI")
        except Exception as e:
            logger.warn("Could not start Bokeh web UI", exc_info=True)

    loop.start()
    loop.close()
    scheduler.stop()
    bokeh_proc.terminate()
    logger.info("End scheduler at %s:%d", ip, port)
Developer: sonlia, Project: distributed, Lines of code: 60, Source file: dscheduler.py
Example 6: _do
def _do():
    mp_ctx = multiprocessing.get_context('spawn')
    cpu_count = multiprocessing.cpu_count()
    with ThreadPoolExecutor(max_workers=cpu_count) as executor:
        yield from yield_pkgs(
            executor, submit_pkgs(executor, [Package.root_package()], mp_ctx),
            mp_ctx)
Developer: harai, Project: auto-import-jedi-sample, Lines of code: 7, Source file: packages.py
Example 7: test_read_text
def test_read_text(hdfs):
    db = pytest.importorskip('dask.bag')
    import multiprocessing as mp
    pool = mp.get_context('spawn').Pool(2)

    with hdfs.open('%s/text.1.txt' % basedir, 'wb') as f:
        f.write('Alice 100\nBob 200\nCharlie 300'.encode())
    with hdfs.open('%s/text.2.txt' % basedir, 'wb') as f:
        f.write('Dan 400\nEdith 500\nFrank 600'.encode())
    with hdfs.open('%s/other.txt' % basedir, 'wb') as f:
        f.write('a b\nc d'.encode())

    b = db.read_text('hdfs://%s/text.*.txt' % basedir)
    with dask.config.set(pool=pool):
        result = b.str.strip().str.split().map(len).compute()
    assert result == [2, 2, 2, 2, 2, 2]

    b = db.read_text('hdfs://%s/other.txt' % basedir)
    with dask.config.set(pool=pool):
        result = b.str.split().flatten().compute()
    assert result == ['a', 'b', 'c', 'd']
Developer: mrocklin, Project: dask, Lines of code: 25, Source file: test_hdfs.py
Example 8: test_ipc_handle_serialization
def test_ipc_handle_serialization(self):
    # prepare data for IPC
    arr = np.arange(10, dtype=np.intp)
    devarr = cuda.to_device(arr)
    # create IPC handle
    ctx = cuda.current_context()
    ipch = ctx.get_ipc_handle(devarr.gpu_data)
    # pickle
    buf = pickle.dumps(ipch)
    ipch_recon = pickle.loads(buf)
    self.assertIs(ipch_recon.base, None)
    self.assertEqual(tuple(ipch_recon.handle), tuple(ipch.handle))
    self.assertEqual(ipch_recon.size, ipch.size)
    # spawn new process for testing
    ctx = mp.get_context('spawn')
    result_queue = ctx.Queue()
    args = (ipch, result_queue)
    proc = ctx.Process(target=serialize_ipc_handle_test, args=args)
    proc.start()
    succ, out = result_queue.get()
    if not succ:
        self.fail(out)
    else:
        np.testing.assert_equal(arr, out)
    proc.join(3)
Developer: yuguen, Project: numba, Lines of code: 28, Source file: test_ipc.py
Example 9: start_child
def start_child(self):
    ctx = mp.get_context(self.SPAWN_METHOD)
    state = ctx.Value(_State, False, False, False)
    process = ctx.Process(target=_worker,
                          args=(state, self.loader))
    process.start()
    # asyncio.async() was renamed ensure_future() and is a syntax
    # error on Python 3.7+, so the modern spelling is used here.
    asyncio.ensure_future(self.connect(process, state))
Developer: chemiron, Project: aiopool, Lines of code: 7, Source file: spawn.py
Example 10: test_ipc_handle
def test_ipc_handle(self):
    # prepare data for IPC
    arr = np.arange(10, dtype=np.intp)
    devarr = cuda.to_device(arr)
    # create IPC handle
    ctx = cuda.current_context()
    ipch = ctx.get_ipc_handle(devarr.gpu_data)
    # manually prepare for serialization as bytes
    handle_bytes = bytes(ipch.handle)
    size = ipch.size
    # spawn new process for testing
    ctx = mp.get_context('spawn')
    result_queue = ctx.Queue()
    args = (handle_bytes, size, result_queue)
    proc = ctx.Process(target=base_ipc_handle_test, args=args)
    proc.start()
    succ, out = result_queue.get()
    if not succ:
        self.fail(out)
    else:
        np.testing.assert_equal(arr, out)
    proc.join(3)
Developer: yuguen, Project: numba, Lines of code: 25, Source file: test_ipc.py
Example 11: __init__
def __init__(self, env_fns, spaces=None, context='spawn'):
    """
    If you don't specify observation_space, we'll have to create a dummy
    environment to get it.
    """
    ctx = mp.get_context(context)
    if spaces:
        observation_space, action_space = spaces
    else:
        logger.log('Creating dummy env object to get spaces')
        with logger.scoped_configure(format_strs=[]):
            dummy = env_fns[0]()
            observation_space, action_space = dummy.observation_space, dummy.action_space
            dummy.close()
            del dummy
    VecEnv.__init__(self, len(env_fns), observation_space, action_space)
    self.obs_keys, self.obs_shapes, self.obs_dtypes = obs_space_info(observation_space)
    self.obs_bufs = [
        {k: ctx.Array(_NP_TO_CT[self.obs_dtypes[k].type], int(np.prod(self.obs_shapes[k]))) for k in self.obs_keys}
        for _ in env_fns]
    self.parent_pipes = []
    self.procs = []
    with clear_mpi_env_vars():
        for env_fn, obs_buf in zip(env_fns, self.obs_bufs):
            wrapped_fn = CloudpickleWrapper(env_fn)
            parent_pipe, child_pipe = ctx.Pipe()
            proc = ctx.Process(target=_subproc_worker,
                               args=(child_pipe, parent_pipe, wrapped_fn, obs_buf, self.obs_shapes, self.obs_dtypes, self.obs_keys))
            proc.daemon = True
            self.procs.append(proc)
            self.parent_pipes.append(parent_pipe)
            proc.start()
            child_pipe.close()
    self.waiting_step = False
    self.viewer = None
Developer: MrGoogol, Project: baselines, Lines of code: 35, Source file: shmem_vec_env.py
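Example 11's obs_bufs relies on ctx.Array, a lock-wrapped shared-memory array that child processes can fill in place. A minimal self-contained sketch of that idea (names are illustrative, not from baselines):

import multiprocessing as mp
import numpy as np

def writer(buf):
    # Reinterpret the shared ctypes buffer as a numpy array and fill it.
    arr = np.frombuffer(buf.get_obj(), dtype=np.float64)
    arr[:] = np.arange(len(arr))

if __name__ == '__main__':
    ctx = mp.get_context('spawn')
    buf = ctx.Array('d', 8)  # eight doubles in shared memory
    p = ctx.Process(target=writer, args=(buf,))
    p.start()
    p.join()
    # The parent sees the child's writes without any copying.
    print(np.frombuffer(buf.get_obj(), dtype=np.float64))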
Example 12: __init__
def __init__(self, initialValue=None):
    ctx = multiprocessing.get_context()
    self.mgr = multiprocessing.Manager()
    self.data = self.mgr.dict()
    self.data["value"] = initialValue
    self.condition = ctx.Condition()
Developer: wannabegeek, Project: AlgoBacktest, Lines of code: 7, Source file: mp_example.py
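Example 12 shows only the constructor. A hedged sketch of how such a class might publish and consume the value, pairing the manager dict with the condition; these methods are assumptions, not part of the original:

def setValue(self, value):
    # Publish a new value and wake any waiting processes.
    with self.condition:
        self.data["value"] = value
        self.condition.notify_all()

def getValue(self):
    # Block until a value other than None has been published.
    with self.condition:
        while self.data["value"] is None:
            self.condition.wait()
        return self.data["value"]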
Example 13: testfs
def testfs(tmpdir):
    # We can't use forkserver because we have to make sure
    # that the server inherits the per-test stdout/stderr file
    # descriptors.
    if hasattr(multiprocessing, 'get_context'):
        mp = multiprocessing.get_context('fork')
    else:
        # Older versions only support *fork* anyway
        mp = multiprocessing
    if threading.active_count() != 1:
        raise RuntimeError("Multi-threaded test running is not supported")

    mnt_dir = str(tmpdir)
    with mp.Manager() as mgr:
        cross_process = mgr.Namespace()
        mount_process = mp.Process(target=run_fs,
                                   args=(mnt_dir, cross_process))
        mount_process.start()
        try:
            wait_for_mount(mount_process, mnt_dir)
            yield (mnt_dir, cross_process)
        except:
            cleanup(mnt_dir)
            raise
        else:
            umount(mount_process, mnt_dir)
Developer: cbenhagen, Project: python-llfuse, Lines of code: 28, Source file: test_fs.py
Example 14: __init__
def __init__(self, host='127.0.0.1', http_port=9786, tcp_port=8786,
             bokeh_port=8787, bokeh_whitelist=[], log_level='info',
             show=False, prefix=None, use_xheaders=False):
    self.port = bokeh_port
    ip = socket.gethostbyname(host)
    hosts = ['localhost',
             '127.0.0.1',
             ip,
             host]
    with ignoring(Exception):
        hosts.append(socket.gethostbyname(ip))
    with ignoring(Exception):
        hosts.append(socket.gethostbyname(socket.gethostname()))
    hosts = ['%s:%d' % (h, bokeh_port) for h in hosts]
    hosts.append("*")
    hosts.extend(map(str, bokeh_whitelist))
    # binname and paths are presumably defined elsewhere in the original
    # source file (compare Example 5, which builds them the same way).
    args = ([binname, 'serve'] + paths +
            ['--log-level', 'warning',
             '--check-unused-sessions=50',
             '--unused-session-lifetime=1',
             '--port', str(bokeh_port)] +
            sum([['--host', h] for h in hosts], []))
    if prefix:
        args.extend(['--prefix', prefix])
    if show:
        args.append('--show')
    if use_xheaders:
        args.append('--use-xheaders')
    if log_level in ('debug', 'info', 'warning', 'error', 'critical'):
        args.extend(['--log-level', log_level])
    bokeh_options = {'host': host,
                     'http-port': http_port,
                     'tcp-port': tcp_port,
                     'bokeh-port': bokeh_port}
    with open('.dask-web-ui.json', 'w') as f:
        json.dump(bokeh_options, f, indent=2)
    if sys.version_info[0] >= 3:
        from bokeh.command.bootstrap import main
        ctx = multiprocessing.get_context('spawn')
        self.process = ctx.Process(target=main, args=(args,))
        self.process.daemon = True
        self.process.start()
    else:
        import subprocess
        self.process = subprocess.Popen(args)
    logger.info(" Bokeh UI at: http://%s:%d/status/"
                % (ip, bokeh_port))
Developer: gdementen, Project: distributed, Lines of code: 60, Source file: application.py
Example 15: pool
def pool(self):
    return Pool(
        processes=self.processes,
        initializer=initializer,
        initargs=self.initargs,
        context=get_context('forkserver'),
    )
Developer: qari, Project: encoded, Lines of code: 7, Source file: mpindexer.py
Example 16: _exec_task_process
def _exec_task_process(self, ctxt, task_id, task_type, origin, destination,
                       instance, task_info):
    mp_ctx = multiprocessing.get_context('spawn')
    mp_q = mp_ctx.Queue()
    mp_log_q = mp_ctx.Queue()
    p = mp_ctx.Process(
        target=_task_process,
        args=(ctxt, task_id, task_type, origin, destination, instance,
              task_info, mp_q, mp_log_q))

    p.start()
    LOG.info("Task process started: %s", task_id)
    self._rpc_conductor_client.set_task_host(
        ctxt, task_id, self._server, p.pid)

    self._handle_mp_log_events(p, mp_log_q)
    p.join()

    if mp_q.empty():
        raise exception.CoriolisException("Task canceled")
    result = mp_q.get(False)
    if isinstance(result, str):
        raise exception.TaskProcessException(result)
    return result
Developer: cloudbase, Project: coriolis, Lines of code: 25, Source file: server.py
Example 17: _launch
def _launch(self):
    context = multiprocessing.get_context('fork')
    client_ch, server_ch = context.Pipe()
    self.process = context.Process(
        target=self.run_server, args=(server_ch,), daemon=True)
    self.process.start()
    server_ch.close()
    self.client_ch = Connection.from_Connection(client_ch)
Developer: drewja, Project: curio, Lines of code: 8, Source file: workers.py
Example 18: pool
def pool(self):
    return Pool(
        processes=self.queue_worker.processes,
        initializer=initializer,
        initargs=self.initargs,
        maxtasksperchild=self.maxtasks,
        context=get_context('forkserver'),
    )
Developer: ENCODE-DCC, Project: snovault, Lines of code: 8, Source file: mpindexer.py
Example 19: init_pool
def init_pool(self):
    return Pool(
        processes=self.processes,
        initializer=initializer,
        initargs=self.initargs,
        maxtasksperchild=self.maxtasks,
        context=get_context('spawn'),
    )
Developer: j1z0, Project: snovault, Lines of code: 8, Source file: mpindexer.py
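Examples 15, 18, and 19 pass context= to the multiprocessing.pool.Pool class instead of calling Pool() on a context object; the two spellings are equivalent. A minimal sketch:

from multiprocessing import get_context
from multiprocessing.pool import Pool

if __name__ == '__main__':
    # Passing context= to the Pool class selects the start method...
    with Pool(processes=2, context=get_context('spawn')) as pool:
        print(pool.map(abs, [-3, -2, -1]))
    # ...and behaves like asking the context for a pool directly.
    with get_context('spawn').Pool(2) as pool:
        print(pool.map(abs, [-3, -2, -1]))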
Example 20: test_no_collision
def test_no_collision(self):
    bar1 = self.import_bar1()
    bar2 = self.import_bar2()
    with capture_cache_log() as buf:
        res1 = bar1()
    cachelog = buf.getvalue()
    # bar1 should save new index and data
    self.assertEqual(cachelog.count('index saved'), 1)
    self.assertEqual(cachelog.count('data saved'), 1)
    self.assertEqual(cachelog.count('index loaded'), 0)
    self.assertEqual(cachelog.count('data loaded'), 0)
    with capture_cache_log() as buf:
        res2 = bar2()
    cachelog = buf.getvalue()
    # bar2 should save new index and data
    self.assertEqual(cachelog.count('index saved'), 1)
    self.assertEqual(cachelog.count('data saved'), 1)
    self.assertEqual(cachelog.count('index loaded'), 0)
    self.assertEqual(cachelog.count('data loaded'), 0)
    self.assertNotEqual(res1, res2)

    try:
        # Make sure we can spawn new process without inheriting
        # the parent context.
        mp = multiprocessing.get_context('spawn')
    except ValueError:
        # Skip rather than print: continuing without `mp` would
        # raise a NameError on the next line.
        self.skipTest("missing spawn context")

    q = mp.Queue()
    # Start new process that calls `cache_file_collision_tester`
    proc = mp.Process(target=cache_file_collision_tester,
                      args=(q, self.tempdir,
                            self.modname_bar1,
                            self.modname_bar2))
    proc.start()
    # Get results from the process
    log1 = q.get()
    got1 = q.get()
    log2 = q.get()
    got2 = q.get()
    proc.join()

    # The remote execution result of bar1() and bar2() should match
    # the one executed locally.
    self.assertEqual(got1, res1)
    self.assertEqual(got2, res2)
    # The remote should have loaded bar1 from cache
    self.assertEqual(log1.count('index saved'), 0)
    self.assertEqual(log1.count('data saved'), 0)
    self.assertEqual(log1.count('index loaded'), 1)
    self.assertEqual(log1.count('data loaded'), 1)
    # The remote should have loaded bar2 from cache
    self.assertEqual(log2.count('index saved'), 0)
    self.assertEqual(log2.count('data saved'), 0)
    self.assertEqual(log2.count('index loaded'), 1)
    self.assertEqual(log2.count('data loaded'), 1)
Developer: yuguen, Project: numba, Lines of code: 58, Source file: test_dispatcher.py
Note: the multiprocessing.get_context examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers, and copyright in the source code remains with the original authors. Consult each project's License before redistributing or reusing the code; do not repost without permission.