This article collects typical usage examples of the Python multiprocessing.SimpleQueue class. If you have been wondering what the SimpleQueue class is for, how to use it, or what it looks like in real code, the curated examples below may help.
Twenty code examples of the SimpleQueue class are shown below, sorted by popularity by default.
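Before the project-specific examples, here is a minimal, self-contained sketch of the pattern most of them share: the parent process creates a SimpleQueue, hands it to a child Process, and exchanges picklable items through put() and get(), with a None sentinel to tell the worker to stop. The worker function and the squaring payload are illustrative only and are not taken from any of the projects below.

from multiprocessing import Process, SimpleQueue

def worker(task_queue, result_queue):
    # Consume tasks until the parent sends the None sentinel.
    while True:
        task = task_queue.get()
        if task is None:
            break
        result_queue.put(task * task)

if __name__ == '__main__':
    task_queue = SimpleQueue()
    result_queue = SimpleQueue()

    p = Process(target=worker, args=(task_queue, result_queue))
    p.start()

    for i in range(5):
        task_queue.put(i)
    task_queue.put(None)  # sentinel: tells the worker to stop

    print([result_queue.get() for _ in range(5)])
    p.join()

SimpleQueue deliberately exposes only put(), get(), and empty(); when you need timeouts, maxsize, or join()/task_done() semantics, reach for multiprocessing.Queue or JoinableQueue instead.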
Example 1: run
def run(self, tasks, render, update, render_args=(), render_kwargs={}, update_args=(), update_kwargs={}):

    # establish ipc queues using a manager process
    task_queue = SimpleQueue()
    result_queue = SimpleQueue()

    # start process to generate image samples
    producer = Process(target=self._producer, args=(tasks, task_queue))
    producer.start()

    # start worker processes
    workers = []
    for pid in range(self._processes):
        p = Process(target=self._worker, args=(render, render_args, render_kwargs, task_queue, result_queue))
        p.start()
        workers.append(p)

    # consume results
    for _ in tasks:
        result = result_queue.get()
        update(result, *update_args, **update_kwargs)

    # shutdown workers
    for _ in workers:
        task_queue.put(None)
Author: raysect, Project: source, Lines: 25, Source: workflow.py
Example 2: export_table
def export_table(host, port, auth_key, db, table, directory, fields, delimiter, format,
                 error_queue, progress_info, sindex_counter, exit_event):
    writer = None

    try:
        # This will open at least one connection for each rdb_call_wrapper, which is
        # a little wasteful, but shouldn't be a big performance hit
        conn_fn = lambda: r.connect(host, port, auth_key=auth_key)
        table_info = rdb_call_wrapper(conn_fn, "info", write_table_metadata, db, table, directory)
        sindex_counter.value += len(table_info["indexes"])

        task_queue = SimpleQueue()
        writer = launch_writer(format, directory, db, table, fields, delimiter, task_queue, error_queue)
        writer.start()

        rdb_call_wrapper(conn_fn, "table scan", read_table_into_queue, db, table,
                         table_info["primary_key"], task_queue, progress_info, exit_event)
    except (r.ReqlError, r.ReqlDriverError) as ex:
        error_queue.put((RuntimeError, RuntimeError(ex.message), traceback.extract_tb(sys.exc_info()[2])))
    except:
        ex_type, ex_class, tb = sys.exc_info()
        error_queue.put((ex_type, ex_class, traceback.extract_tb(tb)))
    finally:
        if writer is not None and writer.is_alive():
            task_queue.put(StopIteration())
            writer.join()
Author: HiroIshikawa, Project: 21playground, Lines: 26, Source: _export.py
Example 3: data_from_file
async def data_from_file(main2gvf: mp.SimpleQueue,
                         coder: KanervaCoder):
    data = np.load('offline_data.npy')

    for i, item in enumerate(data):
        # if i > 500:
        #     break
        item[-1] = coder(x1=item[-1], x2=item[-2])
        main2gvf.put(item)
Author: yasuiniko, Project: cmput607-W18, Lines: 8, Source: final_project.py
Example 4: merge_db
def merge_db(db_folder, new_db_name, db_to_merge):

    assert path.exists(db_folder), '`{}` is a wrong path to db folder, please correct it.'.format(db_folder)

    shutdown = Event()
    writer_queue = SimpleQueue()

    writer = Writer(db_folder=db_folder, db_name=new_db_name, queue=writer_queue, shutdown=shutdown)
    reader = Reader(db_folder=db_folder, db_to_merge=db_to_merge,
                    queue=writer_queue, shutdown=shutdown)

    reader.start()
    writer.start()

    pbar = tqdm(total=len(db_to_merge))
    c = 0

    while not shutdown.is_set():
        try:
            new_c = writer.counter.value
            progress = new_c - c
            if progress > 0:
                pbar.update(progress)
                c = new_c
            Event().wait(2)
        except KeyboardInterrupt:
            print()
            print("Main thread grab the keyboard interrupt")
            break

    shutdown.set()
    pbar.close()

    # writer.join()
    # reader.join()

    print("writer alive", writer.is_alive())
    print("reader alive", reader.is_alive())

    if writer.is_alive():
        print("Waiting writer...")
        writer.join()
        print("WRITER EXECUTED")

    if reader.is_alive():
        print("Waiting reader...")
        writer_queue.get()
        print("Waiting reader 2...")
        reader.join()
        print("READER EXECUTED")

    print("Done.")
Author: getzneet, Project: SpatialEconomy, Lines: 55, Source: merge_db.py
Example 5: fork_process
def fork_process(logger, group=None, target=None, name=None, args=(), kwargs={}):
    """
    Forks a child, making sure that all exceptions from the child are safely sent to the parent
    If a target raises an exception, the exception is re-raised in the parent process
    @return tuple consisting of process exit code and target's return value
    """
    if is_windows():
        logger.warn(
            "Not forking for %s due to Windows incompatibilities (see #184). "
            "Measurements (coverage, etc.) might be biased." % target
        )
        return fake_windows_fork(group, target, name, args, kwargs)

    try:
        sys.modules["tblib.pickling_support"]
    except KeyError:
        import tblib.pickling_support
        tblib.pickling_support.install()

    q = SimpleQueue()

    def instrumented_target(*args, **kwargs):
        ex = tb = None
        try:
            send_value = (target(*args, **kwargs), None, None)
        except:
            _, ex, tb = sys.exc_info()
            send_value = (None, ex, tb)

        try:
            q.put(send_value)
        except:
            _, send_ex, send_tb = sys.exc_info()
            e_out = Exception(str(send_ex), send_tb, None if ex is None else str(ex), tb)
            q.put(e_out)

    p = Process(group=group, target=instrumented_target, name=name, args=args, kwargs=kwargs)
    p.start()
    result = q.get()
    p.join()

    if isinstance(result, tuple):
        if result[1]:
            raise_exception(result[1], result[2])
        return p.exitcode, result[0]
    else:
        msg = "Fatal error occurred in the forked process %s: %s" % (p, result.args[0])
        if result.args[2]:
            chained_message = "This error masked the send error '%s':\n%s" % (
                result.args[2],
                "".join(traceback.format_tb(result.args[3])),
            )
            msg += "\n" + chained_message
        ex = Exception(msg)
        raise_exception(ex, result.args[1])
Author: Hawks12, Project: pybuilder, Lines: 54, Source: utils.py
Example 6: learning_loop
def learning_loop(exit_flag: mp.Value,
                  gvfs: Sequence[Sequence[Learner]],
                  behaviour_gvf: SARSA,
                  main2gvf: mp.SimpleQueue,
                  gvf2main: mp.SimpleQueue,
                  gvf2plot: mp.SimpleQueue):
    action, action_prob, obs, x = None, None, None, None

    # get first state
    while exit_flag.value == 0 and obs is None:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.001)
        if exit_flag.value == 0:
            obs, x = main2gvf.get()
            action, action_prob = behaviour_gvf.policy(obs=obs, x=x)
            gvf2main.put(action)

    # main loop
    while exit_flag.value == 0:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.001)
        if exit_flag.value:
            break

        # get data from servos
        obsp, xp = main2gvf.get()
        actionp, action_probp = behaviour_gvf.policy(obs=obsp, x=xp)

        # update weights
        for g in chain.from_iterable(gvfs):
            g.update(x, obs,
                     action, action_prob,
                     xp, obsp,
                     actionp, action_probp)

        # send action
        gvf2main.put(actionp)

        # send data to plots
        gdata = [[g.data(x, obs, action, xp, obsp)
                  for g in gs]
                 for gs in gvfs]
        data = dict(ChainMap(*chain.from_iterable(gdata)))
        data['obs'] = obs
        gvf2plot.put(data)

        # go to next state
        obs = obsp
        x = xp
        action = actionp
        action_prob = action_probp

    print('Done learning!')
Author: yasuiniko, Project: cmput607-W18, Lines: 53, Source: module4.py
Example 7: data_from_file
async def data_from_file(main2gvf: mp.SimpleQueue,
                         gvf2plot: mp.SimpleQueue,
                         coder: KanervaCoder):
    data = np.load('offline_data.npy')

    for item in data:
        item[-1] = coder(item[-2])
        main2gvf.put(item)
        time.sleep(0.1)

    while not gvf2plot.empty():
        time.sleep(0.1)
Author: yasuiniko, Project: cmput607-W18, Lines: 12, Source: module_two.py
Example 8: _fit
def _fit(self, X, y, blocks):
    """Fit base clustering estimators on X."""
    self.blocks_ = blocks

    processes = []
    # Here the blocks will be passed to subprocesses
    data_queue = SimpleQueue()
    # Here the results will be passed back
    result_queue = SimpleQueue()

    for x in range(self.n_jobs):
        processes.append(mp.Process(target=_parallel_fit, args=(self.fit_,
                         self.partial_fit_, self.base_estimator,
                         self.verbose, data_queue, result_queue)))
        processes[-1].start()

    # First n_jobs blocks are sent into the queue without waiting for the
    # results. This variable is a counter that takes care of this.
    presend = 0
    blocks_computed = 0
    blocks_all = len(np.unique(blocks))

    for block in self._blocks(X, y, blocks):
        if presend >= self.n_jobs:
            b, clusterer = result_queue.get()
            blocks_computed += 1
            if clusterer:
                self.clusterers_[b] = clusterer
        else:
            presend += 1
        if self.partial_fit_:
            if block[0] in self.clusterers_:
                data_queue.put(('middle', block, self.clusterers_[b]))
                continue
        data_queue.put(('middle', block, None))

    # Get the last results and tell the subprocesses to finish
    for x in range(self.n_jobs):
        if blocks_computed < blocks_all:
            print("%s blocks computed out of %s" % (blocks_computed,
                                                    blocks_all))
            b, clusterer = result_queue.get()
            blocks_computed += 1
            if clusterer:
                self.clusterers_[b] = clusterer
        data_queue.put(('end', None, None))

    time.sleep(1)

    return self
Author: MSusik, Project: beard, Lines: 51, Source: blocking.py
Example 9: plotting_loop
def plotting_loop(exit_flag: mp.Value,
                  gvf2plot: mp.SimpleQueue,
                  plots: Sequence[Plot]):

    while exit_flag.value == 0:
        if locks:
            print('plot gp a 1 a')
            gplock.acquire()
            print('plot gp a 1 b')
        while exit_flag.value == 0 and gvf2plot.empty():
            if locks:
                print('plot gp r 1 a')
                gplock.release()
                print('plot gp r 1 b')
            time.sleep(0.001)
            if locks:
                print('plot gp a 2 a')
                gplock.acquire()
                print('plot gp a 2 b')
        if locks:
            print('plot gp r 2 a')
            gplock.release()
            print('plot gp r 2 b')
        if exit_flag.value:
            break

        if locks:
            print('plot gp a 3 a')
            gplock.acquire()
            print('plot gp a 3 b')
        d = gvf2plot.get()
        if locks:
            print('plot gp r 3 a')
            gplock.release()
            print('plot gp r 3 b')

        for plot, data in zip(plots, d):
            plot.update(data)

    for plot in plots:
        try:
            index = np.arange(len(plot.y[0]))
            np.savetxt(f"{plot.title}.csv",
                       np.column_stack(sum(((np.asarray(y),) for y in plot.y),
                                           (index,))),
                       delimiter=',')
        except ValueError:
            continue
Author: yasuiniko, Project: cmput607-W18, Lines: 48, Source: final_project.py
Example 10: __init__
def __init__(self, db_file="sqlite_db.sqlite", lock_wait_time=120):
    self.db_file = db_file
    self.connection = sqlite3.connect(self.db_file)
    self.broker_cursor = self.connection.cursor()
    self.broker_queue = SimpleQueue()
    self.broker = None
    self.lock_wait_time = lock_wait_time
Author: biologyguy, Project: RD-MCL, Lines: 7, Source: helpers.py
Example 11: _open_frontend
def _open_frontend(self):
    from multiprocessing import Process, SimpleQueue
    connection = SimpleQueue()
    frontend = Process(
        target=self._open_frontend_process,
        args=(connection, [k for k in sys.argv[1:] if k != "--frontend"]))
    frontend.start()
    cmdline = connection.get()
    frontend.join()

    if self.interactive:
        argv_backup = list(sys.argv)
    sys.argv[1:] = cmdline.split()
    Main.setup_argv(True, True)
    if self.interactive:
        sys.argv = argv_backup
    print("Running with the following command line: %s" % sys.argv)
Author: EgBulychev, Project: veles, Lines: 17, Source: __main__.py
Example 12: start
def start(parsed_args):
    from multiprocessing import Process, SimpleQueue

    processes = []
    msg_queue = SimpleQueue()
    word_count_queue = SimpleQueue()
    unique_words_queue = SimpleQueue()
    median_queue = SimpleQueue()

    # Prep workers to read from msg queue and write to other queues
    for i in range(workers):
        p = Process(target=worker,
                    args=(msg_queue, unique_words_queue, word_count_queue))
        processes.append(p)
        p.start()

    # Prep a process to accumulate word_count_queue for ft1.txt
    p = Process(target=accumulator,
                args=(word_count_queue, parsed_args.outdir))
    processes.append(p)
    p.start()

    # Prep a process to re-sequence unique words counted
    p = Process(target=buffered_resequener,
                args=(unique_words_queue, median_queue))
    processes.append(p)
    p.start()

    # Prep a process to keep a running median of unique words for ft2.txt
    p = Process(target=running_median,
                args=(median_queue, parsed_args.outdir))
    processes.append(p)
    p.start()

    # Start reading msgs for the msg_queue
    ingest(parsed_args.file, msg_queue)

    # Sending an indication to stop, one for each worker
    for i in range(workers):
        msg_queue.put(None)

    # This step gathers the child processes, but may be unnecessary
    for p in processes:
        p.join()
Author: mhakanda, Project: insight-data-engineering-code-challenge, Lines: 44, Source: tweetStats.py
Example 13: plotting_loop
def plotting_loop(exit_flag: mp.Value,
                  gvf2plot: mp.SimpleQueue,
                  plots: Sequence[Plot]):

    while exit_flag.value == 0:
        while exit_flag.value == 0 and gvf2plot.empty():
            time.sleep(0.001)
        if exit_flag.value:
            break

        data = gvf2plot.get()
        for plot in plots:
            plot.update(data)

    for plot in plots:
        index = np.arange(len(plot.y[0]))
        np.savetxt(f"{plot.title}.csv",
                   sum(((np.asarray(y),) for y in plot.y), (index,)),
                   delimiter=',')
Author: yasuiniko, Project: cmput607-W18, Lines: 19, Source: module_two.py
Example 14: __init__
def __init__(self, server, nickname, user, host='localhost'):
    self.server = server
    self.nickname = nickname
    self.realname = nickname
    self.user = user
    self.host = host
    self._readbuffer = ""
    self._writebuffer = ""

    self.request_queue = SimpleQueue()
    self.response_queue = SimpleQueue()

    # dict of board => list of users
    self.board_watchers = defaultdict(list)

    # dict of board, thread => list of users
    self.thread_watchers = defaultdict(lambda: defaultdict(list))

    Process(
        target=Ami,
        name='immediate api worker',
        args=(self.request_queue, self.response_queue)
    ).start()
Author: ATRAN2, Project: Futami, Lines: 23, Source: client.py
Example 15: __init__
def __init__(self, request_queue, response_queue):
    self.request_queue = request_queue
    self.response_queue = response_queue
    self.update_request_queue = SimpleQueue()

    Process(
        target=self.update_loop,
        name='periodic api worker',
        args=(response_queue, self.update_request_queue),
    ).start()

    logger.debug("initialization complete")
    self.request_loop()
Author: ATRAN2, Project: Futami, Lines: 14, Source: ami.py
Example 16: learning_loop
def learning_loop(exit_flag: mp.Value,
                  gvfs: Sequence[Sequence[GTDLearner]],
                  main2gvf: mp.SimpleQueue,
                  gvf2plot: mp.SimpleQueue):
    action, action_prob, obs, x = None, None, None, None

    # get first state
    while exit_flag.value == 0 and obs is None:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.001)
        if exit_flag.value == 0:
            action, action_prob, obs, x = main2gvf.get()

    i = 1

    # main loop
    while exit_flag.value == 0:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.001)
        if exit_flag.value:
            break
        i += 1

        ude = False
        rupee = False
        if 5000 < i < 5100:
            ude = True
        if i == 7000:
            rupee = True

        # get data from servos
        actionp, action_probp, obsp, xp = main2gvf.get()

        # update weights
        for gs, xi, xpi in zip(gvfs, x, xp):
            for g in gs:
                g.update(action, action_prob, obs, obsp, xi, xpi, ude, rupee)

        # send data to plots
        gdata = [[g.data(xi, obs, action, xpi, obsp)
                  for g in gs]
                 for gs, xi, xpi in zip(gvfs, x, xp)]
        data = dict(ChainMap(*chain.from_iterable(gdata)))
        data['obs'] = obs
        gvf2plot.put(data)

        # go to next state
        obs = obsp
        x = xp
        action = actionp
        action_prob = action_probp

    print('Done learning!')
Author: yasuiniko, Project: cmput607-W18, Lines: 53, Source: module3.py
Example 17: __init__
def __init__(self, max_workers=None):
    """Initializes a new ProcessPoolExecutor instance.

    Args:
        max_workers: The maximum number of processes that can be used to
            execute the given calls. If None or not given then as many
            worker processes will be created as the machine has processors.
    """
    _check_system_limits()

    if max_workers is None:
        self._max_workers = os.cpu_count() or 1
    else:
        if max_workers <= 0:
            raise ValueError("max_workers must be greater than 0")
        self._max_workers = max_workers

    # Make the call queue slightly larger than the number of processes to
    # prevent the worker processes from idling. But don't make it too big
    # because futures in the call queue cannot be cancelled.
    self._call_queue = multiprocessing.Queue(self._max_workers +
                                             EXTRA_QUEUED_CALLS)
    # Killed worker processes can produce spurious "broken pipe"
    # tracebacks in the queue's own worker thread. But we detect killed
    # processes anyway, so silence the tracebacks.
    self._call_queue._ignore_epipe = True
    self._result_queue = SimpleQueue()
    self._work_ids = queue.Queue()
    self._queue_management_thread = None
    # Map of pids to processes
    self._processes = {}

    # Shutdown is a two-step process.
    self._shutdown_thread = False
    self._shutdown_lock = threading.Lock()
    self._broken = False
    self._queue_count = 0
    self._pending_work_items = {}
Author: erics8, Project: wwqLyParse, Lines: 39, Source: process.py
Example 18: __init__
def __init__(self, data_structure, processes, scan_function, init_args,
             _mp_init_function):
    """ Init the scanner.

    data_structure is a world.DataSet
    processes is the number of child processes to use
    scan_function is the function to use for scanning
    init_args are the arguments passed to the init function
    _mp_init_function is the function used to init the child processes
    """
    assert(isinstance(data_structure, world.DataSet))
    self.data_structure = data_structure
    self.list_files_to_scan = data_structure._get_list()
    self.processes = processes
    self.scan_function = scan_function

    # Queue used by processes to pass results
    self.queue = SimpleQueue()

    init_args.update({'queue': self.queue})
    # NOTE TO SELF: initargs doesn't handle kwargs, only args!
    # Pass a dict with all the args
    self.pool = multiprocessing.Pool(processes=processes,
                                     initializer=_mp_init_function,
                                     initargs=(init_args,))

    # Recommended time to sleep between polls for results
    self.SCAN_START_SLEEP_TIME = 0.001
    self.SCAN_MIN_SLEEP_TIME = 1e-6
    self.SCAN_MAX_SLEEP_TIME = 0.1
    self.scan_sleep_time = self.SCAN_START_SLEEP_TIME
    self.queries_without_results = 0
    self.last_time = time()
    self.MIN_QUERY_NUM = 1
    self.MAX_QUERY_NUM = 5

    # Holds a friendly string with the name of the last file scanned
    self._str_last_scanned = None
Author: Fenixin, Project: Minecraft-Region-Fixer, Lines: 37, Source: scan.py
Example 19: learning_loop
def learning_loop(exit_flag: mp.Value,
                  gvfs: Sequence[GTDLearner],
                  main2gvf: mp.SimpleQueue,
                  gvf2plot: mp.SimpleQueue):
    action, action_prob, obs, x = None, None, None, None

    # get first state
    while exit_flag.value == 0 and obs is None:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.001)
        if exit_flag.value == 0:
            action, action_prob, obs, x = main2gvf.get()

    # main loop
    while exit_flag.value == 0:
        while exit_flag.value == 0 and main2gvf.empty():
            time.sleep(0.001)
        if exit_flag.value:
            break

        # get data from servos
        actionp, action_probp, obsp, xp = main2gvf.get()

        # update weights
        for g in gvfs:
            g.update(action, action_prob, obs, obsp, x, xp)

        # send data to plots
        data = [[obs]] + [g.data(x, obs, action, xp, obsp) for g in gvfs]
        gvf2plot.put(data)

        # go to next state
        obs = obsp
        x = xp
        action = actionp
        action_prob = action_probp
Author: yasuiniko, Project: cmput607-W18, Lines: 36, Source: module_two.py
Example 20: ProcessPoolExecutor
class ProcessPoolExecutor(_base.Executor):
    def __init__(self, max_workers=None):
        """Initializes a new ProcessPoolExecutor instance.

        Args:
            max_workers: The maximum number of processes that can be used to
                execute the given calls. If None or not given then as many
                worker processes will be created as the machine has processors.
        """
        _check_system_limits()

        if max_workers is None:
            self._max_workers = os.cpu_count() or 1
        else:
            self._max_workers = max_workers

        # Make the call queue slightly larger than the number of processes to
        # prevent the worker processes from idling. But don't make it too big
        # because futures in the call queue cannot be cancelled.
        self._call_queue = multiprocessing.Queue(self._max_workers +
                                                 EXTRA_QUEUED_CALLS)
        # Killed worker processes can produce spurious "broken pipe"
        # tracebacks in the queue's own worker thread. But we detect killed
        # processes anyway, so silence the tracebacks.
        self._call_queue._ignore_epipe = True
        self._result_queue = SimpleQueue()
        self._work_ids = queue.Queue()
        self._queue_management_thread = None
        # Map of pids to processes
        self._processes = {}

        # Shutdown is a two-step process.
        self._shutdown_thread = False
        self._shutdown_lock = threading.Lock()
        self._broken = False
        self._queue_count = 0
        self._pending_work_items = {}

    def _start_queue_management_thread(self):
        # When the executor gets lost, the weakref callback will wake up
        # the queue management thread.
        def weakref_cb(_, q=self._result_queue):
            q.put(None)
        if self._queue_management_thread is None:
            # Start the processes so that their sentinels are known.
            self._adjust_process_count()
            self._queue_management_thread = threading.Thread(
                    target=_queue_management_worker,
                    args=(weakref.ref(self, weakref_cb),
                          self._processes,
                          self._pending_work_items,
                          self._work_ids,
                          self._call_queue,
                          self._result_queue))
            self._queue_management_thread.daemon = True
            self._queue_management_thread.start()
            _threads_queues[self._queue_management_thread] = self._result_queue

    def _adjust_process_count(self):
        for _ in range(len(self._processes), self._max_workers):
            p = multiprocessing.Process(
                    target=_process_worker,
                    args=(self._call_queue,
                          self._result_queue))
            p.start()
            self._processes[p.pid] = p

    def submit(self, fn, *args, **kwargs):
        with self._shutdown_lock:
            if self._broken:
                raise BrokenProcessPool('A child process terminated '
                    'abruptly, the process pool is not usable anymore')
            if self._shutdown_thread:
                raise RuntimeError('cannot schedule new futures after shutdown')

            f = _base.Future()
            w = _WorkItem(f, fn, args, kwargs)

            self._pending_work_items[self._queue_count] = w
            self._work_ids.put(self._queue_count)
            self._queue_count += 1
            # Wake up queue management thread
            self._result_queue.put(None)

            self._start_queue_management_thread()
            return f
    submit.__doc__ = _base.Executor.submit.__doc__

    def shutdown(self, wait=True):
        with self._shutdown_lock:
            self._shutdown_thread = True
        if self._queue_management_thread:
            # Wake up queue management thread
            self._result_queue.put(None)
            if wait:
                self._queue_management_thread.join()
        # To reduce the risk of opening too many files, remove references to
        # objects that use file descriptors.
        self._queue_management_thread = None
        self._call_queue = None
    # ... (remainder of the class omitted) ...
Author: 5outh, Project: Databases-Fall2014, Lines: 101, Source: process.py
Note: the multiprocessing.SimpleQueue class examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective authors; copyright remains with the original authors, and any distribution or use should follow the corresponding project's License. Do not reproduce without permission.