This article collects typical usage examples of the Python multiprocessing.Manager class. If you are unsure what the Manager class is for or how to use it, the curated examples below should help.
The article presents 20 code examples of the Manager class, ordered by popularity by default. You can upvote the examples you find useful; your feedback helps the system surface better Python code samples.
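Before the project examples, here is a minimal sketch (written for Python 3 and not taken from any of the projects below) of the core pattern they all share: a Manager process owns shared objects such as a dict, a list, and a Value, and worker processes mutate them through proxy objects.

from multiprocessing import Manager, Process

def worker(shared_dict, shared_list, counter):
    # All mutations go through proxy objects; the manager process applies them.
    shared_dict['status'] = 'done'
    shared_list.append(42)
    counter.value += 1

if __name__ == '__main__':
    manager = Manager()
    d = manager.dict()           # shared dict proxy
    l = manager.list()           # shared list proxy
    v = manager.Value('i', 0)    # shared integer
    p = Process(target=worker, args=(d, l, v))
    p.start()
    p.join()
    print(dict(d), list(l), v.value)   # {'status': 'done'} [42] 1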
Example 1: controller_failure_unit_test
def controller_failure_unit_test():
    s = ["1001"]
    s1 = ["1002"]
    clear_config(s)
    clear_config(s1)

    manager1 = Manager()
    manager2 = Manager()
    failure1 = manager1.Value('i', 0)
    failed_list1 = manager1.list([])
    failure2 = manager2.Value('i', 0)
    failed_list2 = manager2.list([])

    processes = []
    process2 = mp.Process(target=controller_failure_detection, args=(s, '1', failure1, failed_list1,))
    processes.append(process2)
    process4 = mp.Process(target=controller_failure_detection, args=(s, '2', failure2, failed_list2,))
    processes.append(process4)

    for p in processes:
        p.start()
        print 'STARTING:', p, p.is_alive()

    r = random.randint(1, 10)
    time.sleep(r)
    print 'terminated'
    t1 = time.time()
    logging.debug(str(["controller failed at:"] + [t1]))
    processes[0].terminate()

    # Exit the completed processes
    for p in processes:
        p.join()
        print 'JOINED:', p, p.is_alive()
Developer: originye | Project: OVS | Lines: 30 | Source: main_test.py
Example 2: correction_terms_threaded
def correction_terms_threaded(self):
    '''Finds the correction terms associated to the quadratic form,
    for each of the equivalence classes it finds the maximum by
    iterating through the relation vectors of the group.
    Uses multiprocessing.'''
    print 'Using multiprocessing'
    pool = Pool()  # default: processes=None => uses cpu_count()
    manager = Manager()
    start_time = time.time()
    coef_lists = lrange(self.group.structure)
    # representatives = elements of C_1(V) (np.matrix)
    representatives = map(lambda l: self.find_rep(l), coef_lists)
    # list of maxes
    lst = manager.list([None for i in xrange(len(representatives))])
    alphalist = list(self.get_alpha())  # cannot pickle generators
    pool.map_async(functools.partial(process_alpha_outside, self,
                                     representatives, lst), alphalist)
    pool.close()
    pool.join()  # wait for pool to finish
    # get corrterms via (|alpha|^2+b)/4
    print 'Computed from quadratic form in %g seconds' % (time.time() - start_time)
    return [Fraction(Fraction(alpha, self.int_inverse[1]) + self.b, 4)
            for alpha in lst]
Developer: panaviatornado | Project: hfhom | Lines: 25 | Source: ndqf.py
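The key detail in Example 2 is that the Pool workers write their results into a Manager-backed list rather than a plain Python list, which would not be visible across processes. A reduced sketch of that pattern (Python 3, hypothetical data, not the hfhom code):

import functools
from multiprocessing import Pool, Manager

def process_item(shared, i):
    shared[i] = i * i            # each task fills only its own slot

if __name__ == '__main__':
    manager = Manager()
    shared = manager.list([None] * 5)   # placeholder slots, like `lst` above
    with Pool() as pool:
        pool.map(functools.partial(process_item, shared), range(5))
    print(list(shared))          # [0, 1, 4, 9, 16]

As in the original, functools.partial binds the shared proxy so that Pool.map only has to pass the varying argument, and generators are materialized into a list before being submitted because they cannot be pickled.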
Example 3: processFiles
def processFiles(patch_dir):
    root = os.getcwd()
    glbl.data_dirs = {}
    if root != patch_dir: working_path = root + "/" + patch_dir
    else: working_path = root

    for path, dirs, files in os.walk(working_path):
        if len(dirs) == 0: glbl.data_dirs[path] = ''

    # Multiprocessing Section
    #########################################
    Qids = glbl.data_dirs.keys()
    manager = Manager()   # creates shared memory manager object
    results = manager.dict()   # add dictionary to manager, so it can be accessed across processes
    nextid = Queue()   # create Queue object to serve as shared id generator across processes
    for qid in Qids: nextid.put(qid)   # load the ids to be tested into the Queue
    for x in range(0, multiprocessing.cpu_count()):   # create one process per logical CPU
        p = Process(target=processData, args=(nextid, results))   # assign process to processData, passing in the Queue and shared dictionary
        glbl.jobs.append(p)   # add the process to a list of running processes
        p.start()   # start process running
    for j in glbl.jobs:
        j.join()   # for each process, join it back to main, blocking until finished

    # write out results
    c = 1
    sets = results.keys()
    sets.sort()
    for x in sets:
        if results[x] != 'None':
            FINAL = open('result' + str(c) + '.txt', 'w')
            n = "\n************************************************************************************************\n"
            FINAL.write(n + "* " + x + ' *\n' + n + results[x] + "\n")
            FINAL.close()
            c += 1
Developer: talonsensei | Project: Bfx_scripts | Lines: 35 | Source: processPatchesv4_Rpy1.py
Example 4: download
def download(self, sources, output_directory, filename):
    """Download a file from one of the provided sources.

    The sources will be ordered by least amount of errors, so the most
    successful hosts will be tried first. In case of failure, the next
    source will be attempted, until the first successful download is
    completed or all sources have been depleted.

    Args:
        sources: A list of dicts with 'host_name' and 'url' keys.
        output_directory (str): Directory to save the downloaded file in.
        filename (str): Filename assigned to the downloaded file.

    Returns:
        A dict with 'host_name' and 'filename' keys if the download is
        successful, or an empty dict otherwise.
    """
    valid_sources = self._filter_sources(sources)
    if not valid_sources:
        return {'error': 'no valid sources'}

    manager = Manager()
    successful_downloads = manager.list([])

    def f(source):
        if not successful_downloads:
            result = self.download_from_host(source, output_directory, filename)
            if 'error' in result:
                self._host_errors[source['host_name']] += 1
            else:
                successful_downloads.append(result)

    multiprocessing.dummy.Pool(len(valid_sources)).map(f, valid_sources)

    return successful_downloads[0] if successful_downloads else {}
Developer: imiric | Project: plowshare-wrapper | Lines: 35 | Source: plowshare.py
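The closure f above implements a "first success wins" policy: once any worker appends to the shared list, the remaining sources are skipped. A stripped-down sketch of the same idea (Python 3, with a fake fetch function and made-up mirror names, not the plowshare code):

import multiprocessing.dummy
from multiprocessing import Manager

def fetch(source):
    # Stand-in for a real download; pretend only 'mirror-b' succeeds.
    return {'host_name': source} if source == 'mirror-b' else {'error': 'failed'}

def download_first(sources):
    successes = Manager().list()

    def attempt(source):
        if not successes:                  # skip once someone has succeeded
            result = fetch(source)
            if 'error' not in result:
                successes.append(result)

    multiprocessing.dummy.Pool(len(sources)).map(attempt, sources)
    return successes[0] if successes else {}

if __name__ == '__main__':
    print(download_first(['mirror-a', 'mirror-b', 'mirror-c']))

Since multiprocessing.dummy uses threads, a plain list would also work here; the Manager list simply mirrors the original code.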
Example 5: main
def main():
    init_params()
    vk = connect_to_vk(LOGIN, PASSWORD)
    audio_list = vk.method('audio.get', {})
    total = len(audio_list)

    if not os.path.exists(DOWNLOAD_DIR):
        os.makedirs(DOWNLOAD_DIR)

    manager = Manager()
    workers_list = []
    progress_list = manager.dict()
    downloaded_tracks = manager.Value('i', 0)
    lock = Lock()

    for f in audio_list[:WORKERS_COUNT - 1]:
        start_download_process(f, workers_list, progress_list, downloaded_tracks, lock)
    del audio_list[:WORKERS_COUNT - 1]

    while any(worker.is_alive() for worker in workers_list) or len(audio_list):
        if audio_list and len(workers_list) < WORKERS_COUNT:
            f = audio_list.pop(0)
            start_download_process(f, workers_list, progress_list, downloaded_tracks, lock)
        print_progress(progress_list, downloaded_tracks.value, total)
        clean_workers(workers_list)
        time.sleep(0.1)
    print "Done!"
Developer: kernel72 | Project: vk_downloader | Lines: 29 | Source: vk_downloader.py
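Example 5 combines three shared objects: a Manager dict for per-track progress, a Manager Value for the completed count, and a Lock to guard the counter, since `value += 1` is a read-modify-write and is not atomic. A minimal sketch of that combination (Python 3, not the vk_downloader code):

from multiprocessing import Manager, Process, Lock

def download_one(track_id, progress, done, lock):
    progress[track_id] = 100          # pretend the download finished
    with lock:
        done.value += 1

if __name__ == '__main__':
    manager = Manager()
    progress = manager.dict()
    done = manager.Value('i', 0)
    lock = Lock()
    workers = [Process(target=download_one, args=(i, progress, done, lock))
               for i in range(4)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    print(dict(progress), done.value)  # per-track progress, 4 completed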
Example 6: __init__
def __init__(self, port):
    manager = Manager()
    self.status = manager.dict()
    self.sendbuf = manager.list()
    self.p = Process(target=SocketManager, args=(port, self.status, self.sendbuf))
    self.p.daemon = True
    self.p.start()
Developer: youtsumi | Project: HinOTORI | Lines: 7 | Source: DomeToolKit.py
Example 7: run
def run(args):
    # Limit it to a single GPU.
    os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
    conn = create_db(args.db)
    m = Manager()
    logs = args.logging
    datasets = args.datasets
    embeddings = args.embeddings
    settings = args.settings
    # So we don't litter the fs
    dir_ = tempfile.mkdtemp(prefix='baseline-speed-test-')
    try:
        configs = get_configs(args.config)
        if not args.single:
            full_configs = []
            for config in configs:
                full_configs.extend(edit_config(config, args.frameworks, args.no_crf, args.no_attn))
            configs = full_configs
        if args.verbose:
            for config in configs:
                pprint(config)
                print()
            print()
        steps = len(configs)
        pg = create_progress_bar(steps)
        for config in configs:
            write_config = deepcopy(config)
            config['train']['epochs'] = args.trials
            task_name = config['task']
            system_info = m.dict()
            p = Process(
                target=run_model,
                args=(
                    system_info,
                    config,
                    logs,
                    settings,
                    datasets,
                    embeddings,
                    task_name,
                    dir_,
                    int(args.gpu)
                )
            )
            p.start()
            pid = p.pid
            p.join()
            log_file = os.path.join(dir_, 'timing-{}.log'.format(pid))
            speeds = parse_logs(log_file)
            save_data(conn, speeds, write_config, system_info)
            pg.update()
        pg.done()
    finally:
        shutil.rmtree(dir_)
Developer: dpressel | Project: baseline | Lines: 60 | Source: run.py
Example 8: solve
def solve(iterations, proc_count):
    queue = JoinableQueue()
    partition = get_iterations_partition(iterations, proc_count)
    for iteration in partition:
        queue.put(iteration)
    for i in range(proc_count):
        queue.put(None)

    manager = Manager()
    result = manager.list()
    processes = []

    cur_time = time.time()
    for i in range(proc_count):
        proc = Process(target=worker, args=(queue, result,))
        proc.start()
        processes.append(proc)
    queue.join()
    for proc in processes:
        proc.join()
    cur_time = time.time() - cur_time
    print_results(cur_time, result, iterations)
Developer: RoadToExclusivity | Project: PP | Lines: 25 | Source: main.py
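The worker target is not shown in the snippet. A plausible sketch (purely an assumption about its shape, not the project's code) drains iteration counts from the JoinableQueue until it sees the None sentinel, appends a partial result to the Manager list, and calls task_done() for every item so that queue.join() in solve() can return:

import random

def worker(queue, result):
    while True:
        iterations = queue.get()
        if iterations is None:       # sentinel: no more work
            queue.task_done()
            break
        # Example workload: Monte Carlo hits inside the unit quarter-circle.
        hits = sum(random.random() ** 2 + random.random() ** 2 <= 1.0
                   for _ in range(iterations))
        result.append((iterations, hits))
        queue.task_done()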
Example 9: multi_download
def multi_download(url_and_name_list, num_threads=8):
    ''' accepts list of tuples, where t[0] = url and t[1] = filename '''
    manager = Manager()
    # pylint: disable=no-member
    m_list = manager.list()
    # pylint: enable=no-member
    log = logging.getLogger('multi_dl')
    log.debug('starting pool with ' + str(num_threads) + ' workers')
    monitor_thread = Process(target=download_monitor,
                             args=((m_list, len(url_and_name_list)),))
    monitor_thread.start()
    workers = Pool(processes=num_threads)
    work = workers.map_async(single_download,
                             zip(url_and_name_list, repeat(m_list)))
    # this hack makes the async map respond to ^C interrupts
    try:
        work.get(0xFFFF)
        monitor_thread.join()
        sys.stdout.write('\n\n')
    except KeyboardInterrupt:
        print 'parent received control-c'
        exit()
Developer: Hastegan | Project: e621dl | Lines: 26 | Source: downloader.py
Example 10: spawn
def spawn(self, n=GAME_CT):
    # Fall back on import error or single core
    try:
        from multiprocessing import Process, Manager, cpu_count
    except ImportError:
        return self.run(n)
    # For low n, multiprocessing does not gain much speed-up
    if cpu_count() <= 1 or n < 500:
        return self.run(n)

    m = Manager()
    self.results = m.list()
    procs = []
    load = [n // cpu_count()] * cpu_count()
    # add the rest from the division to the last cpu
    load[-1] += n % cpu_count()
    for count in load:
        proc = Process(target=self.run, args=(count,))
        proc.start()
        procs.append(proc)
    [p.join() for p in procs]
Developer: erikb85 | Project: pyBattleship | Lines: 25 | Source: Statistics.py
Example 11: run
def run():
    # build the mdp
    start = time.time()
    room_size = 3
    num_rooms = 5
    mdp = maze_mdp.MazeMDP(room_size=room_size, num_rooms=num_rooms)

    # build the agent
    m = Manager()
    init_dict = {(s, a): 0 for s in mdp.states for a in mdp.ACTIONS + [None]}
    shared_weights = m.dict(init_dict)
    shared_value_weights = m.dict(init_dict)
    agent = async_actor_critic.AsyncActorCritic(actions=mdp.ACTIONS, discount=mdp.DISCOUNT,
                                                weights=shared_weights, value_weights=shared_value_weights,
                                                tau=.3, learning_rate=.5)

    # build a single experiment
    rewards = m.list()
    start_state_values = m.list()
    max_steps = (2 * room_size * num_rooms) ** 2
    exp = experiment.Experiment(mdp=mdp, agent=agent, num_episodes=800, max_steps=max_steps,
                                rewards=rewards, start_state_values=start_state_values)

    # run the experiment
    multiexperiment = experiment.MultiProcessExperiment(experiment=exp, num_agents=NUM_PROCESSES)
    multiexperiment.run()

    # report results
    end = time.time()
    print 'took {} seconds to converge'.format(end - start)
    mdp.print_state_values(shared_value_weights)
    optimal = mdp.EXIT_REWARD + (2 * room_size * num_rooms * mdp.MOVE_REWARD)
    utils.plot_values(rewards, optimal, 'rewards')
    utils.plot_values(start_state_values, optimal, 'start state value')
Developer: jialrs | Project: async_rl | Lines: 33 | Source: run_experiement.py
Example 12: __init__
def __init__(self):
    manager = Manager()
    self.flow_to_state_map = manager.dict()
    self.flow_to_state_map.clear()
    self.trigger = manager.Value('i', 0)
    self.comp = manager.Value('i', 0)  # sequential = 0, parallel = 1
Developer: StonyBrookUniversity | Project: pyresonance | Lines: 7 | Source: base_fsm.py
Example 13: scanner_network
def scanner_network(self, gateway):
    get_ip = len(gateway) - 1
    gateway = gateway[:get_ip]
    ranger = str(self.ip_range.text()).split("-")
    self.control = True
    jobs = []
    manager = Manager()
    on_ips = manager.dict()
    for n in xrange(int(ranger[0]), int(ranger[1])):
        ip = "%s{0}".format(n) % (gateway)
        p = Process(target=self.working, args=(ip, on_ips))
        jobs.append(p)
        p.start()
    for i in jobs:
        i.join()
    for i in on_ips.values():
        Headers = []
        n = i.split("|")
        self.data['IPaddress'].append(n[0])
        self.data['MacAddress'].append(n[1])
        self.data['Hostname'].append("<unknown>")
        for n, key in enumerate(reversed(self.data.keys())):
            Headers.append(key)
            for m, item in enumerate(self.data[key]):
                item = QTableWidgetItem(item)
                item.setTextAlignment(Qt.AlignVCenter | Qt.AlignCenter)
                self.tables.setItem(m, n, item)
    self.scanner_OFF(False, "txt_status")
    Headers = []
    for key in reversed(self.data.keys()):
        Headers.append(key)
    self.tables.setHorizontalHeaderLabels(Headers)
Developer: Mohammedmustaf | Project: 3vilTwinAttacker | Lines: 31 | Source: arps_Posion.py
Example 14: crackTicket
def crackTicket(ticket, label, hashList):
    try:
        data = base64.b64decode(ticket)
    except:
        #print "DEBUG\n" + str(ticket) + "DEBUG\n\n"
        return "FAIL" + str(label) + "\n"

    manager = Manager()
    enctickets = manager.list()

    if data[0] == '\x76':
        try:
            enctickets.append((str(decoder.decode(data)[0][2][0][3][2])))
        except:
            #print "DEBUG\n" + str(ticket) + "DEBUG\n\n"
            return "FAIL" + str(label)
    elif data[:2] == '6d':
        for ticket in data.strip().split('\n'):
            try:
                enctickets.append((str(decoder.decode(ticket.decode('hex'))[0][4][3][2])))
            except:
                #print "DEBUG\n" + str(ticket) + "DEBUG\n\n"
                return "FAIL" + str(label)

    print "\nAccount: " + label

    for currentHash in hashList:
        ntlmHash_hex = binascii.unhexlify(currentHash)
        kdata, nonce = kerberos.decrypt(ntlmHash_hex, 2, enctickets[0])
        if kdata:
            print "NTLM Hash: " + currentHash
            break
    return ""
Developer: xan7r | Project: kerberoast | Lines: 34 | Source: autoTGS_NtlmCrack.py
Example 15: func_thread
def func_thread():
    a = numpy.random.rand(1000000)
    b = numpy.random.rand(1000000)
    nodata = 0.3
    print "here"
    manager = Manager()
    lock = Lock()
    d = manager.dict()
    ps = []
    start_time = time.clock()
    for i in numpy.where((a > 0.7) & (a < 0.9) & (a != nodata)):
        for j in numpy.where((b > 0.5) & (b < 0.9) & (b != nodata)):
            index = numpy.intersect1d(i, j)
            length = len(index) / 2
            array1 = index[:length]
            array2 = index[length:]
            for processes in range(2):
                p = Process(target=f_thread, args=(d, a, b, array1, lock))
                ps.append(p)
                p.start()
    for p in ps:
        p.join()
    print time.clock() - start_time, "seconds"
    print len(d)
Developer: geobricks | Project: Playground | Lines: 29 | Source: fast_loop.py
Example 16: pricing
def pricing(dual):
    '''process for getting new columns'''
    cpus = cpu_count() - int(argv[2])
    final = pow(2, K)
    if K < 23:
        section = final
    else:
        section = 100 * cpus  # try different values
    to = 0
    since = 1
    manager = Manager()
    elements = manager.list([RETAILERS, DCS, PLANTS])
    out = manager.Queue()  # queue with the result from each worker
    while to < final:
        p = Pool(cpus)
        to = min(since + section, final)
        boss = p.apply_async(coordinator, (out,))
        workers = [p.apply_async(work, (k, elements, dual, out)) for k in xrange(since, to)]
        enviados = 0
        for w in workers:
            enviados += w.get()
        out.put('ok')
        a = boss.get()
        assert a.counter == enviados
        since = to + 1
        p.terminate()
    return a
Developer: NellyMonserrat | Project: Desempeno_LIP | Lines: 27 | Source: procesadores.py
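A detail worth noting in Example 16 is the use of manager.Queue() rather than multiprocessing.Queue(): a plain Queue cannot be passed as an argument into Pool tasks, while a Manager queue proxy can. A minimal sketch of that pattern (Python 3, hypothetical workload, not the original project's code):

from multiprocessing import Manager, Pool

def produce(k, out):
    out.put(k * k)
    return 1                          # number of items sent, like `enviados`

def consume(out, expected):
    total = 0
    for _ in range(expected):
        total += out.get()
    return total

if __name__ == '__main__':
    manager = Manager()
    out = manager.Queue()
    with Pool(4) as pool:
        jobs = [pool.apply_async(produce, (k, out)) for k in range(10)]
        sent = sum(j.get() for j in jobs)
    print(consume(out, sent))         # 285, the sum of squares 0..9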
Example 17: run_multiprocesses_likelihood
def run_multiprocesses_likelihood(self):
    lik = 0.0
    workers = []
    workers_no = self.configuration.num_threads
    corpusSplitlist = self.split_average_data(workers_no)

    likmanager = Manager()
    ManagerReturn_corpusSplitlist = []
    ManagerReturn_corpusSplitlist_lik = []
    for dataSplit in corpusSplitlist:
        likreturn_dataSplit = likmanager.list()
        likreturn_dataSplit_likvalue = likmanager.Value("", 0.0)
        worker = Process(target=self.splitlikelihood, args=(dataSplit, likreturn_dataSplit, likreturn_dataSplit_likvalue))
        worker.start()
        workers.append(worker)
        ManagerReturn_corpusSplitlist.append(likreturn_dataSplit)
        ManagerReturn_corpusSplitlist_lik.append(likreturn_dataSplit_likvalue)

    for w in workers:
        w.join()

    # compute the total likelihood over the splits:
    for v in ManagerReturn_corpusSplitlist_lik:
        lik += v.value

    # update all the docs into corpus, since we compute the doc distribution in likelihood()
    self.corpus.clear()
    for dataSplit in ManagerReturn_corpusSplitlist:
        for doc in dataSplit:
            self.corpus.append(doc)

    return lik
Developer: shuangyinli | Project: pyTWTM | Lines: 30 | Source: twtm.py
Example 18: aggress
def aggress(map):
    global startMap
    startMap = map

    #print "Regressing..."
    state = State()
    jobs = []
    longestSolution = Value('d', 20)
    highestScore = Value('d', 0)
    queue = JoinableQueue()
    manager = Manager()
    d = manager.dict()
    d.clear()
    l = RLock()

    if multiProc:
        queue.put((state, map, 1))
        for i in range(numProcs):
            p = Process(target=multiMain, args=(startMap, l, d, queue, highestScore))
            p.start()
        queue.join()
    else:
        a(l, highestScore, d, None, state, map, 1)
Developer: aelaguiz | Project: icfp2012 | Lines: 31 | Source: aggress.py
Example 19: concurrent_test
def concurrent_test(robot, rooms, num_trials, start_location=-1, chromosome=None):
    """
    Run the tests in multiple processes. Can be directly swapped out for testAllMaps.
    """
    # Setup variables
    num_rooms = len(rooms)                 # Total number of rooms
    total_trials = num_trials * num_rooms  # Total number of trials
    processes = []                         # List for all processes
    manager = Manager()                    # Manager to handle result transfer
    dict = manager.dict()                  # Dict which will store results

    # Create a process for each room, storing parameters in instance variables
    for i, room in enumerate(rooms):
        process = SimulationProcess(i, dict)
        process.robot = robot
        process.room = room
        process.num_trials = num_trials
        process.start_location = start_location
        process.chromosome = chromosome
        process.start()
        processes.append(process)
    #end for

    # Print the results
    total_score = 0
    for i, process in enumerate(processes):
        process.join()
        (score, std) = dict[i]
        print("Room %d of %d done (score: %d std: %d)" % (i + 1, num_rooms, score, std))
        total_score += score
    #end for

    print("Average score over %d trials: %d" % (total_trials, total_score / num_rooms))
    return total_score / num_rooms
#end concurrent_test
Developer: Jing361 | Project: Artificial-Intelligence | Lines: 35 | Source: roomba_concurrent.py
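SimulationProcess is defined elsewhere in the project and is not part of the snippet. The sketch below is only a guess at its shape (hypothetical, placeholder scoring logic): a Process subclass that stores its index and the shared Manager dict and writes a (score, std) tuple into that dict from run():

import random
from multiprocessing import Process

class SimulationProcess(Process):
    def __init__(self, index, results):
        super().__init__()
        self.index = index
        self.results = results          # Manager().dict() proxy shared with the parent
        self.robot = None
        self.room = None
        self.num_trials = 0
        self.start_location = -1
        self.chromosome = None

    def run(self):
        # Placeholder scoring; a real implementation would simulate the robot in the room.
        scores = [random.uniform(80, 100) for _ in range(max(self.num_trials, 1))]
        mean = sum(scores) / len(scores)
        std = (sum((s - mean) ** 2 for s in scores) / len(scores)) ** 0.5
        self.results[self.index] = (mean, std)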
Example 20: LockingSession
class LockingSession(object):
    def __init__(self, dataman, session_filename):
        self.dataman = dataman
        self.session_filename = session_filename
        self.lock = Manager().Lock()

    def acquire(self):
        self.lock.acquire()
        self.session = DataManager.shelf(self.session_filename)

    def release(self):
        self.session.close()
        self.session = None
        self.lock.release()

    def __getitem__(self, item):
        self.acquire()
        ret = self.session[item]
        self.release()
        return ret

    def __setitem__(self, item, value):
        self.acquire()
        self.session[item] = value
        self.release()
Developer: dkiela | Project: thesis | Lines: 25 | Source: datamanager.py
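Because the lock in Example 20 is created with Manager().Lock(), the proxy can be pickled and shared, so several processes holding the same LockingSession serialize their reads and writes. A reduced sketch of the same idea (Python 3, with the DataManager shelf replaced by a plain JSON file, since DataManager is not shown in the snippet):

import json
import os
from multiprocessing import Manager, Process

class LockingStore(object):
    """Serializes read-modify-write access to a shared JSON file across processes."""

    def __init__(self, path, lock):
        self.path = path
        self.lock = lock                      # Manager().Lock() proxy, picklable

    def __setitem__(self, key, value):
        with self.lock:
            data = {}
            if os.path.exists(self.path):
                with open(self.path) as f:
                    data = json.load(f)
            data[key] = value
            with open(self.path, 'w') as f:
                json.dump(data, f)

def writer(store, i):
    store[str(i)] = i

if __name__ == '__main__':
    store = LockingStore('store.json', Manager().Lock())
    procs = [Process(target=writer, args=(store, i)) for i in range(4)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    with open('store.json') as f:
        print(json.load(f))                   # all four writes survive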
Note: The multiprocessing.Manager examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective authors; copyright remains with the original authors, and redistribution and use should follow each project's license. Do not reproduce without permission.