This article collects typical usage examples of Python's multiprocessing.current_process function. If you are unsure what current_process does, how to call it, or what idiomatic uses look like, the selected examples below should help.
Twenty code examples of current_process are shown, sorted by popularity by default.
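Before the project excerpts, here is a minimal, self-contained sketch of what current_process() returns; the worker function and process names are illustrative only:

import multiprocessing
from multiprocessing import Process, current_process

def show_identity():
    # each process sees its own Process object via current_process()
    p = current_process()
    print("name=%s pid=%s daemon=%s" % (p.name, p.pid, p.daemon))

if __name__ == '__main__':
    show_identity()  # reports "MainProcess" here
    workers = [Process(target=show_identity, name='worker-%d' % i) for i in range(2)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()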
Example 1: choice_set_worker
# assumed imports for this excerpt
import time
from multiprocessing import current_process

def choice_set_worker(work_queue, done_queue, network, trip_data, master_config, trip_times, ext_bound):
    this_network = network
    # initialize link randomizer
    link_randomizer = None
    if master_config.choice_set_config['method'] == 'doubly_stochastic' and not master_config.choice_set_config['randomize_after']:
        link_randomizer = master_config.choice_set_config.get_link_randomizer(this_network, master_config)
        if '1' in current_process().name:
            for var in link_randomizer['variables']:
                print(var, " zero p: ", link_randomizer['zero']['probs'][var])
                print(var, " posi m: ", link_randomizer['pos']['means'][var])
    if master_config.choice_set_config['method'] == 'doubly_stochastic' and master_config.choice_set_config['randomize_after']:
        link_randomizer = master_config.choice_set_config['randomize_after_dev']
    idx = 0
    for trip_id in iter(work_queue.get, 'STOP'):
        idx = idx + 1
        print(time.asctime(time.localtime()), "-", current_process().name, "-", idx, ". trip_id: ", trip_id[0], ", sub_trip: ", trip_id[1], ", stage: ", trip_id[2])
        the_set, chosen_overlap = generate_choice_set(this_network, trip_data[trip_id], master_config.choice_set_config, link_randomizer, master_config['time_dependent_relation'], trip_times[trip_id[0]], ext_bound)
        done_queue.put((trip_id[0], the_set, chosen_overlap))
    done_queue.put('STOP')
    return True
Developer: sfcta, Project: BikeRouter, Lines: 25, Source: prepare_estimation.py
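choice_set_worker consumes items from work_queue until it sees a 'STOP' sentinel, then reports its own 'STOP' on done_queue. A stripped-down driver for this worker pattern might look like the following sketch; the trivial worker body and all names are illustrative, not BikeRouter's actual setup:

from multiprocessing import Process, Queue, current_process

def worker(work_queue, done_queue):
    # consume until the 'STOP' sentinel, tagging each result with the process name
    for item in iter(work_queue.get, 'STOP'):
        done_queue.put((current_process().name, item))
    done_queue.put('STOP')

if __name__ == '__main__':
    work_queue, done_queue = Queue(), Queue()
    n_workers = 2
    for item in range(10):
        work_queue.put(item)
    for _ in range(n_workers):
        work_queue.put('STOP')  # one sentinel per worker
    procs = [Process(target=worker, args=(work_queue, done_queue)) for _ in range(n_workers)]
    for p in procs:
        p.start()
    stops = 0
    while stops < n_workers:
        result = done_queue.get()
        if result == 'STOP':
            stops += 1
        else:
            print(result)
    for p in procs:
        p.join()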
Example 2: TestCustomLogging
import multiprocessing
import time

def TestCustomLogging(l, n, s):
    # log an info/error pair n times, then signal completion via the event s
    for i in range(n):
        l.info("Info - {} - {}".format(multiprocessing.current_process().name, time.strftime("%d.%m.%Y %H:%M:%S", time.gmtime())))
        l.error("Error - {} - {}".format(multiprocessing.current_process().name, time.strftime("%d.%m.%Y %H:%M:%S", time.gmtime())))
        time.sleep(0.2)
    if not s.is_set():
        s.set()
Developer: AsdwGroup, Project: AnimeSubBot, Lines: 7, Source: test_custom_logging.py
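Judging from the signature, l is a logger, n a repetition count, and s a multiprocessing.Event. A plausible harness, with all setup assumed rather than taken from the AnimeSubBot test suite, and with TestCustomLogging defined as above:

import logging
import multiprocessing

if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)  # child inherits this under the fork start method
    logger = logging.getLogger('test')       # stands in for the project's custom logger
    done = multiprocessing.Event()
    p = multiprocessing.Process(target=TestCustomLogging, args=(logger, 5, done), name='log-worker')
    p.start()
    done.wait()  # returns once the worker sets the event
    p.join()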
Example 3: scheduler
def scheduler(list_key=Conf.Q_LIST):
    """
    Creates a task from a schedule at the scheduled time and schedules next run
    """
    for s in Schedule.objects.exclude(repeats=0).filter(next_run__lt=timezone.now()):
        args = ()
        kwargs = {}
        # get args, kwargs and hook
        if s.kwargs:
            try:
                # eval should be safe here because of dict()
                kwargs = eval('dict({})'.format(s.kwargs))
            except SyntaxError:
                kwargs = {}
        if s.args:
            args = ast.literal_eval(s.args)
            # single value won't eval to tuple, so:
            if type(args) != tuple:
                args = (args,)
        q_options = kwargs.get('q_options', {})
        if s.hook:
            q_options['hook'] = s.hook
        # set up the next run time
        if not s.schedule_type == s.ONCE:
            next_run = arrow.get(s.next_run)
            if s.schedule_type == s.HOURLY:
                next_run = next_run.replace(hours=+1)
            elif s.schedule_type == s.DAILY:
                next_run = next_run.replace(days=+1)
            elif s.schedule_type == s.WEEKLY:
                next_run = next_run.replace(weeks=+1)
            elif s.schedule_type == s.MONTHLY:
                next_run = next_run.replace(months=+1)
            elif s.schedule_type == s.QUARTERLY:
                next_run = next_run.replace(months=+3)
            elif s.schedule_type == s.YEARLY:
                next_run = next_run.replace(years=+1)
            s.next_run = next_run.datetime
            s.repeats += -1
        # send it to the cluster
        q_options['list_key'] = list_key
        q_options['group'] = s.name or s.id
        kwargs['q_options'] = q_options
        s.task = tasks.async(s.func, *args, **kwargs)
        # log it
        if not s.task:
            logger.error(
                _('{} failed to create a task from schedule [{}]').format(current_process().name, s.name or s.id))
        else:
            logger.info(
                _('{} created a task from schedule [{}]').format(current_process().name, s.name or s.id))
        # default behavior is to delete a ONCE schedule
        if s.schedule_type == s.ONCE:
            if s.repeats < 0:
                s.delete()
                return
            # but not if it has a positive repeats
            s.repeats = 0
        # save the schedule
        s.save()
Developer: KorayAgaya, Project: django-q, Lines: 60, Source: cluster.py
Example 4: pusher
def pusher(task_queue, event, broker=None):
    """
    Pulls tasks off the broker and puts them in the task queue
    :type task_queue: multiprocessing.Queue
    :type event: multiprocessing.Event
    """
    if not broker:
        broker = get_broker()
    logger.info(_('{} pushing tasks at {}').format(current_process().name, current_process().pid))
    while True:
        try:
            task_set = broker.dequeue()
        except Exception as e:
            logger.error(e)
            # broker probably crashed. Let the sentinel handle it.
            sleep(10)
            break
        if task_set:
            for task in task_set:
                ack_id = task[0]
                # unpack the task
                try:
                    task = signing.SignedPackage.loads(task[1])
                except (TypeError, signing.BadSignature) as e:
                    logger.error(e)
                    broker.fail(ack_id)
                    continue
                task['ack_id'] = ack_id
                task_queue.put(task)
            logger.debug(_('queueing from {}').format(broker.list_key))
        if event.is_set():
            break
    logger.info(_("{} stopped pushing tasks").format(current_process().name))
Developer: lucemia, Project: django-q, Lines: 33, Source: cluster.py
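In django-q this pusher is spawned and stopped by the cluster's sentinel via the shared event. The broker wiring is project-specific, but the shutdown pattern is easy to isolate; below is a toy stand-in (all names hypothetical) showing a child that streams work into a queue until the event is set:

import time
from multiprocessing import Event, Process, Queue

def toy_pusher(task_queue, event, items):
    # hypothetical stand-in for the broker loop: push work until told to stop
    while True:
        if items:
            task_queue.put(items.pop(0))
        if event.is_set():
            break
        time.sleep(0.01)  # avoid spinning; the real pusher blocks in broker.dequeue()

if __name__ == '__main__':
    task_queue, stop_event = Queue(), Event()
    p = Process(target=toy_pusher, args=(task_queue, stop_event, list(range(3))))
    p.start()
    for _ in range(3):
        print(task_queue.get())
    stop_event.set()  # mirrors the sentinel flipping the shared event
    p.join()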
Example 5: cluster_tuples_parallel
def cluster_tuples_parallel(self, patterns, matched_tuples, child_conn):
    updated_patterns = list(patterns)
    count = 0
    for t in matched_tuples:
        count += 1
        if count % 500 == 0:
            print(multiprocessing.current_process(), count, "tuples processed")
        # go through all patterns (clusters of tuples) and find the one with
        # the highest similarity score
        max_similarity = 0
        max_similarity_cluster_index = 0
        for i in range(0, len(updated_patterns)):
            extraction_pattern = updated_patterns[i]
            accept, score = self.similarity_all(t, extraction_pattern)
            if accept is True and score > max_similarity:
                max_similarity = score
                max_similarity_cluster_index = i
        # if max_similarity < min_degree_match, create a new cluster
        if max_similarity < self.config.threshold_similarity:
            c = Pattern(t)
            updated_patterns.append(c)
        # if max_similarity >= min_degree_match, add to the cluster with
        # the highest similarity
        else:
            updated_patterns[max_similarity_cluster_index].add_tuple(t)
    # eliminate clusters with five or fewer tuples
    new_patterns = [p for p in updated_patterns if len(p.tuples) > 5]
    pid = multiprocessing.current_process().pid
    print(multiprocessing.current_process(), "Patterns: ", len(new_patterns))
    child_conn.send((pid, new_patterns))
Developer: davidsbatista, Project: BREDS, Lines: 35, Source: breds-parallel.py
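Here child_conn is the child's end of a multiprocessing.Pipe; the parent holds the other end and collects one (pid, patterns) tuple per worker. A skeletal version of that handshake, with a trivial stand-in for the clustering work:

import multiprocessing

def child_task(child_conn):
    # report results back to the parent, keyed by this worker's pid
    pid = multiprocessing.current_process().pid
    child_conn.send((pid, ['result-from-%d' % pid]))
    child_conn.close()

if __name__ == '__main__':
    parent_conn, child_conn = multiprocessing.Pipe()
    p = multiprocessing.Process(target=child_task, args=(child_conn,))
    p.start()
    pid, results = parent_conn.recv()
    print(pid, results)
    p.join()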
Example 6: main
def main(OBJECTID, lck, count, length, getArea=False):
    """
    OBJECTID - the objectid of the feature from the wfs service
    lck - multiprocess lock
    count - how many features have been processed
    length - the total number of features to be processed
    getArea - boolean flag to indicate whether to capture the area of intersection
    """
    try:
        logging.basicConfig(filename=LOG_FILENAME, level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s',)
        logging.info(str(os.getpid()) + " OBJECTID " + str(OBJECTID) + " (" + str(count) + " out of " + str(length) + ")")
        multiprocessing.current_process().cnt += 1
        conn = dbconnect('species_especies_schema')  # connect to PostGIS
        # intersect the species range features with the intersectingfeature features
        if getArea:  # populate the area using the intersection area between the wdpa and the species
            conn.cur.execute("SELECT * from especies.intersect_species_wdpa_area(%s,false)" % OBJECTID)
        else:
            conn.cur.execute("SELECT * from especies.intersect_species_wdpa(%s,false)" % OBJECTID)
        intersectingfeatures = conn.cur.fetchall()  # get all of the intersecting PAs for the species
        if len(intersectingfeatures) > 0:
            for intersectingfeature in intersectingfeatures:  # iterate through the intersectingfeatures
                if getArea:  # populate the output table
                    conn.cur.execute("SELECT especies.insert_species_wdpa_area(%s,%s,%s,%s)" % (OBJECTID, intersectingfeature[1], intersectingfeature[2], intersectingfeature[3]))
                else:
                    conn.cur.execute("SELECT especies.insert_species_wdpa(%s,%s,%s)" % (OBJECTID, intersectingfeature[1], intersectingfeature[2]))
        else:
            raise Exception("No intersecting features for OBJECTID %s" % OBJECTID)
    except Exception as inst:
        logging.error(str(os.getpid()) + " " + inst.args[0])
    finally:
        conn.cur.close()
        del conn
Developer: andrewcottam, Project: eSpecies-Preprocessing-Scripts, Lines: 34, Source: species_wdpa_analysis_worker.py
Example 7: monitor
def monitor(result_queue, broker=None):
    """
    Gets finished tasks from the result queue and saves them to Django
    :type result_queue: multiprocessing.Queue
    """
    if not broker:
        broker = get_broker()
    name = current_process().name
    logger.info(_("{} monitoring at {}").format(name, current_process().pid))
    for task in iter(result_queue.get, "STOP"):
        # acknowledge
        ack_id = task.pop("ack_id", False)
        if ack_id:
            broker.acknowledge(ack_id)
        # save the result
        if task.get("cached", False):
            save_cached(task, broker)
        else:
            save_task(task)
        # log the result
        if task["success"]:
            logger.info(_("Processed [{}]").format(task["name"]))
        else:
            logger.error(_("Failed [{}] - {}").format(task["name"], task["result"]))
    logger.info(_("{} stopped monitoring results").format(name))
Developer: aparsons, Project: django-q, Lines: 25, Source: cluster.py
Example 8: pusher
def pusher(task_queue, event, list_key=Conf.Q_LIST, r=redis_client):
    """
    Pulls tasks off the Redis list and puts them in the task queue
    :type task_queue: multiprocessing.Queue
    :type event: multiprocessing.Event
    :type list_key: str
    """
    logger.info(_("{} pushing tasks at {}").format(current_process().name, current_process().pid))
    while True:
        try:
            task = r.blpop(list_key, 1)
        except Exception as e:
            logger.error(e)
            # redis probably crashed. Let the sentinel handle it.
            sleep(10)
            break
        if task:
            # unpack the task
            try:
                task = signing.SignedPackage.loads(task[1])
            except (TypeError, signing.BadSignature) as e:
                logger.error(e)
                continue
            task_queue.put(task)
            logger.debug(_("queueing from {}").format(list_key))
        if event.is_set():
            break
    logger.info(_("{} stopped pushing tasks").format(current_process().name))
Developer: sebasmagri, Project: django-q, Lines: 28, Source: cluster.py
Example 9: run
def run(queue, params):
    """ Start a publisher thread to publish forever """
    try:
        (dbFile, dbBackupDir, times, submitType) = getStartParams(params)
        log.info('{0} started'.format(current_process().name))
        commandQueue = Queue.Queue()
        interruptRequest = threading.Event()
        publishThread = threading.Thread(target=publishForever,
                                         args=(commandQueue, interruptRequest, times, submitType, dbFile, dbBackupDir),
                                         name="PublishThread")
        publishThread.start()
        dispatch = {'stop': stopCommandHandler, 'set': setCommandHandler}
        while not stopRequest.isSet():
            (command, params) = queue.get()
            log.info('{0} received command: [{1}]'.format(current_process().name, str(command)))
            try:
                dispatch[command](interruptRequest, commandQueue, params)
                queue.put(Response())
            except Exception as ex:
                queue.put(Response(message=str(ex), status=ResponseStatus.ERROR))
        log.info('{0} Waiting for {1} to stop'.format(current_process().name, publishThread.name))
        publishThread.join()
        log.info('{0} ... OK'.format(current_process().name))
    except Exception as ex:
        log.exception(repr(ex))
    log.info('{0} terminated'.format(__name__))
Developer: unix-beard, Project: newsbot, Lines: 34, Source: feedpublish.py
Example 10: reducer
def reducer(q_manager, project_drs, options):
    _logger.info(multiprocessing.current_process().name + ' with pid ' +
                 str(os.getpid()))
    reducer_queue_consume(q_manager, project_drs, options)
    _logger.info(multiprocessing.current_process().name + ' with pid ' +
                 str(os.getpid()) + ' finished cleanly.')
    return
Developer: laliberte, Project: cdb_query, Lines: 7, Source: queues_manager.py
Example 11: getTaskProcess
def getTaskProcess(self):
    while True:
        array = []
        if self.taskleft() > 0:
            for i in range(10):
                try:
                    req = self.q_request.get(block=True, timeout=1000)
                    array.append(req)
                except:
                    continue
            with self.lock:  # keep the counter update atomic: entering the critical section
                self.running = self.running + 1
            threadname = multiprocessing.current_process().name
            print('Process ' + threadname + ' issuing request: ')
            ans = self.do_job(self.job, req, threadname)
            if self.needfinishqueue > 0:
                self.q_finish.put((req, ans))
            with self.lock:
                self.running = self.running - 1
            threadname = multiprocessing.current_process().name
            print('Process ' + threadname + ' finished request')
Developer: chromecrown, Project: Scan-T, Lines: 33, Source: processtool.py
Example 12: _remove_adapters
def _remove_adapters(self, adapter, info_file, sum_file, tmp_decontam_fastq, tmp_rmadapter_fastq):
    sys.stdout.write("[Preqc] %s removing adapters from %s\n" % (multiprocessing.current_process().name, tmp_decontam_fastq))
    cutadapt_cmd = "cutadapt -b %s -O %d -m %d --info-file %s -o %s %s" % (adapter, self.min_overlap, self.min_readlen, info_file, tmp_rmadapter_fastq, tmp_decontam_fastq)
    sys.stdout.write(multiprocessing.current_process().name + "\t" + cutadapt_cmd + "\n")
    p = subprocess.Popen(shlex.split(cutadapt_cmd), stdout=open(sum_file, 'w'))
    p.wait()
    return p.returncode
Developer: svm-zhang, Project: RPGC, Lines: 7, Source: preqc.py
Example 13: worker
def worker(work_queue, done_queue):
    try:
        for url in iter(work_queue.get, 'STOP'):
            status_code = print_site_status(url)
            done_queue.put("%s | %s | %s" % (current_process().name, url, status_code))
    except Exception as e:
        done_queue.put("%s | %s | %s | %s" % (current_process().name, url, 5000, str(e)))
Developer: agentidea, Project: site-manager, Lines: 7, Source: SiteManager.py
Example 14: Map
def Map(L):
    sentence_max = 0
    text = prepjob(L)
    local_words = {}
    print(multiprocessing.current_process().name, 'to map region:', L[1], "to", L[2])
    while True:
        raw_line = text.readline()
        if not raw_line:
            break
        for sentence in splitSentence(raw_line):
            for (i, word) in enumerate(sentence.split()):
                if i > sentence_max:
                    sentence_max = i
                if not word.isspace():
                    sanitized = sanitize(word).lower()
                    local_words[sanitized] = incrementTuple(i, local_words.get(sanitized, (0, {})))
    out = []
    total = 0  # renamed from `sum` to avoid shadowing the builtin
    for (key, value) in local_words.items():
        if key != '' and value is not None:
            total += value[0]
            out.append((key, value))
    print(multiprocessing.current_process().name, 'mapped tokens:', total, 'sentence max:', sentence_max)
    return out
Developer: Zanshinmu, Project: pyprobable-turing, Lines: 34, Source: wordcount.py
Example 15: doStuff
def doStuff(self):
    """
    This is the method that does the work
    """
    while (not self.stop_event.is_set()) and (not self.waiting_queue.empty()):
        # Get a job from the queue
        try:
            self.waiting_lock.acquire()
            job = self.waiting_queue.get()
        except queue.Empty:
            break
        finally:
            self.waiting_lock.release()
        # Do the work
        print("{0}: Starting {1}".format(multiprocessing.current_process(), job))
        time.sleep(1)
        print("{0}: Finished {1}".format(multiprocessing.current_process(), job))
        time.sleep(1)
        # Put the result on the result queue (doesn't have to be the same object as the source queue)
        try:
            self.complete_lock.acquire()
            self.complete_queue.put(job)
        except queue.Full:
            break
        finally:
            self.complete_lock.release()
Developer: James-Chapman, Project: python-code-snippets, Lines: 29, Source: ProcessWorker.py
Example 16: _threaded_resolve_AS
def _threaded_resolve_AS():
    """Get an ASN from the queue, resolve it, return its routes to the
    *main* process and repeat until signaled to stop.
    This function is going to be spawned as a thread.
    """
    while True:
        current_AS = q.get()
        if current_AS == 'KILL':
            q.task_done()
            break
        try:
            resp = comm.get_routes_by_autnum(current_AS, ipv6_enabled=True)
            if resp is None:
                raise LookupError
            routes = parsers.parse_AS_routes(resp)
        except LookupError:
            logging.warning("{}: {}: No Object found for {}"
                            .format(mp.current_process().name,
                                    threading.current_thread().name,
                                    current_AS))
            routes = None
        except Exception as e:
            logging.error("{}: {}: Failed to resolve DB object {}. {}"
                          .format(mp.current_process().name,
                                  threading.current_thread().name,
                                  current_AS, e))
            routes = None
        result_q.put((current_AS, routes))
        q.task_done()
Developer: stkonst, Project: PolicyParser, Lines: 30, Source: resolvers.py
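Because this resolver runs threads inside worker processes, its log lines combine mp.current_process().name with threading.current_thread().name. A minimal illustration of that combined identity (thread names illustrative):

import logging
import multiprocessing as mp
import threading

def identify():
    # combine process and thread identity, as the resolver's log messages do
    logging.warning("%s: %s: working",
                    mp.current_process().name,
                    threading.current_thread().name)

if __name__ == '__main__':
    threads = [threading.Thread(target=identify, name='resolver-%d' % i) for i in range(2)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()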
Example 17: dequeue
def dequeue(self):
    while not self.is_quit():
        t = None
        try:
            t = self.queue.get(True)
        except IOError:
            # Anticipate Ctrl-C
            self.quit.value = 1
            break
        if isinstance(t, tuple):
            self.out_counter.increment()
            self.worker_out_counter.increment()
            topic = t[0]
            msg = t[1]
            ctime = t[2]
            if isinstance(msg, rospy.Message):
                doc = ros_datacentre.util.msg_to_document(msg)
                doc["__recorded"] = ctime or datetime.now()
                doc["__topic"] = topic
                try:
                    self.collection.insert(doc)
                except InvalidDocument as e:
                    print("InvalidDocument " + current_process().name + "@" + topic + ": \n")
                    print(e)
                except InvalidStringData as e:
                    print("InvalidStringData " + current_process().name + "@" + topic + ": \n")
                    print(e)
Developer: Jailander, Project: mongodb_store, Lines: 31, Source: mongodb_log.py
Example 18: proc1
# assumed imports for this excerpt
import os
import multiprocessing as multiproc

def proc1(pipe):
    print(multiproc.current_process().pid)
    print(os.getpid())  # same as the line above: another way to get the current process's pid
    pipe.send([('Hello, my name is process %s!' % multiproc.current_process().pid), multiproc.current_process().pid])
    pipe.send([('This is process %s second send!' % multiproc.current_process().pid), multiproc.current_process().pid])
    print('waiting....')
    print(pipe.recv())
Developer: Johnson-wu, Project: python, Lines: 7, Source: multiprocess_pipe01.py
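proc1 receives the child end of a pipe, sends two messages, then blocks in recv(). A matching parent might be wired like this sketch (assuming proc1 as defined above):

import multiprocessing as multiproc

if __name__ == '__main__':
    parent_end, child_end = multiproc.Pipe()
    p = multiproc.Process(target=proc1, args=(child_end,))
    p.start()
    print(parent_end.recv())  # first message from the child
    print(parent_end.recv())  # second message
    parent_end.send('reply from %s' % multiproc.current_process().name)
    p.join()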
Example 19: main
def main():
    # get data from parent over stdin
    data = pickle.load(sys.stdin)
    sys.stdin.close()
    # set some stuff
    _logger.setLevel(data['dist_log_level'])
    forking.prepare(data)
    # create server for a `HostManager` object
    server = managers.Server(HostManager._registry, ('', 0), data['authkey'])
    current_process()._server = server
    # report server address and number of cpus back to parent
    conn = connection.Client(data['parent_address'], authkey=data['authkey'])
    conn.send((data['index'], server.address, slot_count))
    conn.close()
    # set name etc (set_name is a legacy API; modern multiprocessing uses the .name property)
    current_process().set_name('Host-%s:%s' % server.address)
    util._run_after_forkers()
    # register a cleanup function
    def cleanup(directory):
        debug('removing directory %s', directory)
        shutil.rmtree(directory)
        debug('shutting down host manager')
    util.Finalize(None, cleanup, args=[data['dir']], exitpriority=0)
    # start host manager
    debug('remote host manager starting in %s', data['dir'])
    server.serve_forever()
Developer: jcsalterego, Project: py3k-atsign, Lines: 32, Source: mp_distributing.py
Example 20: print_
def print_(object):
    import threading
    import sys
    # START OF CRITICAL SECTION
    __builtin__.__GIL__.acquire()
    try:
        import multiprocessing
        if multiprocessing.current_process().name == 'MainProcess':
            sys.stdout.write("<%s:%s> : %s\n" % (multiprocessing.current_process().name, threading.current_thread().name, object))
        else:
            sys.stdout.write("<PID #%d> : %s\n" % (multiprocessing.current_process().pid, object))
    except ImportError:
        sys.stdout.write("<%s> : %s\n" % (threading.current_thread().name, object))
    sys.stdout.flush()
    __builtin__.__GIL__.release()
    # END OF CRITICAL SECTION
    return None
Developer: cocaman, Project: pythonect, Lines: 33, Source: lang.py
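print_ assumes a process-wide lock has already been installed as __builtin__.__GIL__; pythonect sets this up during its own startup, so the initialization below is an assumption for the sketch, not the project's exact bootstrap. Python 2 syntax, matching the excerpt:

# Python 2, with print_ defined as above
import __builtin__
import threading

__builtin__.__GIL__ = threading.Lock()  # assumed setup; pythonect installs this itself
print_("hello from the main process")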
Note: the multiprocessing.current_process examples above were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets come from open-source projects contributed by their developers; copyright remains with the original authors, and any use or redistribution should follow each project's license. Do not repost without permission.