
Python multiprocessing.log_to_stderr Function Code Examples


This article collects typical usage examples of Python's multiprocessing.log_to_stderr function. If you are wondering what log_to_stderr does, how to call it, or what real code that uses it looks like, the hand-picked examples below may help.



Twenty code examples of the log_to_stderr function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
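Before looking at the collected examples, here is a minimal, self-contained sketch of the pattern most of them follow: log_to_stderr() attaches a stderr handler to the multiprocessing logger and returns that logger so its level can be configured. The worker function `work` below is only illustrative, not taken from any of the projects quoted later.

```python
import logging
import multiprocessing


def work(x):
    # Runs in a worker process; its messages reach stderr through the
    # handler installed by log_to_stderr() in the parent.
    multiprocessing.get_logger().info("processing %s", x)
    return x * x


if __name__ == "__main__":
    # log_to_stderr() returns the multiprocessing logger itself,
    # so its level can be set directly on the return value.
    logger = multiprocessing.log_to_stderr()
    logger.setLevel(logging.INFO)

    with multiprocessing.Pool(2) as pool:
        print(pool.map(work, range(4)))
```

Run this way, each log line on stderr is prefixed with the process name (for example `[INFO/ForkPoolWorker-1]`), which is the main reason the examples below prefer log_to_stderr over a plain logging setup.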

Example 1: main

def main():
  multiprocessing.log_to_stderr()
  logger = multiprocessing.get_logger()
  logger.setLevel(logging.INFO)
  pool = RedisPool(30)
  p = WorkerEngine(pool, 'Fetcher', 25)
  p.start(logger)
Developer: gearnode, Project: pyq, Lines: 7, Source: worker.py


Example 2: SumPixels

def SumPixels(file_list, file_path, istart, iend, queue):
    logger = mp.get_logger()
    mp.log_to_stderr(logging.INFO)

    roi_sums = mproc.SHARED_ARRAY
    data_array = TiffDatatArray()
    logger.info("Reading files from %d to %d" % (istart, iend))
    if istart > 0:
        istart = istart - 1

    # Process each file in the list that falls in the range istart to iend
    for i in range(istart, iend):

        # Read in the information from the file and create numpy arrays from that information.
        data_array.CreateArrays(os.path.join(file_path, file_list[i]))

        # Sum the data in the arrays that lies between the roi values.  Do this
        # for each roi that was created.
        new_sum = data_array.GetDataArray()
        roi_sums = numpy.add(roi_sums, new_sum)

        # Add a value of 1 to the queue so that the user interface can be updated
        # with the latest progress.
        queue.put(1)
    return roi_sums
Developer: roehrig, Project: XrayDiffractionDataTool, Lines: 25, Source: CustomThreads.py


Example 3: test_log

def test_log():
    multiprocessing.log_to_stderr()
    logger = multiprocessing.get_logger()
    logger.setLevel(logging.INFO)
    t1 = time.time()
    print(time.time() - t1)
    logger.info("done")
Developer: cuijiabin, Project: python_text, Lines: 7, Source: mysql2mongo.py


Example 4: init_logging

def init_logging():
    """Initialize logging system."""

    debug = '--debug' in sys.argv
    level = logging.DEBUG if debug else logging.INFO
    multiprocessing.log_to_stderr()
    logger.setLevel(level)
Developer: takumainoue, Project: .emacs.d, Lines: 7, Source: tern_django.py


Example 5: __init__

    def __init__(self,cmd_param):
        '''
        Main process class; structured this way for Windows compatibility.
        '''
        super(Mtask,self).__init__()
        self.start_time = datetime.datetime.now()
        self.parent_timeout = 180
        self.parent_timeout_flag = 0
        self.child_timeout = 120
        self.child_num = 10
        self.slice_num = 20
        self.process_list = []
        self.result = []
        self.batch_id = 0
        self.print_flag = 1
        self.mult_debug_flag = 0
        self.cmd_param = cmd_param
       


        if self.mult_debug_flag:
            # set up multiprocessing logging
            multiprocessing.log_to_stderr()
            logger = multiprocessing.get_logger()
            logger.setLevel(logging.INFO)
Developer: cesaiskra, Project: hosts_manage, Lines: 25, Source: Copy+of+mtask.py


Example 6: run_prevalence

def run_prevalence(out_dir, remove_rt, working_dir, produce_prevalence):
    """
    Create exact prevalences over a list of DRMs provided by a sequence
    object when provided with simulated resistant and susceptible files.

    Args:
        out_dir: The final folder into which to place the anonymized files.
        remove_rt: Are we removing a piece of the RT gene?
        working_dir: The folder in which to place temporary files.
        produce_prevalence: what prevalence to produce

    Returns:
        True on completion.
    """

    print "Building exact prevalences."

    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    csv_rows = []
    threads = []

    with open(os.path.join(working_dir, error_filename), 'rb') as handle:
        error_data = pickle.load(handle)
        platform = error_data['platform']
        paired_end = error_data['paired_end']

        manifest_queue = multiprocessing.Manager().Queue()
        multiprocessing.log_to_stderr()
        p = LoggingPool(len(error_data['files']))
    
        for result in error_data['files']:

            if remove_rt:
                result['sequence'].remove_rt = True

            process_result = p.apply_async(run_prevalence_thread, [
                manifest_queue, platform, paired_end,
                    result, working_dir, out_dir, produce_prevalence
            ])

        p.close()
        p.join()

        with open(os.path.join(out_dir, 
            final_csv_filename), 'w') as csv_handle,\
            open(os.path.join(out_dir, 
            final_json_filename), 'w') as json_handle:
            csv_handle.write(','.join(csv_header_rows) + '\n')
            test = sample.header(plat.platforms[platform],
                paired_end)
            test['samples'] = []
            while not manifest_queue.empty():
                s = manifest_queue.get()
                test['samples'].append(s.encode(plat.platforms[platform]))
                csv_handle.write(s.dump_csv())
            json_handle.write(json.dumps(test, indent=2))
                
    return True
Developer: hyraxbio, Project: simulated-data, Lines: 60, Source: run_simulation.py


Example 7: SumData

def SumData(roi_start_list, roi_end_list, file_list, file_path, istart, iend, queue):
    logger = mp.get_logger()
    mp.log_to_stderr(logging.INFO)

    roi_sums = mproc.SHARED_ARRAY
    data_array = XYDataArray()
    num_rois = len(roi_start_list)
    logger.info("Reading files from %d to %d" % (istart, iend))
    if istart > 0:
        istart = istart - 1

    # Process each file in the list that falls in the range istart to iend
    for i in range(istart, iend):

        # Read in the information from the file and create numpy arrays from that information.
        data_array.CreateArrays(os.path.join(file_path, file_list[i]))

        # Sum the data in the arrays that lies between the roi values.  Do this
        # for each roi that was created.
        for j in range(num_rois):
#           logger.info("Summing roi %d from file %d" % (j, i))
            roi_sums[j][i] = roi_sums[j][i] + data_array.SumROIData(roi_start_list[j], roi_end_list[j])

        # Add a value of 1 to the queue so that the user interface can be updated
        # with the latest progress.
        queue.put(1)
    return roi_sums
Developer: roehrig, Project: XrayDiffractionDataTool, Lines: 27, Source: CustomThreads.py


Example 8: setup_logger

def setup_logger(loglevel=conf.DAEMON_LOG_LEVEL, logfile=None,
        format=conf.LOG_FORMAT, **kwargs):
    """Setup the ``multiprocessing`` logger. If ``logfile`` is not specified,
    ``stderr`` is used.

    Returns logger object.
    """
    if not _monkeypatched[0]:
        monkeypatch()
        _monkeypatched[0] = True

    logger = get_default_logger(loglevel=loglevel)
    if logger.handlers:
        # Logger already configured
        return logger
    if logfile:
        if hasattr(logfile, "write"):
            log_file_handler = logging.StreamHandler(logfile)
        else:
            log_file_handler = logging.FileHandler(logfile)
        formatter = logging.Formatter(format)
        log_file_handler.setFormatter(formatter)
        logger.addHandler(log_file_handler)
    else:
        import multiprocessing
        multiprocessing.log_to_stderr()
    return logger
Developer: miracle2k, Project: celery, Lines: 27, Source: log.py


Example 9: init

	def init(self, nprocs=None, spawnonce=True):
		multiprocessing.log_to_stderr(logging.WARN)
		if nprocs is None:
			self.nprocs = multiprocessing.cpu_count()
		else:
			self.nprocs = multiprocessing.cpu_count() + nprocs if nprocs < 0 else nprocs
		self.proc = []
		self.spawnonce = spawnonce
Developer: matthewwardrop, Project: python-parampy, Lines: 8, Source: symmetric.py


Example 10: main

def main():
    if len(sys.argv) > 1:
        threads = sys.argv[1]
    else:
        threads = 1
    print('Threads to process :', threads)
    multiprocessing.log_to_stderr()
    logger = multiprocessing.get_logger()
    logger.setLevel(logging.INFO)

#   while True:
    Threadstart(int(threads))
Developer: iyersv, Project: TestRepo, Lines: 12, Source: kafka_weather_stations_consumer.py


Example 11: which_host

    def which_host(self, urllist, attr):
        """check every url in the given list against all regular expressions
        and extract the value of the chosen html attribute.
        Then use a queue and enough processes to download all matched urls"""
        # make a queue and enough processes as numprocs
        self.q = Queue()
        self.ps = (Process(target=self.use_queue, args=()) for i in range(self.numprocs))

        # enable multiprocessing logging feature
        if debug:
            logger.setLevel(logging.DEBUG)
            log_to_stderr(logging.DEBUG)


        for p in self.ps:
            # start all processes
            p.start()
        
        # piping the urllist urls into a set to purge duplicates
        finalset = set()
        for L in urllist:
            self.stringl = L.get(attr, None)
            # remove the anonym.to string before urls
            if self.stringl.startswith("http://anonym.to/?"):
                self.stringl = re.sub('http://anonym.to/\?', '', self.stringl)
            finalset.add(self.stringl)


        for L in finalset:
            # iterate over the regexp dictionary items; when a url matches,
            # put the class name, url and self.basedir in the queue
            for k, v in regexp_dict.items():
                if k.search(L):
                    self.logger.info("downloading %s" % L)
                    # instantiate and then pass the parse method to the queue.
                    # it downloads but doesn't make the queue do its job
#                    parser = v(L, self.basedir)
#                    self.q.put((parser.parse()))

                    # add the class name and the parameters needed for its __init__
                    # into the queue
                    self.q.put((v, (L, self.basedir)))
                    self.img_counter = self.img_counter + 1
                else:
                    continue

        for i in range(self.numprocs):
            # put a STOP to end the iter builtin inside use_queue
            self.q.put("STOP")

        self.logger.info('%d images were present' % self.img_counter)
        print("%d images were present" % self.img_counter)
Developer: Donearm, Project: PyImagedownloader, Lines: 52, Source: pyimagedownloader.py


Example 12: main

def main():
    global csvout
    if len(sys.argv) > 1:
        threads = sys.argv[1]
    else:
        threads = 1
    print('Threads to run :', threads)

    multiprocessing.log_to_stderr()
    logger = multiprocessing.get_logger()
    logger.setLevel(logging.INFO)

    Threadstart(threads)
Developer: iyersv, Project: TestRepo, Lines: 13, Source: kafka_raw_data_consumer.py


Example 13: multiprocess

def multiprocess(args):
    step = ((args.token_hi - args.token_lo) / args.worker_count) + 1
    tr1 = range(args.token_lo, args.token_hi, step)  # intermediate points
    tr2 = [(t, t + 1) for t in tr1[1:]]  # add adjacent points
    tr3 = [t for st in tr2 for t in st]  # flatten
    tr4 = [args.token_lo] + tr3 + [args.token_hi]  # add end points
    token_ranges = [tr4[i:i + 2] for i in range(0, len(tr4), 2)]  # make pairs
 
    rate = args.throttle_rate / args.worker_count
 
    multiprocessing.log_to_stderr().setLevel(logging.INFO)
    manager = Manager()
    results = manager.dict()  # create a special shared dict to gather results
 
    workers = [
        Process(
            target=main,
            args=(
                args, worker_index, token_ranges[worker_index], rate, results
            )
        )
        for worker_index in range(args.worker_count)
    ]
 
    os_times_start = os.times()
 
    for worker in workers:
        worker.start()
 
    for worker in workers:
        worker.join()
 
    os_times_stop = os.times()
    exitcode = 0
 
    for worker in workers:
        if worker.exitcode:
            exitcode = worker.exitcode
            break
 
    if results:
        # transform the special dict
        results_dict = analyze_results(results, os_times_stop, os_times_start)
 
        if args.json_output:
            print_json(args, results_dict)
        else:
            print_arguments(args)
            print_results(results_dict)
 
    return(exitcode)
Developer: mvallebr, Project: cql_record_processor, Lines: 51, Source: bm_copy.py


Example 14: run_error

def run_error(platform, working_dir, pcr_error, env_error, 
    human_error, paired_end):
    """
    Simulate sequencing error from the output generated by run_diversity.

    Args:
        platform: string One of "roche", "illumina" or "ion".
        working_dir: path The folder in which to place temporary files.
        pcr_error: bool Should we include a PCR error
        env_error: bool Should we include an ENV error
        human_error: bool Should we include human DNA
        paired_end: bool Are we simulating paired_end data?

    Returns:
        True on completion.
    """

    print "Simulating reads from sequence sets."

    error_data = {}
    # properties of the simulation
    error_data['paired_end'] = paired_end
    error_data['platform'] = platform

    with open(os.path.join(working_dir, evolved_filename), 'rb') as handle:

        evolved_data = pickle.load(handle)

        multiprocessing.log_to_stderr()
        p = LoggingPool(len(evolved_data))
        file_queue = multiprocessing.Manager().Queue()
       
        for result in evolved_data:

            p.apply_async(run_error_thread, [
                file_queue, result, platform, working_dir, 
                pcr_error, env_error, human_error, paired_end
            ])

        p.close()
        p.join()

        error_data['files'] = []
        while not file_queue.empty():
            error_data['files'].append(file_queue.get())

    with open(os.path.join(working_dir, error_filename), 'wb') as handle:
        pickle.dump(error_data, handle, protocol=pickle.HIGHEST_PROTOCOL)

    return True
Developer: hyraxbio, Project: simulated-data, Lines: 50, Source: run_simulation.py


Example 15: main

def main():

    logger = multiprocessing.log_to_stderr()
    logger.setLevel(multiprocessing.SUBDEBUG)

    if len(sys.argv) >= 4:
        #directory containing fastq library
        fastqsDir = workerForBam.getAbsPath(sys.argv[1])
        
        #directory containing other directories with fasta names
        fastaDir = workerForBam.getAbsPath(sys.argv[2])

        #directory containing file locks
        lockDirPath = workerForBam.getAbsPath(sys.argv[3])
        
        #directory containing temp output -> fastQ's, jobsFile 
        outDir = workerForBam.getAbsPath(sys.argv[4])

        #write all fastq's processing in job file
        combineJobPath = writeCombineBAMJobsFromSAI(outDir, fastqsDir,\
                                                        fastaDir,\
                                                        lockDirPath)

        #call workers to generate paired BAMs from SAIs
        #results = callPairedSAIToBAMWorkers(fastqsDir, fastaDir)
        #print results

    else:
        print 'err: files missing'
Developer: mohit-shrma, Project: bioinfotools, Lines: 29, Source: BAMsFromPairedFastqsSAI.py


Example 16: doMultiprocess

def doMultiprocess(app):
    applist = {'pdb2pqr':doPDB2PQR,
               'apbs':doAPBS,
               'chimera':doChimera}
    multiprocessing.log_to_stderr(logging.INFO)               # set logging to info level rather than DEBUG
    pdb_ids = glbl.model_input.keys()
    manager = Manager()                                       # creates shared memory manager object
    nextPDBid = Queue()                                       # Create Queue object to serve as shared id generator across processes
    for pid in pdb_ids: nextPDBid.put(pid)                    # Load the ids to be tested into the Queue
    for x in range(0,multiprocessing.cpu_count()):            # Create one process per logical CPU
        p = Process(target=applist[app], args=(nextPDBid,))   # Assign process to app function, passing in the Queue
        glbl.jobs.append(p)                                   # Add the process to a list of running processes
        p.start()                                             # Start process running
    for j in glbl.jobs:
        j.join()  
    return     
Developer: talonsensei, Project: Bfx_scripts, Lines: 16, Source: calcElectro2.py


Example 17: setUp

    def setUp(self):
        """Start the agent and wait for it to start"""
        super(FunctionalBase, self).setUp()
        mpl = multiprocessing.log_to_stderr()
        mpl.setLevel(logging.INFO)
        self.test_port = os.environ.get('TEST_PORT', '9999')
        # Build a basic standalone agent using the config option defaults.
        # 127.0.0.1:6835 is the fake Ironic client.
        self.agent = agent.IronicPythonAgent(
            'http://127.0.0.1:6835', 'localhost',
            ('0.0.0.0', int(self.test_port)), 3, 10, None, 300, 1,
            'agent_ipmitool', True)
        self.process = multiprocessing.Process(
            target=self.agent.run)
        self.process.start()
        self.addCleanup(self.process.terminate)

        # Wait for process to start, otherwise we have a race for tests
        sleep_time = 0.1
        tries = 0
        max_tries = int(os.environ.get('IPA_WAIT_TRIES', '100'))
        while tries < max_tries:
            try:
                return self.request('get', 'commands')
            except requests.ConnectionError:
                time.sleep(sleep_time)
                tries += 1

        raise IOError('Agent did not start after %s seconds.' % (max_tries *
                                                                 sleep_time))
Developer: Tehsmash, Project: ironic-python-agent, Lines: 30, Source: base.py


Example 18: run

def run(host, port, params, force):
    # configure logging
    mplogger = mp.log_to_stderr()
    mplogger.setLevel(params['loglevel'])
    logger.setLevel(params['loglevel'])

    if logger.getEffectiveLevel() <= 10:
        logRequests = True
    else:
        logRequests = False

    # create the server
    manager = TaskManager(params, force)
    server = TaskManagerServer(
        (host, port), logRequests=logRequests, allow_none=True)

    server.register_function(manager.load_tasks, 'panda_reload')
    server.register_function(manager.get_sim_root, 'panda_connect')
    server.register_function(manager.get_next_task, 'panda_request')
    server.register_function(manager.set_complete, 'panda_complete')
    server.register_function(manager.set_error, 'panda_error')
    server.register_function(manager.get_status, 'panda_status')
    server.register_multicall_functions()
    server.register_introspection_functions()

    logger.info("Started XML-RPC server at %s:%d" % (host, port))

    server.serve_forever()
Developer: jhamrick, Project: optimal-mental-rotation, Lines: 28, Source: manager.py


Example 19: main

def main():

    logger = multiprocessing.log_to_stderr()
    logger.setLevel(multiprocessing.SUBDEBUG)

    if len(sys.argv) >= 3:
        #directory containing samples which contains fastq
        samplesDir = getAbsPath(sys.argv[1])
        
        #quality scores file
        qScoreFile = getAbsPath(sys.argv[2]).rstrip('/')

        #perl script for qscore calculation
        qScorePerlScript = getAbsPath(sys.argv[3]).rstrip('/')
        
        #get all sample directory inside samplesDir
        allSamplesDir = allSampleDirsPath(samplesDir)
        
        #call child workers to do the job
        results = prepAndRunWorkers(allSamplesDir, qScoreFile, qScorePerlScript)

        #print final results
        print "avgQScoreComputation success: ", all(results)
    else:
        print 'err: files missing'
Developer: mohit-shrma, Project: bioinfotools, Lines: 25, Source: avgQScoreMulti.py


Example 20: Estep_parallel

def Estep_parallel( Xin, w, MuList, SigmaList, nProc=2, chunksize=1, doVerbose=False ):
  ''' Returns
      -------
        logResp : N x K vector of log posterior probabilities 

                  logResp[n,k] : n-th data point's posterior under k-th comp
  '''
  '''def obs_comp_generator( MuList, SigmaList):
    for k in xrange( len(MuList) ):
      yield k,MuList[k], SigmaList[k]
  GMMCompIterator = obs_comp_generator( MuList, SigmaList )
  '''
  if doVerbose:
    logger = mp.log_to_stderr()
    logger.setLevel( logging.INFO )

  global X
  X = Xin

  GMMCompIterator = [ (k,MuList[k],SigmaList[k]) for k in xrange( len(MuList) )]
  mypool = mp.Pool( processes=nProc )
  myParOp = mypool.map_async( loggausspdf_globaldata, GMMCompIterator, chunksize=chunksize )
  resultList = myParOp.get()
  #st = time.time()
  logResp = np.vstack(resultList)
  #print '  Reduction: %.2f sec' % (time.time()-st)
  # Time to agg results into single matrix: 0.07 sec for N=250000,K=25
  # Conclusion: agg results takes almost no time relative to each individual job
  return logResp.T + np.log(w)
Developer: anshe80, Project: MLRaptor, Lines: 29, Source: GlobalSharedGMMLogPDF.py
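Taken together, the examples above fall into two equivalent configuration styles: passing a level straight into log_to_stderr (Examples 2, 7, 9 and 16), or calling setLevel on the logger it returns (Examples 1, 13, 15 and 17). A minimal sketch of both follows; the log message is only illustrative.

```python
import logging
import multiprocessing as mp

# Variant A (Examples 2, 7, 9 and 16): pass the level directly;
# log_to_stderr() forwards it to setLevel() on the multiprocessing logger.
logger = mp.log_to_stderr(logging.INFO)

# Variant B (Examples 1, 13, 15 and 17) is equivalent:
#     logger = mp.log_to_stderr()
#     logger.setLevel(logging.INFO)
# Note that each call to log_to_stderr() adds another StreamHandler, so it
# should only be called once per process to avoid duplicated output.

logger.info("multiprocessing logger configured")
```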



Note: The multiprocessing.log_to_stderr examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by their respective authors, and copyright remains with those authors; refer to each project's license before redistributing or reusing the code. Do not reproduce this article without permission.

