本文整理汇总了Python中multiprocessing.get_logger函数的典型用法代码示例。如果您正苦于以下问题:Python get_logger函数的具体用法?Python get_logger怎么用?Python get_logger使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了get_logger函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: ftp_download
def ftp_download(file_path, is_override, output_directory, uri, user, password, worker_semaphore, inserted_task_event, tasks, log_format, log_level):
    """Download a single file from an FTP server into output_directory.

    Parameters:
        file_path        : remote path of the file to fetch (dir + name)
        is_override      : when False, an existing local copy is kept as-is
        output_directory : local directory receiving the file
        uri, user, password : FTP connection credentials
        worker_semaphore : released in all cases so the pool can spawn the next worker
        inserted_task_event, tasks : on failure the filename is re-queued on
                           `tasks` and the event is set so the dispatcher retries
        log_format, log_level : forwarded to init_logger for this process

    #>>> ftp_download('/tmp/archives.zip.002', True, 'f:/', '16.60.160.90', 'edwin', 'edwin')
    #>>> os.path.isfile('f:/archives.zip.002')
    #True
    """
    # Compute paths up front so the except-branch never sees an unbound name
    # (the original re-derived output_file inside the handler and could hit
    # NameError if os.path.split itself failed).
    path, filename = os.path.split(file_path)
    output_file = os.path.join(output_directory, filename)
    try:
        logger = multiprocessing.get_logger()
        init_logger(logger, log_level, log_format)
        ftp = ftplib.FTP(host=uri, user=user, passwd=password)
        try:
            ftp.cwd(path)
            logger.info('Start downloading %s' % file_path)
            if os.path.isfile(output_file) and not is_override:
                return
            if os.path.isfile(output_file):
                os.remove(output_file)
            # Open the target once instead of once per data chunk: the original
            # lambda opened (and never closed) a new file handle per RETR
            # callback. 'wb' is equivalent to the old remove+append dance.
            with open(output_file, 'wb') as out:
                ftp.retrbinary('RETR %s' % filename, out.write)
        finally:
            # Also runs on the early-return path, which previously leaked
            # the FTP connection.
            ftp.close()
        logger.info('Complete file to %s' % output_file)
    except Exception as e:
        logger = multiprocessing.get_logger()
        logger.error('Download %s failed, error info %s' % (file_path, e))
        # Drop the partial download and re-queue the file for another worker.
        if os.path.exists(output_file):
            os.remove(output_file)
        tasks.put(filename)
        inserted_task_event.set()
    finally:
        logger = multiprocessing.get_logger()
        logger.debug('Release lock %s' % id(worker_semaphore))
        worker_semaphore.release()
开发者ID:shangerxin,项目名称:PersontalTools,代码行数:35,代码来源:sync_client.py
示例2: main
def main(args):
    """Dispatch a CLI sub-command.

    args[0] selects the sub-command ("filter" or "algo"); the rest of args
    is forwarded. Returns the sub-command's result, False when an exception
    was logged, or None for an unknown sub-command (preserved behavior).
    """
    try:
        if args[0] == "filter":
            return run_filter(*args[1:])
        elif args[0] == "algo":
            return run_algo(*args[1:])
    except Exception:
        # Was a bare 'except:', which also swallowed KeyboardInterrupt and
        # SystemExit; catching Exception lets Ctrl-C propagate normally.
        get_logger().error(traceback.format_exc())
        return False
开发者ID:filmor,项目名称:python-ma,代码行数:9,代码来源:solve_gevps.py
示例3: __init__
def __init__(self):
    """Size a worker pool to ~4/5 of the CPUs (at least one worker),
    parse the CLI, and announce the integration tests runner start."""
    # Floor division is explicit here: plain '/' yields a float on Python 3,
    # which multiprocessing.Pool rejects as a process count. The original
    # '(n / 5) * 4' grouping is preserved exactly.
    proc_count = max(1, cpu_count() // 5 * 4)
    self.pool = multiprocessing.Pool(proc_count, initializer=setup_jenkins_console_logger)
    self.workspace_path = ""    # filled in by process_cli()
    self.runlist = []           # tests to execute
    self.params = {}            # extra CLI parameters
    self.process_cli()
    multiprocessing.get_logger().info("\n{0}\nIntegration tests runner started.\n{0}\n".format("*" * 80))
开发者ID:rokuz,项目名称:omim,代码行数:10,代码来源:integration_tests_runner.py
示例4: spawn_test_process
def spawn_test_process(test, flags):
    """Run one test executable with the given flags and capture its streams.

    Returns a tuple (test, non-empty stdout lines, stderr, exit code).
    """
    command = "{0} {1}".format(test, flags)
    runner = subprocess.Popen([command], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    multiprocessing.get_logger().info(command)
    stdout, stderr = runner.communicate()
    # stdout is kept because the caller extracts the test list from an exec
    # file by passing it the --list_tests flag.
    return test, filter(None, stdout.splitlines()), stderr, runner.returncode
开发者ID:rokuz,项目名称:omim,代码行数:11,代码来源:integration_tests_runner.py
示例5: loop
def loop(self):
    """Event loop of a session server process: block on self.event until
    signalled, then drain requests until a "terminate" request arrives."""
    while True:
        multiprocessing.get_logger().info("%s server: Waiting for signal" % self.sessionID)
        self.event.wait()
        while not self.queue.empty():
            # NOTE(review): print is given two arguments (comma, not %), so the
            # session id prints as a separate value — the placeholder is never
            # substituted. Confirm whether '%' formatting was intended.
            print("%s server: Got request signal", self.sessionID)
            self.event.clear()
            # NOTE(review): the emptiness check above is on self.queue but the
            # item is taken from the module-level queues[x] — verify these are
            # the same queue, otherwise this can raise Empty or drop requests.
            request = queues[x].get(False)
            # 'basestring' is Python 2 only — this module targets Python 2.
            if isinstance(request, basestring) and request.lower() == "terminate":
                # terminate signal received => save dataframe and exit event loop => process gets terminated
                self.dataFrameManager.saveDataFrame()
                break
            else:
                self.requestManager.processRequest(request)
开发者ID:ojengwa,项目名称:seldon-ucl,代码行数:14,代码来源:server.py
示例6: __call__
def __call__(self, *args, **kwargs):
    """Invoke the wrapped callable, logging any traceback through the
    multiprocessing logger before re-raising.

    Pool workers normally lose the traceback of a failed task; logging it
    here (visible when multiprocessing debugging is enabled) preserves it,
    while the re-raise still lets the worker clean up.
    """
    try:
        outcome = self.__callable(*args, **kwargs)
    except Exception:
        multiprocessing.get_logger().error(traceback.format_exc())
        raise
    # Success: hand back the callable's result unchanged.
    return outcome
开发者ID:hyraxbio,项目名称:simulated-data,代码行数:14,代码来源:run_simulation.py
示例7: setup_logger
def setup_logger(settings):
    """Configure and return the shared multiprocessing logger.

    The logger level is forced to DEBUG; per-handler levels do the real
    filtering. A per-process file handler is attached when both
    settings['log_file_dir'] and settings['log_file_level'] are truthy;
    a colorized stderr handler when settings['log_stderr_level'] is truthy.
    """
    class ColorFilter(logging.Filter):
        # Guarantee every record carries an 'xcolor' attribute so the stream
        # formatter below never fails on a missing field.
        def filter(self, record):
            if not hasattr(record, 'xcolor'):
                record.xcolor = ''
            return True

    logger = multiprocessing.get_logger()
    logger.setLevel(logging.DEBUG)

    if settings['log_file_dir'] and settings['log_file_level']:
        log_name = 'log_%s_%s.txt' % (multiprocessing.current_process().name,
                                      time.strftime('%Y-%m-%d_%H-%M-%S'))
        file_handler = logging.FileHandler(
            os.path.join(settings['log_file_dir'], log_name),
            mode='w', encoding='utf8')
        file_handler.setLevel(settings['log_file_level'])
        file_handler.setFormatter(logging.Formatter(
            '[%(asctime)s] %(levelname)-8s %(message)s'))
        logger.addHandler(file_handler)

    if settings['log_stderr_level']:
        stream_handler = logging.StreamHandler(stream=sys.stderr)
        stream_handler.setLevel(settings['log_stderr_level'])
        stream_handler.addFilter(ColorFilter())
        stream_handler.setFormatter(logging.Formatter(
            '[%(levelname)s/%(processName)s] %(xcolor)s%(message)s' +
            Colors.NORMAL))
        logger.addHandler(stream_handler)

    return logger
开发者ID:alex-stefa,项目名称:spdy-compliance,代码行数:30,代码来源:spdy_compliance.py
示例8: invoke_cmd_worker
def invoke_cmd_worker(item):
    """Decompress one alohalytics data file and pipe it into a plugin worker.

    item: a (plugin_dir, plugin, filepath, events_limit) tuple.
    Returns the worker subprocess's stdout bytes, or None when anything
    failed (the traceback is printed instead of being raised).
    """
    try:
        logger = multiprocessing.get_logger()
        pid = multiprocessing.current_process().pid
        plugin_dir, plugin, filepath, events_limit = item
        # This same file doubles as the worker entry point.
        worker_fpath = os.path.abspath(__file__)
        cmd = 'gzip -d -c %s | python2.7 %s %s %s %s' % (
            filepath, worker_fpath, plugin_dir, plugin, events_limit
        )
        logger.info(
            '%d: Starting job: %s', pid, cmd
        )
        # Propagate this interpreter's module search path to the child.
        env = os.environ.copy()
        env['PYTHONPATH'] = os.pathsep.join(sys.path)
        process = subprocess.Popen(
            cmd, stdout=subprocess.PIPE, shell=True,
            env=env
        )
        output = process.communicate()[0]
        return output
    except Exception:
        # BUG FIX: the original called traceback.print_exc(e), which passes
        # the exception object as print_exc's 'limit' parameter and itself
        # raises on Python 3. print_exc() takes no exception instance.
        traceback.print_exc()
开发者ID:mapsme,项目名称:Alohalytics,代码行数:25,代码来源:worker.py
示例9: wrapped_function
def wrapped_function(*args, **kwargs):
    """Run the wrapped 'func' in a separate process under resource limits
    and return its result via a pipe (None if the child died prematurely).

    NOTE(review): 'func', 'mem_in_mb', 'cpu_time_in_s', 'wall_time_in_s' and
    'num_processes' are closed over from the enclosing decorator scope, and
    'subprocess_func' is defined elsewhere in the module — confirm against
    the full file.
    """
    # Module-level cache of the last result; written below, then returned.
    global return_value
    logger = multiprocessing.get_logger()
    # create a pipe to retrieve the return value
    parent_conn, child_conn = multiprocessing.Pipe()
    # create and start the process
    subproc = multiprocessing.Process(target=subprocess_func, name=" multiproc function call", args=(func,
                                      child_conn,
                                      mem_in_mb,
                                      cpu_time_in_s,
                                      wall_time_in_s,
                                      num_processes) + args,
                                      kwargs=kwargs)
    logger.debug("Your function is called now.")
    return_value = None
    # start the process
    subproc.start()
    # Close the parent's copy of the child end so recv() can see EOF when
    # the child exits without sending anything.
    child_conn.close()
    try:
        # read the return value
        return_value = parent_conn.recv()
    except EOFError:  # child exited (killed by a limit?) before sending
        logger.debug("Your function call closed the pipe prematurely -> None will be returned")
        return_value = None
    except:
        # Anything else is unexpected — propagate after the finally join.
        raise
    finally:
        # don't leave zombies behind
        subproc.join()
    return (return_value);
开发者ID:belkhir-nacim,项目名称:ac_pysmac,代码行数:35,代码来源:limit_function_call.py
示例10: run
def run(plugin_name, start_date, end_date, plugin_dir,
        data_dir='/mnt/disk1/alohalytics/by_date',
        results_dir='./stats',
        events_limit=0):
    """
    Pyaloha stats processing pipeline:
    0. Load worker, aggregator, processor classes from a specified plugin (script)
    1. Run workers (data preprocessors) on alohalytics files within specified range
    2. Accumulate [and postprocess] worker results with an aggregator instance
    3. Run stats processor and print results to stdout
    """
    logger = multiprocessing.get_logger()

    aggregator = aggregate_raw_data(
        data_dir, results_dir, plugin_dir, plugin_name,
        start_date, end_date, events_limit
    )

    plugin_module = load_plugin(plugin_name, plugin_dir=plugin_dir)
    stats = plugin_module.StatsProcessor(aggregator)

    logger.info('Stats: processing')
    stats.process_stats()
    logger.info('Stats: outputting')
    stats.print_stats()
    logger.info('Stats: done')
开发者ID:vicpopov,项目名称:Alohalytics,代码行数:30,代码来源:main.py
示例11: run_periodic_tasks
def run_periodic_tasks(self):
    """Trigger all due periodic tasks via the default status backend and
    log one debug line per applied task."""
    log = get_logger()
    for task, task_id in default_periodic_status_backend.run_periodic_tasks():
        log.debug(
            "PeriodicWorkController: Periodic task %s applied (%s)" % (
                task.name, task_id))
开发者ID:abecciu,项目名称:celery,代码行数:7,代码来源:controllers.py
示例12: __init__
def __init__(self, host, pipe, port=80, channels=None):
    """
    Create a new client and immediately connect it.

    host : host to connect
    pipe : pipe of paths
    port : port to connect
    channels : map of file descriptors (asyncore socket map)
    """
    # Register this client in the shared asyncore socket map.
    asynchat.async_chat.__init__(self, map=channels)
    self._log = multiprocessing.get_logger()
    self._host = host
    self._pipe = pipe
    self._port = port
    # Timing bookkeeping for the request/response cycle.
    self._time = 0
    self._htime = 0
    # Per-request state, reset as responses are parsed.
    self._path = ""
    self._header = ""
    self._body = ""
    self._data = ""
    self._protocol = ""
    self._status = -1
    self._status_msg = ""
    self._close = False
    self._chunked = True
    self._content_length = -1
    # Terminator must be set before any data arrives so async_chat knows
    # where header parsing ends.
    self.set_terminator(HTTPAsyncClient.TERMINATOR)
    self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
    # NOTE: connect() is asynchronous; the debug line below logs the attempt,
    # completion is signalled through the asyncore event loop.
    self.connect((self._host, self._port))
    self._log.debug(self.logmsg("HTTPAsyncClient connected to %s:%d",
                    self._host, self._port))
    # Kick off the first request right away.
    self.send_request()
开发者ID:menski,项目名称:ppr-s11,代码行数:34,代码来源:http.py
示例13: smac_classpath
def smac_classpath():
    """
    Small function gathering all information to build the java class path.
    :returns: string representing the Java classpath for SMAC
    """
    import multiprocessing
    from pkg_resources import resource_filename

    logger = multiprocessing.get_logger()
    smac_folder = resource_filename("pysmac", 'smac/%s' % pysmac.remote_smac.SMAC_VERSION)
    smac_conf_folder = os.path.join(smac_folder, "conf")
    smac_patches_folder = os.path.join(smac_folder, "patches")
    smac_lib_folder = os.path.join(smac_folder, "lib")

    # Absolute paths of every jar in lib/, plus the conf and patches dirs.
    entries = [
        os.path.abspath(os.path.join(smac_lib_folder, fname))
        for fname in os.listdir(smac_lib_folder)
        if fname.endswith(".jar")
    ]
    entries.append(os.path.abspath(smac_conf_folder))
    entries.append(os.path.abspath(smac_patches_folder))

    # os.pathsep keeps the classpath valid on Windows as well.
    classpath = (os.pathsep).join(entries)
    logger.debug("SMAC classpath: %s", classpath)
    return classpath
开发者ID:Krxsy,项目名称:pysmac,代码行数:31,代码来源:java_helper.py
示例14: error
def error(msg, *args):
    """Shortcut to multiprocessing's logger.

    Forwards msg and lazy %-style args to the shared logger's error();
    returns that call's result (None).
    """
    # Removed leftover debug scaffolding: a sys.stdout.flush() wrapped in
    # '#### DEBUG' banner comments that was accidentally committed.
    return mp.get_logger().error(msg, *args)
开发者ID:rbharath,项目名称:deepchem,代码行数:7,代码来源:featurize.py
示例15: test_log
def test_log():
    """Smoke-test multiprocessing logging: wire the shared logger to stderr
    at INFO level, print the elapsed time of a no-op, and emit one record."""
    multiprocessing.log_to_stderr()
    log = multiprocessing.get_logger()
    log.setLevel(logging.INFO)
    started = time.time()
    print(time.time() - started)
    log.info("done")
开发者ID:cuijiabin,项目名称:python_text,代码行数:7,代码来源:mysql2mongo.py
示例16: download_wallpapers
def download_wallpapers(r, task, image_dir, resolution, quantity):
    """Downloads a number of wallpapers based on the resolution and quantity
    and adds each new wallpaper to the Redis database. For any wallpapers that
    are 1080p a low resolution version is created and the pair of images
    is added to the list of background images.
    """
    logger = get_logger()
    downloader = WallDownloader(image_dir)
    progress_step = 75.0 / quantity
    for wallpaper in downloader.downloads(resolution, quantity):
        logger.info("Wallpaper: " + wallpaper + " downloaded!")
        r.incrbyfloat('job:' + task + ':progress', progress_step)
        image_name = WallDownloader.get_filename(wallpaper)
        # Reuse an existing uuid for this image, or mint and record a new one.
        uuid = r.get('image:' + image_name + ':uuid')
        if not uuid:
            uuid = generate_uuid()
            r.set('image:' + image_name + ':uuid', uuid)
        # Record the file path for this resolution and index it by uuid.
        r.set('image:' + uuid + ':' + resolution, wallpaper)
        r.sadd('image:' + resolution + ':uuids', uuid)
        # 1080p wallpapers also get a low-res background companion image.
        if resolution == '1920x1080':
            create_background(r, task, uuid, image_dir, image_name)
示例17: prepare_download
def prepare_download(task, image_dir, output_dir):
    """Prepares a download request submitted by a user, fetches a random set
    of wallpapers based on the resolution and number of wallpapers specified and
    creates a compressed zip file for download.

    Exits the process: status 0 on success, 1 when the job keys expired.
    """
    r = redis.StrictRedis(host='localhost')
    logger = get_logger()
    resolution = r.get('job:' + task + ':resolution')
    quantity = int(r.get('job:' + task + ':quantity'))
    if not resolution or not quantity:
        # Should only happen under heavy load when the job keys timed out.
        logger.warning('Job: ' + task + ' No resolution or quantity, job expired?')
        sys.exit(1)
    # Top up the pool of wallpapers for this resolution if it is running low.
    if r.scard('image:' + resolution + ':uuids') < max_wallpapers:
        logger.info('Job: ' + task + ' Downloading: ' + resolution + ' wallpapers!')
        download_wallpapers(r, task, image_dir, resolution, quantity)
    # Pick a random sample and resolve each uuid to its file path.
    wallpapers = [r.get('image:' + uuid + ':' + resolution)
                  for uuid in r.srandmember('image:' + resolution + ':uuids', quantity)]
    logger.info('Job: ' + task + ' Wallpapers: ' + ', '.join(wallpapers))
    # Create a zip file, set key in redis
    compress_wallpapers(r, task, output_dir, wallpapers)
    sys.exit(0)
开发者ID:b100w11,项目名称:wallpaper-downloader,代码行数:35,代码来源:worker.py
示例18: create_background
def create_background(r, task, uuid, image_dir, image_name):
    """Creates a low resolution version of a 1080p wallpaper provided and saves
    the matching pair of images to the list of background images that are
    randomly displayed on the website.
    """
    logger = get_logger()
    # NOTE(review): the uuid parameter is immediately overwritten by this
    # lookup — confirm the argument is intentionally ignored.
    uuid = r.get('image:' + image_name + ':uuid')
    source_file = r.get('image:' + uuid + ':1920x1080')
    low_res_dir = os.path.join(os.path.normpath(image_dir), '800x480')
    low_res_file = os.path.join(os.path.normpath(low_res_dir), image_name + '_800x480.jpg')
    if not os.path.exists(low_res_dir):
        os.makedirs(low_res_dir)
    logger.info('Job: ' + task + ' UUID: ' + uuid + ', creating background: ' + low_res_file)
    # Resize the image as 800x480 at 50% quality, save as new image
    with Image(filename=source_file) as original:
        with original.clone() as resized:
            resized.compression_quality = 50
            resized.resize(800, 480)
            resized.save(filename=low_res_file)
    # Register the pair in the background image list.
    r.set('image:' + uuid + ':800x480', low_res_file)
    r.sadd('image:backgrounds:uuids', uuid)
开发者ID:b100w11,项目名称:wallpaper-downloader,代码行数:28,代码来源:worker.py
示例19: test_setup_logger_no_handlers_file
def test_setup_logger_no_handlers_file(self):
    """setup_logger() given a logfile must install a FileHandler first."""
    from multiprocessing import get_logger
    # Start from a clean slate on the shared multiprocessing logger.
    mp_logger = get_logger()
    mp_logger.handlers = []
    logfile_path = mktemp(suffix="unittest", prefix="celery")
    configured = setup_logger(logfile=logfile_path, loglevel=0)
    self.assertTrue(isinstance(configured.handlers[0], logging.FileHandler))
开发者ID:sensisoft,项目名称:celery,代码行数:7,代码来源:test_log.py
示例20: run_cross_tests
def run_cross_tests(server_match, client_match, jobs, skip_known_failures):
    """Collect, filter and dispatch thrift cross-language tests.

    Returns True only when the whole suite completes successfully; False
    when nothing matched or the run was interrupted.
    """
    logger = multiprocessing.get_logger()
    logger.debug('Collecting tests')
    with open(path_join(TEST_DIR, CONFIG_FILE), 'r') as fp:
        config = json.load(fp)
    tests = crossrunner.collect_cross_tests(config, server_match, client_match)
    if not tests:
        print('No test found that matches the criteria', file=sys.stderr)
        print(' servers: %s' % server_match, file=sys.stderr)
        print(' clients: %s' % client_match, file=sys.stderr)
        return False
    if skip_known_failures:
        logger.debug('Skipping known failures')
        known = crossrunner.load_known_failures(TEST_DIR)
        tests = [t for t in tests if crossrunner.test_name(**t) not in known]
    dispatcher = crossrunner.TestDispatcher(TEST_DIR, TEST_DIR, jobs)
    logger.debug('Executing %d tests' % len(tests))
    try:
        # Dispatch everything first, then wait on each result in order.
        for result in [dispatcher.dispatch(test) for test in tests]:
            result.wait()
        logger.debug('Waiting for completion')
        return dispatcher.wait()
    except (KeyboardInterrupt, SystemExit):
        logger.debug('Interrupted, shutting down')
        dispatcher.terminate()
        return False
开发者ID:SouthStar,项目名称:thrift,代码行数:27,代码来源:test.py
注:本文中的multiprocessing.get_logger函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论