This article collects typical usage examples of the report function from Python's utils.on_error module. If you are wondering how report is used, how to call it, or what real calling code looks like, the curated examples below may help.
The following section shows 20 code examples of the report function, sorted by popularity by default.
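Two calling patterns recur throughout the examples: passing an explicit message string when an operation fails, and calling on_error.report(None) inside an except block so that the exception currently being handled is attached to the report. The following is a minimal sketch of those two patterns only, not code from any of the projects below; it assumes the utils.on_error module from the swarming client code is importable, and trigger_or_report and run_and_collect are hypothetical helpers used purely for illustration.

# Minimal sketch of the two report() patterns seen in the examples below.
# Assumption: the swarming client's 'utils' package is on sys.path.
from utils import on_error

def trigger_or_report(name, do_trigger):
  """do_trigger is any callable returning a truthy value on success."""
  result = do_trigger()
  if not result:
    # Pattern 1: report an explicit, formatted message.
    on_error.report('Failed to trigger task %s' % name)
    return None
  return result

def run_and_collect(name, do_trigger):
  try:
    return 0 if trigger_or_report(name, do_trigger) else 1
  except Exception:
    # Pattern 2: report(None) from inside an exception handler; on_error
    # picks up the exception currently being handled.
    on_error.report(None)
    return 1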
Example 1: swarming_trigger
def swarming_trigger(swarming, raw_request, xsrf_token):
  """Triggers a request on the Swarming server and returns the json data.

  It's the low-level function.

  Returns:
    {
      'request': {
        'created_ts': u'2010-01-02 03:04:05',
        'name': ..
      },
      'task_id': '12300',
    }
  """
  logging.info('Triggering: %s', raw_request['name'])

  headers = {'X-XSRF-Token': xsrf_token}
  result = net.url_read_json(
      swarming + '/swarming/api/v1/client/request',
      data=raw_request,
      headers=headers)
  if not result:
    on_error.report('Failed to trigger task %s' % raw_request['name'])
    return None
  return result
Author: misscache, Project: luci-py, Lines: 25, Source file: swarming.py
Example 2: trigger_by_manifest
def trigger_by_manifest(swarming, manifest):
  """Given a task manifest, triggers it for execution on swarming.

  Args:
    swarming: URL of a swarming service.
    manifest: instance of Manifest.

  Returns:
    tuple(Task id, priority) on success. tuple(None, None) on failure.
  """
  logging.info('Triggering: %s', manifest.task_name)
  manifest_text = manifest.to_json()
  result = net.url_read(swarming + '/test', data={'request': manifest_text})
  if not result:
    on_error.report('Failed to trigger task %s' % manifest.task_name)
    return None, None
  try:
    data = json.loads(result)
  except (ValueError, TypeError):
    msg = '\n'.join((
        'Failed to trigger task %s' % manifest.task_name,
        'Manifest: %s' % manifest_text,
        'Bad response: %s' % result))
    on_error.report(msg)
    return None, None
  if not data:
    return None, None
  return data['test_keys'][0]['test_key'], data['priority']
Author: bpsinc-native, Project: src_tools_swarming_client, Lines: 28, Source file: swarming.py
Example 3: CMDcollect
def CMDcollect(parser, args):
  """Retrieves results of a Swarming task.

  The result can be in multiple part if the execution was sharded. It can
  potentially have retries.
  """
  add_collect_options(parser)
  add_sharding_options(parser)
  (options, args) = parser.parse_args(args)
  if not args:
    parser.error('Must specify one task name.')
  elif len(args) > 1:
    parser.error('Must specify only one task name.')

  auth.ensure_logged_in(options.swarming)
  try:
    return collect(
        options.swarming,
        args[0],
        options.shards,
        options.timeout,
        options.decorate,
        options.print_status_updates,
        options.task_summary_json,
        options.task_output_dir)
  except Failure:
    on_error.report(None)
    return 1
Author: bpsinc-native, Project: src_tools_swarming_client, Lines: 28, Source file: swarming.py
Example 4: CMDtrigger
def CMDtrigger(parser, args):
  """Triggers a Swarming task.

  Accepts either the hash (sha1) of a .isolated file already uploaded or the
  path to an .isolated file to archive, packages it if needed and sends a
  Swarming manifest file to the Swarming server.

  If an .isolated file is specified instead of an hash, it is first archived.

  Passes all extra arguments provided after '--' as additional command line
  arguments for an isolated command specified in *.isolate file.
  """
  add_trigger_options(parser)
  add_sharding_options(parser)
  args, isolated_cmd_args = extract_isolated_command_extra_args(args)
  parser.add_option(
      '--dump-json',
      metavar='FILE',
      help='Dump details about the triggered task(s) to this file as json')
  options, args = parser.parse_args(args)
  process_trigger_options(parser, options, args)

  auth.ensure_logged_in(options.swarming)
  if file_path.is_url(options.isolate_server):
    auth.ensure_logged_in(options.isolate_server)
  try:
    tasks, task_name = trigger(
        swarming=options.swarming,
        isolate_server=options.isolate_server or options.indir,
        namespace=options.namespace,
        file_hash_or_isolated=args[0],
        task_name=options.task_name,
        extra_args=isolated_cmd_args,
        shards=options.shards,
        dimensions=options.dimensions,
        env=dict(options.env),
        deadline=options.deadline,
        verbose=options.verbose,
        profile=options.profile,
        priority=options.priority)
    if tasks:
      if task_name != options.task_name:
        print('Triggered task: %s' % task_name)
      if options.dump_json:
        data = {
          'base_task_name': task_name,
          'tasks': tasks,
        }
        tools.write_json(options.dump_json, data, True)
    return int(not tasks)
  except Failure:
    on_error.report(None)
    return 1
Author: bpsinc-native, Project: src_tools_swarming_client, Lines: 53, Source file: swarming.py
Example 5: CMDcollect
def CMDcollect(parser, args):
  """Retrieves results of one or multiple Swarming task by its ID.

  The result can be in multiple part if the execution was sharded. It can
  potentially have retries.
  """
  add_collect_options(parser)
  parser.add_option(
      '-j', '--json',
      help='Load the task ids from .json as saved by trigger --dump-json')
  options, args = parser.parse_args(args)
  if not args and not options.json:
    parser.error('Must specify at least one task id or --json.')
  if args and options.json:
    parser.error('Only use one of task id or --json.')

  if options.json:
    options.json = unicode(os.path.abspath(options.json))
    try:
      with fs.open(options.json, 'rb') as f:
        data = json.load(f)
    except (IOError, ValueError):
      parser.error('Failed to open %s' % options.json)
    try:
      tasks = sorted(
          data['tasks'].itervalues(), key=lambda x: x['shard_index'])
      args = [t['task_id'] for t in tasks]
    except (KeyError, TypeError):
      parser.error('Failed to process %s' % options.json)

    if options.timeout is None:
      options.timeout = (
          data['request']['properties']['execution_timeout_secs'] +
          data['request']['expiration_secs'] + 10.)
  else:
    valid = frozenset('0123456789abcdef')
    if any(not valid.issuperset(task_id) for task_id in args):
      parser.error('Task ids are 0-9a-f.')
  try:
    return collect(
        options.swarming,
        args,
        options.timeout,
        options.decorate,
        options.print_status_updates,
        options.task_summary_json,
        options.task_output_dir,
        options.perf)
  except Failure:
    on_error.report(None)
    return 1
Author: mellowdistrict, Project: luci-py, Lines: 51, Source file: swarming.py
Example 6: CMDtrigger
def CMDtrigger(parser, args):
  """Triggers a Swarming task.

  Accepts either the hash (sha1) of a .isolated file already uploaded or the
  path to an .isolated file to archive.

  If an .isolated file is specified instead of an hash, it is first archived.

  Passes all extra arguments provided after '--' as additional command line
  arguments for an isolated command specified in *.isolate file.
  """
  add_trigger_options(parser)
  add_sharding_options(parser)
  parser.add_option(
      '--dump-json',
      metavar='FILE',
      help='Dump details about the triggered task(s) to this file as json')
  options, args = parser.parse_args(args)
  task_request = process_trigger_options(parser, options, args)
  try:
    tasks = trigger_task_shards(
        options.swarming, task_request, options.shards)
    if tasks:
      print('Triggered task: %s' % options.task_name)
      tasks_sorted = sorted(
          tasks.itervalues(), key=lambda x: x['shard_index'])
      if options.dump_json:
        data = {
          'base_task_name': options.task_name,
          'tasks': tasks,
          'request': task_request_to_raw_request(task_request),
        }
        tools.write_json(unicode(options.dump_json), data, True)
        print('To collect results, use:')
        print('  swarming.py collect -S %s --json %s' %
            (options.swarming, options.dump_json))
      else:
        print('To collect results, use:')
        print('  swarming.py collect -S %s %s' %
            (options.swarming, ' '.join(t['task_id'] for t in tasks_sorted)))
      print('Or visit:')
      for t in tasks_sorted:
        print('  ' + t['view_url'])
    return int(not tasks)
  except Failure:
    on_error.report(None)
    return 1
Author: mellowdistrict, Project: luci-py, Lines: 47, Source file: swarming.py
Example 7: isolated_to_hash
def isolated_to_hash(isolate_server, namespace, arg, algo, verbose):
  """Archives a .isolated file if needed.

  Returns the file hash to trigger and a bool specifying if it was a file (True)
  or a hash (False).
  """
  if arg.endswith('.isolated'):
    file_hash = archive(isolate_server, namespace, arg, algo, verbose)
    if not file_hash:
      on_error.report('Archival failure %s' % arg)
      return None, True
    return file_hash, True
  elif isolateserver.is_valid_hash(arg, algo):
    return arg, False
  else:
    on_error.report('Invalid hash %s' % arg)
    return None, False
Author: bpsinc-native, Project: src_tools_swarming_client, Lines: 17, Source file: swarming.py
Example 8: isolated_to_hash
def isolated_to_hash(arg, algo):
  """Archives a .isolated file if needed.

  Returns the file hash to trigger and a bool specifying if it was a file (True)
  or a hash (False).
  """
  if arg.endswith(".isolated"):
    file_hash = isolated_format.hash_file(arg, algo)
    if not file_hash:
      on_error.report("Archival failure %s" % arg)
      return None, True
    return file_hash, True
  elif isolated_format.is_valid_hash(arg, algo):
    return arg, False
  else:
    on_error.report("Invalid hash %s" % arg)
    return None, False
Author: Teamxrtc, Project: webrtc-streaming-node, Lines: 17, Source file: swarming.py
Example 9: swarming_trigger
def swarming_trigger(swarming, raw_request):
  """Triggers a request on the Swarming server and returns the json data.

  It's the low-level function.

  Returns:
    {
      'request': {
        'created_ts': u'2010-01-02 03:04:05',
        'name': ..
      },
      'task_id': '12300',
    }
  """
  logging.info("Triggering: %s", raw_request["name"])

  result = net.url_read_json(swarming + "/_ah/api/swarming/v1/tasks/new", data=raw_request)
  if not result:
    on_error.report("Failed to trigger task %s" % raw_request["name"])
    return None
  return result
Author: Teamxrtc, Project: webrtc-streaming-node, Lines: 21, Source file: swarming.py
Example 10: CMDcollect
def CMDcollect(parser, args):
  """Retrieves results of one or multiple Swarming task by its ID.

  The result can be in multiple part if the execution was sharded. It can
  potentially have retries.
  """
  add_collect_options(parser)
  parser.add_option("-j", "--json", help="Load the task ids from .json as saved by trigger --dump-json")
  options, args = parser.parse_args(args)
  if not args and not options.json:
    parser.error("Must specify at least one task id or --json.")
  if args and options.json:
    parser.error("Only use one of task id or --json.")

  if options.json:
    try:
      with open(options.json) as f:
        tasks = sorted(json.load(f)["tasks"].itervalues(), key=lambda x: x["shard_index"])
        args = [t["task_id"] for t in tasks]
    except (KeyError, IOError, TypeError, ValueError):
      parser.error("Failed to parse %s" % options.json)
  else:
    valid = frozenset("0123456789abcdef")
    if any(not valid.issuperset(task_id) for task_id in args):
      parser.error("Task ids are 0-9a-f.")

  try:
    return collect(
        options.swarming,
        None,
        args,
        options.timeout,
        options.decorate,
        options.print_status_updates,
        options.task_summary_json,
        options.task_output_dir,
    )
  except Failure:
    on_error.report(None)
    return 1
Author: Teamxrtc, Project: webrtc-streaming-node, Lines: 40, Source file: swarming.py
Example 11: CMDtrigger
def CMDtrigger(parser, args):
  """Triggers a Swarming task.

  Accepts either the hash (sha1) of a .isolated file already uploaded or the
  path to an .isolated file to archive.

  If an .isolated file is specified instead of an hash, it is first archived.

  Passes all extra arguments provided after '--' as additional command line
  arguments for an isolated command specified in *.isolate file.
  """
  add_trigger_options(parser)
  add_sharding_options(parser)
  parser.add_option(
      "--dump-json", metavar="FILE", help="Dump details about the triggered task(s) to this file as json"
  )
  options, args = parser.parse_args(args)
  task_request = process_trigger_options(parser, options, args)
  try:
    tasks = trigger_task_shards(options.swarming, task_request, options.shards)
    if tasks:
      print("Triggered task: %s" % options.task_name)
      tasks_sorted = sorted(tasks.itervalues(), key=lambda x: x["shard_index"])
      if options.dump_json:
        data = {"base_task_name": options.task_name, "tasks": tasks}
        tools.write_json(options.dump_json, data, True)
        print("To collect results, use:")
        print("  swarming.py collect -S %s --json %s" % (options.swarming, options.dump_json))
      else:
        print("To collect results, use:")
        print(
            "  swarming.py collect -S %s %s" % (options.swarming, " ".join(t["task_id"] for t in tasks_sorted))
        )
      print("Or visit:")
      for t in tasks_sorted:
        print("  " + t["view_url"])
    return int(not tasks)
  except Failure:
    on_error.report(None)
    return 1
Author: Teamxrtc, Project: webrtc-streaming-node, Lines: 40, Source file: swarming.py
Example 12: CMDrun
def CMDrun(parser, args):
  """Triggers a task and wait for the results.

  Basically, does everything to run a command remotely.
  """
  add_trigger_options(parser)
  add_collect_options(parser)
  add_sharding_options(parser)
  options, args = parser.parse_args(args)
  task_request = process_trigger_options(parser, options, args)
  try:
    tasks = trigger_task_shards(options.swarming, task_request, options.shards)
  except Failure as e:
    on_error.report("Failed to trigger %s(%s): %s" % (options.task_name, args[0], e.args[0]))
    return 1
  if not tasks:
    on_error.report("Failed to trigger the task.")
    return 1
  print("Triggered task: %s" % options.task_name)
  task_ids = [t["task_id"] for t in sorted(tasks.itervalues(), key=lambda x: x["shard_index"])]
  try:
    return collect(
        options.swarming,
        options.task_name,
        task_ids,
        options.timeout,
        options.decorate,
        options.print_status_updates,
        options.task_summary_json,
        options.task_output_dir,
    )
  except Failure:
    on_error.report(None)
    return 1
Author: Teamxrtc, Project: webrtc-streaming-node, Lines: 34, Source file: swarming.py
Example 13: swarming_trigger
def swarming_trigger(swarming, raw_request):
  """Triggers a request on the Swarming server and returns the json data.

  It's the low-level function.

  Returns:
    {
      'request': {
        'created_ts': u'2010-01-02 03:04:05',
        'name': ..
      },
      'task_id': '12300',
    }
  """
  logging.info('Triggering: %s', raw_request['name'])

  result = net.url_read_json(
      swarming + '/_ah/api/swarming/v1/tasks/new', data=raw_request)
  if not result:
    on_error.report('Failed to trigger task %s' % raw_request['name'])
    return None
  if result.get('error'):
    # The reply is an error.
    msg = 'Failed to trigger task %s' % raw_request['name']
    if result['error'].get('errors'):
      for err in result['error']['errors']:
        if err.get('message'):
          msg += '\nMessage: %s' % err['message']
        if err.get('debugInfo'):
          msg += '\nDebug info:\n%s' % err['debugInfo']
    elif result['error'].get('message'):
      msg += '\nMessage: %s' % result['error']['message']
    on_error.report(msg)
    return None
  return result
Author: mellowdistrict, Project: luci-py, Lines: 36, Source file: swarming.py
Example 14: CMDrun
def CMDrun(parser, args):
  """Triggers a task and wait for the results.

  Basically, does everything to run a command remotely.
  """
  add_trigger_options(parser)
  add_collect_options(parser)
  add_sharding_options(parser)
  args, isolated_cmd_args = extract_isolated_command_extra_args(args)
  options, args = parser.parse_args(args)
  process_trigger_options(parser, options, args)

  auth.ensure_logged_in(options.swarming)
  if file_path.is_url(options.isolate_server):
    auth.ensure_logged_in(options.isolate_server)
  try:
    tasks, task_name = trigger(
        swarming=options.swarming,
        isolate_server=options.isolate_server or options.indir,
        namespace=options.namespace,
        file_hash_or_isolated=args[0],
        task_name=options.task_name,
        extra_args=isolated_cmd_args,
        shards=options.shards,
        dimensions=options.dimensions,
        env=dict(options.env),
        deadline=options.deadline,
        verbose=options.verbose,
        profile=options.profile,
        priority=options.priority)
  except Failure as e:
    on_error.report(
        'Failed to trigger %s(%s): %s' %
        (options.task_name, args[0], e.args[0]))
    return 1
  if not tasks:
    on_error.report('Failed to trigger the task.')
    return 1
  if task_name != options.task_name:
    print('Triggered task: %s' % task_name)
  try:
    # TODO(maruel): Use task_ids, it's much more efficient!
    return collect(
        options.swarming,
        task_name,
        options.shards,
        options.timeout,
        options.decorate,
        options.print_status_updates,
        options.task_summary_json,
        options.task_output_dir)
  except Failure:
    on_error.report(None)
    return 1
Author: bpsinc-native, Project: src_tools_swarming_client, Lines: 54, Source file: swarming.py
Example 15: run_shell_out
def run_shell_out(url, mode):
  # Enable 'report_on_exception_exit' even though main file is *_test.py.
  on_error._is_in_test = lambda: False

  # Hack it out so registering works.
  on_error._ENABLED_DOMAINS = (socket.getfqdn(),)

  # Don't try to authenticate into localhost.
  on_error.net.OAuthAuthenticator = lambda *_: None

  if not on_error.report_on_exception_exit(url):
    print 'Failure to register the handler'
    return 1

  # Hack out certificate verification because we are using a self-signed
  # certificate here. In practice, the SSL certificate is signed to guard
  # against MITM attacks.
  on_error._SERVER.engine.session.verify = False

  if mode == 'crash':
    # Sadly, net is a bit overly verbose, which breaks
    # test_shell_out_crash_server_down.
    logging.error = lambda *_, **_kwargs: None
    logging.warning = lambda *_, **_kwargs: None
    raise ValueError('Oops')

  if mode == 'report':
    # Generate a manual report without an exception frame. Also set the version
    # value.
    setattr(sys.modules['__main__'], '__version__', '123')
    on_error.report('Oh dang')

  if mode == 'exception':
    # Report from inside an exception frame.
    try:
      raise TypeError('You are not my type')
    except TypeError:
      on_error.report('Really')

  if mode == 'exception_no_msg':
    # Report from inside an exception frame.
    try:
      raise TypeError('You are not my type #2')
    except TypeError:
      on_error.report(None)
  return 0
Author: Crawping, Project: chromium_extract, Lines: 46, Source file: on_error_test.py
Example 16: map_and_run
def map_and_run(
    isolated_hash, storage, cache, leak_temp_dir, root_dir, hard_timeout,
    grace_period, extra_args):
  """Maps and run the command. Returns metadata about the result."""
  result = {
    'duration': None,
    'exit_code': None,
    'had_hard_timeout': False,
    'internal_failure': None,
    'stats': {
      # 'download': {
      #   'duration': 0.,
      #   'initial_number_items': 0,
      #   'initial_size': 0,
      #   'items_cold': '<large.pack()>',
      #   'items_hot': '<large.pack()>',
      # },
      # 'upload': {
      #   'duration': 0.,
      #   'items_cold': '<large.pack()>',
      #   'items_hot': '<large.pack()>',
      # },
    },
    'outputs_ref': None,
    'version': 3,
  }
  if root_dir:
    if not fs.isdir(root_dir):
      fs.makedirs(root_dir, 0700)
    prefix = u''
  else:
    root_dir = os.path.dirname(cache.cache_dir) if cache.cache_dir else None
    prefix = u'isolated_'
  run_dir = make_temp_dir(prefix + u'run', root_dir)
  out_dir = make_temp_dir(prefix + u'out', root_dir)
  tmp_dir = make_temp_dir(prefix + u'tmp', root_dir)

  try:
    start = time.time()
    bundle = isolateserver.fetch_isolated(
        isolated_hash=isolated_hash,
        storage=storage,
        cache=cache,
        outdir=run_dir)
    if not bundle.command:
      # Handle this as a task failure, not an internal failure.
      sys.stderr.write(
          '<The .isolated doesn\'t declare any command to run!>\n'
          '<Check your .isolate for missing \'command\' variable>\n')
      if os.environ.get('SWARMING_TASK_ID'):
        # Give an additional hint when running as a swarming task.
        sys.stderr.write('<This occurs at the \'isolate\' step>\n')
      result['exit_code'] = 1
      return result

    result['stats']['download'] = {
      'duration': time.time() - start,
      'initial_number_items': cache.initial_number_items,
      'initial_size': cache.initial_size,
      'items_cold': base64.b64encode(large.pack(sorted(cache.added))),
      'items_hot': base64.b64encode(
          large.pack(sorted(set(cache.linked) - set(cache.added)))),
    }

    change_tree_read_only(run_dir, bundle.read_only)
    cwd = os.path.normpath(os.path.join(run_dir, bundle.relative_cwd))
    command = bundle.command + extra_args
    file_path.ensure_command_has_abs_path(command, cwd)
    sys.stdout.flush()
    start = time.time()
    try:
      result['exit_code'], result['had_hard_timeout'] = run_command(
          process_command(command, out_dir), cwd, tmp_dir, hard_timeout,
          grace_period)
    finally:
      result['duration'] = max(time.time() - start, 0)
  except Exception as e:
    # An internal error occured. Report accordingly so the swarming task will be
    # retried automatically.
    logging.exception('internal failure: %s', e)
    result['internal_failure'] = str(e)
    on_error.report(None)
  finally:
    try:
      if leak_temp_dir:
        logging.warning(
            'Deliberately leaking %s for later examination', run_dir)
      else:
        # On Windows rmtree(run_dir) call above has a synchronization effect: it
        # finishes only when all task child processes terminate (since a running
        # process locks *.exe file). Examine out_dir only after that call
        # completes (since child processes may write to out_dir too and we need
        # to wait for them to finish).
        if fs.isdir(run_dir):
          try:
            success = file_path.rmtree(run_dir)
          except OSError as e:
            logging.error('Failure with %s', e)
            success = False
          if not success:
            print >> sys.stderr, (
                'Failed to delete the run directory, forcibly failing\n'
# ......... part of the code is omitted here .........
Author: mellowdistrict, Project: luci-py, Lines: 101, Source file: run_isolated.py
Example 17: isolated_handle_options
def isolated_handle_options(options, args):
  """Handles '--isolated <isolated>', '<isolated>' and '-- <args...>' arguments.

  Returns:
    tuple(command, data).
  """
  isolated_cmd_args = []
  if not options.isolated:
    if '--' in args:
      index = args.index('--')
      isolated_cmd_args = args[index+1:]
      args = args[:index]
    else:
      # optparse eats '--' sometimes.
      isolated_cmd_args = args[1:]
      args = args[:1]
    if len(args) != 1:
      raise ValueError(
          'Use --isolated, --raw-cmd or \'--\' to pass arguments to the called '
          'process.')
    # Old code. To be removed eventually.
    options.isolated, is_file = isolated_to_hash(
        options.isolate_server, options.namespace, args[0],
        isolated_format.get_hash_algo(options.namespace), options.verbose)
    if not options.isolated:
      raise ValueError('Invalid argument %s' % args[0])
  elif args:
    is_file = False
    if '--' in args:
      index = args.index('--')
      isolated_cmd_args = args[index+1:]
      if index != 0:
        raise ValueError('Unexpected arguments.')
    else:
      # optparse eats '--' sometimes.
      isolated_cmd_args = args

  command = isolated_get_run_commands(
      options.isolate_server, options.namespace, options.isolated,
      isolated_cmd_args, options.verbose)

  # If a file name was passed, use its base name of the isolated hash.
  # Otherwise, use user name as an approximation of a task name.
  if not options.task_name:
    if is_file:
      key = os.path.splitext(os.path.basename(args[0]))[0]
    else:
      key = options.user
    options.task_name = u'%s/%s/%s' % (
        key,
        '_'.join(
            '%s=%s' % (k, v)
            for k, v in sorted(options.dimensions.iteritems())),
        options.isolated)

  try:
    data = isolated_get_data(options.isolate_server)
  except (IOError, OSError):
    on_error.report('Failed to upload the zip file')
    raise ValueError('Failed to upload the zip file')

  return command, data
Author: misscache, Project: luci-py, Lines: 62, Source file: swarming.py
Example 18: trigger_task_shards
def trigger_task_shards(
    swarming, isolate_server, namespace, isolated_hash, task_name, extra_args,
    shards, dimensions, env, deadline, verbose, profile, priority):
  """Triggers multiple subtasks of a sharded task.

  Returns:
    Dict with task details, returned to caller as part of --dump-json output.
    None in case of failure.
  """
  # Collects all files that are necessary to bootstrap a task execution
  # on the bot. Usually it includes self contained run_isolated.zip and
  # a bunch of small other scripts. All heavy files are pulled
  # by run_isolated.zip. Updated in 'setup_run_isolated'.
  bundle = zip_package.ZipPackage(ROOT_DIR)

  # Make a separate Manifest for each shard, put shard index and number of
  # shards into env and subtask name.
  manifests = []
  for index in xrange(shards):
    manifest = Manifest(
        isolate_server=isolate_server,
        namespace=namespace,
        isolated_hash=isolated_hash,
        task_name=get_shard_task_name(task_name, shards, index),
        extra_args=extra_args,
        dimensions=dimensions,
        env=setup_googletest(env, shards, index),
        deadline=deadline,
        verbose=verbose,
        profile=profile,
        priority=priority)
    setup_run_isolated(manifest, bundle)
    manifests.append(manifest)

  # Upload zip bundle file to get its URL.
  try:
    bundle_url = upload_zip_bundle(isolate_server, bundle)
  except (IOError, OSError):
    on_error.report('Failed to upload the zip file for task %s' % task_name)
    return None, None

  # Attach that file to all manifests.
  for manifest in manifests:
    manifest.add_bundled_file('swarm_data.zip', bundle_url)

  # Trigger all the subtasks.
  tasks = {}
  priority_warning = False
  for index, manifest in enumerate(manifests):
    task_id, priority = trigger_by_manifest(swarming, manifest)
    if not task_id:
      break
    if not priority_warning and priority != manifest.priority:
      priority_warning = True
      print >> sys.stderr, 'Priority was reset to %s' % priority
    tasks[manifest.task_name] = {
      'shard_index': index,
      'task_id': task_id,
      'view_url': '%s/user/task/%s' % (swarming, task_id),
    }

  # Some shards weren't triggered. Abort everything.
  if len(tasks) != len(manifests):
    if tasks:
      print >> sys.stderr, 'Not all shards were triggered'
      for task_dict in tasks.itervalues():
        abort_task(swarming, task_dict['task_id'])
    return None

  return tasks
Author: bpsinc-native, Project: src_tools_swarming_client, Lines: 70, Source file: swarming.py
Example 19: map_and_run
def map_and_run(isolated_hash, storage, cache, leak_temp_dir, root_dir, extra_args):
  """Maps and run the command. Returns metadata about the result."""
  # TODO(maruel): Include performance statistics.
  result = {"exit_code": None, "internal_failure": None, "outputs_ref": None, "version": 1}
  if root_dir:
    if not os.path.isdir(root_dir):
      os.makedirs(root_dir, 0700)
    prefix = u""
  else:
    root_dir = os.path.dirname(cache.cache_dir) if cache.cache_dir else None
    prefix = u"isolated_"
  run_dir = make_temp_dir(prefix + u"run", root_dir)
  out_dir = make_temp_dir(prefix + u"out", root_dir)
  tmp_dir = make_temp_dir(prefix + u"tmp", root_dir)
  try:
    bundle = isolateserver.fetch_isolated(
        isolated_hash=isolated_hash, storage=storage, cache=cache, outdir=run_dir, require_command=True
    )
    change_tree_read_only(run_dir, bundle.read_only)
    cwd = os.path.normpath(os.path.join(run_dir, bundle.relative_cwd))
    command = bundle.command + extra_args
    file_path.ensure_command_has_abs_path(command, cwd)
    result["exit_code"] = run_command(process_command(command, out_dir), cwd, tmp_dir)
  except Exception as e:
    # An internal error occured. Report accordingly so the swarming task will be
    # retried automatically.
    logging.error("internal failure: %s", e)
    result["internal_failure"] = str(e)
    on_error.report(None)
  finally:
    try:
      if leak_temp_dir:
        logging.warning("Deliberately leaking %s for later examination", run_dir)
      else:
        if os.path.isdir(run_dir) and not file_path.rmtree(run_dir):
          # On Windows rmtree(run_dir) call above has a synchronization effect:
          # it finishes only when all task child processes terminate (since a
          # running process locks *.exe file). Examine out_dir only after that
          # call completes (since child processes may write to out_dir too and
          # we need to wait for them to finish).
          print >>sys.stderr, (
              "Failed to delete the run directory, forcibly failing\n"
              "the task because of it. No zombie process can outlive a\n"
              "successful task run and still be marked as successful.\n"
              "Fix your stuff."
          )
          if result["exit_code"] == 0:
            result["exit_code"] = 1
        if os.path.isdir(tmp_dir) and not file_path.rmtree(tmp_dir):
          print >>sys.stderr, (
              "Failed to delete the temporary directory, forcibly failing\n"
              "the task because of it. No zombie process can outlive a\n"
              "successful task run and still be marked as successful.\n"
              "Fix your stuff."
          )
          if result["exit_code"] == 0:
            result["exit_code"] = 1

      # This deletes out_dir if leak_temp_dir is not set.
      result["outputs_ref"], success = delete_and_upload(storage, out_dir, leak_temp_dir)
      if not success and result["exit_code"] == 0:
        result["exit_code"] = 1
    except Exception as e:
      # Swallow any exception in the main finally clause.
      logging.error("Leaking out_dir %s: %s", out_dir, e)
      result["internal_failure"] = str(e)
  return result
Author: Teamxrtc, Project: webrtc-streaming-node, Lines: 68, Source file: run_isolated.py
Example 20: run_tha_test
def run_tha_test(isolated_hash, storage, cache, extra_args):
  """Downloads the dependencies in the cache, hardlinks them into a temporary
  directory and runs the executable from there.

  A temporary directory is created to hold the output files. The content inside
  this directory will be uploaded back to |storage| packaged as a .isolated
  file.

  Arguments:
    isolated_hash: the sha-1 of the .isolated file that must be retrieved to
                   recreate the tree of files to run the target executable.
    storage: an isolateserver.Storage object to retrieve remote objects. This
             object has a reference to an isolateserver.StorageApi, which does
             the actual I/O.
    cache: an isolateserver.LocalCache to keep from retrieving the same objects
           constantly by caching the objects retrieved. Can be on-disk or
           in-memory.
    extra_args: optional arguments to add to the command stated in the .isolate
                file.
  """
  run_dir = make_temp_dir('run_tha_test', cache.cache_dir)
  out_dir = unicode(make_temp_dir('isolated_out', cache.cache_dir))
  result = 0
  try:
    try:
      settings = isolateserver.fetch_isolated(
          isolated_hash=isolated_hash,
          storage=storage,
          cache=cache,
          outdir=run_dir,
          require_command=True)
    except isolateserver.ConfigError:
      on_error.report(None)
      return 1

    change_tree_read_only(run_dir, settings.read_only)
    cwd = os.path.normpath(os.path.join(run_dir, settings.relative_cwd))
    command = settings.command + extra_args

    # subprocess.call doesn't consider 'cwd' when searching for executable.
    # Yet isolate can specify command relative to 'cwd'. Convert it to absolute
    # path if necessary.
    if not os.path.isabs(command[0]):
      command[0] = os.path.abspath(os.path.join(cwd, command[0]))
    command = process_command(command, out_dir)
    logging.info('Running %s, cwd=%s' % (command, cwd))

    # TODO(csharp): This should be specified somewhere else.
    # TODO(vadimsh): Pass it via 'env_vars' in manifest.
    # Add a rotating log file if one doesn't already exist.
    env = os.environ.copy()
    if MAIN_DIR:
      env.setdefault('RUN_TEST_CASES_LOG_FILE',
                     os.path.join(MAIN_DIR, RUN_TEST_CASES_LOG))
    try:
      sys.stdout.flush()
      with tools.Profiler('RunTest'):
        result = subprocess.call(command, cwd=cwd, env=env)
      logging.info(
          'Command finished with exit code %d (%s)',
          result, hex(0xffffffff & result))
    except OSError:
      on_error.report('Failed to run %s; cwd=%s' % (command, cwd))
      result = 1
  finally:
    try:
      try:
        if not rmtree(run_dir):
          print >> sys.stderr, (
              'Failed to delete the temporary directory, forcibly failing the\n'
              'task because of it. No zombie process can outlive a successful\n'
              'task run and still be marked as successful. Fix your stuff.')
          result = result or 1
      except OSError:
        logging.warning('Leaking %s', run_dir)
        result = 1

      # HACK(vadimsh): On Windows rmtree(run_dir) call above has
      # a synchronization effect: it finishes only when all task child processes
      # terminate (since a running process locks *.exe file). Examine out_dir
      # only after that call completes (since child processes may
      # write to out_dir too and we need to wait for them to finish).

      # Upload out_dir and generate a .isolated file out of this directory.
      # It is only done if files were written in the directory.
      if os.listdir(out_dir):
        with tools.Profiler('ArchiveOutput'):
          results = isolateserver.archive_files_to_storage(
              storage, [out_dir], None)
        # TODO(maruel): Implement side-channel to publish this information.
        output_data = {
          'hash': results[0][0],
          'namespace': storage.namespace,
          'storage': storage.location,
        }
        sys.stdout.flush()
        print(
            '[run_isolated_out_hack]%s[/run_isolated_out_hack]' %
            tools.format_json(output_data, dense=True))
# ......... part of the code is omitted here .........
Author: bpsinc-native, Project: src_tools_swarming_client, Lines: 101, Source file: run_isolated.py
Note: The utils.on_error.report examples in this article were compiled from source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open source projects contributed by various developers, and copyright of the source code remains with the original authors; refer to each project's License before distributing or reusing the code, and do not reproduce this compilation without permission.