This article collects and summarizes typical usage examples of the Python function taskflow.engines.load. If you have been wondering how exactly the load function is used, how to call it, or what real-world load usage looks like, the curated code examples below may help.
The following presents 20 code examples of the load function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
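Before the project examples, here is a minimal, self-contained sketch of the basic engines.load pattern; the task and symbol names (Doubler, 'doubled', 'x') are illustrative only, not taken from the projects below:

from taskflow import engines
from taskflow import task
from taskflow.patterns import linear_flow

class Doubler(task.Task):
    def execute(self, x):
        # Whatever execute() returns is stored under the 'provides' symbol.
        return x * 2

flow = linear_flow.Flow('demo').add(Doubler(provides='doubled'))
eng = engines.load(flow, store={'x': 21})  # build an engine around the flow
eng.run()                                  # execute the flow to completion
print(eng.storage.fetch('doubled'))        # -> 42

Note that load itself does not run anything; it only wraps the flow in an engine, which is why every example below follows it with run().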
Example 1: execute
def execute(input_data=None):
print "input Data" + str(input_data) if input_data else "nothing"
flow = get_flow(input_data)
eng = engines.load(flow, engine_conf='parallel')
result = eng.run()
return result
Author: Phoenix1708, Project: OpenAcademy_OpenStack_Flyway, Lines: 7, Source: flow.py
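A side note on this example: engine_conf='parallel' reflects an older taskflow API that has since been deprecated in favor of the engine keyword. On a reasonably recent taskflow release the same call would be written as (a hedged sketch):

eng = engines.load(flow, engine='parallel')

Several of the later examples (6, 17, 20, and the serial-engine artman examples) already use this newer keyword.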
Example 2: _run
def _run(self, task_id, task_type):
LOG.debug(
"Taskflow executor picked up the execution of task ID "
"%(task_id)s of task type "
"%(task_type)s" % {"task_id": task_id, "task_type": task_type}
)
task = script_utils.get_task(self.task_repo, task_id)
if task is None:
        # NOTE: This happens if the task is not found in the database. In
        # such cases there is no way to update the task status, so it is
        # ignored here.
return
flow = self._get_flow(task)
try:
with self._executor() as executor:
engine = engines.load(flow, self.engine_conf, executor=executor, **self.engine_kwargs)
with llistener.DynamicLoggingListener(engine, log=LOG):
engine.run()
except Exception as exc:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to execute task %(task_id)s: %(exc)s") % {"task_id": task_id, "exc": exc.message})
# TODO(sabari): Check for specific exceptions and update the
# task failure message.
task.fail(_("Task failed due to Internal Error"))
self.task_repo.save(task)
Author: ianunruh, Project: glance, Lines: 28, Source: taskflow_executor.py
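The DynamicLoggingListener used above routes task state transitions and failures to the given logger while the engine runs. A standalone sketch of the same pattern, reduced to its essentials (the no-op task is illustrative):

import logging

from taskflow import engines
from taskflow import task
from taskflow.listeners import logging as llistener
from taskflow.patterns import linear_flow

LOG = logging.getLogger(__name__)

class Noop(task.Task):
    def execute(self):
        pass  # state transitions are still logged by the listener

engine = engines.load(linear_flow.Flow('f').add(Noop()))
with llistener.DynamicLoggingListener(engine, log=LOG):
    engine.run()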
Example 3: execute
def execute(self, actions):
try:
        # NOTE(jed) We want to have a strong separation of concerns
        # between the Watcher planner and the Watcher Applier in order
        # to allow the possibility of supporting several workflow
        # engines. We want to provide the 'taskflow' engine by default,
        # although we still want to leave users the possibility to
        # change it.
        # TODO(jed) we need to change the way the actions are stored.
        # The current implementation only uses a linked list of actions.
        # TODO(jed) add oslo conf for retry and name
flow = gf.Flow("watcher_flow")
previous = None
for a in actions:
task = TaskFlowActionContainer(a, self)
flow.add(task)
if previous is None:
previous = task
# we have only one Action in the Action Plan
if len(actions) == 1:
nop = TaskFlowNop()
flow.add(nop)
flow.link(previous, nop)
else:
# decider == guard (UML)
flow.link(previous, task, decider=self.decider)
previous = task
e = engines.load(flow)
e.run()
except Exception as e:
raise exception.WorkflowExecutionException(error=e)
Author: j-carpentier, Project: watcher, Lines: 33, Source: default.py
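The decider passed to flow.link() is what turns this graph flow into a guarded workflow: it is a callable that receives the history of results from the linked predecessor atoms, and returning a falsey value causes the downstream task (and its successors) to be skipped. Watcher's actual decider is not shown in this snippet; a hedged sketch of the shape such a method could take:

def decider(self, history):
    # history maps predecessor atom names to their results; skip the
    # next action when any predecessor reported a falsey result.
    return all(history.values())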
Example 4: execute_flow
def execute_flow(flow):
"""
Create all necessary prerequisites like task database and thread pool and
execute TaskFlow flow.
:param flow: TaskFlow flow instance
"""
backend = backends.fetch({
'connection': 'sqlite:///' + TASK_DATABASE_FILE,
'isolation_level': 'SERIALIZABLE'
})
executor = futurist.ThreadPoolExecutor(max_workers=MAX_WORKERS)
conn = backend.get_connection()
logbook, flow_detail = _ensure_db_initialized(conn, flow)
engine = engines.load(
flow, flow_detail=flow_detail, backend=backend, book=logbook,
engine='parallel', executor=executor)
engine.compile()
_workaround_reverted_reset(flow_detail)
try:
engine.run()
except exceptions.WrappedFailure as wf:
for failure in wf:
if failure.exc_info is not None:
traceback.print_exception(*failure.exc_info)
else:
                print(failure)
Author: JabarAli, Project: CloudFerry, Lines: 27, Source: taskflow_utils.py
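_ensure_db_initialized is a CloudFerry helper that is not shown here. The usual way to create a logbook and flow detail for engines.load, based on taskflow's own persistence examples, looks roughly like the following (a sketch, not CloudFerry's actual implementation):

from oslo_utils import uuidutils
from taskflow.persistence import models

def _ensure_db_initialized(conn, flow):
    conn.upgrade()  # create the persistence tables when missing
    book = models.LogBook('cloudferry')
    flow_detail = models.FlowDetail(flow.name, uuid=uuidutils.generate_uuid())
    book.add(flow_detail)
    conn.save_logbook(book)
    return book, flow_detail

Passing book and flow_detail back into engines.load, as the example does, is what lets a crashed run be resumed from the database later.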
Example 5: _run
def _run(self, task_id, task_type):
LOG.debug('Taskflow executor picked up the execution of task ID '
'%(task_id)s of task type '
'%(task_type)s' % {'task_id': task_id,
'task_type': task_type})
task = script_utils.get_task(self.task_repo, task_id)
if task is None:
        # NOTE: This happens if the task is not found in the database. In
        # such cases there is no way to update the task status, so it is
        # ignored here.
return
flow = self._get_flow(task)
try:
with self._executor() as executor:
engine = engines.load(flow, self.engine_conf,
executor=executor, **self.engine_kwargs)
with llistener.DynamicLoggingListener(engine, log=LOG):
engine.run()
except Exception as exc:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to execute task %(task_id)s: %(exc)s') %
{'task_id': task_id, 'exc': exc.message})
Author: Dynavisor, Project: glance, Lines: 25, Source: taskflow_executor.py
Example 6: _run
def _run(self, task_id, task_type):
LOG.debug('Taskflow executor picked up the execution of task ID '
'%(task_id)s of task type '
'%(task_type)s', {'task_id': task_id,
'task_type': task_type})
task = script_utils.get_task(self.task_repo, task_id)
if task is None:
        # NOTE: This happens if the task is not found in the database. In
        # such cases there is no way to update the task status, so it is
        # ignored here.
return
flow = self._get_flow(task)
executor = self._fetch_an_executor()
try:
engine = engines.load(
flow,
engine=CONF.taskflow_executor.engine_mode, executor=executor,
max_workers=CONF.taskflow_executor.max_workers)
with llistener.DynamicLoggingListener(engine, log=LOG):
engine.run()
except Exception as exc:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Failed to execute task %(task_id)s: %(exc)s') %
{'task_id': task_id,
'exc': encodeutils.exception_to_unicode(exc)})
# TODO(sabari): Check for specific exceptions and update the
# task failure message.
task.fail(_('Task failed due to Internal Error'))
self.task_repo.save(task)
finally:
if executor is not None:
executor.shutdown()
Author: froyobin, Project: xmonitor, Lines: 34, Source: taskflow_executor.py
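_fetch_an_executor is a glance-lineage helper whose body is not shown. Conceptually it hands back a futurist executor only when the engine runs in parallel mode; a hedged sketch of that idea (not the actual project code):

import futurist

def _fetch_an_executor():
    if CONF.taskflow_executor.engine_mode != 'parallel':
        return None  # serial engines need no executor
    return futurist.ThreadPoolExecutor(
        max_workers=CONF.taskflow_executor.max_workers)

The finally block above then shuts the pool down explicitly, which is why executor may be None there.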
Example 7: main
def main(*args):
"""Main method of artman."""
# If no arguments are sent, we are using the entry point; derive
# them from sys.argv.
if not args:
args = sys.argv[1:]
# Get to a normalized set of arguments.
flags = parse_args(*args)
user_config = loader.read_user_config(flags.user_config)
_adjust_root_dir(flags.root_dir)
pipeline_name, pipeline_kwargs = normalize_flags(flags, user_config)
if flags.local:
try:
pipeline = pipeline_factory.make_pipeline(pipeline_name, False,
**pipeline_kwargs)
            # Hardcoded to run the pipeline with the serial engine,
            # though other engines would also work.
engine = engines.load(
pipeline.flow, engine='serial', store=pipeline.kwargs)
engine.run()
except:
logger.error(traceback.format_exc())
sys.exit(32)
finally:
_change_owner(flags, pipeline_name, pipeline_kwargs)
else:
support.check_docker_requirements(flags.image)
# Note: artman currently won't work if input directory doesn't contain
# shared configuration files (e.g. gapic/packaging/dependencies.yaml).
# This will make artman less useful for non-Google APIs.
# TODO(ethanbao): Fix that by checking the input directory and
# pulling the shared configuration files if necessary.
logger.info('Running artman command in a Docker instance.')
_run_artman_in_docker(flags)
Author: ethanbao, Project: artman, Lines: 35, Source: main.py
Example 8: main
def main(*args):
"""Main method of artman."""
# If no arguments are sent, we are using the entry point; derive
# them from sys.argv.
if not args:
args = sys.argv[1:]
# Get to a normalized set of arguments.
flags = parse_args(*args)
user_config = loader.read_user_config(flags.user_config)
_adjust_root_dir(flags.root_dir)
pipeline_name, pipeline_kwargs = normalize_flags(flags, user_config)
if flags.local:
try:
pipeline = pipeline_factory.make_pipeline(pipeline_name,
**pipeline_kwargs)
            # Hardcoded to run the pipeline with the serial engine,
            # though other engines would also work.
engine = engines.load(
pipeline.flow, engine='serial', store=pipeline.kwargs)
engine.run()
except:
logger.error(traceback.format_exc())
sys.exit(32)
finally:
_change_owner(flags, pipeline_name, pipeline_kwargs)
else:
support.check_docker_requirements(flags.image)
# Note: artman currently won't work if input directory doesn't contain
# common-protos.
logger.info('Running artman command in a Docker instance.')
_run_artman_in_docker(flags)
Author: garrettjonesgoogle, Project: artman, Lines: 32, Source: main.py
Example 9: run_flow
def run_flow(flow_name, init_params=None):
    """Run the tasks in the given flow.

    This is the actual taskflow runner.
    """
    if flow_name not in app_task_flows:
        raise Exception('taskflow-%s not defined' % flow_name)
flow = lflow.Flow(flow_name)
for task_cls, _ in app_task_flows[flow_name]:
task_params = getattr(task_cls, 'properties')
if isinstance(task_params, dict):
flow.add(task_cls(**task_params))
eng = engines.load(flow, store=init_params or {})
if sys.version_info > (2, 7):
with printing.PrintingListener(eng), timing.PrintingDurationListener(eng):
eng.run()
else:
with nested(printing.PrintingListener(eng), timing.PrintingDurationListener(eng)):
eng.run()
return eng.storage.fetch_all()
Author: imjoey, Project: pyflowtask, Lines: 25, Source: __init__.py
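The version check exists because contextlib.nested was removed in Python 3, while this code still wants to attach two listeners at once. On Python 3 the same thing can be written portably with contextlib.ExitStack (a sketch, assuming eng was loaded as above):

from contextlib import ExitStack

with ExitStack() as stack:
    stack.enter_context(printing.PrintingListener(eng))
    stack.enter_context(timing.PrintingDurationListener(eng))
    eng.run()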
Example 10: main
def main(args):
pipeline_name, pipeline_kwargs, env, local_repo = _parse_args(args)
if local_repo:
pipeline_kwargs = _load_local_repo(local_repo, **pipeline_kwargs)
if env:
# Execute pipeline task remotely based on the specified env param.
pipeline = pipeline_factory.make_pipeline(
pipeline_name, True, **pipeline_kwargs)
jb = job_util.post_remote_pipeline_job_and_wait(pipeline, env)
task_details, flow_detail = job_util.fetch_job_status(jb, env)
for task_detail in task_details:
if task_detail.name == 'BlobUploadTask' and task_detail.results:
bucket_name, path, _ = task_detail.results
pipeline_util.download_from_gcs(
bucket_name,
path,
os.path.join(tempfile.gettempdir(), 'artman-remote'))
if flow_detail.state != 'SUCCESS':
# Print the remote log if the pipeline execution completes but not
# with SUCCESS status.
_print_log(pipeline_kwargs['pipeline_id'])
else:
pipeline = pipeline_factory.make_pipeline(
pipeline_name, False, **pipeline_kwargs)
        # Hardcoded to run the pipeline with the serial engine,
        # though other engines would also work.
engine = engines.load(pipeline.flow, engine='serial',
store=pipeline.kwargs)
engine.run()
Author: geigerj, Project: artman, Lines: 33, Source: execute_pipeline.py
Example 11: run_update_property_flow
def run_update_property_flow(property_spec, update_type, update_info_list):
e = engines.load(
update_property_flow(),
store={"property_spec": property_spec, "update_type": update_type, "update_info_list": update_info_list},
engine="serial",
)
e.run()
Author: bentwire, Project: poppy, Lines: 7, Source: update_property_flow.py
Example 12: deploy
def deploy(self):
"""
deploy image in compute node, return the origin path to create snapshot
:returns origin_path: origin path to create snapshot
"""
LOG.debug("Virtman: in deploy_base_image, image name = %s, "
"multipath_path = %s, origin_path = %s, cached_path = %s, "
"is_login = %s" %
(self.image_name, self.multipath_path,
self.origin_path, self.cached_path,
self.is_login))
# Check if it had origin or not!
if self.origin_path:
return self.origin_path
# check local image and save the image connections
self.check_local_image()
# Reform connections
# If it has image on the local node or no path to connect, connect to
# root
parent_connections = self.modify_parent_connection()
# rebuild multipath
self.rebuild_multipath(parent_connections)
# build_chain = Chain()
# build_chain.add_step(
# partial(Cache.create_cache, base_image),
# partial(Cache.delete_cache, base_image))
# build_chain.add_step(
# partial(Origin.create_origin, base_image),
# partial(Origin.delete_origin, base_image))
# build_chain.add_step(
# partial(Target.create_target, base_image),
# partial(Target.delete_target, base_image))
# build_chain.add_step(
# partial(Register.login_master, base_image),
# partial(Register.logout_master, base_image))
# build_chain.do()
wf = linear_flow.Flow("base_image_flow")
wf.add(CreateCacheTask(),
CreateOriginTask(),
CreateTargetTask(),
LoginMasterTask()
)
dict_for_task = dict(base_image=self)
en = engines.load(wf, store=dict_for_task)
en.run()
LOG.debug("Virtman: baseimage OK!\n"
"target_id = %s, origin_path = %s, origin_name = %s, "
"cached_path = %s, multipath_path = %s, multipath_name = %s" %
(self.target_id, self.origin_path,
self.origin_name, self.cached_path,
self.multipath_path, self.multipath_name))
Author: vmthunder, Project: virtman, Lines: 59, Source: baseimage_new.py
Example 13: test_checks_for_dups_globally
def test_checks_for_dups_globally(self):
flo = gf.Flow("test").add(
gf.Flow("int1").add(test_utils.DummyTask(name="a")),
gf.Flow("int2").add(test_utils.DummyTask(name="a")))
e = engines.load(flo)
self.assertRaisesRegexp(exc.Duplicate,
'^Atoms with duplicate names',
e.compile)
Author: FedericoCeratto, Project: taskflow, Lines: 8, Source: test_compile.py
Example 14: test_formatted_via_listener
def test_formatted_via_listener(self, mock_format_node):
mock_format_node.return_value = 'A node'
flo = self._make_test_flow()
e = engines.load(flo)
with logging_listener.DynamicLoggingListener(e):
self.assertRaises(RuntimeError, e.run)
self.assertTrue(mock_format_node.called)
Author: FedericoCeratto, Project: taskflow, Lines: 8, Source: test_formatters.py
Example 15: execute
def execute(input_data=None):
flow = get_flow(input_data)
    # TODO: need to figure out a better way to allow the user to specify
    # a specific resource to migrate
eng = engines.load(flow)
result = eng.run()
return result
Author: OpenAcademy-OpenStack, Project: OpenStack_Flyway, Lines: 9, Source: flow.py
Example 16: calculate
def calculate(engine_conf):
# Subdivide the work into X pieces, then request each worker to calculate
# one of those chunks and then later we will write these chunks out to
# an image bitmap file.
    # An unordered flow is used here since the mandelbrot calculation is an
    # example of an embarrassingly parallel computation that we can scatter
    # across as many workers as possible.
flow = uf.Flow("mandelbrot")
    # These symbols will be automatically given to tasks as input to their
    # execute method; in this case these are constants used in the mandelbrot
    # calculation.
store = {
'mandelbrot_config': [-2.0, 1.0, -1.0, 1.0, MAX_ITERATIONS],
'image_config': {
'size': IMAGE_SIZE,
}
}
# We need the task names to be in the right order so that we can extract
# the final results in the right order (we don't care about the order when
# executing).
task_names = []
# Compose our workflow.
height, _width = IMAGE_SIZE
chunk_size = int(math.ceil(height / float(CHUNK_COUNT)))
for i in compat_range(0, CHUNK_COUNT):
chunk_name = 'chunk_%s' % i
task_name = "calculation_%s" % i
# Break the calculation up into chunk size pieces.
rows = [i * chunk_size, i * chunk_size + chunk_size]
flow.add(
MandelCalculator(task_name,
# This ensures the storage symbol with name
# 'chunk_name' is sent into the tasks local
# symbol 'chunk'. This is how we give each
# calculator its own correct sequence of rows
# to work on.
rebind={'chunk': chunk_name}))
store[chunk_name] = rows
task_names.append(task_name)
# Now execute it.
eng = engines.load(flow, store=store, engine_conf=engine_conf)
eng.run()
# Gather all the results and order them for further processing.
gather = []
for name in task_names:
gather.extend(eng.storage.get(name))
points = []
for y, row in enumerate(gather):
for x, color in enumerate(row):
points.append(((x, y), color))
return points
Author: balagopalraj, Project: clearlinux, Lines: 57, Source: wbe_mandelbrot.py
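The rebind mapping is the key mechanism in this example: every MandelCalculator declares an execute(chunk) argument, and rebind={'chunk': chunk_name} tells the engine to satisfy that argument from the store entry named 'chunk_0', 'chunk_1', and so on. The same mechanism in isolation, with illustrative names:

from taskflow import engines
from taskflow import task
from taskflow.patterns import unordered_flow

class ChunkDoubler(task.Task):
    def execute(self, chunk):
        return [2 * v for v in chunk]

flow = unordered_flow.Flow('demo').add(
    ChunkDoubler('calc_0', rebind={'chunk': 'chunk_0'}))
eng = engines.load(flow, store={'chunk_0': [1, 2, 3]})
eng.run()
print(eng.storage.get('calc_0'))  # -> [2, 4, 6]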
Example 17: run
def run(engine_options):
flow = lf.Flow('simple-linear').add(
utils.TaskOneArgOneReturn(provides='result1'),
utils.TaskMultiArgOneReturn(provides='result2')
)
eng = engines.load(flow,
store=dict(x=111, y=222, z=333),
engine='worker-based', **engine_options)
eng.run()
return eng.storage.fetch_all()
Author: Dynavisor, Project: taskflow, Lines: 10, Source: wbe_simple_linear.py
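A worker-based engine only does something useful when separate worker processes are listening on a shared transport, so engine_options must describe that transport. A hedged sketch of what such options can look like, following taskflow's worker-based-engine examples (a real deployment would use a broker such as RabbitMQ rather than the in-memory transport):

engine_options = {
    'exchange': 'taskflow',
    'topics': ['test-topic'],  # topics the workers were started with
    'transport': 'memory',     # e.g. 'rabbit' in a real deployment
    'transport_options': {},
}
result = run(engine_options)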
Example 18: _taskflow_load
def _taskflow_load(self, flow, **kwargs):
eng = tf_engines.load(
flow,
engine_conf=CONF.task_flow.engine,
executor=self.executor,
**kwargs)
eng.compile()
eng.prepare()
return eng
Author: KevoTran, Project: octavia, Lines: 10, Source: base_taskflow.py
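Calling compile() and prepare() eagerly, instead of letting run() do both implicitly, surfaces flow-construction problems (such as the duplicate-name error from Example 13) and storage setup issues before anything executes. The caller is then handed a validated engine and only has to run it; for instance (the store key here is hypothetical):

eng = self._taskflow_load(flow, store={'loadbalancer_id': lb_id})
eng.run()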
Example 19: main
def main():
pipeline_name, pipeline_kwargs, remote_mode = _parse_args()
pipeline = pipeline_factory.make_pipeline(pipeline_name, **pipeline_kwargs)
if remote_mode:
job_util.post_remote_pipeline_job(pipeline)
else:
        # Hardcoded to execute the pipeline with the serial engine,
        # though other engines would also work.
engine = engines.load(pipeline.flow, engine="serial", store=pipeline.kwargs)
engine.run()
Author: blowmage, Project: artman, Lines: 10, Source: execute_pipeline.py
Example 20: _taskflow_load
def _taskflow_load(self, flow, **kwargs):
eng = tf_engines.load(
flow,
engine=CONF.task_flow.engine,
executor=self.executor,
never_resolve=CONF.task_flow.disable_revert,
**kwargs)
eng.compile()
eng.prepare()
return eng
Author: openstack, Project: octavia, Lines: 11, Source: base_taskflow.py
Note: The taskflow.engines.load examples in this article were compiled by 纯净天空 (vimsky) from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; the copyright of each snippet belongs to its original author. For redistribution and use, please consult the license of the corresponding project. Do not reproduce without permission.