This article collects typical usage examples of the Python function pulp.server.dispatch.factory._task_queue. If you are wondering what _task_queue does, how it is called, or where to find working examples of it, the curated code samples below may help.
A total of 16 code examples of the _task_queue function are shown below, sorted by popularity by default.
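All sixteen examples share the same basic access pattern: ask the dispatch factory for the process-wide task queue, then inspect or mutate the tasks it holds, locking the queue first when several steps must happen atomically (as Examples 6, 10 and 12 do). The snippet below is a minimal sketch of the read-only variant, not code taken from pulp itself; it assumes the pulp 2.x dispatch modules used throughout the examples are importable, and wanted_call_request_id is a placeholder name.

from pulp.server.dispatch import factory as dispatch_factory

def find_call_report(wanted_call_request_id):
    # the factory returns the process-wide task queue singleton
    task_queue = dispatch_factory._task_queue()
    # read-only inspection of queued tasks, as in Examples 1, 3, 4 and 9
    for task in task_queue.all_tasks():
        if task.call_request.id == wanted_call_request_id:
            return task.call_report
    return None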
Example 1: get_call_reports_by_call_request_ids
def get_call_reports_by_call_request_ids(self, call_request_id_list, include_completed=False):
    """
    Get all the call reports corresponding to the given call request ids.
    @param call_request_id_list: list of call request ids
    @type call_request_id_list: list or tuple
    @param include_completed: toggle inclusion of cached completed tasks
    @type include_completed: bool
    @return: list of call reports for all call request ids found in the task queue
    @rtype: list
    """
    task_queue = dispatch_factory._task_queue()
    if include_completed:
        queued_tasks = task_queue.all_tasks()
    else:
        queued_tasks = task_queue.incomplete_tasks()
    call_reports = []
    for task in queued_tasks:
        if task.call_request.id not in call_request_id_list:
            continue
        call_reports.append(task.call_report)
    return call_reports
Developer ID: domcleal, Project: pulp, Lines: 25, Source: coordinator.py
Example 2: install
def install(cls):
    existing = dispatch_factory._task_queue()
    if existing:
        existing.stop()
    queue = cls()
    dispatch_factory._TASK_QUEUE = queue
    return queue
Developer ID: signull, Project: pulp, Lines: 7, Source: base.py
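Examples 2 and 16 come from a test helper: install() swaps an instance of the calling class into the factory's _TASK_QUEUE slot so that tests can drive task execution by hand, and run_next() (Example 16) presumably executes the next queued task. The sketch below shows how such a helper might be used in a test; FakeTaskQueue and the test case are illustrations, not classes from the pulp test suite.

import unittest

from pulp.server.dispatch import factory as dispatch_factory


class FakeTaskQueue(object):
    # stand-in for the test queue class behind Examples 2 and 16;
    # only the pieces needed for this sketch are implemented
    def stop(self):
        pass

    @classmethod
    def install(cls):
        # mirrors Example 2: stop any existing queue and install this one
        existing = dispatch_factory._task_queue()
        if existing:
            existing.stop()
        queue = cls()
        dispatch_factory._TASK_QUEUE = queue
        return queue


class CoordinatorQueueTest(unittest.TestCase):
    def setUp(self):
        self.queue = FakeTaskQueue.install()

    def test_factory_returns_installed_queue(self):
        # code under test obtains the queue through dispatch_factory._task_queue()
        self.assertIs(dispatch_factory._task_queue(), self.queue)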
Example 3: _find_tasks
def _find_tasks(self, **criteria):
    """
    Find call reports that match the criteria given as keyword arguments.
    Supported criteria:
     * call_request_id
     * call_request_group_id
     * schedule_id
     * state
     * callable_name
     * args
     * kwargs
     * resources
     * tags
    """
    provided_criteria = set(criteria.keys())
    superfluous_criteria = provided_criteria - _VALID_SEARCH_CRITERIA
    if superfluous_criteria:
        raise dispatch_exceptions.UnrecognizedSearchCriteria(*list(superfluous_criteria))
    tasks = []
    task_queue = dispatch_factory._task_queue()
    for task in task_queue.all_tasks():
        if task_matches_criteria(task, criteria):
            tasks.append(task)
    return tasks
Developer ID: domcleal, Project: pulp, Lines: 30, Source: coordinator.py
Example 4: find_tasks
def find_tasks(self, **criteria):
    """
    Find call reports that match the criteria given as keyword arguments.
    Supported criteria:
     * task_id
     * task_group_id
     * state
     * call_name
     * class_name
     * args
     * kwargs
     * resources
     * tags
    """
    valid_criteria = set(('task_id', 'task_group_id', 'state', 'call_name',
                          'class_name', 'args', 'kwargs', 'resources', 'tags'))
    provided_criteria = set(criteria.keys())
    superfluous_criteria = provided_criteria - valid_criteria
    if superfluous_criteria:
        raise dispatch_exceptions.UnrecognizedSearchCriteria(*list(superfluous_criteria))
    tasks = []
    task_queue = dispatch_factory._task_queue()
    for task in task_queue.all_tasks():
        if task_matches_criteria(task, criteria):
            tasks.append(task)
    return tasks
Developer ID: ehelms, Project: pulp, Lines: 27, Source: coordinator.py
Example 5: GET
def GET(self):
    task_queue = dispatch_factory._task_queue()
    tasks = task_queue.all_tasks()
    queued_calls = []
    for t in tasks:
        data = {'task_id': t.id, 'queued_call_id': t.queued_call_id}
        data.update(serialization.link.child_link_obj(t.queued_call_id))
        queued_calls.append(data)
    return self.ok(queued_calls)
Developer ID: stpierre, Project: pulp, Lines: 9, Source: dispatch.py
Example 6: _process_tasks
def _process_tasks(self, task_list):
    """
    Look for, and potentially resolve, resource conflicts for the tasks in
    the task list, and enqueue them.
    @param task_list: list of tasks to work the coordinator magic on
    @type task_list: list
    """
    # we have to lock the task queue here as there is a race condition
    # between calculating the blocking/postponing tasks and enqueueing the
    # task when 2 or more tasks are being run that may have interdependencies
    task_queue = dispatch_factory._task_queue()
    task_queue.lock()
    responses_list = []
    call_resource_list = []
    try:
        for task in task_list:
            response, blocking, reasons, call_resources = self._find_conflicts(task.call_request.resources)
            task.call_report.response = response
            task.call_report.reasons = reasons
            responses_list.append(response)
            if response is dispatch_constants.CALL_REJECTED_RESPONSE:
                continue
            dependencies = dict.fromkeys(blocking, dispatch_constants.CALL_COMPLETE_STATES)
            # use the original (possibly more restrictive) values, when present
            dependencies.update(task.call_request.dependencies)
            task.call_request.dependencies = dependencies
            task.call_request.add_life_cycle_callback(dispatch_constants.CALL_ENQUEUE_LIFE_CYCLE_CALLBACK, GrantPermmissionsForTaskV2())
            task.call_request.add_life_cycle_callback(dispatch_constants.CALL_DEQUEUE_LIFE_CYCLE_CALLBACK, RevokePermissionsForTaskV2())
            task.call_request.add_life_cycle_callback(dispatch_constants.CALL_DEQUEUE_LIFE_CYCLE_CALLBACK, coordinator_dequeue_callback)
            if call_resources:
                set_call_request_id_on_call_resources(task.call_request.id, call_resources)
                call_resource_list.extend(call_resources)
        # for a call request group: if 1 of the tasks is rejected, then they are all rejected
        if reduce(lambda p, r: r is dispatch_constants.CALL_REJECTED_RESPONSE or p, responses_list, False):
            map(lambda t: setattr(t.call_report, 'response', dispatch_constants.CALL_REJECTED_RESPONSE), task_list)
            return
        if call_resource_list:
            self.call_resource_collection.insert(call_resource_list, safe=True)
        for task in task_list:
            task_queue.enqueue(task)
    finally:
        task_queue.unlock()
Developer ID: domcleal, Project: pulp, Lines: 55, Source: coordinator.py
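The reduce/map pair near the end of Example 6 encodes the rule stated in its comment: if any call in the group is rejected, every call report in the group is marked rejected and nothing is enqueued. A more explicit, equivalent formulation is sketched below; this is a rewording for clarity, not code from pulp, and it reuses the responses_list, task_list and dispatch_constants names from Example 6.

any_rejected = any(response is dispatch_constants.CALL_REJECTED_RESPONSE
                   for response in responses_list)
if any_rejected:
    # a single rejection rejects the whole call request group
    for task in task_list:
        task.call_report.response = dispatch_constants.CALL_REJECTED_RESPONSE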
Example 7: cancel_call
def cancel_call(self, call_request_id):
    """
    Cancel a call request using the call request id.
    @param call_request_id: id of the call request to cancel
    @type call_request_id: str
    @return: True if the task is being cancelled, False if not, or None if the task was not found
    @rtype: bool or None
    """
    task_queue = dispatch_factory._task_queue()
    task = task_queue.get(call_request_id)
    if task is None:
        return None
    return task_queue.cancel(task)
Developer ID: domcleal, Project: pulp, Lines: 13, Source: coordinator.py
Example 8: _run_task
def _run_task(self, task, synchronous=None, timeout=None):
    """
    Run a task.
    @param task: task to run
    @type task: L{Task} instance
    @param synchronous: whether or not to run the task synchronously,
                        None means dependent on what the conflict response is
    @type synchronous: None or bool
    @param timeout: how much time to wait for a synchronous task to start,
                    None means indefinitely
    @type timeout: None or datetime.timedelta
    """
    # we have to lock the task queue here as there is a race condition
    # between calculating the blocking/postponing tasks and enqueueing the
    # task when 2 or more tasks are being run that may have
    # interdependencies
    task_queue = dispatch_factory._task_queue()
    task_queue.lock()
    task_resource_collection = TaskResource.get_collection()
    try:
        response, blocking, reasons, task_resources = self._find_conflicts(task.call_request.resources)
        task.call_report.response = response
        task.call_report.reasons = reasons
        if response is dispatch_constants.CALL_REJECTED_RESPONSE:
            return
        task.blocking_tasks.update(blocking)
        task.call_request.add_life_cycle_callback(dispatch_constants.CALL_ENQUEUE_LIFE_CYCLE_CALLBACK, GrantPermmissionsForTaskV2())
        task.call_request.add_life_cycle_callback(dispatch_constants.CALL_DEQUEUE_LIFE_CYCLE_CALLBACK, RevokePermissionsForTaskV2())
        task.call_request.add_life_cycle_callback(dispatch_constants.CALL_DEQUEUE_LIFE_CYCLE_CALLBACK, coordinator_dequeue_callback)
        if task_resources:
            set_task_id_on_task_resources(task.id, task_resources)
            task_resource_collection.insert(task_resources, safe=True)
        task_queue.enqueue(task)
    finally:
        task_queue.unlock()
    # if the call has requested synchronous execution or can be
    # synchronously executed, do so
    if synchronous or (synchronous is None and response is dispatch_constants.CALL_ACCEPTED_RESPONSE):
        try:
            # it's perfectly legitimate for the call to complete before the first poll
            running_states = [dispatch_constants.CALL_RUNNING_STATE]
            running_states.extend(dispatch_constants.CALL_COMPLETE_STATES)
            wait_for_task(task, running_states, poll_interval=self.task_state_poll_interval, timeout=timeout)
        except OperationTimedOut:
            task_queue.dequeue(task)
            raise
        else:
            wait_for_task(task, dispatch_constants.CALL_COMPLETE_STATES,
                          poll_interval=self.task_state_poll_interval)
Developer ID: ehelms, Project: pulp, Lines: 50, Source: coordinator.py
Example 9: cancel_multiple_calls
def cancel_multiple_calls(self, call_request_group_id):
    """
    Cancel multiple call requests using the call request group id.
    @param call_request_group_id: call request group id for multiple calls
    @type call_request_group_id: str
    @return: dictionary of {call request id: cancel return} for tasks associated with the call request group id
    @rtype: dict
    """
    cancel_returns = {}
    task_queue = dispatch_factory._task_queue()
    for task in task_queue.all_tasks():
        if call_request_group_id != task.call_request.group_id:
            continue
        cancel_returns[task.call_request.id] = task_queue.cancel(task)
    return cancel_returns
Developer ID: domcleal, Project: pulp, Lines: 15, Source: coordinator.py
Example 10: _generate_call_request_group_id
def _generate_call_request_group_id(self):
    """
    Generate a unique call request group id.
    @return: uuid string
    @rtype: str
    """
    # NOTE this needs to utilize a central locking mechanism because on
    # Python < 2.5 the uuid package can generate non-unique ids if more than
    # one thread accesses it at a time
    task_queue = dispatch_factory._task_queue()
    task_queue.lock()
    try:
        return str(uuid.uuid4())
    finally:
        task_queue.unlock()
Developer ID: domcleal, Project: pulp, Lines: 15, Source: coordinator.py
Example 11: GET
def GET(self):
    call_request_group_ids = set()
    task_queue = dispatch_factory._task_queue()
    for task in task_queue.all_tasks():
        call_request_group_id = task.call_request.group_id
        if call_request_group_id is None:
            continue
        call_request_group_ids.add(call_request_group_id)
    task_group_links = []
    for id in call_request_group_ids:
        # continue to support legacy task ids
        link = {'task_group_id': id,
                'call_request_group_id': id}
        link.update(serialization.link.child_link_obj(id))
        task_group_links.append(link)
    return self.ok(task_group_links)
Developer ID: ryanschneider, Project: pulp, Lines: 16, Source: dispatch.py
Example 12: scheduler_complete_callback
def scheduler_complete_callback(call_request, call_report):
    """
    Pulp dispatch call complete lifecycle callback used to report the call's
    completion and exit state to the scheduler.
    :param call_request: call request for the scheduled call
    :type call_request: pulp.server.dispatch.call.CallRequest
    :param call_report: call report for the scheduled call
    :type call_report: pulp.server.dispatch.call.CallReport
    """
    scheduler = dispatch_factory.scheduler()
    task_queue = dispatch_factory._task_queue()
    # only allow one task at a time to report its completion
    task_queue.lock()
    try:
        scheduler.call_group_call_completed(call_request.schedule_id, call_report.state)
    finally:
        task_queue.unlock()
Developer ID: ashcrow, Project: pulp, Lines: 20, Source: scheduler.py
Example 13: _ready_task
def _ready_task(self, task):
    """
    Look for and potentially resolve resource conflicts and enqueue the task.
    @param task: task to ready
    @type task: L{Task} instance
    """
    # we have to lock the task queue here as there is a race condition
    # between calculating the blocking/postponing tasks and enqueueing the
    # task when 2 or more tasks are being run that may have interdependencies
    task_queue = dispatch_factory._task_queue()
    task_queue.lock()
    try:
        response, blocking, reasons, call_resources = self._find_conflicts(task.call_request.resources)
        task.call_report.response = response
        task.call_report.reasons = reasons
        if response is dispatch_constants.CALL_REJECTED_RESPONSE:
            return
        dependencies = dict.fromkeys(blocking, dispatch_constants.CALL_COMPLETE_STATES)
        dependencies.update(task.call_request.dependencies)  # use the original (possibly more restrictive) values, when present
        task.call_request.dependencies = dependencies
        task.call_request.add_life_cycle_callback(dispatch_constants.CALL_ENQUEUE_LIFE_CYCLE_CALLBACK, GrantPermmissionsForTaskV2())
        task.call_request.add_life_cycle_callback(dispatch_constants.CALL_DEQUEUE_LIFE_CYCLE_CALLBACK, RevokePermissionsForTaskV2())
        task.call_request.add_life_cycle_callback(dispatch_constants.CALL_DEQUEUE_LIFE_CYCLE_CALLBACK, coordinator_dequeue_callback)
        if call_resources:
            set_call_request_id_on_call_resources(task.call_request.id, call_resources)
            self.call_resource_collection.insert(call_resources, safe=True)
        task_queue.enqueue(task)
    finally:
        task_queue.unlock()
Developer ID: ryanschneider, Project: pulp, Lines: 38, Source: coordinator.py
Example 14: execute_multiple_calls
def execute_multiple_calls(self, call_request_list):
    """
    Execute a list of call requests in the tasking sub-system.
    This will run the tasks asynchronously regardless of postponing conflicts.
    @param call_request_list: call requests to run
    @type call_request_list: list of L{call.CallRequest} instances
    @return: list of call reports pertaining to the running of the requested calls
    @rtype: list of L{call.CallReport} instances
    @raise: L{TopologicalSortError} if inter-task dependencies are malformed
    """
    call_request_group_id = self._generate_call_request_group_id()
    task_list = []
    call_report_list = []
    for call_request in call_request_list:
        task = self._create_task(call_request, call_request_group_id=call_request_group_id)
        task_list.append(task)
    sorted_task_list = self._analyze_dependencies(task_list)
    task_queue = dispatch_factory._task_queue()
    # locking the task queue so that the dependency validation can take place
    # for all tasks in the group *before* any of them are run, eliminating
    # a race condition in the dependency validation
    task_queue.lock()
    try:
        for task in sorted_task_list:
            self._ready_task(task)
            call_report_list.append(copy.copy(task.call_report))
    finally:
        task_queue.unlock()
    return call_report_list
Developer ID: ryanschneider, Project: pulp, Lines: 36, Source: coordinator.py
Example 15: _run_task
def _run_task(self, task, timeout=None):
    """
    Run a task "synchronously".
    @param task: task to run
    @type task: L{Task} instance
    @param timeout: how much time to wait for a synchronous task to start,
                    None means indefinitely
    @type timeout: None or datetime.timedelta
    """
    task_queue = dispatch_factory._task_queue()
    valid_states = [dispatch_constants.CALL_RUNNING_STATE]
    # it's perfectly legitimate for the call to complete before the first poll
    valid_states.extend(dispatch_constants.CALL_COMPLETE_STATES)
    try:
        wait_for_task(task, valid_states, poll_interval=self.task_state_poll_interval, timeout=timeout)
    except OperationTimedOut:
        task_queue.dequeue(task)  # dequeue or cancel? really need timed out support
        raise
    else:
        wait_for_task(task, dispatch_constants.CALL_COMPLETE_STATES,
                      poll_interval=self.task_state_poll_interval)
Developer ID: domcleal, Project: pulp, Lines: 24, Source: coordinator.py
Example 16: run_next
def run_next(cls):
    queue = dispatch_factory._task_queue()
    if isinstance(queue, cls):
        queue.__run_next()
    else:
        raise Exception, '%s not installed' % cls
Developer ID: signull, Project: pulp, Lines: 6, Source: base.py
Note: The pulp.server.dispatch.factory._task_queue examples in this article were collected from source code and documentation hosting platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their developers, and copyright in the source code remains with the original authors; please consult the corresponding project's license before redistributing or reusing it, and do not republish without permission.