This article collects typical usage examples of the Python function nailgun.orchestrator.deployment_serializers.serialize. If you are wondering what the serialize function does, how to call it, or want to see it used in real code, the curated examples below should help.
The article presents 18 code examples of the serialize function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python samples.
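Before the individual examples, here is a minimal sketch of the call pattern most of them share. The helper name serialize_cluster_for_astute and the AstuteGraph import path are assumptions inferred from the samples below (older Fuel releases import it from deployment_graph, newer ones from orchestrator_graph, and some releases omit the graph argument entirely), so treat this as an orientation sketch rather than a definitive API reference.

# A minimal sketch of the common call pattern seen in the examples below.
# Assumption: running inside nailgun with a cluster and its nodes already
# loaded; the AstuteGraph import path varies between Fuel releases.
from nailgun.orchestrator import deployment_serializers
from nailgun.orchestrator.deployment_graph import AstuteGraph


def serialize_cluster_for_astute(cluster, nodes):
    # Build the deployment graph for the cluster, then serialize the
    # deployment data that nailgun sends to Astute for these nodes.
    graph = AstuteGraph(cluster)
    return deployment_serializers.serialize(
        graph, cluster, nodes, ignore_customized=True)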
Example 1: test_deployment_serialization_ignore_customized

def test_deployment_serialization_ignore_customized(self, _):
    cluster = self._create_cluster_with_extensions()
    data = [{"uid": n.uid} for n in cluster.nodes]

    mserializer = mock.MagicMock()
    mserializer.return_value = mock.MagicMock()
    mserializer.return_value.serialize.return_value = data

    with mock.patch(
            'nailgun.orchestrator.deployment_serializers.'
            'get_serializer_for_cluster',
            return_value=mserializer):
        with mock.patch('nailgun.orchestrator.deployment_serializers.'
                        'fire_callback_on_deployment_data_serialization'
                        ) as mfire_callback:
            replaced_data = ["it's", "something"]
            with mock.patch.object(
                    cluster.nodes[0], 'replaced_deployment_info',
                    new_callable=mock.Mock(return_value=replaced_data)):
                graph = orchestrator_graph.AstuteGraph(cluster)
                deployment_serializers.serialize(
                    graph, cluster, cluster.nodes, ignore_customized=True)

    mfire_callback.assert_called_once_with(data, cluster, cluster.nodes)

Developer ID: huyupeng, Project: fuel-web, Lines: 26, Source: test_extensions.py
Example 2: test_block_device_disks

def test_block_device_disks(self):
    self.env.create(
        release_kwargs={'version': self.env_version},
        cluster_kwargs={
            'mode': consts.CLUSTER_MODES.ha_compact,
            'net_provider': consts.CLUSTER_NET_PROVIDERS.neutron,
            'net_segment_type': consts.NEUTRON_SEGMENT_TYPES.vlan})
    self.cluster_db = self.env.clusters[0]
    self.env.create_node(
        cluster_id=self.cluster_db.id,
        roles=['cinder-block-device']
    )
    self.env.create_node(
        cluster_id=self.cluster_db.id,
        roles=['controller']
    )
    serialized_for_astute = deployment_serializers.serialize(
        AstuteGraph(self.cluster_db),
        self.cluster_db,
        self.cluster_db.nodes)

    for node in serialized_for_astute['nodes']:
        self.assertIn("node_volumes", node)
        for node_volume in node["node_volumes"]:
            if node_volume["id"] == "cinder-block-device":
                self.assertEqual(node_volume["volumes"], [])
            else:
                self.assertNotEqual(node_volume["volumes"], [])

Developer ID: sebrandon1, Project: fuel-web, Lines: 28, Source: test_pipelines.py
Example 3: message

def message(cls, task, nodes):
    logger.debug("%s.message(task=%s)", cls.__class__.__name__, task.uuid)

    for n in nodes:
        if n.pending_roles:
            n.roles += n.pending_roles
            n.pending_roles = []
        n.status = 'provisioned'
        n.progress = 0

    orchestrator_graph = deployment_graph.AstuteGraph(task.cluster)
    serialized_cluster = deployment_serializers.serialize(
        orchestrator_graph, task.cluster, nodes)

    # After serialization set pending_addition to False
    for node in nodes:
        node.pending_addition = False

    rpc_message = make_astute_message(
        task,
        'deploy',
        'deploy_resp',
        {
            'deployment_info': serialized_cluster
        }
    )
    db().flush()
    return rpc_message

Developer ID: vefimova, Project: fuel-web, Lines: 29, Source: task.py
Example 4: message

def message(cls, task):
    logger.debug("DeploymentTask.message(task=%s)" % task.uuid)
    task.cluster.prepare_for_deployment()
    nodes = TaskHelper.nodes_to_deploy(task.cluster)
    nodes_ids = [n.id for n in nodes]

    for n in db().query(Node).filter_by(
            cluster=task.cluster).order_by(Node.id):
        # However, we must not pass nodes which are set to be deleted.
        if n.pending_deletion:
            continue

        if n.id in nodes_ids:  # It's a node which we need to redeploy
            n.pending_addition = False
            if n.pending_roles:
                n.roles += n.pending_roles
                n.pending_roles = []
            if n.status in ('deploying',):
                n.status = 'provisioned'
                n.progress = 0
            db().add(n)
    db().commit()

    # here we replace provisioning data if user redefined them
    serialized_cluster = task.cluster.replaced_deployment_info or \
        deployment_serializers.serialize(task.cluster)

    return {
        'method': 'deploy',
        'respond_to': 'deploy_resp',
        'args': {
            'task_uuid': task.uuid,
            'deployment_info': serialized_cluster}}

Developer ID: mrasskazov, Project: fuelweb, Lines: 33, Source: task.py
Example 5: message

def message(cls, task, nodes):
    logger.debug("%s.message(task=%s)", cls.__class__.__name__, task.uuid)

    for n in nodes:
        if n.pending_roles:
            n.roles += n.pending_roles
            n.pending_roles = []
        n.status = 'provisioned'
        n.progress = 0

    # here we replace deployment data if user redefined them
    serialized_cluster = deployment_serializers.serialize(
        task.cluster, nodes)

    # After serialization set pending_addition to False
    for node in nodes:
        node.pending_addition = False

    rpc_message = make_astute_message(
        task,
        'deploy',
        'deploy_resp',
        {
            'deployment_info': serialized_cluster
        }
    )
    db().commit()
    return rpc_message

Developer ID: cxb811201, Project: fuel-web, Lines: 28, Source: task.py
Example 6: _serialize

def _serialize(self, cluster, nodes):
    if objects.Release.is_lcm_supported(cluster.release):
        return deployment_serializers.serialize_for_lcm(
            cluster, nodes, ignore_customized=True
        )
    graph = orchestrator_graph.AstuteGraph(cluster)
    return deployment_serializers.serialize(
        graph, cluster, nodes, ignore_customized=True)

Developer ID: mmalchuk, Project: openstack-fuel-web, Lines: 8, Source: orchestrator.py
Example 7: _serialize

def _serialize(self, cluster, nodes):
    if objects.Release.is_lcm_supported(cluster.release):
        serialized = deployment_serializers.serialize_for_lcm(
            cluster, nodes, ignore_customized=True
        )
    else:
        graph = orchestrator_graph.AstuteGraph(cluster)
        serialized = deployment_serializers.serialize(
            graph, cluster, nodes, ignore_customized=True)

    return _deployment_info_in_compatible_format(
        serialized, utils.parse_bool(web.input(split='0').split)
    )

Developer ID: openstack, Project: fuel-web, Lines: 13, Source: orchestrator.py
Example 8: message

def message(cls, task, nodes):
    logger.debug("DeploymentTask.message(task=%s)" % task.uuid)

    nodes_ids = [n.id for n in nodes]
    for n in db().query(Node).filter_by(
            cluster=task.cluster).order_by(Node.id):
        if n.id in nodes_ids:
            if n.pending_roles:
                n.roles += n.pending_roles
                n.pending_roles = []

            # If the receiver for some reason didn't update
            # the node's status to provisioned when deployment
            # started, we should do it in nailgun
            if n.status in (NODE_STATUSES.deploying,):
                n.status = NODE_STATUSES.provisioned

            n.progress = 0
            db().add(n)
    db().flush()

    deployment_tasks = []

    orchestrator_graph = deployment_graph.AstuteGraph(task.cluster)
    orchestrator_graph.only_tasks(deployment_tasks)

    # serialized_cluster = deployment_serializers.serialize(
    #     orchestrator_graph, task.cluster, nodes)
    serialized_cluster = deployment_serializers.serialize(
        task.cluster, nodes)

    pre_deployment = plugins_serializers.pre_deployment_serialize(
        task.cluster, nodes)
    post_deployment = plugins_serializers.post_deployment_serialize(
        task.cluster, nodes)

    # After serialization set pending_addition to False
    for node in nodes:
        node.pending_addition = False

    rpc_message = make_astute_message(
        task,
        'deploy',
        'deploy_resp',
        {
            'deployment_info': serialized_cluster,
            'pre_deployment': pre_deployment,
            'post_deployment': post_deployment
        }
    )
    db().commit()
    return rpc_message

Developer ID: yxh1990, Project: fuel-cloudmaster, Lines: 51, Source: task.py
Example 9: test_deployment_serialization_ignore_customized_false

def test_deployment_serialization_ignore_customized_false(self, _):
    cluster = self._create_cluster_with_extensions(
        nodes_kwargs=[
            {'roles': ['controller'], 'pending_addition': True},
            {'roles': ['controller'], 'pending_addition': True},
            {'roles': ['controller'], 'pending_addition': True},
            {'roles': ['controller'], 'pending_addition': True},
        ]
    )

    data = [{"uid": n.uid} for n in cluster.nodes]
    expected_data = copy.deepcopy(data[1:])

    mserializer = mock.MagicMock()
    mserializer.return_value = mock.MagicMock()
    mserializer.return_value.serialize.return_value = data

    with mock.patch(
            'nailgun.orchestrator.deployment_serializers.'
            'get_serializer_for_cluster',
            return_value=mserializer):
        with mock.patch('nailgun.orchestrator.deployment_serializers.'
                        'fire_callback_on_deployment_data_serialization',
                        ) as mfire_callback:
            replaced_data = ["it's", "something"]
            with mock.patch.object(
                    cluster.nodes[0], 'replaced_deployment_info',
                    new_callable=mock.Mock(return_value=replaced_data)):
                graph = orchestrator_graph.AstuteGraph(cluster)
                deployment_serializers.serialize(
                    graph, cluster, cluster.nodes, ignore_customized=False)

    self.assertEqual(mfire_callback.call_args[0][0], expected_data)
    self.assertIs(mfire_callback.call_args[0][1], cluster)
    self.assertItemsEqual(
        mfire_callback.call_args[0][2], cluster.nodes[1:])

Developer ID: huyupeng, Project: fuel-web, Lines: 38, Source: test_extensions.py
Example 10: message

def message(cls, task):
    logger.debug("DeploymentTask.message(task=%s)" % task.uuid)
    task.cluster.prepare_for_deployment()
    nodes = TaskHelper.nodes_to_deploy(task.cluster)
    nodes_ids = [n.id for n in nodes]

    for n in db().query(Node).filter_by(cluster=task.cluster).order_by(Node.id):
        # However, we must not pass nodes which are set to be deleted.
        if n.pending_deletion:
            continue

        if n.id in nodes_ids:
            if n.pending_roles:
                n.roles += n.pending_roles
                n.pending_roles = []

            # If the receiver for some reason didn't update
            # the node's status to provisioned when deployment
            # started, we should do it in nailgun
            if n.status in ("deploying",):
                n.status = "provisioned"

            n.progress = 0
            db().add(n)
    db().commit()

    # here we replace provisioning data if user redefined them
    serialized_cluster = task.cluster.replaced_deployment_info or \
        deployment_serializers.serialize(task.cluster)

    # After serialization set pending_addition to False
    for node in db().query(Node).filter(Node.id.in_(nodes_ids)):
        node.pending_addition = False
    db().commit()

    return {
        "method": "deploy",
        "respond_to": "deploy_resp",
        "args": {"task_uuid": task.uuid, "deployment_info": serialized_cluster},
    }

Developer ID: rsokolkov, Project: fuel-web, Lines: 38, Source: task.py
Example 11: message

def message(cls, task, nodes):
    logger.debug("DeploymentTask.message(task=%s)" % task.uuid)

    nodes_ids = [n.id for n in nodes]
    for n in db().query(Node).filter_by(
            cluster=task.cluster).order_by(Node.id):
        if n.id in nodes_ids:
            if n.pending_roles:
                n.roles += n.pending_roles
                n.pending_roles = []

            # If the receiver for some reason didn't update
            # the node's status to provisioned when deployment
            # started, we should do it in nailgun
            if n.status in (NODE_STATUSES.deploying,):
                n.status = NODE_STATUSES.provisioned

            n.progress = 0
            db().add(n)
    db().flush()

    # here we replace deployment data if user redefined them
    serialized_cluster = deployment_serializers.serialize(
        task.cluster, nodes)

    # After serialization set pending_addition to False
    for node in nodes:
        node.pending_addition = False
    db().commit()

    return make_astute_message(
        'deploy',
        'deploy_resp',
        {
            'task_uuid': task.uuid,
            'deployment_info': serialized_cluster
        }
    )

Developer ID: Axam, Project: nsx-web, Lines: 38, Source: task.py
Example 12: message

def message(cls, task, nodes):
    logger.debug("DeploymentTask.message(task=%s)" % task.uuid)

    TaskHelper.raise_if_node_offline(nodes)

    nodes_ids = [n.id for n in nodes]
    for n in db().query(Node).filter_by(
            cluster=task.cluster).order_by(Node.id):
        if n.id in nodes_ids:
            if n.pending_roles:
                n.roles += n.pending_roles
                n.pending_roles = []

            # If the receiver for some reason didn't update
            # the node's status to provisioned when deployment
            # started, we should do it in nailgun
            if n.status in ('deploying',):
                n.status = 'provisioned'

            n.progress = 0
            db().add(n)
    db().commit()

    # here we replace provisioning data if user redefined them
    serialized_cluster = task.cluster.replaced_deployment_info or \
        deployment_serializers.serialize(task.cluster, nodes)

    # After serialization set pending_addition to False
    for node in nodes:
        node.pending_addition = False
    db().commit()

    return {
        'method': 'deploy',
        'respond_to': 'deploy_resp',
        'args': {
            'task_uuid': task.uuid,
            'deployment_info': serialized_cluster}}

Developer ID: e0ne, Project: fuel-web, Lines: 37, Source: task.py
Example 13: test_disks_attrs

# ......... (part of the code omitted here) .........
        {
            "model": "TOSHIBA MK1002TS",
            "name": "sda",
            "disk": "sda",
            "size": 1004886016
        },
    ]
    expected_node_volumes_hash = [
        {
            u'name': u'sda',
            u'extra': [],
            u'free_space': 330,
            u'volumes': [
                {
                    u'type': u'boot',
                    u'size': 300
                },
                {
                    u'mount': u'/boot',
                    u'type': u'partition',
                    u'file_system': u'ext2',
                    u'name': u'Boot',
                    u'size': 200
                },
                {
                    u'type': u'lvm_meta_pool',
                    u'size': 64
                },
                {
                    u'vg': u'os',
                    u'type': u'pv',
                    u'lvm_meta_size': 64,
                    u'size': 394
                },
                {
                    u'vg': u'vm',
                    u'type': u'pv',
                    u'lvm_meta_size': 0,
                    u'size': 0
                }
            ],
            u'type': u'disk',
            u'id': u'sda',
            u'bootable': True,
            u'size': 958
        },
        {
            u'_allocate_size': u'min',
            u'label': u'Base System',
            u'min_size': 19456,
            u'volumes': [
                {
                    u'mount': u'/',
                    u'size': -3766,
                    u'type': u'lv',
                    u'name': u'root',
                    u'file_system': u'ext4'
                },
                {
                    u'mount': u'swap',
                    u'size': 4096,
                    u'type': u'lv',
                    u'name': u'swap',
                    u'file_system': u'swap'
                }
            ],
            u'type': u'vg',
            u'id': u'os'
        },
        {
            u'_allocate_size': u'all',
            u'label': u'Virtual Storage',
            u'min_size': 5120,
            u'volumes': [
                {
                    u'mount': u'/var/lib/nova',
                    u'size': 0,
                    u'type': u'lv',
                    u'name': u'nova',
                    u'file_system': u'xfs'
                }
            ],
            u'type': u'vg',
            u'id': u'vm'
        }
    ]

    self.env.create_node(
        cluster_id=self.cluster_db.id,
        roles=['compute'],
        meta={"disks": disks},
    )

    serialized_for_astute = deployment_serializers.serialize(
        AstuteGraph(self.cluster_db),
        self.cluster_db,
        self.cluster_db.nodes)

    for node in serialized_for_astute['nodes']:
        self.assertIn("node_volumes", node)
        self.assertItemsEqual(
            expected_node_volumes_hash, node["node_volumes"])

Developer ID: sebrandon1, Project: fuel-web, Lines: 101, Source: test_pipelines.py
Example 14: get_deployment_info

def get_deployment_info(cluster, nodes):
    return deployment_serializers.serialize(
        AstuteGraph(cluster), cluster, nodes)

Developer ID: openstack, Project: fuel-web, Lines: 3, Source: test_pipelines.py
Example 15: _serialize

def _serialize(self, cluster, nodes):
    return deployment_serializers.serialize(
        cluster, nodes, ignore_customized=True)

Developer ID: Zipfer, Project: fuel-web, Lines: 3, Source: orchestrator.py
Example 16: _serialize

def _serialize(self, cluster, nodes):
    graph = deployment_graph.AstuteGraph(cluster)
    return deployment_serializers.serialize(
        graph, cluster, nodes, ignore_customized=True)

Developer ID: TorstenS73, Project: fuel-web, Lines: 4, Source: orchestrator.py
Example 17: execute

def execute(self):
    # Start applying the deployment changes
    logger.info(
        u"Trying to start deployment at cluster '{0}'".format(
            self.cluster.name or self.cluster.id))

    # Log the network info (for OpenStack, network verification runs before deployment)
    network_info = self.serialize_network_cfg(self.cluster)
    logger.info(
        u"Network info:\n{0}".format(
            jsonutils.dumps(network_info, indent=4)))

    self._remove_obsolete_tasks()

    supertask = Task(name=TASK_NAMES.deploy, cluster=self.cluster)
    db().add(supertask)

    nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
    nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster)
    nodes_to_provision = TaskHelper.nodes_to_provision(self.cluster)
    task_messages = []

    # For an OpenStack environment, follow the original flow and check
    # whether any nodes in the cluster have changed
    if self.cluster.cluster_type == 1:
        if not any([nodes_to_provision, nodes_to_deploy, nodes_to_delete]):
            db().rollback()
            raise errors.WrongNodeStatus("No changes to deploy")

    # we should have the task committed for processing in other threads
    db().commit()
    TaskHelper.create_action_log(supertask)

    # Run validation if user didn't redefine
    # provisioning and deployment information
    if not objects.Cluster.get_provisioning_info(self.cluster) and \
            not objects.Cluster.get_deployment_info(self.cluster):
        try:
            if self.cluster.cluster_type == 1:
                self.check_before_deployment(supertask)
        except errors.CheckBeforeDeploymentError:
            db().commit()
            return supertask

    task_deletion, task_provision, task_deployment = None, None, None

    if nodes_to_delete:
        objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
        # For more accurate progress calculation
        task_weight = 0.4
        task_deletion = supertask.create_subtask(
            TASK_NAMES.node_deletion, weight=task_weight)
        logger.debug("Launching deletion task: %s", task_deletion.uuid)
        self._call_silently(task_deletion, tasks.DeletionTask)
        # we should have the task committed for processing in other threads
        db().commit()

    if nodes_to_provision:
        objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
        # updating nodes
        nodes_to_provision = objects.NodeCollection.lock_nodes(nodes_to_provision)
        objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_provision)
        logger.debug("There are nodes to provision: %s",
                     " ".join([n.fqdn for n in nodes_to_provision]))

        # For more accurate progress calculation
        task_weight = 0.4
        task_provision = supertask.create_subtask(
            TASK_NAMES.provision, weight=task_weight)

        # we should have the task committed for processing in other threads
        db().commit()
        provision_message = self._call_silently(
            task_provision, tasks.ProvisionTask,
            nodes_to_provision, method_name="message")

        task_provision = objects.Task.get_by_uid(
            task_provision.id, fail_if_not_found=True, lock_for_update=True)
        # if we failed to generate the task message for the orchestrator,
        # the task is already set to error
        if task_provision.status == TASK_STATUSES.error:
            return supertask

        task_provision.cache = provision_message
        db().commit()
        task_messages.append(provision_message)
    else:
        pass

    # nodes_to_deploy = self.cluster.nodes
    if nodes_to_deploy:
        objects.TaskCollection.lock_cluster_tasks(self.cluster.id)
        # locking nodes before updating
        objects.NodeCollection.lock_nodes(nodes_to_deploy)
        # updating nodes
        objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_deploy)
        logger.debug("There are nodes to deploy: %s",
                     " ".join([n.fqdn for n in nodes_to_deploy]))
        task_deployment = supertask.create_subtask(TASK_NAMES.deployment)
        # we should have the task committed for processing in other threads
        db().commit()
        deployment_message = self._call_silently(
            task_deployment, tasks.DeploymentTask,
            nodes_to_deploy, method_name="message")
        # clusterdeploymsg = ClusterdeployMsg(
        #     cluster_id=self.cluster.id,
        #     cluster_deploymsg='deployment_message')
        # db().add(clusterdeploymsg)
# ......... (part of the code omitted here) .........

Developer ID: yxh1990, Project: fuel-cloudmaster, Lines: 101, Source: manager.py
Example 18: serialize

def serialize(cluster):
    return deployment_serializers.serialize(
        AstuteGraph(cluster),
        cluster,
        cluster.nodes)['common']

Developer ID: sebrandon1, Project: fuel-web, Lines: 5, Source: test_pipelines.py
Note: The examples of the nailgun.orchestrator.deployment_serializers.serialize function in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are taken from open-source projects contributed by various developers; copyright remains with the original authors, and any use or redistribution should follow the corresponding project's license. Do not reproduce without permission.