
Python availability_zones.get_host_availability_zone function code examples


This article collects typical usage examples of the Python function nova.availability_zones.get_host_availability_zone. If you have been wondering what exactly get_host_availability_zone does, how it is used, or where to find examples of it, the hand-picked code samples below should help.



The sections below present 16 code examples of the get_host_availability_zone function, sorted by popularity by default.
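
Before diving into the examples, the following minimal sketch illustrates the call pattern most of them share: obtain an admin (or elevated) request context, then look up the availability zone of a compute host by name. This is an illustrative sketch only; the wrapper name lookup_host_az is not part of Nova, and it assumes the nova modules are importable in your environment.

    from nova import availability_zones
    from nova import context as nova_context

    def lookup_host_az(host):
        """Return the availability zone the given compute host belongs to.

        Mirrors the pattern used in the examples below: an admin context is
        passed together with the host name. If the host is not in any
        aggregate that defines an availability_zone, Nova falls back to the
        configured default availability zone (see Example 8).
        """
        admin_ctxt = nova_context.get_admin_context()
        return availability_zones.get_host_availability_zone(admin_ctxt, host)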

Example 1: test_get_host_availability_zone

    def test_get_host_availability_zone(self):
        """Test get right availability zone by given host."""
        self.assertEquals(self.default_az, az.get_host_availability_zone(self.context, self.host))

        service = self._create_service_with_topic("compute", self.host)
        self._add_to_aggregate(service, self.agg)

        self.assertEquals(self.availability_zone, az.get_host_availability_zone(self.context, self.host))
Developer: kobtea, Project: nova, Lines: 8, Source: test_availability_zones.py


Example 2: test_delete_host_availability_zone

    def test_delete_host_availability_zone(self):
        """Test availability zone could be deleted successfully."""
        service = self._create_service_with_topic("compute", self.host)

        # Create a new aggregate with an AZ and add the host to the AZ
        az_name = "az1"
        agg_az1 = self._create_az("agg-az1", az_name)
        self._add_to_aggregate(service, agg_az1)
        self.assertEquals(az_name, az.get_host_availability_zone(self.context, self.host))
        # Delete the AZ via deleting the aggregate
        self._delete_from_aggregate(service, agg_az1)
        self.assertEquals(self.default_az, az.get_host_availability_zone(self.context, self.host))
Developer: kobtea, Project: nova, Lines: 12, Source: test_availability_zones.py


Example 3: test_update_host_availability_zone

    def test_update_host_availability_zone(self):
        """Test availability zone could be update by given host."""
        service = self._create_service_with_topic("compute", self.host)

        # Create a new aggregate with an AZ and add the host to the AZ
        az_name = "az1"
        agg_az1 = self._create_az("agg-az1", az_name)
        self._add_to_aggregate(service, agg_az1)
        self.assertEquals(az_name, az.get_host_availability_zone(self.context, self.host))
        # Update AZ
        new_az_name = "az2"
        self._update_az(agg_az1, new_az_name)
        self.assertEquals(new_az_name, az.get_host_availability_zone(self.context, self.host))
Developer: kobtea, Project: nova, Lines: 13, Source: test_availability_zones.py


Example 4: _list_cobalt_hosts

    def _list_cobalt_hosts(self, context, availability_zone=None):
        """ Returns a list of all the hosts known to openstack running the cobalt service. """
        admin_context = context.elevated()
        services = self.db.service_get_all_by_topic(admin_context, CONF.cobalt_topic)

        if availability_zone is not None and ':' in availability_zone:
            parts = availability_zone.split(':')
            if len(parts) > 2:
                raise exception.NovaException(_('Invalid availability zone'))
            az = parts[0]
            host = parts[1]
            if (az, host) in [(srv['availability_zone'], srv['host']) for srv in services]:
                return [host]
            else:
                return []

        hosts = []
        for srv in services:
            in_availability_zone =  availability_zone is None or \
                                    availability_zone == \
                                            availability_zones.get_host_availability_zone(context,srv['host'])

            if srv['host'] not in hosts and in_availability_zone:
                hosts.append(srv['host'])
        return hosts
Developer: peterfeiner, Project: cobalt, Lines: 25, Source: api.py


Example 5: _execute

    def _execute(self):
        self.quotas = objects.Quotas.from_reservations(self.context,
                                                       self.reservations,
                                                       instance=self.instance)
        # TODO(sbauza): Remove that once prep_resize() accepts a  RequestSpec
        # object in the signature and all the scheduler.utils methods too
        legacy_spec = self.request_spec.to_legacy_request_spec_dict()
        legacy_props = self.request_spec.to_legacy_filter_properties_dict()
        scheduler_utils.setup_instance_group(self.context, legacy_spec,
                                             legacy_props)
        scheduler_utils.populate_retry(legacy_props,
                                       self.instance.uuid)

        # NOTE(sbauza): Force_hosts/nodes needs to be reset
        # if we want to make sure that the next destination
        # is not forced to be the original host
        self.request_spec.reset_forced_destinations()

        # NOTE(danms): Right now we only support migrate to the same
        # cell as the current instance, so request that the scheduler
        # limit thusly.
        instance_mapping = objects.InstanceMapping.get_by_instance_uuid(
            self.context, self.instance.uuid)
        LOG.debug('Requesting cell %(cell)s while migrating',
                  {'cell': instance_mapping.cell_mapping.identity},
                  instance=self.instance)
        if ('requested_destination' in self.request_spec and
                self.request_spec.requested_destination):
            self.request_spec.requested_destination.cell = (
                instance_mapping.cell_mapping)
        else:
            self.request_spec.requested_destination = objects.Destination(
                cell=instance_mapping.cell_mapping)

        hosts = self.scheduler_client.select_destinations(
            self.context, self.request_spec)
        host_state = hosts[0]

        scheduler_utils.populate_filter_properties(legacy_props,
                                                   host_state)
        # context is not serializable
        legacy_props.pop('context', None)

        (host, node) = (host_state['host'], host_state['nodename'])

        self.instance.availability_zone = (
            availability_zones.get_host_availability_zone(
                self.context, host))

        # FIXME(sbauza): Serialize/Unserialize the legacy dict because of
        # oslo.messaging #1529084 to transform datetime values into strings.
        # tl;dr: datetimes in dicts are not accepted as correct values by the
        # rpc fake driver.
        legacy_spec = jsonutils.loads(jsonutils.dumps(legacy_spec))

        self.compute_rpcapi.prep_resize(
            self.context, self.instance, legacy_spec['image'],
            self.flavor, host, self.reservations,
            request_spec=legacy_spec, filter_properties=legacy_props,
            node=node, clean_shutdown=self.clean_shutdown)
Developer: andymcc, Project: nova, Lines: 60, Source: migrate.py


Example 6: _get_host_az

    def _get_host_az(self, context, instance):
        host = str(instance.get('host'))
        if not host:
            return None
        cache_key = "azcache-%s" % host
        az = self.mc.get(cache_key)
        if not az:
            elevated = context.elevated()
            az = availability_zones.get_host_availability_zone(elevated, host)
            self.mc.set(cache_key, az, AZ_CACHE_SECONDS)
        return az
Developer: AnyBucket, Project: nova, Lines: 11, Source: extended_availability_zone.py


Example 7: _get_networks_for_instance

    def _get_networks_for_instance(self, context, instance_id, project_id,
                                   requested_networks=None):
        supercls = super(manager.FlatDHCPManager, self)
        networks = supercls._get_networks_for_instance(context,
                                                       instance_id,
                                                       project_id,
                                                       requested_networks)

        instance = db.instance_get_by_uuid(context.elevated(),
                                           instance_id)
        host = str(instance.get('host'))
        # NOTE(SlickNik): expects label to be set to the name of the
        #                 availability zone.
        return [network for network in networks if network['label'] ==
                availability_zones.get_host_availability_zone(context, host)]
Developer: hpcloud, Project: nova-network-drivers, Lines: 15, Source: azmanager.py


Example 8: host_passes

    def host_passes(self, host_state, filter_properties):
        spec = filter_properties.get('request_spec', {})
        props = spec.get('instance_properties', {})
        host = props.get('host')

        if host:
            context = filter_properties['context'].elevated()
            az = availability_zones.get_host_availability_zone(context, host)
            metadata = db.aggregate_metadata_get_by_host(
                context, host_state.host, key='availability_zone')
            if 'availability_zone' in metadata:
                return az in metadata['availability_zone']
            else:
                return az == CONF.default_availability_zone

        return True
Developer: hpcloud, Project: nova-scheduler-filters, Lines: 16, Source: host_availability_zone_filter.py


Example 9: get_availability_zone_by_host

def get_availability_zone_by_host(host, conductor_api=None):
    return availability_zones.get_host_availability_zone(
        context.get_admin_context(), host, conductor_api)
Developer: YankunLi, Project: nova, Lines: 3, Source: ec2utils.py


Example 10: _execute


#......... part of the code is omitted here .........
                legacy_props.pop('retry', None)
                self.request_spec.retry = None
        else:
            self.request_spec.requested_destination = objects.Destination(
                cell=instance_mapping.cell_mapping)

        # Once _preallocate_migration() is done, the source node allocation is
        # moved from the instance consumer to the migration record consumer,
        # and the instance consumer doesn't have any allocations. If this is
        # the first time through here (not a reschedule), select_destinations
        # below will allocate resources on the selected destination node for
        # the instance consumer. If we're rescheduling, host_list is not None
        # and we'll call claim_resources for the instance and the selected
        # alternate. If we exhaust our alternates and raise MaxRetriesExceeded,
        # the rollback() method should revert the allocation swaparoo and move
        # the source node allocation from the migration record back to the
        # instance record.
        migration = self._preallocate_migration()

        self.request_spec.ensure_project_and_user_id(self.instance)
        # On an initial call to migrate, 'self.host_list' will be None, so we
        # have to call the scheduler to get a list of acceptable hosts to
        # migrate to. That list will consist of a selected host, along with
        # zero or more alternates. On a reschedule, though, the alternates will
        # be passed to this object and stored in 'self.host_list', so we can
        # pop the first alternate from the list to use for the destination, and
        # pass the remaining alternates to the compute.
        if self.host_list is None:
            selection_lists = self.scheduler_client.select_destinations(
                    self.context, self.request_spec, [self.instance.uuid],
                    return_objects=True, return_alternates=True)
            # Since there is only ever one instance to migrate per call, we
            # just need the first returned element.
            selection_list = selection_lists[0]
            # The selected host is the first item in the list, with the
            # alternates being the remainder of the list.
            selection, self.host_list = selection_list[0], selection_list[1:]
        else:
            # This is a reschedule that will use the supplied alternate hosts
            # in the host_list as destinations. Since the resources on these
            # alternates may have been consumed and might not be able to
            # support the migrated instance, we need to first claim the
            # resources to verify the host still has sufficient available
            # resources.
            elevated = self.context.elevated()
            host_available = False
            while self.host_list and not host_available:
                selection = self.host_list.pop(0)
                if selection.allocation_request:
                    alloc_req = jsonutils.loads(selection.allocation_request)
                else:
                    alloc_req = None
                if alloc_req:
                    # If this call succeeds, the resources on the destination
                    # host will be claimed by the instance.
                    host_available = scheduler_utils.claim_resources(
                            elevated, self.reportclient, self.request_spec,
                            self.instance.uuid, alloc_req,
                            selection.allocation_request_version)
                else:
                    # Some deployments use different schedulers that do not
                    # use Placement, so they will not have an
                    # allocation_request to claim with. For those cases,
                    # there is no concept of claiming, so just assume that
                    # the host is valid.
                    host_available = True
            # There are no more available hosts. Raise a MaxRetriesExceeded
            # exception in that case.
            if not host_available:
                reason = ("Exhausted all hosts available for retrying build "
                          "failures for instance %(instance_uuid)s." %
                          {"instance_uuid": self.instance.uuid})
                raise exception.MaxRetriesExceeded(reason=reason)

        scheduler_utils.populate_filter_properties(legacy_props, selection)
        # context is not serializable
        legacy_props.pop('context', None)

        (host, node) = (selection.service_host, selection.nodename)

        self.instance.availability_zone = (
            availability_zones.get_host_availability_zone(
                self.context, host))

        # FIXME(sbauza): Serialize/Unserialize the legacy dict because of
        # oslo.messaging #1529084 to transform datetime values into strings.
        # tl;dr: datetimes in dicts are not accepted as correct values by the
        # rpc fake driver.
        legacy_spec = jsonutils.loads(jsonutils.dumps(legacy_spec))

        LOG.debug("Calling prep_resize with selected host: %s; "
                  "Selected node: %s; Alternates: %s", host, node,
                  self.host_list, instance=self.instance)
        # RPC cast to the destination host to start the migration process.
        self.compute_rpcapi.prep_resize(
            self.context, self.instance, legacy_spec['image'],
            self.flavor, host, migration,
            request_spec=legacy_spec, filter_properties=legacy_props,
            node=node, clean_shutdown=self.clean_shutdown,
            host_list=self.host_list)
Developer: klmitch, Project: nova, Lines: 101, Source: migrate.py


Example 11: get_availability_zone_by_host

def get_availability_zone_by_host(services, host, conductor_api=None):
    if len(services) > 0:
        return availability_zones.get_host_availability_zone(
            context.get_admin_context(), host, conductor_api)
    return 'unknown zone'
Developer: NetApp, Project: nova, Lines: 5, Source: ec2utils.py


Example 12: _execute


#......... part of the code is omitted here .........
        # is re calculated based on neutron ports.

        self._restrict_request_spec_to_cell(legacy_props)

        # Once _preallocate_migration() is done, the source node allocation is
        # moved from the instance consumer to the migration record consumer,
        # and the instance consumer doesn't have any allocations. If this is
        # the first time through here (not a reschedule), select_destinations
        # below will allocate resources on the selected destination node for
        # the instance consumer. If we're rescheduling, host_list is not None
        # and we'll call claim_resources for the instance and the selected
        # alternate. If we exhaust our alternates and raise MaxRetriesExceeded,
        # the rollback() method should revert the allocation swaparoo and move
        # the source node allocation from the migration record back to the
        # instance record.
        migration = self._preallocate_migration()

        self.request_spec.ensure_project_and_user_id(self.instance)
        self.request_spec.ensure_network_metadata(self.instance)
        compute_utils.heal_reqspec_is_bfv(
            self.context, self.request_spec, self.instance)
        # On an initial call to migrate, 'self.host_list' will be None, so we
        # have to call the scheduler to get a list of acceptable hosts to
        # migrate to. That list will consist of a selected host, along with
        # zero or more alternates. On a reschedule, though, the alternates will
        # be passed to this object and stored in 'self.host_list', so we can
        # pop the first alternate from the list to use for the destination, and
        # pass the remaining alternates to the compute.
        if self.host_list is None:
            selection_lists = self.query_client.select_destinations(
                    self.context, self.request_spec, [self.instance.uuid],
                    return_objects=True, return_alternates=True)
            # Since there is only ever one instance to migrate per call, we
            # just need the first returned element.
            selection_list = selection_lists[0]
            # The selected host is the first item in the list, with the
            # alternates being the remainder of the list.
            selection, self.host_list = selection_list[0], selection_list[1:]
        else:
            # This is a reschedule that will use the supplied alternate hosts
            # in the host_list as destinations. Since the resources on these
            # alternates may have been consumed and might not be able to
            # support the migrated instance, we need to first claim the
            # resources to verify the host still has sufficient available
            # resources.
            elevated = self.context.elevated()
            host_available = False
            while self.host_list and not host_available:
                selection = self.host_list.pop(0)
                if selection.allocation_request:
                    alloc_req = jsonutils.loads(selection.allocation_request)
                else:
                    alloc_req = None
                if alloc_req:
                    # If this call succeeds, the resources on the destination
                    # host will be claimed by the instance.
                    host_available = scheduler_utils.claim_resources(
                            elevated, self.reportclient, self.request_spec,
                            self.instance.uuid, alloc_req,
                            selection.allocation_request_version)
                else:
                    # Some deployments use different schedulers that do not
                    # use Placement, so they will not have an
                    # allocation_request to claim with. For those cases,
                    # there is no concept of claiming, so just assume that
                    # the host is valid.
                    host_available = True
            # There are no more available hosts. Raise a MaxRetriesExceeded
            # exception in that case.
            if not host_available:
                reason = ("Exhausted all hosts available for retrying build "
                          "failures for instance %(instance_uuid)s." %
                          {"instance_uuid": self.instance.uuid})
                raise exception.MaxRetriesExceeded(reason=reason)

        scheduler_utils.populate_filter_properties(legacy_props, selection)
        # context is not serializable
        legacy_props.pop('context', None)

        (host, node) = (selection.service_host, selection.nodename)

        self.instance.availability_zone = (
            availability_zones.get_host_availability_zone(
                self.context, host))

        LOG.debug("Calling prep_resize with selected host: %s; "
                  "Selected node: %s; Alternates: %s", host, node,
                  self.host_list, instance=self.instance)
        # RPC cast to the destination host to start the migration process.
        self.compute_rpcapi.prep_resize(
            # NOTE(mriedem): Using request_spec.image here is potentially
            # dangerous if it is not kept up to date (i.e. rebuild/unshelve);
            # seems like the sane thing to do would be to pass the current
            # instance.image_meta since that is what MoveClaim will use for
            # any NUMA topology claims on the destination host...
            self.context, self.instance, self.request_spec.image,
            self.flavor, host, migration,
            request_spec=self.request_spec, filter_properties=legacy_props,
            node=node, clean_shutdown=self.clean_shutdown,
            host_list=self.host_list)
Developer: arbrandes, Project: nova, Lines: 101, Source: migrate.py


Example 13: get_availability_zone_by_host

def get_availability_zone_by_host(services, host):
    if len(services) > 0:
        return availability_zones.get_host_availability_zone(context, host)
    return 'unknown zone'
Developer: dtibarra, Project: nova, Lines: 4, Source: ec2utils.py


Example 14: _execute

    def _execute(self):
        self._check_instance_is_active()
        self._check_instance_has_no_numa()
        self._check_host_is_up(self.source)

        self._source_cn, self._held_allocations = (
            # NOTE(danms): This may raise various exceptions, which will
            # propagate to the API and cause a 500. This is what we
            # want, as it would indicate internal data structure corruption
            # (such as missing migrations, compute nodes, etc).
            migrate.replace_allocation_with_migration(self.context,
                                                      self.instance,
                                                      self.migration))

        if not self.destination:
            # Either no host was specified in the API request and the user
            # wants the scheduler to pick a destination host, or a host was
            # specified but is not forcing it, so they want the scheduler
            # filters to run on the specified host, like a scheduler hint.
            self.destination, dest_node = self._find_destination()
        else:
            # This is the case that the user specified the 'force' flag when
            # live migrating with a specific destination host so the scheduler
            # is bypassed. There are still some minimal checks performed here
            # though.
            source_node, dest_node = self._check_requested_destination()
            # Now that we're semi-confident in the force specified host, we
            # need to copy the source compute node allocations in Placement
            # to the destination compute node. Normally select_destinations()
            # in the scheduler would do this for us, but when forcing the
            # target host we don't call the scheduler.
            # TODO(mriedem): Call select_destinations() with a
            # skip_filters=True flag so the scheduler does the work of claiming
            # resources on the destination in Placement but still bypass the
            # scheduler filters, which honors the 'force' flag in the API.
            # This raises NoValidHost which will be handled in
            # ComputeTaskManager.
            # NOTE(gibi): consumer_generation = None as we expect that the
            # source host allocation is held by the migration therefore the
            # instance is a new, empty consumer for the dest allocation. If
            # this assumption fails then placement will return consumer
            # generation conflict and this call raise a AllocationUpdateFailed
            # exception. We let that propagate here to abort the migration.
            scheduler_utils.claim_resources_on_destination(
                self.context, self.report_client,
                self.instance, source_node, dest_node,
                source_allocations=self._held_allocations,
                consumer_generation=None)

            # dest_node is a ComputeNode object, so we need to get the actual
            # node name off it to set in the Migration object below.
            dest_node = dest_node.hypervisor_hostname

        self.instance.availability_zone = (
            availability_zones.get_host_availability_zone(
                self.context, self.destination))

        self.migration.source_node = self.instance.node
        self.migration.dest_node = dest_node
        self.migration.dest_compute = self.destination
        self.migration.save()

        # TODO(johngarbutt) need to move complexity out of compute manager
        # TODO(johngarbutt) disk_over_commit?
        return self.compute_rpcapi.live_migration(self.context,
                host=self.source,
                instance=self.instance,
                dest=self.destination,
                block_migration=self.block_migration,
                migration=self.migration,
                migrate_data=self.migrate_data)
Developer: mahak, Project: nova, Lines: 71, Source: live_migrate.py


Example 15: _get_host_az

    def _get_host_az(self, context, instance):
        admin_context = context.elevated()
        if instance['host']:
            return availability_zones.get_host_availability_zone(
                                            admin_context, instance['host'])
Developer: DSpeichert, Project: nova, Lines: 5, Source: extended_availability_zone.py


Example 16: get_item_by_host

    def get_item_by_host(self, context, host):
        return availability_zones.get_host_availability_zone(
            ctxt.get_admin_context(), host)
Developer: pombredanne, Project: gce, Lines: 3, Source: zone_api.py



Note: The nova.availability_zones.get_host_availability_zone function examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please refer to the corresponding project's license before distributing or using the code; do not reproduce without permission.

