
Python txeffect.perform Function Code Examples


This article collects typical usage examples of the txeffect.perform function in Python. If you have been wondering what exactly perform does, how to call it, or what real-world uses of it look like, the curated code examples below should help.



The sections below present 20 code examples of the perform function, sorted by popularity by default.
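
Before diving in, it may help to see the shape all 20 examples share. The following is a minimal sketch of the effect/txeffect pattern; the ReadGreeting intent, its performer, and the show callback are hypothetical names invented for illustration, while Effect, TypeDispatcher, deferred_performer, and perform are the actual library entry points.

from effect import Effect, TypeDispatcher
from twisted.internet.defer import succeed
from txeffect import deferred_performer, perform


class ReadGreeting(object):
    """Hypothetical intent: a plain object saying *what* to do, with no I/O."""


@deferred_performer
def perform_read_greeting(dispatcher, intent):
    # A performer receives (dispatcher, intent) and may return a Deferred.
    return succeed("hello")


def show(result):
    print(result)


# The dispatcher maps intent types to the code that performs them.
dispatcher = TypeDispatcher({ReadGreeting: perform_read_greeting})

# perform() resolves the intent through the dispatcher and returns a Twisted
# Deferred firing with the effect's final result ("HELLO" here, because of
# the .on() callback attached to the Effect).
d = perform(dispatcher, Effect(ReadGreeting()).on(lambda s: s.upper()))
d.addCallback(show)

Every example that follows is a variation on this shape: build an Effect (often composed with .on() callbacks, parallel(), or a scoping wrapper like TenantScope), obtain a dispatcher, and hand both to perform to get a Deferred back.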

Example 1: collect_metrics

@defer.inlineCallbacks
def collect_metrics(reactor, config, log, client=None, authenticator=None,
                    _print=False):
    """
    Start collecting the metrics

    :param reactor: Twisted reactor
    :param dict config: Configuration got from file containing all info
        needed to collect metrics
    :param :class:`silverberg.client.CQLClient` client:
        Optional cassandra client. A new client will be created
        if this is not given and disconnected before returning
    :param :class:`otter.auth.IAuthenticator` authenticator:
        Optional authenticator. A new authenticator will be created
        if this is not given
    :param bool _print: Should debug messages be printed to stdout?

    :return: :class:`Deferred` fired with ``list`` of `GroupMetrics`
    """
    _client = client or connect_cass_servers(reactor, config['cassandra'])
    authenticator = authenticator or generate_authenticator(reactor,
                                                            config['identity'])
    store = CassScalingGroupCollection(_client, reactor, 1000)
    dispatcher = get_dispatcher(reactor, authenticator, log,
                                get_service_configs(config), store)

    # calculate metrics on launch_server and non-paused groups
    groups = yield perform(dispatcher, Effect(GetAllValidGroups()))
    groups = [
        g for g in groups
        if json.loads(g["launch_config"]).get("type") == "launch_server" and
        (not g.get("paused", False))]
    tenanted_groups = groupby(lambda g: g["tenantId"], groups)
    group_metrics = yield get_all_metrics(
        dispatcher, tenanted_groups, log, _print=_print)

    # Add to cloud metrics
    metr_conf = config.get("metrics", None)
    if metr_conf is not None:
        eff = add_to_cloud_metrics(
            metr_conf['ttl'], config['region'], group_metrics,
            len(tenanted_groups), config, log, _print)
        eff = Effect(TenantScope(eff, metr_conf['tenant_id']))
        yield perform(dispatcher, eff)
        log.msg('added to cloud metrics')
        if _print:
            print('added to cloud metrics')
    if _print:
        group_metrics.sort(key=lambda g: abs(g.desired - g.actual),
                           reverse=True)
        print('groups sorted as per divergence')
        print('\n'.join(map(str, group_metrics)))

    # Disconnect only if we created the client
    if not client:
        yield _client.disconnect()

    defer.returnValue(group_metrics)
Developer: rackerlabs | Project: otter | Source: metrics.py


Example 2: collect_metrics

@defer.inlineCallbacks
def collect_metrics(reactor, config, log, client=None, authenticator=None,
                    _print=False):
    """
    Start collecting the metrics

    :param reactor: Twisted reactor
    :param dict config: Configuration got from file containing all info
        needed to collect metrics
    :param :class:`silverberg.client.CQLClient` client:
        Optional cassandra client. A new client will be created
        if this is not given and disconnected before returning
    :param :class:`otter.auth.IAuthenticator` authenticator:
        Optional authenticator. A new authenticator will be created
        if this is not given
    :param bool _print: Should debug messages be printed to stdout?

    :return: :class:`Deferred` fired with ``list`` of `GroupMetrics`
    """
    convergence_tids = config.get('convergence-tenants', [])
    _client = client or connect_cass_servers(reactor, config['cassandra'])
    authenticator = authenticator or generate_authenticator(reactor,
                                                            config['identity'])
    store = CassScalingGroupCollection(_client, reactor, 1000)
    dispatcher = get_dispatcher(reactor, authenticator, log,
                                get_service_configs(config), store)

    # calculate metrics
    fpath = get_in(["metrics", "last_tenant_fpath"], config,
                   default="last_tenant.txt")
    tenanted_groups = yield perform(
        dispatcher,
        get_todays_scaling_groups(convergence_tids, fpath))
    group_metrics = yield get_all_metrics(
        dispatcher, tenanted_groups, log, _print=_print)

    # Add to cloud metrics
    metr_conf = config.get("metrics", None)
    if metr_conf is not None:
        eff = add_to_cloud_metrics(
            metr_conf['ttl'], config['region'], group_metrics,
            len(tenanted_groups), log, _print)
        eff = Effect(TenantScope(eff, metr_conf['tenant_id']))
        yield perform(dispatcher, eff)
        log.msg('added to cloud metrics')
        if _print:
            print('added to cloud metrics')
    if _print:
        group_metrics.sort(key=lambda g: abs(g.desired - g.actual),
                           reverse=True)
        print('groups sorted as per divergence', *group_metrics, sep='\n')

    # Disconnect only if we created the client
    if not client:
        yield _client.disconnect()

    defer.returnValue(group_metrics)
Developer: glyph | Project: otter | Source: metrics.py


Example 3: webhook_migrate

def webhook_migrate(reactor, conn, args):
    """
    Migrate webhook indexes to the webhook keys table
    """
    store = CassScalingGroupCollection(None, None, 3)
    eff = store.get_webhook_index_only().on(store.add_webhook_keys)
    return perform(get_working_cql_dispatcher(reactor, conn), eff)
Developer: rackerlabs | Project: otter | Source: load_cql.py


Example 4: webhook_index

def webhook_index(reactor, conn, args):
    """
    Show webhook indexes that are not yet in the webhook keys table
    """
    store = CassScalingGroupCollection(None, None, 3)
    eff = store.get_webhook_index_only()
    return perform(get_working_cql_dispatcher(reactor, conn), eff)
Developer: rackerlabs | Project: otter | Source: load_cql.py


Example 5: start_cluster

    @inlineCallbacks
    def start_cluster(self, reactor):
        """
        Provision cloud cluster for acceptance tests.

        :return Cluster: The cluster to connect to for acceptance tests.
        """
        metadata = {
            'purpose': 'acceptance-testing',
            'distribution': self.distribution,
        }
        metadata.update(self.metadata)

        for index in range(self.num_nodes):
            name = "acceptance-test-%s-%d" % (self.creator, index)
            try:
                print "Creating node %d: %s" % (index, name)
                node = self.provisioner.create_node(
                    name=name,
                    distribution=self.distribution,
                    metadata=metadata,
                )
            except:
                print "Error creating node %d: %s" % (index, name)
                print "It may have leaked into the cloud."
                raise

            yield remove_known_host(reactor, node.address)
            self.nodes.append(node)
            del node

        commands = parallel([
            node.provision(package_source=self.package_source,
                           variants=self.variants)
            for node in self.nodes
        ])
        if self.dataset_backend == DatasetBackend.zfs:
            zfs_commands = parallel([
                configure_zfs(node, variants=self.variants)
                for node in self.nodes
            ])
            commands = commands.on(success=lambda _: zfs_commands)

        yield perform(make_dispatcher(reactor), commands)

        cluster = yield configured_cluster_for_nodes(
            reactor,
            generate_certificates(
                make_cluster_id(
                    TestTypes.ACCEPTANCE,
                    _provider_for_cluster_id(self.dataset_backend),
                ),
                self.nodes),
            self.nodes,
            self.dataset_backend,
            self.dataset_backend_configuration,
            _save_backend_configuration(self.dataset_backend,
                                        self.dataset_backend_configuration)
        )

        returnValue(cluster)
Developer: Kaffa-MY | Project: flocker | Source: acceptance.py


Example 6: modify_and_trigger

@defer.inlineCallbacks
def modify_and_trigger(dispatcher, group, logargs, modifier, *args, **kwargs):
    """
    Modify group state and trigger convergence after that if the group is not
    suspended. Otherwise fail with :obj:`TenantSuspendedError`.

    :param IScalingGroup group: Scaling group whose state is getting modified
    :param log: Bound logger
    :param modifier: Callable as described in IScalingGroup.modify_state

    :return: Deferred with None if modification and convergence succeeded.
        Fails with :obj:`TenantSuspendedError` if group is suspended.
    """
    def modifier_wrapper(_group, state, *_args, **_kwargs):
        # Ideally this will not be allowed by repose middleware but
        # adding check for mimic based integration tests
        if state.suspended:
            raise TenantSuspendedError(_group.tenant_id)
        return modifier(_group, state, *_args, **_kwargs)

    cannot_exec_pol_err = None
    try:
        yield group.modify_state(modifier_wrapper, *args, **kwargs)
    except CannotExecutePolicyError as ce:
        cannot_exec_pol_err = ce
    if tenant_is_enabled(group.tenant_id, config_value):
        eff = Effect(
            BoundFields(
                trigger_convergence(group.tenant_id, group.uuid), logargs))
        yield perform(dispatcher, eff)
    if cannot_exec_pol_err is not None:
        raise cannot_exec_pol_err
Developer: rackerlabs | Project: otter | Source: controller.py


Example 7: perform_run_remotely

@inlineCallbacks
def perform_run_remotely(reactor, base_dispatcher, intent):
    connection_helper = get_connection_helper(
        reactor,
        username=intent.username, address=intent.address, port=intent.port)

    context = Message.new(
        username=intent.username, address=intent.address, port=intent.port)

    def connect():
        connection = connection_helper.secureConnection()
        connection.addErrback(write_failure)
        timeout(reactor, connection, 30)
        return connection

    connection = yield loop_until(reactor, connect)

    dispatcher = ComposedDispatcher([
        get_ssh_dispatcher(
            connection=connection,
            context=context,
        ),
        base_dispatcher,
    ])

    yield perform(dispatcher, intent.commands)

    yield connection_helper.cleanupConnection(
        connection, False)
Developer: wangbinxiang | Project: flocker | Source: _conch.py


Example 8: _upgrade_flocker

    def _upgrade_flocker(self, reactor, nodes, package_source):
        """
        Put the version of Flocker indicated by ``package_source`` onto all of
        the given nodes.

        This takes a primitive approach of uninstalling the software and then
        installing the new version instead of trying to take advantage of any
        OS-level package upgrade support.  Because it's easier.  The package
        removal step is allowed to fail in case the package is not installed
        yet (other failures are not differentiated).  The only action taken on
        failure is that the failure is logged.

        :param pvector nodes: The ``ManagedNode``\ s on which to upgrade the
            software.
        :param PackageSource package_source: The version of the software to
            which to upgrade.

        :return: A ``Deferred`` that fires when the software has been upgraded.
        """
        dispatcher = make_dispatcher(reactor)

        uninstalling = perform(dispatcher, uninstall_flocker(nodes))
        uninstalling.addErrback(write_failure, logger=None)

        def install(ignored):
            return perform(
                dispatcher,
                install_flocker(nodes, package_source),
            )
        installing = uninstalling.addCallback(install)
        return installing
Developer: petercolesdc | Project: flocker | Source: acceptance.py


Example 9: acquire

    def acquire(self, blocking=True, timeout=None):
        """
        Same as :meth:`kazoo.recipe.lock.Lock.acquire` except that this can be
        called again on an object that has been released. It will start a
        fresh process to acquire the lock.
        """
        return perform(self.dispatcher, self.acquire_eff(blocking, timeout))
Developer: rackerlabs | Project: otter | Source: zk.py


Example 10: configured_cluster_for_nodes

def configured_cluster_for_nodes(
    reactor, certificates, nodes, dataset_backend,
    dataset_backend_configuration, dataset_backend_config_file,
    provider=None
):
    """
    Get a ``Cluster`` with Flocker services running on the right nodes.

    :param reactor: The reactor.
    :param Certificates certificates: The certificates to install on the
        cluster.
    :param nodes: The ``ManagedNode``s on which to operate.
    :param NamedConstant dataset_backend: The ``DatasetBackend`` constant
        representing the dataset backend that the nodes will be configured to
        use when they are "started".
    :param dict dataset_backend_configuration: The backend-specific
        configuration the nodes will be given for their dataset backend.
    :param FilePath dataset_backend_config_file: A FilePath that has the
        dataset_backend info stored.

    :returns: A ``Deferred`` which fires with ``Cluster`` when it is
        configured.
    """
    # XXX: There is duplication between the values here and those in
    # f.node.agents.test.blockdevicefactory.MINIMUM_ALLOCATABLE_SIZES. We want
    # the default volume size to be greater than or equal to the minimum
    # allocatable size.
    #
    # Ideally, the minimum allocatable size (and perhaps the default volume
    # size) would be something known by an object that represents the dataset
    # backend. Unfortunately:
    #  1. There is no such object
    #  2. There is existing confusion in the code around 'openstack' and
    #     'rackspace'
    #
    # Here, we special-case Rackspace (presumably) because it has a minimum
    # allocatable size that is different from other Openstack backends.
    #
    # FLOC-2584 also discusses this.
    default_volume_size = GiB(1)
    if dataset_backend_configuration.get('auth_plugin') == 'rackspace':
        default_volume_size = RACKSPACE_MINIMUM_VOLUME_SIZE

    cluster = Cluster(
        all_nodes=pvector(nodes),
        control_node=nodes[0],
        agent_nodes=nodes,
        dataset_backend=dataset_backend,
        default_volume_size=int(default_volume_size.to_Byte().value),
        certificates=certificates,
        dataset_backend_config_file=dataset_backend_config_file
    )

    configuring = perform(
        make_dispatcher(reactor),
        configure_cluster(cluster, dataset_backend_configuration, provider)
    )
    configuring.addCallback(lambda ignored: cluster)
    return configuring
Developer: aramase | Project: flocker | Source: acceptance.py


Example 11: is_acquired

    def is_acquired(self):
        """
        Is the lock already acquired? This method does not exist in kazoo
        lock recipe and is a nice addition to it.

        :return: :obj:`Deferred` of ``bool``
        """
        return perform(self.dispatcher, self.is_acquired_eff())
Developer: rackerlabs | Project: otter | Source: zk.py


Example 12: get_active_cache

def get_active_cache(reactor, connection, tenant_id, group_id):
    """
    Get active servers from servers cache table
    """
    eff = CassScalingGroupServersCache(tenant_id, group_id).get_servers(True)
    disp = get_working_cql_dispatcher(reactor, connection)
    d = perform(disp, eff)
    return d.addCallback(lambda (servers, _): {s['id']: s for s in servers})
Developer: pratikmallya | Project: otter | Source: groups.py


Example 13: groups_steps

def groups_steps(groups, reactor, store, cass_client, authenticator, conf):
    """
    Return [(group, steps)] list
    """
    eff = parallel(map(group_steps, groups))
    disp = get_full_dispatcher(
        reactor, authenticator, mock_log(), get_service_configs(conf),
        "kzclient", store, "supervisor", cass_client)
    return perform(disp, eff).addCallback(lambda steps: zip(groups, steps))
Developer: dragorosson | Project: otter | Source: trigger_convergence.py


Example 14: check_and_call

    def check_and_call():
        class DoFunc(object):
            pass

        @deferred_performer
        def func_performer(d, i):
            return maybeDeferred(func, *args, **kwargs)

        comp_dispatcher = ComposedDispatcher([TypeDispatcher({DoFunc: func_performer}), dispatcher])
        return perform(comp_dispatcher, call_if_acquired(lock, Effect(DoFunc())))
Developer: rackerlabs | Project: otter | Source: zk.py


Example 15: start_convergence

    def start_convergence(self, log, tenant_id, group_id, perform=perform):
        """Record that a group needs converged by creating a ZooKeeper node."""
        log = log.bind(tenant_id=tenant_id, scaling_group_id=group_id)
        eff = mark_divergent(tenant_id, group_id)
        d = perform(self.dispatcher, eff)

        def success(r):
            log.msg('mark-dirty-success')
            return r  # The result is ignored normally, but return it for tests
        d.addCallbacks(success, log.err, errbackArgs=('mark-dirty-failure',))
        return d
Developer: glyph | Project: otter | Source: service.py


Example 16: resume_scaling_group

def resume_scaling_group(log, transaction_id, scaling_group, dispatcher):
    """
    Resumes the scaling group, causing all scaling policy executions to be
    evaluated as normal again.  This is an idempotent change: if the group is
    already resumed, this does not raise an error.

    :raises: :class:`NoSuchScalingGroup` if the scaling group does not exist.

    :return: None
    """
    if not tenant_is_enabled(scaling_group.tenant_id, config_value):
        raise NotImplementedError("Resume is not implemented for legacy groups")
    return perform(dispatcher, conv_resume_group_eff(transaction_id, scaling_group))
Developer: stanzikratel | Project: otter | Source: controller.py


Example 17: pause_scaling_group

def pause_scaling_group(log, transaction_id, scaling_group, dispatcher):
    """
    Pauses the scaling group, causing all scaling policy executions to be
    rejected until unpaused.  This is an idempotent change: if the group is
    already paused, this does not raise an error.

    :raises: :class:`NoSuchScalingGroup` if the scaling group does not exist.

    :return: None
    """
    if not tenant_is_enabled(scaling_group.tenant_id, config_value):
        raise NotImplementedError("Pause is not implemented for legay groups")
    return perform(dispatcher, conv_pause_group_eff(scaling_group, transaction_id))
Developer: stanzikratel | Project: otter | Source: controller.py


Example 18: buckets_acquired

    def buckets_acquired(self, my_buckets):
        """
        Get dirty flags from zookeeper and run convergence with them.

        This is used as the partitioner callback.
        """
        ceff = Effect(GetChildren(CONVERGENCE_DIRTY_DIR)).on(
            partial(self._converge_all, my_buckets))
        # Return deferred as 1-element tuple for testing only.
        # Returning the deferred would block otter from shutting down until
        # it fires, which we don't need since convergence is idempotent
        # and will be triggered on the next start of otter.
        return (perform(self._dispatcher, self._with_conv_runid(ceff)), )
Developer: rackerlabs | Project: otter | Source: service.py


Example 19: _generic_rcv3_request

def _generic_rcv3_request(operation, request_bag, lb_id, server_id):
    """
    Perform a generic RCv3 bulk operation on a single (lb, server) pair.

    :param callable operation: RCv3 function to perform on (lb, server) pair.
    :param request_bag: An object with a bunch of useful data on it.
    :param str lb_id: The id of the RCv3 load balancer to act on.
    :param str server_id: The Nova server id to act on.
    :return: A deferred that will fire when the request has been performed,
        firing with the parsed result of the request, or :data:`None` if the
        request has no body.
    """
    eff = operation(pset([(lb_id, server_id)]))
    scoped = Effect(TenantScope(eff, request_bag.tenant_id))
    return perform(request_bag.dispatcher, scoped)
Developer: meker12 | Project: otter | Source: _rcv3.py


Example 20: test_run_logs_stdout

    def test_run_logs_stdout(self, logger):
        """
        The ``Run`` intent logs the standard output of the specified command.
        """
        command = run_remotely(
            username="root",
            address=str(self.server.ip),
            port=self.server.port,
            commands=run("echo test_ssh_conch:test_run_logs_stdout"),
        )

        d = perform(
            make_dispatcher(reactor),
            command,
        )
        return d
Developer: sysuwbs | Project: flocker | Source: test_ssh_conch.py



Note: the txeffect.perform examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their developers, and copyright remains with the original authors; when sharing or reusing them, follow the corresponding project's license. Do not reproduce without permission.

