Python rebalance_helper.RebalanceHelper Class Code Examples


This article collects typical usage examples of the Python class membase.helper.rebalance_helper.RebalanceHelper. If you have been wondering what the RebalanceHelper class is for, how to use it, or what real-world usage looks like, the selected class code examples below may help.



The following presents 20 code examples of the RebalanceHelper class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
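Most of the examples below follow the same basic pattern: load documents into a bucket, then call one of RebalanceHelper's wait helpers (wait_for_persistence, wait_for_stats_on_all, wait_for_stats) to block until the data has been persisted or replicated before running verification or triggering a rebalance/failover. The sketch below distills that pattern; the helper calls mirror the examples on this page, but the method name and the self.master / self.bucket attributes are hypothetical scaffolding assumed to come from a testrunner-style base test class.

    # Module-level import as used in the testrunner project:
    # from membase.helper.rebalance_helper import RebalanceHelper

    def _wait_until_bucket_is_drained(self):
        # Block until every pending write for the bucket has been persisted to disk.
        RebalanceHelper.wait_for_persistence(self.master, self.bucket)
        # Lower-level variant seen in several examples below: wait until both the
        # disk-write queue and the flusher backlog report zero outstanding items.
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, self.bucket,
                                                      'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, self.bucket,
                                                      'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")

Each numbered example then applies a variation of this pattern to a specific scenario: view creation, failover, swap rebalance, backup and restore, compaction, warm-up, and vbucket analysis.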

Example 1: create_ddocs

 def create_ddocs(self, is_dev_view):
     mapview = View(
         self.map_view_name,
         """function(doc) {
          emit(doc.integer, doc.string);
       }""",
         dev_view=is_dev_view,
     )
     self.cluster.create_view(self.master, "test", mapview)
     redview = View(
         self.red_view_name,
         """function(doc) {
          emit([doc.integer, doc.string], doc.integer);
       }""",
         """_count""",
         dev_view=is_dev_view,
     )
     self.cluster.create_view(self.master, "test", redview)
     redview_stats = View(
         self.red_view_stats_name,
         """function(doc) {
          emit(doc.string, doc.string);
       }""",
         """_stats""",
         dev_view=is_dev_view,
     )
     self.cluster.create_view(self.master, "test2", redview_stats)
     RebalanceHelper.wait_for_persistence(self.master, self.bucket, 0)
Developer: ketakigangal, Project: testrunner, Lines: 28, Source: viewmergetests.py


Example 2: replication_verification

    def replication_verification(master, bucket, replica, inserted_count, test):
        rest = RestConnection(master)
        nodes = rest.node_statuses()

        if len(nodes) / (1 + replica) >= 1:
            final_replication_state = RestHelper(rest).wait_for_replication(900)
            msg = "replication state after waiting for up to 15 minutes : {0}"
            test.log.info(msg.format(final_replication_state))
            # in windows, we need to set timeout_in_seconds to 15+ minutes
            test.assertTrue(RebalanceHelper.wait_till_total_numbers_match(master=master,
                                                                          bucket=bucket,
                                                                          timeout_in_seconds=1200),
                            msg="replication was completed but sum(curr_items) doesn't match the curr_items_total")

            start_time = time.time()
            stats = rest.get_bucket_stats()
            while time.time() < (start_time + 120) and stats["curr_items"] != inserted_count:
                test.log.info("curr_items : {0} versus {1}".format(stats["curr_items"], inserted_count))
                time.sleep(5)
                stats = rest.get_bucket_stats()
            RebalanceHelper.print_taps_from_all_nodes(rest, bucket)
            test.log.info("curr_items : {0} versus {1}".format(stats["curr_items"], inserted_count))
            stats = rest.get_bucket_stats()
            msg = "curr_items : {0} is not equal to actual # of keys inserted : {1}"
            test.assertEquals(stats["curr_items"], inserted_count,
                              msg=msg.format(stats["curr_items"], inserted_count))
Developer: jchris, Project: testrunner, Lines: 26, Source: failovertests.py


Example 3: _verify_stats_all_buckets

    def _verify_stats_all_buckets(self, servers, timeout=60):
        stats_tasks = []
        for bucket in self.buckets:
            items = sum([len(kv_store) for kv_store in bucket.kvs.values()])
            stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
                               'curr_items', '==', items))
            stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
                               'vb_active_curr_items', '==', items))

            available_replicas = self.num_replicas
            if len(servers) == self.num_replicas:
                available_replicas = len(servers) - 1
            elif len(servers) <= self.num_replicas:
                available_replicas = len(servers) - 1

            stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
                                   'vb_replica_curr_items', '==', items * available_replicas))
            stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
                                   'curr_items_tot', '==', items * (available_replicas + 1)))
        try:
            for task in stats_tasks:
                task.result(timeout)
        except Exception as e:
            print e
            for task in stats_tasks:
                task.cancel()
            self.log.error("unable to get expected stats for any node! Print taps for all nodes:")
            rest = RestConnection(self.master)
            for bucket in self.buckets:
                RebalanceHelper.print_taps_from_all_nodes(rest, bucket)
            raise Exception("unable to get expected stats during {0} sec".format(timeout))
Developer: jason-hou, Project: testrunner, Lines: 31, Source: basetestcase.py


Example 4: run_test

    def run_test(self):
        ep_threshold = self.input.param("ep_threshold", "ep_mem_low_wat")
        active_resident_threshold = int(self.input.param("active_resident_threshold", 10))

        mc = MemcachedClientHelper.direct_client(self.servers[0], self.bucket_name)
        stats = mc.stats()
        threshold = int(self.input.param("threshold", stats[ep_threshold]))
        threshold_reached = False
        self.num_items = self.input.param("items", 10000)
        self._load_doc_data_all_buckets("create")

        # load items till reached threshold or mem-ratio is less than resident ratio threshold
        while not threshold_reached:
            mem_used = int(mc.stats()["mem_used"])
            if mem_used < threshold or int(mc.stats()["vb_active_perc_mem_resident"]) >= active_resident_threshold:
                self.log.info(
                    "mem_used and vb_active_perc_mem_resident_ratio reached at %s/%s and %s "
                    % (mem_used, threshold, mc.stats()["vb_active_perc_mem_resident"])
                )
                items = self.num_items
                self.num_items += self.input.param("items", 10000)
                self._load_doc_data_all_buckets("create", items)
            else:
                threshold_reached = True
                self.log.info("DGM state achieved!!!!")

        # wait for draining of data before restart and warm up
        for bucket in self.buckets:
            RebalanceHelper.wait_for_persistence(self.nodes_server[0], bucket)

        while 1:

            #            read_data_task = self.cluster.async_verify_data(self.master, self.buckets[0], self.buckets[0].kvs[1])

            read_data_task = Thread(target=self._run_get)
            read_data_task.start()
            # 5 threads to run stats all and reset asynchronously
            start = time.time()
            while (time.time() - start) < 300:

                stats_all_thread = []
                stats_reset_thread = []

                for i in xrange(self.threads_to_run):
                    stat_str = ""
                    stats_all_thread.append(Thread(target=self._get_stats, args=[stat_str]))
                    stats_all_thread[i].start()
                    stat_str = "reset"
                    stats_reset_thread.append(Thread(target=self._get_stats, args=[stat_str]))
                    stats_reset_thread[i].start()

                for i in xrange(self.threads_to_run):
                    stats_all_thread[i].join()
                    stats_reset_thread[i].join()

                del stats_all_thread
                del stats_reset_thread

            #            read_data_task.result()
            read_data_task.join()
Developer: DavidAlphaFox, Project: couchbase, Lines: 60, Source: stats_ops.py


Example 5: _verify_data

 def _verify_data(self, master, rest, inserted_keys):
     log = logger.Logger.get_logger()
     log.info("Verifying data")
     ready = RebalanceHelper.wait_for_stats_on_all(master, "default", "ep_queue_size", 0)
     self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
     ready = RebalanceHelper.wait_for_stats_on_all(master, "default", "ep_flusher_todo", 0)
     self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")
     BucketOperationHelper.keys_exist_or_assert(keys=inserted_keys, server=master, bucket_name="default", test=self)
Developer: strategist922, Project: testrunner, Lines: 8, Source: upgradetests.py


Example 6: verify_data

 def verify_data(master, inserted_keys, bucket, test):
     test.log.info("Verifying data")
     ready = RebalanceHelper.wait_for_stats_on_all(master, bucket, 'ep_queue_size', 0)
     test.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
     ready = RebalanceHelper.wait_for_stats_on_all(master, bucket, 'ep_flusher_todo', 0)
     test.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")
     BucketOperationHelper.keys_exist_or_assert_in_parallel(keys=inserted_keys, server=master, \
         bucket_name=bucket, test=test, concurrency=4)
Developer: steveyen, Project: testrunner, Lines: 8, Source: swaprebalance.py


Example 7: test_views_failover

 def test_views_failover(self):
     num_nodes = self.input.param('num-nodes', 1)
     ddocs = self.make_ddocs(self.num_ddoc, self.views_per_ddoc, 0)
     RebalanceHelper.wait_for_persistence(self.master, self.bucket_name)
     self.cluster.failover(self.servers,
                           self.servers[1:num_nodes])
     self.cluster.rebalance(self.servers, [], self.servers[1:num_nodes])
     self.perform_ddoc_ops(ddocs)
Developer: DavidAlphaFox, Project: couchbase, Lines: 8, Source: spatialviewtests.py


Example 8: _failover_swap_rebalance

    def _failover_swap_rebalance(self):
        master = self.servers[0]
        rest = RestConnection(master)
        creds = self.input.membase_settings
        num_initial_servers = self.num_initial_servers
        initial_servers = self.servers[:num_initial_servers]

        self.log.info("CREATE BUCKET PHASE")
        SwapRebalanceBase.create_buckets(self)

        # Cluster all starting set of servers
        self.log.info("INITIAL REBALANCE PHASE")
        status, servers_rebalanced = RebalanceHelper.rebalance_in(initial_servers, len(initial_servers) - 1)
        self.assertTrue(status, msg="Rebalance failed")

        self.log.info("DATA LOAD PHASE")
        self.loaders = SwapRebalanceBase.start_load_phase(self, master)

        # Wait till load phase is over
        SwapRebalanceBase.stop_load(self.loaders, do_stop=False)
        self.log.info("DONE LOAD PHASE")

        # Start the swap rebalance
        self.log.info("current nodes : {0}".format(RebalanceHelper.getOtpNodeIds(master)))
        toBeEjectedNodes = RebalanceHelper.pick_nodes(master, howmany=self.failover_factor)
        optNodesIds = [node.id for node in toBeEjectedNodes]
        if self.fail_orchestrator:
            status, content = ClusterOperationHelper.find_orchestrator(master)
            self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
            format(status, content))
            optNodesIds[0] = content

        self.log.info("FAILOVER PHASE")
        # Failover selected nodes
        for node in optNodesIds:
            self.log.info("failover node {0} and rebalance afterwards".format(node))
            rest.fail_over(node)

        new_swap_servers = self.servers[num_initial_servers:num_initial_servers + self.failover_factor]
        for server in new_swap_servers:
            otpNode = rest.add_node(creds.rest_username, creds.rest_password, server.ip)
            msg = "unable to add node {0} to the cluster"
            self.assertTrue(otpNode, msg.format(server.ip))

        if self.fail_orchestrator:
            rest = RestConnection(new_swap_servers[0])
            master = new_swap_servers[0]

        self.log.info("DATA ACCESS PHASE")
        self.loaders = SwapRebalanceBase.start_access_phase(self, master)

        rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()], \
            ejectedNodes=optNodesIds)

        self.assertTrue(rest.monitorRebalance(),
            msg="rebalance operation failed after adding node {0}".format(new_swap_servers))

        SwapRebalanceBase.verification_phase(self, master)
Developer: Boggypop, Project: testrunner, Lines: 58, Source: swaprebalance.py


Example 9: _test_delete_key_and_backup_and_restore_body

    def _test_delete_key_and_backup_and_restore_body(self):
        bucket = "default"
        BucketOperationHelper.create_bucket(serverInfo=self.master, name=bucket, test_case=self)
        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
        self.assertTrue(ready, "wait_for_memcached failed")

        self.add_nodes_and_rebalance()

        client = MemcachedClientHelper.direct_client(self.master, "default")
        expiry = 2400
        test_uuid = uuid.uuid4()
        keys = ["key_%s_%d" % (test_uuid, i) for i in range(500)]
        self.log.info("pushing keys with expiry set to {0}".format(expiry))
        for key in keys:
            try:
                client.set(key, expiry, 0, "1")
            except mc_bin_client.MemcachedError as error:
                msg = "unable to push key : {0} to bucket : {1} error : {2}"
                self.log.error(msg.format(key, client.vbucketId, error.status))
                self.fail(msg.format(key, client.vbucketId, error.status))
        self.log.info("inserted {0} keys with expiry set to {1}".format(len(keys), expiry))

        client.delete(keys[0])

        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")

        #let's create a unique folder in the remote location
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            output, error = shell.execute_command(self.perm_command)
            shell.log_command_output(output, error)
            node = RestConnection(server).get_nodes_self()
            BackupHelper(server, self).backup(bucket, node, self.remote_tmp_folder)
            shell.disconnect()

        for server in self.servers:
            BackupHelper(server, self).restore(self.remote_tmp_folder)
            time.sleep(10)

        self.log.info('verifying that all those keys...')
        missing_keys = []
        verify_keys = []
        for key in keys:
            vBucketId = crc32.crc32_hash(key) & 1023  # or & 0x3FF
            client.vbucketId = vBucketId
            if key == keys[0]:
                missing_keys.append(key)
            else:
                verify_keys.append(key)

        self.assertTrue(BucketOperationHelper.keys_dont_exist(self.master, missing_keys, self),
                        "Keys are not empty")
        self.assertTrue(BucketOperationHelper.verify_data(self.master, verify_keys, False, False, 11210, self),
                        "Missing keys")
Developer: jchris, Project: testrunner, Lines: 57, Source: backuptests.py


Example 10: test_parallel_enable_DB_compaction

 def test_parallel_enable_DB_compaction(self):
     rest = RestConnection(self.master)
     self.set_auto_compaction(rest, parallelDBAndVC="true", dbFragmentThresholdPercentage=self.fragmentation_value)
     self.make_ddocs(self.ddocs_num, self.view_per_ddoc)
     self.create_ddocs()
     self._load_all_buckets(self.master, self.gen_load, "create", 0)
     RebalanceHelper.wait_for_persistence(self.master, self.default_bucket_name)
     self._compaction_thread()
     if self.thread_crashed.is_set():
         self.log.info("View Compaction is not started as expected")
Developer: DavidAlphaFox, Project: couchbase, Lines: 10, Source: compactionviewtests.py


Example 11: create_ddocs

 def create_ddocs(self):
     mapview = View(self.map_view_name, '''function(doc) {
          emit(doc.integer, doc.string);
       }''', dev_view=self.is_dev_view)
     self.cluster.create_view(self.master, 'test', mapview)
     redview = View(self.red_view_name, '''function(doc) {
          emit([doc.integer, doc.string], doc.integer);
       }''', '''_count''', dev_view=self.is_dev_view)
     self.cluster.create_view(self.master, 'test', redview)
     RebalanceHelper.wait_for_persistence(self.master, self.bucket, 0)
Developer: ronniedada, Project: testrunner, Lines: 10, Source: viewmergetests.py


Example 12: _monitor_drain_queue

 def _monitor_drain_queue(self):
     #start whenever drain_queue is > 0
     rest = RestConnection(self.master)
     start = time.time()
     stats = rest.get_bucket_stats(self.bucket)
     self.log.info("current ep_queue_size: {0}".format(stats["ep_queue_size"]))
     verified = RebalanceHelper.wait_for_stats(self.master, self.bucket, 'ep_queue_size', 0, timeout_in_seconds=300, verbose=False)\
     and RebalanceHelper.wait_for_stats(self.master, self.bucket, 'ep_flusher_todo', 0, timeout_in_seconds=300, verbose=False)
     self.drained = verified
     self.drained_in_seconds = time.time() - start
Developer: steveyen, Project: testrunner, Lines: 10, Source: drainratetests.py


Example 13: test_parallel_DB_views_compaction

 def test_parallel_DB_views_compaction(self):
     rest = RestConnection(self.master)
     self.set_auto_compaction(rest, parallelDBAndVC="true", viewFragmntThresholdPercentage=self.fragmentation_value, dbFragmentThresholdPercentage=self.fragmentation_value)
     self.make_ddocs(self.ddocs_num, self.view_per_ddoc)
     self.create_ddocs()
     self._load_all_buckets(self.master, self.gen_load, "create", 0)
     RebalanceHelper.wait_for_persistence(self.master, self.default_bucket_name)
     self._compaction_thread()
     if self.thread_crashed.is_set():
         self.fail("Error occurred during run")
Developer: EricACooper, Project: testrunner, Lines: 10, Source: compactionviewtests.py


Example 14: wait_until_warmed_up

    def wait_until_warmed_up(self, master=None):
        if not master:
            master = self.input.servers[0]

        bucket = self.param("bucket", "default")

        fn = RebalanceHelper.wait_for_mc_stats_no_timeout
        for bucket in self.buckets:
            RebalanceHelper.wait_for_stats_on_all(master, bucket,
                                                  'ep_warmup_thread',
                                                  'complete', fn=fn)
Developer: IrynaMironava, Project: testrunner, Lines: 11, Source: perf.py


Example 15: test_observe_with_warmup

 def test_observe_with_warmup(self):
     self._load_doc_data_all_buckets('create', 0, self.num_items)
     # Persist all the loaded data item
     self.log.info("Nodes in cluster: %s" % self.servers[:self.nodes_init])
     for bucket in self.buckets:
         RebalanceHelper.wait_for_persistence(self.master, bucket)
         self._stats_befor_warmup(bucket.name)
         self._restart_memcache(bucket.name)
         # for bucket in self.buckets:
         ClusterOperationHelper._wait_warmup_completed(self, self.servers[:self.nodes_init], bucket.name)
         self._run_observe(self)
Developer: ashvindersingh, Project: testrunner, Lines: 11, Source: observetest.py


Example 16: _test_backup_and_restore_bucket_overwriting_body

    def _test_backup_and_restore_bucket_overwriting_body(self, overwrite_flag=True):
        bucket = "default"
        BucketOperationHelper.create_bucket(serverInfo=self.master, test_case=self)
        BucketOperationHelper.wait_for_memcached(self.master, bucket)
        self.add_nodes_and_rebalance()

        client = MemcachedClientHelper.direct_client(self.master, "default")
        expiry = 2400
        test_uuid = uuid.uuid4()
        keys = ["key_%s_%d" % (test_uuid, i) for i in range(500)]
        self.log.info("pushing keys with expiry set to {0}".format(expiry))
        for key in keys:
            try:
                client.set(key, expiry, 0, "1")
            except mc_bin_client.MemcachedError as error:
                msg = "unable to push key : {0} to bucket : {1} error : {2}"
                self.log.error(msg.format(key, client.vbucketId, error.status))
                self.fail(msg.format(key, client.vbucketId, error.status))
        self.log.info("inserted {0} keys with expiry set to {1}".format(len(keys), expiry))

        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")

        for server in self.servers:
            shell = RemoteMachineShellConnection(server)

            output, error = shell.execute_command(self.perm_command)
            shell.log_command_output(output, error)
            node = RestConnection(server).get_nodes_self()
            BackupHelper(server, self).backup(bucket, node, self.remote_tmp_folder)
            shell.disconnect()

        for key in keys:
            try:
                client.replace(key, expiry, 0, "2")
            except mc_bin_client.MemcachedError as error:
                msg = "unable to replace key : {0} in bucket : {1} error : {2}"
                self.log.error(msg.format(key, client.vbucketId, error.status))
                self.fail(msg.format(key, client.vbucketId, error.status))
        self.log.info("replaced {0} keys with expiry set to {1}".format(len(keys), expiry))

        for server in self.servers:
            BackupHelper(server, self).restore(self.remote_tmp_folder, overwrite_flag)
            time.sleep(10)

        self.log.info('verifying that all those keys...')
        for key in keys:
            if overwrite_flag:
                self.assertEqual("2", client.get(key=key), key + " should has value = 2")
            else:
                self.assertNotEqual("2", client.get(key=key), key + " should not has value = 2")
        self.log.info("verified that those keys inserted with expiry set to {0} have expired".format(expiry))
Developer: jchris, Project: testrunner, Lines: 54, Source: backuptests.py


Example 17: load_data

 def load_data(self, master, bucket, keys_count):
     log = logger.Logger.get_logger()
     inserted_keys_cnt = 0
     while inserted_keys_cnt < keys_count:
         keys_cnt, rejected_keys_cnt = MemcachedClientHelper.load_bucket(
             servers=[master], name=bucket, number_of_items=keys_count, number_of_threads=5, write_only=True
         )
         inserted_keys_cnt += keys_cnt
     log.info("wait until data is completely persisted on the disk")
     RebalanceHelper.wait_for_stats_on_all(master, bucket, "ep_queue_size", 0)
     RebalanceHelper.wait_for_stats_on_all(master, bucket, "ep_flusher_todo", 0)
     return inserted_keys_cnt
Developer: jason-hou, Project: testrunner, Lines: 12, Source: autofailovertests.py


Example 18: _test_cluster_topology_change_body

    def _test_cluster_topology_change_body(self):
        bucket = "default"
        BucketOperationHelper.create_bucket(serverInfo=self.master, test_case=self)
        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
        self.assertTrue(ready, "wait_for_memcached failed")
        self.add_nodes_and_rebalance()

        rest = RestConnection(self.master)

        distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}

        inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[self.master],
                                                                                             ram_load_ratio=1,
                                                                                             value_size_distribution=distribution,
                                                                                             moxi=True,
                                                                                             write_only=True,
                                                                                             number_of_threads=2)

        self.log.info("Sleep after data load")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")

        #let's create a unique folder in the remote location
        for server in self.servers:
            shell = RemoteMachineShellConnection(server)
            output, error = shell.execute_command(self.perm_command)
            shell.log_command_output(output, error)
            node = RestConnection(server).get_nodes_self()
            BackupHelper(server, self).backup(bucket, node, self.remote_tmp_folder)
            shell.disconnect()

        ClusterOperationHelper.cleanup_cluster(self.servers)
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)

        servers = []
        for i in range(0, len(self.servers) - 1):
            servers.append(self.servers[i])

        self.add_node_and_rebalance(servers[0], servers)

        BucketOperationHelper.delete_bucket_or_assert(self.master, bucket, self)
        BucketOperationHelper.create_bucket(serverInfo=self.master, test_case=self)

        ready = BucketOperationHelper.wait_for_memcached(self.master, bucket)
        self.assertTrue(ready, "wait_for_memcached failed")

        for server in self.servers:
            BackupHelper(server, self).restore(self.remote_tmp_folder)
            time.sleep(10)

        BucketOperationHelper.verify_data(self.master, inserted_keys, False, False, 11210, self)
Developer: steveyen, Project: testrunner, Lines: 53, Source: backuptests.py


Example 19: load_data

 def load_data(master, bucket, keys_count=-1, load_ratio=-1):
     log = logger.Logger.get_logger()
     inserted_keys, rejected_keys =\
     MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[master],
                                                           name=bucket,
                                                           ram_load_ratio=load_ratio,
                                                           number_of_items=keys_count,
                                                           number_of_threads=2,
                                                           write_only=True)
     log.info("wait until data is completely persisted on the disk")
     RebalanceHelper.wait_for_stats_on_all(master, bucket, 'ep_queue_size', 0)
     RebalanceHelper.wait_for_stats_on_all(master, bucket, 'ep_flusher_todo', 0)
     return inserted_keys
Developer: jchris, Project: testrunner, Lines: 13, Source: failovertests.py


Example 20: test_vbucket_uuid

 def test_vbucket_uuid(self):
     """
         Test to show usage of vbucket information collection via api
         and then comparison and running the logic for analysis.
         This is done at the cluster and node level as well.
     """
     self.gen_create = BlobGenerator('loadOne', 'loadOne_', self.value_size, end=self.num_items)
     self._load_all_buckets(self.master, self.gen_create, "create", 0,
                            batch_size=10000, pause_secs=10, timeout_secs=60)
     self._wait_for_stats_all_buckets(self.servers)
     RebalanceHelper.wait_for_replication(self.servers, self.cluster)
     vbucket_stats = self.get_vbucket_seqnos(self.servers, self.buckets, perNode=True)
     logic, output = self.compare_per_node_maps(vbucket_stats)
     self.assertTrue(logic, output)
Developer: uvenum, Project: testrunner, Lines: 14, Source: clusterinfoanalysis.py



Note: The membase.helper.rebalance_helper.RebalanceHelper class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation hosting platforms. The code snippets were selected from open-source projects contributed by their respective authors, and copyright remains with the original authors. For distribution and use, please follow the license of the corresponding project; do not reproduce without permission.

