本文整理汇总了Python中membase.api.rest_client.RestConnection类的典型用法代码示例。如果您正苦于以下问题:Python RestConnection类的具体用法?Python RestConnection怎么用?Python RestConnection使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了RestConnection类的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: pick_node
def pick_node(master):
    """Select a cluster node to operate on, preferring one that is not the master.

    When every node reports the same IP (a cluster_run-style local setup,
    where only the ports differ), nodes are distinguished by port instead.
    Returns the chosen node status object.
    """
    rest = RestConnection(master)
    nodes = rest.node_statuses()
    picked = None
    reference_ip = nodes[0].ip
    # True when every node shares the first node's IP (single-machine cluster).
    single_host = all(node.ip == reference_ip for node in nodes)
    for node in nodes:
        picked = node
        if not single_host:
            # Normal cluster: any node whose IP differs from the master will do.
            if picked.ip != master.ip:
                log.info("Picked node ... {0}:{1}".format(picked.ip, picked.port))
                break
        else:
            # temp fix - port numbers of master(machine ip and localhost: 9000 match
            if int(picked.port) == int(master.port):
                log.info(
                    "Not picking the master node {0}:{1}.. try again...".format(picked.ip, picked.port)
                )
            else:
                log.info("Picked node {0}:{1}".format(picked.ip, picked.port))
                break
    return picked
开发者ID:ketakigangal,项目名称:cbsystest,代码行数:28,代码来源:rebalance_helper.py
示例2: _verify_zone
def _verify_zone(self, name):
    """Assert that a server group (zone) named *name* exists on the cluster.

    Raises an Exception when the zone is absent; otherwise logs success.
    """
    rest = RestConnection(self.servers[0])
    zone = name.strip()
    if not rest.is_zone_exist(zone):
        raise Exception("There is not zone with name: %s in cluster" % name)
    self.log.info("verified! zone '{0}' is existed".format(zone))
开发者ID:DavidAlphaFox,项目名称:couchbase,代码行数:7,代码来源:rackzonetests.py
示例3: test_change_mem_quota_when_index_building
def test_change_mem_quota_when_index_building(self):
    """Shrink the indexer memory quota while secondary indexes are building.

    Sets the indexer quota to 700 MB, creates three secondary indexes per
    bucket (optionally with deferred build), then drops the quota to 500 MB
    mid-build to exercise the index service's low-memory handling.
    """
    rest = RestConnection(self.oomServer)
    log.info("Setting indexer memory quota to 700 MB...")
    rest.set_indexer_memoryQuota(indexMemoryQuota=700)
    self.sleep(30)
    # Build three index definitions over the "job_title" field.
    query_definitions = []
    for x in range(3):
        index_name = "index_"+str(x)
        query_definition = QueryDefinition(index_name=index_name, index_fields = ["job_title"],
                                           query_template = self.query_template, groups = ["simple"])
        query_definitions.append(query_definition)
    create_tasks = []
    build_tasks = []
    # Maps each bucket to the list of index names created on it.
    index_info = {}
    for bucket in self.buckets:
        if not bucket in index_info.keys():
            index_info[bucket] = []
        for query_definition in query_definitions:
            index_info[bucket].append(query_definition.index_name)
            # Fire off index creation asynchronously; results gathered below.
            task = self.async_create_index(bucket.name, query_definition)
            create_tasks.append(task)
    for task in create_tasks:
        task.result()
    if self.defer_build:
        # Deferred mode: creation above only registered the indexes;
        # kick off the actual builds now.
        log.info("Building Indexes...")
        for key, val in index_info.iteritems():
            task = self.async_build_index(bucket=key, index_list=val)
            build_tasks.append(task)
    self.sleep(10)
    # Reduce the quota while builds are (potentially) still in flight.
    log.info("Setting indexer memory quota to 500 MB...")
    rest.set_indexer_memoryQuota(indexMemoryQuota=500)
    self.sleep(30)
    for task in build_tasks:
        task.result()
开发者ID:chethanrao,项目名称:testrunner-archive,代码行数:34,代码来源:memdb_oom_2i.py
示例4: rebalance_in_out_at_once_persistence_stopped
def rebalance_in_out_at_once_persistence_stopped(self):
    """Rebalance nodes in and out at once while disk persistence is stopped.

    Stops persistence (cbepctl "stop") on the first N initial nodes, loads
    extra items that therefore exist only in memory, runs a combined
    rebalance-in/rebalance-out, and verifies that no items were lost even
    though the extra items were never persisted.
    """
    num_nodes_with_stopped_persistence = self.input.param("num_nodes_with_stopped_persistence", 1)
    servs_init = self.servers[:self.nodes_init]
    servs_in = [self.servers[i + self.nodes_init] for i in range(self.nodes_in)]
    servs_out = [self.servers[self.nodes_init - i - 1] for i in range(self.nodes_out)]
    rest = RestConnection(self.master)
    self._wait_for_stats_all_buckets(servs_init)
    # Stop persistence on the first nodes so subsequent mutations stay
    # memory-resident only.
    for server in servs_init[:min(num_nodes_with_stopped_persistence, self.nodes_init)]:
        shell = RemoteMachineShellConnection(server)
        for bucket in self.buckets:
            shell.execute_cbepctl(bucket, "stop", "", "", "")
    self.sleep(5)
    self.num_items_without_persistence = self.input.param("num_items_without_persistence", 100000)
    # Extra keys live in the upper half of the keyspace, past the base load.
    gen_extra = BlobGenerator('mike', 'mike-', self.value_size, start=self.num_items / 2\
                              , end=self.num_items / 2 + self.num_items_without_persistence)
    self.log.info("current nodes : {0}".format([node.id for node in rest.node_statuses()]))
    self.log.info("adding nodes {0} to cluster".format(servs_in))
    self.log.info("removing nodes {0} from cluster".format(servs_out))
    tasks = self._async_load_all_buckets(self.master, gen_extra, "create", 0, batch_size=1000)
    result_nodes = set(servs_init + servs_in) - set(servs_out)
    # wait timeout in 60 min because MB-7386 rebalance stuck
    self.cluster.rebalance(servs_init[:self.nodes_init], servs_in, servs_out, timeout=self.wait_timeout * 60)
    for task in tasks:
        task.result()
    # The stopped nodes should still be queueing ~all the unpersisted items
    # (>= 90% allows for some slop).
    self._wait_for_stats_all_buckets(servs_init[:self.nodes_init - self.nodes_out], \
                                     ep_queue_size=self.num_items_without_persistence * 0.9, ep_queue_size_cond='>')
    self._wait_for_stats_all_buckets(servs_in)
    self._verify_all_buckets(self.master, timeout=None)
    self._verify_stats_all_buckets(result_nodes)
    # verify that curr_items_tot corresponds to sum of curr_items from all nodes
    verified = True
    for bucket in self.buckets:
        verified &= RebalanceHelper.wait_till_total_numbers_match(self.master, bucket)
    self.assertTrue(verified, "Lost items!!! Replication was completed but sum(curr_items) don't match the curr_items_total")
开发者ID:ashvindersingh,项目名称:testrunner,代码行数:35,代码来源:rebalanceinout.py
示例5: test_delete_empty_defautl_zone
def test_delete_empty_defautl_zone(self):
    """Delete the default server group once it has been emptied.

    Moves the first server from the default zone ("Group 1") into a new
    zone, deletes the now-empty default zone, and renames the new zone back
    to the default name.  (The 'defautl' typo in the test name is kept --
    test names are referenced externally by test configs.)
    """
    zone_name ="test1"
    default_zone = "Group 1"
    moved_node = []
    serverInfo = self.servers[0]
    moved_node.append(serverInfo.ip)
    rest = RestConnection(serverInfo)
    try:
        self.log.info("create zone {0}".format(zone_name))
        rest.add_zone(zone_name)
        if rest.is_zone_exist(zone_name):
            self.log.info("Move node {0} from zone {1} to zone {2}" \
                          .format(moved_node, default_zone, zone_name))
            status = rest.shuffle_nodes_in_zones(moved_node, default_zone, zone_name)
            if status:
                # Default zone is empty now; deleting it should succeed.
                rest.delete_zone(default_zone)
            else:
                self.fail("Failed to move node {0} from zone {1} to zone {2}" \
                          .format(moved_node, default_zone, zone_name))
            if not rest.is_zone_exist(default_zone):
                self.log.info("successful delete default zone")
            else:
                raise Exception("Failed to delete default zone")
            # Restore the cluster to its original zone naming.
            rest.rename_zone(zone_name, default_zone)
    except Exception,e :
        print e
开发者ID:DavidAlphaFox,项目名称:couchbase,代码行数:26,代码来源:rackzonetests.py
示例6: test_rotateInterval
def test_rotateInterval(self):
    """Verify the audit log is rotated after the configured interval.

    Sets the audit rotate interval from the "intervalSec" test param, waits
    past it, triggers an auditable event (a login), and checks that both the
    time-stamped archive file and a fresh audit.log exist on the master.
    The original interval is always restored.
    """
    intervalSec = self.input.param("intervalSec", None)
    auditIns = audit(host=self.master)
    rest = RestConnection(self.master)
    originalInt = auditIns.getAuditRotateInterval()
    try:
        # Timestamp of the current log; rotation names the archive after it.
        firstEventTime = self.getTimeStampForFile(auditIns)
        self.log.info ("first time evetn is {0}".format(firstEventTime))
        auditIns.setAuditRotateInterval(intervalSec)
        self.sleep(intervalSec + 20, 'Sleep for log roll over to happen')
        # A successful login generates an audit event after rollover.
        status, content = rest.validateLogin(self.master.rest_username, self.master.rest_password, True, getContent=True)
        self.sleep(120)
        shell = RemoteMachineShellConnection(self.master)
        try:
            # Archive file is named "<hostname>-<timestamp>-audit.log".
            hostname = shell.execute_command("hostname")
            archiveFile = hostname[0][0] + '-' + firstEventTime + "-audit.log"
            self.log.info ("Archive File Name is {0}".format(archiveFile))
            result = shell.file_exists(auditIns.pathLogFile, archiveFile)
            self.assertTrue(result, "Archive Audit.log is not created on time interval")
            self.log.info ("Validation of archive File created is True, Audit archive File is created {0}".format(archiveFile))
            result = shell.file_exists(auditIns.pathLogFile, auditIns.AUDITLOGFILENAME)
            self.assertTrue(result, "Audit.log is not created when memcached server is killed")
        finally:
            shell.disconnect()
    finally:
        # Always restore the original rotation interval for later tests.
        auditIns.setAuditRotateInterval(originalInt)
开发者ID:arod1987,项目名称:testrunner,代码行数:26,代码来源:auditcheckconfig.py
示例7: test_folderMisMatchCluster
def test_folderMisMatchCluster(self):
    """Audit behaviour when the audit log directory exists on only one node.

    Creates a new log directory on servers[0] only, points the audit log
    path at it, then creates a bucket via each node's REST interface and
    checks whether the expected bucket-creation audit event can still be
    read from servers[0].
    """
    auditIns = audit(host=self.master)
    # BUG FIX: was 'orginalPath = ...' while the next line read
    # 'originalPath', which raised NameError before the test body ran.
    originalPath = auditIns.getAuditLogPath()
    newPath = originalPath + 'testFolderMisMatch'
    shell = RemoteMachineShellConnection(self.servers[0])
    try:
        shell.create_directory(newPath)
        # The audit directory must be owned by the couchbase user or the
        # server cannot write/rotate logs there.
        command = 'chown couchbase:couchbase ' + newPath
        shell.execute_command(command)
    finally:
        shell.disconnect()
    # NOTE(review): 'setsetAuditLogPath' looks like a typo for
    # 'setAuditLogPath' -- confirm against the audit helper class.
    auditIns.setsetAuditLogPath(newPath)
    for server in self.servers:
        # BUG FIX: was 'RestConnection(sever)' -- undefined name.
        rest = RestConnection(server)
        # Create an Event for Bucket Creation
        # NOTE(review): 'source' and 'user' are not defined in this block;
        # presumably class/module attributes -- verify in the full file.
        expectedResults = {'name':'TestBucket ' + server.ip, 'ram_quota':536870912, 'num_replicas':1,
                           'replica_index':False, 'eviction_policy':'value_only', 'type':'membase', \
                           'auth_type':'sasl', "autocompaction":'false', "purge_interval":"undefined", \
                           "flush_enabled":False, "num_threads":3, "source":source, \
                           "user":user, "ip":self.ipAddress, "port":57457, 'sessionid':'' }
        rest.create_bucket(expectedResults['name'], expectedResults['ram_quota'] / 1048576, expectedResults['auth_type'], 'password', expectedResults['num_replicas'], \
                           '11211', 'membase', 0, expectedResults['num_threads'], expectedResults['flush_enabled'], 'valueOnly')
        # Check on Events
        try:
            self.checkConfig(self.eventID, self.servers[0], expectedResults)
        except:
            self.log.info ("Issue reading the file at Node {0}".format(server.ip))
开发者ID:arod1987,项目名称:testrunner,代码行数:30,代码来源:auditcheckconfig.py
示例8: common_test_body
def common_test_body(self, replica, load_ratio, timeout=10):
    """Repeatedly grow and shrink the cluster for *timeout* minutes.

    Each iteration adds a random number of the still-unjoined servers, or
    sleeps when all have joined, then removes a random number of nodes as
    long as at least 3/4 of all servers are currently in the cluster.
    """
    log = logger.Logger.get_logger()
    start_time = time.time()
    log.info("replica : {0}".format(replica))
    log.info("load_ratio : {0}".format(load_ratio))
    master = self._servers[0]
    log.info('picking server : {0} as the master'.format(master))
    rest = RestConnection(master)
    # timeout is in minutes; loop until it elapses.
    while time.time() < ( start_time + 60 * timeout):
        #rebalance out step nodes
        #let's add some items ?
        nodes = rest.node_statuses()
        # Number of configured servers not yet part of the cluster.
        delta = len(self._servers) - len(nodes)
        if delta > 0:
            if delta > 1:
                how_many_add = Random().randint(1, delta)
            else:
                how_many_add = 1
            self.log.info("going to add {0} nodes".format(how_many_add))
            self.rebalance_in(how_many=how_many_add)
        else:
            self.log.info("all nodes already joined the cluster")
            time.sleep(30 * 60)
        #dont rebalance out if there are not too many nodes
        if len(nodes) >= (3.0 / 4.0 * len(self._servers)):
            nodes = rest.node_statuses()
            how_many_out = Random().randint(1, len(nodes) - 1)
            self.log.info("going to remove {0} nodes".format(how_many_out))
            self.rebalance_out(how_many=how_many_out)
示例9: backup
def backup(self):
    """Back up the 'default' bucket of every node until told to stop.

    Loops forever: a non-blocking read on self.queue acts as the stop
    signal -- any item means "quit".  While the queue is empty (the raised
    exception path), every cluster node is backed up to /tmp, then the
    loop sleeps self.interval seconds before trying again.
    """
    while True:
        try:
            # Stop signal: anything placed on the queue ends the loop.
            x = self.queue.get_nowait()
            self.log.info("get_nowait : {0}".format(x))
            break
        #things are notmal just do another back aafter
        #waiting for self.interval
        except Exception:
            # Queue was empty (Queue.Empty) -- do a backup round.
            master = self.servers[0]
            rest = RestConnection(master)
            nodes = rest.node_statuses()
            # NOTE: 'map' shadows the builtin; maps node -> server object.
            map = self.node_server_map(nodes, self.servers)
            self.log.info("cluster has {0} nodes".format(len(nodes)))
            for node in nodes:
                try:
                    # atfork() reseeds PyCrypto's RNG after a fork.
                    from Crypto.Random import atfork
                    atfork()
                    # NOTE(review): backup is invoked twice in a row --
                    # possibly an accidental duplicate; confirm intent.
                    BackupHelper(map[node]).backup('default', "/tmp")
                    BackupHelper(map[node]).backup('default', "/tmp")
                except Exception as ex:
                    print ex
            self.log.info("backed up the data into ")
            time.sleep(self.interval)
开发者ID:DavidAlphaFox,项目名称:couchbase,代码行数:25,代码来源:longevity.py
示例10: create_bucket
def create_bucket(serverInfo, name='default', replica=1, port=11210, test_case=None, bucket_ram=-1, password=None):
    """Create a bucket via REST and wait until it actually appears.

    A negative bucket_ram defaults the quota to 2/3 of the node's memory
    quota.  Returns True when the bucket showed up; on timeout logs an
    error and, when test_case is supplied, fails it.
    """
    log = logger.Logger.get_logger()
    rest = RestConnection(serverInfo)
    if bucket_ram < 0:
        # Default: two thirds of the node's configured memory quota.
        bucket_ram = rest.get_nodes_self().memoryQuota * 2 / 3
    # NOTE(review): this mapping looks inverted (no password -> "sasl"),
    # but original behaviour is preserved exactly -- confirm intent.
    authType = "sasl" if password is None else "none"
    rest.create_bucket(bucket=name,
                       ramQuotaMB=bucket_ram,
                       replicaNumber=replica,
                       proxyPort=port,
                       authType=authType,
                       saslPassword=password)
    msg = 'create_bucket succeeded but bucket "{0}" does not exist'
    created = BucketOperationHelper.wait_for_bucket_creation(name, rest)
    if not created:
        log.error(msg)
        if test_case:
            test_case.fail(msg=msg.format(name))
    return created
开发者ID:membase,项目名称:testrunner,代码行数:25,代码来源:bucket_helper.py
示例11: create_primary_index_for_3_0_and_greater
def create_primary_index_for_3_0_and_greater(self):
    """Ensure each bucket has a N1QL primary index (server 3.x/4.x/5.x only).

    Optionally drops an existing primary index first, checks system:indexes
    for "#primary", and creates it with the configured index type when
    missing.  For GSI indexes it additionally waits until the index is
    online.
    """
    self.log.info("CREATE PRIMARY INDEX using %s" % self.primary_indx_type)
    rest = RestConnection(self.master)
    versions = rest.get_nodes_versions()
    # Primary indexes only apply to 3.x and newer servers.
    if versions[0].startswith("4") or versions[0].startswith("3") or versions[0].startswith("5"):
        for bucket in self.buckets:
            if self.primary_indx_drop:
                self.log.info("Dropping primary index for %s using %s ..." % (bucket.name,self.primary_indx_type))
                self.query = "DROP PRIMARY INDEX ON %s USING %s" % (bucket.name,self.primary_indx_type)
                #self.run_cbq_query()
                self.sleep(3, 'Sleep for some time after index drop')
            # Does a "#primary" index already exist for this bucket?
            self.query = 'select * from system:indexes where name="#primary" and keyspace_id = "%s"' % bucket.name
            res = self.run_cbq_query()
            self.sleep(10)
            if self.monitoring:
                # Clear completed-requests history before measuring.
                self.query = "delete from system:completed_requests"
                self.run_cbq_query()
            if not self.skip_primary_index:
                if (res['metrics']['resultCount'] == 0):
                    self.query = "CREATE PRIMARY INDEX ON %s USING %s" % (bucket.name, self.primary_indx_type)
                    self.log.info("Creating primary index for %s ..." % bucket.name)
                    try:
                        self.run_cbq_query()
                        self.primary_index_created = True
                        if self.primary_indx_type.lower() == 'gsi':
                            self._wait_for_index_online(bucket, '#primary')
                    except Exception, ex:
                        # Creation failure is logged, not fatal.
                        self.log.info(str(ex))
开发者ID:arod1987,项目名称:testrunner,代码行数:28,代码来源:newtuq.py
示例12: _create_buckets
def _create_buckets(self, nodes):
    """Create the default, SASL and standard buckets requested by the config.

    Sizes each bucket from the shared memory quota unless a per-kind quota
    override (_default_quota / _sasl_quota / _standard_quota) is non-zero.
    """
    master_node = nodes[0]
    # Total bucket count drives the per-bucket size calculation.
    num_buckets = self._sasl_buckets + self._standard_buckets
    if self._default_bucket:
        num_buckets += 1
    bucket_size = self._get_bucket_size(master_node, nodes, self._mem_quota_int, num_buckets)
    master_id = RestConnection(master_node).get_nodes_self().id
    if self._default_bucket:
        # A non-zero override replaces the computed size (and, as in the
        # original, carries over to the later bucket kinds unless they
        # override it themselves).
        if self._default_quota != 0:
            bucket_size = self._default_quota
        RestConnection(nodes[0]).create_bucket(
            bucket=self.default_bucket_name,
            ramQuotaMB=bucket_size,
            replicaNumber=self._num_replicas,
            proxyPort=11211,
            authType="none",
            saslPassword=None,
        )
        self._buckets.append(self.default_bucket_name)
    if self._sasl_buckets > 0:
        if self._sasl_quota != 0:
            bucket_size = self._sasl_quota
        self._create_sasl_buckets(master_node, master_id, bucket_size, password="password")
    if self._standard_buckets > 0:
        if self._standard_quota != 0:
            bucket_size = self._standard_quota
        self._create_standard_buckets(master_node, master_id, bucket_size)
开发者ID:strategist922,项目名称:testrunner,代码行数:30,代码来源:cluster_setup.py
示例13: test_invalidLogPathCluster
def test_invalidLogPathCluster(self):
    """Setting a non-existent audit log directory must be rejected."""
    auditIns = audit(host=self.master)
    # Append a suffix so the path is guaranteed not to exist on disk.
    bogus_path = auditIns.getAuditLogPath() + 'test'
    status, content = RestConnection(self.master).setAuditSettings(logPath=bogus_path)
    self.assertFalse(status, "Audit is able to set invalid path")
    self.assertEqual(content['errors']['logPath'], 'The value must be a valid directory', 'No error or error changed')
开发者ID:arod1987,项目名称:testrunner,代码行数:7,代码来源:auditcheckconfig.py
示例14: setUp
def setUp(self):
    """Prepare an RBAC-over-memcached test: buckets, users and auth backend.

    Reads test parameters, creates the bucket under test (plus an optional
    "no access" bucket), then provisions the users listed by user_id under
    one of three auth mechanisms: ldap, pam, or builtin.
    """
    super(RbacTestMemcached, self).setUp()
    rest = RestConnection(self.master)
    self.auth_type = self.input.param('auth_type','builtin')
    self.user_id = self.input.param("user_id",None)
    self.user_role = self.input.param("user_role",None)
    self.bucket_name = self.input.param("bucket_name",None)
    # Bucket under test; lww=True enables last-write-wins conflict resolution.
    rest.create_bucket(bucket=self.bucket_name, ramQuotaMB=100,lww=True)
    self.role_map = self.input.param("role_map",None)
    self.incorrect_bucket = self.input.param("incorrect_bucket",False)
    self.new_role = self.input.param("new_role",None)
    self.new_role_map = self.input.param("new_role_map",None)
    self.no_bucket_access = self.input.param("no_bucket_access",False)
    self.no_access_bucket_name = self.input.param("no_access_bucket_name","noaccess")
    self.all_buckets = self.input.param("all_buckets",None)
    # Expand user_id into concrete (name, password) pairs.
    self.ldap_users = rbacmain().returnUserList(self.user_id)
    if self.no_bucket_access:
        # Extra bucket the test users must NOT be able to reach.
        rest.create_bucket(bucket=self.no_access_bucket_name, ramQuotaMB=100, lww=True)
    if self.auth_type == 'ldap':
        # Remove the builtin admin user so LDAP is the effective backend.
        rbacmain(self.master, 'builtin')._delete_user('cbadminbucket')
    if self.auth_type == 'ldap':
        rbacmain().setup_auth_mechanism(self.servers,'ldap',rest)
        for user in self.ldap_users:
            testuser = [{'id': user[0], 'name': user[0], 'password': user[1]}]
            RbacBase().create_user_source(testuser, 'ldap', self.master)
            self.sleep(10)
    elif self.auth_type == "pam":
        rbacmain().setup_auth_mechanism(self.servers,'pam', rest)
        # Recreate the OS-level users: delete first to start clean.
        rbacmain().add_remove_local_user(self.servers, self.ldap_users, 'deluser')
        rbacmain().add_remove_local_user(self.servers, self.ldap_users,'adduser')
    elif self.auth_type == "builtin":
        for user in self.ldap_users:
            testuser = [{'id': user[0], 'name': user[0], 'password': user[1]}]
            RbacBase().create_user_source(testuser, 'builtin', self.master)
            self.sleep(10)
开发者ID:arod1987,项目名称:testrunner,代码行数:35,代码来源:rbacmem.py
示例15: test_rotateIntervalCluster
def test_rotateIntervalCluster(self):
    """Verify audit log rotation happens on every node of the cluster.

    Records the current audit timestamp on each of the first nodes_init
    nodes, waits past the configured rotation interval, triggers an
    auditable login on each node, and checks both the archive file and a
    fresh audit.log exist per node.  Restores the original interval.
    """
    intervalSec = self.input.param("intervalSec", None)
    nodes_init = self.input.param("nodes_init", 2)
    auditIns = audit(host=self.master)
    auditIns.setAuditEnable('true')
    originalInt = auditIns.getAuditRotateInterval()
    auditIns.setAuditRotateInterval(intervalSec)
    # Per-node timestamps; rotation names each archive after its own.
    firstEventTime = []
    try:
        for i in range(len(self.servers[:nodes_init])):
            auditTemp = audit(host=self.servers[i])
            firstEventTime.append(self.getTimeStampForFile(auditTemp))
        self.sleep(intervalSec + 20, 'Sleep for log roll over to happen')
        for i in range(len(self.servers[:nodes_init])):
            shell = RemoteMachineShellConnection(self.servers[i])
            rest = RestConnection(self.servers[i])
            # A login creates an audit event after the rollover point.
            status, content = rest.validateLogin(self.master.rest_username, self.master.rest_password, True, getContent=True)
            self.sleep(120, "sleeping for log file creation")
            try:
                # Archive file is "<hostname>-<timestamp>-audit.log".
                hostname = shell.execute_command("hostname")
                self.log.info ("print firstEventTime {0}".format(firstEventTime[i]))
                archiveFile = hostname[0][0] + '-' + firstEventTime[i] + "-audit.log"
                self.log.info ("Archive File Name is {0}".format(archiveFile))
                result = shell.file_exists(auditIns.pathLogFile, archiveFile)
                self.assertTrue(result, "Archive Audit.log is not created on time interval")
                self.log.info ("Validation of archive File created is True, Audit archive File is created {0}".format(archiveFile))
                result = shell.file_exists(auditIns.pathLogFile, auditIns.AUDITLOGFILENAME)
                self.assertTrue(result, "Audit.log is not created as per the roll over time specified")
            finally:
                shell.disconnect()
    finally:
        # Always restore the original rotation interval.
        auditIns.setAuditRotateInterval(originalInt)
开发者ID:arod1987,项目名称:testrunner,代码行数:35,代码来源:auditcheckconfig.py
示例16: items_verification
def items_verification(test, master):
    """Check that item totals converge across all nodes for every bucket.

    Fails *test* if any bucket's per-node counts do not match its total
    within the timeout.
    """
    timeout = 600
    rest = RestConnection(master)
    # Verify items count across all node
    for bucket in rest.get_buckets():
        matched = RebalanceHelper.wait_till_total_numbers_match(master, bucket.name, timeout_in_seconds=timeout)
        test.assertTrue(matched, "Lost items!!.. failing test in {0} secs".format(timeout))
开发者ID:Boggypop,项目名称:testrunner,代码行数:7,代码来源:swaprebalance.py
示例17: test_fileRotate20MB
def test_fileRotate20MB(self):
    """Verify the audit log rotates when it reaches the 20 MB size threshold.

    Generates audit events (logins) in a loop until the log approaches
    ~20 MB, an archive file appears, or a 10-hour safety timeout elapses,
    then asserts that the archive file was created.
    """
    auditIns = audit(host=self.master)
    firstEventTime = self.getTimeStampForFile(auditIns)
    tempEventCounter = 0
    rest = RestConnection(self.master)
    shell = RemoteMachineShellConnection(self.master)
    filePath = auditIns.pathLogFile + auditIns.AUDITLOGFILENAME
    number = int (shell.get_data_file_size(filePath))
    # Rotation archives to "<hostname>-<timestamp>-audit.log".
    hostname = shell.execute_command("hostname")
    archiveFile = hostname[0][0] + '-' + firstEventTime + "-audit.log"
    result = shell.file_exists(auditIns.pathLogFile, archiveFile)
    tempTime = 0
    starttime = time.time()
    # Loop until ~20MB logged, archive appears, or 36000s (10h) elapse.
    while ((number < 21089520) and (tempTime < 36000) and (result == False)):
        for i in range(1, 10):
            # Each login adds an audit record, growing the log file.
            status, content = rest.validateLogin("Administrator", "password", True, getContent=True)
            tempEventCounter += 1
        number = int (shell.get_data_file_size(filePath))
        currTime = time.time()
        tempTime = int (currTime - starttime)
        result = shell.file_exists(auditIns.pathLogFile, archiveFile)
    self.sleep(30)
    result = shell.file_exists(auditIns.pathLogFile, archiveFile)
    shell.disconnect()
    self.log.info ("--------Total Event Created ---- {0}".format(tempEventCounter))
    self.assertTrue(result, "Archive Audit.log is not created on reaching 20MB threshhold")
开发者ID:arod1987,项目名称:testrunner,代码行数:26,代码来源:auditcheckconfig.py
示例18: setUp
def setUp(self):
    """Prepare a two-cluster XDCR UI test.

    Waits for every server to leave the 'warmup' state, deletes any
    pre-existing buckets, creates the source bucket on servers[0] and the
    destination bucket on servers[1], and logs in via the UI helper.
    """
    super(XDCRTests, self).setUp()
    self.bucket = Bucket()
    self._initialize_nodes()
    self.master = self.servers[0]
    for server in self.servers:
        rest=RestConnection(server)
        cluster_status = rest.cluster_status()
        self.log.info("Initial status of {0} cluster is {1}".format(server.ip,
                                                                    cluster_status['nodes'][0]['status']))
        # Block until the node finishes warming up.
        while cluster_status['nodes'][0]['status'] == 'warmup':
            self.log.info("Waiting for cluster to become healthy")
            self.sleep(5)
            cluster_status = rest.cluster_status()
        self.log.info("current status of {0} is {1}".format(server.ip,
                                                            cluster_status['nodes'][0]['status']))
    # Delete all buckets before creating new buckets
    self.log.info("Deleting all existing buckets")
    BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
    self.log.info("Creating new buckets")
    src_bucket = self.input.param('src_bucket', self.bucket)
    dest_bucket = self.input.param('dest_bucket', self.bucket)
    if src_bucket:
        # Replication source lives on the first server.
        RestConnection(self.servers[0]).create_bucket(bucket='default', ramQuotaMB=500)
    if dest_bucket:
        # Replication destination lives on the second server.
        RestConnection(self.servers[1]).create_bucket(bucket='default', ramQuotaMB=500)
    helper = BaseHelper(self)
    helper.login()
开发者ID:EricACooper,项目名称:testrunner,代码行数:28,代码来源:uixdcrtests.py
示例19: rebalance_in_out_at_once_with_max_buckets_number
def rebalance_in_out_at_once_with_max_buckets_number(self):
    """Rebalance in and out simultaneously with the maximum bucket count.

    Creates as many buckets as the cluster's maxBucketCount internal
    setting allows (one default, the rest split between SASL and standard),
    loads them, then runs a combined rebalance-in/out while updating the
    data, and verifies cluster stats on the resulting node set.
    """
    servs_init = self.servers[:self.nodes_init]
    servs_in = [self.servers[i + self.nodes_init] for i in range(self.nodes_in)]
    servs_out = [self.servers[self.nodes_init - i - 1] for i in range(self.nodes_out)]
    rest = RestConnection(self.master)
    self._wait_for_stats_all_buckets(servs_init)
    self.log.info("current nodes : {0}".format([node.id for node in rest.node_statuses()]))
    self.log.info("adding nodes {0} to cluster".format(servs_in))
    self.log.info("removing nodes {0} from cluster".format(servs_out))
    result_nodes = set(servs_init + servs_in) - set(servs_out)
    rest = RestConnection(self.master)
    # Size buckets so the full quota is split across the maximum count.
    bucket_num = rest.get_internalSettings("maxBucketCount")
    self.bucket_size = self.quota / bucket_num
    self.log.info('total %s buckets will be created with size %s MB' % (bucket_num, self.bucket_size))
    self.cluster.create_default_bucket(self.master, self.bucket_size, self.num_replicas)
    self.buckets.append(Bucket(name="default", authType="sasl", saslPassword="",
                               num_replicas=self.num_replicas, bucket_size=self.bucket_size))
    # Remaining (bucket_num - 1) buckets split between SASL and standard.
    self._create_sasl_buckets(self.master, (bucket_num - 1) / 2)
    self._create_standard_buckets(self.master, bucket_num - 1 - (bucket_num - 1) / 2)
    gen = BlobGenerator('mike', 'mike-', self.value_size, end=self.num_items)
    self._load_all_buckets(self.master, gen, "create", 0)
    self._wait_for_stats_all_buckets(servs_init)
    # Mutate data concurrently with the rebalance to stress data movement.
    rebalance = self.cluster.async_rebalance(servs_init, servs_in, servs_out)
    self._async_load_all_buckets(self.master, gen, "update", 0)
    rebalance.result()
    self.verify_cluster_stats(result_nodes)
开发者ID:ashvindersingh,项目名称:testrunner,代码行数:30,代码来源:rebalanceinout.py
示例20: test_compare_views_all_nodes_x_docs
def test_compare_views_all_nodes_x_docs(self):
    """Query a spatial view on every node and verify identical results.

    Creates a view over num-docs documents, then queries each cluster node
    (not just the master) and checks every node returns the full inserted
    key set.
    """
    num_docs = self.helper.input.param("num-docs", 100)
    self.log.info("description : creates view on {0} documents, queries "
                  "all nodes (not only the master node) and compares "
                  "if the results are all the same".format(num_docs))
    design_name = "dev_test_compare_views_{0}_docs".format(num_docs)
    # Random prefix keeps this run's keys distinct from earlier runs.
    prefix = str(uuid.uuid4())[:7]
    inserted_keys = self._setup_index(design_name, num_docs, prefix)
    params = {"connection_timeout": 60000, "full_set": True}
    # Query every single node and verify
    for node in self.helper.rest.get_nodes():
        node_rest = RestConnection({"ip": node.ip,
                                    "port": node.port,
                                    "username": self.helper.master.rest_username,
                                    "password": self.helper.master.rest_password})
        results = node_rest.spatial_results(self.helper.bucket, design_name,
                                            params, None)
        self.helper.verify_result(inserted_keys, self.helper.get_keys(results))
开发者ID:mschoch,项目名称:testrunner,代码行数:25,代码来源:spatialviewtests.py
注:本文中的membase.api.rest_client.RestConnection类示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论