本文整理汇总了Python中memcached.helper.data_helper.MemcachedClientHelper类的典型用法代码示例。如果您正苦于以下问题:Python MemcachedClientHelper类的具体用法?Python MemcachedClientHelper怎么用?Python MemcachedClientHelper使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了MemcachedClientHelper类的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: system_stats
def system_stats(self, nodes, pnames, frequency, verbosity=False):
shells = []
for node in nodes:
try:
bucket = RestConnection(node).get_buckets()[0].name
MemcachedClientHelper.direct_client(node, bucket)
shells.append(RemoteMachineShellConnection(node))
except:
pass
d = {"snapshots": []}
# "pname":"x","pid":"y","snapshots":[{"time":time,"value":value}]
start_time = str(self._task["time"])
while not self._aborted():
time.sleep(frequency)
current_time = time.time()
i = 0
for shell in shells:
node = nodes[i]
unique_id = node.ip+'-'+start_time
for pname in pnames:
obj = RemoteMachineHelper(shell).is_process_running(pname)
if obj and obj.pid:
value = self._extract_proc_info(shell, obj.pid)
value["name"] = pname
value["id"] = obj.pid
value["unique_id"] = unique_id
value["time"] = current_time
value["ip"] = node.ip
d["snapshots"].append(value)
i += 1
self._task["systemstats"] = d["snapshots"]
print " finished system_stats"
开发者ID:jchris,项目名称:testrunner,代码行数:33,代码来源:stats.py
示例2: iostats
def iostats(self, nodes, frequency, verbosity=False):
shells = []
for node in nodes:
try:
bucket = RestConnection(node).get_buckets()[0].name
MemcachedClientHelper.direct_client(node, bucket)
shells.append(RemoteMachineShellConnection(node))
except:
pass
self._task["iostats"] = []
print "started capturing io stats"
while not self._aborted():
time.sleep(frequency)
print "collecting io_stats"
for shell in shells:
kB_read, kB_wrtn = self._extract_io_info(shell)
if kB_read and kB_wrtn:
self._task["iostats"].append({"time": time.time(),
"ip": shell.ip,
"read": kB_read,
"write": kB_wrtn})
print "finished capturing io stats"
开发者ID:IrynaMironava,项目名称:testrunner,代码行数:26,代码来源:stats.py
示例3: common_tearDown
def common_tearDown(servers, testcase):
    """Restore all servers, wait for the cluster to come back up, flush and
    delete every bucket, then clean up the cluster.

    Bug fixed: this is a plain function (no `self` parameter), yet the flush
    block referenced `self._servers[0]`; the resulting NameError was silently
    swallowed by the broad except, so buckets were never flushed. It now uses
    the `servers` argument.
    """
    log = logger.Logger.get_logger()
    log.info(
        "============== common_tearDown was started for test #{0} {1} ==============".format(
            testcase.case_number, testcase._testMethodName
        )
    )
    RemoteUtilHelper.common_basic_setup(servers)
    log.info("10 seconds delay to wait for couchbase-server to start")
    time.sleep(10)
    ClusterOperationHelper.wait_for_ns_servers_or_assert(
        servers, testcase, wait_time=AutoFailoverBaseTest.MAX_FAIL_DETECT_TIME * 15, wait_if_warmup=True
    )
    try:
        rest = RestConnection(servers[0])
        buckets = rest.get_buckets()
        for bucket in buckets:
            MemcachedClientHelper.flush_bucket(servers[0], bucket.name)
    except Exception:
        # Flushing is best effort; failures must not abort teardown.
        pass
    BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
    ClusterOperationHelper.cleanup_cluster(servers)
    log.info(
        "============== common_tearDown was finished for test #{0} {1} ==============".format(
            testcase.case_number, testcase._testMethodName
        )
    )
开发者ID:jason-hou,项目名称:testrunner,代码行数:28,代码来源:autofailovertests.py
示例4: insert_key
def insert_key(serverInfo, bucket_name, count, size):
    """Load `count` thousand keys, each with a `size`-byte value and a
    random flag, into the given bucket through a proxy client."""
    client = MemcachedClientHelper.proxy_client(serverInfo, bucket_name)
    payload = MemcachedClientHelper.create_value("*", size)
    total = count * 1000
    for index in range(total):
        client.set("key_" + str(index), 0, random.randint(1, 999), payload)
开发者ID:IrynaMironava,项目名称:testrunner,代码行数:7,代码来源:autocompaction.py
示例5: set_get_test
def set_get_test(self, value_size, number_of_items):
    """Create buckets with replica counts 0-3, fill each with a fixed value,
    then read every successfully inserted key back and verify its value.

    Bug fixed: failed sets were silently discarded, so `len(rejected)` in the
    fill loop's bound could never grow and the loop could spin forever if the
    server kept rejecting sets. Rejected keys are now recorded.
    """
    fixed_value = MemcachedClientHelper.create_value("S", value_size)
    specs = [
        ("default", 0),
        ("set-get-bucket-replica-1", 1),
        ("set-get-bucket-replica-2", 2),
        ("set-get-bucket-replica-3", 3),
    ]
    serverInfo = self.master
    rest = RestConnection(serverInfo)
    # Split the node's memory quota evenly across the four buckets.
    bucket_ram = int(rest.get_nodes_self().memoryQuota / 4)
    mcport = rest.get_nodes_self().memcached
    for name, replica in specs:
        rest.create_bucket(name, bucket_ram, "sasl", "password", replica, mcport)
    bucket_data = {}
    buckets = RestConnection(serverInfo).get_buckets()
    for bucket in buckets:
        bucket_data[bucket.name] = {}
        ready = BucketOperationHelper.wait_for_memcached(serverInfo, bucket.name)
        self.test.assertTrue(ready, "wait_for_memcached failed")
        client = MemcachedClientHelper.direct_client(serverInfo, bucket.name)
        inserted = []
        rejected = []
        # Fill until number_of_items succeed, or bail out once that many
        # sets have been rejected.
        while len(inserted) <= number_of_items and len(rejected) <= number_of_items:
            key = str(uuid.uuid4())
            try:
                client.set(key, 0, 0, fixed_value)
                inserted.append(key)
            except mc_bin_client.MemcachedError:
                # Track failures so the loop bound on `rejected` can fire.
                rejected.append(key)
        # Verify every inserted key, retrying on transient get errors.
        retry = 0
        remaining_items = list(inserted)
        msg = "memcachedError : {0} - unable to get a pre-inserted key : {1}"
        while retry < 10 and len(remaining_items) > 0:
            verified_keys = []
            for key in remaining_items:
                try:
                    flag, keyx, value = client.get(key=key)
                    if not value == fixed_value:
                        self.test.fail("value mismatch for key {0}".format(key))
                    verified_keys.append(key)
                except mc_bin_client.MemcachedError as error:
                    self.log.error(msg.format(error.status, key))
                    retry += 1
            for key in verified_keys:
                remaining_items.remove(key)
        # Log at most 100 of the keys that never verified, then fail if any.
        print_count = 0
        for key in remaining_items:
            if print_count > 100:
                break
            print_count += 1
            self.log.error("unable to verify key : {0}".format(key))
        if remaining_items:
            self.test.fail("unable to verify {0} keys".format(len(remaining_items)))
开发者ID:DavidAlphaFox,项目名称:couchbase,代码行数:59,代码来源:setgettests.py
示例6: test_checkpointing_with_full_rollback
def test_checkpointing_with_full_rollback(self):
    """Force an XDCR full rollback: stop persistence, load and mutate data,
    kill memcached on the master so a node with no persisted data takes
    over, fail it over, then verify goxdcr logged a DCP rollback to seqno 0.
    """
    bucket = self.src_cluster.get_buckets()[0]
    nodes = self.src_cluster.get_nodes()
    # Stop Persistence on Node A & Node B
    for node in nodes:
        mem_client = MemcachedClientHelper.direct_client(node, bucket)
        mem_client.stop_persistence()
    # Load data while replication is paused so a checkpoint can form cleanly.
    self.src_cluster.pause_all_replications()
    gen = BlobGenerator("C1-", "C1-", self._value_size, end=self._num_items)
    self.src_cluster.load_all_buckets_from_generator(gen)
    self.src_cluster.resume_all_replications()
    # Wait two checkpoint intervals so at least one checkpoint is recorded.
    self.sleep(self._checkpoint_interval * 2)
    self.get_and_validate_latest_checkpoint()
    # Perform mutations on the bucket
    self.async_perform_update_delete()
    self.sleep(self._wait_timeout)
    # Kill memcached on Node A so that Node B becomes master
    shell = RemoteMachineShellConnection(self.src_cluster.get_master_node())
    shell.kill_memcached()
    # Start persistence on Node B
    mem_client = MemcachedClientHelper.direct_client(nodes[1], bucket)
    mem_client.start_persistence()
    # Failover Node B
    failover_task = self.src_cluster.async_failover()
    failover_task.result()
    # Wait for Failover & rollback to complete
    self.sleep(self._wait_timeout * 5)
    goxdcr_log = NodeHelper.get_goxdcr_log_dir(self._input.servers[0]) \
                 + '/goxdcr.log*'
    # The goxdcr log must show both a DCP rollback and a reset to seqno 0.
    count1 = NodeHelper.check_goxdcr_log(
        nodes[0],
        "Received rollback from DCP stream",
        goxdcr_log)
    self.assertGreater(count1, 0, "full rollback not received from DCP as expected")
    self.log.info("full rollback received from DCP as expected")
    count2 = NodeHelper.check_goxdcr_log(
        nodes[0],
        "Rolled back startSeqno to 0",
        goxdcr_log)
    self.assertGreater(count2, 0, "startSeqno not rolled back to 0 as expected")
    self.log.info("startSeqno rolled back to 0 as expected")
    shell.disconnect()
开发者ID:arod1987,项目名称:testrunner,代码行数:56,代码来源:checkpointXDCR.py
示例7: common_tearDown
def common_tearDown(servers, testcase):
    """Bring the servers back up, wait for them to be ready, flush the
    default bucket (best effort), delete all buckets, and clean up the
    cluster."""
    log = logger.Logger.get_logger()
    RemoteUtilHelper.common_basic_setup(servers)
    log.info("10 seconds delay to wait for couchbase-server to start")
    time.sleep(10)
    ClusterOperationHelper.wait_for_ns_servers_or_assert(servers, testcase)
    try:
        MemcachedClientHelper.flush_bucket(servers[0], 'default')
    except Exception:
        # Flushing is best effort -- teardown continues regardless.
        pass
    BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
    ClusterOperationHelper.cleanup_cluster(servers)
    ClusterOperationHelper.wait_for_ns_servers_or_assert(servers, testcase)
开发者ID:jchris,项目名称:testrunner,代码行数:13,代码来源:autofailovertests.py
示例8: _test_backup_add_restore_bucket_with_expiration_key
def _test_backup_add_restore_bucket_with_expiration_key(self, replica):
    """Back up a bucket full of keys carrying a 60-second TTL, delete and
    recreate the bucket, restore the backup, wait past the TTL, then verify
    every restored key has expired (gets return status 1 / not-found).

    Bug fixed: the assertion message for the ep_flusher_todo wait wrongly
    said "ep_queue_size", making failures misleading.
    """
    bucket = "default"
    rest = RestConnection(self.master)
    info = rest.get_nodes_self()
    # Use two thirds of the node's memory quota for the bucket.
    size = int(info.memoryQuota * 2.0 / 3.0)
    rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=info.moxi, replicaNumber=replica)
    BucketOperationHelper.wait_for_memcached(self.master, bucket)
    client = MemcachedClientHelper.direct_client(self.master, bucket)
    expiry = 60
    test_uuid = uuid.uuid4()
    keys = ["key_%s_%d" % (test_uuid, i) for i in range(5000)]
    self.log.info("pushing keys with expiry set to {0}".format(expiry))
    for key in keys:
        try:
            client.set(key, expiry, 0, key)
        except mc_bin_client.MemcachedError as error:
            msg = "unable to push key : {0} to bucket : {1} error : {2}"
            self.log.error(msg.format(key, client.vbucketId, error.status))
            self.fail(msg.format(key, client.vbucketId, error.status))
    client.close()
    self.log.info("inserted {0} keys with expiry set to {1}".format(len(keys), expiry))
    # Wait for everything to be persisted before taking the backup.
    ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_queue_size', 0)
    self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
    ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_flusher_todo', 0)
    self.assertTrue(ready, "wait_for ep_flusher_todo == 0 failed")
    node = RestConnection(self.master).get_nodes_self()
    output, error = self.shell.execute_command(self.perm_command)
    self.shell.log_command_output(output, error)
    backupHelper = BackupHelper(self.master, self)
    backupHelper.backup(bucket, node, self.remote_tmp_folder)
    # Recreate the bucket from scratch and restore into it.
    BucketOperationHelper.delete_bucket_or_assert(self.master, bucket, self)
    rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=info.moxi)
    BucketOperationHelper.wait_for_memcached(self.master, bucket)
    backupHelper.restore(self.remote_tmp_folder)
    # Sleep past the TTL so every restored key should be expired.
    time.sleep(60)
    client = MemcachedClientHelper.direct_client(self.master, bucket)
    self.log.info('verifying that all those keys have expired...')
    for key in keys:
        try:
            client.get(key=key)
            msg = "expiry was set to {0} but key: {1} did not expire after waiting for {2}+ seconds"
            self.fail(msg.format(expiry, key, expiry))
        except mc_bin_client.MemcachedError as error:
            # Status 1 is the expected "not found" for an expired key.
            self.assertEquals(error.status, 1,
                              msg="expected error code {0} but saw error code {1}".format(1, error.status))
    client.close()
    self.log.info("verified that those keys inserted with expiry set to {0} have expired".format(expiry))
开发者ID:jchris,项目名称:testrunner,代码行数:49,代码来源:backuptests.py
示例9: setUp
def setUp(self):
    """Initialise a one-node cluster on the first configured server, create
    the default bucket, and open both a direct memcached client and a moxi
    (proxy) client against it."""
    self.log = logger.Logger.get_logger()
    self.params = TestInputSingleton.input.test_params
    self.master = TestInputSingleton.input.servers[0]
    rest = RestConnection(self.master)
    rest.init_cluster(self.master.rest_username, self.master.rest_password)
    info = rest.get_nodes_self()
    # Give the cluster the full memcached memory reservation as its quota.
    rest.init_cluster_memoryQuota(self.master.rest_username, self.master.rest_password,
                                  memoryQuota=info.mcdMemoryReserved)
    # Start from a clean cluster before creating the test bucket.
    ClusterOperationHelper.cleanup_cluster([self.master])
    ClusterOperationHelper.wait_for_ns_servers_or_assert([self.master], self)
    self._create_default_bucket()
    # Keys created during the test are recorded here for later cleanup.
    self.keys_cleanup = []
    # Generous 600s timeouts: the node may still be warming up after setup.
    self.onenodemc = MemcachedClientHelper.direct_client(self.master, "default", timeout=600)
    self.onenodemoxi = MemcachedClientHelper.proxy_client(self.master, "default", timeout=600)
开发者ID:DavidAlphaFox,项目名称:couchbase,代码行数:15,代码来源:memcapable.py
示例10: common_tearDown
def common_tearDown(servers, testcase):
    """Restart membase on every server, wait for readiness, flush the
    default bucket (best effort), clean the cluster, then delete all
    buckets."""
    for server in servers:
        RemoteMachineShellConnection(server).start_membase()
    log = logger.Logger.get_logger()
    log.info("10 seconds delay to wait for membase-server to start")
    time.sleep(10)
    ClusterOperationHelper.wait_for_ns_servers_or_assert(servers, testcase)
    try:
        MemcachedClientHelper.flush_bucket(servers[0], 'default', 11211)
    except Exception:
        # Flushing is best effort; teardown continues regardless.
        pass
    ClusterOperationHelper.cleanup_cluster(servers)
    ClusterOperationHelper.wait_for_ns_servers_or_assert(servers, testcase)
    BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
开发者ID:IrynaMironava,项目名称:testrunner,代码行数:15,代码来源:combotests.py
示例11: test_time_sync_threshold_setting_rest_call
def test_time_sync_threshold_setting_rest_call(self):
    """Set the CAS drift ahead/behind thresholds via REST and verify the
    corresponding memcached stats reflect the new values. The REST call
    takes milliseconds while the stats report microseconds, hence * 1000.

    Bug fixed: the behind-threshold failure message wrongly said "Ahead
    threshold incorrect", making failures misleading.
    """
    self.log.info("starting test_time_sync_threshold_setting_rest_call")
    # bucket is created with lww in base test case using the LWW parameter
    client = MemcachedClientHelper.direct_client(self.servers[0], self.buckets[0])
    rest = RestConnection(self.master)
    self.assertTrue(
        rest.set_cas_drift_threshold(self.buckets[0], 100000, 200000), "Unable to set the CAS drift threshold"
    )
    time.sleep(15)  # take a few seconds for the stats to settle in
    stats = client.stats()
    self.assertTrue(
        int(stats["ep_hlc_drift_ahead_threshold_us"]) == 100000 * 1000,
        "Ahead threshold incorrect. Expected {0} actual {1}".format(
            100000 * 1000, stats["ep_hlc_drift_ahead_threshold_us"]
        ),
    )
    self.assertTrue(
        int(stats["ep_hlc_drift_behind_threshold_us"]) == 200000 * 1000,
        "Behind threshold incorrect. Expected {0} actual {1}".format(
            200000 * 1000, stats["ep_hlc_drift_behind_threshold_us"]
        ),
    )
开发者ID:membase,项目名称:testrunner,代码行数:28,代码来源:lww_stats.py
示例12: test_time_sync_threshold_setting
def test_time_sync_threshold_setting(self):
    """Verify the HLC drift-ahead threshold stat matches the default, then
    halve it via cbepctl and verify the stat reflects the new value."""
    self.log.info("starting test_time_sync_threshold_setting")
    # bucket is created with lww in base test case using the LWW parameter
    # get the stats
    client = MemcachedClientHelper.direct_client(self.servers[0], self.buckets[0])
    ahead_threshold = int(client.stats()["ep_hlc_drift_ahead_threshold_us"])
    self.assertTrue(
        ahead_threshold == LWWStatsTests.DEFAULT_THRESHOLD,
        "Ahead threshold mismatch expected: {0} actual {1}".format(
            LWWStatsTests.DEFAULT_THRESHOLD, ahead_threshold
        ),
    )
    # change the setting and verify it is per the new setting - this may or may not be supported
    shell = RemoteMachineShellConnection(self.servers[0])
    # NOTE(review): the trailing space in "hlc_drift_ahead_threshold_us " and
    # the DUMMY_VBUCKET suffix appear to be required by cbepctl's argument
    # format -- confirm before changing them.
    output, error = shell.execute_cbepctl(
        self.buckets[0],
        "",
        "set vbucket_param",
        "hlc_drift_ahead_threshold_us ",
        str(LWWStatsTests.DEFAULT_THRESHOLD / 2) + LWWStatsTests.DUMMY_VBUCKET,
    )
    if len(error) > 0:
        self.fail("Failed to set the drift counter threshold, please check the logs.")
    ahead_threshold = int(client.stats()["ep_hlc_drift_ahead_threshold_us"])
    self.assertTrue(
        ahead_threshold == LWWStatsTests.DEFAULT_THRESHOLD / 2,
        "Ahead threshold mismatch expected: {0} actual {1}".format(
            LWWStatsTests.DEFAULT_THRESHOLD / 2, ahead_threshold
        ),
    )
开发者ID:membase,项目名称:testrunner,代码行数:35,代码来源:lww_stats.py
示例13: _verify_data
def _verify_data(self, version):
    """Verify, through a vbucket-aware (moxi) client, that every key in
    self.updated_keys has a value ending in `version`; fail listing how
    many keys could not be verified.

    Bug fixed: the error log's format string used {0} for both the error
    status and the key, so the key was never shown; the second placeholder
    is now {1}. Dead commented-out code was removed.
    """
    rest = RestConnection(self.servers[0])
    moxi = MemcachedClientHelper.proxy_client(self.servers[0], self.bucket_name)
    index = 0
    all_verified = True
    keys_failed = []
    for key in self.updated_keys:
        try:
            index += 1
            flag, keyx, value = moxi.get(key=key)
            self.assertTrue(value.endswith(version),
                            msg='values do not match . key value should endwith {0}'.format(version))
        except MemcachedError as error:
            self.log.error(error)
            self.log.error(
                "memcachedError : {0} - unable to get a pre-inserted key : {1}".format(error.status, key))
            keys_failed.append(key)
            all_verified = False
    self.assertTrue(all_verified,
                    'unable to verify #{0} keys'.format(len(keys_failed)))
开发者ID:steveyen,项目名称:testrunner,代码行数:27,代码来源:replicationtests.py
示例14: _insert_data
def _insert_data(self, howmany):
    """Store `howmany` uniquely-named items (key == value) into the default
    bucket through a proxy client, then close the client."""
    self.onenodemc = MemcachedClientHelper.proxy_client(self.master, "default")
    for i in range(0, howmany):
        doc = "{0}-{1}".format(str(uuid.uuid4()), i)
        self.onenodemc.set(doc, 0, 0, doc)
    self.log.info("inserted {0} items".format(howmany))
    self.onenodemc.close()
开发者ID:paul-guo-,项目名称:appstack,代码行数:7,代码来源:warmupcluster.py
示例15: run
def run(self):
    """Worker body: load num_items keys with short random values.

    NOTE: `server`, `bucket`, `num_items` and `log` are resolved from the
    enclosing scope, not from `self`.
    """
    conn = MemcachedClientHelper.direct_client(server, bucket)
    for idx in range(num_items):
        doc_key = "key-{0}".format(idx)
        doc_val = "value-{0}".format(str(uuid.uuid4())[:7])
        conn.set(doc_key, 0, 0, doc_val, 0)
    log.info("Loaded {0} key".format(num_items))
开发者ID:IrynaMironava,项目名称:testrunner,代码行数:7,代码来源:checkpoint.py
示例16: _restart_memcache
def _restart_memcache(self, bucket_name):
    """Kill memcached on every node, then wait up to ~60s for it to come
    back, detected by the "uptime" stat dropping below the value captured
    before the kill (a restarted process has a smaller uptime)."""
    rest = RestConnection(self.master)
    nodes = rest.node_statuses()
    self._kill_nodes(nodes, bucket_name)
    # NOTE(review): `start` is set once, so the 60-second budget is shared
    # across all servers rather than per server -- confirm this is intended.
    start = time.time()
    memcached_restarted = False
    for server in self.servers[:self.nodes_init]:
        mc = None
        while time.time() - start < 60:
            try:
                mc = MemcachedClientHelper.direct_client(server, bucket_name)
                stats = mc.stats()
                new_uptime = int(stats["uptime"])
                # NOTE(review): pre_warmup_stats is indexed here by "ip:port"
                # directly, without a bucket level -- confirm this matches how
                # the pre-warmup snapshot was stored.
                self.log.info("warmutime%s:%s" % (new_uptime, self.pre_warmup_stats["%s:%s" % (server.ip, server.port)]["uptime"]))
                # A smaller uptime than before the kill means the process restarted.
                if new_uptime < self.pre_warmup_stats["%s:%s" % (server.ip, server.port)]["uptime"]:
                    self.log.info("memcached restarted...")
                    memcached_restarted = True
                    break;
            except Exception:
                self.log.error("unable to connect to %s:%s" % (server.ip, server.port))
                if mc:
                    mc.close()
                # Brief pause before retrying the connection.
                time.sleep(1)
        if not memcached_restarted:
            self.fail("memcached did not start %s:%s" % (server.ip, server.port))
开发者ID:strategist922,项目名称:testrunner,代码行数:25,代码来源:basetestcase.py
示例17: _stats_befor_warmup
def _stats_befor_warmup(self, bucket_name):
    """Snapshot per-server memcached stats before a warmup into
    self.pre_warmup_stats, keyed by bucket name then "ip:port": uptime,
    curr_items_tot, curr_items, any extra stats named in the
    `stats_monitor` param, and (when access logs are disabled) any stats
    named in `warmup_stats_monitor` from the "warmup" stat group.

    Bug fixed: `is not ""` compared object identity against a string
    literal; CPython only sometimes interns strings, so the check was
    unreliable. Equality (`!=`) is used instead.
    """
    self.pre_warmup_stats[bucket_name] = {}
    self.stats_monitor = self.input.param("stats_monitor", "")
    self.warmup_stats_monitor = self.input.param("warmup_stats_monitor", "")
    if self.stats_monitor != "":
        self.stats_monitor = self.stats_monitor.split(";")
    if self.warmup_stats_monitor != "":
        self.warmup_stats_monitor = self.warmup_stats_monitor.split(";")
    for server in self.servers:
        mc_conn = MemcachedClientHelper.direct_client(server, bucket_name, self.timeout)
        node_key = "%s:%s" % (server.ip, server.port)
        node_stats = {}
        self.pre_warmup_stats[bucket_name][node_key] = node_stats
        # One stats call per server gives a consistent snapshot instead of
        # re-querying for every individual stat.
        all_stats = mc_conn.stats("")
        node_stats["uptime"] = all_stats["uptime"]
        node_stats["curr_items_tot"] = all_stats["curr_items_tot"]
        node_stats["curr_items"] = all_stats["curr_items"]
        for stat_to_monitor in self.stats_monitor:
            node_stats[stat_to_monitor] = all_stats[stat_to_monitor]
        if self.without_access_log:
            warmup_stats = mc_conn.stats("warmup")
            for stat_to_monitor in self.warmup_stats_monitor:
                node_stats[stat_to_monitor] = warmup_stats[stat_to_monitor]
        mc_conn.close()
开发者ID:Boggypop,项目名称:testrunner,代码行数:30,代码来源:basetestcase.py
示例18: incr_test
def incr_test(self, key, exp, flags, value, incr_amt, decr_amt, incr_time):
    """Exercise increment behaviour: optionally set `key`, optionally wait
    for it to expire, optionally decrement it, then increment it
    `incr_time` times by `incr_amt` and return the final counter value.

    The module-level `update_value` global is updated as a side effect.
    """
    global update_value
    serverInfo = self.master
    client = MemcachedClientHelper.proxy_client(serverInfo, self.bucket_name)
    if key != 'no_key':
        client.set(key, exp, flags, value)
    if exp:
        # Wait a little beyond the TTL so the key is definitely expired.
        self.log.info('Wait {0} seconds for the key expired' .format(exp + 2))
        time.sleep(exp + 2)
    if decr_amt:
        c, d = client.decr(key, decr_amt)
        self.log.info('decr amt {0}' .format(c))
    try:
        for _ in range(incr_time):
            update_value, cas = client.incr(key, incr_amt)
        self.log.info('incr {0} times with value {1}'.format(incr_time, incr_amt))
        return update_value
    except mc_bin_client.MemcachedError as error:
        self.log.info('memcachedError : {0}'.format(error.status))
        self.test.fail("unable to increment value: {0}".format(incr_amt))
开发者ID:DavidAlphaFox,项目名称:couchbase,代码行数:25,代码来源:memcapable.py
示例19: test_append_wrong_cas
def test_append_wrong_cas(self):
    """Append repeatedly with random (almost certainly wrong) CAS values
    and log how much mem_used grew; wrong-CAS appends should be rejected,
    so memory usage should stay roughly flat."""
    stats = self.onenodemc.stats()
    initial_mem_used = -1
    if "mem_used" in stats:
        initial_mem_used = int(stats["mem_used"])
    self.assertTrue(initial_mem_used > 0)
    doc_id = str(uuid.uuid4())
    payload = MemcachedClientHelper.create_value("*", 5 * 1024)
    self.onenodemc.set(doc_id, 0, 0, payload)
    flags_v, cas_v, get_v = self.onenodemc.get(doc_id)
    # One legitimate append with the correct CAS...
    self.onenodemc.append(doc_id, payload, cas_v)
    # ...followed by a storm of appends with random CAS values.
    for _ in range(0, 50000):
        try:
            self.onenodemc.append(doc_id, payload, random.randint(0, 1000))
        except:
            # Rejection is the expected outcome; ignore the error.
            pass
    stats = self.onenodemc.stats()
    if "mem_used" in stats:
        delta = int(stats["mem_used"]) - initial_mem_used
        self.log.info("initial mem_used {0}, current mem_used {1}".format(initial_mem_used, stats["mem_used"]))
        self.log.info(delta)
开发者ID:DavidAlphaFox,项目名称:couchbase,代码行数:28,代码来源:memcapable.py
示例20: run_test
def run_test(self):
    """Drive the bucket into a DGM (disk-greater-than-memory) state by
    loading batches of items until the memory/resident-ratio thresholds are
    crossed, wait for persistence, then repeatedly run 300-second storms of
    concurrent `stats all` / `stats reset` requests while reads are in
    flight."""
    ep_threshold = self.input.param("ep_threshold", "ep_mem_low_wat")
    active_resident_threshold = int(self.input.param("active_resident_threshold", 10))
    mc = MemcachedClientHelper.direct_client(self.servers[0], self.bucket_name)
    stats = mc.stats()
    # Memory threshold defaults to the bucket's low watermark stat.
    threshold = int(self.input.param("threshold", stats[ep_threshold]))
    threshold_reached = False
    self.num_items = self.input.param("items", 10000)
    self._load_doc_data_all_buckets("create")
    # load items till reached threshold or mem-ratio is less than resident ratio threshold
    while not threshold_reached:
        mem_used = int(mc.stats()["mem_used"])
        if mem_used < threshold or int(mc.stats()["vb_active_perc_mem_resident"]) >= active_resident_threshold:
            self.log.info(
                "mem_used and vb_active_perc_mem_resident_ratio reached at %s/%s and %s "
                % (mem_used, threshold, mc.stats()["vb_active_perc_mem_resident"])
            )
            # Not there yet: load another batch of `items` documents.
            items = self.num_items
            self.num_items += self.input.param("items", 10000)
            self._load_doc_data_all_buckets("create", items)
        else:
            threshold_reached = True
            self.log.info("DGM state achieved!!!!")
    # wait for draining of data before restart and warm up
    for bucket in self.buckets:
        RebalanceHelper.wait_for_persistence(self.nodes_server[0], bucket)
    # NOTE(review): this loop has no break or return, so the 300-second
    # stats storm repeats forever -- confirm whether that is intended or a
    # missing exit condition.
    while 1:
        # read_data_task = self.cluster.async_verify_data(self.master, self.buckets[0], self.buckets[0].kvs[1])
        read_data_task = Thread(target=self._run_get)
        read_data_task.start()
        # 5 threads to run stats all and reset asynchronously
        start = time.time()
        while (time.time() - start) < 300:
            stats_all_thread = []
            stats_reset_thread = []
            for i in xrange(self.threads_to_run):
                stat_str = ""
                stats_all_thread.append(Thread(target=self._get_stats, args=[stat_str]))
                stats_all_thread[i].start()
                stat_str = "reset"
                stats_reset_thread.append(Thread(target=self._get_stats, args=[stat_str]))
                stats_reset_thread[i].start()
            for i in xrange(self.threads_to_run):
                stats_all_thread[i].join()
                stats_reset_thread[i].join()
            del stats_all_thread
            del stats_reset_thread
        # read_data_task.result()
        read_data_task.join()
开发者ID:DavidAlphaFox,项目名称:couchbase,代码行数:60,代码来源:stats_ops.py
注:本文中的memcached.helper.data_helper.MemcachedClientHelper类示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论