本文整理汇总了Python中pycassa.ConnectionPool类的典型用法代码示例。如果您正苦于以下问题:Python ConnectionPool类的具体用法?Python ConnectionPool怎么用?Python ConnectionPool使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了ConnectionPool类的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: test_server_list_func
def test_server_list_func(self):
    """A callable ``server_list`` must be invoked exactly once when the
    pool is constructed, and its result recorded by the listener."""
    watcher = _TestListener()
    conn_pool = ConnectionPool('PycassaTestKeyspace', server_list=_get_list,
                               listeners=[watcher], prefill=False)
    # The callable returned ['foo:bar'] and was consulted a single time.
    assert_equal(watcher.serv_list, ['foo:bar'])
    assert_equal(watcher.list_count, 1)
    conn_pool.dispose()
开发者ID:anisnasir,项目名称:pycassa,代码行数:7,代码来源:test_connection_pooling.py
示例2: test_server_list_func
def test_server_list_func(self):
    """A callable ``server_list`` must be invoked exactly once at pool
    construction, with the result and call count captured in the stats."""
    logger_obj = StatsLoggerWithListStorage()
    conn_pool = ConnectionPool('PycassaTestKeyspace', server_list=_get_list,
                               listeners=[logger_obj], prefill=False)
    # The callable produced ['foo:bar'] and was used a single time.
    assert_equal(logger_obj.serv_list, ['foo:bar'])
    assert_equal(logger_obj.stats['list'], 1)
    conn_pool.dispose()
开发者ID:pista329,项目名称:pycassa-thriftpy,代码行数:7,代码来源:test_connection_pooling.py
示例3: test_queue_failure_on_retry
def test_queue_failure_on_retry(self):
    """Exhaust the retry budget when every connection — and every
    replacement connection — fails, and verify the failure count."""
    listener = _TestListener()
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True, max_retries=3, # allow 3 retries
                          keyspace='PycassaTestKeyspace', credentials=_credentials,
                          listeners=[listener], use_threadlocal=False,
                          server_list=['localhost:9160', 'localhost:9160'])

    def raiser():
        raise IOError
    # Replace wrapper will open a connection to get the version, so if it
    # fails we need to retry as with any other connection failure
    pool._replace_wrapper = raiser

    # Corrupt all of the connections
    for i in range(5):
        conn = pool.get()
        setattr(conn, 'send_batch_mutate', conn._fail_once)
        conn._should_fail = True
        conn.return_to_pool()

    cf = ColumnFamily(pool, 'Standard1')
    # Initial attempt + 3 retries all fail -> MaximumRetryException.
    assert_raises(MaximumRetryException, cf.insert, 'key', {'col':'val', 'col2': 'val'})
    assert_equal(listener.failure_count, 4) # On the 4th failure, didn't retry
    pool.dispose()
开发者ID:anisnasir,项目名称:pycassa,代码行数:25,代码来源:test_connection_pooling.py
示例4: TestDefaultValidators
class TestDefaultValidators(unittest.TestCase):
    """Check that a CF-level default validator and a per-column validator
    each accept their own type and reject the other's."""

    def setUp(self):
        # Authenticated pool against the test keyspace.
        credentials = {"username": "jsmith", "password": "havebadpass"}
        self.pool = ConnectionPool(pool_size=5, keyspace="Keyspace1", credentials=credentials)
        self.cf_def_valid = ColumnFamily(self.pool, "DefaultValidator")

    def tearDown(self):
        # Remove every row written by the test, then release connections.
        for key, cols in self.cf_def_valid.get_range():
            self.cf_def_valid.remove(key)
        self.pool.dispose()

    def test_default_validated_columns(self):
        key = "key1"
        col_cf = {"aaaaaa": 1L}       # matches the CF default validator (long)
        col_cm = {"subcol": TIME1}    # matches 'subcol' column metadata (TimeUUID)
        col_ncf = {"aaaaaa": TIME1}   # wrong type for the CF default
        col_ncm = {"subcol": 1L}      # wrong type for 'subcol'
        # Both of these inserts work, as cf allows
        # longs and cm for 'subcol' allows TIMEUUIDs.
        self.cf_def_valid.insert(key, col_cf)
        self.cf_def_valid.insert(key, col_cm)
        assert self.cf_def_valid.get(key) == {"aaaaaa": 1L, "subcol": TIME1}
        # Mismatched types must be rejected client-side with TypeError.
        assert_raises(TypeError, self.cf_def_valid.insert, key, col_ncf)
        assert_raises(TypeError, self.cf_def_valid.insert, key, col_ncm)
开发者ID:trhowe,项目名称:pycassa,代码行数:28,代码来源:test_autopacking.py
示例5: TestValidators
class TestValidators(unittest.TestCase):
    """Insert and read back one value per typed validator column family,
    confirming each validator round-trips its own native Python type."""

    def setUp(self):
        # Authenticated pool plus one CF per validator type under test.
        credentials = {"username": "jsmith", "password": "havebadpass"}
        self.pool = ConnectionPool(pool_size=10, keyspace="Keyspace1", credentials=credentials)
        self.cf_valid_long = ColumnFamily(self.pool, "ValidatorLong")
        self.cf_valid_int = ColumnFamily(self.pool, "ValidatorInt")
        self.cf_valid_time = ColumnFamily(self.pool, "ValidatorTime")
        self.cf_valid_lex = ColumnFamily(self.pool, "ValidatorLex")
        self.cf_valid_ascii = ColumnFamily(self.pool, "ValidatorAscii")
        self.cf_valid_utf8 = ColumnFamily(self.pool, "ValidatorUTF8")
        self.cf_valid_bytes = ColumnFamily(self.pool, "ValidatorBytes")
        self.cfs = [
            self.cf_valid_long,
            self.cf_valid_int,
            self.cf_valid_time,
            self.cf_valid_lex,
            self.cf_valid_ascii,
            self.cf_valid_utf8,
            self.cf_valid_bytes,
        ]

    def tearDown(self):
        # Wipe every CF touched by the test, then release connections.
        for cf in self.cfs:
            for key, cols in cf.get_range():
                cf.remove(key)
        self.pool.dispose()

    def test_validated_columns(self):
        key = "key1"
        # Long validator round-trips a Python long.
        col = {"subcol": 1L}
        self.cf_valid_long.insert(key, col)
        assert self.cf_valid_long.get(key) == col
        # Integer validator round-trips an int.
        col = {"subcol": 1}
        self.cf_valid_int.insert(key, col)
        assert self.cf_valid_int.get(key) == col
        # TimeUUID validator round-trips a version-1 UUID.
        col = {"subcol": TIME1}
        self.cf_valid_time.insert(key, col)
        assert self.cf_valid_time.get(key) == col
        # LexicalUUID validator round-trips an arbitrary UUID.
        col = {"subcol": uuid.UUID(bytes="aaa aaa aaa aaaa")}
        self.cf_valid_lex.insert(key, col)
        assert self.cf_valid_lex.get(key) == col
        # ASCII validator round-trips a byte string.
        col = {"subcol": "aaa"}
        self.cf_valid_ascii.insert(key, col)
        assert self.cf_valid_ascii.get(key) == col
        # UTF8 validator round-trips a unicode string.
        col = {"subcol": u"a\u0020"}
        self.cf_valid_utf8.insert(key, col)
        assert self.cf_valid_utf8.get(key) == col
        # Bytes validator round-trips raw bytes.
        col = {"subcol": "aaa"}
        self.cf_valid_bytes.insert(key, col)
        assert self.cf_valid_bytes.get(key) == col
开发者ID:trhowe,项目名称:pycassa,代码行数:59,代码来源:test_autopacking.py
示例6: test_queue_pool
def test_queue_pool(self):
    """Walk the pool through checkout/overflow/checkin/close transitions
    and verify the listener counters after every step."""
    listener = _TestListener()
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True, pool_timeout=0.1, timeout=1,
                          keyspace='PycassaTestKeyspace', credentials=_credentials,
                          listeners=[listener], use_threadlocal=False)
    conns = []
    # Check out pool_size + max_overflow connections (5 prefilled + 5 overflow).
    for i in range(10):
        conns.append(pool.get())
    assert_equal(listener.connect_count, 10)
    assert_equal(listener.checkout_count, 10)
    # Pool is maxed out now
    assert_raises(NoConnectionAvailable, pool.get)
    assert_equal(listener.connect_count, 10)
    assert_equal(listener.max_count, 1)
    # First 5 go back into the queue (no closes yet)...
    for i in range(0, 5):
        pool.return_conn(conns[i])
    assert_equal(listener.close_count, 0)
    assert_equal(listener.checkin_count, 5)
    # ...overflow connections beyond pool_size are closed on return.
    for i in range(5, 10):
        pool.return_conn(conns[i])
    assert_equal(listener.close_count, 5)
    assert_equal(listener.checkin_count, 10)
    conns = []
    # These connections should come from the pool
    for i in range(5):
        conns.append(pool.get())
    assert_equal(listener.connect_count, 10)
    assert_equal(listener.checkout_count, 15)
    # But these will need to be made
    for i in range(5):
        conns.append(pool.get())
    assert_equal(listener.connect_count, 15)
    assert_equal(listener.checkout_count, 20)
    assert_equal(listener.close_count, 5)
    for i in range(10):
        conns[i].return_to_pool()
    assert_equal(listener.checkin_count, 20)
    assert_equal(listener.close_count, 10)
    # Double return must raise and must not change the counters.
    assert_raises(InvalidRequestError, conns[0].return_to_pool)
    assert_equal(listener.checkin_count, 20)
    assert_equal(listener.close_count, 10)
    print "in test:", id(conns[-1])
    assert_raises(InvalidRequestError, conns[-1].return_to_pool)
    assert_equal(listener.checkin_count, 20)
    assert_equal(listener.close_count, 10)
    pool.dispose()
    assert_equal(listener.dispose_count, 1)
开发者ID:anisnasir,项目名称:pycassa,代码行数:59,代码来源:test_connection_pooling.py
示例7: test_queue_pool
def test_queue_pool(self):
    """Walk the pool through checkout/overflow/checkin/dispose transitions
    and verify the stats-logger counters after every step."""
    stats_logger = StatsLoggerWithListStorage()
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True, pool_timeout=0.1, timeout=1,
                          keyspace='PycassaTestKeyspace', credentials=_credentials,
                          listeners=[stats_logger], use_threadlocal=False)
    conns = []
    # Check out pool_size + max_overflow connections (5 prefilled + 5 overflow).
    for i in range(10):
        conns.append(pool.get())
    assert_equal(stats_logger.stats['created']['success'], 10)
    assert_equal(stats_logger.stats['checked_out'], 10)
    # Pool is maxed out now
    assert_raises(NoConnectionAvailable, pool.get)
    assert_equal(stats_logger.stats['created']['success'], 10)
    assert_equal(stats_logger.stats['at_max'], 1)
    # First 5 go back into the queue (no disposals yet)...
    for i in range(0, 5):
        pool.return_conn(conns[i])
    assert_equal(stats_logger.stats['disposed']['success'], 0)
    assert_equal(stats_logger.stats['checked_in'], 5)
    # ...overflow connections beyond pool_size are disposed on return.
    for i in range(5, 10):
        pool.return_conn(conns[i])
    assert_equal(stats_logger.stats['disposed']['success'], 5)
    assert_equal(stats_logger.stats['checked_in'], 10)
    conns = []
    # These connections should come from the pool
    for i in range(5):
        conns.append(pool.get())
    assert_equal(stats_logger.stats['created']['success'], 10)
    assert_equal(stats_logger.stats['checked_out'], 15)
    # But these will need to be made
    for i in range(5):
        conns.append(pool.get())
    assert_equal(stats_logger.stats['created']['success'], 15)
    assert_equal(stats_logger.stats['checked_out'], 20)
    assert_equal(stats_logger.stats['disposed']['success'], 5)
    for i in range(10):
        conns[i].return_to_pool()
    assert_equal(stats_logger.stats['checked_in'], 20)
    assert_equal(stats_logger.stats['disposed']['success'], 10)
    # Double return raises and must not change the counters.
    assert_raises(InvalidRequestError, conns[0].return_to_pool)
    assert_equal(stats_logger.stats['checked_in'], 20)
    assert_equal(stats_logger.stats['disposed']['success'], 10)
    print("in test:", id(conns[-1]))
    # NOTE(review): unlike the listener-based variant of this test, a second
    # return here does not raise — presumably the stats pool tolerates it.
    conns[-1].return_to_pool()
    assert_equal(stats_logger.stats['checked_in'], 20)
    assert_equal(stats_logger.stats['disposed']['success'], 10)
    pool.dispose()
开发者ID:pista329,项目名称:pycassa-thriftpy,代码行数:58,代码来源:test_connection_pooling.py
示例8: test_pool_invalid_request
def test_pool_invalid_request(self):
    """Invalid requests must surface to the caller immediately; the pool
    should never retry them as if they were connection failures."""
    watcher = _TestListener()
    single_pool = ConnectionPool(pool_size=1, max_overflow=0, recycle=10000,
                                 prefill=True, max_retries=3,
                                 keyspace='PycassaTestKeyspace',
                                 credentials=_credentials,
                                 listeners=[watcher], use_threadlocal=False,
                                 server_list=['localhost:9160'])
    cf = ColumnFamily(single_pool, 'Standard1')
    # Make sure the pool doesn't hide and retries invalid requests
    assert_raises(InvalidRequestException, cf.add, 'key', 'col')
    assert_raises(NotFoundException, cf.get, 'none')
    single_pool.dispose()
开发者ID:anisnasir,项目名称:pycassa,代码行数:13,代码来源:test_connection_pooling.py
示例9: test_queue_threadlocal_retry_limit
def test_queue_threadlocal_retry_limit(self):
    """With a thread-local pool, the retry budget (max_retries=3) is
    exhausted after the initial attempt plus three retries."""
    logger_obj = StatsLoggerWithListStorage()
    tl_pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                             prefill=True, max_retries=3,  # allow 3 retries
                             keyspace='PycassaTestKeyspace', credentials=_credentials,
                             listeners=[logger_obj], use_threadlocal=True,
                             server_list=['localhost:9160', 'localhost:9160'])
    # Sabotage every pooled connection so send_batch_mutate fails once each.
    for _ in range(5):
        broken = tl_pool.get()
        broken.send_batch_mutate = broken._fail_once
        broken._should_fail = True
        broken.return_to_pool()
    cf = ColumnFamily(tl_pool, 'Standard1')
    assert_raises(MaximumRetryException, cf.insert, 'key', {'col': 'val', 'col2': 'val'})
    assert_equal(logger_obj.stats['failed'], 4)  # On the 4th failure, didn't retry
    tl_pool.dispose()
开发者ID:pista329,项目名称:pycassa-thriftpy,代码行数:20,代码来源:test_connection_pooling.py
示例10: test_queue_failure_with_no_retries
def test_queue_failure_with_no_retries(self):
    """Counter mutations are not idempotent, so a failed counter insert
    must not be retried — exactly one failure is recorded."""
    watcher = _TestListener()
    counter_pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                                  prefill=True, max_retries=3,  # allow 3 retries
                                  keyspace='PycassaTestKeyspace', credentials=_credentials,
                                  listeners=[watcher], use_threadlocal=False,
                                  server_list=['localhost:9160', 'localhost:9160'])
    # Sabotage every pooled connection so send_batch_mutate fails once each.
    for _ in range(5):
        broken = counter_pool.get()
        broken.send_batch_mutate = broken._fail_once
        broken._should_fail = True
        broken.return_to_pool()
    cf = ColumnFamily(counter_pool, 'Counter1')
    assert_raises(MaximumRetryException, cf.insert, 'key', {'col': 2, 'col2': 2})
    assert_equal(watcher.failure_count, 1)  # didn't retry at all
    counter_pool.dispose()
开发者ID:anisnasir,项目名称:pycassa,代码行数:20,代码来源:test_connection_pooling.py
示例11: test_failure_connection_info
def test_failure_connection_info(self):
    """After a request exhausts its (zero) retries, the failure listener
    must receive the failing request's method, args and kwargs."""
    listener = _TestListenerRequestInfo()
    pool = ConnectionPool(pool_size=1, max_overflow=0, recycle=10000,
                          prefill=True, max_retries=0,
                          keyspace='PycassaTestKeyspace', credentials=_credentials,
                          listeners=[listener], use_threadlocal=True,
                          server_list=['localhost:9160'])
    cf = ColumnFamily(pool, 'Counter1')
    # Corrupt the connection
    conn = pool.get()
    setattr(conn, 'send_get', conn._fail_once)
    conn._should_fail = True
    conn.return_to_pool()
    assert_raises(MaximumRetryException, cf.get, 'greunt', columns=['col'])
    # The connection handed to the failure listener carries request info.
    assert_true('request' in listener.failure_dict['connection'].info)
    request = listener.failure_dict['connection'].info['request']
    assert_equal(request['method'], 'get')
    assert_equal(request['args'], ('greunt', ColumnPath('Counter1', None, 'col'), 1))
    assert_equal(request['kwargs'], {})
开发者ID:anisnasir,项目名称:pycassa,代码行数:21,代码来源:test_connection_pooling.py
示例12: test_queue_pool_recycle
def test_queue_pool_recycle(self):
    """With recycle=1, every pooled connection should be recycled after a
    single operation — both with and without thread-local pooling."""
    listener = _TestListener()
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=1,
                          prefill=True, pool_timeout=0.5, timeout=1,
                          keyspace='PycassaTestKeyspace', credentials=_credentials,
                          listeners=[listener], use_threadlocal=False)
    cf = ColumnFamily(pool, 'Standard1')
    columns = {'col1': 'val', 'col2': 'val'}
    # Ten inserts across five prefilled connections -> five recycles.
    for i in range(10):
        cf.insert('key', columns)
    assert_equal(listener.recycle_count, 5)
    pool.dispose()
    listener.reset()
    # Try with threadlocal=True
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=1,
                          prefill=False, pool_timeout=0.5, timeout=1,
                          keyspace='PycassaTestKeyspace', credentials=_credentials,
                          listeners=[listener], use_threadlocal=True)
    cf = ColumnFamily(pool, 'Standard1')
    for i in range(10):
        cf.insert('key', columns)
    pool.dispose()
    assert_equal(listener.recycle_count, 5)
开发者ID:anisnasir,项目名称:pycassa,代码行数:29,代码来源:test_connection_pooling.py
示例13: test_queue_threadlocal_failover
def test_queue_threadlocal_failover(self):
    """A thread-local pool must fail over to the alternate server entry
    after a connection failure, first single-threaded then with 5 threads."""
    listener = _TestListener()
    pool = ConnectionPool(pool_size=1, max_overflow=0, recycle=10000,
                          prefill=True, timeout=0.05,
                          keyspace='PycassaTestKeyspace', credentials=_credentials,
                          listeners=[listener], use_threadlocal=True,
                          server_list=['localhost:9160', 'localhost:9160'])
    cf = ColumnFamily(pool, 'Standard1')
    for i in range(1, 5):
        # Corrupt the single pooled connection so the next write fails once.
        conn = pool.get()
        setattr(conn, 'send_batch_mutate', conn._fail_once)
        conn._should_fail = True
        conn.return_to_pool()
        # The first insert attempt should fail, but failover should occur
        # and the insert should succeed
        cf.insert('key', {'col': 'val%d' % i, 'col2': 'val'})
        assert_equal(listener.failure_count, i)
        assert_equal(cf.get('key'), {'col': 'val%d' % i, 'col2': 'val'})
    pool.dispose()
    listener.reset()

    # Same scenario with five threads, each hitting its own thread-local
    # (corrupted) connection exactly once.
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True, timeout=0.05,
                          keyspace='PycassaTestKeyspace', credentials=_credentials,
                          listeners=[listener], use_threadlocal=True,
                          server_list=['localhost:9160', 'localhost:9160'])
    cf = ColumnFamily(pool, 'Standard1')
    for i in range(5):
        conn = pool.get()
        setattr(conn, 'send_batch_mutate', conn._fail_once)
        conn._should_fail = True
        conn.return_to_pool()
    threads = []
    args = ('key', {'col': 'val', 'col2': 'val'})
    for i in range(5):
        threads.append(threading.Thread(target=cf.insert, args=args))
        threads[-1].start()
    for thread in threads:
        thread.join()
    assert_equal(listener.failure_count, 5)
    pool.dispose()
开发者ID:anisnasir,项目名称:pycassa,代码行数:50,代码来源:test_connection_pooling.py
示例14: create_cfs
def create_cfs(self):
    """
    Creates the Cassandra Column Families (if not exist).

    For each required CF, try to open it; on failure, create it with a
    TimeUUID comparator and open it again. A throwaway ``get_count`` on a
    random key acts as a smoke test that the CF is reachable. The pool and
    system manager are always released, even on error.
    """
    sys_mgr = None
    pool = None
    try:
        sys_mgr = SystemManager()
        pool = ConnectionPool(settings.KEYSPACE, server_list=settings.CASSANDRA_HOSTS)
        for cf_name in [CF_LOGS, CF_LOGS_BY_APP, CF_LOGS_BY_HOST, CF_LOGS_BY_SEVERITY]:
            try:
                cf = ColumnFamily(pool, cf_name)
            except Exception:
                # FIX: narrowed from a bare `except:` so SystemExit /
                # KeyboardInterrupt are no longer swallowed. Any other
                # failure is treated as "CF missing" and we create it.
                logger.info("create_cfs(): Creating column family %s", cf_name)
                sys_mgr.create_column_family(settings.KEYSPACE, cf_name, comparator_type=TimeUUIDType())
                cf = ColumnFamily(pool, cf_name)
            # Smoke test: count columns of a random (almost surely absent) key.
            cf.get_count(str(uuid.uuid4()))
    finally:
        if pool:
            pool.dispose()
        if sys_mgr:
            sys_mgr.close()
开发者ID:hgdeoro,项目名称:daedalus,代码行数:22,代码来源:storage.py
示例15: setUp
def setUp(self):
    """Open an authenticated pool and one ColumnFamily per typed CF,
    keeping the typed ones in ``self.cfs`` for bulk teardown."""
    creds = {"username": "jsmith", "password": "havebadpass"}
    self.pool = ConnectionPool(pool_size=10, keyspace="Keyspace1", credentials=creds)
    self.cf = ColumnFamily(self.pool, "Standard2")
    # Table-driven construction of the typed column families.
    typed_cfs = [
        ("cf_long", "StdLong"),
        ("cf_int", "StdInteger"),
        ("cf_time", "StdTimeUUID"),
        ("cf_lex", "StdLexicalUUID"),
        ("cf_ascii", "StdAscii"),
        ("cf_utf8", "StdUTF8"),
        ("cf_bytes", "StdBytes"),
    ]
    self.cfs = []
    for attr, cf_name in typed_cfs:
        handle = ColumnFamily(self.pool, cf_name)
        setattr(self, attr, handle)
        self.cfs.append(handle)
开发者ID:trhowe,项目名称:pycassa,代码行数:15,代码来源:test_autopacking.py
示例16: test_queue_failover
def test_queue_failover(self):
    """A single-connection pool must transparently fail over to the other
    server entry when a write fails, for both prefill settings."""
    for prefill in (True, False):
        listener = _TestListener()
        pool = ConnectionPool(pool_size=1, max_overflow=0, recycle=10000,
                              prefill=prefill, timeout=1,
                              keyspace='PycassaTestKeyspace', credentials=_credentials,
                              listeners=[listener], use_threadlocal=False,
                              server_list=['localhost:9160', 'localhost:9160'])
        cf = ColumnFamily(pool, 'Standard1')
        for i in range(1, 5):
            # Corrupt the pooled connection so the next write fails once.
            conn = pool.get()
            setattr(conn, 'send_batch_mutate', conn._fail_once)
            conn._should_fail = True
            conn.return_to_pool()
            # The first insert attempt should fail, but failover should occur
            # and the insert should succeed
            cf.insert('key', {'col': 'val%d' % i, 'col2': 'val'})
            assert_equal(listener.failure_count, i)
            assert_equal(cf.get('key'), {'col': 'val%d' % i, 'col2': 'val'})
        pool.dispose()
开发者ID:anisnasir,项目名称:pycassa,代码行数:24,代码来源:test_connection_pooling.py
示例17: test_pool_connection_failure
def test_pool_connection_failure(self):
    """With one bad host in the server list, prefill and subsequent gets
    should record a predictable number of connection failures."""
    listener = _TestListener()

    def get_extra():
        """Make failure count adjustments based on whether or not
        the permuted list starts with a good host:port"""
        if listener.serv_list[0] == 'localhost:9160':
            return 0
        else:
            return 1

    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000, prefill=True,
                          keyspace='PycassaTestKeyspace', credentials=_credentials,
                          pool_timeout=0.01, timeout=0.05,
                          listeners=[listener], use_threadlocal=False,
                          server_list=['localhost:9160', 'foobar:1'])
    # Prefilling 5 connections against an alternating good/bad list.
    assert_equal(listener.failure_count, 4 + get_extra())
    for i in range(0, 7):
        pool.get()
    assert_equal(listener.failure_count, 6 + get_extra())
    pool.dispose()
    listener.reset()

    # Same expectations hold with thread-local pooling and concurrent gets.
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000, prefill=True,
                          keyspace='PycassaTestKeyspace', credentials=_credentials,
                          pool_timeout=0.01, timeout=0.05,
                          listeners=[listener], use_threadlocal=True,
                          server_list=['localhost:9160', 'foobar:1'])
    assert_equal(listener.failure_count, 4 + get_extra())
    threads = []
    for i in range(0, 7):
        threads.append(threading.Thread(target=pool.get))
        threads[-1].start()
    for thread in threads:
        thread.join()
    assert_equal(listener.failure_count, 6 + get_extra())
    pool.dispose()
开发者ID:anisnasir,项目名称:pycassa,代码行数:45,代码来源:test_connection_pooling.py
示例18: TestStandardCFs
class TestStandardCFs(unittest.TestCase):
    """Round-trip insert/get/remove across each typed standard column family.

    NOTE(review): this class is truncated in the source excerpt — the tail
    of test_standard_column_family (from the multiget assertions onward)
    is not visible here.
    """

    def setUp(self):
        # Authenticated pool plus one CF per comparator type under test.
        credentials = {"username": "jsmith", "password": "havebadpass"}
        self.pool = ConnectionPool(pool_size=10, keyspace="Keyspace1", credentials=credentials)
        self.cf = ColumnFamily(self.pool, "Standard2")
        self.cf_long = ColumnFamily(self.pool, "StdLong")
        self.cf_int = ColumnFamily(self.pool, "StdInteger")
        self.cf_time = ColumnFamily(self.pool, "StdTimeUUID")
        self.cf_lex = ColumnFamily(self.pool, "StdLexicalUUID")
        self.cf_ascii = ColumnFamily(self.pool, "StdAscii")
        self.cf_utf8 = ColumnFamily(self.pool, "StdUTF8")
        self.cf_bytes = ColumnFamily(self.pool, "StdBytes")
        self.cfs = [self.cf_long, self.cf_int, self.cf_time, self.cf_lex, self.cf_ascii, self.cf_utf8, self.cf_bytes]

    def tearDown(self):
        # Wipe every typed CF touched by the tests, then release connections.
        for cf in self.cfs:
            for key, cols in cf.get_range():
                cf.remove(key)
        self.pool.dispose()

    def make_group(self, cf, cols):
        # Bundle a CF with its three column names and a name->value dict.
        diction = {cols[0]: VALS[0], cols[1]: VALS[1], cols[2]: VALS[2]}
        return {"cf": cf, "cols": cols, "dict": diction}

    def test_standard_column_family(self):
        # For each data type, create a group that includes its column family,
        # a set of column names, and a dictionary that maps from the column
        # names to values.
        type_groups = []
        long_cols = [1111111111111111L, 2222222222222222L, 3333333333333333L]
        type_groups.append(self.make_group(self.cf_long, long_cols))
        int_cols = [1, 2, 3]
        type_groups.append(self.make_group(self.cf_int, int_cols))
        time_cols = [TIME1, TIME2, TIME3]
        type_groups.append(self.make_group(self.cf_time, time_cols))
        lex_cols = [
            uuid.UUID(bytes="aaa aaa aaa aaaa"),
            uuid.UUID(bytes="bbb bbb bbb bbbb"),
            uuid.UUID(bytes="ccc ccc ccc cccc"),
        ]
        type_groups.append(self.make_group(self.cf_lex, lex_cols))
        ascii_cols = ["aaaa", "bbbb", "cccc"]
        type_groups.append(self.make_group(self.cf_ascii, ascii_cols))
        utf8_cols = [u"a\u0020", u"b\u0020", u"c\u0020"]
        type_groups.append(self.make_group(self.cf_utf8, utf8_cols))
        bytes_cols = ["aaaa", "bbbb", "cccc"]
        type_groups.append(self.make_group(self.cf_bytes, bytes_cols))
        # Begin the actual inserting and getting
        for group in type_groups:
            cf = group.get("cf")
            gdict = group.get("dict")
            gcols = group.get("cols")
            cf.insert(KEYS[0], gdict)
            assert_equal(cf.get(KEYS[0]), gdict)
            # Check each column individually
            for i in range(3):
                assert_equal(cf.get(KEYS[0], columns=[gcols[i]]), {gcols[i]: VALS[i]})
            # Check that if we list all columns, we get the full dict
            assert_equal(cf.get(KEYS[0], columns=gcols[:]), gdict)
            # The same thing with a start and end instead
            assert_equal(cf.get(KEYS[0], column_start=gcols[0], column_finish=gcols[2]), gdict)
            # A start and end that are the same
            assert_equal(cf.get(KEYS[0], column_start=gcols[0], column_finish=gcols[0]), {gcols[0]: VALS[0]})
            assert_equal(cf.get_count(KEYS[0]), 3)
            # Test removing rows
            cf.remove(KEYS[0], columns=gcols[:1])
            assert_equal(cf.get_count(KEYS[0]), 2)
            cf.remove(KEYS[0], columns=gcols[1:])
            assert_equal(cf.get_count(KEYS[0]), 0)
            # Insert more than one row now
            cf.insert(KEYS[0], gdict)
            cf.insert(KEYS[1], gdict)
            cf.insert(KEYS[2], gdict)
            ### multiget() tests ###
            res = cf.multiget(KEYS[:])
            for i in range(3):
                assert_equal(res.get(KEYS[i]), gdict)
            res = cf.multiget(KEYS[2:])
            # ... (remainder of this test omitted in the source excerpt) ...
开发者ID:trhowe,项目名称:pycassa,代码行数:101,代码来源:test_autopacking.py
示例19: TestTimeUUIDs
class TestTimeUUIDs(unittest.TestCase):
    """Verify that datetime and epoch-float bounds are auto-converted to
    TimeUUIDs when slicing a TimeUUID-compared column family."""

    def setUp(self):
        # Authenticated pool plus the TimeUUID-compared test CF.
        credentials = {"username": "jsmith", "password": "havebadpass"}
        self.pool = ConnectionPool(pool_size=5, keyspace="Keyspace1", credentials=credentials)
        self.cf_time = ColumnFamily(self.pool, "StdTimeUUID")

    def tearDown(self):
        # Remove all rows written by the tests, then release connections.
        for key, cols in self.cf_time.get_range():
            self.cf_time.remove(key)
        self.pool.dispose()

    def test_datetime_to_uuid(self):
        """Slice by datetime bounds straddling two uuid1 columns."""
        key = "key1"
        timeline = []
        timeline.append(datetime.now())
        time1 = uuid1()
        col1 = {time1: "0"}
        self.cf_time.insert(key, col1)
        time.sleep(1)
        timeline.append(datetime.now())
        time2 = uuid1()
        col2 = {time2: "1"}
        self.cf_time.insert(key, col2)
        time.sleep(1)
        timeline.append(datetime.now())
        cols = {time1: "0", time2: "1"}
        # timeline[0] < time1 < timeline[1] < time2 < timeline[2]
        assert_equal(self.cf_time.get(key, column_start=timeline[0]), cols)
        assert_equal(self.cf_time.get(key, column_finish=timeline[2]), cols)
        assert_equal(self.cf_time.get(key, column_start=timeline[0], column_finish=timeline[2]), cols)
        assert_equal(self.cf_time.get(key, column_start=timeline[0], column_finish=timeline[2]), cols)
        assert_equal(self.cf_time.get(key, column_start=timeline[0], column_finish=timeline[1]), col1)
        assert_equal(self.cf_time.get(key, column_start=timeline[1], column_finish=timeline[2]), col2)

    def test_time_to_uuid(self):
        """Same slicing checks but with time.time() floats as bounds."""
        key = "key1"
        timeline = []
        timeline.append(time.time())
        time1 = uuid1()
        col1 = {time1: "0"}
        self.cf_time.insert(key, col1)
        time.sleep(0.1)
        timeline.append(time.time())
        time2 = uuid1()
        col2 = {time2: "1"}
        self.cf_time.insert(key, col2)
        time.sleep(0.1)
        timeline.append(time.time())
        cols = {time1: "0", time2: "1"}
        assert_equal(self.cf_time.get(key, column_start=timeline[0]), cols)
        assert_equal(self.cf_time.get(key, column_finish=timeline[2]), cols)
        assert_equal(self.cf_time.get(key, column_start=timeline[0], column_finish=timeline[2]), cols)
        assert_equal(self.cf_time.get(key, column_start=timeline[0], column_finish=timeline[2]), cols)
        assert_equal(self.cf_time.get(key, column_start=timeline[0], column_finish=timeline[1]), col1)
        assert_equal(self.cf_time.get(key, column_start=timeline[1], column_finish=timeline[2]), col2)

    def test_auto_time_to_uuid1(self):
        """A float column name is converted to a uuid1 whose timestamp
        round-trips to (approximately) the original epoch time."""
        key = "key"
        t = time.time()
        col = {t: "foo"}
        self.cf_time.insert(key, col)
        uuid_res = self.cf_time.get(key).keys()[0]
        timestamp = convert_uuid_to_time(uuid_res)
        # Allow sub-millisecond drift from the float -> uuid1 conversion.
        assert_almost_equal(timestamp, t, places=3)
示例20: test_queue_pool_threadlocal
def test_queue_pool_threadlocal(self):
    """With use_threadlocal=True, repeated gets on one thread share a
    single connection, and 5 threads reuse the 5 prefilled connections."""
    listener = _TestListener()
    pool = ConnectionPool(pool_size=5, max_overflow=5, recycle=10000,
                          prefill=True, pool_timeout=0.01, timeout=1,
                          keyspace='PycassaTestKeyspace', credentials=_credentials,
                          listeners=[listener], use_threadlocal=True)
    conns = []
    assert_equal(listener.connect_count, 5)
    # These connections should all be the same
    for i in range(10):
        conns.append(pool.get())
    assert_equal(listener.connect_count, 5)
    assert_equal(listener.checkout_count, 1)
    # Returning the same thread-local connection repeatedly checks in once.
    for i in range(0, 5):
        pool.return_conn(conns[i])
    assert_equal(listener.checkin_count, 1)
    for i in range(5, 10):
        pool.return_conn(conns[i])
    assert_equal(listener.checkin_count, 1)
    conns = []
    assert_equal(listener.connect_count, 5)
    # A single connection should come from the pool
    for i in range(5):
        conns.append(pool.get())
    assert_equal(listener.connect_count, 5)
    assert_equal(listener.checkout_count, 2)
    for conn in conns:
        pool.return_conn(conn)
    conns = []
    threads = []
    listener.reset()

    def checkout_return():
        # Hold a connection long enough that all 5 threads overlap.
        conn = pool.get()
        time.sleep(1)
        pool.return_conn(conn)

    for i in range(5):
        threads.append(threading.Thread(target=checkout_return))
        threads[-1].start()
    for thread in threads:
        thread.join()
    assert_equal(listener.connect_count, 0) # Still 5 connections in pool
    assert_equal(listener.checkout_count, 5)
    assert_equal(listener.checkin_count, 5)
    # These should come from the pool
    threads = []
    for i in range(5):
        threads.append(threading.Thread(target=checkout_return))
        threads[-1].start()
    for thread in threads:
        thread.join()
    assert_equal(listener.connect_count, 0)
    assert_equal(listener.checkout_count, 10)
    assert_equal(listener.checkin_count, 10)
    pool.dispose()
开发者ID:anisnasir,项目名称:pycassa,代码行数:64,代码来源:test_connection_pooling.py
注:本文中的pycassa.ConnectionPool类示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论