This article collects typical usage examples of the Python class swift.common.db.ContainerBroker. If you are unsure what the ContainerBroker class is for, how to use it, or what real code built on it looks like, the examples selected here should help.
Eleven code examples of the ContainerBroker class are shown below, sorted by popularity by default.
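Before the examples, here is a minimal sketch of the basic ContainerBroker lifecycle as it appears in the tests below (the database path and timestamps are illustrative, and the exact initialize() signature varies between releases — some examples call it with no arguments):

import time
from swift.common.db import ContainerBroker
from swift.common.utils import normalize_timestamp

# Open (or create) a container database for account 'a', container 'c'.
broker = ContainerBroker('/tmp/hash.db', account='a', container='c')
broker.initialize(normalize_timestamp(1))             # create the schema
broker.put_object('obj', normalize_timestamp(2), 3,   # name, timestamp, size
                  'text/plain',                       # content type
                  '68b329da9893e34099c7d8ad5cb9c940') # ETag
info = broker.get_info()
# Based on the tests below, this should print: 1 3
print info['object_count'], info['bytes_used']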
Example 1: test_unicode
def test_unicode(self):
    cu = container_updater.ContainerUpdater({
        'devices': self.devices_dir,
        'mount_check': 'false',
        'swift_dir': self.testdir,
        'interval': '1',
        'concurrency': '1',
        'node_timeout': '15',
    })
    containers_dir = os.path.join(self.sda1, container_server.DATADIR)
    os.mkdir(containers_dir)
    subdir = os.path.join(containers_dir, 'subdir')
    os.mkdir(subdir)
    cb = ContainerBroker(os.path.join(subdir, 'hash.db'), account='a',
                         container='\xce\xa9')
    cb.initialize(normalize_timestamp(1))
    cb.put_object('\xce\xa9', normalize_timestamp(2), 3, 'text/plain',
                  '68b329da9893e34099c7d8ad5cb9c940')

    def accept(sock, addr):
        try:
            with Timeout(3):
                inc = sock.makefile('rb')
                out = sock.makefile('wb')
                out.write('HTTP/1.1 201 OK\r\nContent-Length: 0\r\n\r\n')
                out.flush()
                inc.read()
        except BaseException, err:
            import traceback
            traceback.print_exc()
            return err
        return None
Developer: Awingu, Project: swift, Lines: 31, Source: test_updater.py
Example 2: test_container_stat_get_data
def test_container_stat_get_data(self):
    stat = db_stats_collector.ContainerStatsCollector(self.conf)
    container_db = ContainerBroker("%s/con.db" % self.containers,
                                   account='test_acc', container='test_con')
    container_db.initialize()
    container_db.put_object('test_obj', time.time(), 10, 'text', 'faketag')
    info = stat.get_data("%s/con.db" % self.containers)
    self.assertEquals('''"test_acc","test_con",1,10\n''', info)
Developer: dellcloudedge, Project: slogging, Lines: 8, Source: test_db_stats_collector.py
Example 3: process_container
def process_container(self, dbfile):
    """
    Process a container, and update the information in the account.

    :param dbfile: container DB to process
    """
    start_time = time.time()
    broker = ContainerBroker(dbfile, logger=self.logger)
    info = broker.get_info()
    # Don't send updates if the container was auto-created since it
    # definitely doesn't have up to date statistics.
    if float(info['put_timestamp']) <= 0:
        return
    if self.account_suppressions.get(info['account'], 0) > time.time():
        return
    if info['put_timestamp'] > info['reported_put_timestamp'] or \
            info['delete_timestamp'] > info['reported_delete_timestamp'] or \
            info['object_count'] != info['reported_object_count'] or \
            info['bytes_used'] != info['reported_bytes_used']:
        container = '/%s/%s' % (info['account'], info['container'])
        part, nodes = self.get_account_ring().get_nodes(info['account'])
        events = [spawn(self.container_report, node, part, container,
                        info['put_timestamp'], info['delete_timestamp'],
                        info['object_count'], info['bytes_used'])
                  for node in nodes]
        successes = 0
        failures = 0
        for event in events:
            if is_success(event.wait()):
                successes += 1
            else:
                failures += 1
        if successes > failures:
            self.logger.increment('successes')
            self.successes += 1
            self.logger.debug(
                _('Update report sent for %(container)s %(dbfile)s'),
                {'container': container, 'dbfile': dbfile})
            broker.reported(info['put_timestamp'],
                            info['delete_timestamp'], info['object_count'],
                            info['bytes_used'])
        else:
            self.logger.increment('failures')
            self.failures += 1
            self.logger.debug(
                _('Update report failed for %(container)s %(dbfile)s'),
                {'container': container, 'dbfile': dbfile})
            self.account_suppressions[info['account']] = until = \
                time.time() + self.account_suppression_time
            if self.new_account_suppressions:
                print >>self.new_account_suppressions, \
                    info['account'], until
        # Only track timing data for attempted updates:
        self.logger.timing_since('timing', start_time)
    else:
        self.logger.increment('no_changes')
        self.no_changes += 1
Developer: ChicoTeam, Project: swift, Lines: 57, Source: updater.py
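The long conditional in the middle of process_container is the core of the updater: an account update is sent only when the broker's current statistics differ from what was last reported, and broker.reported(...) then copies the current values into the reported_* columns so the next pass takes the no_changes branch. The same check, pulled out as a standalone predicate for illustration (needs_report is not part of Swift):

def needs_report(info):
    """True if the container stats differ from what was last reported."""
    return (info['put_timestamp'] > info['reported_put_timestamp'] or
            info['delete_timestamp'] > info['reported_delete_timestamp'] or
            info['object_count'] != info['reported_object_count'] or
            info['bytes_used'] != info['reported_bytes_used'])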
Example 4: container_audit
def container_audit(self, path):
    """
    Audits the given container path

    :param path: the path to a container db
    """
    try:
        if not path.endswith('.db'):
            return
        broker = ContainerBroker(path)
        if not broker.is_deleted():
            info = broker.get_info()
            self.container_passes += 1
            self.logger.debug(_('Audit passed for %s'), broker.db_file)
    except (Exception, Timeout):
        self.container_failures += 1
        self.logger.exception(_('ERROR Could not get container info %s'),
                              (broker.db_file))
Developer: AnyBucket, Project: OpenStack-Install-and-Understand-Guide, Lines: 18, Source: auditor.py
Example 5: container_audit
def container_audit(self, path):
    """
    Audits the given container path

    :param path: the path to a container db
    """
    start_time = time.time()
    try:
        broker = ContainerBroker(path)
        if not broker.is_deleted():
            broker.get_info()
            self.logger.increment('passes')
            self.container_passes += 1
            self.logger.debug(_('Audit passed for %s'), broker.db_file)
    except (Exception, Timeout):
        self.logger.increment('failures')
        self.container_failures += 1
        self.logger.exception(_('ERROR Could not get container info %s'),
                              broker.db_file)
    self.logger.timing_since('timing', start_time)
Developer: iBeacons, Project: swift, Lines: 20, Source: auditor.py
Example 6: container_audit
def container_audit(self, path):
    """
    Audits the given container path

    :param path: the path to a container db
    """
    start_time = time.time()
    try:
        if not path.endswith(".db"):
            return
        broker = ContainerBroker(path)
        if not broker.is_deleted():
            info = broker.get_info()
            self.logger.increment("passes")
            self.container_passes += 1
            self.logger.debug(_("Audit passed for %s"), broker.db_file)
    except (Exception, Timeout):
        self.logger.increment("failures")
        self.container_failures += 1
        self.logger.exception(_("ERROR Could not get container info %s"), (broker.db_file))
    self.logger.timing_since("timing", start_time)
Developer: hortonworkstest, Project: Hadoop-and-Swift-integration, Lines: 21, Source: auditor.py
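Examples 4, 5 and 6 are the same auditor method from different revisions; they differ only in the ".db" suffix check and in whether StatsD-style counters (logger.increment, timing_since) are emitted. A hedged sketch of how such a method could be driven over a directory tree of container databases — the walk below is purely illustrative; the real auditor locates databases with Swift's own helpers:

import os

def audit_all_containers(auditor, devices_dir):
    """Feed every container .db file under devices_dir to the auditor."""
    for root, _dirs, files in os.walk(devices_dir):
        for name in files:
            if name.endswith('.db'):
                auditor.container_audit(os.path.join(root, name))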
Example 7: get_data
def get_data(self, db_path):
    """
    Data for generated csv has the following columns:
    Account Hash, Container Name, Object Count, Bytes Used
    This will just collect whether or not the metadata is set
    using a 1 or ''.

    :raises sqlite3.Error: does not catch errors connecting to db
    """
    line_data = None
    broker = ContainerBroker(db_path)
    if not broker.is_deleted():
        info = broker.get_info(include_metadata=bool(self.metadata_keys))
        encoded_container_name = urllib.quote(info['container'])
        line_data = '"%s","%s",%d,%d' % (
            info['account'], encoded_container_name,
            info['object_count'], info['bytes_used'])
        if self.metadata_keys:
            metadata_results = ','.join(
                [info['metadata'].get(mkey) and '1' or ''
                 for mkey in self.metadata_keys])
            line_data += ',%s' % metadata_results
        line_data += '\n'
    return line_data
Developer: dellcloudedge, Project: slogging, Lines: 24, Source: db_stats_collector.py
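The trailing metadata columns are produced by the list comprehension near the end: each configured metadata key contributes '1' if it is set on the container and an empty string otherwise. A small illustration of that idiom with hypothetical values, matching the ',1,' / ',,1' strings expected in Example 8:

metadata_keys = ['test1', 'test2']
metadata = {'test1': 'some value'}      # only the first key is set
cols = ','.join([metadata.get(mkey) and '1' or '' for mkey in metadata_keys])
print cols                              # -> '1,' so the CSV line ends in ',1,'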
Example 8: _gen_container_stat
def _gen_container_stat(self, set_metadata=False, drop_metadata=False):
    if set_metadata:
        self.conf['metadata_keys'] = 'test1,test2'
        # webob runs title on all headers
    stat = db_stats_collector.ContainerStatsCollector(self.conf)
    output_data = set()
    for i in range(10):
        cont_db = ContainerBroker(
            "%s/container-stats-201001010%s-%s.db" % (self.containers, i,
                                                      uuid.uuid4().hex),
            account='test_acc_%s' % i, container='test_con')
        cont_db.initialize()
        cont_db.put_object('test_obj', time.time(), 10, 'text', 'faketag')
        metadata_output = ''
        if set_metadata:
            if i % 2:
                cont_db.update_metadata({'X-Container-Meta-Test1': (5, 1)})
                metadata_output = ',1,'
            else:
                cont_db.update_metadata({'X-Container-Meta-Test2': (7, 2)})
                metadata_output = ',,1'
        # this will "commit" the data
        cont_db.get_info()
        if drop_metadata:
            output_data.add('''"test_acc_%s","test_con",1,10,,''' % i)
        else:
            output_data.add('''"test_acc_%s","test_con",1,10%s''' %
                            (i, metadata_output))
        if drop_metadata:
            self._drop_metadata_col(cont_db, 'test_acc_%s' % i)
    self.assertEqual(len(output_data), 10)
    return stat, output_data
Developer: dellcloudedge, Project: slogging, Lines: 33, Source: test_db_stats_collector.py
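Example 8 also shows the shape ContainerBroker expects for metadata: update_metadata takes a mapping from header name to a (value, timestamp) pair, and broker.metadata hands the same pairs back (the sync examples below unpack them as key, (value, timestamp)). A minimal sketch with an illustrative header name and value, and constructor/initialize arguments as in the tests above:

import time
from swift.common.db import ContainerBroker
from swift.common.utils import normalize_timestamp

broker = ContainerBroker('/tmp/meta.db', account='a', container='c')
broker.initialize(normalize_timestamp(time.time()))
# Each entry maps a header name to a (value, timestamp) pair; the timestamp
# is what lets replication keep the newest value for each key.
broker.update_metadata({
    'X-Container-Meta-Color': ('blue', normalize_timestamp(time.time()))})
for key, (value, ts) in broker.metadata.iteritems():
    print key, value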
Example 9: container_sync
def container_sync(self, path):
    """
    Checks the given path for a container database, determines if syncing
    is turned on for that database and, if so, sends any updates to the
    other container.

    :param path: the path to a container db
    """
    broker = None
    try:
        broker = ContainerBroker(path)
        info = broker.get_info()
        x, nodes = self.container_ring.get_nodes(info['account'],
                                                 info['container'])
        for ordinal, node in enumerate(nodes):
            if node['ip'] in self._myips and node['port'] == self._myport:
                break
        else:
            return
        if not broker.is_deleted():
            sync_to = None
            sync_key = None
            sync_point1 = info['x_container_sync_point1']
            sync_point2 = info['x_container_sync_point2']
            for key, (value, timestamp) in broker.metadata.iteritems():
                if key.lower() == 'x-container-sync-to':
                    sync_to = value
                elif key.lower() == 'x-container-sync-key':
                    sync_key = value
            if not sync_to or not sync_key:
                self.container_skips += 1
                self.logger.increment('skips')
                return
            sync_to = sync_to.rstrip('/')
            err = validate_sync_to(sync_to, self.allowed_sync_hosts)
            if err:
                self.logger.info(
                    _('ERROR %(db_file)s: %(validate_sync_to_err)s'),
                    {'db_file': broker.db_file,
                     'validate_sync_to_err': err})
                self.container_failures += 1
                self.logger.increment('failures')
                return
            stop_at = time() + self.container_time
            next_sync_point = None
            while time() < stop_at and sync_point2 < sync_point1:
                rows = broker.get_items_since(sync_point2, 1)
                if not rows:
                    break
                row = rows[0]
                if row['ROWID'] > sync_point1:
                    break
                key = hash_path(info['account'], info['container'],
                                row['name'], raw_digest=True)
                # This node will only initially sync out one third of the
                # objects (if 3 replicas, 1/4 if 4, etc.) and will skip
                # problematic rows as needed in case of faults.
                # This section will attempt to sync previously skipped
                # rows in case the previous attempts by any of the nodes
                # didn't succeed.
                if not self.container_sync_row(row, sync_to, sync_key,
                                               broker, info):
                    if not next_sync_point:
                        next_sync_point = sync_point2
                sync_point2 = row['ROWID']
                broker.set_x_container_sync_points(None, sync_point2)
            if next_sync_point:
                broker.set_x_container_sync_points(None, next_sync_point)
            while time() < stop_at:
                rows = broker.get_items_since(sync_point1, 1)
                if not rows:
                    break
                row = rows[0]
                key = hash_path(info['account'], info['container'],
                                row['name'], raw_digest=True)
                # This node will only initially sync out one third of the
                # objects (if 3 replicas, 1/4 if 4, etc.). It'll come back
                # around to the section above and attempt to sync
                # previously skipped rows in case the other nodes didn't
                # succeed or in case it failed to do so the first time.
                if unpack_from('>I', key)[0] % \
                        len(nodes) == ordinal:
                    self.container_sync_row(row, sync_to, sync_key,
                                            broker, info)
                sync_point1 = row['ROWID']
                broker.set_x_container_sync_points(sync_point1, None)
            self.container_syncs += 1
            self.logger.increment('syncs')
    except (Exception, Timeout) as err:
        self.container_failures += 1
        self.logger.increment('failures')
        self.logger.exception(_('ERROR Syncing %s'),
                              broker.db_file if broker else path)
Developer: Awingu, Project: swift, Lines: 93, Source: sync.py
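The "one third of the objects" comments refer to the modulo check near the bottom of the second loop: each object name is hashed with hash_path(..., raw_digest=True), and on the first pass a row is synced only by the node whose position among the replicas (ordinal) matches the hash modulo the number of nodes. That selection, isolated as a sketch (is_my_turn is an illustrative helper, not part of Swift):

from struct import unpack_from
from swift.common.utils import hash_path

def is_my_turn(account, container, obj, nodes, ordinal):
    """True if this node should sync the row on the first pass."""
    key = hash_path(account, container, obj, raw_digest=True)
    return unpack_from('>I', key)[0] % len(nodes) == ordinal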
Example 10: test_run_once
def test_run_once(self):
    cu = container_updater.ContainerUpdater({
        'devices': self.devices_dir,
        'mount_check': 'false',
        'swift_dir': self.testdir,
        'interval': '1',
        'concurrency': '1',
        'node_timeout': '15',
        'account_suppression_time': 0
    })
    cu.run_once()
    containers_dir = os.path.join(self.sda1, container_server.DATADIR)
    os.mkdir(containers_dir)
    cu.run_once()
    self.assert_(os.path.exists(containers_dir))
    subdir = os.path.join(containers_dir, 'subdir')
    os.mkdir(subdir)
    cb = ContainerBroker(os.path.join(subdir, 'hash.db'), account='a',
                         container='c')
    cb.initialize(normalize_timestamp(1))
    cu.run_once()
    info = cb.get_info()
    self.assertEquals(info['object_count'], 0)
    self.assertEquals(info['bytes_used'], 0)
    self.assertEquals(info['reported_object_count'], 0)
    self.assertEquals(info['reported_bytes_used'], 0)
    cb.put_object('o', normalize_timestamp(2), 3, 'text/plain',
                  '68b329da9893e34099c7d8ad5cb9c940')
    cu.run_once()
    info = cb.get_info()
    self.assertEquals(info['object_count'], 1)
    self.assertEquals(info['bytes_used'], 3)
    self.assertEquals(info['reported_object_count'], 0)
    self.assertEquals(info['reported_bytes_used'], 0)

    def accept(sock, addr, return_code):
        try:
            with Timeout(3):
                inc = sock.makefile('rb')
                out = sock.makefile('wb')
                out.write('HTTP/1.1 %d OK\r\nContent-Length: 0\r\n\r\n' %
                          return_code)
                out.flush()
                self.assertEquals(inc.readline(),
                                  'PUT /sda1/0/a/c HTTP/1.1\r\n')
                headers = {}
                line = inc.readline()
                while line and line != '\r\n':
                    headers[line.split(':')[0].lower()] = \
                        line.split(':')[1].strip()
                    line = inc.readline()
                self.assert_('x-put-timestamp' in headers)
                self.assert_('x-delete-timestamp' in headers)
                self.assert_('x-object-count' in headers)
                self.assert_('x-bytes-used' in headers)
        except BaseException, err:
            import traceback
            traceback.print_exc()
            return err
        return None
Developer: Awingu, Project: swift, Lines: 61, Source: test_updater.py
Example 11: container_sync
def container_sync(self, path):
    """
    Checks the given path for a container database, determines if syncing
    is turned on for that database and, if so, sends any updates to the
    other container.

    :param path: the path to a container db
    """
    try:
        if not path.endswith(".db"):
            return
        broker = ContainerBroker(path)
        info = broker.get_info()
        x, nodes = self.container_ring.get_nodes(info["account"], info["container"])
        for ordinal, node in enumerate(nodes):
            if node["ip"] in self._myips and node["port"] == self._myport:
                break
        else:
            return
        if not broker.is_deleted():
            sync_to = None
            sync_key = None
            sync_point1 = info["x_container_sync_point1"]
            sync_point2 = info["x_container_sync_point2"]
            for key, (value, timestamp) in broker.metadata.iteritems():
                if key.lower() == "x-container-sync-to":
                    sync_to = value
                elif key.lower() == "x-container-sync-key":
                    sync_key = value
            if not sync_to or not sync_key:
                self.container_skips += 1
                return
            sync_to = sync_to.rstrip("/")
            err = validate_sync_to(sync_to, self.allowed_sync_hosts)
            if err:
                self.logger.info(
                    _("ERROR %(db_file)s: %(validate_sync_to_err)s"),
                    {"db_file": broker.db_file, "validate_sync_to_err": err},
                )
                self.container_failures += 1
                return
            stop_at = time() + self.container_time
            while time() < stop_at and sync_point2 < sync_point1:
                rows = broker.get_items_since(sync_point2, 1)
                if not rows:
                    break
                row = rows[0]
                if row["ROWID"] >= sync_point1:
                    break
                key = hash_path(info["account"], info["container"], row["name"], raw_digest=True)
                # This node will only initially sync out one third of the
                # objects (if 3 replicas, 1/4 if 4, etc.). This section
                # will attempt to sync previously skipped rows in case the
                # other nodes didn't succeed.
                if unpack_from(">I", key)[0] % len(nodes) != ordinal:
                    if not self.container_sync_row(row, sync_to, sync_key, broker, info):
                        return
                sync_point2 = row["ROWID"]
                broker.set_x_container_sync_points(None, sync_point2)
            while time() < stop_at:
                rows = broker.get_items_since(sync_point1, 1)
                if not rows:
                    break
                row = rows[0]
                key = hash_path(info["account"], info["container"], row["name"], raw_digest=True)
                # This node will only initially sync out one third of the
                # objects (if 3 replicas, 1/4 if 4, etc.). It'll come back
                # around to the section above and attempt to sync
                # previously skipped rows in case the other nodes didn't
                # succeed.
                if unpack_from(">I", key)[0] % len(nodes) == ordinal:
                    if not self.container_sync_row(row, sync_to, sync_key, broker, info):
                        return
                sync_point1 = row["ROWID"]
                broker.set_x_container_sync_points(sync_point1, None)
            self.container_syncs += 1
    except (Exception, Timeout), err:
        self.container_failures += 1
        self.logger.exception(_("ERROR Syncing %s"), (broker.db_file))
Developer: Nupta, Project: swift, Lines: 79, Source: sync.py
Note: the swift.common.db.ContainerBroker examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs, and similar platforms; the snippets were selected from open-source projects contributed by their authors. Copyright remains with the original authors; refer to each project's license before redistributing or reusing the code, and do not reproduce this compilation without permission.