This article collects typical usage examples of the swift.obj.diskfile.DiskFileManager class in Python. If you have been wondering what DiskFileManager is for, how to use it, or what real-world usage looks like, the curated class examples below may help.
The following presents 20 code examples of the DiskFileManager class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
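Before the examples, here is a minimal sketch of the pattern most of them share: build a configuration dict, construct a DiskFileManager from it together with a logger, and ask the manager for a DiskFile. The conf values and the device/partition/object names below are placeholder assumptions for illustration only, and exact signatures (for example, whether get_diskfile takes a policy keyword) vary between Swift versions, as the examples demonstrate.

# Minimal usage sketch distilled from the examples below; the conf values,
# device name ('sda' under /srv/node), and object path are assumed placeholders.
from swift.common.utils import get_logger
from swift.obj.diskfile import DiskFileManager

conf = {
    'devices': '/srv/node',   # root directory containing the device dirs
    'mount_check': 'false',   # skip mount checks, as the test examples do
}
logger = get_logger(conf, log_route='diskfile-example')

# A DiskFileManager is built from a conf dict and a logger.
df_mgr = DiskFileManager(conf, logger)

# Ask the manager for a DiskFile for device 'sda', partition '0', object /a/c/o
# (assumes the device directory exists). Newer Swift releases also accept a
# policy=... keyword argument; compare Examples 3 and 4 below.
disk_file = df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o')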
Example 1: do_test
def do_test(headers_out, expected):
# write an async
dfmanager = DiskFileManager(conf, daemon.logger)
account, container, obj = 'a', 'c', 'o'
op = 'PUT'
data = {'op': op, 'account': account, 'container': container,
'obj': obj, 'headers': headers_out}
dfmanager.pickle_async_update(self.sda1, account, container, obj,
data, next(ts_iter), policies[0])
request_log = []
def capture(*args, **kwargs):
request_log.append((args, kwargs))
# run once
fake_status_codes = [
200, # object update success
200, # object update success
200, # object update conflict
]
with mocked_http_conn(*fake_status_codes, give_connect=capture):
daemon.run_once()
self.assertEqual(len(fake_status_codes), len(request_log))
for request_args, request_kwargs in request_log:
ip, part, method, path, headers, qs, ssl = request_args
self.assertEqual(method, 'PUT')
self.assertDictEqual(expected, headers)
self.assertEqual(
daemon.logger.get_increment_counts(),
{'successes': 1, 'unlinks': 1, 'async_pendings': 1})
self.assertFalse(os.listdir(async_dir))
daemon.logger.clear()
Contributor: nadeemsyed, Project: swift, Lines of code: 33, Source: test_updater.py
Example 2: test_obj_put_async_updates
def test_obj_put_async_updates(self):
ts = (normalize_timestamp(t) for t in
itertools.count(int(time())))
policy = random.choice(list(POLICIES))
# setup updater
conf = {
'devices': self.devices_dir,
'mount_check': 'false',
'swift_dir': self.testdir,
}
daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
async_dir = os.path.join(self.sda1, get_async_dir(policy))
os.mkdir(async_dir)
# write an async
dfmanager = DiskFileManager(conf, daemon.logger)
account, container, obj = 'a', 'c', 'o'
op = 'PUT'
headers_out = swob.HeaderKeyDict({
'x-size': 0,
'x-content-type': 'text/plain',
'x-etag': 'd41d8cd98f00b204e9800998ecf8427e',
'x-timestamp': next(ts),
'X-Backend-Storage-Policy-Index': int(policy),
})
data = {'op': op, 'account': account, 'container': container,
'obj': obj, 'headers': headers_out}
dfmanager.pickle_async_update(self.sda1, account, container, obj,
data, next(ts), policy)
request_log = []
def capture(*args, **kwargs):
request_log.append((args, kwargs))
# run once
fake_status_codes = [
200, # object update success
200, # object update success
200, # object update conflict
]
with mocked_http_conn(*fake_status_codes, give_connect=capture):
daemon.run_once()
self.assertEqual(len(fake_status_codes), len(request_log))
for request_args, request_kwargs in request_log:
ip, part, method, path, headers, qs, ssl = request_args
self.assertEqual(method, 'PUT')
self.assertEqual(headers['X-Backend-Storage-Policy-Index'],
str(int(policy)))
self.assertEqual(daemon.logger.get_increment_counts(),
{'successes': 1, 'unlinks': 1, 'async_pendings': 1})
Contributor: kirubakk, Project: swift, Lines of code: 51, Source: test_updater.py
Example 3: setUp
def setUp(self):
self.testdir = os.path.join(mkdtemp(), "tmp_test_object_auditor")
self.devices = os.path.join(self.testdir, "node")
self.rcache = os.path.join(self.testdir, "object.recon")
self.logger = FakeLogger()
rmtree(self.testdir, ignore_errors=1)
mkdirs(os.path.join(self.devices, "sda"))
os.mkdir(os.path.join(self.devices, "sdb"))
# policy 0
self.objects = os.path.join(self.devices, "sda", get_data_dir(POLICIES[0]))
self.objects_2 = os.path.join(self.devices, "sdb", get_data_dir(POLICIES[0]))
os.mkdir(self.objects)
# policy 1
self.objects_p1 = os.path.join(self.devices, "sda", get_data_dir(POLICIES[1]))
self.objects_2_p1 = os.path.join(self.devices, "sdb", get_data_dir(POLICIES[1]))
os.mkdir(self.objects_p1)
self.parts = self.parts_p1 = {}
for part in ["0", "1", "2", "3"]:
self.parts[part] = os.path.join(self.objects, part)
self.parts_p1[part] = os.path.join(self.objects_p1, part)
os.mkdir(os.path.join(self.objects, part))
os.mkdir(os.path.join(self.objects_p1, part))
self.conf = dict(devices=self.devices, mount_check="false", object_size_stats="10,100,1024,10240")
self.df_mgr = DiskFileManager(self.conf, self.logger)
# diskfiles for policy 0, 1
self.disk_file = self.df_mgr.get_diskfile("sda", "0", "a", "c", "o", policy=POLICIES[0])
self.disk_file_p1 = self.df_mgr.get_diskfile("sda", "0", "a", "c", "o", policy=POLICIES[1])
Contributor: renanalan, Project: swift, Lines of code: 31, Source: test_auditor.py
Example 4: setUp
def setUp(self):
self.testdir = os.path.join(mkdtemp(), 'tmp_test_object_auditor')
self.devices = os.path.join(self.testdir, 'node')
self.rcache = os.path.join(self.testdir, 'object.recon')
self.logger = FakeLogger()
rmtree(self.testdir, ignore_errors=1)
mkdirs(os.path.join(self.devices, 'sda'))
os.mkdir(os.path.join(self.devices, 'sdb'))
# policy 0
self.objects = os.path.join(self.devices, 'sda', get_data_dir(0))
self.objects_2 = os.path.join(self.devices, 'sdb', get_data_dir(0))
os.mkdir(self.objects)
# policy 1
self.objects_p1 = os.path.join(self.devices, 'sda', get_data_dir(1))
self.objects_2_p1 = os.path.join(self.devices, 'sdb', get_data_dir(1))
os.mkdir(self.objects_p1)
self.parts = self.parts_p1 = {}
for part in ['0', '1', '2', '3']:
self.parts[part] = os.path.join(self.objects, part)
self.parts_p1[part] = os.path.join(self.objects_p1, part)
os.mkdir(os.path.join(self.objects, part))
os.mkdir(os.path.join(self.objects_p1, part))
self.conf = dict(
devices=self.devices,
mount_check='false',
object_size_stats='10,100,1024,10240')
self.df_mgr = DiskFileManager(self.conf, self.logger)
# diskfiles for policy 0, 1
self.disk_file = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o', 0)
self.disk_file_p1 = self.df_mgr.get_diskfile('sda', '0', 'a', 'c',
'o', 1)
Contributor: AsherBond, Project: swift, Lines of code: 35, Source: test_auditor.py
Example 5: _get_object_info
def _get_object_info(self, account, container, obj, number):
obj_conf = self.configs['object-server']
config_path = obj_conf[number]
options = utils.readconf(config_path, 'app:object-server')
swift_dir = options.get('swift_dir', '/etc/swift')
ring = POLICIES.get_object_ring(int(self.policy), swift_dir)
part, nodes = ring.get_nodes(account, container, obj)
for node in nodes:
# assumes one to one mapping
if node['port'] == int(options.get('bind_port')):
device = node['device']
break
else:
return None
mgr = DiskFileManager(options, get_logger(options))
disk_file = mgr.get_diskfile(device, part, account, container, obj,
self.policy)
info = disk_file.read_metadata()
return info
Contributor: clayg, Project: swift, Lines of code: 19, Source: test_object_metadata_replication.py
Example 6: test_obj_put_legacy_updates
def test_obj_put_legacy_updates(self):
ts = (normalize_timestamp(t) for t in itertools.count(int(time())))
policy = POLICIES.get_by_index(0)
# setup updater
conf = {"devices": self.devices_dir, "mount_check": "false", "swift_dir": self.testdir}
async_dir = os.path.join(self.sda1, get_async_dir(policy.idx))
os.mkdir(async_dir)
account, container, obj = "a", "c", "o"
# write an async
for op in ("PUT", "DELETE"):
self.logger._clear()
daemon = object_updater.ObjectUpdater(conf, logger=self.logger)
dfmanager = DiskFileManager(conf, daemon.logger)
# don't include storage-policy-index in headers_out pickle
headers_out = swob.HeaderKeyDict(
{
"x-size": 0,
"x-content-type": "text/plain",
"x-etag": "d41d8cd98f00b204e9800998ecf8427e",
"x-timestamp": ts.next(),
}
)
data = {"op": op, "account": account, "container": container, "obj": obj, "headers": headers_out}
dfmanager.pickle_async_update(self.sda1, account, container, obj, data, ts.next(), policy.idx)
request_log = []
def capture(*args, **kwargs):
request_log.append((args, kwargs))
# run once
fake_status_codes = [200, 200, 200]
with mocked_http_conn(*fake_status_codes, give_connect=capture):
daemon.run_once()
self.assertEqual(len(fake_status_codes), len(request_log))
for request_args, request_kwargs in request_log:
ip, part, method, path, headers, qs, ssl = request_args
self.assertEqual(method, op)
self.assertEqual(headers["X-Backend-Storage-Policy-Index"], str(policy.idx))
self.assertEqual(daemon.logger.get_increment_counts(), {"successes": 1, "unlinks": 1, "async_pendings": 1})
Contributor: benjkeller, Project: swift, Lines of code: 41, Source: test_updater.py
Example 7: setup
def setup(self, conf):
"""
Implementation specific setup. This method is called at the very end
by the constructor to allow a specific implementation to modify
existing attributes or add its own attributes.
:param conf: WSGI configuration parameter
"""
# Common on-disk hierarchy shared across account, container and object
# servers.
self._diskfile_mgr = DiskFileManager(conf, self.logger)
Contributor: yohsuke, Project: swift, Lines of code: 12, Source: server.py
Example 8: __init__
def __init__(self, conf):
self.conf = conf
self.logger = get_logger(conf, log_route='object-updater')
self.devices = conf.get('devices', '/srv/node')
self.ip = conf.get('md-server-ip', '127.0.0.1')
self.port = conf.get('md-server-port', '6090')
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
self.swift_dir = conf.get('swift_dir', '/etc/swift')
self.interval = int(conf.get('interval', 30))
#self.container_ring = None
#self.concurrency = int(conf.get('concurrency', 1))
#self.slowdown = float(conf.get('slowdown', 0.01))
#self.node_timeout = int(conf.get('node_timeout', 10))
#self.conn_timeout = float(conf.get('conn_timeout', .5))
self.last_time_ran = 0
self.diskfile_mgr = DiskFileManager(conf, self.logger)
Contributor: ucsc-hp-group, Project: swift, Lines of code: 16, Source: crawler.py
Example 9: __init__
def __init__(self, conf, logger=None):
# location/directory of the metadata database (meta.db)
self.location = conf.get('location', '/srv/node/sdb1/metadata/')
# path to the actual file
#self.db_file = os.path.join(self.location, 'meta.db')
self.logger = logger or get_logger(conf, log_route='metadata-server')
self.root = conf.get('devices', '/srv/node')
# workaround for device listings
self.node_count = conf.get('nodecount','8')
self.devicelist = []
for x in range(0,int(self.node_count)):
self.devicelist.append(conf.get('device'+str(x),''))
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
self.node_timeout = int(conf.get('node_timeout', 3))
self.conn_timeout = float(conf.get('node_timeout', 3))
replication_server = conf.get('replication_server', None)
if replication_server is not None:
replication_server = config_true_value(replication_server)
self.replication_server = replication_server
self.allowed_sync_hosts = [
h.strip()
for h in conf.get('allowed_sync_hosts', '127.0.0.1').split(',')
if h.strip()
]
self.replicator_rpc = ReplicatorRpc(
self.root,
DATADIR,
MetadataBroker,
self.mount_check,
logger=self.logger
)
self.diskfile_mgr = DiskFileManager(conf,self.logger)
self.db_ip = conf.get('db_ip', '127.0.0.1')
self.db_port = int(conf.get('db_port', 2424))
self.db_user = conf.get('db_user', 'root')
self.db_pw = conf.get('db_pw', 'root')
if config_true_value(conf.get('allow_versions', 'f')):
self.save_headers.append('x-versions-location')
swift.common.db.DB_PREALLOCATION = config_true_value(
conf.get('db_preallocation', 'f'))
self.broker = self._get_metadata_broker()#
self.broker.initialize()#
Contributor: 2015-ucsc-hp, Project: swift, Lines of code: 45, Source: server.py
Example 10: setup
def setup(self, conf):
"""
Implementation specific setup. This method is called at the very end
by the constructor to allow a specific implementation to modify
existing attributes or add its own attributes.
:param conf: WSGI configuration parameter
"""
# Common on-disk hierarchy shared across account, container and object
# servers.
self._diskfile_mgr = DiskFileManager(conf, self.logger)
# This is populated by global_conf_callback way below as the semaphore
# is shared by all workers.
if "replication_semaphore" in conf:
# The value was put in a list so it could get past paste
self.replication_semaphore = conf["replication_semaphore"][0]
else:
self.replication_semaphore = None
self.replication_failure_threshold = int(conf.get("replication_failure_threshold") or 100)
self.replication_failure_ratio = float(conf.get("replication_failure_ratio") or 1.0)
Contributor: steveruckdashel, Project: swift, Lines of code: 21, Source: server.py
Example 11: setUp
def setUp(self):
self.testdir = os.path.join(mkdtemp(), 'tmp_test_object_auditor')
self.devices = os.path.join(self.testdir, 'node')
self.logger = FakeLogger()
rmtree(self.testdir, ignore_errors=1)
mkdirs(os.path.join(self.devices, 'sda'))
self.objects = os.path.join(self.devices, 'sda', 'objects')
os.mkdir(os.path.join(self.devices, 'sdb'))
self.objects_2 = os.path.join(self.devices, 'sdb', 'objects')
os.mkdir(self.objects)
self.parts = {}
for part in ['0', '1', '2', '3']:
self.parts[part] = os.path.join(self.objects, part)
os.mkdir(os.path.join(self.objects, part))
self.conf = dict(
devices=self.devices,
mount_check='false',
object_size_stats='10,100,1024,10240')
self.df_mgr = DiskFileManager(self.conf, self.logger)
self.disk_file = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o')
Contributor: TheUtils, Project: swift, Lines of code: 23, Source: test_auditor.py
Example 12: ObjectController
class ObjectController(object):
"""Implements the WSGI application for the Swift Object Server."""
def __init__(self, conf, logger=None):
"""
Creates a new WSGI application for the Swift Object Server. An
example configuration is given at
<source-dir>/etc/object-server.conf-sample or
/etc/swift/object-server.conf-sample.
"""
self.logger = logger or get_logger(conf, log_route='object-server')
self.node_timeout = int(conf.get('node_timeout', 3))
self.conn_timeout = float(conf.get('conn_timeout', 0.5))
self.client_timeout = int(conf.get('client_timeout', 60))
self.disk_chunk_size = int(conf.get('disk_chunk_size', 65536))
self.network_chunk_size = int(conf.get('network_chunk_size', 65536))
self.log_requests = config_true_value(conf.get('log_requests', 'true'))
self.max_upload_time = int(conf.get('max_upload_time', 86400))
self.slow = int(conf.get('slow', 0))
self.keep_cache_private = \
config_true_value(conf.get('keep_cache_private', 'false'))
replication_server = conf.get('replication_server', None)
if replication_server is not None:
replication_server = config_true_value(replication_server)
self.replication_server = replication_server
default_allowed_headers = '''
content-disposition,
content-encoding,
x-delete-at,
x-object-manifest,
x-static-large-object,
'''
extra_allowed_headers = [
header.strip().lower() for header in conf.get(
'allowed_headers', default_allowed_headers).split(',')
if header.strip()
]
self.allowed_headers = set()
for header in extra_allowed_headers:
if header not in DATAFILE_SYSTEM_META:
self.allowed_headers.add(header)
self.expiring_objects_account = \
(conf.get('auto_create_account_prefix') or '.') + \
(conf.get('expiring_objects_account_name') or 'expiring_objects')
self.expiring_objects_container_divisor = \
int(conf.get('expiring_objects_container_divisor') or 86400)
# Initialization was successful, so now apply the network chunk size
# parameter as the default read / write buffer size for the network
# sockets.
#
# NOTE WELL: This is a class setting, so until we get set this on a
# per-connection basis, this affects reading and writing on ALL
# sockets, those between the proxy servers and external clients, and
# those between the proxy servers and the other internal servers.
#
# ** Because the primary motivation for this is to optimize how data
# is written back to the proxy server, we could use the value from the
# disk_chunk_size parameter. However, it affects all created sockets
# using this class so we have chosen to tie it to the
# network_chunk_size parameter value instead.
socket._fileobject.default_bufsize = self.network_chunk_size
# Provide further setup specific to an object server implementation.
self.setup(conf)
def setup(self, conf):
"""
Implementation specific setup. This method is called at the very end
by the constructor to allow a specific implementation to modify
existing attributes or add its own attributes.
:param conf: WSGI configuration parameter
"""
# Common on-disk hierarchy shared across account, container and object
# servers.
self._diskfile_mgr = DiskFileManager(conf, self.logger)
# This is populated by global_conf_callback way below as the semaphore
# is shared by all workers.
if 'replication_semaphore' in conf:
# The value was put in a list so it could get past paste
self.replication_semaphore = conf['replication_semaphore'][0]
else:
self.replication_semaphore = None
self.replication_failure_threshold = int(
conf.get('replication_failure_threshold') or 100)
self.replication_failure_ratio = float(
conf.get('replication_failure_ratio') or 1.0)
def get_diskfile(self, device, partition, account, container, obj,
**kwargs):
"""
Utility method for instantiating a DiskFile object supporting a given
REST API.
An implementation of the object server that wants to use a different
DiskFile class would simply over-ride this method to provide that
behavior.
"""
#......... remaining code omitted .........
Contributor: HoO-Group, Project: swift, Lines of code: 101, Source: server.py
Example 13: TestAuditor
class TestAuditor(unittest.TestCase):
def setUp(self):
self.testdir = os.path.join(mkdtemp(), 'tmp_test_object_auditor')
self.devices = os.path.join(self.testdir, 'node')
self.rcache = os.path.join(self.testdir, 'object.recon')
self.logger = FakeLogger()
rmtree(self.testdir, ignore_errors=1)
mkdirs(os.path.join(self.devices, 'sda'))
self.objects = os.path.join(self.devices, 'sda', 'objects')
os.mkdir(os.path.join(self.devices, 'sdb'))
self.objects_2 = os.path.join(self.devices, 'sdb', 'objects')
os.mkdir(self.objects)
self.parts = {}
for part in ['0', '1', '2', '3']:
self.parts[part] = os.path.join(self.objects, part)
os.mkdir(os.path.join(self.objects, part))
self.conf = dict(
devices=self.devices,
mount_check='false',
object_size_stats='10,100,1024,10240')
self.df_mgr = DiskFileManager(self.conf, self.logger)
self.disk_file = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o')
def tearDown(self):
rmtree(os.path.dirname(self.testdir), ignore_errors=1)
unit.xattr_data = {}
def test_object_audit_extra_data(self):
auditor_worker = auditor.AuditorWorker(self.conf, self.logger,
self.rcache, self.devices)
data = '0' * 1024
etag = md5()
with self.disk_file.create() as writer:
writer.write(data)
etag.update(data)
etag = etag.hexdigest()
timestamp = str(normalize_timestamp(time.time()))
metadata = {
'ETag': etag,
'X-Timestamp': timestamp,
'Content-Length': str(os.fstat(writer._fd).st_size),
}
writer.put(metadata)
pre_quarantines = auditor_worker.quarantines
auditor_worker.object_audit(
AuditLocation(self.disk_file._datadir, 'sda', '0'))
self.assertEquals(auditor_worker.quarantines, pre_quarantines)
os.write(writer._fd, 'extra_data')
auditor_worker.object_audit(
AuditLocation(self.disk_file._datadir, 'sda', '0'))
self.assertEquals(auditor_worker.quarantines, pre_quarantines + 1)
def test_object_audit_diff_data(self):
auditor_worker = auditor.AuditorWorker(self.conf, self.logger,
self.rcache, self.devices)
data = '0' * 1024
etag = md5()
timestamp = str(normalize_timestamp(time.time()))
with self.disk_file.create() as writer:
writer.write(data)
etag.update(data)
etag = etag.hexdigest()
metadata = {
'ETag': etag,
'X-Timestamp': timestamp,
'Content-Length': str(os.fstat(writer._fd).st_size),
}
writer.put(metadata)
pre_quarantines = auditor_worker.quarantines
# remake so it will have metadata
self.disk_file = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o')
auditor_worker.object_audit(
AuditLocation(self.disk_file._datadir, 'sda', '0'))
self.assertEquals(auditor_worker.quarantines, pre_quarantines)
etag = md5()
etag.update('1' + '0' * 1023)
etag = etag.hexdigest()
metadata['ETag'] = etag
with self.disk_file.create() as writer:
writer.write(data)
writer.put(metadata)
auditor_worker.object_audit(
AuditLocation(self.disk_file._datadir, 'sda', '0'))
self.assertEquals(auditor_worker.quarantines, pre_quarantines + 1)
def test_object_audit_no_meta(self):
timestamp = str(normalize_timestamp(time.time()))
path = os.path.join(self.disk_file._datadir, timestamp + '.data')
mkdirs(self.disk_file._datadir)
fp = open(path, 'w')
#......... remaining code omitted .........
Contributor: brightbox, Project: swift, Lines of code: 101, Source: test_auditor.py
Example 14: TestAuditor
class TestAuditor(unittest.TestCase):
def setUp(self):
self.testdir = os.path.join(mkdtemp(), "tmp_test_object_auditor")
self.devices = os.path.join(self.testdir, "node")
self.rcache = os.path.join(self.testdir, "object.recon")
self.logger = FakeLogger()
rmtree(self.testdir, ignore_errors=1)
mkdirs(os.path.join(self.devices, "sda"))
os.mkdir(os.path.join(self.devices, "sdb"))
# policy 0
self.objects = os.path.join(self.devices, "sda", get_data_dir(POLICIES[0]))
self.objects_2 = os.path.join(self.devices, "sdb", get_data_dir(POLICIES[0]))
os.mkdir(self.objects)
# policy 1
self.objects_p1 = os.path.join(self.devices, "sda", get_data_dir(POLICIES[1]))
self.objects_2_p1 = os.path.join(self.devices, "sdb", get_data_dir(POLICIES[1]))
os.mkdir(self.objects_p1)
self.parts = self.parts_p1 = {}
for part in ["0", "1", "2", "3"]:
self.parts[part] = os.path.join(self.objects, part)
self.parts_p1[part] = os.path.join(self.objects_p1, part)
os.mkdir(os.path.join(self.objects, part))
os.mkdir(os.path.join(self.objects_p1, part))
self.conf = dict(devices=self.devices, mount_check="false", object_size_stats="10,100,1024,10240")
self.df_mgr = DiskFileManager(self.conf, self.logger)
# diskfiles for policy 0, 1
self.disk_file = self.df_mgr.get_diskfile("sda", "0", "a", "c", "o", policy=POLICIES[0])
self.disk_file_p1 = self.df_mgr.get_diskfile("sda", "0", "a", "c", "o", policy=POLICIES[1])
def tearDown(self):
rmtree(os.path.dirname(self.testdir), ignore_errors=1)
unit.xattr_data = {}
def test_worker_conf_parms(self):
def check_common_defaults():
self.assertEquals(auditor_worker.max_bytes_per_second, 10000000)
self.assertEquals(auditor_worker.log_time, 3600)
# test default values
conf = dict(devices=self.devices, mount_check="false", object_size_stats="10,100,1024,10240")
auditor_worker = auditor.AuditorWorker(conf, self.logger, self.rcache, self.devices)
check_common_defaults()
self.assertEquals(auditor_worker.diskfile_mgr.disk_chunk_size, 65536)
self.assertEquals(auditor_worker.max_files_per_second, 20)
self.assertEquals(auditor_worker.zero_byte_only_at_fps, 0)
# test specified audit value overrides
conf.update({"disk_chunk_size": 4096})
auditor_worker = auditor.AuditorWorker(conf, self.logger, self.rcache, self.devices, zero_byte_only_at_fps=50)
check_common_defaults()
self.assertEquals(auditor_worker.diskfile_mgr.disk_chunk_size, 4096)
self.assertEquals(auditor_worker.max_files_per_second, 50)
self.assertEquals(auditor_worker.zero_byte_only_at_fps, 50)
def test_object_audit_extra_data(self):
def run_tests(disk_file):
auditor_worker = auditor.AuditorWorker(self.conf, self.logger, self.rcache, self.devices)
data = "0" * 1024
etag = md5()
with disk_file.create() as writer:
writer.write(data)
etag.update(data)
etag = etag.hexdigest()
timestamp = str(normalize_timestamp(time.time()))
metadata = {"ETag": etag, "X-Timestamp": timestamp, "Content-Length": str(os.fstat(writer._fd).st_size)}
writer.put(metadata)
pre_quarantines = auditor_worker.quarantines
auditor_worker.object_audit(AuditLocation(disk_file._datadir, "sda", "0", policy=POLICIES.legacy))
self.assertEquals(auditor_worker.quarantines, pre_quarantines)
os.write(writer._fd, "extra_data")
auditor_worker.object_audit(AuditLocation(disk_file._datadir, "sda", "0", policy=POLICIES.legacy))
self.assertEquals(auditor_worker.quarantines, pre_quarantines + 1)
run_tests(self.disk_file)
run_tests(self.disk_file_p1)
def test_object_audit_diff_data(self):
auditor_worker = auditor.AuditorWorker(self.conf, self.logger, self.rcache, self.devices)
data = "0" * 1024
etag = md5()
timestamp = str(normalize_timestamp(time.time()))
with self.disk_file.create() as writer:
writer.write(data)
etag.update(data)
etag = etag.hexdigest()
metadata = {"ETag": etag, "X-Timestamp": timestamp, "Content-Length": str(os.fstat(writer._fd).st_size)}
writer.put(metadata)
pre_quarantines = auditor_worker.quarantines
# remake so it will have metadata
self.disk_file = self.df_mgr.get_diskfile("sda", "0", "a", "c", "o", policy=POLICIES.legacy)
auditor_worker.object_audit(AuditLocation(self.disk_file._datadir, "sda", "0", policy=POLICIES.legacy))
#......... remaining code omitted .........
Contributor: renanalan, Project: swift, Lines of code: 101, Source: test_auditor.py
Example 15: TestAuditor
class TestAuditor(unittest.TestCase):
def setUp(self):
self.testdir = os.path.join(mkdtemp(), 'tmp_test_object_auditor')
self.devices = os.path.join(self.testdir, 'node')
self.rcache = os.path.join(self.testdir, 'object.recon')
self.logger = FakeLogger()
rmtree(self.testdir, ignore_errors=1)
mkdirs(os.path.join(self.devices, 'sda'))
os.mkdir(os.path.join(self.devices, 'sdb'))
# policy 0
self.objects = os.path.join(self.devices, 'sda',
get_data_dir(POLICIES[0]))
self.objects_2 = os.path.join(self.devices, 'sdb',
get_data_dir(POLICIES[0]))
os.mkdir(self.objects)
# policy 1
self.objects_p1 = os.path.join(self.devices, 'sda',
get_data_dir(POLICIES[1]))
self.objects_2_p1 = os.path.join(self.devices, 'sdb',
get_data_dir(POLICIES[1]))
os.mkdir(self.objects_p1)
# policy 2
self.objects_p2 = os.path.join(self.devices, 'sda',
get_data_dir(POLICIES[2]))
self.objects_2_p2 = os.path.join(self.devices, 'sdb',
get_data_dir(POLICIES[2]))
os.mkdir(self.objects_p2)
self.parts = {}
self.parts_p1 = {}
self.parts_p2 = {}
for part in ['0', '1', '2', '3']:
self.parts[part] = os.path.join(self.objects, part)
self.parts_p1[part] = os.path.join(self.objects_p1, part)
self.parts_p2[part] = os.path.join(self.objects_p2, part)
os.mkdir(os.path.join(self.objects, part))
os.mkdir(os.path.join(self.objects_p1, part))
os.mkdir(os.path.join(self.objects_p2, part))
self.conf = dict(
devices=self.devices,
mount_check='false',
object_size_stats='10,100,1024,10240')
self.df_mgr = DiskFileManager(self.conf, self.logger)
self.ec_df_mgr = ECDiskFileManager(self.conf, self.logger)
# diskfiles for policy 0, 1, 2
self.disk_file = self.df_mgr.get_diskfile('sda', '0', 'a', 'c', 'o',
policy=POLICIES[0])
self.disk_file_p1 = self.df_mgr.get_diskfile('sda', '0', 'a', 'c',
'o', policy=POLICIES[1])
self.disk_file_ec = self.ec_df_mgr.get_diskfile(
'sda', '0', 'a', 'c', 'o', policy=POLICIES[2], frag_index=1)
def tearDown(self):
rmtree(os.path.dirname(self.testdir), ignore_errors=1)
unit.xattr_data = {}
def test_worker_conf_parms(self):
def check_common_defaults():
self.assertEqual(auditor_worker.max_bytes_per_second, 10000000)
self.assertEqual(auditor_worker.log_time, 3600)
# test default values
conf = dict(
devices=self.devices,
mount_check='false',
object_size_stats='10,100,1024,10240')
auditor_worker = auditor.AuditorWorker(conf, self.logger,
self.rcache, self.devices)
check_common_defaults()
for policy in POLICIES:
mgr = auditor_worker.diskfile_router[policy]
self.assertEqual(mgr.disk_chunk_size, 65536)
self.assertEqual(auditor_worker.max_files_per_second, 20)
self.assertEqual(auditor_worker.zero_byte_only_at_fps, 0)
# test specified audit value overrides
conf.update({'disk_chunk_size': 4096})
auditor_worker = auditor.AuditorWorker(conf, self.logger,
self.rcache, self.devices,
zero_byte_only_at_fps=50)
check_common_defaults()
for policy in POLICIES:
mgr = auditor_worker.diskfile_router[policy]
self.assertEqual(mgr.disk_chunk_size, 4096)
self.assertEqual(auditor_worker.max_files_per_second, 50)
self.assertEqual(auditor_worker.zero_byte_only_at_fps, 50)
def test_object_audit_extra_data(self):
def run_tests(disk_file):
auditor_worker = auditor.AuditorWorker(self.conf, self.logger,
self.rcache, self.devices)
data = '0' * 1024
etag = md5()
with disk_file.create() as writer:
writer.write(data)
etag.update(data)
#......... remaining code omitted .........
Contributor: HanJX, Project: swift, Lines of code: 101, Source: test_auditor.py
Example 16: TestAuditor
class TestAuditor(unittest.TestCase):
def setUp(self):
self.testdir = os.path.join(mkdtemp(), "tmp_test_object_auditor")
self.devices = os.path.join(self.testdir, "node")
self.rcache = os.path.join(self.testdir, "object.recon")
self.logger = FakeLogger()
rmtree(self.testdir, ignore_errors=1)
mkdirs(os.path.join(self.devices, "sda"))
self.objects = os.path.join(self.devices, "sda", "objects")
os.mkdir(os.path.join(self.devices, "sdb"))
self.objects_2 = os.path.join(self.devices, "sdb", "objects")
os.mkdir(self.objects)
self.parts = {}
for part in ["0", "1", "2", "3"]:
self.parts[part] = os.path.join(self.objects, part)
os.mkdir(os.path.join(self.objects, part))
self.conf = dict(devices=self.devices, mount_check="false", object_size_stats="10,100,1024,10240")
self.df_mgr = DiskFileManager(self.conf, self.logger)
self.disk_file = self.df_mgr.get_diskfile("sda", "0", "a", "c", "o")
def tearDown(self):
rmtree(os.path.dirname(self.testdir), ignore_errors=1)
unit.xattr_data = {}
def test_object_audit_extra_data(self):
auditor_worker = auditor.AuditorWorker(self.conf, self.logger, self.rcache, self.devices)
data = "0" * 1024
etag = md5()
with self.disk_file.create() as writer:
writer.write(data)
etag.update(data)
etag = etag.hexdigest()
timestamp = str(normalize_timestamp(time.time()))
metadata = {"ETag": etag, "X-Timestamp": timestamp, "Content-Length": str(os.fstat(writer._fd).st_size)}
writer.put(metadata)
pre_quarantines = auditor_worker.quarantines
auditor_worker.object_audit(AuditLocation(self.disk_file._datadir, "sda", "0"))
self.assertEquals(auditor_worker.quarantines, pre_quarantines)
os.write(writer._fd, "extra_data")
auditor_worker.object_audit(AuditLocation(self.disk_file._datadir, "sda", "0"))
self.assertEquals(auditor_worker.quarantines, pre_quarantines + 1)
def test_object_audit_diff_data(self):
auditor_worker = auditor.AuditorWorker(self.conf, self.logger, self.rcache, self.devices)
data = "0" * 1024
etag = md5()
timestamp = str(normalize_timestamp(time.time()))
with self.disk_file.create() as writer:
writer.write(data)
etag.update(data)
etag = etag.hexdigest()
metadata = {"ETag": etag, "X-Timestamp": timestamp, "Content-Length": str(os.fstat(writer._fd).st_size)}
writer.put(metadata)
pre_quarantines = auditor_worker.quarantines
# remake so it will have metadata
self.disk_file = self.df_mgr.get_diskfile("sda", "0", "a", "c", "o")
auditor_worker.object_audit(AuditLocation(self.disk_file._datadir, "sda", "0"))
self.assertEquals(auditor_worker.quarantines, pre_quarantines)
etag = md5()
etag.update("1" + "0" * 1023)
etag = etag.hexdigest()
metadata["ETag"] = etag
with self.disk_file.create() as writer:
writer.write(data)
writer.put(metadata)
auditor_worker.object_audit(AuditLocation(self.disk_file._datadir, "sda", "0"))
self.assertEquals(auditor_worker.quarantines, pre_quarantines + 1)
def test_object_audit_no_meta(self):
timestamp = str(normalize_timestamp(time.time()))
path = os.path.join(self.disk_file._datadir, timestamp + ".data")
mkdirs(self.disk_file._datadir)
fp = open(path, "w")
fp.write("0" * 1024)
fp.close()
invalidate_hash(os.path.dirname(self.disk_file._datadir))
auditor_worker = auditor.AuditorWorker(self.conf, self.logger, self.rcache, self.devices)
pre_quarantines = auditor_worker.quarantines
auditor_worker.object_audit(AuditLocation(self.disk_file._datadir, "sda", "0"))
self.assertEquals(auditor_worker.quarantines, pre_quarantines + 1)
def test_object_audit_will_not_swallow_errors_in_tests(self):
timestamp = str(normalize_timestamp(time.time()))
path = os.path.join(self.disk_file._datadir, timestamp + ".data")
mkdirs(self.disk_file._datadir)
with open(path, "w") as f:
write_metadata(f, {"name": "/a/c/o"})
auditor_worker = auditor.AuditorWorker(self.conf, self.logger, self.rcache, self.devices)
def blowup(*args):
raise NameError("tpyo")
#......... remaining code omitted .........
Contributor: nlevinki, Project: swift, Lines of code: 101, Source: test_auditor.py
Example 17: __init__
def __init__(self, app, conf, *args, **kwargs):
self.app = app
self.conf = conf
self.logger = get_logger(self.conf, log_route='restore')
self._diskfile_mgr = DiskFileManager(conf, self.logger)
Contributor: KoreaCloudObjectStorage, Project: swift-lifecycle-management, Lines of code: 5, Source: middleware.py