This article collects typical usage examples of the monitoring.start function in Python. If you are wondering what monitoring.start does, how to call it, or what real-world usage looks like, the curated examples below should help.
Twenty code examples of the start function are shown below, ordered by popularity.
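All of the examples share one basic pattern: start monitoring before the workload, run the workload, stop monitoring, then sync the collected results to the output directory. As a quick orientation, here is a minimal sketch of that pattern. The run_with_monitoring helper and the run_workload callable are placeholders for illustration and are not part of the cbt codebase; the monitoring.start/monitoring.stop and common.* calls mirror the usage shown in the examples below.

# Minimal sketch of the start/stop pattern used throughout the examples below.
# run_with_monitoring and run_workload are illustrative placeholders, not cbt APIs.
import time

import common
import monitoring


def run_with_monitoring(run_dir, out_dir, run_workload):
    common.make_remote_dir(run_dir)       # make sure the run directory exists
    monitoring.start(run_dir)             # start collecting monitoring data
    time.sleep(5)                         # brief settle time, as several examples do
    try:
        run_workload()                    # the benchmark body itself (placeholder)
    finally:
        monitoring.stop(run_dir)          # always stop the collectors
    common.sync_files('%s/*' % run_dir, out_dir)  # copy results back to the output dir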
Example 1: run
def run(self):
    super(CephTestRados, self).run()
    # Remake the pool
    self.mkpool()
    self.dropcaches()
    self.cluster.dump_config(self.run_dir)
    monitoring.start(self.run_dir)
    time.sleep(5)
    # Run the backfill testing thread if requested
    if 'recovery_test' in self.cluster.config:
        recovery_callback = self.recovery_callback
        self.cluster.create_recovery_test(self.run_dir, recovery_callback)
    logger.info('Running ceph_test_rados.')
    ps = []
    for i in xrange(1):
        p = common.pdsh(settings.getnodes('clients'), self.mkcmd())
        ps.append(p)
    for p in ps:
        p.wait()
    # If we were doing recovery, wait until it's done.
    if 'recovery_test' in self.cluster.config:
        self.cluster.wait_recovery_done()
    monitoring.stop(self.run_dir)
    # Finally, get the historic ops
    self.cluster.dump_historic_ops(self.run_dir)
    common.sync_files('%s/*' % self.run_dir, self.out_dir)
Developer: bengland2, Project: cbt, Lines: 30, Source: cephtestrados.py
Example 2: _run
def _run(self, mode, run_dir, out_dir):
    # We'll always drop caches for rados bench
    self.dropcaches()
    if self.concurrent_ops:
        concurrent_ops_str = "--concurrent-ios %s" % self.concurrent_ops
    op_size_str = "-b %s" % self.op_size
    common.make_remote_dir(run_dir)
    monitoring.start(run_dir)
    # Run rados bench
    print "Running radosbench read test."
    ps = []
    for i in xrange(self.concurrent_procs):
        out_file = "%s/output.%s" % (run_dir, i)
        objecter_log = "%s/objecter.%s.log" % (run_dir, i)
        p = common.pdsh(
            settings.cluster.get("clients"),
            "/usr/bin/rados -p rados-bench-%s %s bench %s %s %s --no-cleanup 2> %s > %s"
            % (i, op_size_str, self.time, mode, concurrent_ops_str, objecter_log, out_file),
        )
        ps.append(p)
    for p in ps:
        p.wait()
    monitoring.stop(run_dir)
    common.sync_files("%s/*" % run_dir, out_dir)
Developer: rosarion, Project: ceph-tools, Lines: 26, Source: radosbench.py
Example 3: _run
def _run(self, mode, run_dir, out_dir):
    # We'll always drop caches for rados bench
    self.dropcaches()
    if self.concurrent_ops:
        concurrent_ops_str = '--concurrent-ios %s' % self.concurrent_ops
    op_size_str = '-b %s' % self.op_size
    common.make_remote_dir(run_dir)
    # dump the cluster config
    common.dump_config(run_dir)
    monitoring.start(run_dir)
    # Run rados bench
    print 'Running radosbench read test.'
    ps = []
    for i in xrange(self.concurrent_procs):
        out_file = '%s/output.%s' % (run_dir, i)
        objecter_log = '%s/objecter.%s.log' % (run_dir, i)
        p = common.pdsh(settings.getnodes('clients'), '/usr/bin/rados -p rados-bench-`hostname -s`-%s %s bench %s %s %s --no-cleanup 2> %s > %s' % (i, op_size_str, self.time, mode, concurrent_ops_str, objecter_log, out_file))
        ps.append(p)
    for p in ps:
        p.wait()
    monitoring.stop(run_dir)
    # Get the historic ops
    common.dump_historic_ops(run_dir)
    common.sync_files('%s/*' % run_dir, out_dir)
Developer: cityyard, Project: ceph-tools, Lines: 29, Source: radosbench.py
Example 4: initialize
def initialize(self):
    super(Cosbench, self).initialize()
    logger.debug('Running cosbench and radosgw check.')
    self.prerun_check()
    logger.debug('Running scrub monitoring.')
    monitoring.start("%s/scrub_monitoring" % self.run_dir)
    self.cluster.check_scrub()
    monitoring.stop()
    logger.debug('Pausing for 60s for idle monitoring.')
    monitoring.start("%s/idle_monitoring" % self.run_dir)
    time.sleep(60)
    monitoring.stop()
    common.sync_files('%s' % self.run_dir, self.out_dir)
    # Create the run directory
    common.make_remote_dir(self.run_dir)
    conf = self.config
    if not self.config["template"]:
        self.config["template"] = "default"
    self.config["workload"] = self.choose_template("default", conf)
    self.prepare_xml(self.config["workload"])
    return True
Developer: VishwanathMaram, Project: cbt, Lines: 27, Source: cosbench.py
Example 5: run
def run(self):
    super(KvmRbdFio, self).run()
    # We'll always drop caches for rados bench
    self.dropcaches()
    monitoring.start(self.run_dir)
    time.sleep(5)
    names = ""
    for i in xrange(self.concurrent_procs):
        names += "--name=/srv/rbdfio-`hostname -s`-%d/cbt-kvmrbdfio " % i
    out_file = '%s/output' % self.run_dir
    pre_cmd = 'sudo fio --rw=read -ioengine=sync --numjobs=1 --bs=4M --runtime=1 --size %dM %s > /dev/null' % (self.vol_size * 9/10, names)
    fio_cmd = 'sudo fio --rw=%s -ioengine=%s --runtime=%s --numjobs=1 --direct=1 --bs=%dB --iodepth=%d --size %dM %s > %s' % (self.mode, self.ioengine, self.time, self.op_size, self.iodepth, self.vol_size * 9/10, names, out_file)
    print 'Attempting to populate fio files...'
    common.pdsh(settings.cluster.get('clients'), pre_cmd).communicate()
    print 'Running rbd fio %s test.' % self.mode
    common.pdsh(settings.cluster.get('clients'), fio_cmd).communicate()
    # ps = []
    # for i in xrange(self.concurrent_procs):
    #     out_file = '%s/output.%s' % (self.run_dir, i)
    #     p = common.pdsh(settings.cluster.get('clients'), 'sudo fio --rw=%s -ioengine=%s --runtime=%s --name=/srv/rbdfio-`hostname -s`-%d/cbt-rbdfio --numjobs=1 --direct=1 --bs=%dB --iodepth=%d --size %dM > %s' % (self.mode, self.ioengine, self.time, i, self.op_size, self.iodepth, self.vol_size * 9/10, out_file))
    #     ps.append(p)
    # for p in ps:
    #     p.wait()
    monitoring.stop(self.run_dir)
    common.sync_files('%s/*' % self.run_dir, self.out_dir)
Developer: grepory, Project: ceph-tools, Lines: 27, Source: kvmrbdfio.py
Example 6: run
def run(self):
    super(LibrbdFio, self).run()
    # We'll always drop caches for rados bench
    self.dropcaches()
    # dump the cluster config
    self.cluster.dump_config(self.run_dir)
    monitoring.start(self.run_dir)
    time.sleep(5)
    # Run the backfill testing thread if requested
    if "recovery_test" in self.cluster.config:
        recovery_callback = self.recovery_callback
        self.cluster.create_recovery_test(self.run_dir, recovery_callback)
    print "Running rbd fio %s test." % self.mode
    ps = []
    for i in xrange(self.volumes_per_client):
        fio_cmd = self.mkfiocmd(i)
        p = common.pdsh(settings.getnodes("clients"), fio_cmd)
        ps.append(p)
    for p in ps:
        p.wait()
    # If we were doing recovery, wait until it's done.
    if "recovery_test" in self.cluster.config:
        self.cluster.wait_recovery_done()
    monitoring.stop(self.run_dir)
    # Finally, get the historic ops
    self.cluster.dump_historic_ops(self.run_dir)
    common.sync_files("%s/*" % self.run_dir, self.out_dir)
Developer: athenahealth, Project: cbt, Lines: 35, Source: librbdfio.py
Example 7: initialize
def initialize(self):
    super(LibrbdFio, self).initialize()
    print 'Running scrub monitoring.'
    monitoring.start("%s/scrub_monitoring" % self.run_dir)
    self.cluster.check_scrub()
    monitoring.stop()
    print 'Pausing for 60s for idle monitoring.'
    monitoring.start("%s/idle_monitoring" % self.run_dir)
    time.sleep(60)
    monitoring.stop()
    common.sync_files('%s/*' % self.run_dir, self.out_dir)
    self.mkimages()
    # Create the run directory
    common.make_remote_dir(self.run_dir)
    # populate the fio files
    print 'Attempting to populate fio files...'
    pre_cmd = 'sudo %s --ioengine=rbd --clientname=admin --pool=%s --rbdname=cbt-librbdfio-`hostname -s` --invalidate=0 --rw=write --numjobs=%s --bs=4M --size %dM %s > /dev/null' % (self.cmd_path, self.poolname, self.numjobs, self.vol_size, self.names)
    common.pdsh(settings.getnodes('clients'), pre_cmd).communicate()
    return True
Developer: FrankLikuohao, Project: ceph-tools, Lines: 26, Source: librbdfio.py
Example 8: initialize
def initialize(self):
    super(LibrbdFio, self).initialize()
    # Clean and create the run directory
    common.clean_remote_dir(self.run_dir)
    common.make_remote_dir(self.run_dir)
    logger.info('Pausing for 60s for idle monitoring.')
    monitoring.start("%s/idle_monitoring" % self.run_dir)
    time.sleep(60)
    monitoring.stop()
    common.sync_files('%s/*' % self.run_dir, self.out_dir)
    self.mkimages()
    # populate the fio files
    ps = []
    logger.info('Attempting to populate fio files...')
    if (self.use_existing_volumes == False):
        for volnum in xrange(self.volumes_per_client):
            rbd_name = 'cbt-librbdfio-`%s`-%d' % (common.get_fqdn_cmd(), volnum)
            pre_cmd = 'sudo %s --ioengine=rbd --clientname=admin --pool=%s --rbdname=%s --invalidate=0 --rw=write --numjobs=%s --bs=4M --size %dM %s --output-format=%s > /dev/null' % (self.cmd_path, self.pool_name, rbd_name, self.numjobs, self.vol_size, self.names, self.fio_out_format)
            p = common.pdsh(settings.getnodes('clients'), pre_cmd)
            ps.append(p)
    for p in ps:
        p.wait()
    return True
Developer: bengland2, Project: cbt, Lines: 28, Source: librbdfio.py
Example 9: initialize
def initialize(self):
    super(RbdFio, self).initialize()
    logger.info('Running scrub monitoring.')
    monitoring.start("%s/scrub_monitoring" % self.run_dir)
    self.cluster.check_scrub()
    monitoring.stop()
    logger.info('Pausing for 60s for idle monitoring.')
    monitoring.start("%s/idle_monitoring" % self.run_dir)
    time.sleep(60)
    monitoring.stop()
    common.sync_files('%s/*' % self.run_dir, self.out_dir)
    self.mkimages()
    # Create the run directory
    common.make_remote_dir(self.run_dir)
    # populate the fio files
    logger.info('Attempting to populate fio files...')
    pre_cmd = 'sudo %s --ioengine=%s --rw=write --numjobs=%s --bs=4M --size %dM %s > /dev/null' % (self.cmd_path, self.ioengine, self.numjobs, self.vol_size*0.9, self.names)
    common.pdsh(settings.getnodes('clients'), pre_cmd).communicate()
    return True
Developer: sirspock, Project: cbt, Lines: 26, Source: rbdfio.py
Example 10: run
def run(self):
    # First create a credential file for each gateway
    self.mkcredfiles()
    # We'll always drop caches for rados bench
    self.dropcaches()
    # dump the cluster config
    self.cluster.dump_config(self.run_dir)
    # Run the backfill testing thread if requested
    if 'recovery_test' in self.cluster.config:
        recovery_callback = self.recovery_callback
        self.cluster.create_recovery_test(self.run_dir, recovery_callback)
    # Run getput
    monitoring.start(self.run_dir)
    logger.info('Running getput %s test.' % self.test)
    ps = []
    for i in xrange(0, len(self.auth_urls)):
        cmd = self.mkgetputcmd("%s/gw%02d.cred" % (self.run_dir, i), i)
        p = common.pdsh(settings.getnodes('clients'), cmd)
        ps.append(p)
    for p in ps:
        p.wait()
    monitoring.stop(self.run_dir)
    # If we were doing recovery, wait until it's done.
    if 'recovery_test' in self.cluster.config:
        self.cluster.wait_recovery_done()
    # Finally, get the historic ops
    self.cluster.dump_historic_ops(self.run_dir)
    common.sync_files('%s/*' % self.run_dir, self.out_dir)
Developer: bengland2, Project: cbt, Lines: 35, Source: getput.py
Example 11: run
def run(self):
    super(RbdFio, self).run()
    # Set client readahead
    self.set_client_param('read_ahead_kb', self.client_ra)
    # We'll always drop caches for rados bench
    self.dropcaches()
    common.make_remote_dir(self.run_dir)
    monitoring.start(self.run_dir)
    # Run rados bench
    print 'Running rbd fio %s test.' % self.mode
    names = ""
    for i in xrange(self.concurrent_procs):
        names += "--name=%s/mnt/rbdfio-`hostname -s`-%d/cbt-rbdfio " % (self.tmp_dir, i)
    out_file = '%s/output' % self.run_dir
    fio_cmd = 'sudo fio --rw=%s -ioengine=%s --runtime=%s --numjobs=1 --direct=1 --bs=%dB --iodepth=%d --size %dM %s > %s' % (self.mode, self.ioengine, self.time, self.op_size, self.iodepth, self.vol_size * 9/10, names, out_file)
    common.pdsh(settings.getnodes('clients'), fio_cmd).communicate()
    # ps = []
    # for i in xrange(self.concurrent_procs):
    #     out_file = '%s/output.%s' % (self.run_dir, i)
    #     p = common.pdsh(settings.cluster.get('clients'), 'sudo fio --rw=%s -ioengine=%s --runtime=%s --name=/srv/rbdfio-`hostname -s`-%d/cbt-rbdfio --numjobs=1 --direct=1 --bs=%dB --iodepth=%d --size %dM > %s' % (self.mode, self.ioengine, self.time, i, self.op_size, self.iodepth, self.vol_size * 9/10, out_file))
    #     ps.append(p)
    # for p in ps:
    #     p.wait()
    monitoring.stop(self.run_dir)
    common.sync_files('%s/*' % self.run_dir, self.out_dir)
Developer: hjwsm1989, Project: ceph-tools, Lines: 27, Source: rbdfio.py
Example 12: initialize
def initialize(self):
    super(RbdFio, self).initialize()
    self.cleanup()
    if not self.use_existing:
        self.cluster.initialize()
    self.cluster.dump_config(self.run_dir)
    # Setup the pools
    monitoring.start("%s/pool_monitoring" % self.run_dir)
    common.pdsh(settings.getnodes('head'), 'sudo ceph -c %s osd pool create rbdfio %d %d' % (self.tmp_conf, self.pgs, self.pgs)).communicate()
    common.pdsh(settings.getnodes('head'), 'sudo ceph -c %s osd pool set rbdfio size 1' % self.tmp_conf).communicate()
    print 'Checking Health after pool creation.'
    self.cluster.check_health()
    monitoring.stop()
    # Mount the filesystem
    common.pdsh(settings.getnodes('clients'), 'sudo modprobe rbd').communicate()
    for i in xrange(self.concurrent_procs):
        common.pdsh(settings.getnodes('clients'), 'sudo rbd -c %s create rbdfio/rbdfio-`hostname -s`-%d --size %d' % (self.tmp_conf, i, self.vol_size)).communicate()
        common.pdsh(settings.getnodes('clients'), 'sudo echo "%s %s rbdfio rbdfio-`hostname -s`-%d" | sudo tee /sys/bus/rbd/add && sudo /sbin/udevadm settle' % (self.rbdadd_mons, self.rbdadd_options, i)).communicate()
        common.pdsh(settings.getnodes('clients'), 'sudo mkfs.xfs /dev/rbd/rbdfio/rbdfio-`hostname -s`-%d' % i).communicate()
        common.pdsh(settings.getnodes('clients'), 'sudo mkdir -p -m0755 -- %s/mnt/rbdfio-`hostname -s`-%d' % (self.tmp_dir, i)).communicate()
        common.pdsh(settings.getnodes('clients'), 'sudo mount -t xfs -o noatime,inode64 /dev/rbd/rbdfio/rbdfio-`hostname -s`-%d %s/mnt/rbdfio-`hostname -s`-%d' % (i, self.tmp_dir, i)).communicate()
    print 'Running scrub monitoring'
    monitoring.start("%s/scrub_monitoring" % self.run_dir)
    self.cluster.check_scrub()
    monitoring.stop()
    # Create the run directory
    common.make_remote_dir(self.run_dir)
Developer: hjwsm1989, Project: ceph-tools, Lines: 32, Source: rbdfio.py
Example 13: initialize
def initialize(self):
    super(LibrbdFio, self).initialize()
    print "Running scrub monitoring."
    monitoring.start("%s/scrub_monitoring" % self.run_dir)
    self.cluster.check_scrub()
    monitoring.stop()
    print "Pausing for 60s for idle monitoring."
    monitoring.start("%s/idle_monitoring" % self.run_dir)
    time.sleep(60)
    monitoring.stop()
    common.sync_files("%s/*" % self.run_dir, self.out_dir)
    self.mkimages()
    # Create the run directory
    common.make_remote_dir(self.run_dir)
    # populate the fio files
    ps = []
    print "Attempting to populate fio files..."
    for i in xrange(self.volumes_per_client):
        pre_cmd = (
            "sudo %s --ioengine=rbd --clientname=admin --pool=%s --rbdname=cbt-librbdfio-`hostname -s`-%d --invalidate=0 --rw=write --numjobs=%s --bs=4M --size %dM %s > /dev/null"
            % (self.cmd_path, self.poolname, i, self.numjobs, self.vol_size, self.names)
        )
        p = common.pdsh(settings.getnodes("clients"), pre_cmd)
        ps.append(p)
    for p in ps:
        p.wait()
    return True
Developer: athenahealth, Project: cbt, Lines: 33, Source: librbdfio.py
Example 14: mkpools
def mkpools(self):
    monitoring.start("%s/pool_monitoring" % self.run_dir)
    for i in xrange(self.concurrent_procs):
        for node in settings.getnodes('clients').split(','):
            node = node.rpartition("@")[2]
            self.cluster.rmpool('rados-bench-%s-%s' % (node, i), self.pool_profile)
            self.cluster.mkpool('rados-bench-%s-%s' % (node, i), self.pool_profile)
    monitoring.stop()
Developer: FrankLikuohao, Project: ceph-tools, Lines: 8, Source: radosbench.py
Example 15: mkimages
def mkimages(self):
    monitoring.start("%s/pool_monitoring" % self.run_dir)
    self.cluster.rmpool(self.poolname, self.pool_profile)
    self.cluster.mkpool(self.poolname, self.pool_profile)
    for node in settings.getnodes('clients').split(','):
        node = node.rpartition("@")[2]
        common.pdsh(settings.getnodes('head'), '/usr/bin/rbd create cbt-librbdfio-%s --size %s --pool %s --order %s' % (node, self.vol_size, self.poolname, self.vol_order)).communicate()
    monitoring.stop()
Developer: FrankLikuohao, Project: ceph-tools, Lines: 8, Source: librbdfio.py
Example 16: run
def run(self):
    super(LibrbdFio, self).run()
    # We'll always drop caches for rados bench
    self.dropcaches()
    # dump the cluster config
    self.cluster.dump_config(self.run_dir)
    monitoring.start(self.run_dir)
    time.sleep(5)
    out_file = '%s/output' % self.run_dir
    fio_cmd = 'sudo %s --ioengine=rbd --clientname=admin --pool=%s --rbdname=cbt-librbdfio-`hostname -s` --invalidate=0' % (self.cmd_path_full, self.poolname)
    fio_cmd += ' --rw=%s' % self.mode
    if (self.mode == 'readwrite' or self.mode == 'randrw'):
        fio_cmd += ' --rwmixread=%s --rwmixwrite=%s' % (self.rwmixread, self.rwmixwrite)
    # fio_cmd += ' --ioengine=%s' % self.ioengine
    if self.time is not None:
        fio_cmd += ' --runtime=%s' % self.time
    if self.ramp is not None:
        fio_cmd += ' --ramp_time=%s' % self.ramp
    fio_cmd += ' --numjobs=%s' % self.numjobs
    fio_cmd += ' --direct=1'
    fio_cmd += ' --bs=%dB' % self.op_size
    fio_cmd += ' --iodepth=%d' % self.iodepth
    fio_cmd += ' --end_fsync=%s' % self.end_fsync
    # if self.vol_size:
    #     fio_cmd += ' -- size=%dM' % self.vol_size
    fio_cmd += ' --write_iops_log=%s' % out_file
    fio_cmd += ' --write_bw_log=%s' % out_file
    fio_cmd += ' --write_lat_log=%s' % out_file
    if 'recovery_test' in self.cluster.config:
        fio_cmd += ' --time_based'
    if self.random_distribution is not None:
        fio_cmd += ' --random_distribution=%s' % self.random_distribution
    if self.log_avg_msec is not None:
        fio_cmd += ' --log_avg_msec=%s' % self.log_avg_msec
    fio_cmd += ' %s > %s' % (self.names, out_file)
    # Run the backfill testing thread if requested
    if 'recovery_test' in self.cluster.config:
        recovery_callback = self.recovery_callback
        self.cluster.create_recovery_test(self.run_dir, recovery_callback)
    print 'Running rbd fio %s test.' % self.mode
    common.pdsh(settings.getnodes('clients'), fio_cmd).communicate()
    # If we were doing recovery, wait until it's done.
    if 'recovery_test' in self.cluster.config:
        self.cluster.wait_recovery_done()
    monitoring.stop(self.run_dir)
    # Finally, get the historic ops
    self.cluster.dump_historic_ops(self.run_dir)
    common.sync_files('%s/*' % self.run_dir, self.out_dir)
Developer: FrankLikuohao, Project: ceph-tools, Lines: 58, Source: librbdfio.py
Example 17: mkimages
def mkimages(self):
    monitoring.start("%s/pool_monitoring" % self.run_dir)
    self.cluster.rmpool(self.poolname, self.pool_profile)
    self.cluster.mkpool(self.poolname, self.pool_profile)
    common.pdsh(settings.getnodes('clients'), '/usr/bin/rbd create cbt-kernelrbdfio-`hostname -s` --size %s --pool %s' % (self.vol_size, self.poolname)).communicate()
    common.pdsh(settings.getnodes('clients'), 'sudo rbd map cbt-kernelrbdfio-`hostname -s` --pool %s --id admin' % self.poolname).communicate()
    common.pdsh(settings.getnodes('clients'), 'sudo mkfs.xfs /dev/rbd/cbt-kernelrbdfio/cbt-kernelrbdfio-`hostname -s`').communicate()
    common.pdsh(settings.getnodes('clients'), 'sudo mkdir -p -m0755 -- %s/cbt-kernelrbdfio-`hostname -s`' % self.cluster.mnt_dir).communicate()
    common.pdsh(settings.getnodes('clients'), 'sudo mount -t xfs -o noatime,inode64 /dev/rbd/cbt-kernelrbdfio/cbt-kernelrbdfio-`hostname -s` %s/cbt-kernelrbdfio-`hostname -s`' % self.cluster.mnt_dir).communicate()
    monitoring.stop()
Developer: sirspock, Project: cbt, Lines: 10, Source: rbdfio.py
Example 18: run
def run(self):
    super(KvmRbdFio, self).run()
    # Set client readahead
    self.set_client_param('read_ahead_kb', self.client_ra)
    clnts = settings.getnodes('clients')
    # We'll always drop caches for rados bench
    self.dropcaches()
    monitoring.start(self.run_dir)
    time.sleep(5)
    # Run the backfill testing thread if requested
    if 'recovery_test' in self.cluster.config:
        recovery_callback = self.recovery_callback
        self.cluster.create_recovery_test(self.run_dir, recovery_callback)
    logger.info('Starting rbd fio %s test.', self.mode)
    fio_process_list = []
    for i in range(self.concurrent_procs):
        b = self.block_devices[i % len(self.block_devices)]
        bnm = os.path.basename(b)
        mtpt = '/srv/rbdfio-`hostname -s`-%s' % bnm
        fiopath = os.path.join(mtpt, 'fio%d.img' % i)
        out_file = '%s/output.%d' % (self.run_dir, i)
        fio_cmd = 'sudo %s' % self.fio_cmd
        fio_cmd += ' --rw=%s' % self.mode
        if (self.mode == 'readwrite' or self.mode == 'randrw'):
            fio_cmd += ' --rwmixread=%s --rwmixwrite=%s' % (self.rwmixread, self.rwmixwrite)
        fio_cmd += ' --ioengine=%s' % self.ioengine
        fio_cmd += ' --runtime=%s' % self.time
        fio_cmd += ' --ramp_time=%s' % self.ramp
        if self.startdelay:
            fio_cmd += ' --startdelay=%s' % self.startdelay
        if self.rate_iops:
            fio_cmd += ' --rate_iops=%s' % self.rate_iops
        fio_cmd += ' --numjobs=%s' % self.numjobs
        fio_cmd += ' --direct=1'
        fio_cmd += ' --bs=%dB' % self.op_size
        fio_cmd += ' --iodepth=%d' % self.iodepth
        fio_cmd += ' --size=%dM' % self.vol_size
        fio_cmd += ' --write_iops_log=%s' % out_file
        fio_cmd += ' --write_bw_log=%s' % out_file
        fio_cmd += ' --write_lat_log=%s' % out_file
        if 'recovery_test' in self.cluster.config:
            fio_cmd += ' --time_based'
        fio_cmd += ' --name=%s > %s' % (fiopath, out_file)
        fio_process_list.append(common.pdsh(clnts, fio_cmd, continue_if_error=False))
    for p in fio_process_list:
        p.communicate()
    monitoring.stop(self.run_dir)
    logger.info('Finished rbd fio test')
    common.sync_files('%s/*' % self.run_dir, self.out_dir)
Developer: ASBishop, Project: cbt, Lines: 55, Source: kvmrbdfio.py
Example 19: _run
def _run(self, mode, run_dir, out_dir):
    # We'll always drop caches for rados bench
    self.dropcaches()
    if self.concurrent_ops:
        concurrent_ops_str = '--concurrent-ios %s' % self.concurrent_ops
    # determine rados version
    rados_version_str, err = common.pdsh(settings.getnodes('head'), '/usr/bin/rados -v').communicate()
    m = re.findall("version (\d+)", rados_version_str)
    rados_version = int(m[0])
    if mode in ['write'] or rados_version < 9:
        op_size_str = '-b %s' % self.op_size
    else:
        op_size_str = ''
    common.make_remote_dir(run_dir)
    # dump the cluster config
    self.cluster.dump_config(run_dir)
    # Run the backfill testing thread if requested
    if 'recovery_test' in self.cluster.config:
        recovery_callback = self.recovery_callback
        self.cluster.create_recovery_test(run_dir, recovery_callback)
    # Run rados bench
    monitoring.start(run_dir)
    logger.info('Running radosbench %s test.' % mode)
    ps = []
    for i in xrange(self.concurrent_procs):
        out_file = '%s/output.%s' % (run_dir, i)
        objecter_log = '%s/objecter.%s.log' % (run_dir, i)
        # default behavior is to use a single storage pool
        pool_name = self.pool
        run_name = '--run-name %s`hostname -s`-%s' % (self.object_set_id, i)
        if self.pool_per_proc:  # support previous behavior of 1 storage pool per rados process
            pool_name = 'rados-bench-`hostname -s`-%s' % i
            run_name = ''
        rados_bench_cmd = '%s -c %s -p %s bench %s %s %s %s %s --no-cleanup 2> %s > %s' % \
            (self.cmd_path_full, self.tmp_conf, pool_name, op_size_str, self.time, mode, concurrent_ops_str, run_name, objecter_log, out_file)
        p = common.pdsh(settings.getnodes('clients'), rados_bench_cmd)
        ps.append(p)
    for p in ps:
        p.wait()
    monitoring.stop(run_dir)
    # If we were doing recovery, wait until it's done.
    if 'recovery_test' in self.cluster.config:
        self.cluster.wait_recovery_done()
    # Finally, get the historic ops
    self.cluster.dump_historic_ops(run_dir)
    common.sync_files('%s/*' % run_dir, out_dir)
Developer: JevonQ, Project: cbt, Lines: 55, Source: radosbench.py
Example 20: run
def run(self):
    super(RbdFio, self).run()
    # Set client readahead
    self.set_client_param('read_ahead_kb', self.client_ra)
    # We'll always drop caches for rados bench
    self.dropcaches()
    monitoring.start(self.run_dir)
    # Run the backfill testing thread if requested
    if 'recovery_test' in self.cluster.config:
        recovery_callback = self.recovery_callback
        self.cluster.create_recovery_test(self.run_dir, recovery_callback)
    time.sleep(5)
    out_file = '%s/output' % self.run_dir
    fio_cmd = 'sudo %s' % (self.cmd_path_full)
    fio_cmd += ' --rw=%s' % self.mode
    if (self.mode == 'readwrite' or self.mode == 'randrw'):
        fio_cmd += ' --rwmixread=%s --rwmixwrite=%s' % (self.rwmixread, self.rwmixwrite)
    fio_cmd += ' --ioengine=%s' % self.ioengine
    if self.time is not None:
        fio_cmd += ' --runtime=%s' % self.time
    if self.ramp is not None:
        fio_cmd += ' --ramp_time=%s' % self.ramp
    fio_cmd += ' --numjobs=%s' % self.numjobs
    fio_cmd += ' --direct=1'
    fio_cmd += ' --bs=%dB' % self.op_size
    fio_cmd += ' --iodepth=%d' % self.iodepth
    if self.vol_size:
        fio_cmd += ' --size=%dM' % (int(self.vol_size) * 0.9)
    fio_cmd += ' --write_iops_log=%s' % out_file
    fio_cmd += ' --write_bw_log=%s' % out_file
    fio_cmd += ' --write_lat_log=%s' % out_file
    if 'recovery_test' in self.cluster.config:
        fio_cmd += ' --time_based'
    if self.random_distribution is not None:
        fio_cmd += ' --random_distribution=%s' % self.random_distribution
    fio_cmd += ' %s > %s' % (self.names, out_file)
    if self.log_avg_msec is not None:
        fio_cmd += ' --log_avg_msec=%s' % self.log_avg_msec
    logger.info('Running rbd fio %s test.', self.mode)
    common.pdsh(settings.getnodes('clients'), fio_cmd).communicate()
    # If we were doing recovery, wait until it's done.
    if 'recovery_test' in self.cluster.config:
        self.cluster.wait_recovery_done()
    monitoring.stop(self.run_dir)
    # Finally, get the historic ops
    self.cluster.dump_historic_ops(self.run_dir)
    common.sync_files('%s/*' % self.run_dir, self.out_dir)
Developer: sirspock, Project: cbt, Lines: 55, Source: rbdfio.py
Note: the monitoring.start examples in this article were compiled by 纯净天空 from source code hosted on GitHub and similar platforms. The snippets are taken from open-source projects and remain under the copyright of their original authors; consult each project's license before redistributing or reusing them. Do not republish without permission.