This article collects typical code examples of the Python function teuthology.misc.write_file. If you have been wondering what write_file does, how to call it, or what real-world usage looks like, the curated examples here should help.
Below are 20 code examples of the write_file function, sorted by popularity by default.
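Before the examples, a minimal sketch of the calling convention they all share may be useful. This is not the library's implementation, only a restatement of the pattern visible in the snippets on this page (which target the Python 2-era teuthology API): write_file(remote, path, data) writes data to path on a remote test node, where data may be a plain string or a file-like object such as StringIO, and remote is a node handle supplied by the framework. The helper ship_text and the import alias below are purely illustrative assumptions.
from StringIO import StringIO          # Python 2, as in the examples below

from teuthology import misc as teuthology   # alias mirrors the snippets' teuthology.write_file calls


def ship_text(remote, path, text):
    # Illustrative only: the two calling forms seen in the examples below,
    # once with a plain string and once with a file-like object.
    teuthology.write_file(remote, path, text)
    teuthology.write_file(remote=remote, path=path, data=StringIO(text))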
Example 1: _setup_mon
def _setup_mon(ctx, manager, remote, mon, name, data_path, conf_path):
# co-locate a new monitor on remote where an existing monitor is hosted
cluster = manager.cluster
remote.run(args=['sudo', 'mkdir', '-p', data_path])
keyring_path = '/etc/ceph/{cluster}.keyring'.format(
cluster=manager.cluster)
testdir = teuthology.get_testdir(ctx)
monmap_path = '{tdir}/{cluster}.monmap'.format(tdir=testdir,
cluster=cluster)
manager.raw_cluster_cmd('mon', 'getmap', '-o', monmap_path)
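    # getmap wrote the monmap on the controller; copy it to the target remote
    # (via get_file/write_file) when the two hosts differ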
if manager.controller != remote:
monmap = teuthology.get_file(manager.controller, monmap_path)
teuthology.write_file(remote, monmap_path, StringIO(monmap))
remote.run(
args=[
'sudo',
'ceph-mon',
'--cluster', cluster,
'--mkfs',
'-i', mon,
'--monmap', monmap_path,
'--keyring', keyring_path])
if manager.controller != remote:
teuthology.delete_file(remote, monmap_path)
# raw_cluster_cmd() is performed using sudo, so sudo here also.
teuthology.delete_file(manager.controller, monmap_path, sudo=True)
# update ceph.conf so that the ceph CLI is able to connect to the cluster
if conf_path:
ip = remote.ip_address
port = _get_next_port(ctx, ip, cluster)
mon_addr = '{ip}:{port}'.format(ip=ip, port=port)
ctx.ceph[cluster].conf[name] = {'mon addr': mon_addr}
write_conf(ctx, conf_path, cluster)
Contributor: Abhishekvrshny, Project: ceph, Lines of code: 33, Source file: mon_seesaw.py
Example 2: configure
def configure(ctx, config):
assert isinstance(config, dict)
log.info('Configuring s3-tests...')
for client, properties in config['clients'].iteritems():
s3tests_conf = config['s3tests_conf'][client]
if properties is not None and 'rgw_server' in properties:
host = None
for target, roles in zip(ctx.config['targets'].iterkeys(), ctx.config['roles']):
log.info('roles: ' + str(roles))
log.info('target: ' + str(target))
if properties['rgw_server'] in roles:
_, host = split_user(target)
assert host is not None, "Invalid client specified as the rgw_server"
s3tests_conf['DEFAULT']['host'] = host
else:
s3tests_conf['DEFAULT']['host'] = 'localhost'
(remote,) = ctx.cluster.only(client).remotes.keys()
remote.run(
args=[
'cd',
'/tmp/cephtest/s3-tests',
run.Raw('&&'),
'./bootstrap',
],
)
conf_fp = StringIO()
s3tests_conf.write(conf_fp)
teuthology.write_file(
remote=remote,
path='/tmp/cephtest/archive/s3-tests.{client}.conf'.format(client=client),
data=conf_fp.getvalue(),
)
yield
Contributor: tv42, Project: teuthology, Lines of code: 34, Source file: s3tests.py
Example 3: write_rotate_conf
def write_rotate_conf(ctx, daemons):
testdir = teuthology.get_testdir(ctx)
rotate_conf_path = os.path.join(os.path.dirname(__file__), 'logrotate.conf')
with file(rotate_conf_path, 'rb') as f:
conf = ""
for daemon, size in daemons.iteritems():
log.info('writing logrotate stanza for {daemon}'.format(daemon=daemon))
conf += f.read().format(daemon_type=daemon, max_size=size)
f.seek(0, 0)
for remote in ctx.cluster.remotes.iterkeys():
teuthology.write_file(remote=remote,
path='{tdir}/logrotate.ceph-test.conf'.format(tdir=testdir),
data=StringIO(conf)
)
remote.run(
args=[
'sudo',
'mv',
'{tdir}/logrotate.ceph-test.conf'.format(tdir=testdir),
'/etc/logrotate.d/ceph-test.conf',
run.Raw('&&'),
'sudo',
'chmod',
'0644',
'/etc/logrotate.d/ceph-test.conf',
run.Raw('&&'),
'sudo',
'chown',
'root.root',
'/etc/logrotate.d/ceph-test.conf'
]
)
remote.chcon('/etc/logrotate.d/ceph-test.conf',
'system_u:object_r:etc_t:s0')
Contributor: cooboos, Project: ceph-qa-suite, Lines of code: 35, Source file: ceph.py
Example 4: configure
def configure(ctx, config):
assert isinstance(config, dict)
log.info("Configuring testswift...")
testdir = teuthology.get_testdir(ctx)
for client, properties in config["clients"].iteritems():
log.info("client={c}".format(c=client))
log.info("config={c}".format(c=config))
testswift_conf = config["testswift_conf"][client]
if properties is not None and "rgw_server" in properties:
host = None
for target, roles in zip(ctx.config["targets"].iterkeys(), ctx.config["roles"]):
log.info("roles: " + str(roles))
log.info("target: " + str(target))
if properties["rgw_server"] in roles:
_, host = split_user(target)
assert host is not None, "Invalid client specified as the rgw_server"
testswift_conf["func_test"]["auth_host"] = host
else:
testswift_conf["func_test"]["auth_host"] = "localhost"
log.info(client)
(remote,) = ctx.cluster.only(client).remotes.keys()
remote.run(args=["cd", "{tdir}/swift".format(tdir=testdir), run.Raw("&&"), "./bootstrap"])
conf_fp = StringIO()
testswift_conf.write(conf_fp)
teuthology.write_file(
remote=remote,
path="{tdir}/archive/testswift.{client}.conf".format(tdir=testdir, client=client),
data=conf_fp.getvalue(),
)
yield
Contributor: athanatos, Project: teuthology, Lines of code: 31, Source file: swift.py
Example 5: write_core_site
def write_core_site(ctx, config):
coreSiteFile = "/tmp/cephtest/hadoop/conf/core-site.xml"
hadoopNodes = ctx.cluster.only(teuthology.is_type('hadoop'))
for remote, roles_for_host in hadoopNodes.remotes.iteritems():
# check the config to see if we should use hdfs or ceph
default_fs_string = ""
if config.get('hdfs'):
default_fs_string = 'hdfs://{master_ip}:54310'.format(master_ip=get_hadoop_master_ip(ctx))
else:
default_fs_string = 'ceph:///'
teuthology.write_file(remote, coreSiteFile,
'''<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>hadoop.tmp.dir</name>
<value>/tmp/hadoop/tmp</value>
</property>
<property>
<name>fs.default.name</name>
<value>{default_fs}</value>
</property>
<property>
<name>ceph.conf.file</name>
<value>/tmp/cephtest/ceph.conf</value>
</property>
</configuration>
'''.format(default_fs=default_fs_string))
log.info("wrote file: " + coreSiteFile + " to host: " + str(remote))
Contributor: dzafman, Project: teuthology, Lines of code: 34, Source file: hadoop.py
Example 6: write_mapred_site
def write_mapred_site(ctx):
"""
Add required entries to conf/mapred-site.xml
"""
mapred_site_file = "{tdir}/apache_hadoop/conf/mapred-site.xml".format(
tdir=teuthology.get_testdir(ctx))
master_ip = get_hadoop_master_ip(ctx)
log.info('adding host {remote} as jobtracker'.format(remote=master_ip))
hadoop_nodes = ctx.cluster.only(teuthology.is_type('hadoop'))
for remote in hadoop_nodes.remotes:
teuthology.write_file(remote, mapred_site_file,
'''<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>mapred.job.tracker</name>
<value>{remote}:54311</value>
</property>
</configuration>
'''.format(remote=master_ip))
log.info("wrote file: " + mapred_site_file + " to host: " + str(remote))
Contributor: AlfredChenxf, Project: teuthology, Lines of code: 25, Source file: hadoop.py
Example 7: write_rotate_conf
def write_rotate_conf(ctx, daemons):
testdir = teuthology.get_testdir(ctx)
rotate_conf_path = os.path.join(os.path.dirname(__file__), "logrotate.conf")
with file(rotate_conf_path, "rb") as f:
conf = ""
for daemon, size in daemons.iteritems():
log.info("writing logrotate stanza for {daemon}".format(daemon=daemon))
conf += f.read().format(daemon_type=daemon, max_size=size)
f.seek(0, 0)
for remote in ctx.cluster.remotes.iterkeys():
teuthology.write_file(
remote=remote, path="{tdir}/logrotate.ceph-test.conf".format(tdir=testdir), data=StringIO(conf)
)
remote.run(
args=[
"sudo",
"mv",
"{tdir}/logrotate.ceph-test.conf".format(tdir=testdir),
"/etc/logrotate.d/ceph-test.conf",
run.Raw("&&"),
"sudo",
"chmod",
"0644",
"/etc/logrotate.d/ceph-test.conf",
run.Raw("&&"),
"sudo",
"chown",
"root.root",
"/etc/logrotate.d/ceph-test.conf",
]
)
remote.chcon("/etc/logrotate.d/ceph-test.conf", "system_u:object_r:etc_t:s0")
Contributor: hjwsm1989, Project: ceph-qa-suite, Lines of code: 33, Source file: ceph.py
Example 8: write_mapred_site
def write_mapred_site(ctx):
mapredSiteFile = "{tdir}/apache_hadoop/conf/mapred-site.xml".format(tdir=teuthology.get_testdir(ctx))
master_ip = get_hadoop_master_ip(ctx)
log.info("adding host {remote} as jobtracker".format(remote=master_ip))
hadoopNodes = ctx.cluster.only(teuthology.is_type("hadoop"))
for remote, roles_for_host in hadoopNodes.remotes.iteritems():
teuthology.write_file(
remote,
mapredSiteFile,
"""<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>mapred.job.tracker</name>
<value>{remote}:54311</value>
</property>
</configuration>
""".format(
remote=master_ip
),
)
log.info("wrote file: " + mapredSiteFile + " to host: " + str(remote))
Contributor: athanatos, Project: teuthology, Lines of code: 26, Source file: hadoop.py
Example 9: write_master
def write_master(ctx):
mastersFile = "/tmp/cephtest/hadoop/conf/masters"
master = _get_master(ctx)
remote, _ = master
hadoopNodes = ctx.cluster.only(teuthology.is_type('hadoop'))
for remote, roles_for_host in hadoopNodes.remotes.iteritems():
teuthology.write_file(remote, mastersFile, '{remote}\n'.format(remote=remote.ssh.get_transport().getpeername()[0]))
log.info("wrote file: " + mastersFile + " to host: " + str(remote))
Contributor: dzafman, Project: teuthology, Lines of code: 10, Source file: hadoop.py
Example 10: setup
def setup(self):
super(CBT, self).setup()
self.first_mon = self.ctx.cluster.only(misc.get_first_mon(self.ctx, self.config)).remotes.keys()[0]
self.cbt_config = self.generate_cbt_config()
self.log.info('cbt configuration is %s', self.cbt_config)
self.cbt_dir = os.path.join(misc.get_archive_dir(self.ctx), 'cbt')
self.ctx.cluster.run(args=['mkdir', '-p', '-m0755', '--', self.cbt_dir])
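        # render the generated CBT configuration as YAML and place it on the first mon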
misc.write_file(self.first_mon, os.path.join(self.cbt_dir, 'cbt_config.yaml'),
yaml.safe_dump(self.cbt_config, default_flow_style=False))
self.checkout_cbt()
self.install_dependencies()
Contributor: xiaoxichen, Project: ceph, Lines of code: 11, Source file: cbt.py
Example 11: write_master
def write_master(ctx):
mastersFile = "{tdir}/apache_hadoop/conf/masters".format(tdir=teuthology.get_testdir(ctx))
master = _get_master(ctx)
master_remote, _ = master
hadoopNodes = ctx.cluster.only(teuthology.is_type("hadoop"))
for remote, roles_for_host in hadoopNodes.remotes.iteritems():
teuthology.write_file(
remote,
mastersFile,
"{master_host}\n".format(master_host=master_remote.ssh.get_transport().getpeername()[0]),
)
log.info("wrote file: " + mastersFile + " to host: " + str(remote))
Contributor: athanatos, Project: teuthology, Lines of code: 13, Source file: hadoop.py
Example 12: write_master
def write_master(ctx):
"""
Add required entries to conf/masters
These nodes host JobTrackers and Namenodes
"""
masters_file = "{tdir}/apache_hadoop/conf/masters".format(
tdir=teuthology.get_testdir(ctx))
master = _get_master(ctx)
master_remote, _ = master
hadoop_nodes = ctx.cluster.only(teuthology.is_type('hadoop'))
for remote in hadoop_nodes.remotes:
teuthology.write_file(remote, masters_file, '{master_host}\n'.format(
master_host=master_remote.ssh.get_transport().getpeername()[0]))
log.info("wrote file: " + masters_file + " to host: " + str(remote))
Contributor: AlfredChenxf, Project: teuthology, Lines of code: 15, Source file: hadoop.py
Example 13: cod_setup_remote_data
def cod_setup_remote_data(log, ctx, remote, NUM_OBJECTS, DATADIR, REP_NAME, DATALINECOUNT):
objects = range(1, NUM_OBJECTS + 1)
for i in objects:
NAME = REP_NAME + "{num}".format(num=i)
DDNAME = os.path.join(DATADIR, NAME)
remote.run(args=['rm', '-f', DDNAME ])
dataline = range(DATALINECOUNT)
data = "This is the replicated data for " + NAME + "\n"
DATA = ""
for _ in dataline:
DATA += data
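        # write the generated payload for this object to DDNAME on the remote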
teuthology.write_file(remote, DDNAME, DATA)
Contributor: athanatos, Project: ceph-qa-suite, Lines of code: 15, Source file: ceph_objectstore_tool.py
Example 14: write_hadoop_env
def write_hadoop_env(ctx, config):
hadoopEnvFile = "/tmp/cephtest/hadoop/conf/hadoop-env.sh"
hadoopNodes = ctx.cluster.only(teuthology.is_type('hadoop'))
for remote, roles_for_host in hadoopNodes.remotes.iteritems():
teuthology.write_file(remote, hadoopEnvFile,
'''export JAVA_HOME=/usr/lib/jvm/default-java
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/tmp/cephtest/binary/usr/local/lib:/usr/lib
export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:/tmp/cephtest/binary/usr/local/lib/libcephfs.jar:/tmp/cephtest/hadoop/build/hadoop-core*.jar
export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_NAMENODE_OPTS"
export HADOOP_SECONDARYNAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_SECONDARYNAMENODE_OPTS"
export HADOOP_DATANODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_DATANODE_OPTS"
export HADOOP_BALANCER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_BALANCER_OPTS"
export HADOOP_JOBTRACKER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_JOBTRACKER_OPTS"
''' )
log.info("wrote file: " + hadoopEnvFile + " to host: " + str(remote))
Contributor: dzafman, Project: teuthology, Lines of code: 16, Source file: hadoop.py
Example 15: _setup_calamari_cluster
def _setup_calamari_cluster(remote, restapi_remote):
"""
Add restapi db entry to the server.
"""
restapi_hostname = str(restapi_remote).split('@')[1]
sqlcmd = 'insert into ceph_cluster (name, api_base_url) ' \
'values ("{host}", "http://{host}:5000/api/v0.1/");'. \
format(host=restapi_hostname)
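    # stage the SQL statement on the remote, then pipe it into sqlite3 below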
teuthology.write_file(remote, '/tmp/create.cluster.sql', sqlcmd)
return remote.run(args=['cat',
'/tmp/create.cluster.sql',
run.Raw('|'),
'sudo',
'sqlite3',
'/opt/calamari/webapp/calamari/db.sqlite3'],
stdout=StringIO())
Contributor: AlfredChenxf, Project: teuthology, Lines of code: 16, Source file: calamari.py
Example 16: configure
def configure(ctx, config):
"""
Configure the s3-tests. This includes the running of the
bootstrap code and the updating of local conf files.
"""
assert isinstance(config, dict)
log.info('Configuring s3-roundtrip-tests...')
testdir = teuthology.get_testdir(ctx)
for client, properties in config['clients'].iteritems():
s3tests_conf = config['s3tests_conf'][client]
if properties is not None and 'rgw_server' in properties:
host = None
for target, roles in zip(ctx.config['targets'].iterkeys(), ctx.config['roles']):
log.info('roles: ' + str(roles))
log.info('target: ' + str(target))
if properties['rgw_server'] in roles:
_, host = split_user(target)
assert host is not None, "Invalid client specified as the rgw_server"
s3tests_conf['s3']['host'] = host
else:
s3tests_conf['s3']['host'] = 'localhost'
def_conf = s3tests_conf['DEFAULT']
s3tests_conf['s3'].setdefault('port', def_conf['port'])
s3tests_conf['s3'].setdefault('is_secure', def_conf['is_secure'])
(remote,) = ctx.cluster.only(client).remotes.keys()
remote.run(
args=[
'cd',
'{tdir}/s3-tests'.format(tdir=testdir),
run.Raw('&&'),
'./bootstrap',
],
)
conf_fp = StringIO()
conf = dict(
s3=s3tests_conf['s3'],
roundtrip=s3tests_conf['roundtrip'],
)
yaml.safe_dump(conf, conf_fp, default_flow_style=False)
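        # only the s3 and roundtrip sections are shipped, rendered as YAML into the remote archive directory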
teuthology.write_file(
remote=remote,
path='{tdir}/archive/s3roundtrip.{client}.config.yaml'.format(tdir=testdir, client=client),
data=conf_fp.getvalue(),
)
yield
Contributor: LalatenduMohanty, Project: teuthology, Lines of code: 47, Source file: s3roundtrip.py
Example 17: write_slaves
def write_slaves(ctx):
log.info("Setting up slave nodes...")
slavesFile = "{tdir}/apache_hadoop/conf/slaves".format(tdir=teuthology.get_testdir(ctx))
tmpFile = StringIO()
slaves = ctx.cluster.only(teuthology.is_type("hadoop.slave"))
for remote, roles_for_host in slaves.remotes.iteritems():
tmpFile.write("{remote}\n".format(remote=remote.ssh.get_transport().getpeername()[0]))
tmpFile.seek(0)
hadoopNodes = ctx.cluster.only(teuthology.is_type("hadoop"))
for remote, roles_for_host in hadoopNodes.remotes.iteritems():
teuthology.write_file(remote=remote, path=slavesFile, data=tmpFile)
tmpFile.seek(0)
log.info("wrote file: " + slavesFile + " to host: " + str(remote))
Contributor: athanatos, Project: teuthology, Lines of code: 17, Source file: hadoop.py
Example 18: write_slaves
def write_slaves(ctx):
log.info('Setting up slave nodes...')
slavesFile = "/tmp/cephtest/hadoop/conf/slaves"
tmpFile = StringIO()
slaves = ctx.cluster.only(teuthology.is_type('hadoop.slave'))
for remote, roles_for_host in slaves.remotes.iteritems():
tmpFile.write('{remote}\n'.format(remote=remote.ssh.get_transport().getpeername()[0]))
tmpFile.seek(0)
hadoopNodes = ctx.cluster.only(teuthology.is_type('hadoop'))
for remote, roles_for_host in hadoopNodes.remotes.iteritems():
teuthology.write_file(remote=remote, path=slavesFile, data=tmpFile)
tmpFile.seek(0)
log.info("wrote file: " + slavesFile + " to host: " + str(remote))
Contributor: dzafman, Project: teuthology, Lines of code: 17, Source file: hadoop.py
Example 19: write_hdfs_site
def write_hdfs_site(ctx):
hdfsSiteFile = "/tmp/cephtest/hadoop/conf/hdfs-site.xml"
hadoopNodes = ctx.cluster.only(teuthology.is_type('hadoop'))
for remote, roles_for_host in hadoopNodes.remotes.iteritems():
teuthology.write_file(remote, hdfsSiteFile,
'''<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<name>dfs.replication</name>
<value>1</value>
</property>
</configuration>
''' )
log.info("wrote file: " + hdfsSiteFile + " to host: " + str(remote))
Contributor: dzafman, Project: teuthology, Lines of code: 17, Source file: hadoop.py
Example 20: ship_utilities
def ship_utilities(ctx, config):
assert config is None
FILES = ['daemon-helper', 'adjust-ulimits', 'chdir-coredump',
'valgrind.supp', 'kcon_most']
testdir = teuthology.get_testdir(ctx)
for filename in FILES:
log.info('Shipping %r...', filename)
src = os.path.join(os.path.dirname(__file__), filename)
dst = os.path.join(testdir, filename)
with file(src, 'rb') as f:
for rem in ctx.cluster.remotes.iterkeys():
teuthology.write_file(
remote=rem,
path=dst,
data=f,
)
f.seek(0)
rem.run(
args=[
'chmod',
'a=rx',
'--',
dst,
],
)
try:
yield
finally:
log.info('Removing shipped files: %s...', ' '.join(FILES))
filenames = (
os.path.join(testdir, filename)
for filename in FILES
)
run.wait(
ctx.cluster.run(
args=[
'rm',
'-rf',
'--',
] + list(filenames),
wait=False,
),
)
Contributor: gregsfortytwo, Project: teuthology, Lines of code: 44, Source file: ceph.py
Note: The teuthology.misc.write_file examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and distribution or use should follow each project's license. Please do not repost without permission.