This article collects typical usage examples of the Python function tests.quiet.logger_disabled. If you are wondering what logger_disabled does, how to call it, and what real-world uses look like, the curated code examples below should help.
The page shows 20 code examples of the logger_disabled function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the site recommend better Python code samples.
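None of the snippets below shows the body of logger_disabled itself; they only show how mrjob's test suite calls it. In every case it is a context manager that takes a logger name such as 'mrjob.job' or 'mrjob.local' and silences that logger for the duration of the with block, so that deprecation warnings and expected errors do not clutter test output. As a rough, illustrative sketch only (mrjob's actual tests/quiet.py may differ in detail), a helper with this behavior can be built from the standard logging and contextlib modules:

import logging
from contextlib import contextmanager

@contextmanager
def logger_disabled(name=None):
    # Illustrative sketch only; not necessarily mrjob's actual implementation.
    # Fetch the named logger ('mrjob.job', 'mrjob.local', ...) or the
    # root logger when no name is given.
    logger = logging.getLogger(name)
    was_disabled = logger.disabled
    logger.disabled = True  # drop all records from this logger
    try:
        yield logger
    finally:
        # Restore the previous state even if the wrapped code raised,
        # so one test cannot silence logging for the rest of the suite.
        logger.disabled = was_disabled

In the examples that follow, the pattern is always the same: with logger_disabled('mrjob.<module>'): wraps exactly the statements that are expected to log, typically deprecated options or a run that is asserted to fail.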
Example 1: test_dont_take_down_cluster_on_failure
def test_dont_take_down_cluster_on_failure(self):
    runner = DataprocJobRunner(conf_paths=[])
    cluster_body = runner.api_client.cluster_create()
    cluster_id = cluster_body['clusterName']
    mr_job = MRTwoStepJob(['-r', 'dataproc', '-v',
                           '--cluster-id', cluster_id])
    mr_job.sandbox()
    self._dataproc_client.job_get_advances_states = collections.deque(['SETUP_DONE', 'RUNNING', 'ERROR'])
    with mr_job.make_runner() as runner:
        self.assertIsInstance(runner, DataprocJobRunner)
        with logger_disabled('mrjob.dataproc'):
            self.assertRaises(StepFailedException, runner.run)
        cluster = self.get_cluster_from_runner(runner, cluster_id)
        cluster_state = self._dataproc_client.get_state(cluster)
        self.assertEqual(cluster_state, 'RUNNING')
    # job shouldn't get terminated by cleanup
    cluster = self._dataproc_client._cache_clusters[_TEST_PROJECT][cluster_id]
    cluster_state = self._dataproc_client.get_state(cluster)
    self.assertEqual(cluster_state, 'RUNNING')
Developer: Jeremyfanfan, Project: mrjob, Lines: 26, Source: test_dataproc.py
Example 2: test_hadoop_version_option_does_nothing
def test_hadoop_version_option_does_nothing(self):
    with logger_disabled('mrjob.dataproc'):
        with self.make_runner('--hadoop-version', '1.2.3.4') as runner:
            runner.run()
            self.assertEqual(runner.get_image_version(),
                             _DEFAULT_IMAGE_VERSION)
            self.assertEqual(runner.get_hadoop_version(), '2.7.2')
Developer: Jeremyfanfan, Project: mrjob, Lines: 7, Source: test_dataproc.py
Example 3: test_end_to_end
def test_end_to_end(self):
    n_file_path = os.path.join(self.tmp_dir, "n_file")
    with open(n_file_path, "w") as f:
        f.write("3")
    os.environ["LOCAL_N_FILE_PATH"] = n_file_path
    stdin = ["0\n", "1\n", "2\n"]
    # use local runner so that the file is actually sent somewhere
    mr_job = MRTowerOfPowers(["-v", "--cleanup=NONE", "--n-file", n_file_path, "--runner=local"])
    self.assertEqual(len(mr_job.steps()), 3)
    mr_job.sandbox(stdin=stdin)
    with logger_disabled("mrjob.local"):
        with mr_job.make_runner() as runner:
            # make sure our file gets placed in the working dir
            self.assertIn(n_file_path, runner._working_dir_mgr.paths())
            runner.run()
            output = set()
            for line in runner.stream_output():
                _, value = mr_job.parse_output_line(line)
                output.add(value)
    self.assertEqual(set(output), set([0, 1, ((2 ** 3) ** 3) ** 3]))
Developer: ndimiduk, Project: mrjob, Lines: 28, Source: test_job.py
Example 4: test_failed_job
def test_failed_job(self):
    mr_job = MRTwoStepJob(['-r', 'emr', '-v',
                           '-c', self.mrjob_conf_path])
    mr_job.sandbox()
    self.add_mock_s3_data({'walrus': {}})
    self.mock_emr_failures = {('j-MOCKJOBFLOW0', 0): None}
    with mr_job.make_runner() as runner:
        assert isinstance(runner, EMRJobRunner)
        with logger_disabled('mrjob.emr'):
            assert_raises(Exception, runner.run)
        emr_conn = botoemr.EmrConnection()
        job_flow_id = runner.get_emr_job_flow_id()
        for i in range(10):
            emr_conn.simulate_progress(job_flow_id)
        job_flow = emr_conn.describe_jobflow(job_flow_id)
        assert_equal(job_flow.state, 'FAILED')
    # job should get terminated on cleanup
    emr_conn = runner.make_emr_conn()
    job_flow_id = runner.get_emr_job_flow_id()
    for i in range(10):
        emr_conn.simulate_progress(job_flow_id)
    job_flow = emr_conn.describe_jobflow(job_flow_id)
    assert_equal(job_flow.state, 'TERMINATED')
Developer: boursier, Project: mrjob, Lines: 30, Source: emr_test.py
Example 5: test_end_to_end
def test_end_to_end(self):
    n_file_path = os.path.join(self.tmp_dir, 'n_file')
    with open(n_file_path, 'w') as f:
        f.write('3')
    os.environ['LOCAL_N_FILE_PATH'] = n_file_path
    stdin = ['0\n', '1\n', '2\n']
    mr_job = MRTowerOfPowers(
        ['--no-conf', '-v', '--cleanup=NONE', '--n-file', n_file_path])
    self.assertEqual(len(mr_job.steps()), 3)
    mr_job.sandbox(stdin=stdin)
    with logger_disabled('mrjob.local'):
        with mr_job.make_runner() as runner:
            assert isinstance(runner, LocalMRJobRunner)
            # make sure our file gets "uploaded"
            assert [
                fd for fd in runner._files if fd['path'] == n_file_path
            ]
            runner.run()
            output = set()
            for line in runner.stream_output():
                _, value = mr_job.parse_output_line(line)
                output.add(value)
    self.assertEqual(set(output), set([0, 1, ((2**3)**3)**3]))
Developer: bchess, Project: mrjob, Lines: 31, Source: test_job.py
Example 6: test_end_to_end
def test_end_to_end(self):
    n_file_path = os.path.join(self.tmp_dir, 'n_file')
    with open(n_file_path, 'w') as f:
        f.write('3')
    os.environ['LOCAL_N_FILE_PATH'] = n_file_path
    stdin = [b'0\n', b'1\n', b'2\n']
    # use local runner so that the file is actually sent somewhere
    mr_job = MRTowerOfPowers(
        ['-v', '--cleanup=NONE', '--n-file', n_file_path,
         '--runner=local'])
    self.assertEqual(len(mr_job.steps()), 3)
    mr_job.sandbox(stdin=stdin)
    with logger_disabled('mrjob.local'):
        with mr_job.make_runner() as runner:
            # make sure our file gets placed in the working dir
            self.assertIn(n_file_path, runner._working_dir_mgr.paths())
            runner.run()
            output = set()
            for _, value in mr_job.parse_output(runner.cat_output()):
                output.add(value)
    self.assertEqual(set(output), set([0, 1, ((2 ** 3) ** 3) ** 3]))
Developer: okomestudio, Project: mrjob, Lines: 29, Source: test_job.py
Example 7: test_mr
def test_mr(self):
    kwargs = {
        'mapper': _IDENTITY_MAPPER,
        'reducer': _IDENTITY_REDUCER,
    }
    with logger_disabled('mrjob.job'):
        self.assertEqual(MRJob.mr(**kwargs), MRStep(**kwargs))
Developer: tempcyc, Project: mrjob, Lines: 8, Source: test_job.py
Example 8: test_jar
def test_jar(self):
    kwargs = {
        'jar': 'binks.jar.jar',
        'main_class': 'MyMainMan',
        'args': ['argh', 'argh'],
    }
    with logger_disabled('mrjob.job'):
        self.assertEqual(MRJob.jar(**kwargs), JarStep(**kwargs))
Developer: tempcyc, Project: mrjob, Lines: 9, Source: test_job.py
Example 9: test_extra_kwargs_passed_in_directly_okay
def test_extra_kwargs_passed_in_directly_okay(self):
    with logger_disabled('mrjob.runner'):
        runner = InlineMRJobRunner(
            foo='bar',
            local_tmp_dir='/var/tmp',
            conf_paths=[],
        )
        self.assertEqual(runner._opts['local_tmp_dir'], '/var/tmp')
        self.assertNotIn('bar', runner._opts)
Developer: okomestudio, Project: mrjob, Lines: 10, Source: test_runner.py
Example 10: test_parse_output
def test_parse_output(self):
    # test parsing JSON
    mr_job = MRJob()
    output = b'0\t1\n"a"\t"b"\n'
    mr_job.stdout = BytesIO(output)
    with logger_disabled('mrjob.job'):
        self.assertEqual(mr_job.parse_output(), [(0, 1), ('a', 'b')])
    # verify that stdout is not cleared
    self.assertEqual(mr_job.stdout.getvalue(), output)
Developer: tempcyc, Project: mrjob, Lines: 10, Source: test_job.py
Example 11: test_parse_counters
def test_parse_counters(self):
    mr_job = MRJob().sandbox()
    mr_job.increment_counter('Foo', 'Bar')
    mr_job.increment_counter('Foo', 'Bar')
    mr_job.increment_counter('Foo', 'Baz', 20)
    with logger_disabled('mrjob.job'):
        self.assertEqual(mr_job.parse_counters(),
                         {'Foo': {'Bar': 2, 'Baz': 20}})
Developer: tempcyc, Project: mrjob, Lines: 10, Source: test_job.py
Example 12: test_load_mrjob_conf_and_load_opts
def test_load_mrjob_conf_and_load_opts(self):
    conf_path = os.path.join(self.tmp_dir, "mrjob.conf.2")
    with open(conf_path, "w") as f:
        f.write('{"runners": {"foo": {"qux": "quux"}}}')
    self.assertEqual(load_mrjob_conf(conf_path=conf_path), {"runners": {"foo": {"qux": "quux"}}})
    self.assertEqual(load_opts_from_mrjob_conf("foo", conf_path=conf_path)[0][1], {"qux": "quux"})
    # test missing options
    with logger_disabled("mrjob.conf"):
        self.assertEqual(load_opts_from_mrjob_conf("bar", conf_path=conf_path)[0][1], {})
Developer: JeffersonK, Project: mrjob, Lines: 10, Source: test_conf.py
Example 13: test_parse_output_with_protocol_instance
def test_parse_output_with_protocol_instance(self):
    # see if we can use the repr protocol
    mr_job = MRJob()
    output = b"0\t1\n['a', 'b']\tset(['c', 'd'])\n"
    mr_job.stdout = BytesIO(output)
    with logger_disabled('mrjob.job'):
        self.assertEqual(mr_job.parse_output(ReprProtocol()),
                         [(0, 1), (['a', 'b'], set(['c', 'd']))])
    # verify that stdout is not cleared
    self.assertEqual(mr_job.stdout.getvalue(), output)
Developer: tempcyc, Project: mrjob, Lines: 11, Source: test_job.py
Example 14: test_job_name_prefix_is_now_label
def test_job_name_prefix_is_now_label(self):
    with logger_disabled('mrjob.runner'):
        old_way = LocalMRJobRunner(
            conf_path=False, job_name_prefix='ads_chain')
        old_opts = old_way.get_opts()
    new_way = LocalMRJobRunner(conf_path=False, label='ads_chain')
    new_opts = new_way.get_opts()
    assert_equal(old_opts, new_opts)
    assert_equal(old_opts['label'], 'ads_chain')
    assert_not_in('job_name_prefix', old_opts)
Developer: Jyrsa, Project: mrjob, Lines: 12, Source: runner_test.py
Example 15: test_setup_wrapper_script_uses_local_line_endings
def test_setup_wrapper_script_uses_local_line_endings(self):
    job = MRTwoStepJob(["-r", "local", "--setup", "true"])
    job.sandbox(stdin=BytesIO())
    # tests #1071. Unfortunately, we mostly run these tests on machines
    # that use unix line endings anyway. So monitor open() instead
    with patch("mrjob.runner.open", create=True, side_effect=open) as m_open:
        with logger_disabled("mrjob.local"):
            with job.make_runner() as runner:
                runner.run()
                self.assertIn(call(runner._setup_wrapper_script_path, "w"), m_open.mock_calls)
Developer: alanhdu, Project: mrjob, Lines: 12, Source: test_local.py
Example 16: test_deprecated_command_line_options_override_attrs
def test_deprecated_command_line_options_override_attrs(self):
    mr_job = MRHadoopFormatJob([
        '--hadoop-input-format',
        'org.apache.hadoop.mapred.lib.NLineInputFormat',
        '--hadoop-output-format',
        'org.apache.hadoop.mapred.FileOutputFormat',
    ])
    with logger_disabled('mrjob.job'):
        assert_equal(mr_job.job_runner_kwargs()['hadoop_input_format'],
                     'org.apache.hadoop.mapred.lib.NLineInputFormat')
        assert_equal(mr_job.job_runner_kwargs()['hadoop_output_format'],
                     'org.apache.hadoop.mapred.FileOutputFormat')
Developer: gimlids, Project: LTPM, Lines: 13, Source: job_test.py
Example 17: test_load_mrjob_conf_and_load_opts
def test_load_mrjob_conf_and_load_opts(self):
    conf_path = os.path.join(self.tmp_dir, 'mrjob.conf.2')
    with open(conf_path, 'w') as f:
        f.write('{"runners": {"foo": {"qux": "quux"}}}')
    assert_equal(load_mrjob_conf(conf_path=conf_path),
                 {'runners': {'foo': {'qux': 'quux'}}})
    assert_equal(load_opts_from_mrjob_conf('foo', conf_path=conf_path),
                 {'qux': 'quux'})
    # test missing options
    with logger_disabled('mrjob.conf'):
        assert_equal(
            load_opts_from_mrjob_conf('bar', conf_path=conf_path), {})
Developer: gimlids, Project: LTPM, Lines: 13, Source: conf_test.py
Example 18: test_deprecated_command_line_options
def test_deprecated_command_line_options(self):
    mr_job = MRJob([
        '--hadoop-input-format',
        'org.apache.hadoop.mapred.lib.NLineInputFormat',
        '--hadoop-output-format',
        'org.apache.hadoop.mapred.FileOutputFormat',
    ])
    with logger_disabled('mrjob.job'):
        job_runner_kwargs = mr_job.job_runner_kwargs()
        self.assertEqual(job_runner_kwargs['hadoop_input_format'],
                         'org.apache.hadoop.mapred.lib.NLineInputFormat')
        self.assertEqual(job_runner_kwargs['hadoop_output_format'],
                         'org.apache.hadoop.mapred.FileOutputFormat')
Developer: bchess, Project: mrjob, Lines: 14, Source: test_job.py
Example 19: test_setup_wrapper_script_uses_local_line_endings
def test_setup_wrapper_script_uses_local_line_endings(self):
    job = MRTwoStepJob(['-r', 'hadoop', '--setup', 'true'])
    job.sandbox(stdin=BytesIO(b''))
    add_mock_hadoop_output([b''])
    add_mock_hadoop_output([b''])
    # tests #1071. Unfortunately, we mostly run these tests on machines
    # that use unix line endings anyway. So monitor open() instead
    with patch(
            'mrjob.runner.open', create=True, side_effect=open) as m_open:
        with logger_disabled('mrjob.hadoop'):
            with job.make_runner() as runner:
                runner.run()
                self.assertIn(
                    call(runner._setup_wrapper_script_path, 'wb'),
                    m_open.mock_calls)
Developer: kartheek6, Project: mrjob, Lines: 18, Source: test_hadoop.py
Example 20: test_hdfs_jar_uri
def test_hdfs_jar_uri(self):
    # this could change, but for now, we pass URIs straight through
    mock_hdfs_jar = os.path.join(os.environ["MOCK_HDFS_ROOT"], "fake.jar")
    open(mock_hdfs_jar, "w").close()
    jar_uri = "hdfs:///fake.jar"
    job = MRJustAJar(["-r", "hadoop", "--jar", jar_uri])
    job.sandbox()
    with job.make_runner() as runner:
        with logger_disabled("mrjob.hadoop"):
            # `hadoop jar` doesn't actually accept URIs
            self.assertRaises(CalledProcessError, runner.run)
    with open(os.environ["MOCK_HADOOP_LOG"]) as hadoop_log:
        hadoop_jar_lines = [line for line in hadoop_log if line.startswith("jar ")]
        self.assertEqual(len(hadoop_jar_lines), 1)
        self.assertEqual(hadoop_jar_lines[0].rstrip(), "jar " + jar_uri)
Developer: swiftserve, Project: mrjob, Lines: 19, Source: test_hadoop.py
Note: The tests.quiet.logger_disabled examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation hosting platforms. The snippets are selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please consult the corresponding project's license before distributing or reusing the code, and do not reproduce this article without permission.