This article collects typical usage examples of the _tokenize_str function from Python's pylint.testutils module. If you are wondering what _tokenize_str does, how to use it, or where to find examples, the hand-picked code samples below may help.
Below are 20 code examples of _tokenize_str, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps our system recommend better Python code examples.
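Before the individual examples, here is a minimal, self-contained sketch of the pattern they all share: turn a string of Python source into a token list and hand it to a token-based checker via process_tokens. The helper below is written for illustration only; it assumes pylint.testutils._tokenize_str is essentially a thin wrapper around the standard tokenize module, so prefer the real import (from pylint.testutils import _tokenize_str) when pylint is available.

import tokenize
from io import StringIO

def _tokenize_str(code):
    # Illustrative stand-in (an assumption, not pylint's exact implementation):
    # tokenize a source string into the token list that checkers consume.
    return list(tokenize.generate_tokens(StringIO(code).readline))

# Usage: inspect the tokens a checker would see for a one-line snippet.
for tok in _tokenize_str("a = 1  # FIXME\n"):
    print(tokenize.tok_name[tok.type], repr(tok.string))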
Example 1: testTrailingCommaGood
def testTrailingCommaGood(self):
    with self.assertNoMessages():
        self.checker.process_tokens(_tokenize_str('(a, )\n'))
        self.checker.process_tokens(_tokenize_str('(a,)\n'))
    self.checker.config.no_space_check = []
    with self.assertNoMessages():
        self.checker.process_tokens(_tokenize_str('(a,)\n'))
Developer: KabageMark, Project: newsapp, Lines: 8, Source: unittest_checker_format.py
Example 2: testCheckKeywordParensHandlesUnnecessaryParens
def testCheckKeywordParensHandlesUnnecessaryParens(self):
    self.checker._keywords_with_parens = set()
    cases = [
        (Message('superfluous-parens', line=1, args='if'),
         'if (foo):', 0),
        (Message('superfluous-parens', line=1, args='if'),
         'if ((foo, bar)):', 0),
        (Message('superfluous-parens', line=1, args='if'),
         'if (foo(bar)):', 0),
        (Message('superfluous-parens', line=1, args='return'),
         'return ((x for x in x))', 0),
        (Message('superfluous-parens', line=1, args='not'),
         'not (foo)', 0),
        (Message('superfluous-parens', line=1, args='not'),
         'if not (foo):', 1),
        (Message('superfluous-parens', line=1, args='if'),
         'if (not (foo)):', 0),
        (Message('superfluous-parens', line=1, args='not'),
         'if (not (foo)):', 2),
        (Message('superfluous-parens', line=1, args='for'),
         'for (x) in (1, 2, 3):', 0),
        (Message('superfluous-parens', line=1, args='if'),
         'if (1) in (1, 2, 3):', 0),
    ]
    for msg, code, offset in cases:
        with self.assertAddsMessages(msg):
            self.checker._check_keyword_parentheses(_tokenize_str(code), offset)
Developer: KabageMark, Project: newsapp, Lines: 27, Source: unittest_checker_format.py
Example 3: test_non_ascii_bytes_literal
def test_non_ascii_bytes_literal(self):
    code = 'b"测试"'
    self._test_token_message(code, 'non-ascii-bytes-literal')
    for code in ("测试", u"测试", u'abcdef', b'\x80'):
        tokens = testutils._tokenize_str(code)
        with self.assertNoMessages():
            self.checker.process_tokens(tokens)
Developer: eladm26, Project: pylint, Lines: 7, Source: unittest_checker_python3.py
Example 4: test_other_present_codetag
def test_other_present_codetag(self):
    code = """a = 1
    # CODETAG
    # FIXME
    """
    with self.assertAddsMessages(Message(msg_id="fixme", line=2, args="CODETAG")):
        self.checker.process_tokens(_tokenize_str(code))
Developer: bluesheeptoken, Project: pylint, Lines: 7, Source: unittest_checker_misc.py
Example 5: testValidTypingAnnotationEllipses
def testValidTypingAnnotationEllipses(self):
    """Make sure ellipses in function typing annotation
    doesn't cause a false positive bad-whitespace message"""
    with self.assertNoMessages():
        self.checker.process_tokens(
            _tokenize_str("def foo(t: Tuple[str, ...] = None):\n")
        )
Developer: aluoch-sheila, Project: NEIGHBOURHOOD, Lines: 7, Source: unittest_checker_format.py
Example 6: test_check_bad_coment_custom_suggestion_count
def test_check_bad_coment_custom_suggestion_count(self):
    with self.assertAddsMessages(
        Message('wrong-spelling-in-comment', line=1,
                args=('coment', '# bad coment',
                      '      ^^^^^^',
                      self._get_msg_suggestions('coment', count=2)))):
        self.checker.process_tokens(_tokenize_str("# bad coment"))
Developer: AWhetter, Project: pylint, Lines: 7, Source: unittest_checker_spelling.py
Example 7: testCheckKeywordParensHandlesUnnecessaryParens
def testCheckKeywordParensHandlesUnnecessaryParens(self):
    self.checker._keywords_with_parens = set()
    cases = [
        (Message("superfluous-parens", line=1, args="if"), "if (foo):", 0),
        (Message("superfluous-parens", line=1, args="if"), "if ((foo, bar)):", 0),
        (Message("superfluous-parens", line=1, args="if"), "if (foo(bar)):", 0),
        (
            Message("superfluous-parens", line=1, args="return"),
            "return ((x for x in x))",
            0,
        ),
        (Message("superfluous-parens", line=1, args="not"), "not (foo)", 0),
        (Message("superfluous-parens", line=1, args="not"), "if not (foo):", 1),
        (Message("superfluous-parens", line=1, args="if"), "if (not (foo)):", 0),
        (Message("superfluous-parens", line=1, args="not"), "if (not (foo)):", 2),
        (
            Message("superfluous-parens", line=1, args="for"),
            "for (x) in (1, 2, 3):",
            0,
        ),
        (
            Message("superfluous-parens", line=1, args="if"),
            "if (1) in (1, 2, 3):",
            0,
        ),
    ]
    for msg, code, offset in cases:
        with self.assertAddsMessages(msg):
            self.checker._check_keyword_parentheses(_tokenize_str(code), offset)
Developer: aluoch-sheila, Project: NEIGHBOURHOOD, Lines: 29, Source: unittest_checker_format.py
Example 8: testCheckIfArgsAreNotUnicode
def testCheckIfArgsAreNotUnicode(self):
    self.checker._keywords_with_parens = set()
    cases = [(u'if (foo):', 0), (u'assert (1 == 1)', 0)]
    for code, offset in cases:
        self.checker._check_keyword_parentheses(_tokenize_str(code), offset)
        got = self.linter.release_messages()
        assert isinstance(got[-1].args, str)
Developer: KabageMark, Project: newsapp, Lines: 8, Source: unittest_checker_format.py
Example 9: test_absent_codetag
def test_absent_codetag(self):
    code = """a = 1
    # FIXME # FIXME
    # TODO # TODO
    # XXX # XXX
    """
    with self.assertNoMessages():
        self.checker.process_tokens(_tokenize_str(code))
Developer: bluesheeptoken, Project: pylint, Lines: 8, Source: unittest_checker_misc.py
Example 10: test_fixme_with_message
def test_fixme_with_message(self):
    code = """a = 1
    # FIXME message
    """
    with self.assertAddsMessages(
        Message(msg_id="fixme", line=2, args="FIXME message")
    ):
        self.checker.process_tokens(_tokenize_str(code))
Developer: bluesheeptoken, Project: pylint, Lines: 8, Source: unittest_checker_misc.py
Example 11: testFuturePrintStatementWithoutParensWarning
def testFuturePrintStatementWithoutParensWarning(self):
    code = """from __future__ import print_function
print('Hello world!')
"""
    tree = astroid.parse(code)
    with self.assertNoMessages():
        self.checker.process_module(tree)
        self.checker.process_tokens(_tokenize_str(code))
Developer: KabageMark, Project: newsapp, Lines: 8, Source: unittest_checker_format.py
Example 12: testEmptyLines
def testEmptyLines(self):
    self.checker.config.no_space_check = []
    with self.assertAddsMessages(
        Message('trailing-whitespace', line=2)):
        self.checker.process_tokens(_tokenize_str('a = 1\n \nb = 2\n'))
    with self.assertAddsMessages(
        Message('trailing-whitespace', line=2)):
        self.checker.process_tokens(_tokenize_str('a = 1\n\t\nb = 2\n'))
    with self.assertAddsMessages(
        Message('trailing-whitespace', line=2)):
        self.checker.process_tokens(_tokenize_str('a = 1\n\v\nb = 2\n'))
    with self.assertNoMessages():
        self.checker.process_tokens(_tokenize_str('a = 1\n\f\nb = 2\n'))
    self.checker.config.no_space_check = ['empty-line']
    with self.assertNoMessages():
        self.checker.process_tokens(_tokenize_str('a = 1\n \nb = 2\n'))
    with self.assertNoMessages():
        self.checker.process_tokens(_tokenize_str('a = 1\n\t\nb = 2\n'))
    with self.assertNoMessages():
        self.checker.process_tokens(_tokenize_str('a = 1\n\v\nb = 2\n'))
Developer: KabageMark, Project: newsapp, Lines: 26, Source: unittest_checker_format.py
Example 13: testOperatorSpacingGood
def testOperatorSpacingGood(self):
    good_cases = [
        'a = b\n'
        'a < b\n'
        'a\n< b\n',
    ]
    with self.assertNoMessages():
        for code in good_cases:
            self.checker.process_tokens(_tokenize_str(code))
Developer: KabageMark, Project: newsapp, Lines: 9, Source: unittest_checker_format.py
Example 14: test_old_octal_literal
def test_old_octal_literal(self):
    for octal in ("045", "055", "075", "077", "076543"):
        self._test_token_message(octal, "old-octal-literal")
    # Make sure we are catching only octals.
    for non_octal in ("45", "00", "085", "08", "1"):
        tokens = testutils._tokenize_str(non_octal)
        with self.assertNoMessages():
            self.checker.process_tokens(tokens)
Developer: eladm26, Project: pylint, Lines: 9, Source: unittest_checker_python3.py
Example 15: testComma
def testComma(self):
    with self.assertAddsMessages(
        Message(
            "bad-whitespace",
            line=1,
            args=("No", "allowed", "before", "comma", "(a , b)\n   ^"),
        )
    ):
        self.checker.process_tokens(_tokenize_str("(a , b)\n"))
Developer: aluoch-sheila, Project: NEIGHBOURHOOD, Lines: 9, Source: unittest_checker_format.py
Example 16: testParenthesesGood
def testParenthesesGood(self):
    good_cases = [
        '(a)\n',
        '(a * (b + c))\n',
        '(#\n a)\n',
    ]
    with self.assertNoMessages():
        for code in good_cases:
            self.checker.process_tokens(_tokenize_str(code))
Developer: KabageMark, Project: newsapp, Lines: 9, Source: unittest_checker_format.py
Example 17: test_encoding_token
def test_encoding_token(self):
    """Make sure the encoding token doesn't change the checker's behavior
    _tokenize_str doesn't produce an encoding token, but
    reading a file does
    """
    with self.assertNoMessages():
        encoding_token = tokenize.TokenInfo(tokenize.ENCODING, "utf-8", (0, 0), (0, 0), '')
        tokens = [encoding_token] + _tokenize_str('if (\n None):\n pass\n')
        self.checker.process_tokens(tokens)
Developer: gtt116, Project: vimrc, Lines: 10, Source: unittest_checker_format.py
Example 18: testKeywordSpacingGood
def testKeywordSpacingGood(self):
    with self.assertNoMessages():
        self.checker.process_tokens(_tokenize_str('foo(foo=bar)\n'))
        self.checker.process_tokens(_tokenize_str('foo(foo: int = bar)\n'))
        self.checker.process_tokens(_tokenize_str('foo(foo: module.classname = bar)\n'))
        self.checker.process_tokens(_tokenize_str('foo(foo: Dict[int, str] = bar)\n'))
        self.checker.process_tokens(_tokenize_str('foo(foo: \'int\' = bar)\n'))
        self.checker.process_tokens(_tokenize_str('foo(foo: Dict[int, \'str\'] = bar)\n'))
        self.checker.process_tokens(_tokenize_str('lambda x=1: x\n'))
Developer: KabageMark, Project: newsapp, Lines: 9, Source: unittest_checker_format.py
Example 19: testOperatorSpacingBad
def testOperatorSpacingBad(self):
    with self.assertAddsMessages(
        Message('bad-whitespace', line=1,
                args=('Exactly one', 'required', 'before', 'comparison', 'a< b\n ^'))):
        self.checker.process_tokens(_tokenize_str('a< b\n'))
    with self.assertAddsMessages(
        Message('bad-whitespace', line=1,
                args=('Exactly one', 'required', 'after', 'comparison', 'a <b\n  ^'))):
        self.checker.process_tokens(_tokenize_str('a <b\n'))
    with self.assertAddsMessages(
        Message('bad-whitespace', line=1,
                args=('Exactly one', 'required', 'around', 'comparison', 'a<b\n ^'))):
        self.checker.process_tokens(_tokenize_str('a<b\n'))
    with self.assertAddsMessages(
        Message('bad-whitespace', line=1,
                args=('Exactly one', 'required', 'around', 'comparison', 'a< b\n ^'))):
        self.checker.process_tokens(_tokenize_str('a< b\n'))
Developer: KabageMark, Project: newsapp, Lines: 20, Source: unittest_checker_format.py
Example 20: testParenthesesBad
def testParenthesesBad(self):
    with self.assertAddsMessages(
        Message('bad-whitespace', line=1,
                args=('No', 'allowed', 'after', 'bracket', '( a)\n^'))):
        self.checker.process_tokens(_tokenize_str('( a)\n'))
    with self.assertAddsMessages(
        Message('bad-whitespace', line=1,
                args=('No', 'allowed', 'before', 'bracket', '(a )\n   ^'))):
        self.checker.process_tokens(_tokenize_str('(a )\n'))
    with self.assertAddsMessages(
        Message('bad-whitespace', line=1,
                args=('No', 'allowed', 'before', 'bracket', 'foo (a)\n    ^'))):
        self.checker.process_tokens(_tokenize_str('foo (a)\n'))
    with self.assertAddsMessages(
        Message('bad-whitespace', line=1,
                args=('No', 'allowed', 'before', 'bracket', '{1: 2} [1]\n       ^'))):
        self.checker.process_tokens(_tokenize_str('{1: 2} [1]\n'))
Developer: KabageMark, Project: newsapp, Lines: 20, Source: unittest_checker_format.py
Note: The pylint.testutils._tokenize_str examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective developers, and copyright remains with the original authors. Please consult each project's license before distributing or reusing the code, and do not reproduce this article without permission.