This article collects typical usage examples of Python's tokenize.generate_tokens function. If you are wondering what exactly generate_tokens does, how to call it, and what real-world usage looks like, the curated code samples below should help.
Twenty code examples of the generate_tokens function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
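Before the collected examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the variable names are purely illustrative) of the basic call pattern: generate_tokens takes a readline callable and yields 5-tuples of (token type, token string, start position, end position, source line).

import io
import tokenize

source = "total = price * quantity  # compute cost\n"
# generate_tokens expects a readline callable that returns source lines as str
for tok_type, tok_string, start, end, line in tokenize.generate_tokens(io.StringIO(source).readline):
    print(tokenize.tok_name[tok_type], repr(tok_string))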
Example 1: test_check_dict_formatting_in_string
def test_check_dict_formatting_in_string(self):
    bad = [
        '"%(a)s" % d',
        '"Split across "\n"multiple lines: %(a)f" % d',
        '"%(a)X split across "\n"multiple lines" % d',
        '"%(a)-5.2f: Split %("\n"a)#Lu stupidly" % d',
        '"Comment between " # wtf\n"split lines: %(a) -6.2f" % d',
        '"Two strings" + " added: %(a)-6.2f" % d',
        '"half legit (%(a)s %(b)s)" % d + " half bogus: %(a)s" % d',
        '("Parenthesized: %(a)s") % d',
        '("Parenthesized "\n"concatenation: %(a)s") % d',
        '("Parenthesized " + "addition: %(a)s") % d',
        '"Complete %s" % ("foolisness: %(a)s%(a)s" % d)',
        '"Modulus %(a)s" % {"a": (5 % 3)}',
    ]
    for sample in bad:
        sample = "print(%s)" % sample
        tokens = tokenize.generate_tokens(six.moves.StringIO(sample).readline)
        self.assertEqual(
            1, len(list(checks.check_dict_formatting_in_string(sample, tokens))))

    sample = 'print("%(a)05.2lF" % d + " added: %(a)s" % d)'
    tokens = tokenize.generate_tokens(six.moves.StringIO(sample).readline)
    self.assertEqual(
        2, len(list(checks.check_dict_formatting_in_string(sample, tokens))))

    good = ['"This one is okay: %(a)s %(b)s" % d',
            '"So is %(a)s"\n"this one: %(b)s" % d']
    for sample in good:
        sample = "print(%s)" % sample
        tokens = tokenize.generate_tokens(six.moves.StringIO(sample).readline)
        self.assertEqual(
            [], list(checks.check_dict_formatting_in_string(sample, tokens)))
Author: rvbaz | Project: rally | Lines: 29 | Source file: test_hacking.py
Example 2: do_viprcli
def do_viprcli(self, command):
    #pass
    # Command to be executed
    command = "viprcli " + command
    # Tokenize the command
    STRING = 1
    L2 = list(token[STRING] for token in generate_tokens(StringIO(command).readline)
              if token[STRING])
    # Check if this was a command other than authenticate
    if L2[1] != "authenticate":
        # If cf is set then use it else show a message
        if len(self.cf) != 0:
            command = command + " -cf " + self.cf
    # run the command
    output = commands.getoutput(command)
    # Find the cf information
    if L2[1] == "authenticate":
        self.cf = ""
        L1 = list(token[STRING] for token in generate_tokens(StringIO(output).readline)
                  if token[STRING])
        cf_length = len(L1) - 8
        for i in range(0, cf_length - 1):
            self.cf = self.cf + str(L1[5 + i])
    print output
Author: tylerbaker | Project: controller-openstack-cinder | Lines: 25 | Source file: viprcli_interpreter.py
Example 3: generate_tokenizer
def generate_tokenizer(self, linenoList=None):
    """Generate a token generator from the source file referenced by self.filename."""
    # Generate
    if linenoList is not None:
        max_lineno = file_utils.count_lineno(self.filename)
        if not all((isinstance(num, int) and 1 <= num <= max_lineno) for num in linenoList):
            raise Exception("Invalid line number specification: %s " % linenoList)
        elif linecache.getline(self.filename, linenoList[-1]).strip().endswith('\\'):
            # The last requested line ends with a continuation backslash,
            # so extend the range by one line and retry.
            return self.generate_tokenizer(linenoList + [linenoList[-1] + 1])

        gen = (linecache.getline(self.filename, lineno) for lineno in linenoList)

        def readline():
            try:
                line = gen.next()
            except StopIteration:
                return ""
            return line

        tokenizer = tokenize.generate_tokens(readline)
    else:
        # Generate tokenizer for the whole file
        f = open(self.filename)
        tokenizer = tokenize.generate_tokens(f.readline)
    return tokenizer
Author: henpin | Project: module | Lines: 25 | Source file: source_analyser.py
Example 4: __substituteVars
def __substituteVars(self, code, env):
    '''
    Expand any variables that exist in the given environment to their corresponding values
    '''
    # tokenize the given expression code
    gtoks = tokenize.generate_tokens(StringIO.StringIO(code).readline)
    # iterate over each token and replace any matching token with its corresponding value
    tokens = []
    for toknum, tokval, _, _, _ in gtoks:
        if toknum == tokenize.NAME and tokval in env:
            ntoks = tokenize.generate_tokens(StringIO.StringIO(str(env[tokval])).readline)
            tokens.extend(ntoks)
        else:
            tokens.append((toknum, tokval))
    # convert the tokens back to a string
    code = tokenize.untokenize(tokens)
    # remove all the leading and trailing spaces
    code = code.strip()
    # return the modified string
    return code
Author: axelyamel | Project: Orio | Lines: 25 | Source file: eval.py
Example 5: extract_docstring
def extract_docstring(self):
    """ Extract a module-level docstring
    """
    lines = open(self.filename).readlines()
    start_row = 0
    if lines[0].startswith('#!'):
        lines.pop(0)
        start_row = 1

    docstring = ''
    first_par = ''
    if sys.version_info[0] >= 3:
        tokens = tokenize.generate_tokens(lines.__iter__().__next__)
    else:
        tokens = tokenize.generate_tokens(lines.__iter__().next)
    for tok_type, tok_content, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif tok_type == 'STRING':
            docstring = eval(tok_content)
            # If the docstring is formatted with several paragraphs, extract
            # the first one:
            paragraphs = '\n'.join(line.rstrip()
                                   for line in docstring.split('\n')).split('\n\n')
            if len(paragraphs) > 0:
                first_par = paragraphs[0]
        break
    self.docstring = docstring
    self.short_desc = first_par
    self.end_line = erow + 1 + start_row
Author: astroML | Project: astroML | Lines: 33 | Source file: example_builder.py
Example 6: test_roundtrip
def test_roundtrip(f):
    ## print 'Testing:', f
    fobj = open(f)
    try:
        fulltok = list(generate_tokens(fobj.readline))
    finally:
        fobj.close()

    t1 = [tok[:2] for tok in fulltok]
    newtext = untokenize(t1)
    readline = iter(newtext.splitlines(1)).next
    t2 = [tok[:2] for tok in generate_tokens(readline)]
    if t1 != t2:
        raise TestFailed("untokenize() roundtrip failed for %r" % f)
Author: B-Rich | Project: breve | Lines: 14 | Source file: test_tokenize.py
Example 7: find_fold_points
def find_fold_points(block):
    """
    Returns a list of (start_row, end_row, indent) tuples that denote fold
    locations. Basically anywhere that there's an indent.
    """
    token_whitelist = (tokenize.NL,
                       tokenize.NEWLINE,
                       tokenize.INDENT,
                       tokenize.DEDENT,
                       tokenize.COMMENT,
                       )

    # temporary code that allows for running a block or a full file
    if os.path.isfile(block):
        with open(block) as open_file:
            # generate_tokens needs a readline callable (not the file object);
            # materialize the tokens while the file is still open.
            token_block = list(tokenize.generate_tokens(open_file.readline))
    else:
        token_block = tokenize.generate_tokens(StringIO(block).readline)

    indent_level = 0
    nl_counter = 0
    comment_counter = 0
    indents = []
    result = []

    for toknum, _, srowcol, _, _ in token_block:
        # Account for comments at the start of a block and newlines at the
        # end of a block.
        if toknum == tokenize.NL:
            nl_counter += 1
        if toknum == tokenize.COMMENT:
            comment_counter += 1

        if toknum == tokenize.INDENT:
            indent_level += 1
            indents.append(srowcol[0] - 1 - comment_counter)

        if toknum == tokenize.DEDENT:
            # the next DEDENT belongs to the most recent INDENT, so we pop off
            # the last indent from the stack
            indent_level -= 1
            matched_indent = indents.pop()
            result.append((matched_indent,
                           srowcol[0] - 1 - nl_counter,
                           indent_level + 1))

        if toknum not in token_whitelist:
            nl_counter = 0
            comment_counter = 0

    if len(indents) != 0:
        raise ValueError("Number of DEDENTs does not match number of INDENTs.")

    return result
Author: dougthor42 | Project: CodeSort | Lines: 50 | Source file: find_fold_points.py
Example 8: check_roundtrip
def check_roundtrip(self, f):
    """
    Test roundtrip for `untokenize`. `f` is an open file or a string.
    The source code in f is tokenized, converted back to source code
    via tokenize.untokenize(), and tokenized again from the latter.
    The test fails if the second tokenization doesn't match the first.
    """
    if isinstance(f, str):
        f = StringIO(f)
    token_list = list(generate_tokens(f.readline))
    f.close()
    tokens1 = [tok[:2] for tok in token_list]
    new_text = untokenize(tokens1)
    readline = iter(new_text.splitlines(1)).next
    tokens2 = [tok[:2] for tok in generate_tokens(readline)]
    self.assertEqual(tokens2, tokens1)
Author: billtsay | Project: win-demo-opcua | Lines: 15 | Source file: test_tokenize.py
Example 9: check
def check(file):
    """check(file_or_dir)

    If file_or_dir is a directory and not a symbolic link, then recursively
    descend the directory tree named by file_or_dir, checking all .py files
    along the way. If file_or_dir is an ordinary Python source file, it is
    checked for whitespace related problems. The diagnostic messages are
    written to standard output using the print statement.
    """
    if os.path.isdir(file) and not os.path.islink(file):
        if verbose:
            print("%r: listing directory" % (file,))
        names = os.listdir(file)
        for name in names:
            fullname = os.path.join(file, name)
            if (os.path.isdir(fullname) and
                    not os.path.islink(fullname) or
                    os.path.normcase(name[-3:]) == ".py"):
                check(fullname)
        return

    try:
        f = tokenize.open(file)
    except IOError as msg:
        errprint("%r: I/O Error: %s" % (file, msg))
        return

    if verbose > 1:
        print("checking %r ..." % file)

    try:
        process_tokens(tokenize.generate_tokens(f.readline))

    except tokenize.TokenError as msg:
        errprint("%r: Token Error: %s" % (file, msg))
        return

    except IndentationError as msg:
        errprint("%r: Indentation Error: %s" % (file, msg))
        return

    except NannyNag as nag:
        badline = nag.get_lineno()
        line = nag.get_line()
        if verbose:
            print("%r: *** Line %d: trouble in tab city! ***" % (file, badline))
            print("offending line: %r" % (line,))
            print(nag.get_msg())
        else:
            if ' ' in file: file = '"' + file + '"'
            if filename_only: print(file)
            else: print(file, badline, repr(line))
        return

    finally:
        f.close()

    if verbose:
        print("%r: Clean bill of health." % (file,))
Author: 7modelsan | Project: kbengine | Lines: 60 | Source file: tabnanny.py
Example 10: __init__
def __init__(self, buffers):
    # type: (List[str]) -> None
    lines = iter(buffers)
    self.buffers = buffers
    self.tokens = tokenize.generate_tokens(lambda: next(lines))
    self.current = None   # type: Token
    self.previous = None  # type: Token
Author: olivier-heurtier | Project: sphinx | Lines: 7 | Source file: parser.py
Example 11: set_url_param
def set_url_param(parser, token):
    """
    Creates a URL (containing only the querystring [including "?"]) based on
    the current URL, but updated with the provided keyword arguments.

    Example::

        {% set_url_param name="help" age=20 %}
        ?name=help&age=20

    **Deprecated** as of 0.7.0, use `querystring`.
    """
    bits = token.contents.split()
    qschanges = {}
    for i in bits[1:]:
        try:
            key, value = i.split('=', 1)
            key = key.strip()
            value = value.strip()
            key_line_iter = StringIO.StringIO(key).readline
            keys = list(tokenize.generate_tokens(key_line_iter))
            if keys[0][0] == tokenize.NAME:
                # workaround bug #5270
                value = Variable(value) if value == '""' else parser.compile_filter(value)
                qschanges[str(key)] = value
            else:
                raise ValueError
        except ValueError:
            raise TemplateSyntaxError("Argument syntax wrong: should be"
                                      " key=value")
    return SetUrlParamNode(qschanges)
Author: aptivate | Project: django-tables2 | Lines: 31 | Source file: django_tables2.py
Example 12: extract_docstring
def extract_docstring(filename):
    # Extract a module-level docstring, if any
    lines = open(filename).readlines()
    start_row = 0
    if lines[0].startswith('#!'):
        lines.pop(0)
        start_row = 1

    docstring = ''
    first_par = ''
    li = lines.__iter__()
    li_next = li.__next__ if hasattr(li, '__next__') else li.next
    tokens = tokenize.generate_tokens(li_next)
    for tok_type, tok_content, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif tok_type == 'STRING':
            docstring = eval(tok_content)
            # If the docstring is formatted with several paragraphs, extract
            # the first one:
            paragraphs = '\n'.join(line.rstrip()
                                   for line in docstring.split('\n')).split('\n\n')
            if len(paragraphs) > 0:
                first_par = paragraphs[0]
        break
    return docstring, first_par, erow + 1 + start_row
Author: bergtholdt | Project: mayavi | Lines: 27 | Source file: render_examples.py
Example 13: readFile
def readFile():
    global curId
    script = StringIO(QuestScripts.SCRIPT)

    def readLine():
        return script.readline().replace('\r', '')

    gen = tokenize.generate_tokens(readLine)
    line = getLineOfTokens(gen)
    while line is not None:
        if line == []:
            line = getLineOfTokens(gen)
            continue
        if line[0] == 'ID':
            parseId(line)
        elif curId is None:
            notify.error('Every script must begin with an ID')
        else:
            lineDict[curId].append(line)
        line = getLineOfTokens(gen)
    script.close()
Author: CalebSmith376 | Project: src | Lines: 27 | Source file: QuestParser.py
Example 14: _template_decorator
def _template_decorator(self, func):
    """Registers template as expected by _create_template_function.

    The template data consists of:
    - the function object as it comes from the sandbox evaluation of the
      template declaration.
    - its code, modified as described in the comments of this method.
    - the path of the file containing the template definition.
    """
    if not inspect.isfunction(func):
        raise Exception('`template` is a function decorator. You must '
                        'use it as `@template` preceding a function declaration.')

    name = func.func_name

    if name in self.templates:
        raise KeyError(
            'A template named "%s" was already declared in %s.' % (name,
            self.templates[name][2]))

    if name.islower() or name.isupper() or name[0].islower():
        raise NameError('Template function names must be CamelCase.')

    lines, firstlineno = inspect.getsourcelines(func)
    first_op = None
    generator = tokenize.generate_tokens(iter(lines).next)
    # Find the first indent token in the source of this template function,
    # which corresponds to the beginning of the function body.
    for typ, s, begin, end, line in generator:
        if typ == tokenize.OP:
            first_op = True
        if first_op and typ == tokenize.INDENT:
            break
    if typ != tokenize.INDENT:
        # This should never happen.
        raise Exception('Could not find the first line of the template %s' %
                        func.func_name)

    # The code of the template in moz.build looks like this:
    # m      def Foo(args):
    # n          FOO = 'bar'
    # n+1        (...)
    #
    # where,
    # - m is firstlineno - 1,
    # - n is usually m + 1, but in case the function signature takes more
    #   lines, is really m + begin[0] - 1
    #
    # We want that to be replaced with:
    # m      if True:
    # n          FOO = 'bar'
    # n+1        (...)
    #
    # (this is simpler than trying to deindent the function body)
    # So we need to prepend with n - 1 newlines so that line numbers
    # are unchanged.
    code = '\n' * (firstlineno + begin[0] - 3) + 'if True:\n'
    code += ''.join(lines[begin[0] - 1:])

    self.templates[name] = func, code, self._context.current_path
Author: LordJZ | Project: gecko-dev | Lines: 60 | Source file: reader.py
Example 15: test_with_correct_code
def test_with_correct_code(self, MockNannyNag):
    """A python source code without any whitespace related problems."""
    with TemporaryPyFile(SOURCE_CODES["error_free"]) as file_path:
        with open(file_path) as f:
            tabnanny.process_tokens(tokenize.generate_tokens(f.readline))
        self.assertFalse(MockNannyNag.called)
Author: Eyepea | Project: cpython | Lines: 7 | Source file: test_tabnanny.py
Example 16: decistmt
def decistmt(s):
    """Substitute Decimals for floats in a string of statements.

    >>> from decimal import Decimal
    >>> s = 'print +21.3e-5*-.1234/81.7'
    >>> decistmt(s)
    "print +Decimal ('21.3e-5')*-Decimal ('.1234')/Decimal ('81.7')"

    The format of the exponent is inherited from the platform C library.
    Known cases are "e-007" (Windows) and "e-07" (not Windows). Since
    we're only showing 12 digits, and the 13th isn't close to 5, the
    rest of the output should be platform-independent.

    >>> exec(s) #doctest: +ELLIPSIS
    -3.21716034272e-0...7

    Output from calculations with Decimal should be identical across all
    platforms.

    >>> exec(decistmt(s))
    -3.217160342717258261933904529E-7
    """
    result = []
    g = generate_tokens(StringIO(s).readline)   # tokenize the string
    for toknum, tokval, _, _, _ in g:
        if toknum == NUMBER and "." in tokval:  # replace NUMBER tokens
            result.extend([(NAME, "Decimal"), (OP, "("), (STRING, repr(tokval)), (OP, ")")])
        else:
            result.append((toknum, tokval))
    return untokenize(result)
Author: slide | Project: main | Lines: 31 | Source file: test_tokenize.py
Example 17: analyse_file_by_tokens
def analyse_file_by_tokens(filename, ignore_errors):
    """This function analyses a file and produces a dict with these members:

    - 'tokens': number of tokens;
    - 'bad_indentation': list of lines with a bad indentation;
    """
    stats = {'tokens': 0}
    plugins = [cls() for cls in token_plugins]
    for plugin in plugins:
        stats[plugin.key] = []

    tokens = generate_tokens(file(filename).readline)
    try:
        for token, value, (srow, scol), _, _ in tokens:
            # Tokens number
            stats['tokens'] += 1
            for plugin in plugins:
                if plugin.analyse_token(token, value, srow, scol):
                    stats[plugin.key].append(srow)
    except TokenError, e:
        if ignore_errors is False:
            raise e
        print e
        return {'tokens': 0}
Author: kennym | Project: itools | Lines: 25 | Source file: ipkg-quality.py
Example 18: get_description
def get_description(filename):
    """
    #doc...
    For now, only the filename option is supported.
    Someday, support the other ones mentioned in our caller, ParameterDialog.__init__.__doc__.
    """
    assert type(filename) == type(""), "get_description only supports filenames for now (and not even unicode filenames, btw)"
    file = open(filename, 'rU')
    gentok = generate_tokens(file.readline)
    res, newrest = parse_top(Whole, list(gentok))
    if debug_parse:
        print len(` res `), 'chars in res' #3924
        ## print res # might be an error message
    if newrest and debug_parse:  # boolean test, since normal value is []
        print "res is", res  # assume it is an error message
        print "newrest is", newrest
        print "res[0].pprint() :"
        print res[0].pprint()  #k
    if debug_parse:
        print "parse done"
    desc = res[0]  #k class ThingData in parse_utils - move to another file? it stays with the toplevel grammar...
    return desc  # from get_description
Author: elfion | Project: nanoengineer | Lines: 28 | Source file: ParameterDialog.py
Example 19: tokenize
def tokenize(text):
    if not hasattr(text, 'readline'):
        readline = StringIO.StringIO(text).readline
    else:
        readline = text.readline
    for token in tokenizer.generate_tokens(readline):
        yield Token(*token)
Author: pradyunsg | Project: PyTML | Lines: 7 | Source file: tokenizer.py
Example 20: _parse_line
def _parse_line(self, line):
    """Parses a single line consisting of a tag-value pair
    and optional modifiers. Returns the tag name and the
    value as a `Value` object."""
    match = self.line_re.match(line)
    if not match:
        return False
    tag, value_and_mod = match.group("tag"), match.group("value")

    # If the value starts with a quotation mark, we parse it as a
    # Python string -- luckily this is the same as an OBO string
    if value_and_mod and value_and_mod[0] == '"':
        stringio = StringIO(value_and_mod)
        gen = tokenize.generate_tokens(stringio.readline)
        for toknum, tokval, _, (_, ecol), _ in gen:
            if toknum == tokenize.STRING:
                value = eval(tokval)
                mod = (value_and_mod[ecol:].strip(),)
                break
            raise ParseError("cannot parse string literal", self.lineno)
    else:
        value = value_and_mod
        mod = None

    value = Value(value, mod)
    return tag, value
Author: MatchmakerExchange | Project: reference-server | Lines: 26 | Source file: obo.py
Note: the tokenize.generate_tokens examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation hosting platforms. The snippets are selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors, and redistribution or use should follow the corresponding project's license. Do not reproduce without permission.