本文整理汇总了Python中tokenize.detect_encoding函数的典型用法代码示例。如果您正苦于以下问题:Python detect_encoding函数的具体用法?Python detect_encoding怎么用?Python detect_encoding使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了detect_encoding函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: test_filename_in_exception
def test_filename_in_exception(self):
    """detect_encoding() should include the offending file's name in the
    SyntaxError it raises for undecodable source, when a name exists."""
    # When possible, include the file name in the exception.
    path = 'some_file_path'
    lines = (
        b'print("\xdf")',  # Latin-1: LATIN SMALL LETTER SHARP S
    )

    class Bunk:
        """Minimal readline-provider mimicking a file object."""

        def __init__(self, lines, path):
            self.name = path
            self._lines = lines
            self._index = 0

        def readline(self):
            # Fix: consume the instance's own line buffer rather than the
            # enclosing closure, so _lines/_index are not dead attributes.
            if self._index == len(self._lines):
                raise StopIteration
            line = self._lines[self._index]
            self._index += 1
            return line

    with self.assertRaises(SyntaxError):
        ins = Bunk(lines, path)
        # Make sure lacking a name isn't an issue.
        del ins.name
        detect_encoding(ins.readline)
    with self.assertRaisesRegex(SyntaxError, '.*{}'.format(path)):
        ins = Bunk(lines, path)
        detect_encoding(ins.readline)
开发者ID:5outh,项目名称:Databases-Fall2014,代码行数:27,代码来源:test_tokenize.py
示例2: read_py_file
def read_py_file(filepath):
    """Return the decoded text of the Python source file at *filepath*.

    On Python 2 the file is read as universal-newline text; on Python 3
    the PEP 263 encoding declaration is honoured via the tokenize module.

    Raises:
        CouldNotHandleEncoding: if the declared/implied encoding cannot
            be detected or the contents cannot be decoded with it.
    """
    if sys.version_info < (3, ):
        # Fix: close the handle instead of leaking it.
        with open(filepath, 'rU') as f:
            return f.read()
    else:
        # see https://docs.python.org/3/library/tokenize.html#tokenize.detect_encoding
        # first just see if the file is properly encoded
        try:
            with open(filepath, 'rb') as f:
                tokenize.detect_encoding(f.readline)
        except SyntaxError as err:
            # this warning is issued:
            #   (1) in badly authored files (contains non-utf8 in a comment line)
            #   (2) a coding is specified, but wrong and
            #   (3) no coding is specified, and the default
            #       'utf8' fails to decode.
            #   (4) the encoding specified by a pep263 declaration did not match
            #       with the encoding detected by inspecting the BOM
            raise CouldNotHandleEncoding(filepath, err)
        try:
            # Fix: close the handle instead of leaking it (typo: utf-8).
            with tokenize.open(filepath) as f:
                return f.read()
        # this error is raised:
        #   (1) if utf-8 is specified, but latin1 is used with something
        #       like \x0e9 appearing (see http://stackoverflow.com/a/5552623)
        except UnicodeDecodeError as err:
            raise CouldNotHandleEncoding(filepath, err)
开发者ID:landscapeio,项目名称:prospector,代码行数:26,代码来源:encoding.py
示例3: update_fileinfo
def update_fileinfo(cls, fileinfo, document=None):
    """Record the PEP 263 source encoding of a Python file on *fileinfo*.

    When *document* is given, its first 1024 characters are probed;
    otherwise the file named by ``fileinfo.fullpathname`` is read.
    Unreadable files are silently ignored.
    """
    import tokenize
    if document:
        # Probe the in-memory document text instead of the file on disk.
        head = document.gettext(0, 1024).encode('utf-8', errors='ignore')
        detected, _ = tokenize.detect_encoding(io.BytesIO(head).readline)
        fileinfo.encoding = detected
    else:
        try:
            with open(fileinfo.fullpathname, 'rb') as stream:
                detected, _ = tokenize.detect_encoding(stream.readline)
                fileinfo.encoding = detected
        except IOError:
            # File may be missing or unreadable; keep the old encoding.
            pass
示例4: _readSourceCodeFromFilename3
def _readSourceCodeFromFilename3(source_filename):
    """Read a Python3 source file and decode it to a str.

    The encoding is taken from the PEP 263 cookie / BOM as reported by
    tokenize.detect_encoding(); on an encoding problem the SyntaxError
    is rewritten (in full-compat mode) to match CPython's own message.
    """
    import tokenize

    try:
        with open(source_filename, "rb") as source_file:
            detected = tokenize.detect_encoding(source_file.readline)[0]  # @UndefinedVariable
            # Rewind to get the whole file; the detector consumed lines.
            source_file.seek(0)
            raw = source_file.read()
        return raw.decode(detected)
    except SyntaxError as e:
        if Options.isFullCompat():
            # Mimic CPython's wording for unknown encodings.
            if PythonVersions.doShowUnknownEncodingName():
                match = re.match("unknown encoding for '.*?': (.*)", e.args[0])
                complaint = match.group(1)
            else:
                complaint = "with BOM"
            e.args = (
                "encoding problem: %s" % complaint,
                (source_filename, 1, None, None)
            )
            if hasattr(e, "msg"):
                e.msg = e.args[0]
        raise
示例5: get_source
def get_source(self, fullname):
    """Concrete implementation of InspectLoader.get_source.

    Loads the raw bytes via get_data() and, on Python 3, decodes them
    using the PEP 263 declared encoding with universal-newline
    translation; on Python 2 the raw bytes are returned unchanged.
    """
    path = self.get_filename(fullname)
    try:
        source_bytes = self.get_data(path)
    except IOError:
        raise ImportError("source not available through get_data()")
    if not py3k:
        return source_bytes  # XXX proper encoding
    import io, tokenize
    try:
        encoding = tokenize.detect_encoding(io.BytesIO(source_bytes).readline)
    except SyntaxError as exc:
        raise ImportError("Failed to detect encoding")
    newline_decoder = io.IncrementalNewlineDecoder(None, True)
    try:
        return newline_decoder.decode(source_bytes.decode(encoding[0]))
    except UnicodeDecodeError as exc:
        raise ImportError("Failed to decode source file")
示例6: source_to_unicode
def source_to_unicode(txt, errors='replace', skip_encoding_cookie=True):
    """Converts a bytes string with python source code to unicode.

    Unicode strings are passed through unchanged.  Byte strings are
    checked for the python source file encoding cookie to determine the
    encoding.  *txt* can be either a bytes buffer or a string containing
    the source code.
    """
    if isinstance(txt, unicode):
        return txt
    stream = BytesIO(txt) if isinstance(txt, bytes) else txt
    try:
        encoding, _ = detect_encoding(stream.readline)
    except SyntaxError:
        # No parseable cookie; fall back to plain ASCII.
        encoding = "ascii"
    stream.seek(0)
    text = TextIOWrapper(stream, encoding, errors=errors, line_buffering=True)
    text.mode = 'r'
    if not skip_encoding_cookie:
        return text.read()
    return u"".join(strip_encoding_cookie(text))
示例7: read_py_url
def read_py_url(url, errors='replace', skip_encoding_cookie=True):
    """Read a Python file from a URL, using the encoding declared inside the file.

    Parameters
    ----------
    url : str
      The URL from which to fetch the file.
    errors : str
      How to handle decoding errors in the file. Options are the same as for
      bytes.decode(), but here 'replace' is the default.
    skip_encoding_cookie : bool
      If True (the default), and the encoding declaration is found in the first
      two lines, that line will be excluded from the output - compiling a
      unicode string with an encoding declaration is a SyntaxError in Python 2.

    Returns
    -------
    A unicode string containing the contents of the file.
    """
    payload = urllib.request.urlopen(url).read()
    stream = io.BytesIO(payload)
    encoding, _ = detect_encoding(stream.readline)
    # Rewind so the wrapper sees the whole payload, cookie included.
    stream.seek(0)
    text = TextIOWrapper(stream, encoding, errors=errors, line_buffering=True)
    text.mode = 'r'
    if not skip_encoding_cookie:
        return text.read()
    return "".join(strip_encoding_cookie(text))
示例8: execute
def execute(self):
    """Compile and run the configured script as ``__main__``.

    Returns True when the script ran to completion, False when it raised
    an uncaught exception (which is printed).  SystemExit and
    KeyboardInterrupt are re-raised to the caller.
    """
    # Try to detect the encoding for you.
    with open(self.script, 'rb') as file:
        try:
            encoding = tokenize.detect_encoding(file.readline)[0]
        except SyntaxError:
            encoding = "utf-8"
    # Set the global values for the module.
    global_values = {
        '__file__': self.script,  # Use actual filename of the script.
        '__name__': '__main__'    # Make sure that 'if __name__ == "__main__"'-hook works
    }
    with open(self.script, 'r', encoding=encoding) as file:
        # Do not inherit any 'from __future__ import ...'-statements
        # that may be used by AnimaFX.
        # Additionally set the current filename.
        # Fix: the original passed False as compile()'s *flags* argument;
        # dont_inherit is the fifth parameter and must be True for the
        # comment above to hold.
        module = compile(file.read(), self.script, 'exec', 0, True)
    try:
        exec(module, global_values)
    # Reraise any exit requests (bare raise keeps the original traceback)
    except (SystemExit, KeyboardInterrupt):
        raise
    # Print the exception
    except BaseException as e:
        traceback.print_exception(e.__class__, e, e.__traceback__)
        return False
    return True
示例9: roundtrip
def roundtrip(filename, output=sys.stdout):
    """Round-trip *filename* through the AST unparser, writing to *output*."""
    # Honour the source file's declared encoding when reading it back.
    with open(filename, "rb") as stream:
        declared = tokenize.detect_encoding(stream.readline)[0]
    with open(filename, "r", encoding=declared) as stream:
        text = stream.read()
    Unparser(compile(text, filename, "exec", ast.PyCF_ONLY_AST), output)
示例10: read_text_file
def read_text_file(filename, encoding=None):
    """Read text file.

    Give back the contents, and the encoding we used.

    Unless specified manually, We have no way of knowing what text
    encoding this file may be in.
    The standard Python 'open' method uses the default system encoding
    to read text files in Python 3 or falls back to utf-8.
    On Python 3 we can use tokenize to detect the encoding.
    On Python 2 we can use chardet to detect the encoding.

    Returns a ``(text, encoding)`` tuple.
    """
    # Only if the encoding is not manually specified, we may try to
    # detect it.
    if encoding is None and detect_encoding is not None:
        with open(filename, 'rb') as filehandler:
            encoding = detect_encoding(filehandler.readline)[0]
    with open(filename, 'rb') as filehandler:
        data = filehandler.read()
    if encoding is not None:
        return data.decode(encoding), encoding
    if HAVE_CHARDET:
        encoding_result = chardet.detect(data)
        if encoding_result and encoding_result['encoding'] is not None:
            encoding = encoding_result['encoding']
            return data.decode(encoding), encoding
    # Look for hints, PEP263-style
    if data[:3] == b'\xef\xbb\xbf':
        # UTF-8 byte order mark.
        encoding = 'utf-8'
        return data.decode(encoding), encoding
    data_len = len(data)
    for canary in ENCODING_HINTS:
        if canary in data:
            pos = data.index(canary)
            # Fix: index the byte string with length-1 slices; on
            # Python 3 ``data[i]`` is an int, so the original membership
            # test was always true and ``coding += data[pos]`` raised
            # TypeError.  Slicing behaves identically on Python 2.
            if pos > 1 and data[pos - 1:pos] not in (b' ', b'\n', b'\r'):
                continue
            pos += len(canary)
            coding = b''
            while pos < data_len and data[pos:pos + 1] not in (b' ', b'\n'):
                coding += data[pos:pos + 1]
                pos += 1
            encoding = coding.decode('ascii').strip()
            try:
                return data.decode(encoding), encoding
            except (LookupError, UnicodeError):
                # Try the next one
                pass
    # Fall back to utf-8
    encoding = 'utf-8'
    return data.decode(encoding), encoding
开发者ID:awello,项目名称:zest.releaser,代码行数:60,代码来源:utils.py
示例11: patch
def patch(self, filename):
    """Apply every registered operation to *filename*.

    Returns True when the content changed (and was written back or
    echoed to stdout), False when no operation modified it.
    """
    self.current_file = filename
    with tokenize.open(filename) as stream:
        original = stream.read()
    patched = original
    for operation in self.operations:
        patched = operation.patch(patched)
    if patched == original:
        # no change
        self.check(patched)
        if self.options.to_stdout:
            self.write_stdout(patched)
        return False
    # Preserve the file's declared encoding when rewriting it.
    with open(filename, "rb") as stream:
        encoding, _ = tokenize.detect_encoding(stream.readline)
    if not self.options.quiet:
        print("Patch %s" % filename)
    if self.options.to_stdout:
        self.write_stdout(patched)
    else:
        with open(filename, "w", encoding=encoding) as stream:
            stream.write(patched)
    self.check(patched)
    return True
示例12: _read_file
def _read_file(filename):
    """Return the file's text plus two trailing newlines, obeying the
    python encoding marker."""
    with open(filename, 'rb') as fp:
        declared, _ = tokenize.detect_encoding(fp.readline)
    with open(filename, 'rt', encoding=declared) as fp:
        return fp.read() + '\n\n'
示例13: read_source_code
def read_source_code(filename):
    """Return the source lines of *filename* (keepends), decoded per its
    PEP 263 declaration with universal-newline translation."""
    with open(filename, 'rb') as source_file:
        encoding, consumed = tokenize.detect_encoding(source_file.readline)
        # detect_encoding() already consumed the first line(s); stitch
        # them back onto the remainder of the file.
        raw = b''.join(consumed) + source_file.read()
    decoder = io.IncrementalNewlineDecoder(None, translate=True)
    return decoder.decode(raw.decode(encoding)).splitlines(True)
示例14: read_pyfile
def read_pyfile(filename):
    """Read and return the contents of a Python source file (as a
    string), taking into account the file encoding."""
    with open(filename, "rb") as pyfile:
        declared = tokenize.detect_encoding(pyfile.readline)[0]
    with open(filename, "r", encoding=declared) as pyfile:
        return pyfile.read()
示例15: _stdin_get_value_py3
def _stdin_get_value_py3():
    """Return stdin's contents as a StringIO, decoded with the declared
    source encoding when one can be detected, else as UTF-8."""
    raw = sys.stdin.buffer.read()
    try:
        coding, _ = tokenize.detect_encoding(io.BytesIO(raw).readline)
        return io.StringIO(raw.decode(coding))
    except (LookupError, SyntaxError, UnicodeError):
        # Bad or unknown cookie: fall back to utf-8.
        return io.StringIO(raw.decode("utf-8"))
示例16: test_matched_bom_and_cookie_first_line
def test_matched_bom_and_cookie_first_line(self):
    """A UTF-8 BOM plus a matching first-line cookie yields 'utf-8-sig',
    and the consumed line is reported without the BOM."""
    source_lines = (
        b'\xef\xbb\xbf# coding=utf-8\n',
        b'print(something)\n',
        b'do_something(else)\n',
    )
    encoding, consumed_lines = detect_encoding(self.get_readline(source_lines))
    self.assertEqual(encoding, 'utf-8-sig')
    self.assertEqual(consumed_lines, [b'# coding=utf-8\n'])
示例17: test_no_bom_no_encoding_cookie
def test_no_bom_no_encoding_cookie(self):
    """Without a BOM or cookie the default is utf-8, after consuming the
    first two lines."""
    source_lines = (
        b'# something\n',
        b'print(something)\n',
        b'do_something(else)\n',
    )
    encoding, consumed_lines = detect_encoding(self.get_readline(source_lines))
    self.assertEqual(encoding, 'utf-8')
    self.assertEqual(consumed_lines, list(source_lines[:2]))
示例18: test_cookie_first_line_no_bom
def test_cookie_first_line_no_bom(self):
    """A latin-1 cookie on the first line (no BOM) is normalised to
    'iso-8859-1', consuming only that line."""
    source_lines = (
        b'# -*- coding: latin-1 -*-\n',
        b'print(something)\n',
        b'do_something(else)\n',
    )
    encoding, consumed_lines = detect_encoding(self.get_readline(source_lines))
    self.assertEqual(encoding, 'iso-8859-1')
    self.assertEqual(consumed_lines, [b'# -*- coding: latin-1 -*-\n'])
示例19: _source_encoding_py3
def _source_encoding_py3(source):
    """Determine the encoding for `source`, according to PEP 263.

    `source` is a byte string: the text of the program.

    Returns a string, the name of the encoding.
    """
    # detect_encoding() wants a readline-style callable over the lines.
    return tokenize.detect_encoding(iternext(source.splitlines(True)))[0]
示例20: test_short_files
def test_short_files(self):
    """Short and empty inputs default to utf-8, with or without a BOM,
    and the consumed lines are reported (BOM stripped)."""
    # Fix: assertEquals is a deprecated alias (removed in Python 3.12);
    # use assertEqual throughout.
    readline = self.get_readline((b'print(something)\n',))
    encoding, consumed_lines = detect_encoding(readline)
    self.assertEqual(encoding, 'utf-8')
    self.assertEqual(consumed_lines, [b'print(something)\n'])

    encoding, consumed_lines = detect_encoding(self.get_readline(()))
    self.assertEqual(encoding, 'utf-8')
    self.assertEqual(consumed_lines, [])

    readline = self.get_readline((b'\xef\xbb\xbfprint(something)\n',))
    encoding, consumed_lines = detect_encoding(readline)
    self.assertEqual(encoding, 'utf-8')
    self.assertEqual(consumed_lines, [b'print(something)\n'])

    readline = self.get_readline((b'\xef\xbb\xbf',))
    encoding, consumed_lines = detect_encoding(readline)
    self.assertEqual(encoding, 'utf-8')
    self.assertEqual(consumed_lines, [])
注:本文中的tokenize.detect_encoding函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论