This article collects typical usage examples of the Python function pygments.lexers.get_lexer_for_filename. If you have been wondering what exactly get_lexer_for_filename does, how to call it, or what real-world usage looks like, the hand-picked code examples below should help.
A total of 20 code examples of get_lexer_for_filename are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
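Before the individual examples, here is a minimal sketch of the pattern most of them share: ask Pygments for a lexer matching a file name and fall back to plain text when no lexer is registered for that extension. The file name 'example.py', the helper name lexer_or_plain_text, and the TextLexer fallback are illustrative choices, not taken from any of the projects below.

from pygments.lexers import get_lexer_for_filename
from pygments.lexers.special import TextLexer
from pygments.util import ClassNotFound

def lexer_or_plain_text(filename):
    """Return the lexer registered for *filename*, falling back to plain text."""
    try:
        # Pygments selects a lexer from the file name/extension and raises
        # ClassNotFound when nothing matches.
        return get_lexer_for_filename(filename)
    except ClassNotFound:
        # Illustrative fallback choice: treat unknown extensions as plain text.
        return TextLexer()

print(lexer_or_plain_text('example.py'))   # e.g. <pygments.lexers.PythonLexer>

Several of the examples below vary this pattern by falling back to get_lexer_by_name, guess_lexer, or a remapped dummy file name instead of TextLexer.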
Example 1: try_lexer
def try_lexer(name, filename=None):
    """ Try getting a pygments lexer by name.
        None is returned if no lexer can be found by that name,
        unless 'filename' is given. If 'filename' is given the lexer
        is guessed by file name.
        Ultimately returns None on failure.
    """
    if not name:
        if filename_is_stdin(filename):
            # No lexer or file name.
            return None
        try:
            lexer = lexers.get_lexer_for_filename(filename)
        except pygments.util.ClassNotFound:
            return None
        # Retrieved by file name only.
        return lexer

    try:
        lexer = lexers.get_lexer_by_name(name)
    except pygments.util.ClassNotFound:
        if filename_is_stdin(filename):
            # No lexer found.
            return None
        try:
            lexer = lexers.get_lexer_for_filename(filename)
        except pygments.util.ClassNotFound:
            return None
        # Retrieved by falling back to file name.
        return lexer

    # Successful lexer by name.
    return lexer
Developer: guneysu-arsiv, Project: ccat, Lines: 32, Source: ccat.py
Example 2: iscode
def iscode(self):
    if pygments is None:
        return False
    try:
        get_lexer_for_filename(self.filename)
        return True
    except ClassNotFound:
        return False
Developer: FMSio, Project: regenwolken, Lines: 10, Source: specs.py
Example 3: get_renderer
def get_renderer(full_path):
    if full_path.endswith('.ipynb'):
        return nb_renderer
    else:
        try:
            get_lexer_for_filename(full_path)
            return pygments_renderer
        except ClassNotFound:
            return raw_renderer
    return raw_renderer
Developer: B-Rich, Project: wakari-app-viewer, Lines: 11, Source: renderer.py
Example 4: get_lexers
def get_lexers():
    ''''''
    import pygments.lexers as t
    count = 0
    for item in t.get_all_lexers():
        print item
        count += 1
    try:
        print t.get_lexer_for_filename('this.xsd')
    except:
        print 'found'
    print 'NUMBER OF LEXERS: %s' % count
Developer: bodleian, Project: vcabtext, Lines: 12, Source: ideatest.py
Example 5: pygments_lexer
def pygments_lexer(filename):
    try:
        from pygments.lexers import get_lexer_for_filename
        from pygments.util import ClassNotFound
    except ImportError:
        return None
    try:
        return get_lexer_for_filename(filename)
    except ClassNotFound:
        if filename.lower().endswith('.recipe'):
            return get_lexer_for_filename('a.py')
        return None
Developer: alip, Project: calibre, Lines: 12, Source: highlight.py
Example 6: __init__
def __init__(self, text, syntax=None, name=None):
    self.text = strip_indents(text)
    lexer = None
    if name:
        self.name = name

    if syntax is not None:
        self.syntax = syntax.lower()
    elif self.name:
        try:
            lexer = get_lexer_for_filename(self.name)
        except ClassNotFound:
            pass
        else:
            self.syntax = lexer.aliases[0]

    if self.syntax == "markdown":
        self.html = markdown(self.text)
    else:
        try:
            lexer = lexer or get_lexer_by_name(self.syntax)
        except ClassNotFound:
            # do nothing - if html is empty then description is a raw text
            pass
        else:
            self.html = highlight(self.text, lexer, formater)
Developer: firemark, Project: quizFactory, Lines: 27, Source: quiz.py
Example 7: get_sphinx_data
def get_sphinx_data(sphinx_id):
    query = Search(indexes=['sourcecode'], config=BaseSearchConfig)
    query = query.filter(id__eq=sphinx_id)
    results = query.ask()

    if len(results['result']['items']) == 0:
        flask.abort(404)

    filename = results['result']['items'][0]['path']
    if not os.path.isfile(filename):
        return filename, 'File not found. Please resphinx_id.'

    code = ''
    with open(filename) as f:
        code = f.read()

    try:
        # This is the line that throws the exception.
        lexer = get_lexer_for_filename(filename)
        formatter = HtmlFormatter(noclasses=True)
        result = highlight(code, lexer, formatter)
    except ClassNotFound:
        # Syntax highlighting not supported.
        result = '<pre>{}</pre>'.format(code)

    url = flask.url_for('display', sphinx_id=sphinx_id)
    return {'body': result, 'path': filename, 'url': url}
Developer: pombredanne, Project: codesearch, Lines: 28, Source: codesearch.py
Example 8: render_listing
def render_listing(in_name, out_name):
    with open(in_name, 'r') as fd:
        try:
            lexer = get_lexer_for_filename(in_name)
        except:
            lexer = TextLexer()
        code = highlight(fd.read(), lexer,
                         HtmlFormatter(cssclass='code',
                                       linenos="table",
                                       nowrap=False,
                                       lineanchors=utils.slugify(f),
                                       anchorlinenos=True))
    title = os.path.basename(in_name)
    crumbs = out_name.split(os.sep)[1:-1] + [title]
    # TODO: write this in human
    paths = ['/'.join(['..'] * (len(crumbs) - 2 - i)) for i in
             range(len(crumbs[:-2]))] + ['.', '#']
    context = {
        'code': code,
        'title': title,
        'crumbs': zip(paths, crumbs),
        'lang': kw['default_lang'],
        'description': title,
    }
    self.site.render_template('listing.tmpl', out_name, context)
Developer: hansg01, Project: nikola, Lines: 25, Source: task_render_listings.py
Example 9: set_lexer_from_filename
def set_lexer_from_filename(self, filename):
    """
    Change the lexer based on the filename (actually only the extension is
    needed)

    :param filename: Filename or extension
    """
    if filename.endswith("~"):
        filename = filename[0:len(filename) - 1]
    try:
        self._lexer = get_lexer_for_filename(filename)
        _logger().debug('lexer for filename (%s): %r', filename,
                        self._lexer)
    except ClassNotFound:
        _logger().warning('failed to get lexer from filename: %s, using '
                          'plain text instead...', filename)
        self._lexer = TextLexer()
        return False
    except ImportError:
        # import error while loading some pygments plugins, the editor
        # should not crash
        _logger().warning('failed to get lexer from filename: %s, using '
                          'plain text instead...', filename)
        self._lexer = TextLexer()
        return False
    else:
        return True
Developer: abdullahtahiriyo, Project: cadquery-freecad-module, Lines: 27, Source: pygments_sh.py
Example 10: test_example_files
def test_example_files():
    testdir = os.path.dirname(__file__)
    outdir = os.path.join(testdir, 'examplefiles', 'output')
    if STORE_OUTPUT and not os.path.isdir(outdir):
        os.makedirs(outdir)
    for fn in os.listdir(os.path.join(testdir, 'examplefiles')):
        if fn.startswith('.') or fn.endswith('#'):
            continue

        absfn = os.path.join(testdir, 'examplefiles', fn)
        if not os.path.isfile(absfn):
            continue
        outfn = os.path.join(outdir, fn)

        try:
            lx = get_lexer_for_filename(absfn)
        except ClassNotFound:
            if "_" not in fn:
                raise AssertionError('file %r has no registered extension, '
                                     'nor is of the form <lexer>_filename '
                                     'for overriding, thus no lexer found.'
                                     % fn)
            try:
                name, rest = fn.split("_", 1)
                lx = get_lexer_by_name(name)
            except ClassNotFound:
                raise AssertionError('no lexer found for file %r' % fn)
        yield check_lexer, lx, absfn, outfn
Developer: 13scoobie, Project: gobyexample, Lines: 28, Source: test_examplefiles.py
Example 11: pipp_code
def pipp_code(context, src, code, lexer, docss):
    ctx = context.processor.extensionParams[(NAMESPACE, 'context')]
    src = Conversions.StringValue(src)
    if src:
        abs_src = ctx.abs_in_path(src)
        ctx.add_depends(abs_src[len(ctx.in_root):])
        fname = os.path.basename(src)
        code = open(abs_src).read()
    else:
        fname = 'inline-code'
        code = Conversions.StringValue(code)

    lexer = Conversions.StringValue(lexer)
    if lexer:
        lexer = get_lexer_by_name(lexer)
    elif src:
        lexer = get_lexer_for_filename(fname)
    else:
        raise Exception('The lexer must be explicitly specified for inline code blocks')

    formatter = HtmlFormatter(cssclass="source")
    result = highlight(code, lexer, formatter)
    if Conversions.StringValue(docss) == '1':
        result = '<link rel="stylesheet" href="%s.css"/>' % fname + result
        css = open(ctx.abs_out_path(ctx.abs_in_path(fname + '.css')), 'w')
        css.write(formatter.get_style_defs())
        css.close()
    return result
Developer: TheProjecter, Project: pipp, Lines: 30, Source: pipp_xslt.py
Example 12: _lexer_for_filename
def _lexer_for_filename(filename):
    """Return a Pygments lexer suitable for a file based on its extension.

    Return None if one can't be determined.
    """
    if filename.endswith('.js') or filename.endswith('.jsm'):
        # Use a custom lexer for js/jsm files to highlight preprocessor
        # directives
        lexer = JavascriptPreprocLexer()
    elif filename == 'moz.build':
        lexer = PythonLexer()
    else:
        try:
            # Lex .h files as C++ so occurrences of "class" and such get colored;
            # Pygments expects .H, .hxx, etc. This is okay even for uses of
            # keywords that would be invalid in C++, like 'int class = 3;'.
            # Also we can syntax highlight XUL as XML, and IDL/WebIDL as CPP
            lexer = get_lexer_for_filename(
                'dummy.cpp' if filename.endswith(
                    ('.h', '.idl', '.webidl', '.tcc', '.tpp'))
                else 'dummy.xml' if filename.endswith(('.xul', '.svg'))
                else filename)
        except ClassNotFound:
            return None
    return lexer
Developer: abbeyj, Project: dxr, Lines: 28, Source: __init__.py
Example 13: get_line_types
def get_line_types(repo, repo_uri, rev, path):
    """Returns an array, where each item means a line of code.
    Each item is labeled 'code', 'comment' or 'empty'."""
    #profiler_start("Processing LineTypes for revision %s:%s", (self.rev, self.file_path))
    uri = os.path.join(repo_uri, path)  # concat repo_uri and file_path for full path
    file_content = _get_file_content(repo, uri, rev)  # get file_content

    if file_content is None or file_content == '':
        printerr("[get_line_types] Error: No file content for " + str(rev) + ":" + str(path) + " found! Skipping.")
        line_types = None
    else:
        try:
            lexer = get_lexer_for_filename(path)
        except ClassNotFound:
            try:
                printdbg("[get_line_types] Guessing lexer for " + str(rev) + ":" + str(path) + ".")
                lexer = guess_lexer(file_content)
            except ClassNotFound:
                printdbg("[get_line_types] No guess or lexer found for " + str(rev) + ":" + str(path) + ". Using TextLexer instead.")
                lexer = TextLexer()
        if isinstance(lexer, NemerleLexer):
            # this lexer is broken and yields an unstoppable process
            # see https://bitbucket.org/birkenfeld/pygments-main/issue/706/nemerle-lexer-ends-in-an-infinite-loop
            lexer = TextLexer()

        # Not sure if this should be skipped when the language uses off-side rules (e.g. Python;
        # see http://en.wikipedia.org/wiki/Off-side_rule for a list)
        stripped_code = _strip_lines(file_content)
        lexer_output = _iterate_lexer_output(lexer.get_tokens(stripped_code))
        line_types_str = _comment_empty_or_code(lexer_output)
        line_types = line_types_str.split("\n")

    return line_types
Developer: ProjectHistory, Project: MininGit, Lines: 35, Source: line_types.py
Example 14: doc
def doc(path):
    """Gathers the documentation
    """
    file_ = open(path, 'r')
    content = file_.read()
    file_.close()

    try:
        lexer = get_lexer_for_filename(path, stripall=True)
    except:
        lexer = TextLexer(stripall=True)

    if path.endswith('.md'):
        return markdown.markdown(
            bleach.clean(content),
            extensions=['markdown.extensions.nl2br', 'markdown.extensions.toc']
        )
    elif path.endswith('.less'):
        lexer = ScssLexer(stripall=True)

    formatter = HtmlFormatter(
        linenos=True,
        cssclass='codehilight',
        noclobber_cssfile=True,
        title=path[path.rfind(os.sep)+1:]
    )
    return "<div class='table-responsive codehilight'>" + highlight(content, lexer, formatter) + "</div>"
Developer: rhino1998, Project: Voting-System, Lines: 25, Source: util.py
Example 15: _get_lexer
def _get_lexer(self, filename):
    try:
        lexer = get_lexer_for_filename(filename)
    except ClassNotFound:
        raise
    return lexer
Developer: diezguerra, Project: pastepm, Lines: 7, Source: views.py
Example 16: highlightCode
def highlightCode(code, fileName):
    htmlFormatter = HtmlFormatter()
    try:
        lexer = get_lexer_for_filename(fileName)
    except ClassNotFound:
        lexer = TextLexer()
    return highlight(code, lexer, htmlFormatter)
Developer: n3wtron, Project: gitDashboard, Lines: 7, Source: highlight.py
Example 17: code
def code(title):
    """
    Return syntax highlighted LaTeX.
    """
    filename = title.split(' ')[1]

    # open the code file relative from the yml file path
    f = open(os.path.join(os.path.dirname(os.path.abspath(source_file)), filename))

    out = "\n\\begin{frame}[fragile,t]"
    out += "\n\t\\frametitle{Code: \"%s\"}" % filename

    try:
        from pygments import highlight
        from pygments.lexers import get_lexer_for_filename, get_lexer_by_name
        from pygments.formatters import LatexFormatter

        try:
            lexer = get_lexer_for_filename(filename)
        except:
            lexer = get_lexer_by_name('text')

        out += "\n%s\n" % highlight(f.read(), lexer, LatexFormatter(linenos=True))
    except ImportError:
        out += "\n\t\\begin{lstlisting}\n"
        out += f.read()
        out += "\n\t\\end{lstlisting}"
    f.close()

    out += "\n\\end{frame}"
    return out
Developer: aleksandersumowski, Project: yml2tex, Lines: 30, Source: __init__.py
Example 18: colorize
def colorize(language, title, text):
    """Colorize the text syntax.

    Guess the language of the text and colorize it.
    Returns a tuple containing the colorized text and the language name.
    """
    formatter = HtmlFormatter(
        linenos=True, style=PygmentsStyle, noclasses=True, nobackground=True)

    # Try to get the lexer by name
    try:
        lexer = get_lexer_by_name(language.lower())
        return highlight(text, lexer, formatter), lexer.name
    except LexerNotFound:
        pass

    # Try to get the lexer by filename
    try:
        lexer = get_lexer_for_filename(title.lower())
        return highlight(text, lexer, formatter), lexer.name
    except LexerNotFound:
        pass

    # Try to guess the lexer from the text
    try:
        lexer = guess_lexer(text)
        if lexer.analyse_text(text) > .3:
            return highlight(text, lexer, formatter), lexer.name
    except LexerNotFound:
        pass

    # Fallback to the plain/text lexer
    lexer = get_lexer_by_name('text')
    return highlight(text, lexer, formatter), lexer.name
Developer: Kozea, Project: PastaBin, Lines: 31, Source: pastabin.py
Example 19: get_used_tokens
def get_used_tokens(filename):
    """
    Get all the tokens that are referenced in this file
    """
    lines = (line for line in open(filename, 'r'))
    lexer = lexers.get_lexer_for_filename(filename)
    seen = set([])
    used_tokens = []
    declared_tokens = set([])

    token_iter = lexer.get_tokens('\n'.join(lines))
    filtered_iter = (tok for tok in token_iter if tok[1].strip())
    prev_iter, curr_iter, next_iter = itertools.tee(filtered_iter, 3)

    # we miss the first token here... whatever
    next(curr_iter)
    next(next_iter)
    next(next_iter)

    for (prev, curr, next_tok) in itertools.izip(prev_iter, curr_iter, next_iter):
        if str(prev[0]) in ['Token.Operator', 'Token.Keyword.Type'] and prev[1] == '.':
            continue
        if (next_tok[1]) == u'=>':
            continue
        if str(curr[0]) in ['Token.Name.Class', 'Token.Keyword.Type'] and curr[1][0].isupper():
            if is_declaration(prev[1]):
                declared_tokens.add(curr[1])
            elif curr not in seen:
                seen.add(curr)
                used_tokens.append(curr[1].split(".")[0])

    return [t for t in used_tokens if t not in declared_tokens]
Developer: pfhayes, Project: scala_imports, Lines: 34, Source: scala_imports.py
Example 20: process_text_to_dict
def process_text_to_dict(self, input_text):
    composer = Composer()
    builder = idiopidae.parser.parse('Document', input_text + "\n\0")

    ext = self.artifact.input_ext
    name = "input_text%s" % ext

    # List any file extensions which don't map neatly to lexers.
    if ext == '.pycon':
        lexer = PythonConsoleLexer()
    elif ext == '.rbcon':
        lexer = RubyConsoleLexer()
    elif ext in ('.json', '.dexy'):
        lexer = JavascriptLexer()
    else:
        lexer = get_lexer_for_filename(name)

    formatter = get_formatter_for_filename(self.artifact.filename(),
                                           lineanchors='l')

    output_dict = OrderedDict()
    for i, s in enumerate(builder.sections):
        lines = builder.statements[i]['lines']
        formatted_lines = composer.format(lines, lexer, formatter)
        output_dict[s] = formatted_lines

    return output_dict
Developer: cassj, Project: dexy, Lines: 25, Source: idio_handler.py
Note: the pygments.lexers.get_lexer_for_filename examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and redistribution or use should follow each project's license. Please do not republish without permission.