
Python pygments.lex Function Code Examples


This article collects typical usage examples of the pygments.lex function in Python. If you are wondering what exactly lex does, how to call it, or what real-world uses look like, the curated examples below should help.



Twenty code examples of the lex function are shown below, sorted by popularity by default. You can upvote the ones you like or find useful; your feedback helps the recommendation system surface better Python examples.
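Before diving in, here is a minimal sketch of the call every example below builds on: pygments.lex(code, lexer) returns a generator of (token_type, text) pairs whose text fields, concatenated, reproduce the input.

import pygments
from pygments.lexers import PythonLexer

code = "def add(a, b):\n    return a + b\n"
for ttype, value in pygments.lex(code, PythonLexer()):
    print(ttype, repr(value))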

Example 1: findMain

def findMain(code):
    """
    Look for the existence of if __name__ == '__main__'
    Documentation: https://docs.python.org/2/tutorial/modules.html in 6.1.1
    """
    lexer = PythonLexer()

    # pygments.lex returns a one-shot generator, so two token streams are
    # built up front: the second is only consumed if the first match fails
    tokens_1 = pygments.lex(code, lexer)
    tokens_2 = pygments.lex(code, lexer)
    
    sequence_1 = [(Token.Keyword, '^if$'),
                (Token.Name, '^__name__$'),
                (Token.Operator, '^==$'),
                (Token.Literal.String.Double, '^__main__$'),
                (Token.Punctuation, '^:$')]

    sequence_2 = [(Token.Keyword, '^if$'),
                (Token.Name, '^__name__$'),
                (Token.Operator, '^==$'),
                (Token.Literal.String.Single, '^__main__$'),
                (Token.Punctuation, '^:$')]

    mainIdiom = PythonIdiom('ifNameMain')

    lineNum = _findSeqInTokens(sequence_1, tokens_1)
    if lineNum < 0:
        lineNum = _findSeqInTokens(sequence_2, tokens_2)
    if lineNum > 0:
        mainIdiom.addNew(lineNum)
    log("If name main found in lines: " + str(mainIdiom.getLines()))
    return mainIdiom
Developer: jjmerchante, Project: Pythonic, Lines: 33, Source: pythonic.py
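A note on Example 1: pygments.lex returns a one-shot generator, which is why the code lexes the source twice — the stream consumed while matching sequence_1 cannot be rewound for sequence_2. A minimal sketch of that behavior:

import pygments
from pygments.lexers import PythonLexer

tokens = pygments.lex("x = 1\n", PythonLexer())
first_pass = list(tokens)   # consumes the generator
second_pass = list(tokens)  # already exhausted
assert first_pass and second_pass == []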


Example 2: test_bare_class_handler

def test_bare_class_handler():
    from pygments import lex, format
    from pygments.formatters import HtmlFormatter
    from pygments.lexers import PythonLexer
    try:
        lex('test\n', PythonLexer)
    except TypeError as e:
        assert 'lex() argument must be a lexer instance' in str(e)
    else:
        assert False, 'nothing raised'
    try:
        format([], HtmlFormatter)
    except TypeError as e:
        assert 'format() argument must be a formatter instance' in str(e)
    else:
        assert False, 'nothing raised'
Developer: Oire, Project: gobyexample, Lines: 15, Source: test_basic_api.py


Example 3: findMagicMethods

def findMagicMethods(code):
    """
    Search for magic methods in the code and return a list of how many were
    found, what difficulty level each has and which ones they were
    Documentation: http://www.rafekettler.com/magicmethods.html
                   Python Pocket Reference page 88
    """
    lexer = PythonLexer()
    tokens = pygments.lex(code, lexer)
    lineNumber = 1
    methodsFound = []
    methodsIdiom1 = PythonIdiom('idiomMethods1')
    methodsIdiom2 = PythonIdiom('idiomMethods2')
    methodsIdiom3 = PythonIdiom('idiomMethods3')

    for ttype, word in tokens:
        lineNumber += _getNewLines((ttype, word))
        if ttype is Token.Name.Function:
            if word in magicMethods_1:
                methodsIdiom1.addNew(lineNumber, otherInfo={'method': word})
                methodsFound.append(word)
            elif word in magicMethods_2:
                methodsIdiom2.addNew(lineNumber, otherInfo={'method': word})
                methodsFound.append(word)
            elif word in magicMethods_3:
                methodsIdiom3.addNew(lineNumber, otherInfo={'method': word})
                methodsFound.append(word)

    log("MagicMethods: %s" % str(methodsFound))
    return [methodsIdiom1, methodsIdiom2, methodsIdiom3]
Developer: jjmerchante, Project: Pythonic, Lines: 30, Source: pythonic.py


Example 4: basicStructure

def basicStructure(code):
    lexer = PythonLexer()
    lexer.add_filter('tokenmerge')
    tokens = pygments.lex(code, lexer)
    for token in tokens:
        print(token)
Developer: jjmerchante, Project: Pythonic, Lines: 7, Source: pythonicSimple.py


Example 5: __init__

    def __init__(self, pdf, code, lexer):
        self.pdf = pdf
        fname, fstyle, fsize = self.pdf.theme["code-font"]

        self.pdf.set_font(fname, fstyle, fsize)
        style = pygments.styles.get_style_by_name("emacs")
        style = dict(style)
        for token, text in pygments.lex(code["code"], lexer):
            token_style = style[token]

            if token_style["color"]:
                r, g, b = map(ord, token_style["color"].decode("hex"))
            else:
                r, g, b = (0, 0, 0)
            self.pdf.set_text_color(r, g, b)

            if token_style["bold"] and token_style["italic"]:
                self.pdf.set_font(fname, "BI", fsize)
            elif token_style["bold"]:
                self.pdf.set_font(fname, "B", fsize)
            elif token_style["italic"]:
                self.pdf.set_font(fname, "I", fsize)
            else:
                self.pdf.set_font(fname, "", fsize)

            height = pdf.theme["code-height"]
            self.pdf.write(height, text)
Developer: bravegnu, Project: peacock, Lines: 27, Source: peacock.py
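Example 5 is Python 2 code: str.decode("hex") and mapping ord() over a string no longer work on Python 3. A hypothetical Python 3 equivalent for turning a Pygments style color such as 'aa22ff' (no leading '#') into RGB components could use bytes.fromhex:

def hex_to_rgb(color):
    # 'aa22ff' -> (170, 34, 255)
    return tuple(bytes.fromhex(color))

r, g, b = hex_to_rgb("aa22ff")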


Example 6: findDocstring

def findDocstring(code):
    """Find the use of documentation in the functions, classes or script
    Documentation: https://www.python.org/dev/peps/pep-0257/
    """
    lexer = PythonLexer()
    lexer.add_filter('tokenmerge')

    classDefToken = (Token.Keyword, '^class$')
    functDefToken = (Token.Keyword, '^def$')
    tokens = pygments.lex(code, lexer)

    docIdiom = PythonIdiom('docstring')
    docstringFound = defaultdict(int)
    typeDoc = 'module'
    lineNumber = 1

    for ttype, word in tokens:
        if _sameToken((ttype, word), classDefToken):
            typeDoc = 'class'
        elif _sameToken((ttype, word), functDefToken):
            typeDoc = 'function'
        elif ttype == Token.Literal.String.Doc:
            docstringFound[typeDoc] += 1
            docIdiom.addNew(lineNumber)
        lineNumber += _getNewLines((ttype, word))

    for typeDoc in docstringFound:
        log("type %s: %d found" % (typeDoc, docstringFound[typeDoc]))
    log('DocString found in lines: ' + str(docIdiom.getLines()))
    return docIdiom
Developer: jjmerchante, Project: Pythonic, Lines: 31, Source: pythonic.py


Example 7: check

def check(*expected):
    text = ''.join(i[1] for i in expected)
    md_lexer = MarkdownLexer()
    md_lexer.add_filter('raiseonerror')
    md_lexer.add_filter('tokenmerge')
    result = list(pygments.lex(text, md_lexer))
    assert result == list(expected)
Developer: jhermann, Project: pygments-markdown-lexer, Lines: 7, Source: test_lexer.py
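Several examples here attach built-in Pygments filters to the lexer before calling lex: 'tokenmerge' merges consecutive tokens of the same type, which makes multi-token sequence matching more predictable, and 'raiseonerror' raises an exception when the lexer emits an error token, as this test relies on. A minimal sketch:

import pygments
from pygments.lexers import PythonLexer

lexer = PythonLexer()
lexer.add_filter('tokenmerge')  # merge adjacent same-type tokens
tokens = list(pygments.lex("a = 1\n", lexer))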


Example 8: SyntexHighlight

    def SyntexHighlight(self, event=None):
        from tkinter.font import Font
        for tag in self.tag_names():
            self.tag_delete(tag)
        self.mark_set("range_start", "1.0")
        data = self._get_value()
        self.tag_configure("Token.Comment", foreground="#F00")
        bolder = Font(family=self.app.cnf['font'][0])
        bolder.config(size=self.app.cnf['font'][1]-2)
        bolder.config(weight="bold")
        for token, content in lex(data, PythonLexer()):
            self.mark_set("range_end", "range_start + %dc" % len(content))
            self.tag_add(str(token), "range_start", "range_end")
            self.mark_set("range_start", "range_end")
        self.tag_config("Token.Comment.Single", foreground="#F00")
        self.tag_config("Token.Literal.String.Doc", foreground="#F00")
        for tag in self.tag_names():
            if 'Token.Keyword' == tag:
                self.tag_config(tag, foreground="#008", font=bolder)
            elif 'Token.Keyword.Namespace' == tag:
                self.tag_config(tag, foreground="#00F", font=bolder)
            elif 'Token.Name.Class' in tag:
                self.tag_config(tag, foreground="#F30", background='#AFA')
            elif 'Token.Name.Function' in tag:
                self.tag_config(tag, foreground="#A3A", background='#FFA')
            elif 'Token.Literal' in tag:
                self.tag_config(tag, foreground="#6A0")
            elif 'Token.Operator' in tag:
                self.tag_config(tag, foreground="#A3A")
        print(self.tag_names())
Developer: soma0sd, Project: python-study, Lines: 30, Source: main.py


Example 9: _lexContents

    def _lexContents(self):
        # We add a space in front because otherwise the lexer will discard
        # everything up to the first token, meaning that we lose the potentially
        # empty first lines and mess up the matching. With the space, we force
        # the lexer to process the initial \n, and we just skip the space token
        tokens = list(pygments.lex(" "+self._document.documentText(), pygments.lexers.PythonLexer()))
        self._document.beginTransaction()
        current_line_num = 1
        meta = []

        # Skip the space token
        for token in tokens[1:]:
            ttype, string = token

            meta.extend([ttype]*len(string))

            if string.endswith('\n'):
                self._document.deleteCharMeta( (current_line_num,1),
                                                self._document.lineLength(current_line_num),
                                                CharMeta.LexerToken)
                self._document.updateCharMeta((current_line_num,1), {CharMeta.LexerToken: meta})
                current_line_num += 1
                meta = []

        self._document.endTransaction()
Developer: maiconpl, Project: vix, Lines: 25, Source: Lexer.py


Example 10: filename

    def filename(self, value):
        "Set the file being displayed by the view"
        if self._filename != value:
            self.code.delete('1.0', END)
            with open(value) as code:
                all_content = code.read()
                if self.lexer:
                    lexer = self.lexer
                else:
                    lexer = guess_lexer_for_filename(value, all_content, stripnl=False)
                for token, content in lex(all_content, lexer):
                    self.code.insert(END, content, str(token))

            # Now update the text for the linenumbers
            end_index = self.code.index(END)
            line_count = int(end_index.split('.')[0])
            lineNumbers = '\n'.join('%5d' % i for i in range(1, line_count))
            self.lines.config(state=NORMAL)
            self.lines.delete('1.0', END)
            self.lines.insert('1.0', lineNumbers)
            self.lines.config(state=DISABLED)

            # Store the new filename, and clear any current line
            self._filename = value
            self._line = None
Developer: pybee, Project: tkreadonly, Lines: 25, Source: tkreadonly.py
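Example 10 falls back to guess_lexer_for_filename when no lexer was configured, letting Pygments pick one from the filename and the file contents. A minimal sketch:

from pygments.lexers import guess_lexer_for_filename

lexer = guess_lexer_for_filename("example.py", "print('hi')\n")
print(lexer.name)  # e.g. 'Python'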


Example 11: checkNotRange

def checkNotRange(code):
    """
    Check for: for x in [0, 1, 2] instead of for x in (x)range(...)
    Documentation: https://youtu.be/OSGv2VnC0go?t=3m4s
    """
    sequence = [(Token.Keyword, '^for$'),
                (Token.Name, r'^\w+$'),
                (Token.Operator.Word, '^in$'),
                (Token.Punctuation, r'^\[$'),
                (Token.Literal.Number.Integer, r'^\d$')]

    lexer = PythonLexer()
    lexer.add_filter('tokenmerge')
    tokens = pygments.lex(code, lexer)
    notRangeIdiom = PythonIdiom('notRange')

    lineNumber = 1
    while True:
        lineAux = _findSeqInTokens(sequence, tokens)
        if lineAux < 0:
            break
        lineNumber += lineAux -1
        notRangeIdiom.addNew(lineNumber)
    log("badForIn found in lines {0}".format(notRangeIdiom.getLines()))
    return notRangeIdiom
Developer: jjmerchante, Project: Pythonic, Lines: 25, Source: pythonic.py


Example 12: __init__

    def __init__(self, disassembly, lexer=lexer, msg=None):

        self.lines = []
        if isinstance(disassembly, list):
            self.lines = disassembly
        elif disassembly:
            line = []
            if msg:
                current_function = msg.rsplit(None, 1)[-1][:-1]
            else:
                current_function = None
            with currentfunctiontfilter.current_function(current_function):
                for ttype, value in pygments.lex(disassembly, lexer):
                    if '\n' in value:
                        self.lines.append(DisassemblyLine(line))
                        line = []
                    else:
                        line.append((ttype, value))

        self.linenos = {}
        for i, line in enumerate(self.lines):
            self.linenos[line.address] = line, i

        self.lexer = lexer
        self.msg = msg
Developer: Talanor, Project: gxf, Lines: 25, Source: disassembly.py


Example 13: highlightMultiSource

def highlightMultiSource(codeLexerTuples, multiSourceFormatter, outfile=None):
    """
    main function to create formatted output based on tuples of code and
    related metadata (lexing information and title to display)
    """
    if not isinstance(codeLexerTuples, tuple):
        raise TypeError(
            "first highlight() argument must be a tuple of "
            "codeLexerTuples"
        )
    if not isinstance(multiSourceFormatter, Formatter):
        raise TypeError(
            "second highlight() argument must be a "
            "MultiSourceFormatter"
        )

    tokensList = []
    for codeLexerTuple in codeLexerTuples:
        tokensList.append(lex(codeLexerTuple.code, codeLexerTuple.lexer))
        multiSourceFormatter.titles.append(codeLexerTuple.title)
    if not outfile:
        # print formatter, 'using', formatter.encoding
        realoutfile = multiSourceFormatter.encoding and BytesIO() or StringIO()
        multiSourceFormatter.format(tokensList, realoutfile)
        return realoutfile.getvalue()
    else:
        multiSourceFormatter.format(tokensList, outfile)
Developer: gixxi, Project: comparareetpendere, Lines: 27, Source: __init__.py


Example 14: style_ansi

def style_ansi(raw_code, lang=None):
    """ actual code hilite """
    lexer = None
    if lang:
        try:
            lexer = get_lexer_by_name(lang)
        except ValueError:
            # keep lexer as None so the guess/default fallbacks below run
            print(col(R, 'Lexer for %s not found' % lang))
    if not lexer:
        try:
            if guess_lexer:
                lexer = pyg_guess_lexer(raw_code)
        except:
            pass
    if not lexer:
        lexer = get_lexer_by_name(def_lexer)
    tokens = lex(raw_code, lexer)
    cod = []
    for t, v in tokens:
        if not v:
            continue
        _col = code_hl_tokens.get(t)
        if _col:
            cod.append(col(v, _col))
        else:
            cod.append(v)
    return ''.join(cod)
Developer: dimitri-koussa, Project: terminal_markdown_viewer, Lines: 28, Source: mdv.py


Example 15: main

def main():
    arguments = docopt(
        __doc__.format(program=docstring_format_dict),
        version='{docstring_format_dict["human_format"]} 2.0',
        options_first=True,
    )

    lexer = BibtexLexer()
    lexer.add_filter( RaiseOnErrorTokenFilter() )
    #lexer.add_filter( TokenMergeFilter() )
    lexer.add_filter( KeywordCaseFilter(case='lower') )
    
    for f in arguments['<file>']:
        # get bibtex source
        code = None
        with open(f, 'r') as f:
            code = ''.join( f.readlines() )

        # NOW LEX SEE CODE!
        for idx, item in enumerate(pygments.lex(code, lexer)):
            tokentype, tokenvalue = item[0], item[1]
            
            # if tokentype in frozenset([Token.Text.Whitespace, Token.Punctuation]):
            #     continue
            print(  "{0:>5}\t{1[0]!s:<25}\t{1[1]!r}".format(idx, item),
                    file=sys.stdout )
Developer: Zearin, Project: bibtexml2, Lines: 26, Source: __main__.py


Example 16: BuildTags

    def BuildTags(self, buff, lexer):
        """
        @param buff: code buffer
        @param lexer: xml lexer
        @return: taglib.DocStruct instance for the given buff
        """
        rtags = taglib.DocStruct()
        rtags.SetElementDescription(self.TAG_ID, '/')
        line_count = 0
        current_line = []
        code_lines = []

        # Parse the file into tokens and values
        for ttype, value in lex(buff.read(), lexer):
            if '\n' in value:
                if len(current_line) > 0:
                    code_lines.append((line_count, current_line))
                current_line = []
                line_count += value.count('\n')
                continue
            if ttype == Token.Name.Tag and len(value) > 1:
                current_line.append((ttype, value))
        docroot = self.Parse(code_lines)
        if docroot is not None:
            rtags.AddElement(self.TAG_ID, docroot)
        return rtags
Developer: 2015E8007361074, Project: wxPython, Lines: 26, Source: xmltags.py


Example 17: getCodeStyleChunks

    def getCodeStyleChunks(self, node):
        assert node.tag == 'code'
        lang = node.attrib.get('lang', 'python')
        #@TODO: error handling if lang is bad
        lexer = pygments.lexers.get_lexer_by_name(lang)
        for tok, text in pygments.lex(e2txt(node), lexer):
            yield [[tok]], text
Developer: sabren, Project: ceomatic, Lines: 7, Source: wxoutline.py


Example 18: lex

    def lex(self, code, lex):
        """Return tokenified code.

        Return a list of tuples (scope, word) where word is the word to be
        printed and scope the scope name representing the context.

        :param str code: Code to tokenify.
        :param lex: Lexer to use.
        :return:
        """
        if lex is None:
            if not isinstance(code, str):
                # if no suitable lexer is found, return the decoded code
                code = code.decode("utf-8")
            return (("global", code),)

        words = pygments.lex(code, lex)

        scopes = []
        for word in words:
            token = word[0]
            scope = "global"

            if token in self.token_map:
                scope = self.token_map[token]

            scopes.append((scope, word[1]))
        return scopes
Developer: Gnewbee, Project: suplemon, Lines: 28, Source: lexer.py


Example 19: lex

def lex(code_lines, lexername):
    try:
        from pygments.lexers import get_lexer_by_name
        from pygments import lex
    except ImportError:
        from sys import stderr
        print('For lexer support please install extras: pip install sourcemap-tool[lexer]', file=stderr)
        exit(1)

    # TODO: join lexemes with trailing space, remove comment lexemes
    lexer = get_lexer_by_name(lexername)
    tokens = lex(''.join(code_lines), lexer)
    result = []
    line = []
    for _, text in tokens:
        parts = text.split('\n')
        if len(parts) > 1: # multiline token
            first = True
            for part in parts:
                if not first:
                    result.append(line)
                    line = []
                first = False
                if len(part) > 0:
                    line.append(len(part))
        else:
            if len(text) > 0:
                line.append(len(text))
    if line:
        result.append(line)
    return result
Developer: kyleget, Project: sourcemap-tool, Lines: 30, Source: sourcemap_tool.py


Example 20: findBadUseImport

def findBadUseImport(code):
    """
    Find uses of: from foo import *
    Documentation: http://python.net/~goodger/projects/pycon/2007/idiomatic/handout.html#importing
                   https://docs.python.org/2/howto/doanddont.html#from-module-import
    """
    sequence = [(Token.Keyword.Namespace, '^from$'),
                (Token.Name.Namespace, '.*'),
                (Token.Keyword.Namespace, '^import$'),
                (Token.Operator, r'\*')]
    lexer = PythonLexer()
    lexer.add_filter('tokenmerge')
    tokens = pygments.lex(code, lexer)
    badUseImport = PythonIdiom('badImport')

    lineNumber = 1
    while True:
        lineAux = _findSeqInTokens(sequence, tokens)
        if lineAux < 0:
            break
        lineNumber += lineAux -1
        badUseImport.addNew(lineNumber)
    log("badUseImport found in lines {0}".format(badUseImport.getLines()))

    return badUseImport
Developer: jjmerchante, Project: Pythonic, Lines: 25, Source: pythonic.py



Note: The pygments.lex examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets come from open-source projects; copyright remains with the original authors, and distribution and use are governed by each project's license. Do not reproduce without permission.

