Python lexer.bygroups Function Code Examples


This article collects typical usage examples of Python's pygments.lexer.bygroups function. If you are wondering what bygroups does, how to call it, or what real-world usage looks like, the curated examples below should help.



The following presents 20 code examples of the bygroups function, sorted by popularity by default.
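As a quick orientation before the examples, here is a minimal, self-contained sketch of the typical pattern: inside a RegexLexer rule, bygroups assigns one token type to each parenthesized group of the regex. The KeyValueLexer class and its key = value grammar are illustrative assumptions, not taken from any example below.

from pygments.lexer import RegexLexer, bygroups
from pygments.token import Name, Operator, String, Text

class KeyValueLexer(RegexLexer):
    """Hypothetical lexer for simple 'key = value' lines."""
    name = 'KeyValue'
    tokens = {
        'root': [
            # bygroups maps each regex group to its own token type:
            # group 1 -> Name.Attribute, 2/4 -> Text, 3 -> Operator, 5 -> String.
            (r'([A-Za-z_]\w*)(\s*)(=)(\s*)(.*\n?)',
             bygroups(Name.Attribute, Text, Operator, Text, String)),
            (r'\s+', Text),
        ]
    }

lexer = KeyValueLexer()
for index, tokentype, value in lexer.get_tokens_unprocessed('host = example.com\n'):
    print(index, tokentype, repr(value))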

Example 1: format

 def format(self, extension=None):
     """Write pretty HTML logs."""
     M = self.M
     # pygments lexing setup:
     # (pygments HTML-formatter handles HTML-escaping)
     import pygments
     from pygments.lexers import IrcLogsLexer
     from pygments.formatters import HtmlFormatter
     import pygments.token as token
     from pygments.lexer import bygroups
     # Don't do any encoding in this function with pygments.
     # That's only right before the i/o functions in the Config
     # object.
     formatter = HtmlFormatter(lineanchors='l',
                               full=True, style=M.config.pygmentizeStyle,
                               output_encoding=self.M.config.output_codec)
     Lexer = IrcLogsLexer
     Lexer.tokens['msg'][1:1] = \
        [ # match:   #topic commands
         (r"(\#topic[ \t\f\v]*)(.*\n)",
          bygroups(token.Keyword, token.Generic.Heading), '#pop'),
          # match:   #command   (others)
         (r"(\#[^\s]+[ \t\f\v]*)(.*\n)",
          bygroups(token.Keyword, token.Generic.Strong), '#pop'),
        ]
     lexer = Lexer()
     #from rkddp.interact import interact ; interact()
     out = pygments.highlight("\n".join(M.lines), lexer, formatter)
     return out
Developer: Affix, Project: Fedbot, Lines: 29, Source: writers.py


Example 2: string_rules

def string_rules(state):
    return [
        (r'(")((?:[^\r\n"\\]|(?:\\.))+)(")',
         bygroups(Text, String, Text), state),

        (r'(")((?:[^\r\n"\\]|(?:\\.))+)', bygroups(Text, String), state),

        (r"(')((?:[^\r\n'\\]|(?:\\.))+)(')",
         bygroups(Text, String, Text), state),

        (r"(')((?:[^\r\n'\\]|(?:\\.))+)", bygroups(Text, String), state),

        (r'([^\s\'\\]|(\\.))+', String, state)
    ]
Developer: eliangcs, Project: http-prompt, Lines: 14, Source: lexer.py


Example 3: gen_elixir_sigil_rules

    def gen_elixir_sigil_rules():
        # all valid sigil terminators (excluding heredocs)
        terminators = [
            (r'\{', r'\}', 'cb'),
            (r'\[', r'\]', 'sb'),
            (r'\(', r'\)', 'pa'),
            (r'<', r'>', 'ab'),
            (r'/', r'/', 'slas'),
            (r'\|', r'\|', 'pipe'),
            ('"', '"', 'quot'),
            ("'", "'", 'apos'),
        ]

        # heredocs have slightly different rules
        triquotes = [(r'"""', 'triquot'), (r"'''", 'triapos')]

        token = String.Other
        states = {'sigils': []}

        for term, name in triquotes:
            states['sigils'] += [
                (r'(~[a-z])(%s)' % (term,), bygroups(token, String.Heredoc),
                    (name + '-end', name + '-intp')),
                (r'(~[A-Z])(%s)' % (term,), bygroups(token, String.Heredoc),
                    (name + '-end', name + '-no-intp')),
            ]

            states[name + '-end'] = [
                (r'[a-zA-Z]+', token, '#pop'),
                default('#pop'),
            ]
            states[name + '-intp'] = [
                (r'^\s*' + term, String.Heredoc, '#pop'),
                include('heredoc_interpol'),
            ]
            states[name + '-no-intp'] = [
                (r'^\s*' + term, String.Heredoc, '#pop'),
                include('heredoc_no_interpol'),
            ]

        for lterm, rterm, name in terminators:
            states['sigils'] += [
                (r'~[a-z]' + lterm, token, name + '-intp'),
                (r'~[A-Z]' + lterm, token, name + '-no-intp'),
            ]
            states[name + '-intp'] = gen_elixir_sigstr_rules(rterm, token)
            states[name + '-no-intp'] = \
                gen_elixir_sigstr_rules(rterm, token, interpol=False)

        return states
Developer: 2015E8014661092, Project: jinjaysnow.github.io, Lines: 50, Source: erlang.py


Example 4: format

 def format(self, extension=None):
     """Write pretty HTML logs."""
     M = self.M
     # pygments lexing setup:
     # (pygments HTML-formatter handles HTML-escaping)
     import pygments
     from pygments.lexers import IrcLogsLexer
     from pygments.formatters import HtmlFormatter
     import pygments.token as token
     from pygments.lexer import bygroups
     # Don't do any encoding in this function with pygments.
     # That's only right before the i/o functions in the Config
     # object.
     formatter = HtmlFormatter(
         lineanchors='l',
         full=True,
         style=M.config.pygmentizeStyle,
         outencoding=self.M.config.output_codec)
     Lexer = IrcLogsLexer
     Lexer.tokens['msg'][1:1] = \
        [ # match:   #topic commands
         (r"(\#topic[ \t\f\v]*)(.*\n)",
          bygroups(token.Keyword, token.Generic.Heading), '#pop'),
          # match:   #command   (others)
         (r"(\#[^\s]+[ \t\f\v]*)(.*\n)",
          bygroups(token.Keyword, token.Generic.Strong), '#pop'),
        ]
     lexer = Lexer()
     #from rkddp.interact import interact ; interact()
     out = pygments.highlight("\n".join(M.lines), lexer, formatter)
     # Hack it to add "pre { white-space: pre-wrap; }", which makes
     # it wrap the pygments html logs.  I think that in a newer
     # version of pygments, the "prestyles" HTMLFormatter option
     # would do this, but I want to maintain compatibility with
     # lenny.  Thus, I do these substitution hacks to add the
     # format in.  Thanks to a comment on the blog of Francis
     # Giannaros (http://francis.giannaros.org) for the suggestion
     # and instructions for how.
     out, n = re.subn(
         r"(\n\s*pre\s*\{[^}]+;\s*)(\})",
         r"\1\n      white-space: pre-wrap;\2",
         out,
         count=1)
     if n == 0:
         out = re.sub(r"(\n\s*</style>)",
                      r"\npre { white-space: pre-wrap; }\1",
                      out,
                      count=1)
     return out
Developer: JohnVillalovos, Project: meetbot, Lines: 49, Source: writers.py


Example 5: _make_redirect_state

 def _make_redirect_state(compound,
                          _core_token_compound=_core_token_compound,
                          _nl=_nl, _punct=_punct, _stoken=_stoken,
                          _string=_string, _space=_space,
                          _variable=_variable, _ws=_ws):
     stoken_compound = (r'(?:[%s]+|(?:%s|%s|%s)+)' %
                        (_punct, _string, _variable, _core_token_compound))
     return [
         (r'((?:(?<=[%s%s])\d)?)(>>?&|<&)([%s%s]*)(\d)' %
          (_nl, _ws, _nl, _ws),
          bygroups(Number.Integer, Punctuation, Text, Number.Integer)),
         (r'((?:(?<=[%s%s])(?<!\^[%s])\d)?)(>>?|<)(%s?%s)' %
          (_nl, _ws, _nl, _space, stoken_compound if compound else _stoken),
          bygroups(Number.Integer, Punctuation, using(this, state='text')))
     ]
Developer: LihMeh, Project: outwiker, Lines: 15, Source: shell.py


Example 6: _make_call_state

 def _make_call_state(compound, _label=_label,
                      _label_compound=_label_compound):
     state = []
     if compound:
         state.append((r'(?=\))', Text, '#pop'))
     state.append((r'(:?)(%s)' % (_label_compound if compound else _label),
                   bygroups(Punctuation, Name.Label), '#pop'))
     return state
Developer: LihMeh, Project: outwiker, Lines: 8, Source: shell.py


Example 7: gen_elixir_string_rules

def gen_elixir_string_rules(name, symbol, token):
    states = {}
    states['string_' + name] = [
        (r'[^#%s\\]+' % (symbol,), token),
        include('escapes'),
        (r'\\.', token),
        (r'(%s)' % (symbol,), bygroups(token), "#pop"),
        include('interpol')
    ]
    return states
Developer: 2015E8014661092, Project: jinjaysnow.github.io, Lines: 10, Source: erlang.py


Example 8: _objdump_lexer_tokens

def _objdump_lexer_tokens(asm_lexer):
    """
    Common objdump lexer tokens to wrap an ASM lexer.
    """
    hex_re = r'[0-9A-Za-z]'
    return {
        'root': [
            # File name & format:
            ('(.*?)(:)( +file format )(.*?)$',
                bygroups(Name.Label, Punctuation, Text, String)),
            # Section header
            ('(Disassembly of section )(.*?)(:)$',
                bygroups(Text, Name.Label, Punctuation)),
            # Function labels
            # (With offset)
            ('('+hex_re+'+)( )(<)(.*?)([-+])(0[xX][A-Za-z0-9]+)(>:)$',
                bygroups(Number.Hex, Text, Punctuation, Name.Function,
                         Punctuation, Number.Hex, Punctuation)),
            # (Without offset)
            ('('+hex_re+'+)( )(<)(.*?)(>:)$',
                bygroups(Number.Hex, Text, Punctuation, Name.Function,
                         Punctuation)),
            # Code line with disassembled instructions
            ('( *)('+hex_re+r'+:)(\t)((?:'+hex_re+hex_re+' )+)( *\t)([a-zA-Z].*?)$',
                bygroups(Text, Name.Label, Text, Number.Hex, Text,
                         using(asm_lexer))),
            # Code line with ascii
            ('( *)('+hex_re+r'+:)(\t)((?:'+hex_re+hex_re+' )+)( *)(.*?)$',
                bygroups(Text, Name.Label, Text, Number.Hex, Text, String)),
            # Continued code line, only raw opcodes without disassembled
            # instruction
            ('( *)('+hex_re+r'+:)(\t)((?:'+hex_re+hex_re+' )+)$',
                bygroups(Text, Name.Label, Text, Number.Hex)),
            # Skipped a few bytes
            (r'\t\.\.\.$', Text),
            # Relocation line
            # (With offset)
            (r'(\t\t\t)('+hex_re+r'+:)( )([^\t]+)(\t)(.*?)([-+])(0x'+hex_re+'+)$',
                bygroups(Text, Name.Label, Text, Name.Property, Text,
                         Name.Constant, Punctuation, Number.Hex)),
            # (Without offset)
            (r'(\t\t\t)('+hex_re+r'+:)( )([^\t]+)(\t)(.*?)$',
                bygroups(Text, Name.Label, Text, Name.Property, Text,
                         Name.Constant)),
            (r'[^\n]+\n', Other)
        ]
    }
Developer: victoredwardocallaghan, Project: pygments-main, Lines: 47, Source: asm.py


Example 9: _make_label_state

 def _make_label_state(compound, _label=_label,
                       _label_compound=_label_compound, _nl=_nl,
                       _punct=_punct, _string=_string, _variable=_variable):
     state = []
     if compound:
         state.append((r'(?=\))', Text, '#pop'))
     state.append((r'(%s?)((?:%s|%s|\^[%s]?%s|[^"%%^%s%s%s])*)' %
                   (_label_compound if compound else _label, _string,
                    _variable, _nl, r'[^)]' if compound else r'[\w\W]', _nl,
                    _punct, r')' if compound else ''),
                   bygroups(Name.Label, Comment.Single), '#pop'))
     return state
Developer: LihMeh, Project: outwiker, Lines: 12, Source: shell.py


Example 10: _make_follow_state

 def _make_follow_state(compound, _label=_label,
                        _label_compound=_label_compound, _nl=_nl,
                        _space=_space, _start_label=_start_label,
                        _token=_token, _token_compound=_token_compound,
                        _ws=_ws):
     suffix = '/compound' if compound else ''
     state = []
     if compound:
         state.append((r'(?=\))', Text, '#pop'))
     state += [
         (r'%s([%s]*)(%s)(.*)' %
          (_start_label, _ws, _label_compound if compound else _label),
          bygroups(Text, Punctuation, Text, Name.Label, Comment.Single)),
         include('redirect%s' % suffix),
         (r'(?=[%s])' % _nl, Text, '#pop'),
         (r'\|\|?|&&?', Punctuation, '#pop'),
         include('text')
     ]
     return state
Developer: LihMeh, Project: outwiker, Lines: 19, Source: shell.py


Example 11: gen_rubystrings_rules

    def gen_rubystrings_rules():
        def intp_regex_callback(self, match, ctx):
            yield match.start(1), String.Regex, match.group(1)  # begin
            nctx = LexerContext(match.group(3), 0, ['interpolated-regex'])
            for i, t, v in self.get_tokens_unprocessed(context=nctx):
                yield match.start(3)+i, t, v
            yield match.start(4), String.Regex, match.group(4)  # end[mixounse]*
            ctx.pos = match.end()

        def intp_string_callback(self, match, ctx):
            yield match.start(1), String.Other, match.group(1)
            nctx = LexerContext(match.group(3), 0, ['interpolated-string'])
            for i, t, v in self.get_tokens_unprocessed(context=nctx):
                yield match.start(3)+i, t, v
            yield match.start(4), String.Other, match.group(4)  # end
            ctx.pos = match.end()

        states = {}
        states['strings'] = [
            # easy ones
            (r'\:@{0,2}([a-zA-Z_]\w*[\!\?]?|\*\*?|[-+]@?|'
             r'[/%&|^`~]|\[\]=?|<<|>>|<=?>|>=?|===?)', String.Symbol),
            (r":'(\\\\|\\'|[^'])*'", String.Symbol),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
            (r':"', String.Symbol, 'simple-sym'),
            (r'([a-zA-Z_]\w*)(:)(?!:)',
             bygroups(String.Symbol, Punctuation)),  # Since Ruby 1.9
            (r'"', String.Double, 'simple-string'),
            (r'(?<!\.)`', String.Backtick, 'simple-backtick'),
        ]

        # double-quoted string and symbol
        for name, ttype, end in ('string', String.Double, '"'), \
                                ('sym', String.Symbol, '"'), \
                                ('backtick', String.Backtick, '`'):
            states['simple-'+name] = [
                include('string-intp-escaped'),
                (r'[^\\%s#]+' % end, ttype),
                (r'[\\#]', ttype),
                (end, ttype, '#pop'),
            ]

        # braced quoted strings
        for lbrace, rbrace, name in ('\\{', '\\}', 'cb'), \
                                    ('\\[', '\\]', 'sb'), \
                                    ('\\(', '\\)', 'pa'), \
                                    ('<', '>', 'ab'):
            states[name+'-intp-string'] = [
                (r'\\[\\' + lbrace + rbrace + ']', String.Other),
                (r'(?<!\\)' + lbrace, String.Other, '#push'),
                (r'(?<!\\)' + rbrace, String.Other, '#pop'),
                include('string-intp-escaped'),
                (r'[\\#' + lbrace + rbrace + ']', String.Other),
                (r'[^\\#' + lbrace + rbrace + ']+', String.Other),
            ]
            states['strings'].append((r'%[QWx]?' + lbrace, String.Other,
                                      name+'-intp-string'))
            states[name+'-string'] = [
                (r'\\[\\' + lbrace + rbrace + ']', String.Other),
                (r'(?<!\\)' + lbrace, String.Other, '#push'),
                (r'(?<!\\)' + rbrace, String.Other, '#pop'),
                (r'[\\#' + lbrace + rbrace + ']', String.Other),
                (r'[^\\#' + lbrace + rbrace + ']+', String.Other),
            ]
            states['strings'].append((r'%[qsw]' + lbrace, String.Other,
                                      name+'-string'))
            states[name+'-regex'] = [
                (r'\\[\\' + lbrace + rbrace + ']', String.Regex),
                (r'(?<!\\)' + lbrace, String.Regex, '#push'),
                (r'(?<!\\)' + rbrace + '[mixounse]*', String.Regex, '#pop'),
                include('string-intp'),
                (r'[\\#' + lbrace + rbrace + ']', String.Regex),
                (r'[^\\#' + lbrace + rbrace + ']+', String.Regex),
            ]
            states['strings'].append((r'%r' + lbrace, String.Regex,
                                      name+'-regex'))

        # these must come after %<brace>!
        states['strings'] += [
            # %r regex
            (r'(%r([^a-zA-Z0-9]))((?:\\\2|(?!\2).)*)(\2[mixounse]*)',
             intp_regex_callback),
            # regular fancy strings with qsw
            (r'%[qsw]([^a-zA-Z0-9])((?:\\\1|(?!\1).)*)\1', String.Other),
            (r'(%[QWx]([^a-zA-Z0-9]))((?:\\\2|(?!\2).)*)(\2)',
             intp_string_callback),
            # special forms of fancy strings after operators or
            # in method calls with braces
            (r'(?<=[-+/*%=<>&!^|~,(])(\s*)(%([\t ])(?:(?:\\\3|(?!\3).)*)\3)',
             bygroups(Text, String.Other, None)),
            # and because of fixed width lookbehinds the whole thing a
            # second time for line startings...
            (r'^(\s*)(%([\t ])(?:(?:\\\3|(?!\3).)*)\3)',
             bygroups(Text, String.Other, None)),
            # all regular fancy strings without qsw
            (r'(%([^a-zA-Z0-9\s]))((?:\\\2|(?!\2).)*)(\2)',
             intp_string_callback),
        ]

        return states
Developer: spencerlyon2, Project: pygments, Lines: 100, Source: ruby.py


Example 12: _stringescapes

 def _stringescapes(lexer, match, ctx):
     lexer._start = match.group(3)
     lexer._end = match.group(5)
     return bygroups(Keyword.Reserved, Text, String.Escape, Text,
                     String.Escape)(lexer, match, ctx)
Developer: axil, Project: blog, Lines: 5, Source: dsls.py
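Example 12 relies on a detail worth spelling out: bygroups(...) does not lex anything by itself; it returns a callback that a RegexLexer normally invokes with (lexer, match, ctx) and that yields (index, tokentype, value) tuples. Here is a small sketch of calling such a callback directly; the regex and input string are illustrative assumptions.

import re
from pygments.lexer import bygroups
from pygments.token import Keyword, Name, Text

callback = bygroups(Keyword, Text, Name.Function)
match = re.match(r'(def)(\s+)(\w+)', 'def format')
# For plain token types the lexer and ctx arguments are not consulted,
# so None works here; example 12 passes the real lexer, match, and ctx through.
for index, tokentype, value in callback(None, match, None):
    print(index, tokentype, value)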


Example 13: gen_crystalstrings_rules

    def gen_crystalstrings_rules():
        def intp_regex_callback(self, match, ctx):
            yield match.start(1), String.Regex, match.group(1)  # begin
            nctx = LexerContext(match.group(3), 0, ['interpolated-regex'])
            for i, t, v in self.get_tokens_unprocessed(context=nctx):
                yield match.start(3)+i, t, v
            yield match.start(4), String.Regex, match.group(4)  # end[imsx]*
            ctx.pos = match.end()

        def intp_string_callback(self, match, ctx):
            yield match.start(1), String.Other, match.group(1)
            nctx = LexerContext(match.group(3), 0, ['interpolated-string'])
            for i, t, v in self.get_tokens_unprocessed(context=nctx):
                yield match.start(3)+i, t, v
            yield match.start(4), String.Other, match.group(4)  # end
            ctx.pos = match.end()

        states = {}
        states['strings'] = [
            (r'\:@{0,2}[a-zA-Z_]\w*[!?]?', String.Symbol),
            (words(CRYSTAL_OPERATORS, prefix=r'\:@{0,2}'), String.Symbol),
            (r":'(\\\\|\\'|[^'])*'", String.Symbol),
            # This allows arbitrary text after '\ for simplicity
            (r"'(\\\\|\\'|[^']|\\[^'\\]+)'", String.Char),
            (r':"', String.Symbol, 'simple-sym'),
            # Crystal doesn't have "symbol:"s but this simplifies function args
            (r'([a-zA-Z_]\w*)(:)(?!:)', bygroups(String.Symbol, Punctuation)),
            (r'"', String.Double, 'simple-string'),
            (r'(?<!\.)`', String.Backtick, 'simple-backtick'),
        ]

        # double-quoted string and symbol
        for name, ttype, end in ('string', String.Double, '"'), \
                                ('sym', String.Symbol, '"'), \
                                ('backtick', String.Backtick, '`'):
            states['simple-'+name] = [
                include('string-escaped' if name == 'sym' else 'string-intp-escaped'),
                (r'[^\\%s#]+' % end, ttype),
                (r'[\\#]', ttype),
                (end, ttype, '#pop'),
            ]

        # braced quoted strings
        for lbrace, rbrace, bracecc, name in \
                ('\\{', '\\}', '{}', 'cb'), \
                ('\\[', '\\]', '\\[\\]', 'sb'), \
                ('\\(', '\\)', '()', 'pa'), \
                ('<', '>', '<>', 'ab'):
            states[name+'-intp-string'] = [
                (r'\\[' + lbrace + ']', String.Other),
                (lbrace, String.Other, '#push'),
                (rbrace, String.Other, '#pop'),
                include('string-intp-escaped'),
                (r'[\\#' + bracecc + ']', String.Other),
                (r'[^\\#' + bracecc + ']+', String.Other),
            ]
            states['strings'].append((r'%' + lbrace, String.Other,
                                      name+'-intp-string'))
            states[name+'-string'] = [
                (r'\\[\\' + bracecc + ']', String.Other),
                (lbrace, String.Other, '#push'),
                (rbrace, String.Other, '#pop'),
                (r'[\\#' + bracecc + ']', String.Other),
                (r'[^\\#' + bracecc + ']+', String.Other),
            ]
            # http://crystal-lang.org/docs/syntax_and_semantics/literals/array.html
            states['strings'].append((r'%[wi]' + lbrace, String.Other,
                                      name+'-string'))
            states[name+'-regex'] = [
                (r'\\[\\' + bracecc + ']', String.Regex),
                (lbrace, String.Regex, '#push'),
                (rbrace + '[imsx]*', String.Regex, '#pop'),
                include('string-intp'),
                (r'[\\#' + bracecc + ']', String.Regex),
                (r'[^\\#' + bracecc + ']+', String.Regex),
            ]
            states['strings'].append((r'%r' + lbrace, String.Regex,
                                      name+'-regex'))

        # these must come after %<brace>!
        states['strings'] += [
            # %r regex
            (r'(%r([\W_]))((?:\\\2|(?!\2).)*)(\2[imsx]*)',
             intp_regex_callback),
            # regular fancy strings with qsw
            (r'(%[wi]([\W_]))((?:\\\2|(?!\2).)*)(\2)',
             intp_string_callback),
            # special forms of fancy strings after operators or
            # in method calls with braces
            (r'(?<=[-+/*%=<>&!^|~,(])(\s*)(%([\t ])(?:(?:\\\3|(?!\3).)*)\3)',
             bygroups(Text, String.Other, None)),
            # and because of fixed width lookbehinds the whole thing a
            # second time for line startings...
            (r'^(\s*)(%([\t ])(?:(?:\\\3|(?!\3).)*)\3)',
             bygroups(Text, String.Other, None)),
            # all regular fancy strings without qsw
            (r'(%([\[{(<]))((?:\\\2|(?!\2).)*)(\2)',
             intp_string_callback),
        ]

#......... (part of the code omitted here) .........
Developer: Jenyay, Project: outwiker, Lines: 101, Source: crystal.py


Example 14: bygroups

from pygments.lexer import RegexLexer, bygroups
from pygments.token import *


# See http://pygments.org/docs/lexerdevelopment/ for background.
ROOT_TOKENS = [
  (r'\"[^\"]*\"',                      Literal.String),
  (r'##.*\n',                          Comment),
  (r'#.*\n',                           Comment.Single),
  (r'[$]+([A-Za-z][A-Za-z0-9_:]*)?',   Name.Variable),
  (r'[@]+([A-Za-z][A-Za-z0-9_:]*)?',   Name.Class),
  (r'[A-Za-z0-9_]+:',                  Name.Tag),
  (r'(\.)([A-Za-z][A-Za-z0-9_]*)',     bygroups(Text, Name.Function)),
  (r'(:=|=>)',                         Punctuation),
  (r'(\.?)([!+=<>/*%-]+)',             bygroups(Text, Operator)),
  (r'\b(true|false|null)\b',           Keyword.Constant),
  (r'\b(type|def|var|import)\b',       Keyword.Declaration),
  (r'\b[A-Za-z][A-Za-z0-9_]+\b',       Keyword),
  (r'[0-9]+',                          Number),
  (r'.',                               Text)
]


class NeutrinoLexer(RegexLexer):
  name = 'Neutrino'
  aliases = ['neutrino']
  filenames = ['*.n']
  tokens = {'root': ROOT_TOKENS}
Developer: plesner, Project: blog, Lines: 28, Source: pygtrino.py
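Example 14 is a complete standalone lexer, so it can be passed straight to pygments.highlight. A brief usage sketch follows; the sample source string is an invented Neutrino-like snippet.

from pygments import highlight
from pygments.formatters import HtmlFormatter

code = '$greeting := "hello" # say hi\n'
print(highlight(code, NeutrinoLexer(), HtmlFormatter()))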


Example 15: build_ipy_lexer

def build_ipy_lexer(python3):
    """Builds IPython lexers depending on the value of `python3`.

    The lexer inherits from an appropriate Python lexer and then adds
    information about IPython specific keywords (i.e. magic commands,
    shell commands, etc.)

    Parameters
    ----------
    python3 : bool
        If `True`, then build an IPython lexer from a Python 3 lexer.

    """
    # It would be nice to have a single IPython lexer class which takes
    # a boolean `python3`.  But since there are two Python lexer classes,
    # we will also have two IPython lexer classes.
    if python3:
        PyLexer = Python3Lexer
        name = 'IPython3'
        aliases = ['ipython3']
        doc = """IPython3 Lexer"""
    else:
        PyLexer = PythonLexer
        name = 'IPython'
        aliases = ['ipython2', 'ipython']
        doc = """IPython Lexer"""

    ipython_tokens = [
        (r'(?s)(\s*)(%%capture)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))),
        (r'(?s)(\s*)(%%debug)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))),
        (r'(?is)(\s*)(%%html)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(HtmlLexer))),
        (r'(?s)(\s*)(%%javascript)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(JavascriptLexer))),
        (r'(?s)(\s*)(%%js)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(JavascriptLexer))),
        (r'(?s)(\s*)(%%latex)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(TexLexer))),
        (r'(?s)(\s*)(%%perl)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PerlLexer))),
        (r'(?s)(\s*)(%%prun)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))),
        (r'(?s)(\s*)(%%pypy)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))),
        (r'(?s)(\s*)(%%python)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))),
        (r'(?s)(\s*)(%%python2)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PythonLexer))),
        (r'(?s)(\s*)(%%python3)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(Python3Lexer))),
        (r'(?s)(\s*)(%%ruby)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(RubyLexer))),
        (r'(?s)(\s*)(%%time)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))),
        (r'(?s)(\s*)(%%timeit)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))),
        (r'(?s)(\s*)(%%writefile)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))),
        (r'(?s)(\s*)(%%file)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(PyLexer))),
        (r"(?s)(\s*)(%%)(\w+)(.*)", bygroups(Text, Operator, Keyword, Text)),
        (r'(?s)(^\s*)(%%!)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(BashLexer))),
        (r"(%%?)(\w+)(\?\??)$",  bygroups(Operator, Keyword, Operator)),
        (r"\b(\?\??)(\s*)$",  bygroups(Operator, Text)),
        (r'(%)(sx|sc|system)(.*)(\n)', bygroups(Operator, Keyword,
                                                using(BashLexer), Text)),
        (r'(%)(\w+)(.*\n)', bygroups(Operator, Keyword, Text)),
        (r'^(!!)(.+)(\n)', bygroups(Operator, using(BashLexer), Text)),
        (r'(!)(?!=)(.+)(\n)', bygroups(Operator, using(BashLexer), Text)),
        (r'^(\s*)(\?\??)(\s*%{0,2}[\w\.\*]*)', bygroups(Text, Operator, Text)),
        (r'(\s*%{0,2}[\w\.\*]*)(\?\??)(\s*)$', bygroups(Text, Operator, Text)),
    ]

    tokens = PyLexer.tokens.copy()
    tokens['root'] = ipython_tokens + tokens['root']

    attrs = {'name': name, 'aliases': aliases, 'filenames': [],
             '__doc__': doc, 'tokens': tokens}

    return type(name, (PyLexer,), attrs)
Developer: PKpacheco, Project: monitor-dollar-value-galicia, Lines: 65, Source: lexers.py
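For context: in IPython's own lexers.py this factory is invoked once per Python version to produce the exported classes, roughly as follows (compare the __all__ list in Example 16).

IPython3Lexer = build_ipy_lexer(python3=True)
IPythonLexer = build_ipy_lexer(python3=False)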


Example 16: import

)
from pygments.token import (
    Comment, Generic, Keyword, Literal, Name, Operator, Other, Text, Error,
)
from pygments.util import get_bool_opt

# Local

line_re = re.compile('.*?\n')

__all__ = ['build_ipy_lexer', 'IPython3Lexer', 'IPythonLexer',
           'IPythonPartialTracebackLexer', 'IPythonTracebackLexer',
           'IPythonConsoleLexer', 'IPyLexer']

ipython_tokens = [
  (r"(?s)(\s*)(%%)(\w+)(.*)", bygroups(Text, Operator, Keyword, Text)),
  (r'(?s)(^\s*)(%%!)([^\n]*\n)(.*)', bygroups(Text, Operator, Text, using(BashLexer))),
  (r"(%%?)(\w+)(\?\??)$",  bygroups(Operator, Keyword, Operator)),
  (r"\b(\?\??)(\s*)$",  bygroups(Operator, Text)),
  (r'(%)(sx|sc|system)(.*)(\n)', bygroups(Operator, Keyword,
                                       using(BashLexer), Text)),
  (r'(%)(\w+)(.*\n)', bygroups(Operator, Keyword, Text)),
  (r'^(!!)(.+)(\n)', bygroups(Operator, using(BashLexer), Text)),
  (r'(!)(?!=)(.+)(\n)', bygroups(Operator, using(BashLexer), Text)),
  (r'^(\s*)(\?\??)(\s*%{0,2}[\w\.\*]*)', bygroups(Text, Operator, Text)),
  (r'(\s*%{0,2}[\w\.\*]*)(\?\??)(\s*)$', bygroups(Text, Operator, Text)),
]

def build_ipy_lexer(python3):
    """Builds IPython lexers depending on the value of `python3`.
Developer: 12312113, Project: ipython, Lines: 30, Source: lexers.py


Example 17: bygroups

#
# This software is distributed WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the License for more information.
# =============================================================================
import os
import re

# Monkey patch for pygments reporting an error when generator expressions are
# used.
# https://bitbucket.org/birkenfeld/pygments-main/issue/942/cmake-generator-expressions-not-handled
from pygments.lexers import CMakeLexer
from pygments.token import Name, Operator
from pygments.lexer import bygroups

CMakeLexer.tokens["args"].append(("(\\$<)(.+?)(>)", bygroups(Operator, Name.Variable, Operator)))

# Monkey patch for sphinx generating invalid content for qcollectiongenerator
# https://bitbucket.org/birkenfeld/sphinx/issue/1435/qthelp-builder-should-htmlescape-keywords
from sphinx.util.pycompat import htmlescape
from sphinx.builders.qthelp import QtHelpBuilder

old_build_keywords = QtHelpBuilder.build_keywords


def new_build_keywords(self, title, refs, subitems):
    old_items = old_build_keywords(self, title, refs, subitems)
    new_items = []
    for item in old_items:
        before, rest = item.split('ref="', 1)
        ref, after = rest.split('"')
Developer: Baoqi, Project: CMake, Lines: 31, Source: cmake.py


Example 18: import

from pygments.lexers import BashLexer, PythonLexer, Python3Lexer
from pygments.lexer import (
    Lexer, DelegatingLexer, RegexLexer, do_insertions, bygroups, using,
)
from pygments.token import (
    Comment, Generic, Keyword, Literal, Name, Operator, Other, Text, Error,
)
from pygments.util import get_bool_opt

# Local
from IPython.testing.skipdoctest import skip_doctest

line_re = re.compile('.*?\n')

ipython_tokens = [
  (r'(\%+)(\w+)\s+(\.*)(\n)', bygroups(Operator, Keyword,
                                       using(BashLexer), Text)),
  (r'(\%+)(\w+)\b', bygroups(Operator, Keyword)),
  (r'^(!)(.+)(\n)', bygroups(Operator, using(BashLexer), Text)),
]

def build_ipy_lexer(python3):
    """Builds IPython lexers depending on the value of `python3`.

    The lexer inherits from an appropriate Python lexer and then adds
    information about IPython specific keywords (i.e. magic commands,
    shell commands, etc.)

    Parameters
    ----------
    python3 : bool
        If `True`, then build an IPython lexer from a Python 3 lexer.
Developer: NitikaAgarwal, Project: ipython, Lines: 32, Source: lexers.py


Example 19: bygroups

# - manual/cmake-buildsystem.7.html (with nested $<..>; relative and absolute paths, "::")

from pygments.lexers import CMakeLexer
from pygments.token import Name, Operator, Punctuation, String, Text, Comment, Generic, Whitespace, Number
from pygments.lexer import bygroups

# Notes on regular expressions below:
# - [\.\+-] are needed for string constants like gtk+-2.0
# - Unix paths are recognized by '/'; support for Windows paths may be added if needed
# - (\\.) allows for \-escapes (used in manual/cmake-language.7)
# - $<..$<..$>..> nested occurrence in cmake-buildsystem
# - Nested variable evaluations are only supported in a limited capacity. Only
#   one level of nesting is supported and at most one nested variable can be present.

CMakeLexer.tokens["root"] = [
  (r'\b(\w+)([ \t]*)(\()', bygroups(Name.Function, Text, Name.Function), '#push'),     # fctn(
  (r'\(', Name.Function, '#push'),
  (r'\)', Name.Function, '#pop'),
  (r'\[', Punctuation, '#push'),
  (r'\]', Punctuation, '#pop'),
  (r'[|;,.=*\-]', Punctuation),
  (r'\\\\', Punctuation),                                   # used in commands/source_group
  (r'[:]', Operator),
  (r'[<>]=', Punctuation),                                  # used in FindPkgConfig.cmake
  (r'\$<', Operator, '#push'),                              # $<...>
  (r'<[^<|]+?>(\w*\.\.\.)?', Name.Variable),                # <expr>
  (r'(\$\w*\{)([^\}\$]*)?(?:(\$\w*\{)([^\}]+?)(\}))?([^\}]*?)(\})',  # ${..} $ENV{..}, possibly nested
    bygroups(Operator, Name.Tag, Operator, Name.Tag, Operator, Name.Tag, Operator)),
  (r'([A-Z]+\{)(.+?)(\})', bygroups(Operator, Name.Tag, Operator)),  # DATA{ ...}
  (r'[a-z]+(@|(://))((\\.)|[\w.+-:/\\])+', Name.Attribute),          # URL, [email protected], ...
  (r'/\w[\w\.\+-/\\]*', Name.Attribute),                    # absolute path
Developer: Kitware, Project: CMake, Lines: 31, Source: cmake.py


Example 20: gen_crystalstrings_rules

    def gen_crystalstrings_rules():
        def intp_regex_callback(self, match, ctx):
            yield match.start(1), String.Regex, match.group(1)  # begin
            nctx = LexerContext(match.group(3), 0, ["interpolated-regex"])
            for i, t, v in self.get_tokens_unprocessed(context=nctx):
                yield match.start(3) + i, t, v
            yield match.start(4), String.Regex, match.group(4)  # end[imsx]*
            ctx.pos = match.end()

        def intp_string_callback(self, match, ctx):
            yield match.start(1), String.Other, match.group(1)
            nctx = LexerContext(match.group(3), 0, ["interpolated-string"])
            for i, t, v in self.get_tokens_unprocessed(context=nctx):
                yield match.start(3) + i, t, v
            yield match.start(4), String.Other, match.group(4)  # end
            ctx.pos = match.end()

        states = {}
        states["strings"] = [
            (r"\:@{0,2}[a-zA-Z_]\w*[!?]?", String.Symbol),
            (words(CRYSTAL_OPERATORS, prefix=r"\:@{0,2}"), String.Symbol),
            (r":'(\\\\|\\'|[^'])*'", String.Symbol),
            # This allows arbitrary text after '\ for simplicity
            (r"'(\\\\|\\'|[^']|\\[^'\\]+)'", String.Char),
            (r':"', String.Symbol, "simple-sym"),
            # Crystal doesn't have "symbol:"s but this simplifies function args
            (r"([a-zA-Z_]\w*)(:)(?!:)", bygroups(String.Symbol, Punctuation)),
            (r'"', String.Double, "simple-string"),
            (r"(?<!\.)`", String.Backtick, "simple-backtick"),
        ]

        # double-quoted string and symbol
        for name, ttype, end in (
            ("string", String.Double, '"'),
            ("sym", String.Symbol, '"'),
            ("backtick", String.Backtick, "`"),
        ):
            states["simple-" + name] = [
                include("string-escaped" if name == "sym" else "string-intp-escaped"),
                (r"[^\\%s#]+" % end, ttype),
                (r"[\\#]", ttype),
                (end, ttype, "#pop"),
            ]

        # braced quoted strings
        for lbrace, rbrace, bracecc, name in (
            ("\\{", "\\}", "{}", "cb"),
            ("\\[", "\\]", "\\[\\]", "sb"),
            ("\\(", "\\)", "()", "pa"),
            ("<", ">", "<>", "ab"),
        ):
            states[name + "-intp-string"] = [
                (r"\\[" + lbrace + "]", String.Other),
                (lbrace, String.Other, "#push"),
                (rbrace, String.Other, "#pop"),
                include("string-intp-escaped"),
                (r"[\\#" + bracecc + "]", String.Other),
                (r"[^\\#" + bracecc + "]+", String.Other),
            ]
            states["strings"].append((r"%" + lbrace, String.Other, name + "-intp-string"))
            states[name + "-string"] = [
                (r"\\[\\" + bracecc + "]", String.Other),
                (lbrace, String.Other, "#push"),
                (rbrace, String.Other, "#pop"),
                (r"[\\#" + bracecc + "]", String.Other),
                (r"[^\\#" + bracecc + "]+", String.Other),
            ]
            # http://crystal-lang.org/docs/syntax_and_semantics/literals/array.html
            states["strings"].append((r"%[wi]" + lbrace, String.Other, name + "-string"))
            states[name + "-regex"] = [
                (r"\\[\\" + bracecc + "]", String.Regex),
                (lbrace, String.Regex, "#push"),
                (rbrace + "[imsx]*", String.Regex, "#pop"),
                include("string-intp"),
                (r"[\\#" + bracecc + "]", String.Regex),
                (r"[^\\#" + bracecc + "]+", String.Regex),
            ]
            states["strings"].append((r"%r" + lbrace, String.Regex, name + "-regex"))

        # these must come after %<brace>!
        states["strings"] += [
            # %r regex
            (r"(%r([\W_]))((?:\\\2|(?!\2).)*)(\2[imsx]*)", intp_regex_callback),
            # regular fancy strings with qsw
            (r"(%[wi]([\W_]))((?:\\\2|(?!\2).)*)(\2)", intp_string_callback),
            # special forms of fancy strings after operators or
            # in method calls with braces
            (r"(?<=[-+/*%=<>&!^|~,(])(\s*)(%([\t ])(?:(?:\\\3|(?!\3).)*)\3)", bygroups(Text, String.Other, None)),
            # and because of fixed width lookbehinds the whole thing a
            # second time for line startings...
            (r"^(\s*)(%([\t ])(?:(?:\\\3|(?!\3).)*)\3)", bygroups(Text, String.Other, None)),
            # all regular fancy strings without qsw
            (r"(%([\[{(<]))((?:\\\2|(?!\2).)*)(\ 
