• 设为首页
  • 点击收藏
  • 手机版
    手机扫一扫访问
    迪恩网络手机版
  • 关注官方公众号
    微信扫一扫关注
    公众号

Python util.peek函数代码示例

原作者: [db:作者] 来自: [db:来源] 收藏 邀请

本文整理汇总了Python中util.peek函数的典型用法代码示例。如果您正苦于以下问题:Python peek函数的具体用法?Python peek怎么用?Python peek使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。



在下文中一共展示了peek函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: ml_parse_value

def ml_parse_value(gmls, default=None):
    """ Parse a value in a macro-language string.

    gmls    -- stream over the macro-language (GML) string
    default -- value to return if no explicit value is present; a default
               is disallowed once a sign prefix has been read.
    Returns a number value in the interpreter's internal representation.
    Raises error.RunError(error.IFC) on malformed or missing input.
    """
    c = util.skip(gmls, ml_whitepace)
    # optional sign prefix; only '-' flips the sign, '+' is a no-op
    sgn = -1 if c == '-' else 1
    if c in ('+', '-'):
        gmls.read(1)
        c = util.peek(gmls)
        # don't allow default if sign is given
        default = None
    if c == '=':
        # '=' introduces a variable reference or a VARPTR$ byte sequence
        gmls.read(1)
        c = util.peek(gmls)
        if len(c) == 0:
            # stream ended right after '='
            raise error.RunError(error.IFC)
        elif ord(c) > 8:
            # printable character: variable name, optionally with indices,
            # terminated by a mandatory ';'
            name = util.get_var_name(gmls)
            indices = ml_parse_indices(gmls)
            step = var.get_var_or_array(name, indices)
            util.require_read(gmls, (';',), err=error.IFC)
        else:
            # varptr$
            step = get_value_for_varptrstr(gmls.read(3))
    elif c and c in string.digits:
        # plain numeric constant
        step = ml_parse_const(gmls)
    elif default is not None:
        step = default
    else:
        raise error.RunError(error.IFC)
    if sgn == -1:
        step = vartypes.number_neg(step)
    return step
开发者ID:nestormh,项目名称:pcbasic,代码行数:31,代码来源:draw_and_play.py


示例2: detokenise_line

def detokenise_line(ins, bytepos=None):
    """ Convert a tokenised program line to ascii text.

    ins     -- stream positioned at the start of a tokenised line
    bytepos -- optional stream offset whose corresponding position in the
               detokenised text is reported back via textpos.
    Returns (line_number, output, textpos); line_number is -1 at end of
    program / end of file, output is a bytearray of ascii text.
    """
    litstring, comment = False, False
    textpos = 0
    current_line = util.parse_line_number(ins)
    if current_line < 0:
        # parse_line_number has returned -1 and left us at: .. 00 | _00_ 00 1A
        # stream ends or end of file sequence \x00\x00\x1A
        return -1, '', 0
    elif current_line == 0 and util.peek(ins) == ' ':
        # ignore up to one space after line number 0
        ins.read(1)
    output = bytearray(str(current_line))
    # write one extra whitespace character after line number
    # unless first char is TAB
    if util.peek(ins) != '\t':
        output += bytearray(' ')
    # detokenise tokens until end of line
    while True:
        s = ins.read(1)
        # NOTE(review): when bytepos is None this relies on Python 2
        # ordering (int >= None is always True), so textpos is set on the
        # first iteration -- confirm this is intended.
        if not textpos and ins.tell() >= bytepos:
            textpos = len(output)
        if s in tk.end_line:
            # \x00 ends lines and comments when listed,
            # if not inside a number constant
            # stream ended or end of line
            break
        elif s == '"':
            # start of literal string, passed verbatim
            # until a closing quote or EOL comes by
            # however number codes are *printed* as the corresponding numbers,
            # even inside comments & literals
            output += s
            litstring = not litstring
        elif s in tk.number:
            # number token: rewind one byte so the number detokeniser
            # sees the token id, then let it emit the decimal form
            ins.seek(-1, 1)
            representation.detokenise_number(ins, output)
        elif s in tk.linenum:
            # 0D: line pointer (unsigned int) - this token should not be here;
            #     interpret as line number and carry on
            # 0E: line number (unsigned int)
            output += representation.uint_to_str(bytearray(ins.read(2)))
        elif comment or litstring or ('\x20' <= s <= '\x7E'):
            # honest ASCII
            output += s
        elif s == '\x0A':
            # LF becomes LF CR
            output += '\x0A\x0D'
        elif s <= '\x09':
            # controls that do not double as tokens
            output += s
        else:
            # anything else is a keyword token: rewind and expand it;
            # detokenise_keyword reports whether we entered a comment (REM/')
            ins.seek(-1, 1)
            comment = detokenise_keyword(ins, output)
    return current_line, output, textpos
开发者ID:nestormh,项目名称:pcbasic,代码行数:55,代码来源:tokenise.py


示例3: tokenise_word

def tokenise_word(ins, outs):
    """ Convert a keyword to tokenised form.

    Reads characters from ins, greedily matching against the keyword
    table; writes either a keyword token or the plain word to outs.
    Returns the word that was consumed (keyword or identifier prefix).
    """
    word = ''
    while True:
        c = ins.read(1).upper()
        word += c
        # special cases 'GO     TO' -> 'GOTO', 'GO SUB' -> 'GOSUB'
        if word == 'GO':
            # remember position to backtrack to if the lookahead fails
            pos = ins.tell()
            # GO SUB allows 1 space
            if util.peek(ins, 4) == ' SUB':
                word = 'GOSUB'
                ins.read(4)
            else:
                # GOTO allows any number of spaces
                nxt = util.skip(ins, whitespace)
                if ins.read(2) == 'TO':
                    word = 'GOTO'
                else:
                    ins.seek(pos)
            if word in ('GOTO', 'GOSUB'):
                # if a name character follows, this was really an
                # identifier starting with GO; undo the match
                nxt = util.peek(ins).upper()
                if nxt in name_chars:
                    ins.seek(pos)
                    word = 'GO'
                else:
                    pass
        if word in keyword_to_token:
            # ignore if part of a longer name, except FN, SPC(, TAB(, USR
            if word not in ('FN', 'SPC(', 'TAB(', 'USR'):
                nxt = util.peek(ins).upper()
                if nxt in name_chars:
                    continue
            token = keyword_to_token[word]
            # handle special case ELSE -> :ELSE
            if word == 'ELSE':
                outs.write(':' + token)
            # handle special case WHILE -> WHILE+
            elif word == 'WHILE':
                outs.write(token + tk.O_PLUS)
            else:
                outs.write(token)
            break
        # allowed names: letter + (letters, numbers, .)
        elif not(c in name_chars):
            # non-name character ends the word; push it back (unless EOF)
            if c!='':
                word = word[:-1]
                ins.seek(-1, 1)
            outs.write(word)
            break
    return word
开发者ID:boriel,项目名称:pcbasic,代码行数:51,代码来源:tokenise.py


示例4: tokenise_oct

def tokenise_oct(ins, outs):
    """ Convert octal expression in Python string to number token. """
    # the O in &O is optional: both &777 and &O777 are accepted
    if util.peek(ins).upper() == 'O':
        ins.read(1)
    # accumulate octal digits until a non-octal character appears
    digits = ''
    nxt = util.peek(ins)
    while nxt and nxt in string.octdigits:
        digits += ins.read(1)
        nxt = util.peek(ins)
    # an empty digit string denotes zero
    value = int(digits, 8) if digits else 0
    outs.write(tk.T_OCT + str(vartypes.value_to_uint(value)))
开发者ID:nestormh,项目名称:pcbasic,代码行数:14,代码来源:representation.py


示例5: auto_step

 def auto_step(self):
     """ Generate an AUTO line number and wait for input.

     Writes the next AUTO line number to the console (with '*' if the
     line already exists), waits for a screen line, then either stores
     it as a program line or schedules it for direct execution.
     Mutates state.basic_state (direct_line, auto_linenum, parse_mode).
     """
     numstr = str(state.basic_state.auto_linenum)
     console.write(numstr)
     if state.basic_state.auto_linenum in state.basic_state.line_numbers:
         # existing line: prompt with '*'; if the user kept the '*',
         # replace it with a space so the line parses normally
         console.write('*')
         line = bytearray(console.wait_screenline(from_start=True))
         if line[:len(numstr)+1] == numstr+'*':
             line[len(numstr)] = ' '
     else:
         console.write(' ')
         line = bytearray(console.wait_screenline(from_start=True))
     # run or store it; don't clear lines or raise undefined line number
     state.basic_state.direct_line = tokenise.tokenise_line(line)
     c = util.peek(state.basic_state.direct_line)
     if c == '\0':
         # check for lines starting with numbers (6553 6) and empty lines
         empty, scanline = program.check_number_start(state.basic_state.direct_line)
         if not empty:
             program.store_line(state.basic_state.direct_line)
             reset.clear()
         state.basic_state.auto_linenum = scanline + state.basic_state.auto_increment
     elif c != '':
         # it is a command, go and execute
         state.basic_state.parse_mode = True
开发者ID:gilsim12,项目名称:pcbasic,代码行数:25,代码来源:interpreter.py


示例6: detokenise_line

def detokenise_line(ins, bytepos=None):
    """ Convert a tokenised program line to ascii text.

    ins     -- stream positioned at the start of a tokenised line
    bytepos -- optional stream offset to map to a text position.
    Returns (line_number, text, textpos); line_number is -1 at end of
    program or end of file.
    """
    current_line = util.parse_line_number(ins)
    if current_line < 0:
        # parse_line_number has returned -1 and left us at: .. 00 | _00_ 00 1A
        # stream ends or end of file sequence \x00\x00\x1A
        return -1, '', 0
    elif current_line == 0 and util.peek(ins) == ' ':
        # ignore up to one space after line number 0
        ins.read(1)
    linum = bytearray(str(current_line))
    # write one extra whitespace character after line number
    # unless first char is TAB
    if util.peek(ins) != '\t':
        linum += bytearray(' ')
    line, textpos = detokenise_compound_statement(ins, bytepos)
    # NOTE(review): textpos is offset by len(linum) + 1 even when no space
    # was appended (TAB case) -- confirm the extra +1 is intended there.
    return current_line, linum + line, textpos + len(linum) + 1
开发者ID:gilsim12,项目名称:pcbasic,代码行数:17,代码来源:tokenise.py


示例7: tokenise_oct

def tokenise_oct(ins, outs):
    """ Convert octal expression in Python string to number token. """
    # the O in &O is optional: both &777 and &O777 are accepted
    if util.peek(ins).upper() == 'O':
        ins.read(1)
    digits = ''
    while True:
        nxt = util.peek(ins)
        if nxt and nxt in number_whitespace:
            # octal literals may be interrupted by whitespace; skip it
            ins.read(1)
            continue
        if not nxt or nxt not in string.octdigits:
            break
        digits += ins.read(1)
    # an empty digit string denotes zero
    value = int(digits, 8) if digits else 0
    outs.write(tk.T_OCT + str(vartypes.integer_to_bytes(vartypes.int_to_integer_unsigned(value))))
开发者ID:Yungzuck,项目名称:pcbasic,代码行数:17,代码来源:representation.py


示例8: tokenise_jump_number

def tokenise_jump_number(ins, outs):
    """ Convert an ascii line number pointer to tokenised form. """
    # try a line number constant first
    num = tokenise_uint(ins)
    if num:
        outs.write(tk.T_UINT + num)
        return
    # '.' stands for the current line; pass it through verbatim
    if util.peek(ins) == '.':
        ins.read(1)
        outs.write('.')
开发者ID:boriel,项目名称:pcbasic,代码行数:8,代码来源:tokenise.py


示例9: tokenise_data

def tokenise_data(ins, outs):
    """ Pass DATA as is, till end of statement, except for literals. """
    while True:
        # copy plain characters through until a delimiter or a quote
        outs.write(ascii_read_to(ins, ('', '\r', '\0', ':', '"')))
        if util.peek(ins) != '"':
            # end of statement or of stream: done
            break
        # string literal inside DATA is passed verbatim
        tokenise_literal(ins, outs)
开发者ID:boriel,项目名称:pcbasic,代码行数:9,代码来源:tokenise.py


示例10: tokenise_hex

def tokenise_hex(ins, outs):
    """ Convert hex expression in Python string to number token. """
    # skip the H of &H; the & was consumed by the caller
    ins.read(1)
    # accumulate hex digits until a non-hex character appears
    digits = ''
    nxt = util.peek(ins)
    while nxt and nxt in string.hexdigits:
        digits += ins.read(1)
        nxt = util.peek(ins)
    # an empty digit string denotes zero
    value = int(digits, 16) if digits else 0
    outs.write(tk.T_HEX + str(vartypes.value_to_uint(value)))
开发者ID:nestormh,项目名称:pcbasic,代码行数:12,代码来源:representation.py


示例11: read_entry

def read_entry():
    """ READ a unit of DATA.

    Scans the program bytecode from the saved DATA pointer for the next
    DATA item, returning its raw text.  Advances state.basic_state.data_pos
    past the item and restores the bytecode position before returning.
    Raises error.RunError(4) (Out of DATA) when no DATA item remains.
    """
    # save the execution position; we temporarily seek to the DATA pointer
    current = state.basic_state.bytecode.tell()
    state.basic_state.bytecode.seek(state.basic_state.data_pos)
    if util.peek(state.basic_state.bytecode) in util.end_statement:
        # initialise - find first DATA
        util.skip_to(state.basic_state.bytecode, ('\x84',))  # DATA
    if state.basic_state.bytecode.read(1) not in ('\x84', ','):
        # out of DATA
        raise error.RunError(4)
    # vals: the item's accumulated text; word: pending (possibly trailing)
    # whitespace-terminated chunk; literal: inside a quoted string
    vals, word, literal = '', '', False
    while True:
        # read next char; omit leading whitespace
        if not literal and vals == '':
            c = util.skip_white(state.basic_state.bytecode)
        else:
            c = util.peek(state.basic_state.bytecode)
        # parse char
        if c == '' or (not literal and c == ',') or (c in util.end_line or (not literal and c in util.end_statement)):
            break
        elif c == '"':
            # toggle string-literal mode; closing quote must be followed
            # by a separator or end of statement
            state.basic_state.bytecode.read(1)
            literal = not literal
            if not literal:
                util.require(state.basic_state.bytecode, util.end_statement+(',',))
        else:
            state.basic_state.bytecode.read(1)
            if literal:
                vals += c
            else:
                word += c
            # omit trailing whitespace
            if c not in util.whitespace:
                vals += word
                word = ''
    # remember where DATA reading should resume, restore execution position
    state.basic_state.data_pos = state.basic_state.bytecode.tell()
    state.basic_state.bytecode.seek(current)
    return vals
开发者ID:boriel,项目名称:pcbasic,代码行数:38,代码来源:flow.py


示例12: tokenise_hex

def tokenise_hex(ins, outs):
    """ Convert hex expression in Python string to number token. """
    # pass the H in &H; the & was consumed by the caller
    ins.read(1)
    digits = ''
    while True:
        # hex literals must not be interrupted by whitespace
        nxt = util.peek(ins)
        if not nxt or nxt not in string.hexdigits:
            break
        digits += ins.read(1)
    # an empty digit string denotes zero
    value = int(digits, 16) if digits else 0
    outs.write(tk.T_HEX + str(vartypes.integer_to_bytes(vartypes.int_to_integer_unsigned(value))))
开发者ID:Yungzuck,项目名称:pcbasic,代码行数:14,代码来源:representation.py


示例13: tokenise_number

def tokenise_number(ins, outs):
    """ Convert Python-string number representation to number token.

    Dispatches on the first character: '&' selects hex/octal constants,
    digits and sign/point select decimal; writes the token to outs.
    """
    c = util.peek(ins)
    if not c:
        # end of stream: nothing to tokenise
        return
    elif c == '&':
        # handle hex or oct constants
        ins.read(1)
        if util.peek(ins).upper() == 'H':
            # hex constant
            tokenise_hex(ins, outs)
        else:
            # octal constant
            tokenise_oct(ins, outs)
    elif c in string.digits + '.+-':
        # handle other numbers
        # note GW passes signs separately as a token
        # and only stores positive numbers in the program
        tokenise_dec(ins, outs)
    else:
        # NOTE(review): rewinds one byte even though peek should not have
        # advanced the stream -- original author flagged this as suspect
        # ("looks wrong but hasn't hurt so far"); confirm before removing.
        ins.seek(-1, 1)
开发者ID:Yungzuck,项目名称:pcbasic,代码行数:23,代码来源:representation.py


示例14: store_line

 def store_line(self, line):
     """ Store a program line or schedule a command line for execution.

     line -- raw input line (string/bytearray); empty input is a no-op.
     Returns True when the interpreter should keep accepting input,
     False when a direct command was scheduled (parse_mode set).
     Mutates state.basic_state (direct_line, parse_mode).
     """
     if not line:
         return True
     state.basic_state.direct_line = tokenise.tokenise_line(line)
     c = util.peek(state.basic_state.direct_line)
     if c == '\0':
         # tokenised lines start with \0: this is a numbered program line
         # check for lines starting with numbers (6553 6) and empty lines
         program.check_number_start(state.basic_state.direct_line)
         program.store_line(state.basic_state.direct_line)
         reset.clear()
     elif c != '':
         # it is a command, go and execute
         state.basic_state.parse_mode = True
     return not state.basic_state.parse_mode
开发者ID:gilsim12,项目名称:pcbasic,代码行数:15,代码来源:interpreter.py


示例15: get_number_tokens

def get_number_tokens(fors):
    """ Get consecutive number-related formatting tokens.

    fors -- stream over a PRINT USING format string.
    Returns (word, digits_before, decimals): the consumed format field,
    the digit count before the decimal point and after it.  On no match
    the stream is rewound and ('', 0, 0) is returned.
    """
    word, digits_before, decimals = '', 0, 0
    # + comes first
    leading_plus = (util.peek(fors) == '+')
    if leading_plus:
        word += fors.read(1)
    # $ and * combinations
    c = util.peek(fors)
    if c in ('$', '*'):
        # these tokens must appear doubled ('$$', '**'); otherwise rewind
        word += fors.read(2)
        if word[-1] != c:
            fors.seek(-len(word), 1)
            return '', 0, 0
        if c == '*':
            digits_before += 2
            # '**$' combination: asterisk fill plus leading dollar
            if util.peek(fors) == '$':
                word += fors.read(1)
        else:
            digits_before += 1
    # number field
    c = util.peek(fors)
    dot = (c == '.')
    if dot:
        word += fors.read(1)
    if c in ('.', '#'):
        while True:
            c = util.peek(fors)
            if not dot and c == '.':
                word += fors.read(1)
                dot = True
            elif c == '#' or (not dot and c == ','):
                # '#' is a digit position; ',' (before the point) groups
                word += fors.read(1)
                if dot:
                    decimals += 1
                else:
                    digits_before += 1
            else:
                break
    if digits_before + decimals == 0:
        # no digit positions at all: not a number field, rewind
        fors.seek(-len(word), 1)
        return '', 0, 0
    # post characters
    if util.peek(fors, 4) == '^^^^':
        # scientific-notation marker
        word += fors.read(4)
    if not leading_plus and util.peek(fors) in ('-', '+'):
        # trailing sign, only allowed if there was no leading plus
        word += fors.read(1)
    return word, digits_before, decimals
开发者ID:nestormh,项目名称:pcbasic,代码行数:48,代码来源:print_and_input.py


示例16: get_string_tokens

def get_string_tokens(fors):
    """ Get consecutive string-related formatting tokens. """
    c = util.peek(fors)
    if c in ('!', '&'):
        # single-character field tokens
        return fors.read(1)
    if c != '\\':
        # not a string field: consume nothing
        return ''
    # '\ ... \' field: only spaces allowed inside, closing '\' required
    token = fors.read(1)
    while True:
        c = fors.read(1)
        token += c
        if c == '\\':
            return token
        if c != ' ':  # can be empty as well
            # malformed field: rewind everything consumed, return nothing
            fors.seek(-len(token), 1)
            return ''
开发者ID:boriel,项目名称:pcbasic,代码行数:18,代码来源:representation.py


示例17: tokenise_line_number

def tokenise_line_number(ins, outs):
    """ Convert an ascii line number to tokenised start-of-line.

    ins  -- stream over ascii program text
    outs -- tokenised output stream.
    Writes the \\0 line terminator plus a line-number token, or ':' as a
    direct-mode anchor when no line number is present.
    """
    linenum = tokenise_uint(ins)
    if linenum != '':
        # terminates last line and fills up the first char in the buffer
        # (that would be the magic number when written to file)
        # in direct mode, we'll know to expect a line number if the output
        # starts with a  00
        outs.write('\0')
        # write line number. first two bytes are for internal use
        # & can be anything nonzero; we use this.
        outs.write('\xC0\xDE' + linenum)
        # ignore single whitespace after line number, if any,
        # unless line number is zero (as does GW)
        if util.peek(ins) == ' ' and linenum != '\0\0' :
            ins.read(1)
    else:
        # direct line; internally, we need an anchor for the program pointer,
        # so we encode a ':'
        outs.write(':')
开发者ID:boriel,项目名称:pcbasic,代码行数:20,代码来源:tokenise.py


示例18: parse_expression

def parse_expression(ins, allow_empty=False):
    """ Compute the value of the expression at the current code pointer.

    ins         -- tokenised code stream
    allow_empty -- if True, an empty expression returns None instead of
                   raising.
    Returns the expression value in internal representation; raises
    error.RunError (Syntax Error or Missing Operand) on malformed input.
    """
    # stack: pending (operator, arity) pairs; units: operand values
    stack = deque()
    units = deque()
    d = ''
    missing_error = error.MISSING_OPERAND
    # see https://en.wikipedia.org/wiki/Shunting-yard_algorithm
    while True:
        last = d
        d = util.skip_white(ins)
        # two-byte function tokens
        if d in tk.twobyte:
            d = util.peek(ins, n=2)
        if d == tk.NOT and not (last in operators or last == ''):
            # unary NOT ends expression except after another operator or at start
            break
        elif d in operators:
            ins.read(len(d))
            # get combined operators such as >=
            if d in combinable:
                nxt = util.skip_white(ins)
                if nxt in combinable:
                    d += ins.read(len(nxt))
            if last in operators or last == '' or d == tk.NOT:
                # also if last is ( but that leads to recursive call and last == ''
                nargs = 1
                # zero operands for a binary operator is always syntax error
                # because it will be seen as an illegal unary
                if d not in unary:
                    raise error.RunError(error.STX)
            else:
                nargs = 2
                # binary operator: pop anything of higher precedence first
                _evaluate_stack(stack, units, operators[d], error.STX)
            stack.append((d, nargs))
        elif not (last in operators or last == ''):
            # repeated unit ends expression
            # repeated literals or variables or non-keywords like 'AS'
            break
        elif d == '(':
            # parenthesised sub-expression, parsed recursively
            units.append(parse_bracket(ins))
        elif d and d in string.ascii_letters:
            # variable name
            name, indices = parse_variable(ins)
            units.append(var.get_variable(name, indices))
        elif d in functions:
            # apply functions
            ins.read(len(d))
            try:
                units.append(functions[d](ins))
            except (ValueError, ArithmeticError) as e:
                # math errors (overflow, division by zero) are mapped to
                # BASIC errors or soft-handled by _handle_math_error
                units.append(_handle_math_error(e))
        elif d in tk.end_statement:
            break
        elif d in tk.end_expression or d in tk.keyword:
            # missing operand inside brackets or before comma is syntax error
            missing_error = error.STX
            break
        else:
            # literal
            units.append(parse_literal(ins))
    # empty expression is a syntax error (inside brackets)
    # or Missing Operand (in an assignment)
    # or not an error (in print and many functions)
    if units or stack:
        _evaluate_stack(stack, units, 0, missing_error)
        return units[0]
    elif allow_empty:
        return None
    else:
        raise error.RunError(missing_error)
开发者ID:gilsim12,项目名称:pcbasic,代码行数:70,代码来源:expressions.py


示例19: tokenise_literal

def tokenise_literal(ins, outs):
    """ Pass a string literal. """
    # copy the opening quote
    outs.write(ins.read(1))
    # copy everything up to EOL, NUL, closing quote, or end of stream
    outs.write(ascii_read_to(ins, ('', '\r', '\0', '"')))
    # copy the closing quote, if the literal was properly terminated
    if util.peek(ins) == '"':
        outs.write(ins.read(1))
开发者ID:boriel,项目名称:pcbasic,代码行数:6,代码来源:tokenise.py


示例20: eof

 def eof(self):
     """ Check for end of file EOF. """
     # for EOF(i): files open for APPEND or OUTPUT are write streams,
     # so they are never reported as at end-of-file
     if self.mode in ('A', 'O'):
         return False
     # EOF at end of stream or at a CTRL-Z (\x1a) soft end-of-file marker
     next_char = util.peek(self.fhandle)
     return next_char in ('', '\x1a')
开发者ID:NestorAlbelo,项目名称:PC_Basic_Brewer,代码行数:6,代码来源:iolayer.py



注:本文中的util.peek函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


鲜花

握手

雷人

路过

鸡蛋
该文章已有0人参与评论

请发表评论

全部评论

专题导读
上一篇:
Python util.perform_cleanup函数代码示例发布时间:2022-05-26
下一篇:
Python util.pconvert函数代码示例发布时间:2022-05-26
热门推荐
阅读排行榜

扫描微信二维码

查看手机版网站

随时了解更新最新资讯

139-2527-9053

在线客服(服务时间 9:00~18:00)

在线QQ客服
地址:深圳市南山区西丽大学城创智工业园
电邮:jeky_zhao#qq.com
移动电话:139-2527-9053

Powered by 互联科技 X3.4© 2001-2023 极客世界.|Sitemap