
Python diag.progress Function Code Examples


This article collects typical usage examples of Python's swap.diag.progress function. If you are wondering what progress does, how to call it, or what real-world usage looks like, the curated examples below should help.



A total of 20 code examples of the progress function are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
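Before the examples, here is a minimal usage sketch. It is not taken from any of the projects below; it only mirrors the calling patterns visible in the excerpts (variable arguments and the module-level diag.chatty_flag verbosity switch) and assumes the legacy swap (cwm) package is importable. The excerpts themselves are Python 2 code.

# Minimal usage sketch (assumption: the legacy swap/cwm package is on the
# Python path; the excerpts below target Python 2).
from swap import diag
from swap.diag import progress

diag.chatty_flag = 30          # module-level verbosity switch, as set in Example 13
verbose = diag.chatty_flag

progress("Loading %s..." % "example.n3")   # a single pre-formatted message
progress("Loaded", 1234, "statements")     # several arguments, as in the excerpts

if verbose > 3:                            # the gating pattern used throughout
    progress("Predicate:", "http://example.org/p")

Note that the excerpts are fragments of larger programs, so names such as verbose, kb, g and myStore refer to module-level state defined elsewhere in the original projects.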

Example 1: loadFiles

def loadFiles(files):
    graph = myStore.formula()
    graph.setClosureMode("e")    # Implement sameAs by smushing
    if verbose>0: progress("Loading %s..." % files)
    graph = myStore.loadMany(files, openFormula=graph)
    if verbose>0: progress("Loaded", graph)
    return graph
Developer: Mchockalingam, Project: swap, Lines: 7, Source: delta.py


Example 2: parseProduction

    def parseProduction(parser, lhs, tok, stream):
        "The parser itself."

        if tok() is None: return None
        name, thing, line = tok()
        lookupTable = parser.branchTable[lhs]
        rhs = lookupTable.get(name, None)  # Predict branch from token
        if rhs == None:
            progress("""Found %s when expecting some form of %s,
\tsuch as %s\n\t%s"""  % (tok(), lhs, lookupTable.keys(), parser.around(None, None)))
            raise SyntaxError("""Found %s when expecting some form of %s,
\tsuch as %s\n\t%s"""  % (tok(), lhs, lookupTable.keys(), parser.around(None, None)))
        if parser.verb: progress( "%i  %s means expand %s as %s" %(parser.lineNumber,tok(), lhs, rhs.value()))
        tree = [lhs]
        for term in rhs:
            lit = term.fragid
            if lit != name: # Not token
                if lit in parser.tokenSet:
                    progress("Houston, we have a problem. %s is not equal to %s" % (lit, name))
                progress("recursing on %s, which is not %s. Token is %s" % (lit, name, `tok()`))
                tree.append(parser.parseProduction(term, tok, stream))
            else:
                progress("We found %s, which matches %s" % (lit, `tok()`))
                tree.append(tok())
                tok(parser.token(stream))  # Next token
            if tok():
                name, thing, line = tok()
            else:
                name, thing = None, None
        if hasattr(parser, "p_" + lhs.fragid):
            return getattr(parser, "p_" + lhs.fragid)(tree)
        return tree
Developer: AwelEshetu, Project: cwm, Lines: 32, Source: lexedParser.py


Example 3: internalCheck

def internalCheck():
    global kb
    global cat
    transactions = kb.each(pred=rdf.type, obj=cat_ns.Internal)
    unbalanced = []
    while len(transactions) > 0:
        x = transactions.pop()
        month = monthNumber(x)
        if month < 0 : continue

        date = str(kb.the(subj=x, pred=qu.date))
        if len(kb.each(subj=x, pred=qu.in_USD)) != 1:
            progress("Ignoring !=1 amount transaction %s" % x)
            continue
        amount = float(str(kb.the(subj=x, pred=qu.in_USD)))
        for y in transactions:
            datey = str(kb.the(subj=y, pred=qu.date))
            if 1: #  date[0:10] == datey[0:10]:  # precision one day
                usds = kb.each(subj=y, pred=qu.in_USD)
                if len(usds) == 0:continue  # No amount => must be not in this period.
                if len(usds) != 1:
                    progress("Error: Ignoring: %i != 1 USD amounts for Internal transaction %s" % (len(usds), `y`+': '+ `usds`))
                    transactions.remove(y)
                    continue
                if abs(amount +
                        float(str(kb.the(subj=y, pred=qu.in_USD)))) < 0.001:
                    transactions.remove(y)
                    break
        else:
            unbalanced.append(x)
    if unbalanced:
        print "<h2>Unbalanced internal transactions</h2>"
        print transactionTable(unbalanced);
    return
Developer: Mchockalingam, Project: swap, Lines: 34, Source: fin.py


Example 4: saveAs

def saveAs(uri, filename):
    gifStream = urlopen(uri)
    gifData = gifStream.read()
    gifStream.close()
    progress('curl "%s" > %s' % (uri, filename))
    saveStream = open(filename, "w")
    saveStream.write(gifData)
    saveStream.close()
Developer: AwelEshetu, Project: cwm, Lines: 8, Source: day.py


Example 5: getSize

 def getSize(s, atr):
     i = s.find(atr + '="') + len(atr) + 2
     val = ""
     while s[i] in "0123456789":
         val += s[i]
         i = i + 1
     x = int(val)
     progress("Found attribute %s=%i" % (atr, x))
     return x
Developer: AwelEshetu, Project: cwm, Lines: 9, Source: day.py


Example 6: token

  def token(parser, str, i):
      """The Tokenizer:  returns (token type character, offset of token)
      Skips spaces.
      "0" means numeric
      "a" means alphanumeric
      """
      
      while 1:
          m = whiteSpace.match(str, i)
          if m == None or m.end() == i: break
          i = m.end()
      parser.countLines(str, i)
      if i == len(str):
          return "",  i # eof
      
      if parser.verb: progress( "%i) Looking at:  ...%s$%s..." % (
          parser.lineNumber, str[i-10:i],str[i:i+10]))
      for double in "=>", "<=", "^^":
          if double == str[i:i+2]: return double, i
  
      ch = str[i]
      if ch == ".": parser.keywordMode = 0 # hack
      if ch in singleCharacterSelectors:
          return ch, i
      if ch in "+-0123456789":
          return "0", i #  Numeric
      j = i+1
      if ch == "@":
          if i!=0 and whiteSpace.match(str[i-1]).end() == 0:
              return ch, i
          while str[j] not in notNameChars: j = j + 1
          if str[i+1:j] == "keywords" :
              parser.keywords = [] # Special
              parser.keywordMode = 1
          return str[i:j], i # keyword
      if ch == '"':  #"
          return '"', i #"
 
      # Alphanumeric: keyword hacks
      while str[j] not in notQNameChars: j = j+1
      word = str[i:j]
      if parser.keywordMode:
          parser.keywords.append(word)
      elif word in parser.keywords:
          if word == "keywords" :
              parser.keywords = []    # Special
              parser.keywordMode = 1
          if parser.atMode:
              return "@" + word, i  # implicit keyword
          return word, i
      return "a", i    # qname, langcode, or barename
Developer: dard12, Project: WebSearch, Lines: 51, Source: predictiveParser.py


Example 7: removeCommon

def removeCommon(f, g, match):
    """Find common statements from f and g
    match gives the dictionary mapping bnodes in f to bnodes in g"""
    only_f, common_g = Set(), Set()
    for st in f.statements[:]:
        s, p, o = st.spo()
        assert s not in f._redirections 
        assert o not in f._redirections
        if s.generated(): sg = match.get(s, None)
        else: sg = s
        if o.generated(): og = match.get(o, None)
        else: og = o
        if og != None and sg != None:
            gsts = g.statementsMatching(subj=sg, pred=p, obj=og)
            if len(gsts) == 1:
                if verbose>4: progress("Statement in both", st)
                common_g.add(gsts[0])
                continue
        only_f.add(st)
    return only_f, Set(g.statements)-common_g
Developer: Mchockalingam, Project: swap, Lines: 20, Source: delta.py


Example 8: consolidate

def consolidate(delta, patchVerb):
    """Consolidate patches
    
    Where the same left hand side applies to more than 1 RHS formula,
    roll those RHS formulae into one, to make the dif file more readable
    and faster to execute in some implementations
    """
    agenda = {}
    if verbose >3: progress("Consolidating %s" % patchVerb)
    for s in delta.statementsMatching(pred=patchVerb):
        list = agenda.get(s.subject(), None)
        if list == None:
            list = []
            agenda[s.subject()] = list
        list.append(s)
    for lhs, list in agenda.items():
        if verbose >3: progress("Patches lhs= %s: %s" %(lhs, list))
        if len(list) > 1:
            rhs = delta.newFormula()
            for s in list:
                delta.store.copyFormula(s.object(), rhs)
                delta.removeStatement(s)
            delta.add(subj=lhs, pred=patchVerb, obj=rhs.close())
Developer: Mchockalingam, Project: swap, Lines: 23, Source: delta.py


Example 9: P

def P(s):
    """
    Input: a state s
    Output: possible pairs to add to the mapping
    """
    G1 = s.problem.G1
    G2 = s.problem.G2
    t1_out_size, t2_out_size, t1_in_size, t2_in_size = (len(s.t1_out), len(s.t2_out), len(s.t1_in), len(s.t2_in))
    progress("P(s) %s %s %s %s" % (t1_out_size, t2_out_size, t1_in_size, t2_in_size))
    if t1_out_size and t2_out_size:
        progress(", case 1")
        m = s.t2_out.first()
        if representsSelf(m):
            if m in s.t1_out:
                yield m, m, regular
        else:
            for n in s.t1_out:
                yield n, m, regular
    elif not t1_out_size and not t2_out_size and t1_in_size and t2_in_size:
        progress(", case 2")
        m = s.t2_in.first()
        if representsSelf(m):
            if m in s.t1_in:
                yield m, m, regular
        else:
            for n in s.t1_in:
                yield n, m, regular
    elif not t1_out_size and not t2_out_size and not t1_in_size and not t2_in_size:
        progress(", case 3")
        m = s.G2_not_taken.first()
        if representsSelf(m):
            if m in s.G1_not_taken:
                yield m, m, regular
        else:
            for n in s.G1_not_taken:
                yield n, m, regular
Developer: AwelEshetu, Project: cwm, Lines: 36, Source: vf2.py


Example 10: parseProduction

    def parseProduction(parser, lhs, str, tok=None, here=0):
        "The parser itself."

        if tok == "": return tok, here # EOF    
        lookupTable = parser.branchTable[lhs]
        rhs = lookupTable.get(tok, None)  # Predict branch from token
        if rhs == None:
            progress("""Found %s when expecting some form of %s,
\tsuch as %s\n\t%s"""  % (tok, lhs, lookupTable.keys(), parser.around(str, here)))
            raise SyntaxError("""Found %s when expecting some form of %s,
\tsuch as %s\n\t%s"""  % (tok, lhs, lookupTable.keys(), parser.around(str, here)))
        if parser.verb: progress( "%i  %s means expand %s as %s" %(parser.lineNumber,tok, lhs, rhs.value()))
        for term in rhs:
            if isinstance(term, Literal): # CFG Terminal
                lit = term.value()
                next = here + len(lit)
                if str[here:next] == lit: pass
                elif "@"+str[here:next-1] == lit: next = next-1
                else: raise SyntaxError(
                    "Found %s where %s expected\n\t %s" %
                        (`str[here:next]`, lit, parser.around(str, here)))
            else:
                rexp = tokenRegexps.get(term, None)
                if rexp == None: # Not token
                    tok, here = parser.parseProduction(term, str, tok, here)
                    continue
                m = rexp.match(str, here)
                if m == None:
                    progress("\n\n\nToken: should match %s\n\t %s" % 
                                (rexp.pattern, parser.around(str, here)))
                    raise SyntaxError("Token: should match %s\n\t %s" % 
                                (rexp.pattern, parser.around(str, here)))
                if parser.verb: progress( "Token matched to <%s> as pattern <%s>" % (str[here:m.end()], rexp.pattern))
                next = m.end()
            tok, here = parser.token(str, next)  # Next token
        return tok, here
Developer: dard12, Project: WebSearch, Lines: 36, Source: predictiveParser.py


Example 11: match

def match(s, extras=BindingTree()):
    """
Input: an intermediate state s
Output: the mapping between the two graphs

When a match forces a predicate match, we add that
to extras --- we go through all of those before continuing
on our regularly scheduled P(s)
    """
    progress("starting match")
    progress("s.map=%s" % s.map)
    G2 = s.problem.G2
    for choice in extras:
        if not choice:
            if set(s.map.values()) >= G2.allNodes():
                yield s.map
            elif set(s.map.values()) >= G2.nodes():
                yield finish(s, s.map)

            nodeList = P(s)

        else:
            n, m = choice[0]
            nodeList = [(n, m, choice[1:])]

        nodeList = [x for x in nodeList]
        progress("nodeList=", nodeList)
        for n, m, realExtras in nodeList:
            progress("... trying n,m=%s,%s" % (n, m))
            newExtras = BindingTree()
            newExtras.int_and(realExtras)
            if F(s, n, m, newExtras):
                s2 = s.addNode(n, m)
                for x in match(s2, newExtras):
                    yield x
                s2.undo()
Developer: AwelEshetu, Project: cwm, Lines: 36, Source: vf2.py


Example 12: lookUp

def lookUp(predicates, assumptions=Set()):
    """Look up all the schemas for the predicates given"""
    global verbose
    schemas = assumptions
    for pred in predicates:
        if verbose > 3: progress("Predicate: %s" % `pred`)
        u = pred.uriref()
        hash = u.find("#")
        if hash <0:
            if verbose > 1: progress("Warning: Predicate <%s> looks like web resource not Property" % u)
        else:
            schemas.add(u[:hash])
    if verbose > 2:
        for r in schemas:
            progress("Metadata to be loaded: ", r) 
    if schemas:
        return loadMany([(x) for x in schemas])
    return myStore.store.newFormula() # Empty formula
Developer: Mchockalingam, Project: swap, Lines: 18, Source: delta.py


Example 13: main

def main():
    global already, agenda, errors
    parseAs = None
    grammarFile = None
    parseFile = None
    yaccFile = None
    global verbose
    global g
    verbose = 0
    lumped = 1

    try:
        opts, args = getopt.getopt(sys.argv[1:], "ha:v:p:g:y:",
            ["help", "as=",  "verbose=", "parse=", "grammar=", "yacc="])
    except getopt.GetoptError:
        usage()
        sys.exit(2)
    output = None
    for o, a in opts:
        if o in ("-h", "--help"):
            usage()
            sys.exit()
        if o in ("-v", "--verbose"):
            verbose =int(a)
            diag.chatty_flag = int(a)
        if o in ("-a", "--as"):
            parseAs = uripath.join(uripath.base(), a)
        if o in ("-p", "--parse"):
            parseFile = uripath.join(uripath.base(), a)
        if o in ("-g", "--grammar"):
            grammarFile = uripath.join(uripath.base(), a)
        if o in ("-y", "--yacc"):
            yaccFile = uripath.join(uripath.base(), a)[5:]  # strip off file:

    

#    if testFiles == []: testFiles = [ "/dev/stdin" ]
    if not parseAs:
        usage()
        sys.exit(2)
    parseAs = uripath.join(uripath.base(), parseAs)
    if not grammarFile:
        grammarFile = parseAs.split("#")[0]   # strip off fragid
    else:
        grammarFile = uripath.join(uripath.base(), grammarFile)


    
    # The Grammar formula
    progress("Loading " + grammarFile)
    start = clock()
    g = load(grammarFile)
    taken = clock() - start + 1
    progress("Loaded %i statements in %fs, ie %f/s." %
        (len(g), taken, len(g)/taken))
    
    document = g.newSymbol(parseAs)
    
    already = []
    agenda = []
    errors = []
    doProduction(document)
    while agenda:
        x = agenda[0]
        agenda = agenda[1:]
        already.append(x)
        doProduction(x)
        
    if errors != []:
        progress("###### FAILED with %i errors." % len(errors))
        for s in errors: progress ("\t%s" % s)
        exit(-2)
    else:
        progress( "Ok for predictive parsing")
    
    #if parser.verb: progress "Branch table:", branchTable
    if verbose:
        progress( "Literal terminals: %s" %  literalTerminals.keys())
        progress("Token regular expressions:")
        for r in tokenRegexps:
            progress( "\t%s matches %s" %(r, tokenRegexps[r].pattern) )
    
    if yaccFile:
        yacc=open(yaccFile, "w")
        yaccConvert(yacc, document, tokenRegexps)
        yacc.close()

    if parseFile == None: exit(0)

    
    ip = webAccess.urlopenForRDF(parseFile, None)
    
    str = ip.read().decode('utf_8')
    sink = g.newFormula()
    keywords = g.each(pred=BNF.keywords, subj=document)
    keywords = [a.value() for a in keywords]
    p = PredictiveParser(sink=sink, top=document, branchTable= branchTable,
            tokenRegexps= tokenRegexps, keywords =  keywords)
    p.verb = verbose
    start = clock()
#......... remainder of the code omitted .........
Developer: dard12, Project: WebSearch, Lines: 101, Source: predictiveParser.py


Example 14: differences

def differences(f, g, assumptions):
    """Smush the formulae.  Compare them, generating patch instructions."""
    global lumped
    
# Cross-map nodes:

    g_bnodes, g_definitions = nailFormula(g, assumptions)
    bnodes, definitions = nailFormula(f, assumptions)
    if verbose > 1: progress("\n Done nailing")
    definitions.reverse()  # go back down list @@@ reverse the g list too? @@@
    g_definitions.reverse()     # @@ needed for the patch generation
    
    unmatched = bnodes.copy()
    match = {}  # Mapping of nodes in f to nodes in g
    for x, inverse, pred, y in definitions:
        if x in match: continue # done already

        if x in f._redirections:
            if verbose > 3: progress("Redirected %s to %s. Ignoring" % (`x`, `f._redirections[x]`))
            unmatched.discard(x)
            continue

        if verbose > 3: progress("Definition of %s = %s%s%s"% (`x`, `y` , ".!^"[inverse], `pred`))

        if y.generated():
            while y in f._redirections:
                y = f._redirections[y]
                if verbose>4: progress(" redirected to  %s = %s%s%s"% (`x`,  `y`, "!^"[inverse], `pred`))
            yg = match.get(y, None)
            if yg == None:
                if verbose>4: progress("  Had definition for %s in terms of %s which is not matched"%(`x`,`y`))
                continue
        else:
            yg = y

        if inverse:  # Inverse functional property like ssn
            matches = Set(g.each(obj=yg, pred=pred))
        else: matches = Set(g.each(subj=yg, pred=pred))
        if len(matches) == 0:
            continue   # This is normal - the node does not exist in the other graph
#           raise RuntimeError("Can't match %s" % x)

        if len(matches) > 1:
            raise RuntimeError("""Rats. Wheras in the first graph %s%s%s uniquely selects %s,
                    in the other graph there are more than 1 matches: %s""" % (`y`, "!^"[inverse], `pred`, `x`,  `matches`))
        for q in matches:  # pick only one  @@ python function?
            z = q
            break
        if verbose > 2:
            progress("Found match for %s in %s " % (`x`,`z`))
        match[x] = z
        unmatched.discard(x)

    if len(unmatched) > 0:
        if verbose >1:
            progress("Failed to match all nodes:", unmatched)
            for n in unmatched:
                debugBnode(n, f)

    # Find common parts
    only_f, only_g = removeCommon(f,g, match)

    delta = f.newFormula()
    if len(only_f) == 0 and len(only_g) == 0:
        return delta

    f = f.close()    #  We are not going to mess with them any more
    g = g.close()
    
    common = Set([match[x] for x in match])

    if verbose>2: progress("Common bnodes (as named in g)", common)
    patches(delta, f, only_f, Set(), definitions, deleting=1)
    patches(delta, g, only_g, common, g_definitions, deleting=0)
    if lumped:
        consolidate(delta, delta.store.insertion)
        consolidate(delta, delta.store.deletion)
    return delta
Developer: Mchockalingam, Project: swap, Lines: 78, Source: delta.py


Example 15: doProduction

def doProduction(lhs):
    "Generate branch tables for one production"
    global branchTable
    if lhs is BNF.void:
        progress("\nvoid")
        return
    if lhs is BNF.eof:
        progress( "\nEOF")
        return
    if isinstance(lhs, Literal):
        literalTerminals[lhs.value()] = 1
        return

    branchDict = {}

    rhs = g.the(pred=BNF.matches, subj=lhs)
    if rhs != None:
        if chatty_flag: progress( "\nToken %s matches regexp %s" %(lhs, rhs))
        try:
            tokenRegexps[lhs] = re.compile(rhs.value(), re.U)
        except:
            print rhs.value().encode('utf-8')
            raise
        cc = g.each(subj=lhs, pred=BNF.canStartWith)
        if cc == []: progress (recordError(
            "No record of what token %s can start with" % `lhs`))
        if chatty_flag: progress("\tCan start with: %s" % cc) 
        return
    if g.contains(subj=lhs, pred=RDF.type, obj=REGEX.Regex):
        import regex
        rhs = regex.makeRegex(g, lhs)
        try:
            tokenRegexps[lhs] = re.compile(rhs, re.U)
        except:
            print rhs
            raise
        cc = g.each(subj=lhs, pred=BNF.canStartWith)
        if cc == []: progress (recordError(
            "No record of what token %s can start with" % `lhs`))
        if chatty_flag: progress("\tCan start with: %s" % cc) 
        return         
    
    rhs = g.the(pred=BNF.mustBeOneSequence, subj=lhs)
    if rhs == None:
        progress (recordError("No definition of " + `lhs`))
        return
#       raise RuntimeError("No definition of %s  in\n %s" %(`lhs`, `g`))
    options = rhs
    if chatty_flag: progress ( "\nProduction %s :: %s  ie %s" %(`lhs`, `options` , `options.value()`))
    succ = g.each(subj=lhs, pred=BNF.canPrecede)
    if chatty_flag: progress("\tCan precede ", succ)

    branches = g.each(subj=lhs, pred=BNF.branch)
    for branch in branches:
        option = g.the(subj=branch, pred=BNF.sequence)
        if chatty_flag: progress( "\toption: "+`option.value()`)
        for part in option:
            if part not in already and part not in agenda: agenda.append(part)
            y = `part`
        conditions = g.each(subj=branch, pred=BNF.condition)
        if conditions == []:
            progress(
                recordError(" NO SELECTOR for %s option %s ie %s" %
                (`lhs`, `option`, `option.value()` )))
            if option.value == []: # Void case - the tricky one
                succ = g.each(subj=lhs, pred=BNF.canPrecede)
                for y in succ:
                    if chatty_flag: progress("\t\t\tCan precede ", `y`)
        if chatty_flag: progress("\t\tConditions: %s" %(conditions))
        for str1 in conditions:
            if str1 in branchDict:
                progress(recordError(
                    "Conflict: %s is also the condition for %s" % (
                                str1, branchDict[str1].value())))
            branchDict[str1.__str__()] = option
#           break

    for str1 in branchDict:
        for str2 in branchDict:
            s1 = unicode(str1)
            s2 = unicode(str2)
# @@ check that selectors are distinct, not substrings
            if (s1.startswith(s2) or s2.startswith(s1)) and branchDict[str1] is not branchDict[str2]:
                progress("WARNING: for %s, %s indicates %s, but  %s indicates %s" % (
                            lhs, s1, branchDict[str1], s2, branchDict[str2]))
    branchTable[lhs] = branchDict
Developer: dard12, Project: WebSearch, Lines: 86, Source: predictiveParser.py


Example 16: yaccProduction

def yaccProduction(yacc, lhs,  tokenRegexps):
    if lhs is BNF.void:
        if chatty_flag: progress( "\nvoid")
        return
    if lhs is BNF.eof:
        if chatty_flag: progress( "\nEOF")
        return
    if isinstance(lhs, Literal):
        literalTerminals[lhs.value()] = 1
#       print "\nLiteral %s" %(lhs)
        return

    rhs = g.the(pred=BNF.matches, subj=lhs)
    if rhs != None:
        if chatty_flag: progress( "\nToken %s matches regexp %s" %(lhs, rhs))
#       tokenRegexps[lhs] = re.compile(rhs.value())
        return
    rhs = g.the(pred=BNF.mustBeOneSequence, subj=lhs)
    if rhs == None:
        progress( recordError("No definition of " + `lhs`))
        raise ValueError("No definition of %s  in\n %s" %(`lhs`, `g`))
    options = rhs
    if chatty_flag:
        progress ("\nProduction %s :: %s  ie %s" %(`lhs`, `options` , `options.value()`))
    yacc.write("\n%s:" % toYacc(lhs, tokenRegexps))

    branches = g.each(subj=lhs, pred=BNF.branch)
    first = 1
    for branch in branches:
        if not first:
            yacc.write("\t|\t")
        first = 0
        option = g.the(subj=branch, pred=BNF.sequence)
        if chatty_flag: progress( "\toption: "+`option.value()`)
        yacc.write("\t")
        if option.value() == [] and yacc: yacc.write(" /* empty */")
        for part in option:
            if part not in already and part not in agenda: agenda.append(part)
            yacc.write(" %s" % toYacc(part, tokenRegexps))
        yacc.write("\n")
    yacc.write("\t;\n")
Developer: dard12, Project: WebSearch, Lines: 41, Source: predictiveParser.py


Example 17: doCommand

def doCommand(serialDevice=None, outputURI=None, doTracks=1, doWaypoints=1, verbose=0):

   if os.name == 'nt':
      if not serialDevice: serialDevice =  "com1"
      phys = Win32SerialLink(serialDevice)
   else:
      if not serialDevice:  serialDevice =  "/dev/ttyS0"
#      serialDevice =  "/dev/cu.USA19H191P1.1"
      phys = UnixSerialLink(serialDevice)
      
   gps = Garmin(phys)

   print "GPS Product ID: %d Descriptions: %s Software version: %2.2f" % \
         (gps.prod_id, gps.prod_descs, gps.soft_ver)


   f = formula() # Empty store of RDF data
   base = uripath.base()
   
   record = f.newBlankNode()
   f.add(record, RDF.type, GPS.Record)

   if doWaypoints:
        # show waypoints
        if verbose: print "Getting waypoints"
        wpts = gps.getWaypoints()
        for w in wpts:
            if verbose: progress(`w`)
            wpt = symbol(uripath.join(base, w.ident))
            f.add(record, GPS.waypoint, wpt)
            f.add(wpt, WGS.lat, obj=intern(degrees(w.slat)))
            f.add(wpt, WGS.long, obj=intern(degrees(w.slon)))


   if doTracks:
      # show track
      if verbose: print "Getting tracks"
      tracks = gps.getTracks()
      for t in tracks:
        track = f.newBlankNode()
        f.add(record, GPS.track, track)
        for p in t:
            if isinstance(p, TrackHdr):
                if verbose: progress(`p`)
                f.add(track, GPS.disp, intern(p.dspl))
                f.add(track, GPS.color, intern(p.color))
                f.add(track, GPS.trk_ident, intern(p.trk_ident))
            else:
                if verbose: progress(`p`)
                point = f.newBlankNode()
                f.add(track, GPS.trackpoint, point)
                f.add(point, WGS.lat, obj=intern(degrees(p.slat)))
                f.add(point, WGS.long, obj=intern(degrees(p.slon)))
#               if verbose: progress("    time=", p.time)
#                progress('p.time='+`p.time`) # @@
                if p.time == 0 or p.time == 0xffffffffL:
                    if verbose: progress("time=%8x, ignoring" % p.time)
                else:
                    f.add(point, WGS.time, obj=intern(isodate.fullString(TimeEpoch+p.time)))

   phys.f.close()  # Should really be done by the del() below, but isn't
   del(phys) # close serial link (?)
   f = f.close()
   if verbose:
        progress("Beginning output. You can disconnect the GPS now.")
   s = f.n3String(base=base, flags="d")   # Flag - no default prefix, preserve gps: prefix hint
   if outputURI != None:
        op = open(outputURI, "w")
        op.write(s)
        op.close()
   else:
        print s 
Developer: Mchockalingam, Project: swap, Lines: 72, Source: fromGarmin.py


Example 18: nailFormula

def nailFormula(f, assumptions=Set()):
    """Smush the formula.
    Build a dictionary of nodes which are indirectly identified
    by [inverse] functonal properties."""
    global verbose
    cc, predicates, ss, oo = getParts(f)
    nodes = ss | oo
    sofar = {}
    bnodes = Set()
    for node in nodes:
        if node.generated() or node in f.existentials():
            bnodes.add(node)
            if verbose >=5: progress("Blank node: %s" % `node`)
        else:
            if verbose >=5: progress("Fixed node: %s" % `node`)
        sofar[node] = []

    meta = lookUp(predicates, assumptions)
    ifps = predicates & Set(meta.each(pred=RDF.type, obj=OWL.InverseFunctionalProperty))
    fps = predicates & Set(meta.each(pred=RDF.type, obj=OWL.FunctionalProperty))
    if verbose > 1:
        for p in fps:  progress("Functional Property:", p)
        for p in ifps: progress("Inverse Functional: ", p)
    
    definitions = []
    if len(bnodes) == 0:
        if verbose > 0: progress("No bnodes in graph")
        return  bnodes, definitions

    a = float(len(bnodes))/len(nodes)
    if verbose > 1: progress("Proportion of bodes which are blank: %f" % a)
#    if a == 0: return bnodes, definitions

    loose = bnodes.copy()
    equivs = Set()
    #  Note possible optmization: First pass only like this, 
    # future passes work from newNodes.
    while loose:
        newNailed = Set()
        for preds, inverse, char in ((fps, 0, "!"), (ifps, 1, "^")):
            for pred in preds:
                if verbose > 3: progress("Predicate", pred)
                ss = f.statementsMatching(pred=pred)
                for s in ss:
                    if inverse: y, x = s.object(), s.subject()
                    else: x, y = s.object(), s.subject()
                    if not x.generated(): continue  # Only anchor bnodes
                    if y not in loose:  # y is the possible anchor
                        defi = (x, inverse, pred, y)
                        if x in loose:   # this node
                            if verbose > 4: progress("   Nailed %s as %s%s%s" % (`x`, `y`, `char`, `pred`))
                            loose.discard(x)
                            newNailed.add(x)
                        else:
                            if verbose >=6 : progress(
                                "   (ignored %s as %s%s%s)" % (`x`, `y`, `char`, `pred`))
                        definitions.append(defi)
#                       if verbose > 9: progress("   Definition[x] is now", definition[x])
                        if inverse: equivalentSet = Set(f.each(obj=y, pred=pred))
                        else: equivalentSet = Set(f.each(subj=y, pred=pred))
                        if len(equivalentSet) > 1: equivs.add(equivalentSet)

        if not newNailed:
            if verbose > 1:
                progress("Failed to nail nodes:", loose)
                if verbose > 3:
                    for n in loose:
                        debugBnode(n, f)
            break

# At this point if we still have loose nodes, we have failed with ifps and fps.
# Diff may not be strong. (It might still be: the diffs might not involve weak definitions)

    weak = loose.copy()  # Remember
    if verbose > 0: progress("\nFailed to directly nail everything, looking for weak nailings.")
#    sys.exit(-1)  #@@@
    while loose:
        newNailed = Set()
        if verbose>2:
            progress()
            progress("Pass: loose = %s" % loose)
        for x in loose.copy():
            if verbose>3: progress("Checking weakly node %s" % x)
            for s in f.statementsMatching(obj=x):
                pred, y = s.predicate(), s.subject()
                if y in loose:
                    if verbose > 4: progress("Can't nail to loose %s" % y)
                    continue    # Can't nail to something loose
                others = f.each(subj=y, pred=pred)
                # @@ Should ignore redirected equivalent nodes in others
                if len(others) != 1:
                    if verbose>4: progress("Can't nail: %s all are %s of %s." % (others, pred, y))
                    continue  # Defn would be ambiguous in this graph
                defi = (x, 0, pred, y)
                if verbose >4: progress("   Weakly-nailed %s as %s%s%s" % (x, y, "!", pred))
                loose.discard(x)
                newNailed.add(x)
                definitions.append(defi)
                break # progress
            else:
#......... remainder of the code omitted .........
Developer: Mchockalingam, Project: swap, Lines: 101, Source: delta.py


Example 19: loadFiles

def loadFiles(files):
    graph = myStore.formula()
    graph.setClosureMode("e")    # Implement sameAs by smushing
    graph = myStore.loadMany(files, openFormula=graph)
    if verbose: progress("Loaded", graph, graph.__class__)
    return graph
Developer: Mchockalingam, Project: swap, Lines: 6, Source: sync.py


Example 20: F

def F(s, n, m, extras):
    """
    Input: a state s, and a pair of node n and m
    Output: Whether adding n->m is worth persuing
    """
    ##    extras = BindingTree()
    try:
        hash(n)
        hash(m)
    except TypeError:
        return True
    if n in s.map or m in s.reverseMap:
        if extras is False:
            progress(" -- failed because of used already")
            return False
        return True

    if not easyMatches(s, n, m):
        progress(" -- failed because of easymatches")
        return False
    G1 = s.problem.G1
    G2 = s.problem.G2

    termin1, termout1, termin2, termout2, new1, new2 = 0, 0, 0, 0, 0, 0

    for obj, preds in G1.followers(n).items():
        if obj in s.map:
            image = s.map[obj]
            e = G2.edge(m, image)
            newBindings = BindingTree()
            if not e or not easyMatches(s, preds, e, newBindings):
                progress(" -- failed because of edge")
                return False
            if newBindings:
                extras.int_or(newBindings)
        else:
            if obj in s.t1_in:
                termin1 += 1
            if obj in s.t1_out:
                termout1 += 1
            if obj not in s.t1_in and obj not in s.t1_out:
                new1 += 1

    for subj, preds in G1.predecessors(n).items():
        if subj in s.map:
            image = s.map[subj]
            e = G2.edge(image, m)
            newBindings = BindingTree()
            if not e or not easyMatches(s, preds, e, newBindings):
                progress(" -- failed because of edge")
                return False
            if newBindings:
                extras.int_or(newBindings)
        else:
            if subj in s.t1_in:
                termin1 += 1
            if subj in s.t1_out:
                termout1 += 1
            if subj not in s.t1_in and subj not in s.t1_out:
                new1 += 1

    for obj, preds in G2.followers(m).items():
        progress("checking out %s's follower %s" % (m, obj))
        if obj in s.reverseMap:
            image = s.reverseMap[obj]
            e = G1.edge(n, image)
            newBindings = BindingTree()
            if not e or not easyMatches(s, preds, e, newBindings):
                progress(" -- failed because of edge")
                return False
        ##            if newBindings:
        ##                extras.int_or(newBindings)
        else:
            if obj in s.t2_in:
                termin2 += 1
            if obj in s.t2_out:
                termout2 += 1
            if obj not in s.t2_in and obj not in s.t2_out:
                new2 += 1

    for subj, preds in G2.predecessors(m).items():
        if subj in s.reverseMap:
            image = s.reverseMap[subj]
            e = G1.edge(image, n)
            newBindings = BindingTree()
            if not e or not easyMatches(s, preds, e, newBindings):
                progress(" -- failed because of edge")
                return False
        ##            if newBindings:
        ##                extras.int_or(newBindings)
        else:
            if subj in s.t2_in:
                termin2 += 1
            if subj in s.t2_out:
                termout2 += 1
            if subj not in s.t2_in and subj not in s.t2_out:
                new2 += 1

    ## For subgraph, change to <=
    if not isoCheck(termin1, termin2, termout1, termout2, new1, new2):
#......... remainder of the code omitted .........
Developer: AwelEshetu, Project: cwm, Lines: 101, Source: vf2.py



Note: The swap.diag.progress examples in this article were compiled by 纯净天空 from source code and documentation hosted on platforms such as GitHub and MSDocs; the snippets were selected from open-source projects contributed by various developers. Copyright of the source code belongs to the original authors; for distribution and use, please refer to the License of the corresponding project. Do not reproduce without permission.

