This article collects typical usage examples of the Python pydblite.Base class. If you have been wondering exactly what the Base class does, how to call it, or what real-world Base code looks like, the curated class examples below should help.
A total of 20 code examples of the Base class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
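Before the individual examples, here is a short, self-contained sketch that strings together the pydblite.Base calls that recur throughout them: create(), create_index(), insert(), commit(), keyword selection, comparison queries and update(). The file path, field names and values are invented for illustration; only the API calls themselves are taken from the snippets below.

from pydblite import Base

db = Base('people.pdl')                    # a .pdl file backs the database
db.create('name', 'age', mode="open")      # define the fields, or open the file if it already exists
db.create_index('name')                    # optional index, exposed afterwards as db._name[...]

rec_id = db.insert(name="alice", age=31)   # insert() returns the auto-generated __id__
db.insert(name="bob", age=27)
db.commit()                                # persist the changes to disk

for rec in db(name="alice"):               # keyword selection returns the matching records
    db.update(rec, age=32)                 # update a record in place
db.commit()

older = [r for r in (db("age") > 30)]      # comparison queries also return matching records
print(len(db), [r['name'] for r in older])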
Example 1: generateWeights
def generateWeights(graph, weightFile, param):
    pdb = Base(weightFile)
    pdb.create('pair', 'node1', 'node2', 'TS02', 'TS05', 'TS08', 'JC')
    pdb.create_index('pair')
    sortedNodes = sorted(graph.nodes())
    for node in sortedNodes:
        others = sorted(set(n for n in sortedNodes if n > node))
        for other in others:
            if graph.has_edge(node, other):
                informations = list(edge for n1, n2, edge in graph.edges([node, other], data=True)
                                    if (n1 == node and n2 == other) or (n1 == other and n2 == node))
                timesofLinks = []
                for info in informations:
                    timesofLinks.append(int(info['time']))
                bagNode1 = list(eval(edge['keywords']) for n1, n2, edge in graph.edges([node], data=True)
                                if n1 != other and n2 != other)
                bagNode2 = list(eval(edge['keywords']) for n1, n2, edge in graph.edges([other], data=True)
                                if n1 != node and n2 != node)
                total_publications = len(informations)
                k = int(param.t0_) - max(timesofLinks)
                decayfunction02 = (1 - 0.2) ** k
                decayfunction05 = (1 - 0.5) ** k
                decayfunction08 = (1 - 0.8) ** k
                pdb.insert(str(node) + ';' + str(other), node, other,
                           total_publications * decayfunction02,
                           total_publications * decayfunction05,
                           total_publications * decayfunction08,
                           get_jacard_domain(bagNode1, bagNode2))
    pdb.commit()
    return pdb
Developer: AndersonChaves, Project: Predicao-de-Links, Lines: 30, Source: FullExecutionGraphRich.py
Example 2: generateWeights
def generateWeights(graph, weightFile, param):
    pdb = Base(weightFile)
    pdb.create('pair', 'node1', 'node2', 'FTI01', 'FTI02', 'FTI03', 'FTI04',
               'FTI05', 'FTI06', 'FTI07', 'FTI08', 'FTI09')
    pdb.create_index('pair')
    sortedNodes = sorted(graph.nodes())
    for node in sortedNodes:
        others = sorted(set(n for n in sortedNodes if n > node))
        for other in others:
            if graph.has_edge(node, other):
                informations = list(edge for n1, n2, edge in graph.edges([node, other], data=True)
                                    if (n1 == node and n2 == other) or (n1 == other and n2 == node))
                timesofLinks = []
                for info in informations:
                    timesofLinks.append(int(info['time']))
                total_publications = len(informations)
                k = int(param.t0_) - max(timesofLinks)
                FTI01 = total_publications * (0.1 ** k)
                FTI02 = total_publications * (0.2 ** k)
                FTI03 = total_publications * (0.3 ** k)
                FTI04 = total_publications * (0.4 ** k)
                FTI05 = total_publications * (0.5 ** k)
                FTI06 = total_publications * (0.6 ** k)
                FTI07 = total_publications * (0.7 ** k)
                FTI08 = total_publications * (0.8 ** k)
                FTI09 = total_publications * (0.9 ** k)
                pdb.insert(str(node) + ';' + str(other), node, other,
                           FTI01, FTI02, FTI03, FTI04, FTI05, FTI06, FTI07, FTI08, FTI09)
    pdb.commit()
    return pdb
Developer: cptullio, Project: Predicao-de-Links, Lines: 33, Source: FullExecutionFTI.py
Example 3: printdb
def printdb(dbpath):  # Prints the contents of a PyDBLite database to the console
    db = Base(dbpath)
    if db.exists():
        db.open()
        retstr = ""
        for obj in db:
            retstr += str(obj)
            retstr += "\n"
        print retstr
        return retstr
    else:
        print "The database does not exist or is corrupt.\n"
Developer: magomez96, Project: AdamTestBot, Lines: 12, Source: utils.py
Example 4: printdb
def printdb(dbpath):  # Prints the contents of a PyDBLite database to the console
    db = Base(dbpath)
    if db.exists():
        db.open()
        retstr = ""
        for obj in db:
            retstr += str(obj)
            retstr += "\n"
        encoded = retstr.encode("utf-8", errors="ignore")
        print(encoded)
    else:
        print("The database does not exist or is corrupt.\n")
Developer: agincel, Project: AdamTestBot, Lines: 12, Source: utils.py
Example 5: generateDataForCalculate
def generateDataForCalculate(self):
    if self.trainnigGraph == None:
        self.generating_Training_Graph()
    _nodes = sorted(self.trainnigGraph.nodes())
    adb = Base(self.filePathTrainingGraph + ".calc.pdl")
    adb.create('pairNodes', 'common', 'time', 'domain')
    for node in sorted(_nodes):
        othernodes = set(n for n in _nodes if n > node)
        for other in othernodes:
            common = set(networkx.common_neighbors(self.trainnigGraph, node, other))
            arestas = self.trainnigGraph.edges([node, other], True)
Developer: AndersonChaves, Project: Predicao-de-Links, Lines: 13, Source: Parameterization.py
Example 6: __init__
def __init__(self, database_name=None):
    print("Opening " + database_name)
    db_r = Base(database_name)
    # Choose the DB of the Question Thread URL
    db_r.create('url', 'date', mode="open")
    # Check if the DB is empty or new
    if len(db_r) == 0:
        print("ERROR: Database not found or empty")
        sys.exit()
    else:
        print("Database elements: " + str(len(db_r)))
        for r in db_r:
            self.url_to_scrape.append(UrlDate(r["url"], r["date"]))
        # Making a SET of the Database in order to delete duplicate URLS
        self.url_to_scrape = {x.url: x for x in self.url_to_scrape}.values()
        print("Database elements after set operation: " + str(len(db_r)))
Developer: piercolella, Project: qa-scrapers, Lines: 16, Source: YahooScraper.py
Example 7: open_spider
def open_spider(self, spider):
    # Creation of DB
    self.db = Base(spider.database)
    self.db.create('uid', 'type', 'author', 'title', 'text', 'date_time',
                   'tags', 'views', 'answers', 'resolve', 'upvotes', 'url',
                   mode="override")
    dispatcher.connect(self.spider_closed, signals.spider_closed)
Developer: piercolella, Project: qa-scrapers, Lines: 7, Source: pipelines.py
Example 8: do_Post
def do_Post(self, result, request, args):
    def transfomType(x):
        if isinstance(x, unicode):
            return str(x)
        else:
            return x

    requestBody = args['requestContent']
    ####### Replace this section by your logic #######
    vTestId = transfomType(json.loads(requestBody)['testId'])
    vTestMessage = transfomType(json.loads(requestBody)['testMessage'])
    responseCode = 200  # ResponseCode.Ok
    db = Base('database_service6.pdl')
    db.create('testId', 'testMessage', mode="open")
    db.insert(testId=vTestId, testMessage=vTestMessage)
    db.commit()
    result = []
    responseBody = json.dumps(result, sort_keys=True, indent=4, separators=(',', ': '))
    ####### Replace this section by your logic #######
    request.setResponseCode(responseCode)
    resp = utils.serviceResponse(responseCode, responseBody)
    return resp
Developer: LucasFLima, Project: Mestrado, Lines: 29, Source: service6_srv.py
Example 9: do_Delete
def do_Delete(self, result, request, args):
    def transfomType(x):
        if isinstance(x, unicode):
            return str(x)
        else:
            return x

    ####### Replace this section by your logic #######
    db = Base('database_service6.pdl')
    db.create('testId', 'testMessage', mode="open")
    result = db(testId=int(args['testId']))
    if len(result) == 0:
        responseCode = 404  # ResponseCode.Ok
        responseBody = json.dumps(result, sort_keys=True, indent=4, separators=(',', ': '))
    else:
        responseCode = 200  # ResponseCode.Ok
        responseBody = json.dumps(result[0], sort_keys=True, indent=4, separators=(',', ': '))
        db.delete(result[0])
        db.commit()
    ####### Replace this section by your logic #######
    request.setResponseCode(responseCode)
    resp = utils.serviceResponse(responseCode, responseBody)
    return resp
Developer: LucasFLima, Project: Mestrado, Lines: 25, Source: service6_srv.py
Example 10: do_Get
def do_Get(self, result, request, args):
    ####### Replace this section by your logic #######
    db = Base('database_service6.pdl')
    db.create('testId', 'testMessage', mode="open")
    result = db(testId=int(args['testId']))
    if len(result) == 0:
        responseCode = 404  # ResponseCode.Ok
        responseBody = json.dumps(result, sort_keys=True, indent=4, separators=(',', ': '))
    else:
        responseCode = 200  # ResponseCode.Ok
        responseBody = json.dumps(result[0], sort_keys=True, indent=4, separators=(',', ': '))
    ####### Replace this section by your logic #######
    request.setResponseCode(responseCode)
    resp = utils.serviceResponse(responseCode, responseBody)
    return resp
Developer: LucasFLima, Project: Mestrado, Lines: 20, Source: service6_srv.py
Example 11: checkCondition
def checkCondition(cls, result):
    if result == "error":
        return "erro"
    else:
        db = Base('database_service1.pdl')
        db.create('cod', 'message', mode="open")
        db.insert(cod='1', message='valid')
        db.insert(cod='2', message='not valid')
        db.commit()
        # for rec in (db("age") > 30):
        for rec in db:
            print rec["cod"] + ' ' + rec["message"]
        return "ok"
Developer: LucasFLima, Project: Mestrado, Lines: 17, Source: tmp.py
Example 12: get_gbq_query_results
def get_gbq_query_results(gbq_service, query, prj_id='ace-amplifier-455', index_col=None,
                          col_order=None, cache=True, cache_path='', db=None):
    # set_trace()
    if cache:
        if db is None:
            db = Base(cache_path).create('query', 'result_df', mode='open')
            # db.create_index('query')
        final_df = extract(db, query)
        if len(final_df) > 0:
            # print 'extracted from cache ' + db.path
            return final_df
    df = gbq.read_gbq(query, prj_id)
    if cache:
        db.insert(query, df)
        db.commit()
        print 'saved to cache ' + db.path
    return df
Developer: sergey-linio, Project: weekly_reports, Lines: 21, Source: auth_to_gbq2.py
Example 13: __init__
class Posts:

    def __init__(self, filename, erase_db):
        self.db = Base(filename)
        self.db.create('author', 'content', 'date', mode="override" if erase_db else "open")

    def addPost(self, post):
        """ Persist a Post object in db and returns auto-generated id. """
        post.id = self.db.insert(author=post.author, content=post.content, date=post.date)
        self.db.commit()
        return post.id

    def getPost(self, id):
        """ Get a post by its id. Returns a Post object or None if id is not found. """
        db_entry = self.db[id] if id in self.db else None
        return self.__createPost(db_entry) if db_entry is not None else None

    def getPosts(self, from_date=None, to_date=None, author=None):
        """ Get all posts matching optionally provided conditions. Returns a list (can be empty). """
        iterator = self.db.filter()
        if from_date is not None:
            iterator = iterator & (self.db("date") > from_date)
        if to_date is not None:
            iterator = iterator & (self.db("date") < to_date)
        if author is not None:
            iterator = iterator & (self.db("author") == author)
        return [self.__createPost(db_entry) for db_entry in iterator]

    def getPostsCount(self):
        """ Get total number of posts in db. """
        return len(self.db)

    def __createPost(self, db_entry):
        return Post(db_entry['author'], db_entry['content'], db_entry['date'], db_entry['__id__'])
Developer: aragornis, Project: vdm-ws, Lines: 34, Source: repository.py
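Example 13's getPosts() narrows its result set by intersecting pydblite selections with &, starting from db.filter(). Below is a stand-alone sketch of just that composition pattern, on a throwaway database with made-up field names and values; it is not part of the quoted project.

from pydblite import Base

db = Base('posts_demo.pdl')
db.create('author', 'date', mode="override")
db.insert(author="ann", date=20200101)
db.insert(author="ann", date=20210101)
db.insert(author="bob", date=20210601)
db.commit()

# Start from the full record set, then intersect it with further conditions,
# exactly as getPosts() does above.
selection = db.filter()
selection = selection & (db("date") > 20200601)
selection = selection & (db("author") == "ann")
print([r['date'] for r in selection])   # expected: [20210101]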
Example 14: likes
def likes(currentMessage):
    try:
        db = Base('chatStorage/likes/likes.pdl')  # The path to the database
        db.create('userID', 'likes', 'dislikes', 'history', 'liked', mode="open")  # Create a new DB if one doesn't exist. If it does, open it
        sentMyKarma = False
        try:
            likes = 0
            dislikes = 0
            if int(currentMessage.reply_to_message.from_user.id) == int(117924410):  # is replying to bot message with /likes
                for user in db:
                    if int(user['userID']) == 117924410:
                        likes = int(user['likes'])
                        dislikes = int(user['dislikes'])
                karma = int(likes) - int(dislikes)
                response = "Since you asked, I have " + str(likes) + " likes and " + str(dislikes) + " dislikes, for a total of " + str(karma) + " karma. "
                if karma > 0:
                    response += "\xF0\x9F\x98\x83".decode("utf-8")  # smiley
                else:
                    response += "\xF0\x9F\x98\xAD".decode("utf-8")  # crying
                sentMyKarma = True
                return response
        except Exception:
            print traceback.format_exc()
        if not sentMyKarma:
            userWasFound = False
            for user in db:
                if int(user['userID']) == int(currentMessage.from_user.id):
                    userWasFound = True
                    likes = int(user['likes'])
                    dislikes = int(user['dislikes'])
            if userWasFound:
                return currentMessage.from_user.first_name + ", you have " + str(likes) + " likes and " + str(dislikes) + " dislikes, for a total of " + str(int(likes) - int(dislikes)) + " karma."
            else:
                return "No like data found!"
    except Exception:
        print traceback.format_exc()
        return ""
Developer: agincel, Project: AdamTestBot, Lines: 39, Source: atbLikesDB.py
Example 15: get_results_db
def get_results_db(clear_cache=False, skip=[]):
    cache_file = 'cache/results.pdl'
    db = Base(cache_file)
    if clear_cache or not db.exists() or os.path.getmtime(cache_file) < os.path.getmtime(results_dir):
        warnings.warn('Rebuilding results cache...')
        columns = set()
        rows = []
        p = pathlib.Path(results_dir)
        for config_file in p.glob('*.config'):
            with config_file.open() as config_fh:
                settings_hash = config_file.stem
                row = json.loads(config_fh.read())
                if settings_hash in skip:
                    continue
                row['hash'] = settings_hash
                tests_count = analyze.count(config_file.parent, settings_hash)
                row['iostat_cpu'], len_cpu_values = analyze.iostat_cpu(config_file.parent, settings_hash)
                row['iperf_result'], len_iperf_values = getattr(analyze, row['iperf_name'])(config_file.parent, settings_hash, row)
                if tests_count != len_cpu_values or tests_count != len_iperf_values:
                    raise analyze.AnalysisException('For test {}, mismatch in cardinality of tests between count ({}), iostat ({}) and iperf ({})'.format(
                        settings_hash, tests_count, len_cpu_values, len_iperf_values), settings_hash)
                if len_iperf_values > 0:
                    min_fairness = row['iperf_result']['fairness'][0] - row['iperf_result']['fairness'][1]
                    if min_fairness < (1 - 1 / (2 * row['parallelism'])):
                        warnings.warn('For test {}, fairness has a critical value: {}.'.format(settings_hash, row['iperf_result']['fairness']), RuntimeWarning)
                columns = columns | set(row.keys())
                rows.append(row)
        db.create(*columns, mode='override')
        for r in rows:
            db.insert(**r)
        db.commit()
        warnings.warn('Results cache built.')
    else:
        warnings.warn('Reusing results cache.')
        db.open()
    return db
Developer: serl, Project: topoblocktest, Lines: 38, Source: test_master.py
Example 16: open_spider
def open_spider(self, spider):
    filename = "urls_log.txt"
    self.log_target = codecs.open(filename, 'a+', encoding='utf-8')
    self.log_target.truncate()
    self.db = Base('URL_database.pdl')
    self.db.create('url', 'date', mode="open")
    self.log_target.write("***New url scraping session started at: " + str(datetime.datetime.strftime(datetime.datetime.now(), ' %Y-%m-%d %H:%M:%S ')) + " ***" + "\n")
    print("***New url scraping session started at: " + str(datetime.datetime.strftime(datetime.datetime.now(), ' %Y-%m-%d %H:%M:%S ')) + " ***" + "\n")
    self.log_target.write("*** Total url in the Database BEFORE new search: " + str(len(self.db)) + " ***" + "\n")
    dispatcher.connect(self.spider_closed, signals.spider_closed)
Developer: piercolella, Project: qa-scrapers, Lines: 14, Source: pipelines.py
Example 17: DBPipeline
class DBPipeline(object):

    def __init__(self):
        # Creating log file
        filename = "session_log.txt"
        self.log_target = codecs.open(filename, 'a+', encoding='utf-8')
        self.log_target.truncate()
        self.log_target.write("***New session started at: " + str(datetime.datetime.strftime(datetime.datetime.now(), ' %Y-%m-%d %H:%M:%S ')) + " ***" + "\n")
        # Creating database for items
        self.db = Base('QuestionThreadExtracted.pdl')
        self.db.create('uid', 'type', 'author', 'title', 'text', 'date_time',
                       'tags', 'views', 'answers', 'resolve', 'upvotes', 'url', mode="open")
        # Some data for the log file
        self.number_of_questions = 0
        self.number_of_answers = 0
        self.last_id = 0
        dispatcher.connect(self.spider_closed, signals.spider_closed)

    def process_item(self, item, spider):
        self.db.insert(uid=item['uid'],
                       type=item['type'],
                       author=item['author'],
                       title=item['title'],
                       text=item['text'],
                       date_time=item['date_time'],
                       tags=item['tags'],
                       views=item['views'],
                       answers=item['answers'],
                       resolve=item['resolve'],
                       upvotes=item['upvotes'],
                       url=item['url'])
        # Count questions and answers
        if "question" in item['type']:
            self.number_of_questions += 1
            if self.last_id < item['uid']:
                self.last_id = item['uid']
        else:
            self.number_of_answers += 1
        self.db.commit()
        return item

    def spider_closed(self, spider):
        self.log_target.write("Questions founded: " + str(self.number_of_questions) + "\n")
        self.log_target.write("Answers founded: " + str(self.number_of_answers) + "\n")
        self.log_target.write("Last UID: " + str(self.last_id) + "\n" + "\n")
        self.log_target.write("***Session End at: " + str(datetime.datetime.strftime(datetime.datetime.now(), ' %Y-%m-%d %H:%M:%S ')) + " ***" + "\n")
        self.log_target.close()
Developer: piercolella, Project: qa-scrapers, Lines: 57, Source: pipelines.py
Example 18: __init__
class Rates:
    """ Persistence layer for exchange rates. """

    def __init__(self, filename, erase_db):
        self.__db = Base(filename)
        self.__db.create('currency', 'rate', mode="override" if erase_db else "open")
        self.__db.create_index('currency')

    def setRate(self, currency, rate):
        """ Persist a currency's exchange rate. """
        assert rate > 0.0
        records = self.__db._currency[currency]
        if len(records) > 0:
            assert len(records) == 1  # We never expect several exchange rates for the same currency
            self.__db.update(records[0], rate=rate)
        else:
            self.__db.insert(currency=currency, rate=rate)
        self.__db.commit()

    def getRate(self, currency):
        """ Get the exchange rate with EUR for the provided currency or None if it is not found.
        An exchange rate for currency CUR is Value(EUR) / Value(CUR): 1 EUR = rate(CUR) CUR <=> 1/rate(CUR) EUR = 1 CUR.
        """
        records = self.__db._currency[currency]
        return records[0]['rate'] if len(records) > 0 else None

    def getAllRates(self):
        """ Get all known exchange rates as a dict. """
        return [(r['currency'], r['rate']) for r in self.__db]

    def getRatesCount(self):
        """ Get total number of exchange rates in db. """
        return len(self.__db)
Developer: aragornis, Project: currency-converter, Lines: 36, Source: rates.py
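Example 18 relies on the index attribute that create_index() adds to the base (db._currency[...]) to decide between update() and insert(). Here is a compact, stand-alone sketch of that lookup-then-upsert pattern; the file name, field names and values are invented for illustration and are not taken from the quoted project.

from pydblite import Base

db = Base('rates_demo.pdl')
db.create('currency', 'rate', mode="override")
db.create_index('currency')            # exposes db._currency for keyed lookups

db.insert(currency="USD", rate=1.08)
db.commit()

records = db._currency["USD"]          # list of records whose 'currency' field equals "USD"
if records:
    db.update(records[0], rate=1.10)   # update the existing record
else:
    db.insert(currency="USD", rate=1.10)
db.commit()
print(db._currency["USD"][0]['rate'])  # expected: 1.1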
Example 19: calculatingWeights
def calculatingWeights(graph, nodesnotLinked, database, calculatingFile):
    pdb = Base(calculatingFile)
    pdb.create('node1', 'node2', 'cnWTS02', 'cnWTS05', 'cnWTS08', 'aaWTS02', 'aaWTS05', 'aaWTS08')
    pdb.create_index('node1', 'node2')
    element = 0
    qtyofNodesToProcess = len(nodesnotLinked)
    for pair in nodesnotLinked:
        element = element + 1
        FormatingDataSets.printProgressofEvents(element, qtyofNodesToProcess, "Calculating features for nodes not linked: ")
        neighbors_node1 = all_neighbors(graph, pair[0])
        neighbors_node2 = all_neighbors(graph, pair[1])
        len_neihbors_node1 = len(neighbors_node1)
        len_neihbors_node2 = len(neighbors_node2)
        CommonNeigbors = neighbors_node1.intersection(neighbors_node2)
        CNWts02Feature = 0
        CNWts05Feature = 0
        CNWts08Feature = 0
        AAWts02Feature = 0
        AAWts05Feature = 0
        AAWts08Feature = 0
        CNWJCFeature = 0
        AAWJCFeature = 0
        for cn in CommonNeigbors:
            item = get_partOfWeightCalculating(graph, database, pair, cn)
            CNWts02Feature = CNWts02Feature + item['cnWts02']
            CNWts05Feature = CNWts05Feature + item['cnWts05']
            CNWts08Feature = CNWts08Feature + item['cnWts08']
            AAWts02Feature = AAWts02Feature + item['aaWts02']
            AAWts05Feature = AAWts05Feature + item['aaWts05']
            AAWts08Feature = AAWts08Feature + item['aaWts08']
            # CNWJCFeature = CNWJCFeature + item['cnWJC']
            # AAWJCFeature = AAWJCFeature + item['aaWJC']
        pdb.insert(str(pair[0]), str(pair[1]), CNWts02Feature, CNWts05Feature, CNWts08Feature,
                   AAWts02Feature, AAWts05Feature, AAWts08Feature)
    pdb.commit()
    return pdb
Developer: cptullio, Project: Predicao-de-Links, Lines: 39, Source: FullExecutionGraphRich_versaoTempo.py
Example 20: convertcsv2db
def convertcsv2db(csvpath, dbpath):  # Converts a CSV file to a PyDBLite database
    db = Base(dbpath)
    try:
        csvfile = open(csvpath, 'rb')
    except csv.Error:
        print "Could not open CSV file at " + csvpath + "\n"
    reader = csv.reader(csvfile)
    header = reader.next()
    try:
        db.create(*header)
    except IOError:
        print "Existing DB at " + dbpath + "\n"
    for row in reader:
        db.insert(*row)
    db.commit()
Developer: magomez96, Project: AdamTestBot, Lines: 15, Source: utils.py
Note: The pydblite.Base class examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs and similar platforms. The snippets are taken from open-source projects contributed by their respective developers; copyright remains with the original authors. Please consult each project's license before redistributing or reusing the code, and do not reproduce this compilation without permission.