This article collects typical usage examples of the Python function supybot.utils.web.getUrl. If you are unsure exactly what getUrl does or how to call it, the hand-picked examples below should help.
The article shows 20 code examples of getUrl, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python examples.
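Before the numbered examples, here is a minimal sketch of the pattern nearly all of them share: build a URL (often with web.urlquote), call getUrl with an optional headers dict, and handle failures, which getUrl signals by raising web.Error (as Example 13 below shows). The HEADERS value and the fetch helper are illustrative placeholders, not code from any of the listed projects.

import supybot.utils.web as web

# Hypothetical User-agent header; many examples below assume a similar HEADERS dict.
HEADERS = {'User-agent': 'Mozilla/5.0 (compatible; example-bot)'}

def fetch(url):
    """Return the response body for url, or None if the fetch fails."""
    try:
        # getUrl returns the raw response body
        return web.getUrl(url, headers=HEADERS)
    except web.Error:
        # raised on HTTP or network errors
        return None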
Example 1: _query_freebase
def _query_freebase(self, work_type, thing):
    key = conf.get(conf.supybot.plugins.Cast.FreebaseApiKey)
    props = FREEBASE_TYPES[work_type]
    url = "https://www.googleapis.com/freebase/v1/search?query=%s&type=%s&key=%s" % (web.urlquote(thing), props['type'], key)
    response = simplejson.loads(web.getUrl(url, headers=HEADERS))
    if len(response['result']) == 0:
        return None
    else:
        fbid = response['result'][0]['id']
        query = {
            "id": fbid,
            "type": props['type'],
            "name": None,
            "limit": 1
        }
        query.update(props['subquery'])
        url = "https://www.googleapis.com/freebase/v1/mqlread?query=%s&key=%s" % (web.urlquote(simplejson.dumps(query)), key)
        response = simplejson.loads(web.getUrl(url, headers=HEADERS))
        result = response['result']
        if result is None:
            return None
        else:
            return {
                'props': props,
                'url': "http://www.freebase.com" + result['id'],
                'title': result['name'],
                'characters': props['extractor'](result)
            }
Developer: XSlicer, Project: supybot-plugins, Lines: 28, Source: plugin.py
Example 2: _query_freebase
def _query_freebase(self, work_type, thing):
    props = FREEBASE_TYPES[work_type]
    url = "https://api.freebase.com/api/service/search?query=%s&type=%s" % (web.urlquote(thing), props['type'])
    response = simplejson.loads(web.getUrl(url, headers=HEADERS))
    if len(response['result']) == 0:
        return None
    else:
        fbid = response['result'][0]['id']
        query = {
            'escape': False,
            'query': {
                "id": fbid,
                "type": props['type'],
                "name": None,
                "limit": 1
            }
        }
        query['query'].update(props['subquery'])
        url = "https://api.freebase.com/api/service/mqlread?query=%s" % web.urlquote(simplejson.dumps(query))
        response = simplejson.loads(web.getUrl(url, headers=HEADERS))
        result = response['result']
        if result is None:
            return None
        else:
            return {
                'props': props,
                'url': "http://www.freebase.com" + result['id'],
                'title': result['name'],
                'characters': props['extractor'](result)
            }
Developer: bibliotechy, Project: supybot-plugins, Lines: 30, Source: plugin.py
Example 3: loadByName
def loadByName(self, name):
    if name.isdigit():
        return self.loadById(name)
    import supybot.utils.web as web
    lines = filter(lambda x: '<id>' in x,
                   web.getUrl('http://api.erpk.org/citizen/search/' + name + '/1.xml?key=nIKh0F7U').split('\n'))
    if not lines:
        return None
    line = lines[0]
    id = line.split('>')[1].split('<')[0]
    return self.loadById(id)
Developer: kg-bot, Project: SupyBot, Lines: 10, Source: lib.py
Example 4: urbandict
def urbandict(self, irc, msg, args, opts, words):
    """<phrase>

    Returns the definition and usage of <phrase> from UrbanDictionary.com.
    """
    use_definition = None
    for (opt, arg) in opts:
        if opt == 'def':
            use_definition = int(arg)
    terms = ' '.join(words)
    url = 'http://www.urbandictionary.com/define.php?term=%s' \
          % web.urlquote(terms)
    html = web.getUrl(url)
    doc = fromstring(html)
    if len(doc.xpath('//div[@id="not_defined_yet"]')):
        irc.error('No definition found.', Raise=True)
    definitions = []
    for div in doc.xpath('//div[@class="definition"]'):
        text = div.text_content()
        if div.getnext().tag == 'div' \
                and div.getnext().attrib.get('class', None) == 'example':
            text += ' [example] ' + div.getnext().text_content() + ' [/example] '
        text = re.sub(r'[\r\n]+', ' ', text)
        definitions.append(text)
    if use_definition is not None:
        definitions = [definitions[use_definition - 1]]
    reply_msg = '%s: %s' % (terms, '; '.join(definitions))
    irc.reply(reply_msg.encode('utf8'))
Developer: D0MF, Project: supybot-plugins, Lines: 28, Source: plugin.py
Example 5: _fetch_json
def _fetch_json(self, url):
    doc = web.getUrl(url, headers=HEADERS)
    try:
        json = simplejson.loads(doc)
    except ValueError:
        return None
    return json
Developer: sylvar, Project: supybot-plugins, Lines: 7, Source: plugin.py
Example 6: trends
def trends(self, irc, msg, args, timeframe):
    """@trends [current|daily|weekly]

    Return top trending Twitter topics for one of three timeframes:
    current, daily or weekly. Default is current.
    """
    if not timeframe:
        timeframe = "current"
    if timeframe not in ["current", "daily", "weekly"]:
        irc.reply("Invalid timeframe. Must be one of 'current', 'daily' or 'weekly'")
        return
    url = "http://search.twitter.com/trends/%s.json" % timeframe
    try:
        doc = web.getUrl(url, headers=HEADERS)
        json = simplejson.loads(doc)
    except:
        irc.reply("uh-oh, something went awry")
        return
    trends = json["trends"].values()[0]
    tnames = [x["name"] for x in trends]
    resp = ", ".join(["%d. %s" % t for t in zip(range(1, len(tnames) + 1), tnames)])
    irc.reply(resp.encode("utf8", "ignore").replace("\n", " ").strip(" "))
Developer: sylvar, Project: supybot-plugins, Lines: 25, Source: plugin.py
Example 7: _query_tmdb
def _query_tmdb(self, cmd, args):
    url = "http://api.themoviedb.org/2.1/%s/en/json/%s/%s" % (cmd, TMDBK, urllib.quote(str(args)))
    doc = web.getUrl(url, headers=HEADERS)
    try:
        json = simplejson.loads(doc)
    except ValueError:
        return None
    return json
Developer: bibliotechy, Project: supybot-plugins, Lines: 8, Source: plugin.py
Example 8: _chefjivevalleypig
def _chefjivevalleypig(self, irc, type, s):
    params = urlencode(dict(input=s, type=type))
    url = 'http://www.cs.utexas.edu/users/jbc/bork/bork.cgi?' + params
    resp = web.getUrl(url, headers=HEADERS)
    resp = re.sub('&(ampway|emp);', '&', resp)
    resp = BS.BeautifulStoneSoup(resp, convertEntities=BS.BeautifulStoneSoup.HTML_ENTITIES).contents[0]
    resp = re.sub('\n', ' ', resp)
    irc.reply(resp.encode('utf-8', 'ignore').strip())
Developer: bshum, Project: supybot-plugins, Lines: 8, Source: plugin.py
Example 9: drunk
def drunk(self, irc, msg, s):
    params = urlencode(dict(text=s, voice="drunk"))
    url = "http://www.thevoicesofmany.com/text.php?" + params
    resp = web.getUrl(url, headers=HEADERS)
    soup = BS.BeautifulSoup(resp)
    try:
        translated = soup.find("td", id="top").blockquote.string
    except:
        irc.reply("oops, didn't work")
        return
    irc.reply(translated.encode("utf-8", "ignore").strip())
Developer: bibliotechy, Project: supybot-plugins, Lines: 10, Source: plugin.py
Example 10: _yelp_api
def _yelp_api(self, params):
    p = params.copy()
    p["ywsid"] = YWSID
    url = "http://api.yelp.com/business_review_search?" + urlencode(p)
    doc = web.getUrl(url, headers=HEADERS)
    try:
        json = simplejson.loads(doc)
    except ValueError:
        return None
    return json
Developer: sylvar, Project: supybot-plugins, Lines: 10, Source: plugin.py
Example 11: drunk
def drunk(self, irc, msg, s):
    params = urlencode(dict(text=s, voice='drunk'))
    url = 'http://www.thevoicesofmany.com/text.php?' + params
    resp = web.getUrl(url, headers=HEADERS)
    soup = BS.BeautifulSoup(resp)
    try:
        translated = soup.find('td', id='top').blockquote.string
    except:
        irc.reply("oops, didn't work")
        return
    irc.reply(translated.encode('utf-8', 'ignore').strip())
Developer: sdellis, Project: supybot-plugins, Lines: 10, Source: plugin.py
Example 12: _search
def _search(self, term):
    xml = web.getUrl(SERVICE_URL % urlencode({"QueryString": term}), headers=HEADERS)
    parser = etree.XMLParser(ns_clean=True, remove_blank_text=True)
    tree = etree.parse(StringIO(xml), parser)
    results = []
    for r in self._xpath(tree, "//ns:Result"):
        label = self._xpath(r, "ns:Label/text()", 0)
        uri = self._xpath(r, "ns:URI/text()", 0)
        category = self._xpath(r, "ns:Categories/ns:Category/ns:Label/text()", 0)
        results.append((label, category, uri))
    return results
Developer: gsf, Project: supybot-plugins, Lines: 11, Source: plugin.py
Example 13: _getJsonResponse
def _getJsonResponse(self, url, retries=2):
    try:
        log.debug('Retrieving: %s' % (url))
        doc = web.getUrl(url, headers=HEADERS)
        log.debug('Response: %s' % (doc))
        response = simplejson.loads(doc)
        return response
    except web.Error, e:
        log.warning('Error: %s', str(e))
        if retries > 0:
            log.warning('Retries left: %d' % (retries))
            return self._getJsonResponse(url, retries=retries - 1)
Developer: D0MF, Project: supybot-plugins, Lines: 12, Source: plugin.py
Example 14: lyricsmania_urls
def lyricsmania_urls(artist, title):
    title_norm = normalize(title)
    artist_norm = normalize(artist)
    url = 'http://www.lyricsmania.com/%s_lyrics_%s.html' % \
          (title_norm, artist_norm)
    logger.info("Fetching %s" % url)
    html = web.getUrl(url, headers=HEADERS)
    if html.find('not in our archive') != -1:
        raise LyricsNotFound
    doc = fromstring(html)
    link = doc.xpath('//a[starts-with(@href, "/print")]')[0]
    return (url, 'http://www.lyricsmania.com/%s' % link.attrib['href'])
Developer: D0MF, Project: supybot-plugins, Lines: 12, Source: plugin.py
Example 15: sabram
def sabram(self, irc, msg, args):
    """[<text>]

    Get @sabram to falsely attribute a quote to Cliff!
    """
    template = '<sabram> Cliff said: "%s"'
    if args:
        irc.reply(template % ' '.join(args))
        return
    url = "http://www.ivyjoy.com/quote.shtml"
    try:
        resp = web.getUrl(url, headers={'User-agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.0.13) Gecko/2009073022 Firefox/3.0.13'})
        soup = BS.BeautifulSoup(resp)
        quotation = soup.find('font').contents[0].strip()
    except:
        irc.reply(template % "Some stupid error occurred")
        return
    irc.reply(template % quotation, prefixNick=False)
Developer: sdellis, Project: supybot-plugins, Lines: 16, Source: plugin.py
Example 16: unicode
def unicode(self, irc, msg, args, query):
    """[query] - Look up unicode character details
    """
    url = "http://unicodelookup.com/lookup?"
    url = url + urlencode({'q': query, 'o': 0})
    doc = web.getUrl(url, headers=HEADERS)
    try:
        json = simplejson.loads(doc)
        responses = []
        for result in json['results']:
            ucode = result[2].replace('0x', 'U+')
            responses.append('%s (%s): %s [HTML: %s / Decimal: %s / Hex: %s]' % (ucode, result[5], result[4], result[3], result[1], result[2]))
        response = '; '.join(responses).encode('utf8', 'ignore')
        irc.reply(response)
    except ValueError:
        irc.reply('No unicode characters matching /' + query + '/ found.')
Developer: D0MF, Project: supybot-plugins, Lines: 16, Source: plugin.py
Example 17: lyricsmania
def lyricsmania(artist, title):
    try:
        (ref_url, print_url) = lyricsmania_urls(artist, title)
        logger.info("Fetching %s" % print_url)
        headers = HEADERS.copy()
        headers['Referer'] = ref_url
        html = web.getUrl(print_url, headers=headers)
        doc = fromstring(html)
        lyrics = doc.xpath('//div[@id="printprintx"]')[0]
        return {
            'artist': artist,
            'song': title,
            'lyrics': lyrics.text_content()
        }
    except LyricsNotFound:
        return None
Developer: D0MF, Project: supybot-plugins, Lines: 16, Source: plugin.py
Example 18: untiny
def untiny(self, irc, msg, args, url):
    """<url>

    Return the whole URL for a tiny URL."""
    data = json.loads(getUrl(self.registryValue('service') % url).decode())
    if 'org_url' in data:
        irc.reply(data['org_url'])
    elif 'error' in data:
        num, msg = data['error']
        messages = {
            '0': _('Invalid URL'),
            '1': _('Unsupported tinyurl service'),
            '2': _('Connection to tinyurl service failed'),
            '3': _('Unable to get the original URL'),
        }
        irc.error(messages[num])
Developer: thefinn93, Project: Supybot-plugins, Lines: 16, Source: plugin.py
Example 19: asteroid
def asteroid(self, irc, msg, args):
    """
    Fetch the next Potentially Hazardous Asteroid as
    reported by the Near Earth Object Program
    (http://www.cfa.harvard.edu/iau/lists/PHACloseApp.html)
    """
    # stupid astronomers and their stupid <pre> data
    # example of parsed pha
    # ('2002 AT4 ',       => object name
    #  '2511159.67',      => julian date
    #  '2163 Mar. 22.17', => calendar date
    #  '0.05000')         => distance in AU
    pattern = re.compile(r'\s*([\(\)\w ]+? )\s*([\d\.]+)\s*(\d{4} [a-z\.]+\s*[\d\.]+)\s*([\d\.]+)', re.I)
    # get now in julian
    julian_now = (time.time() / 86400.0) + 2440587
    url = 'http://www.cfa.harvard.edu/iau/lists/PHACloseApp.html'
    html = web.getUrl(url, headers=HEADERS)
    tree = fromstring(html)
    pre = tree.xpath('//pre')[0]
    lines = pre.text.split('\n')[3:]
    lines = [l for l in lines if len(l)]
    # match group tuples become our object data
    phas = [re.match(pattern, l).groups() for l in lines]
    # ignore those earlier than now
    phas = [p for p in phas if float(p[1]) > julian_now]
    # sort by julian date
    phas.sort(lambda a, b: cmp(float(a[1]), float(b[1])))
    # grab the next event
    (name, jd, date, au) = phas[0]
    date = date.replace('.', ' ')
    # the %j is just a placeholder
    date = datetime.strptime(date, "%Y %b %d %j")
    # convert AU to miles
    miles = float(au) * 92955887.6
    resp = "Object '%s' will pass within %s miles of earth on %s"
    irc.reply(resp % (name.strip(), miles, date.strftime("%A, %B %d, %Y")))
Developer: D0MF, Project: supybot-plugins, Lines: 46, Source: plugin.py
Example 20: songlist
def songlist(artist, searchstring=None):
    artist = normalize(artist)
    url = 'http://lyricsmania.com/%s_lyrics.html' % artist
    logger.info("Fetching " + url)
    html = web.getUrl(url, headers=HEADERS)
    doc = fromstring(html)
    titles = []
    for a in doc.xpath('//a'):
        if a.attrib.has_key('href') \
                and a.attrib['href'].endswith("_lyrics_%s.html" % artist):
            song = a.text_content()
            if searchstring:
                if not re.search(searchstring, song, re.I):
                    continue
            titles.append(song)
    return [re.sub(' lyrics$', '', x) for x in titles]
Developer: D0MF, Project: supybot-plugins, Lines: 19, Source: plugin.py
Note: The supybot.utils.web.getUrl examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other source-code and documentation platforms. The snippets come from open-source projects contributed by various developers; copyright remains with the original authors, and any use or redistribution is subject to each project's license. Please do not republish without permission.