This article collects typical usage examples of the get function from Python's util.http module, for anyone wondering what http.get does, how to call it, and what real-world usage looks like.
Twenty code examples of the get function are shown below, ordered by popularity by default.
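All of the snippets come from skybot/CloudBot-style IRC bots, which bundle a small http helper module (most of the snippets themselves are Python 2). The exact wrapper differs from project to project; the following is only a minimal, hypothetical stand-in, written for Python 3, that illustrates the calling convention the examples rely on: extra keyword arguments become query-string parameters, post_data switches the request to POST, get_method can force e.g. HEAD, and the decoded response body is returned. It is not the bots' actual module (cookie-jar handling, user agents, and error wrapping are omitted).

# Hypothetical minimal stand-in for util.http.get -- NOT the actual bot module.
import urllib.parse
import urllib.request


def get(url, query_params=None, post_data=None, get_method=None, cookies=False, **kwargs):
    # Extra keyword arguments become query-string parameters, mirroring
    # calls such as http.get(search_url, q="time in London").
    params = dict(query_params or {})
    params.update(kwargs)
    if params:
        url += ('&' if '?' in url else '?') + urllib.parse.urlencode(params)
    # `cookies` is accepted for signature compatibility only; the real bot
    # modules route the request through a shared cookie jar.
    data = post_data.encode('utf-8') if isinstance(post_data, str) else post_data
    request = urllib.request.Request(url, data=data)
    if get_method:  # e.g. 'HEAD' in the down() examples
        request.method = get_method
    with urllib.request.urlopen(request) as response:
        return response.read().decode('utf-8', 'ignore')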
Example 1: fml
def fml(inp):
    ".fml [id] -- Gets a random quote from fmylife.com. Optionally gets [id]."
    inp = inp.replace("#", "")
    if inp:
        if not inp.isdigit():
            return "Invalid ID!"
        try:
            page = http.get(urljoin(base_url, inp))
        except (HTTPError, IOError):
            return "Could not fetch #%s. FML" % inp
    else:
        try:
            page = http.get(urljoin(base_url, 'random'))
        except (HTTPError, IOError):
            return "I tried to use .fml, but it was broken. FML"

    soup = BeautifulSoup(page)
    soup.find('div', id='submit').extract()

    post = soup.body.find('div', 'post')
    try:
        id = int(post.find('a', 'fmllink')['href'].split('/')[-1])
    except TypeError:
        return "Could not fetch #%s. FML" % inp
    body = misc.strip_html(' '.join(link.renderContents() for link in post('a', 'fmllink')))
    return '(#%d) %s' % (id, body)
Author: frozenMC, Project: CloudBot, Lines: 28, Source file: fmylife.py
Example 2: login
def login(user, password):
    http.jar.clear_expired_cookies()
    if any(cookie.domain == 'forums.somethingawful.com' and
           cookie.name == 'bbuserid' for cookie in http.jar):
        if any(cookie.domain == 'forums.somethingawful.com' and
               cookie.name == 'bbpassword' for cookie in http.jar):
            return
        # NOTE: asserting a non-empty string never fails, so this guard
        # against a malformed cookie jar is effectively a no-op.
        assert("malformed cookie jar")
    http.get("http://forums.somethingawful.com/account.php", cookies=True,
             post_data="action=login&username=%s&password=%s" % (user, password))
Author: TZer0, Project: botmily, Lines: 7, Source file: somethingawful.py
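Example 2 shows the POST side of http.get: passing post_data switches the request to POST, and cookies=True keeps the session in the shared cookie jar. A hypothetical follow-up call that would reuse that session (the thread URL and credentials are illustrative, not taken from the plugin):

# Hypothetical usage; URL and credentials are illustrative only.
login("my_user", "my_password")
page = http.get("http://forums.somethingawful.com/showthread.php?threadid=1234",
                cookies=True)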
Example 3: down
def down(inp):
    '''.down <url> -- checks to see if the site is down'''

    if 'http://' not in inp:
        inp = 'http://' + inp

    inp = 'http://' + urlparse.urlparse(inp).netloc

    # http://mail.python.org/pipermail/python-list/2006-December/589854.html
    try:
        http.get(inp, get_method='HEAD')
        return inp + ' seems to be up'
    except http.URLError:
        return inp + ' seems to be down'
Author: 030303, Project: skybot, Lines: 14, Source file: down.py
Example 4: down
def down(inp):
    """down <url> -- Checks if the site at <url> is up or down."""

    if 'http://' not in inp:
        inp = 'http://' + inp

    inp = 'http://' + urlparse.urlparse(inp).netloc

    # http://mail.python.org/pipermail/python-list/2006-December/589854.html
    try:
        http.get(inp, get_method='HEAD')
        return '{} seems to be up'.format(inp)
    except http.URLError:
        return '{} seems to be down'.format(inp)
Author: thejordan95, Project: Groovebot2, Lines: 14, Source file: down.py
Example 5: down
def down(inp):
    """down <url> -- Checks if the site at <url> is up or down.

    :type inp: str
    """
    if not inp.startswith("http://"):
        inp = 'http://' + inp

    inp = 'http://' + urllib.parse.urlparse(inp).netloc

    try:
        http.get(inp, get_method='HEAD')
        return '{} seems to be up'.format(inp)
    except http.URLError:
        return '{} seems to be down'.format(inp)
Author: FurCode, Project: RoboCop2, Lines: 15, Source file: pagecheck.py
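The mailing-list link in Examples 3-5 describes the trick these wrappers use for liveness checks: force the request method to HEAD so only headers are fetched. A self-contained sketch of the same idea using the standard library directly (Python 3; the URL is illustrative):

# Stand-alone sketch of the HEAD-request check used by the down() examples.
import urllib.error
import urllib.request


def is_up(url):
    request = urllib.request.Request(url, method='HEAD')
    try:
        urllib.request.urlopen(request, timeout=10)
        return True
    except urllib.error.URLError:  # also catches HTTPError (4xx/5xx)
        return False


print(is_up('http://example.com'))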
Example 6: zeroclick
def zeroclick(inp, say=None, input=None):
    "zeroclick/0click <search> -- gets zero-click info from DuckDuckGo"
    if inp.group(2) != "":
        if inp.group(2).lower() == "what is love":
            return "http://youtu.be/xhrBDcQq2DM"
        url = "http://duckduckgo.com/lite?"
        params = {"q": inp.group(2).replace("\001", "").encode('utf8', 'ignore')}
        url = "http://duckduckgo.com/lite/?" + urllib.urlencode(params)
        try:
            data = http.get(url).decode("utf-8", "ignore")
        except http.HTTPError, e:
            say(str(e) + ": " + url)
            return
        #search = re.findall("""\t<td>.\t\s+(.*?).\t<\/td>""", data, re.M | re.DOTALL)
        m = re.findall("\t<td>.\t\s+(.*?).\t<\/td>", data, re.M | re.DOTALL)
        if len(m) == 1:
            search = re.sub("\s+", " ", re.sub('<[^<]+?>', ' ', m[0]))
        else:
            search = None
        if search:
            out = HTMLParser.HTMLParser().unescape(search.replace("<br>", " ").replace("<code>", "\002").replace("</code>", "\002"))
            if out:
                say(u"\x0302\x02ǁ\x02\x03 {}".format(out.split(" [ More at")[0].split("}")[-1].strip()))
            else:
                say(u"\x0302\x02ǁ\x02\x03 No results")
        else:
            say(u"\x0302\x02ǁ\x02\x03 No results found.")
Author: nathan0, Project: skybot, Lines: 25, Source file: search.py
Example 7: clock
def clock(inp, say=None):
    '''.time <area> -- gets the time in <area>'''

    white_re = re.compile(r'\s+')
    tags_re = re.compile(r'<[^<]*?>')

    page = http.get('http://www.google.com/search', q="time in " + inp)

    soup = BeautifulSoup(page)

    response = soup.find('td', {'style': 'font-size:medium'})

    if response is None:
        return "Could not get the time for " + inp + "!"

    output = response.renderContents()
    output = ' '.join(output.splitlines())
    output = output.replace("\xa0", ",")
    output = white_re.sub(' ', output.strip())
    output = tags_re.sub('\x02', output.strip())
    output = output.decode('utf-8', 'ignore')

    return output
Author: Red-M, Project: frogbot, Lines: 26, Source file: gtime.py
Example 8: suggest
def suggest(inp, inp_unstripped=''):
    ".suggest [#n] <phrase> -- gets a random/the nth suggested google search"
    inp = inp_unstripped
    m = re.match('^#(\d+) (.+)$', inp)
    if m:
        num, inp = m.groups()
        num = int(num)
        if num > 10:
            return 'can only get first ten suggestions'
    else:
        num = 0

    #page = http.get('http://suggestqueries.google.com/complete/search', output='json', client='hp', q=inp)
    page = http.get('http://suggestqueries.google.com/complete/search', output='toolbar', hl='en', q=inp)
    xml = minidom.parseString(page)
    suggestions = xml.getElementsByTagName("CompleteSuggestion")
    #page_json = page.split('(', 1)[1][:-1]
    #suggestions = json.loads(page_json)[1]
    if not suggestions:
        return 'no suggestions found'

    if num:
        if len(suggestions) + 1 <= num:
            return 'only got %d suggestions' % len(suggestions)
        choice = num - 1
    else:
        choice = random.randint(1, len(suggestions)) - 1
    out = suggestions[choice].childNodes[0].getAttribute('data')
    return '#%d: %s' % (choice + 1, out)
Author: Cheekio, Project: botman, Lines: 32, Source file: suggest.py
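Example 8 pulls suggestions out of the output='toolbar' response with xml.dom.minidom. The snippet below reproduces just that parsing step on a hard-coded string shaped the way the code expects; the live endpoint's actual output format is an assumption here.

from xml.dom import minidom

# Hard-coded sample in the shape Example 8 expects; the live endpoint's
# response may differ.
sample = (
    '<toplevel>'
    '<CompleteSuggestion><suggestion data="python tutorial"/></CompleteSuggestion>'
    '<CompleteSuggestion><suggestion data="python decorators"/></CompleteSuggestion>'
    '</toplevel>'
)
xml = minidom.parseString(sample)
suggestions = xml.getElementsByTagName("CompleteSuggestion")
print([s.childNodes[0].getAttribute('data') for s in suggestions])
# ['python tutorial', 'python decorators']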
Example 9: mcpaid
def mcpaid(inp):
    ".mcpaid <username> -- Checks if <username> has a premium Minecraft account."
    login = http.get("http://www.minecraft.net/haspaid.jsp?user=" + inp)
    if "true" in login:
        return "The account '" + inp + "' is a premium Minecraft account!"
    else:
        return "The account '" + inp + "' is not a premium Minecraft account!"
Author: frozenMC, Project: CloudBot, Lines: 7, Source file: mctools.py
Example 10: oblique
def oblique(inp, nick='', chan=''):
    '.o/.oblique <command> <args> -- runs <command> using oblique web'
    ' services. see http://wiki.github.com/nslater/oblique/'

    update_commands()

    if ' ' in inp:
        command, args = inp.split(None, 1)
    else:
        command = inp
        args = ''
    command = command.lower()

    if command == 'refresh':
        update_commands(True)
        return '%d commands loaded.' % len(commands)

    if command in commands:
        url = commands[command]
        url = url.replace('${nick}', nick)
        url = url.replace('${sender}', chan)
        url = url.replace('${args}', http.quote(args.encode('utf8')))
        try:
            return http.get(url)
        except http.HTTPError, e:
            return "http error %d" % e.code
Author: Ashtheking, Project: APCSBot, Lines: 26, Source file: oblique.py
Example 11: show_title
def show_title(match, nick='', chan='', say=None):
    matched = match.group().encode('utf-8')
    url = urlnorm.normalize(matched)
    host = Request(url).get_host()
    if not nick in ignore:
        page, response = http.get_html_and_response(url)
        message = ''
        if host not in ignore_hosts:
            parser = BeautifulSoup(response)
            title = parser.title.string.strip()
            if title:
                message = 'URL title: %s' % (title)
        # Shorten URLs that are over 80 characters.
        if len(url) >= 80:
            short_url = http.get(
                'http://is.gd/create.php',
                query_params={'format': 'simple', 'url': matched}
            )
            # Cheap error checking
            if 'error: please' not in short_url.lower():
                if message:
                    message += ' | Short URL: %s'
                else:
                    message = 'Short URL: %s'
                message = message % (short_url)
        if message:
            say(message)
Author: kennethlove, Project: forrstdotcom, Lines: 34, Source file: urls.py
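Example 11 also demonstrates passing query_params explicitly, here to is.gd's "simple" API, which answers with the shortened URL as plain text. A hedged stand-alone equivalent using only the standard library (Python 3):

import urllib.parse
import urllib.request


def shorten(long_url):
    # is.gd's format=simple response body is just the shortened URL.
    query = urllib.parse.urlencode({'format': 'simple', 'url': long_url})
    with urllib.request.urlopen('http://is.gd/create.php?' + query) as resp:
        return resp.read().decode('utf-8').strip()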
Example 12: parse_standings
def parse_standings(db=None):
    url = "http://www.nascar.com/en_us/sprint-cup-series/drivers.html"
    try:
        page = http.get(url)
    except HTTPError:
        print "Can't get standings."
        return ""

    soup = BeautifulSoup(page)
    drivers = soup.find_all('article', class_='driverCard')
    for driver in drivers:
        data = {
            'first_name': '',
            'last_name': '',
            'driver_no': -1,
            'rank': -1,
            'points': -1
        }
        if 'data-first-name' in driver.attrs:
            data['first_name'] = driver.attrs['data-first-name']
        if 'data-last-name' in driver.attrs:
            data['last_name'] = driver.attrs['data-last-name']
        if 'data-rank' in driver.attrs:
            data['rank'] = int(driver.attrs['data-rank'].replace('--', '-1'))
        if 'data-number' in driver.attrs:
            data['driver_no'] = driver.attrs['data-number']
            if data['driver_no'] == '':
                data['driver_no'] = -1
            else:
                data['driver_no'] = int(data['driver_no'])
        data['points'] = int(driver.find('dl', class_='points').find('dd').find(text=True).replace('--', '-1'))
        upsert_standings(db, data)
Author: craisins, Project: nascarbot, Lines: 32, Source file: nascar_live.py
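Example 12 feeds the page returned by http.get into BeautifulSoup 4 and reads data-* attributes off each driver card. The snippet below reproduces that attribute handling on a hard-coded fragment; the markup is a made-up approximation of the real page, not scraped output.

from bs4 import BeautifulSoup

# Made-up fragment in the shape parse_standings() expects.
html = ('<article class="driverCard" data-first-name="Jane" '
        'data-last-name="Doe" data-rank="--" data-number="7">'
        '<dl class="points"><dd>123</dd></dl></article>')
soup = BeautifulSoup(html, 'html.parser')
card = soup.find_all('article', class_='driverCard')[0]
rank = int(card.attrs['data-rank'].replace('--', '-1'))   # '--' means no rank
points = int(card.find('dl', class_='points').find('dd').find(text=True))
print(card.attrs['data-first-name'], rank, points)        # Jane -1 123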
Example 13: convert
def convert(inp, say=None):
    m = reg.findall(inp)
    v1, c1, c2 = m[0]
    j = http.get('http://www.google.com/ig/calculator?hl=en&q={0}{1}=?{2}'.format(v1, c1, c2))
    g = greg.findall(j.decode('utf-8', errors='ignore'))
    if j:
        return '{0} = {1}'.format(*g)
Author: ChrisJernigan, Project: homero, Lines: 7, Source file: convert.py
Example 14: parse_json
def parse_json(json_url, db=None):
    # check to see if it's a 404 page
    try:
        page = http.get(json_url)
    except HTTPError:
        print "Can't get live stats."
        return ""

    page_matches = re.search(r'404 Not Found', page)
    if page_matches is not None and page_matches.group(0):
        return False

    js = json.loads(page)
    raceinfo = js
    race_id = raceinfo['RaceID']
    print "HERE IS THE RACE_ID => {}".format(race_id)
    nascar['current_race_id'] = race_id
    raceinfo = clean_raceinfo(raceinfo)
    upsert_raceinfo(db, raceinfo)

    previous_leader = get_current_leader(race_id, db)

    passings = json.loads(page)
    passings = passings['Passings']
    for driver in passings:
        driver = clean_racestats(driver)
        upsert_racestats(race_id, db, driver)

    current_leader = get_current_leader(race_id, db)
    if current_leader != previous_leader:
        messages.append("\x02[NEW LEADER]\x02 #{} {}".format(str(current_leader[0]), current_leader[1]))

    print "parsed json"
Author: craisins, Project: nascarbot, Lines: 33, Source file: nascar_live.py
Example 15: suggest
def suggest(inp, inp_unstripped=''):
    ".suggest [#n] <phrase> -- gets a random/the nth suggested google search"
    inp = inp_unstripped
    m = re.match('^#(\d+) (.+)$', inp)
    if m:
        num, inp = m.groups()
        num = int(num)
        if num > 10:
            return 'can only get first ten suggestions'
    else:
        num = 0

    page = http.get('http://google.com/complete/search', output='json', client='hp', q=inp)
    page_json = page.split('(', 1)[1][:-1]
    suggestions = json.loads(page_json)[1]
    if not suggestions:
        return 'no suggestions found'

    if num:
        if len(suggestions) + 1 <= num:
            return 'only got %d suggestions' % len(suggestions)
        out = suggestions[num - 1]
    else:
        out = random.choice(suggestions)

    return '#%d: %s' % (int(out[2][0]) + 1, out[0].replace('<b>', '').replace('</b>', ''))
Author: Animosity, Project: skybot, Lines: 25, Source file: suggest.py
Example 16: get_zipped_xml
def get_zipped_xml(*args, **kwargs):
    try:
        path = kwargs.pop("path")
    except KeyError:
        raise KeyError("must specify a path for the zipped file to be read")
    zip_buffer = StringIO(http.get(*args, **kwargs))
    return etree.parse(ZipFile(zip_buffer, "r").open(path))
Author: underisk, Project: skybot, Lines: 7, Source file: tvdb.py
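Example 16 buffers the raw bytes from http.get in memory, opens them as a ZIP archive, and parses a single member as XML (it comes from skybot's tvdb plugin, which consumes zipped XML feeds). A hypothetical call, with a made-up URL and archive member name:

# Hypothetical usage; the URL and member name are illustrative only.
doc = get_zipped_xml("http://example.com/feeds/series.zip", path="en.xml")
print(doc.getroot().tag)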
Example 17: _search
def _search(inp, say):
    url = "http://duckduckgo.com/lite?" + urllib.urlencode({"q": inp.encode('utf8', 'ignore')})
    try:
        data = http.get(url)
    except http.HTTPError, e:
        say(str(e) + ": " + url)
        return
Author: gbyers, Project: skybot, Lines: 7, Source file: search.py
Example 18: hashtag
def hashtag(inp, say=None, db=None, bot=None, me=None, conn=None, input=None, chan=None, notice=None):
    "<word>? -- Shows what data is associated with <word>."
    disabledhashes = database.get(db, 'channels', 'disabledhashes', 'chan', chan)
    split = inp.group(1).strip().split(" ")
    try:
        if chan[0] != '#':
            pass
        elif split[0].lower() in disabledhashes.lower():
            notice('{} is disabled.'.format(split[0]))
            return
    except TypeError:
        pass

    try:
        prefix_on = bot.config["plugins"]["factoids"].get("prefix", False)
    except KeyError:
        prefix_on = False

    db_init(db)

    # split up the input
    split = inp.group(1).strip().split(" ")
    factoid_id = split[0]

    if len(split) >= 1:
        arguments = " ".join(split[1:])
    else:
        arguments = ""

    data = get_memory(db, factoid_id)

    if data:
        # factoid preprocessors
        if data.startswith("<py>"):
            code = data[4:].strip()
            variables = 'input="""%s"""; nick="%s"; chan="%s"; bot_nick="%s";' % (arguments.replace('"', '\\"'),
                                                                                  input.nick, input.chan, input.conn.nick)
            result = execute.eval_py(variables + code)
        elif data.startswith("<url>"):
            url = data[5:].strip()
            try:
                result = http.get(url)
            except http.HttpError:
                result = "Could not fetch URL."
        else:
            result = data

        # factoid postprocessors
        result = text.multiword_replace(result, shortcodes)

        if result.startswith("<act>"):
            result = result[5:].strip()
            me(result)
        else:
            if prefix_on:
                say("\x02[%s]:\x02 %s" % (factoid_id, result))
            else:
                say("\x02%s\x02 %s" % (factoid_id, result))
Author: FrozenPigs, Project: uguubot, Lines: 59, Source file: hashtags.py
Example 19: wordpress
def wordpress(inp, say=None):
    "wordpress/wp <name> -- checks if the WordPress subdomain exists"
    domain = inp.split(" ")[0]
    url = "http://en.wordpress.com/typo/?" + urllib.urlencode({"subdomain": domain})
    try:
        data = http.get(url)
    except http.HTTPError, e:
        return (str(e))
Author: gbyers, Project: skybot, Lines: 8, Source file: search.py
Example 20: tfw
def tfw(inp):
    '.tfw [zip|postcode] -- THE FUCKING WEATHER'
    src = http.get(tfw_url % inp)

    location = re.search(r_loc, src).group(1)
    temp = re.search(r_tmp, src).group(1)
    desc = re.search(r_dsc, src).group(1).replace("<br />", ". ")

    c = int((int(temp) - 32) * (5.0 / 9.0))

    return "%s. %sF/%sC. %s" % (location, temp, c, desc)
Author: ChrisJernigan, Project: homero, Lines: 8, Source file: tfw.py
Note: The util.http.get examples in this article were collected by 纯净天空 from GitHub, MSDocs, and other source-code and documentation hosting platforms. The snippets come from open-source projects contributed by various developers, and copyright remains with the original authors. Refer to each project's license before redistributing or reusing the code; do not republish without permission.