This article collects typical usage examples of the read function in Python's urllib.request workflow (strictly speaking, the read() method of the response object returned by urllib.request.urlopen()). If you are struggling with questions such as: how exactly is read used? How do you call it? What does it look like in real code? Then the curated examples here may help.
Below are 20 code examples of the read function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help our system recommend better Python code examples.
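Before the collected examples, here is a minimal sketch of the pattern nearly all of them share (the URL is a placeholder, not taken from any project below): urlopen() returns a response object whose read() method yields the raw body as bytes, which usually must be decoded before being parsed as JSON or HTML.

import urllib.request

# Minimal sketch of the common pattern; 'https://example.com' is a placeholder URL.
with urllib.request.urlopen('https://example.com') as response:
    raw = response.read()                                      # the whole body, as bytes
    charset = response.headers.get_content_charset() or 'utf-8'
    text = raw.decode(charset)                                 # decode before json.loads() / BeautifulSoup
print(text[:200])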
Example 1: createItemList

def createItemList():
    request = urllib.request.urlopen('http://api.walmartlabs.com/v1/taxonomy?format=json&apiKey=tkbnu8astb9xxtn2ux9vw73b')
    response = request.read()
    jdict = json.loads(response.decode())
    categories = []
    items = {}
    for i in jdict['categories']:
        categories.append(i['id'])
    # Pick three random category ids and fetch a page of items for each
    nums = random.sample(range(0, len(categories)), 3)
    reqStr1 = 'http://api.walmartlabs.com/v1/paginated/items?format=json&&category=' + categories[nums[0]] + '&apiKey=tkbnu8astb9xxtn2ux9vw73b'
    reqStr2 = 'http://api.walmartlabs.com/v1/paginated/items?format=json&&category=' + categories[nums[1]] + '&apiKey=tkbnu8astb9xxtn2ux9vw73b'
    reqStr3 = 'http://api.walmartlabs.com/v1/paginated/items?format=json&&category=' + categories[nums[2]] + '&apiKey=tkbnu8astb9xxtn2ux9vw73b'
    request = urllib.request.urlopen(reqStr1)
    response = request.read()
    jdict = json.loads(response.decode())
    addToItemList(jdict, items)
    request = urllib.request.urlopen(reqStr2)
    response = request.read()
    jdict = json.loads(response.decode())
    addToItemList(jdict, items)
    request = urllib.request.urlopen(reqStr3)
    response = request.read()
    jdict = json.loads(response.decode())
    addToItemList(jdict, items)
    return items

Author: JonathanNix, Project: scripting, Lines: 31, Source: screenScrape.py
Example 2: _access_project

def _access_project():
    """
    Call the homepage of the project for the given branch if a URL is set.
    This is a cheap way to fill the lru cache.
    """
    if hasattr(env, 'url'):
        # wait for uwsgi restart after touch.
        time.sleep(10)
        for lang in settings.LANGUAGES:
            url = urllib.request.urlopen(env.url.format(lang[0]))
            with contextlib.closing(url) as request:
                request.read()
                print('Read response from: {}'.format(request.url))

Author: CDE-UNIBE, Project: qcat, Lines: 12, Source: __init__.py
Example 3: get_nhl_live_games

def get_nhl_live_games(self, e, webCall=False):
    if e.input:
        today = e.input
    else:
        today = datetime.date.today().strftime("%Y-%m-%d")
    url = "http://live.nhle.com/GameData/GCScoreboard/{}.jsonp".format(today)
    request = urllib.request.urlopen(url)
    # Strip the JSONP wrapper around the payload to get plain JSON
    data = request.read().decode()[15:-2]
    data = json.loads(data)
    games = []
    for game in data['games']:
        if not game['bsc']:
            start = game['bs'].replace(':00 ', ' ')
            gametxt = "{} - {} ({} ET)".format(game['atcommon'].title(),
                                               game['htcommon'].title(),
                                               start)
        else:
            gametxt = "{} {} - {} {} ({})".format(game['atcommon'].title(),
                                                  game['ats'],
                                                  game['hts'],
                                                  game['htcommon'].title(),
                                                  game['bs'])
        games.append(gametxt)
    if webCall:
        return " | ".join(games)
    e.output = " | ".join(games)
    return e

Author: KpaBap, Project: genmaybot, Lines: 30, Source: nhl.py
Example 4: listen

def listen(self):
    logging.debug(u'OnigiriAlert.listen() started.')
    url = TWITCASTING_API_LIVE_STATUS + '?type=json&user=' + self.user
    last_is_live = False
    while True:
        try:
            request = urllib.request.urlopen(url)
            encoding = request.headers.get_content_charset()
            response = request.read().decode(encoding)
            # logging.debug(response)
            parsed = json.loads(response)
            logging.debug(parsed)
            is_live = parsed["islive"]
            if last_is_live is False and is_live is True or DEBUG_FORCE_PUSH:
                self.notify(parsed)
                if DEBUG_FORCE_PUSH:
                    os.sys.exit()
            last_is_live = is_live
            # raise Exception('test exception')
        except Exception as error:
            logging.error("caught exception in polling loop, error: [{}]".format(error))
            # os.sys.exit()
        time.sleep(POLLING_INTERVAL)
    logging.debug(u'OnigiriAlert.listen() ended.')

Author: honishi, Project: onigiri-alert-server, Lines: 31, Source: onigiri.py
Example 5: main

def main():
    """Main function"""

    # PARSE OPTIONS ###########################################################
    parser = argparse.ArgumentParser(description='A BeautifulSoup snippet.')
    parser.add_argument("url", nargs=1, metavar="URL",
                        help="The URL of the webpage to parse.")
    args = parser.parse_args()
    url = args.url[0]
    #print("url:", url)

    # GET HTML ################################################################
    request = urllib.request.urlopen(url)
    #print("STATUS:", request.status)
    html = request.read()
    #print(html)

    # PARSE HTML ##############################################################
    soup = BeautifulSoup(html)
    #print(soup.prettify())
    for img in soup.find_all('img'):
        print(img.get('src'))

Author: jeremiedecock, Project: snippets, Lines: 29, Source: get_images.py
Example 6: dnsHistory

def dnsHistory(domain):
    rows = ''
    print("\n-- Checking dns history --")
    url = 'http://toolbar.netcraft.com/site_report?url=' + domain
    try:
        request = urllib.request.urlopen(url)
        html = request.read().decode('utf-8', errors='replace')  # read() returns bytes; decode before parsing
    except:
        html = ''
    soup = BeautifulSoup(''.join(html))
    tables = soup.findAll(attrs={'class': 'TBtable'})
    try:
        table = tables[1]
    except:
        table = ''  # Prevents errors if no history is returned
    rows = ''
    if table:
        rows = soup.table.findAll('tr')  # Need to edit out again
    x = -1
    try:
        for tr in rows:
            columns = tr.findAll('td')
            for td in columns:
                text = ''.join(td.find(text=True))
                if x % 5 == 0:  # Only ip addresses are checked
                    if dns.query(text):  # Finds the last ip that's not CloudFlare
                        print(output("The last known ip address is: %s" % text))
                        if text not in iplist:
                            iplist.append(text)
                        raise End  # Breaks out of multiple loops
                x += 1
    except End:
        pass
    print("\n#" + "-" * 77 + "#")

Author: alma4rebi, Project: V3n0M-Scanner, Lines: 34, Source: cloud.py
Example 7: family_download_json

def family_download_json(self, family):
    """
    Download json information from the internet. It does not
    save any data anywhere.
    """
    request = urllib.request.urlopen(self.family_download_url(family))
    return json.loads(request.read().decode('utf-8'))

Author: vfrico, Project: fontsquirrel_dl, Lines: 7, Source: fontsquirrel.py
Example 8: download

def download(self, name, md5):
    url = url_prefix + name
    response = ""
    try:
        request = urlopen(url)
        txt = request.read()
        # .decode('utf-8')
    except urllib.error.HTTPError as e:
        print('Unable to get %s - HTTPError = %s' % (name, e.reason))
        return False
    except urllib.error.URLError as e:
        print('Unable to get %s - URLError = %s' % (name, e.reason))
        return False
    except http.client.HTTPException as e:  # was httplib.error.HTTPException; http.client is the Python 3 module
        print('Unable to get %s - HTTPException' % name)
        return False
    except Exception as e:
        import traceback
        print('Unable to get %s - Exception = %s' % (name, traceback.format_exc()))
        return False
    web_md5 = self.get_md5(txt)
    if web_md5 != md5:
        print("Checksum error in %s. Download aborted" % name)
        return False
    new_name = os.path.join(base_dir, name + "." + self.web_version.replace(".", "_"))
    with open(new_name, "wb") as f:
        f.write(txt)
    return True

Author: Astalaseven, Project: Cnchi, Lines: 33, Source: updater.py
Example 9: pywget_inside_crawler

def pywget_inside_crawler(url, depth, start_dir, start_file, root_dir_name):
    """
    Crawl the given url, find all <a href> and <img src> tags,
    get the information inside the tags and apply pywget_recursive() on each of them.

    Arguments:
    url -- the url that is to be crawled
    depth -- total number of recursions
    start_dir -- the directory of this py file
    start_file -- the first file that was downloaded, stored to avoid cycles
    root_dir_name -- the root directory for downloading files
    """
    depth -= 1
    content = ''
    try:
        request = urllib.request.urlopen(url)
        content = request.read().decode("utf-8")
    except:
        pass

    # all the information that's inside <a href> and <img src> tags
    match = re.findall(r'<a href="(.*?)"', content) + \
            re.findall(r'<a href = "(.*?)"', content) + \
            re.findall(r'<img src="(.*?)"', content) + \
            re.findall(r'<img src = "(.*?)"', content)

    prefix = url[0:url.rfind('/')]  # a prefix of the link, useful to check if a link is under the same domain
    all_item_list = add_item_to_list(match, prefix)  # add information to a list

    for item in all_item_list:
        pywget_recursive(item, depth, start_dir, start_file, root_dir_name)  # recursively download the information

Author: xuanshenbo, Project: Web-crawler, Lines: 33, Source: challenge.py
Example 10: get_data_source_one

def get_data_source_one(self):
    """Retrieves Data from the first Yahoo Finance source"""
    data = 'http://finance.yahoo.com/webservice/v1/symbols/' + self.stock + '/quote?format=json&view=detail'
    request = urllib.request.urlopen(data)
    response = request.read()
    charset = request.info().get_content_charset('utf-8')  # fall back to utf-8 if no charset header is present
    self.data_s1 = json.loads(response.decode(charset))

Author: shanedonovan, Project: yahoo-finance, Lines: 7, Source: yahoo_finance.py
Example 11: main

def main():
    """Main function"""

    # PARSE OPTIONS ###########################################################
    parser = argparse.ArgumentParser(description='A BeautifulSoup snippet.')
    parser.add_argument("url", nargs=1, metavar="URL",
                        help="The URL of the webpage to parse.")
    args = parser.parse_args()
    url = args.url[0]
    print("url:", url)

    # GET HTML ################################################################
    request = urllib.request.urlopen(url)
    print("STATUS:", request.status)
    html = request.read()
    #print(html)

    # PARSE HTML ##############################################################
    soup = BeautifulSoup(html)
    print(soup.prettify())

    print("Element name:", soup.title.name)
    print("Element value:", soup.title.string)
    print()
    for anchor in soup.find_all('a'):
        print(anchor.get('href'))

Author: jeremiedecock, Project: snippets, Lines: 34, Source: test.py
Example 12: __init__

def __init__(self, force_update):
    self.web_version = ""
    self.web_files = []
    response = ""
    try:
        update_info_url = _url_prefix + "update.info"
        request = urlopen(update_info_url)
        response = request.read().decode("utf-8")
    except urllib.error.HTTPError as e:  # was urllib.HTTPError, which does not exist in Python 3
        logging.exception("Unable to get latest version info - HTTPError = %s" % e.reason)
    except urllib.error.URLError as e:  # was urllib.URLError
        logging.exception("Unable to get latest version info - URLError = %s" % e.reason)
    except http.client.HTTPException as e:  # was httplib.HTTPException (Python 2 name)
        logging.exception("Unable to get latest version info - HTTPException")
    except Exception as e:
        import traceback
        logging.exception("Unable to get latest version info - Exception = %s" % traceback.format_exc())
    if len(response) > 0:
        updateInfo = json.loads(response)
        self.web_version = updateInfo["version"]
        self.web_files = updateInfo["files"]
        logging.info("Cnchi Internet version: %s" % self.web_version)
    self.force = force_update

Author: pombredanne, Project: lution, Lines: 30, Source: updater.py
Example 13: download

def download(self, path):
    """Download the file, or keep it if it is already present"""
    target_path = self._generate_path(path)
    target_file = os.path.join(target_path, self.name)
    downf = not os.path.exists(target_file)
    if not downf:
        # At this point the file already exists locally
        self.path = target_file
        self.directory = target_path
    # Re-download if missing, or if the size on disk does not match
    downf = downf or (self.size != os.path.getsize(target_file))
    if downf:
        try:
            request = urllib.request.urlopen(self.url)
            f = open(target_file, 'wb')
            while True:
                data = request.read(100 * 1024)  # read in 100 KiB chunks
                if data:
                    print("downloading %s (%d/%d)\r" %
                          (self.name, os.path.getsize(target_file), self.size))
                    f.write(data)
                else:
                    break
            print("%s completed" % (self.name))
            f.close()
            self.path = target_file
            self.directory = target_path
        except urllib.error.HTTPError:
            path = None

Author: ilario-pierbattista, Project: hasp-tracker, Lines: 28, Source: dataset_downloader.py
Example 14: Challenge13

def Challenge13():
    import xmlrpc.client
    import urllib.request

    startAddr = 'http://www.pythonchallenge.com/pc/return/evil4.jpg'
    resultAddr = 'http://www.pythonchallenge.com/pc/return/'
    XMLRPCserver = xmlrpc.client.Server(
        'http://www.pythonchallenge.com/pc/phonebook.php'
    )
    auth_handler = urllib.request.HTTPBasicAuthHandler()
    auth_handler.add_password(realm='inflate',
                              uri=startAddr,
                              user='huge',
                              passwd='file')
    opener = urllib.request.build_opener(auth_handler)
    urllib.request.install_opener(opener)
    request = urllib.request.urlopen(startAddr)
    rData = request.read().decode()
    evilName = rData.split()[0]
    resultAddr += XMLRPCserver.phone(evilName).split('-')[1].lower() + '.html'
    print(resultAddr)

Author: jakamitsu, Project: pythonchallenge.com-solutions, Lines: 26, Source: solver.py
Example 15: getJSON

def getJSON(self, url):
    try:
        request = urllib.request.urlopen(url)
        data = json.loads(request.read().decode('UTF-8'))
        return data
    except urllib.error.URLError as e:
        logging.warning("Error: TWITCH API connection")

Author: choigit, Project: kritzbot, Lines: 7, Source: api.py
Example 16: callAPI

def callAPI(self, resourcePath, method, queryParams, postData,
            headerParams=None):
    url = self.apiServer + resourcePath
    headers = {}
    if headerParams:
        for param, value in headerParams.items():
            headers[param] = value
    #headers['Content-type'] = 'application/json'
    headers['api_key'] = self.apiKey
    if self.cookie:
        headers['Cookie'] = self.cookie
    data = None
    if queryParams:
        # Need to remove None values, these should not be sent
        sentQueryParams = {}
        for param, value in queryParams.items():
            if value != None:
                sentQueryParams[param] = value
        url = url + '?' + urllib.parse.urlencode(sentQueryParams)
    if method in ['GET']:
        # Options to add statements later on and for compatibility
        pass
    elif method in ['POST', 'PUT', 'DELETE']:
        if postData:
            headers['Content-type'] = 'application/json'
            data = self.sanitizeForSerialization(postData)
            data = json.dumps(data)
    else:
        raise Exception('Method ' + method + ' is not recognized.')
    if data:
        data = data.encode('utf-8')
    requestParams = MethodRequest(method=method, url=url,
                                  headers=headers, data=data)

    # Make the request
    request = urllib.request.urlopen(requestParams)
    encoding = request.headers.get_content_charset()
    if not encoding:
        encoding = 'iso-8859-1'
    response = request.read().decode(encoding)
    try:
        data = json.loads(response)
    except ValueError:  # PUT requests don't return anything
        data = None
    return data

Author: Iterable, Project: swagger-codegen, Lines: 60, Source: swagger.py
Example 17: dl_extra_infos

def dl_extra_infos(year, month):
    """ Download extra infos from CollecTor. """
    url = "https://collector.torproject.org/archive/relay-descriptors/extra-infos"
    filename = "extra-infos-%s-%s.tar.xz" % (year, month)
    save_dir_path = "extra-infos"
    if not os.path.isdir(save_dir_path):
        os.mkdir(save_dir_path)
    save_path = "%s/%s" % (save_dir_path, filename)
    if os.path.isfile(save_path):
        print(" [+] Extra infos %s found" % (save_path))
        return save_path
    # Check if the directory exists.
    if os.path.isdir("%s" % (save_path[:-7])):
        print(" [+] Extra infos %s found" % (save_path[:-7]))
        return save_path
    print(" [+] Downloading extra infos %s/%s" % (url, filename))
    try:
        request = urllib.request.urlopen("%s/%s" % (url, filename))
        if request.code != 200:
            print(" [-] Unable to fetch extra infos %s at %s" % (filename, url))
            return None
    except Exception as e:
        print(" [-] Unable to fetch %s/%s" % (url, filename))
        return None
    fp = open(save_path, "wb+")
    fp.write(request.read())
    fp.close()
    return save_path

Author: dgoulet, Project: tor-parser, Lines: 30, Source: parse3.py
Example 18: __init__

def __init__(self, color):
    Segment.__init__(self)
    self.set_icon('mail')
    self.build_module('N/A')
    unread = []
    hl = False
    try:
        for account in open(os.environ['XDG_CONFIG_HOME'] + '/gmailaccounts', encoding='utf-8'):
            (url, user, passwd) = account.split('|')
            auth_handler = urllib.request.HTTPBasicAuthHandler()
            auth_handler.add_password(realm='New mail feed', uri='https://mail.google.com/', user=user, passwd=passwd)
            opener = urllib.request.build_opener(auth_handler)
            urllib.request.install_opener(opener)
            request = urllib.request.urlopen(url)
            dom = xml.dom.minidom.parseString(request.read())
            count = dom.getElementsByTagName('fullcount')[0].childNodes[0].data
            if int(count) > 0:
                hl = True
            unread.append(count)
    except (IOError, ValueError, KeyError):
        return
    if hl:
        self.set_icon('mail')
        self.build_module(' / '.join(unread))

Author: laur89, Project: dwm-setup, Lines: 33, Source: pybar3_beta.py
Example 19: getJSON_text

def getJSON_text(url):
    request = urllib.request.urlopen(url)
    data = request.read()
    data_string = data.decode('UTF-8')
    print(data_string)
    return data_string

Author: choigit, Project: kritzbot, Lines: 7, Source: api.py
Example 20: hook_callback

def hook_callback(request, *args, **kwargs):
    import json
    import urllib.request
    print("hook here")
    data = request.read().decode('utf-8')
    res = json.loads(data)
    email = res['commits'][0]['author']['email']
    u = User.objects.filter(email__exact=email).first()
    p = Project.objects.filter(repository_url__exact=res['repository']['html_url']).first()

    from AutoDoApp.Manager import ManagerThread
    m = ManagerThread()
    m.put_request(req=res['repository']['html_url'], desc=p.description)
    token = u.access_token

    import time
    time.sleep(10)  # Temporary sleep
    branch_id = p.branch_count
    autodo_prefix_branch_name = "AutoDo_" + str(branch_id)
    branch_name = "refs/heads/" + autodo_prefix_branch_name
    create_a_branch(access_token=token,
                    branch_name=branch_name,
                    request=request)
    create_file_commit(token, branch_name, request)  # OAuth callback token
    create_pull_request(token, autodo_prefix_branch_name, request)
    p.update()
    return HttpResponse(res)

Author: jin8, Project: AutoDo, Lines: 27, Source: views.py
Note: The urllib.request.read examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various programmers, and copyright of the source code belongs to the original authors. For distribution and use, please refer to the corresponding project's License; do not reproduce without permission.