This article collects typical usage examples of Python's urllib2.urlopen function, drawn from real projects. If you are wondering what urlopen does, how to call it, or what idiomatic uses look like, the curated examples below should help.
Twenty code examples of urlopen are presented, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system surface better Python samples.
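Before diving into the collected examples, here is a minimal sketch of the basic call pattern (Python 2; example.com is a placeholder URL). urlopen returns a file-like object whose read() yields the response body; failures surface as urllib2.HTTPError or urllib2.URLError.

import urllib2

try:
    response = urllib2.urlopen('http://example.com/', timeout=10)
except urllib2.HTTPError as e:   # the server answered with an error status
    print 'HTTP error: %d' % e.code
except urllib2.URLError as e:    # DNS failure, refused connection, timeout, ...
    print 'connection error: %s' % e.reason
else:
    print response.getcode()     # HTTP status code, e.g. 200
    print response.read()[:200]  # first bytes of the body
    response.close()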
Example 1: DownloadUpdate
def DownloadUpdate(self, file):
self.log('Downloading: %s' % file)
dirfile = os.path.join(self.UpdateTempDir,file)
dirname, filename = os.path.split(dirfile)
if not os.path.isdir(dirname):
try:
os.makedirs(dirname)
            except OSError:
                self.log('Error creating directory: ' + dirname)
url = self.SVNPathAddress+urllib.quote(file)
try:
if re.findall(".xbt",url):
self.totalsize = int(re.findall("File length: ([0-9]*)",urllib2.urlopen(url+"?view=log").read())[0])
urllib.urlretrieve( url.decode("utf-8"), dirfile.decode("utf-8"))
else: urllib.urlretrieve( url.decode("utf-8"), dirfile.decode("utf-8") )
self.DownloadedFiles.append(urllib.unquote(url))
return 1
except:
try:
time.sleep(2)
if re.findall(".xbt",url):
self.totalsize = int(re.findall("File length: ([0-9]*)",urllib2.urlopen(url+"?view=log").read())[0])
urllib.urlretrieve(url.decode("utf-8"), dirfile.decode("utf-8"))
else: urllib.urlretrieve(url.decode("utf-8"), dirfile.decode("utf-8") )
urllib.urlretrieve(url.decode("utf-8"), dirfile.decode("utf-8"))
self.DownloadedFiles.append(urllib.unquote(url))
return 1
except:
self.log("Download failed: %s" % url)
self.DownloadFailedFiles.append(urllib.unquote(url))
return 0
Author: reversTeam | Project: sualfreds-repo | Lines: 31 | Source: updater_class.py
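The shape of DownloadUpdate — try once, pause two seconds, retry, then record the failure — is worth distilling. A generic sketch (the pause and single retry mirror the example; the function name and IOError handling are my additions):

import time
import urllib

def fetch_with_retry(url, dest, retries=1, pause=2):
    # Attempt the download, sleeping between attempts; tell the caller how it went.
    for attempt in range(retries + 1):
        try:
            urllib.urlretrieve(url, dest)
            return True
        except IOError:
            if attempt < retries:
                time.sleep(pause)
    return False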
Example 2: check_proxy
def check_proxy(self, specific={}):
""" Checks if proxy settings are set on the OS
Returns:
-- 1 when direct connection works fine
-- 2 when direct connection fails and any proxy is set in the OS
-- 3 and settings when direct connection fails but a proxy is set
see: https://docs.python.org/2/library/urllib.html#urllib.getproxies
"""
os_proxies = getproxies()
if len(os_proxies) == 0 and self.check_internet_connection:
logging.info("No proxy needed nor set. Direct connection works.")
return 1
elif len(os_proxies) == 0 and not self.check_internet_connection:
logging.error("Proxy not set in the OS. Needs to be specified")
return 2
else:
        # export the detected proxy settings to the environment variables
env['http_proxy'] = os_proxies.get("http")
env['https_proxy'] = os_proxies.get("https")
        # build and install a global opener that routes through the OS proxy
proxy = ProxyHandler({
'http': os_proxies.get("http"),
'https': os_proxies.get("https")
})
opener = build_opener(proxy)
install_opener(opener)
urlopen('http://www.google.com')
return 3, os_proxies
Author: etiennebr | Project: DicoGIS | Lines: 28 | Source: checknorris.py
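One caveat about the example: the final urlopen call raises instead of returning a code if the configured proxy itself is unreachable. A guarded variant, as a sketch (the probe URL mirrors the example; the helper name and timeout are my additions):

from urllib2 import ProxyHandler, build_opener, install_opener, urlopen, URLError

def verify_os_proxy(os_proxies):
    # Route all subsequent requests through the OS-level proxy.
    install_opener(build_opener(ProxyHandler({
        'http': os_proxies.get("http"),
        'https': os_proxies.get("https"),
    })))
    try:
        urlopen('http://www.google.com', timeout=10)
    except URLError:
        return None  # a proxy is configured but does not work
    return os_proxies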
Example 3: recognise_eHentai
def recognise_eHentai(link, path):
url = str(link)
page = urllib2.urlopen(url).read()
soup = BeautifulSoup(page)
name = soup.findAll('title')
name = name[0].get_text().encode('utf-8')
name = str(name)
path = path + '\\' + name
download_eHentai(link, path)
pages = soup.find_all('span')
pages = pages[1].get_text()
pages = int(pages)
z = 0
while (pages > z):
z = z + 1
sopa = soup.find('div', 'sn')
sopa = sopa.find_all('a')
sopa = sopa[2].get('href')
url = str(sopa)
download_eHentai(url, path)
page = urllib2.urlopen(url).read()
soup = BeautifulSoup(page)
sopa = soup.find('div', 'sn')
sopa = sopa.find_all('a')
sopa = sopa[2].get('href')
download_eHentai(sopa, path)
Author: DoctorMalboro | Project: HeyMang | Lines: 31 | Source: mango.py
Example 4: tag_to_server
def tag_to_server(scanid, tagid):
    try:
        myurl = tag_url % (scanid, tagid)
        urlopen(myurl)
        print 'sent to server'
    except:
        print 'error'
Author: bonniee | Project: bib-web-2 | Lines: 7 | Source: daemon.py
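Catching everything with a bare except: hides real failures. A sketch of the same call with narrowed exception handling (tag_url is assumed to be a module-level format string, as in the example; the timeout and return values are my additions):

import urllib2

def tag_to_server(scanid, tagid):
    myurl = tag_url % (scanid, tagid)  # tag_url assumed defined at module level
    try:
        urllib2.urlopen(myurl, timeout=10)
    except (urllib2.HTTPError, urllib2.URLError) as e:
        print 'error sending tag: %s' % e
        return False
    print 'sent to server'
    return True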
Example 5: pullPhotos
def pullPhotos(query):
print "looking for", query
url1 = "https://www.google.com/search?biw=1309&bih=704&sei=bsHjUbvaEILqrQeA-YCYDw&tbs=itp:lineart&tbm=isch&"
query2 = urllib.urlencode({"q": query})
req = urllib2.Request(url1 + query2, headers={"User-Agent": "Chrome"})
response = urllib2.urlopen(req).read()
parser = MyHTMLParser()
parser.feed(response)
print image_lib + "\\buffer\\" + query
if not os.path.exists(image_lib + "\\buffer"):
os.mkdir(image_lib + "\\buffer") # make directory to put them in
if not os.path.exists(image_lib + "\\buffer\\" + query):
os.mkdir(image_lib + "\\buffer\\" + query) # make directory to put them in
for i in xrange(5):
req_cat = urllib2.Request(cat_urls[i], headers={"User-Agent": "Chrome"})
response_cat = urllib2.urlopen(req_cat).read()
name = query + os.sep + query + str(i) + ".jpg"
fd = open(image_lib + "\\buffer\\" + name, "wb")
fd.write(response_cat)
fd.close()
print name, "written", "complexity is ", countComponents(image_lib + "\\buffer\\" + name)
print "done"
Author: TechBridgeWorld | Project: TactileGraphics | Lines: 29 | Source: pick_animal.py
Example 6: main
def main():
#for p in range(1,intGetMaxPage +1):
#soup = BeautifulSoup()
try:
resp = urllib2.urlopen(getUrl,timeout=10)
soup = BeautifulSoup(resp)
soup = soup.find('div' ,{'id':'prodlist'})
#for k in soup.findAll("div", {'class': 'p-name'}): # 抓< div class='p=name'>...< /div>
for k in soup.findAll('a', href=True):
try:
url = k.get('href')
print k.text
print url
page_url = homeUrl + url
print page_url
resp_text_page = urllib2.urlopen(homeUrl + url, timeout=10)
soup_text_page = BeautifulSoup(resp_text_page)
contextPageUrl(soup_text_page,page_url)
except:
print "Unexpected error:", sys.exc_info()[0]
print "Unexpected error:", sys.exc_info()[1]
continue
except:
#continue
print "Unexpected error:", sys.exc_info()[0]
print "Unexpected error:", sys.exc_info()[1]
pass
Author: choakai | Project: thesis | Lines: 33 | Source: Crawler_PPT_Politic.py
Example 7: flight_search_results
def flight_search_results(sid, searchid):
    # Strip the leading $ and commas, then convert the price to a float
def parse_price(p):
return float(p[1:].replace(',', ''))
    # Poll until the search has finished
while 1:
time.sleep(2)
        # Construct the URL used for polling
url = 'http://www.kayak.com/s/basic/flight?'
url += 'searchid=%s&c=5&apimode=1&_sid_=%s&version=1' % (searchid, sid)
doc = xml.dom.minidom.parseString(urllib2.urlopen(url).read())
        # Look for the morepending tag and wait until it is no longer 'true'
more_pending = doc.getElementsByTagName('morepending')[0].firstChild
if more_pending is None or more_pending.data == 'false':
break
    # Now download the complete result list
url = 'http://www.kayak.com/s/basic/flight?'
url += 'searchid=%s&c=999&apimode=1&_sid_=%s&version=1' % (searchid, sid)
doc = xml.dom.minidom.parseString(urllib2.urlopen(url).read())
    # Collect lists of the individual elements
prices = doc.getElementsByTagName('price')
departures = doc.getElementsByTagName('depart')
arrivals = doc.getElementsByTagName('arrive')
    # Zip them together
return zip([p.firstChild.data.split(' ')[1] for p in departures],
[p.firstChild.data.split(' ')[1] for p in arrivals],
[parse_price(p.firstChild.data) for p in prices])
Author: yangruihan | Project: PCI | Lines: 33 | Source: kayak.py
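The poll-then-fetch shape above — request a status URL until morepending turns false, then request the full result — generalizes well. A compact sketch (the morepending tag and two-second interval come from the example; the helper name is my addition):

import time
import urllib2
import xml.dom.minidom

def wait_until_complete(status_url, interval=2):
    # Poll the status URL until the <morepending> tag is absent or 'false'.
    while True:
        time.sleep(interval)
        doc = xml.dom.minidom.parseString(urllib2.urlopen(status_url).read())
        pending = doc.getElementsByTagName('morepending')
        if not pending or pending[0].firstChild is None \
           or pending[0].firstChild.data == 'false':
            return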
Example 8: parse
def parse(self,response):
#Get Access Token for Microsoft Translate
atrequest = urllib2.Request('https://datamarket.accesscontrol.windows.net/v2/OAuth2-13')
atrequest.add_data(atdata)
atresponse = urllib2.urlopen(atrequest)
access_token = json.loads(atresponse.read())['access_token']
hxs = HtmlXPathSelector(response)
sites = hxs.select('//span[contains(@class, "productsAzLink")]/a/text()').extract()
items = []
for site in sites:
text = []
item = IkeaItem()
item['name'],_,item['thing'] = unicode(site).partition(' ')
tosend = {'text': unicode(item['name']), 'from' : 'sv' , 'to' : 'en' }
request = urllib2.Request('http://api.microsofttranslator.com/v2/Http.svc/Translate?'+urllib.urlencode(tosend))
request.add_header('Authorization', 'Bearer '+access_token)
response = urllib2.urlopen(request)
doc = etree.fromstring(response.read())
for elem in doc.xpath('/foo:string', namespaces={'foo': 'http://schemas.microsoft.com/2003/10/Serialization/'}):
if elem.text:
elem_text = ' '.join(elem.text.split())
if len(elem_text) > 0:
text.append(elem_text)
item['translation'] = ' '.join(text)
items.append(item)
return items
Author: astephen2 | Project: ikea-translate | Lines: 33 | Source: ikea_spider.py
Example 9: resolve_novamov
def resolve_novamov(url, guid):
xbmc.log("Starting resolve_novamov with url: " + str(url) + " and guid: " + str(guid))
req = urllib2.Request(url)
req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
response = urllib2.urlopen(req)
link=response.read()
response.close()
    match1 = re.compile('flashvars.file="(.+?)"').findall(link)
    match2 = re.compile('flashvars.filekey="(.+?)"').findall(link)
    if not match1 or not match2:
        return 'CONTENTREMOVED'
    file = match1[-1]
    filekey = match2[-1]
novaurl = 'http://www.novamov.com/api/player.api.php?user=undefined&key=' + filekey + '&codes=undefined&pass=undefined&file=' + file
req = urllib2.Request(novaurl)
req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
response = urllib2.urlopen(req)
link=response.read()
response.close()
    match3 = re.compile('url=(.+?\.flv)').findall(link)
    if not match3:
        return 'CONTENTREMOVED'
    link = match3[-1]
print ('auth url is ' + str(link))
return link
Author: waleedkash | Project: pakee | Lines: 34 | Source: default.py
Example 10: start
def start(self):
with QMutexLocker(self.mutex):
self.stoped = False
#for i in range(self.start_p,self.end_p):
for i in range(1,3):
while self.suspended:
self.wait()
return
if self.stoped:
return
url ="http://www.99fang.com/service/agency/a1/?p=%d" % i
print url
try:
r = urllib2.urlopen(url).read()
soup = BeautifulSoup(r)
box = soup.find("div",{'class':'agency-call-box'})
lis = box("li")
for li in lis:
tel = li.a.string
print tel
r =urllib2.urlopen("http://suzhou.jjr360.com/app.php?c=spider&a=index&city=&tel=%s" % tel)
print r.read()
except:
pass
else:
#self.emit(SIGNAL("updateTime()"))
time.sleep(1)
Author: aviatorBeijing | Project: ptpy | Lines: 30 | Source: fetch99tel.py
Example 11: post
def post(user,passwd):
fp = open("Score.txt", "w")
login_url="http://www.dean.gxnu.edu.cn/jwxt/index.php/api/user/login"
data={}
data['phone']="+8613512345678"
data['username']=user
data['password']=passwd
post_data=urllib.urlencode(data)
req=urllib2.urlopen(login_url,post_data)
content=req.read()
sid=content[56:82]
data2={}
data2['session_id']=sid
url2="http://www.dean.gxnu.edu.cn/jwxt/index.php/api/chengji/getyxcj"
sessionid="PHPSESSID="+sid
post_data2=urllib.urlencode(data2)
req2=urllib2.Request(url2,post_data2)
req2.add_header('Cookie',sessionid)
resp=urllib2.urlopen(req2)
content2=json.loads(resp.read().encode('utf-8'))
print u"课程名称\t\t成绩\t\t年度/学期\t\tbk\t\tcx\t\t绩点"
fp.writelines("课程名称\t\t成绩\t\t年度/学期\t\tbk\t\tcx\t\t绩点\n")
for subject in content2['msg']:
print subject['kcmc'] + "\t\t" + subject['cj'] + "\t\t" + subject['ndxq'][:-1] + "/" + subject['ndxq'][-1] + "\t\t" + subject['bk'] + "\t\t" + subject['cx'] + "\t\t" + subject['jd']
# print "%-40s\t%-10s" % (subject['kcmc'], subject['cj'])
fp.write(subject['kcmc'] + "\t\t" + subject['cj'] + "\t\t" + subject['ndxq'][:-1] + "/" + subject['ndxq'][-1] + "\t\t" + subject['bk'] + "\t\t" + subject['cx'] + "\t\t" + subject['jd'] + "\n")
fp.close()
Author: 0x24bin | Project: hack_tools_for_me | Lines: 28 | Source: logingxnuforlinux.py
Example 12: urlread
def urlread(url, get={}, post={}, headers={}, timeout=None):
    # Append GET parameters to the URL; send POST parameters as the body.
    # (The original passed the GET dict as request data, which silently
    # turned it into a POST body and then overwrote it.)
    if get:
        url = url + '?' + urllib.urlencode(get)
    data = urllib.urlencode(post) if post else None
    req = urllib2.Request(url, data, headers=headers)
    if timeout is not None:
        return urllib2.urlopen(req, None, timeout).read()
    return urllib2.urlopen(req).read()
Author: kolia1985 | Project: odash-client | Lines: 7 | Source: helpers.py
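A quick usage sketch of the helper above (example.com and the field names are placeholders):

# GET with query parameters and a custom header
html = urlread('http://example.com/search',
               get={'q': 'python'},
               headers={'User-Agent': 'Mozilla/5.0'},
               timeout=10)

# POST form fields
html = urlread('http://example.com/login',
               post={'user': 'alice', 'pass': 'secret'})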
Example 13: getmodelvendor
def getmodelvendor(type,ipaddress):
if type=="thermostat":
modeladdress=ipaddress.replace('/sys','/tstat/model')
deviceModelUrl = urllib2.urlopen(modeladdress)
if (deviceModelUrl.getcode()==200):
deviceModel = parseJSONresponse(deviceModelUrl.read().decode("utf-8"),"model")
deviceVendor = "RadioThermostat"
deviceModelUrl.close()
return {'model':deviceModel,'vendor':deviceVendor}
elif type=="Philips":
deviceUrl = urllib2.urlopen(ipaddress)
dom=minidom.parse(deviceUrl)
deviceModel=dom.getElementsByTagName('modelName')[0].firstChild.data
deviceVendor=dom.getElementsByTagName('manufacturer')[0].firstChild.data
deviceUrl.close()
return {'model':deviceModel,'vendor':deviceVendor}
elif type=="WeMo":
deviceUrl = urllib2.urlopen(ipaddress)
dom=minidom.parse(deviceUrl)
deviceModel=dom.getElementsByTagName('modelName')[0].firstChild.data
deviceVendor=dom.getElementsByTagName('manufacturer')[0].firstChild.data
nickname = dom.getElementsByTagName('friendlyName')[0].firstChild.data
if str(deviceModel).lower() == 'socket':
deviceType = dom.getElementsByTagName('deviceType')[0].firstChild.data
deviceType = re.search('urn:Belkin:device:([A-Za-z]*):1',deviceType).groups()[0]
            # Anything other than a 'controllee' socket is reported as Unknown.
            if deviceType.lower() != 'controllee':
                deviceModel = 'Unknown'
deviceUrl.close()
return {'model':deviceModel,'vendor':deviceVendor,'nickname':nickname}
Author: kwarodom | Project: bemoss_os-1 | Lines: 32 | Source: WiFi.py
Example 14: getcommits_from_project
def getcommits_from_project(project):
global access_token
url1 = 'https://api.github.com/user'
request1=Request(url1)
request1.add_header('Authorization', 'token %s' % access_token)
response1 = urlopen(request1)
result1 = json.load(response1)
person = result1['login']
repo_info=['Fasta','js2839']
owner= repo_info[1]
repo = repo_info[0]
url = 'https://api.github.com/repos/'+owner+'/'+repo+'/commits'
data=[]
request = Request(url)
request.add_header('Authorization', 'token %s' % access_token)
response = urlopen(request)
result = json.load(response)
for i in range(len(result)):
print 'result0'
data.append([result[i]['commit']['message'], result[i]['commit']['author']['name'], result[i]['commit']['author']['date']])
print data[i]
for com in data:
(per,sub_name)=getPercentage(com[0])
err = save_to_db( per, sub_name, com[1], project, com[2])
return
Author: ychsieh | Project: Actually | Lines: 25 | Source: views.py
Example 15: login
def login():  # simulate the Sina Weibo login flow
postdata = {
'entry': 'weibo',
'gateway': '1',
'from': '',
'savestate': '7',
'userticket': '1',
'ssosimplelogin': '1',
'vsnf': '1',
'vsnval': '',
'su': '',
'service': 'miniblog',
'servertime': '',
'nonce': '',
'pwencode': 'rsa2', #'wsse',
'sp': '',
'encoding': 'UTF-8',
####
'prelt':'115',
'rsakv':'',
####
'url':'http://weibo.com/ajaxlogin.php?framelogin=1&callback=parent.sinaSSOController.feedBackUrlCallBack',
#'http://weibo.com/ajaxlogin.php?framelogin=1&callback=parent.sinaSSOController.feedBackUrlCallBack',
'returntype': 'META'
}
global account
username = 'lasclocker%[email protected]'%(account)
pwd = '1161895575'
url = 'http://login.sina.com.cn/sso/login.php?client=ssologin.js(v1.4.5)'
    try:  # fetch servertime and nonce, the two random values required for login
servertime, nonce, pubkey, rsakv = get_servertime()
except:
return
#global postdata
postdata['servertime'] = servertime
postdata['nonce'] = nonce
postdata['rsakv']= rsakv
    postdata['su'] = get_user(username)  # encode the username
    postdata['sp'] = get_pwd(pwd, servertime, nonce, pubkey)  # encrypt the password
postdata = urllib.urlencode(postdata)
    #headers = {'User-Agent':'Mozilla/5.0 (X11; Linux i686; rv:8.0) Gecko/20100101 Firefox/8.0'}  # set the POST headers; adjust for the target platform
headers = {'User-Agent':'Mozilla/5.0 (X11; Linux i686; rv:10.0) Gecko/20100101 Firefox/10.0'}
#headers = {'User-Agent':'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322)'}
req = urllib2.Request(
url = url,
data = postdata,
headers = headers
)
result = urllib2.urlopen(req)
text = result.read()
p = re.compile('location\.replace\(\'(.*?)\'\)')
try:
login_url = p.search(text).group(1)
###print login_url
urllib2.urlopen(login_url)
print "Login successful!"
except:
print 'Login error!'
Author: lasclocker | Project: Sina-MicroBlog-Potential-Customer-Mining | Lines: 60 | Source: simulation_crawler.py
Example 16: check_url
def check_url(self, url):
try:
urllib2.urlopen(url).headers.getheader('Content-Length')
except urllib2.HTTPError:
print("404 error checking url: " + url)
return False
return True
Author: arges | Project: spork | Lines: 7 | Source: get-linux-deb-url.py
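The check above issues a full GET just to read one header. urllib2 can send a HEAD request by overriding Request.get_method, which is cheaper for large files; a sketch (behaviour otherwise mirroring check_url; the class and helper names are my additions):

import urllib2

class HeadRequest(urllib2.Request):
    def get_method(self):
        return 'HEAD'

def check_url_head(url):
    try:
        urllib2.urlopen(HeadRequest(url), timeout=10)
    except urllib2.HTTPError as e:
        print("%d error checking url: %s" % (e.code, url))
        return False
    return True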
Example 17: get_proportional_hash_area
def get_proportional_hash_area(period):
""" Takes in periods accepted by P2Pool - hour, day, week, month or year,
then gets hash_data from the server running on localhost, parses it,
and calculates each miner's hash power against the total during that time.
"""
import urllib2, json
path1 = 'http://localhost:9332/web/graph_data/miner_hash_rates/last_'+period
result1 = json.load(urllib2.urlopen(path1))
path2 = 'http://localhost:9332/web/graph_data/miner_dead_hash_rates/last_'+period
result2 = json.load(urllib2.urlopen(path2))
hash_areas = {}
total_hash_area = 0
for row in result1:
for address in row[1]:
try:
hash_areas[address] += row[1][address] * row[2]
except KeyError:
hash_areas[address] = row[1][address] * row[2]
finally:
total_hash_area += row[1][address] * row[2]
for row in result2:
for address in row[1]:
hash_areas[address] -= row[1][address]*row[2]
total_hash_area -= row[1][address] * row[2]
proportions = {}
for address in hash_areas.keys():
proportions[address] = hash_areas[address] / total_hash_area
hash_areas[address] /= 1000000000000000
return hash_areas, proportions
Author: hunterbunter | Project: p2pool-merged-payout-tool | Lines: 33 | Source: payout.py
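To make the area arithmetic concrete: each graph row is (timestamp, {address: rate}, duration), so rate x duration accumulates a hash "area" per miner, and each proportion is that area over the total. A tiny worked sketch with made-up rows:

rows = [
    (0, {'addr1': 2.0e12, 'addr2': 6.0e12}, 300),  # 300 s at 2 TH/s and 6 TH/s
    (1, {'addr1': 4.0e12},                  300),  # addr2 idle this interval
]

areas, total = {}, 0
for _, rates, duration in rows:
    for addr, rate in rates.items():
        areas[addr] = areas.get(addr, 0) + rate * duration
        total += rate * duration

proportions = {addr: area / total for addr, area in areas.items()}
print(proportions)  # {'addr1': 0.5, 'addr2': 0.5}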
Example 18: getSCLeg
def getSCLeg(partyDict):
houseSoup = BeautifulSoup(urllib2.urlopen('http://www.scstatehouse.gov/member.php?chamber=H&order=D').read())
senateSoup = BeautifulSoup(urllib2.urlopen('http://www.scstatehouse.gov/member.php?chamber=S&order=D').read())
houseTable = houseSoup.find('div', {'class': 'mainwidepanel'}).find_all('div', {'style': 'width: 325px; height: 135px; margin: 0 0 0 20px; text-align: left; float: left;'})
senateTable = senateSoup.find('div', {'class': 'mainwidepanel'}).find_all('div', {'style': 'width: 325px; height: 135px; margin: 0 0 0 20px; text-align: left; float: left;'})
dictList = []
for item in houseTable:
repInfo = {}
link = item.find('a')
if link is not None:
repInfo['Website'] = 'http://www.scstatehouse.gov' + link.get('href')
            repInfo['Name'] = re.sub(r'\[.*$', '', link.string.strip()).strip().replace(u'\xa0', u' ')  # u'\xa0' assumed: the original whitespace-replace literals were lost in scraping
repInfo['Party'] = partyDict[str(re.sub(r'^.*\[(.*)\].*$', r'\1', link.string.strip()))]
else:
repInfo['Name'] = 'VACANT'
repInfo['District'] = 'SC State House ' + re.sub(r'^.*(District [0-9]*).*$', r'\1', item.get_text())
dictList.append(repInfo)
for item in senateTable:
repInfo = {}
link = item.find('a')
if link is not None:
repInfo['Website'] = 'http://www.scstatehouse.gov' + link.get('href')
            repInfo['Name'] = re.sub(r'\[.*$', '', link.string.strip()).strip().replace(u'\xa0', u' ')  # u'\xa0' assumed: the original whitespace-replace literals were lost in scraping
repInfo['Party'] = partyDict[str(re.sub(r'^.*\[(.*)\].*$', r'\1', link.string.strip()))]
else:
repInfo['Name'] = 'VACANT'
repInfo['District'] = 'SC State Senate ' + re.sub(r'^.*(District [0-9]*).*$', r'\1', item.get_text())
dictList.append(repInfo)
return dictList
Author: djbridges | Project: govBot | Lines: 32 | Source: SCLeg.py
Example 19: sendMessage
def sendMessage(subject, content, chanel, mobile) :
if content :
content = subject + content
subject = "时时彩计划方案"
if chanel == "serverChan" :
key = "SCU749Tfa80c68db4805b9421f52d360f6614cb565696559f19e"
url = "http://sc.ftqq.com/" + key +".send"
parameters = {
"text" : subject, "desp" : content,
"key" : key
}
elif chanel == "pushBear" :
url = "http://api.pushbear.com/smart"
parameters = {
"sendkey" : "96-d296f0cdb565bae82a833fabcd860309",
"text" : subject,
"mobile" : mobile,
"desp" : content
}
if chanel == "mail" :
sendMail("smtp.126.com", "[email protected]", ["[email protected]", "[email protected]"],
subject, content, "126.com", "dhysgzs*211", format='plain')
return
postData = urllib.urlencode(parameters)
request = urllib2.Request(url, postData)
urllib2.urlopen(request)
Author: sgzs6721 | Project: caipiao | Lines: 27 | Source: notification.py
Example 20: fetchVideo_DBpedia
def fetchVideo_DBpedia(videoName):
def is_person(url, response):
try:
for item in response[url.replace('data', 'resource')[:-5]]['http://www.w3.org/1999/02/22-rdf-syntax-ns#type']:
if item['value'] == 'http://dbpedia.org/ontology/Person':
return True
return False
except:
return False
def find_disambiguates(url, response):
ret = []
try:
for item in response[url.replace('data', 'resource')[:-5]]['http://dbpedia.org/ontology/wikiPageDisambiguates']:
ret.append(item['value'])
except:
pass
return ret
try:
url="http://dbpedia.org/"
        videoName = '_'.join(videoName.title().split())  # Title_Case_With_Underscores
titleUrl = url+"data/"+videoName+".json"
response = json.loads(urllib2.urlopen(titleUrl).read())
if is_person(titleUrl, response):
return True
ds = find_disambiguates(titleUrl, response)
for d in ds:
d = d.replace('resource', 'data') + ".json"
if is_person(d, json.loads(urllib2.urlopen(d).read())):
return True
except:
return False
return False
Author: Jimmy123 | Project: video_similarity | Lines: 35 | Source: videoTagFetch.py
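Finally, since the urllib2 module no longer exists in Python 3 (its contents were split into urllib.request and urllib.error, with the encoding helpers in urllib.parse), a small compatibility shim makes these snippets portable; a sketch covering the names used throughout this article:

try:
    # Python 2
    from urllib2 import urlopen, Request, HTTPError, URLError
    from urllib import urlencode, quote, unquote
except ImportError:
    # Python 3: the same names live in urllib.request / urllib.error / urllib.parse
    from urllib.request import urlopen, Request
    from urllib.error import HTTPError, URLError
    from urllib.parse import urlencode, quote, unquote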
Note: the urllib2.urlopen examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and any use or redistribution should follow the corresponding project's license. Do not repost without permission.