This article collects typical usage examples of Python's urllib.urlcleanup function. If you have been wondering what exactly urlcleanup does, how to call it, or what real-world code that uses it looks like, the hand-picked examples below should help.
The 20 code examples below are sorted by popularity by default. You can upvote the ones you like or find useful; your votes help the system recommend better Python code samples.
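Before the project examples, here is a minimal sketch of the typical call pattern. This is a generic illustration, not taken from any project below: the URL and filename are placeholders, and it assumes Python 2, where the functions live directly in urllib (in Python 3 they moved to urllib.request):

import urllib

# urlretrieve() downloads a resource to a local file; internally it may
# create temporary files and keep cached data around for the session.
filename, headers = urllib.urlretrieve('http://example.com/data.txt', 'data.txt')

# urlcleanup() clears that internal cache and deletes any temporary
# files left behind by earlier urlretrieve() calls.
urllib.urlcleanup()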
Example 1: getfiles

def getfiles(self, *files):
    """Download Rebase files."""
    for file in self.update(*files):
        print('copying %s' % file)
        fn = os.path.basename(file)
        # filename = os.path.join(Rebase, fn)
        filename = os.path.join(os.getcwd(), fn)
        print('to %s' % filename)
        try:
            self.retrieve(file, filename)
            # The following line is a workaround for an urllib bug in
            # Python 2.7.11 - 2.7.xx (?). It does not seem to work on
            # Python 3.xx. Try to remove the line in new Python versions.
            urlcleanup()
        except IOError as e:
            print(e)
            print('This error is probably due to a non-solved ftp bug in '
                  'recent Python versions. Please download the emboss '
                  'files manually from http://rebase.neb.com/rebase/'
                  'rebase.f37.html and then run ranacompiler.py. Find '
                  'more details in the Restriction manual.')
            self.close()
            return
    self.close()
    return

Author: HuttonICS, Project: biopython, Lines: 25, Source: rebase_update.py
Example 2: mainProcess

def mainProcess(linkList=[]):
    print 'Process {0} is starting to work!'.format(os.getpid())
    st = time.time()
    p = PAIRSeg()
    p._PAIRSeg__ResetSQLVariables()
    log = LogProcessor.LogProcess()
    fNum = os.path.basename(linkList[0]).split('.')[0]
    lNum = os.path.basename(linkList[len(linkList) - 1]).split('.')[0]
    numRange = fNum + '-' + lNum
    for link in linkList:
        fileName = os.path.basename(link)
        filePath = p.dirPath + '/' + fileName
        try:
            urllib.urlretrieve(link, filePath)
            #print '[Downloaded .zip File: [{0}]'.format(fileName)
            urllib.urlcleanup()
            zipOrNot = zipfile.is_zipfile(filePath)
            if zipOrNot == True:
                p.ExtractTSV(filePath)
                os.remove(filePath)  # save space on sofus
            elif zipOrNot == False:
                os.remove(filePath)
            log.write(log.logPath_PAIR, fileName + '\t' + link + '\t' + 'PAIR\tProcessed')
        except:
            print 'ERROR: time out. {fileName}'.format(fileName=fileName)
            log.write(log.logPath_PAIR_Error, fileName + '\t' + link + '\t' + 'PAIR\tProcessed')
    p.writeCSV(numRange)
    print 'Processed range:{range}'.format(range=numRange)
    print '[Process {0} is finished. Populated {1} links. Time:{2}]'.format(os.getpid(), len(linkList), time.time() - st)

Author: CoryXie, Project: UPET-USPTO-Patent-Exploring-Tool, Lines: 29, Source: PAIRParserSeg.py
Example 3: image_analyzer

def image_analyzer(url, string, depth, k):
    try:
        example_image = download_image(url)
    except (HTTPError, IOError), e:
        urllib.urlcleanup()
        sys.exit('Unable to download %s.\n' % url + 'Error: %s.\n' % e)

Author: bitrex, Project: imagesearcher, Lines: 7, Source: imagesearcher.py
Example 4: aursearch

def aursearch(keywords, totalnum):
    search = "http://aur.archlinux.org/packages.php?O=0&L=0&C=0&K=" + keywords.replace(" ", "+") + "&SeB=nd&SB=n&SO=a&PP=25&do_Search=Go"
    urllib.urlretrieve(search, "aur.tmp")
    block = ""
    num = 0
    sect = 0
    packages = ""
    aur = open("aur.tmp")
    for aurl in aur:
        if num <= totalnum:
            if aurl[:20] == " <td class='data1'>" or aurl[:20] == " <td class='data2'>": print "aursearch", aurl
            if aurl[:20] == " <td class='data1'>" and not block == "data1":
                sect = 0
                block = "data1"
            elif aurl[:20] == " <td class='data2'>" and not block == "data2":
                sect = 0
                block = "data2"
            elif aurl[:20] == " <td class='data1'>" or aurl[:20] == " <td class='data2'>":
                print aurl
                sect += 1
                if sect == 2:
                    pacname = aurl.split("<span class='black'>")
                    pacname = pacname[1].split("</span>")
                    pacname = pacname[0]
                    packages = packages + pacname
                elif sect == 4:
                    pacdesc = aurl.split("<span class='blue'>")
                    pacdesc = pacdesc[1].split("</span>")
                    pacdesc = pacdesc[0]
                    packages = packages + " - " + pacdesc + "\n"
                    num += 1
    aur.close()
    urllib.urlcleanup()
    return packages

Author: GunioRobot, Project: home, Lines: 35, Source: ircfunctions.py
Example 5: download_file

def download_file(url, destfile):
    """
    download_file: download from url and save as destfile.
    @url: the source file to download.
    @destfile: the local destination path.
    """
    file_url = url
    try:
        print("--> Downloading file: %s" % file_url)
        filename, msg = urllib.urlretrieve(
            #'http://code.jquery.com/jquery-2.1.1.js',
            file_url,
            reporthook=reporthook)
        print ""
        #print "File:", filename
        print "Header:"
        print msg
        if os.path.exists(filename):
            if os.path.exists(destfile):
                now = currenttime()
                tmpfile = "%s.%s" % (destfile, now)
                shutil.move(destfile, tmpfile)
            shutil.move(filename, destfile)
            #print 'File exists before cleanup:', os.path.exists(filename)
    finally:
        urllib.urlcleanup()

Author: davidlu1001, Project: python-scripts-1, Lines: 29, Source: urllibdownload.py
Example 6: downloadImage

def downloadImage(imageURL, subID):
    """Download images"""
    # image url
    image_url = 'http://bbs.sjtu.edu.cn' + imageURL
    # create the directory to store images
    # if not os.path.exists( './download' ) :
    try:
        os.makedirs('./download/' + subID)
    except OSError:
        pass
        #print "Failed to create directories"
    # get filename of image
    filename = 'download/' + subID + '/' + imageURL.split('/')[-1]
    # clear the cache that may have been built up
    # by previous calls to urlretrieve()
    urllib.urlcleanup()
    # retrieve the image
    try:
        urllib.urlretrieve(image_url, filename)
    except ContentTooShortError:
        print "The data available was less than expected"
        print "Downloading file %s was interrupted" \
            % os.path.basename(filename)
    else:
        # get the size of file
        size = os.path.getsize(filename) / 1024
        print ">>>File %s (%s Kb) was done..." % (filename, size)

Author: Crackpot, Project: gftop, Lines: 33, Source: bbsImage.py
Example 7: get_wallpgig

def get_wallpgig(self):
    if not self.check_connection():
        return "No internet connection!"
    tags_from_file = self.filemanage.file_get_tags()
    query_from_file = self.filemanage.file_get_query()
    if (set(tags_from_file) == set(self.tags)) & (query_from_file == self.query):
        self.status.set_label("Loading urls...")
        urls = self.filemanage.file_get_urls()
        self.status.set_label("Urls loaded.")
    else:
        self.status.set_label("Downloading urls...")
        urls = self.download_urls()
        self.status.set_label("Urls downloaded.")
    random.shuffle(urls)
    try:
        url = urls[0]
        urllib.urlretrieve(url, tfile)
        urllib.urlcleanup()
        check = 1
    except IndexError:
        check = "No photos for this tags!"
    except urllib2.URLError:
        check = "No internet connection!"
    return check

Author: SimoneFisicaro, Project: wallp, Lines: 29, Source: wall.py
Example 8: run

def run(self):
    try:
        urllib._urlopener = SmartFancyURLopener()
        urllib.urlretrieve(self.url, self.tmpfile, reporthook=self._hook)
        urllib.urlcleanup()
    except Abort:
        print 'Download Aborted'
    except:
        pass  # handler body truncated in the original excerpt

Author: Quihico, Project: passion-xbmc, Lines: 7, Source: downloader.py
Example 9: populate_sidebar

def populate_sidebar(self, branch='master', count=50):
    self.commits = self.repo.commits(branch, max_count=count)
    for commit in self.commits:
        commit_time = time.strftime("%c", commit.authored_date)
        parts = commit.message.split('\n')
        if len(parts) > 1:
            text = "<b>%s ...</b>" % parts[0]
        else:
            text = "<b>%s</b>" % commit.message
        text += "\n<small>by %s on %s</small>" % (commit.author,
                                                  commit_time)
        hashed = hashlib.md5(commit.author.email).hexdigest()
        image_path = "%s/grav_cache/%s.jpg" % (installdir, hashed)
        if not os.path.exists(image_path):
            gravatar_url = "http://www.gravatar.com/avatar.php?"
            gravatar_url += urllib.urlencode({'gravatar_id': hashed,
                                              'size': str(30)})
            urllib.urlretrieve(gravatar_url, image_path)
            urllib.urlcleanup()
        image = gtk.gdk.pixbuf_new_from_file(image_path)
        self.sidebar.add_item(None, [text, image])

Author: techwizrd, Project: BitShift, Lines: 26, Source: bitshift.py
Example 10: download_version

def download_version(version):
    chromium_file = 'chromium-%s.tar.xz' % version
    path = '%s%s' % (chromium_url, chromium_file)
    if args.clean:
        remove_file_if_exists(chromium_file)
    # Let's make sure we haven't already downloaded it.
    if os.path.isfile("./%s" % chromium_file):
        print "%s already exists!" % chromium_file
    else:
        print "Downloading %s" % path
        # Perhaps look at using python-progressbar at some point?
        urllib.urlretrieve(path, chromium_file, reporthook=dlProgress)
        urllib.urlcleanup()
        print ""
    if args.tests:
        chromium_testdata_file = 'chromium-%s-testdata.tar.xz' % version
        path = '%s%s' % (chromium_url, chromium_testdata_file)
        if args.clean:
            remove_file_if_exists(chromium_testdata_file)
        # Let's make sure we haven't already downloaded it.
        if os.path.isfile("./%s" % chromium_testdata_file):
            print "%s already exists!" % chromium_testdata_file
        else:
            # Perhaps look at using python-progressbar at some point?
            print "Downloading %s" % path
            urllib.urlretrieve(path, chromium_testdata_file, reporthook=dlProgress)
            urllib.urlcleanup()
            print ""

Author: MagicGroup, Project: MagicSPECS, Lines: 34, Source: chromium-latest.py
Example 11: oai_listIdentifiers

def oai_listIdentifiers(self, src="www.astros-test.bodleian.ox.ac.uk/sandbox", resumptionToken=None):
    self.ids_data_file = '/tmp/%s_ids_data_file' % unicode(uuid.uuid4())
    src_url = None
    if resumptionToken:
        src_url = "%s?verb=ListIdentifiers&resumptionToken=%s" % (src, resumptionToken)
    else:
        src_url = "%s?verb=ListIdentifiers&metadataPrefix=oai_dc" % src
    for arg, val in self.args.iteritems():
        if val:
            src_url = "%s&%s=%s" % (src_url, arg, val)
    if 'args' in src:
        src_url = "%s&%s" % (src_url, src['args'])
    tries = 1
    while tries < 11:
        urlretrieve(src_url, self.ids_data_file)
        if os.path.isfile(self.ids_data_file):
            self.logger.info("Downloaded identifiers for %s - %s" % (src, src_url))
            break
        self.logger.warn("Error retrieving identifiers for %s - %s (try # %d)" % (src, src_url, tries))
        tries += 1
    urlcleanup()
    tree = ET.ElementTree(file=self.ids_data_file)
    rt = tree.getroot()
    ids = rt.findall("%(ns)sListIdentifiers/%(ns)sheader/%(ns)sidentifier" % {'ns': self.oai_ns})
    for ID in ids:
        if resumptionToken and 'deletion' in resumptionToken:
            self.delete_identifiers.append(ID.text)
        else:
            self.identifiers.append(ID.text)
    rtoken = rt.findall("%(ns)sListIdentifiers/%(ns)sresumptionToken" % {'ns': self.oai_ns})
    os.remove(self.ids_data_file)
    if rtoken:
        self.oai_listIdentifiers(src, resumptionToken=rtoken[0].text)

Author: bhavanaananda, Project: Pylons-DataFinder, Lines: 33, Source: oai_client.py
Example 12: iq_register

def iq_register(self, iq):
    """
    Register to a new VMCast.
    @type iq: xmpp.Protocol.Iq
    @param iq: the sender request IQ
    @rtype: xmpp.Protocol.Iq
    @return: a ready-to-send IQ containing the results
    """
    reply = iq.buildReply("result")
    url = iq.getTag("query").getTag("archipel").getAttr("url")
    try:
        if not url or url == "":
            raise Exception("IncorrectStanza", "Stanza must have url: %s" % str(iq))
        try:
            urllib.urlcleanup()
            f = urllib.urlopen(url)
        except:
            raise Exception("The given url doesn't exist. Can't register.")
        try:
            self.getFeed(f.read())
        except:
            raise Exception("The given url doesn't contain a valid VMCast feed. Can't register.")
        self.cursor.execute("INSERT INTO vmcastsources (url) VALUES ('%s')" % url)
        self.database_connection.commit()
        self.parseRSS()
        self.entity.push_change("vmcasting", "register")
        self.entity.shout("vmcast", "I'm now registered to vmcast %s as asked by %s" % (url, iq.getFrom()))
    except Exception as ex:
        reply = build_error_iq(self, ex, iq, ARCHIPEL_ERROR_CODE_VMCASTS_REGISTER)
    return reply

Author: CanaryTek, Project: Archipel, Lines: 30, Source: hypervisorrepomanager.py
Example 13: showInfo

def showInfo(self):
    if self.check == "true" and self.menulist:
        m_title = self["menulist"].getCurrent()[0][0]
        m_url = self["menulist"].getCurrent()[0][1]
        if m_url:
            #m_url = re.findall('(.*?)\.', m_url)
            #extra_imdb_convert = "._V1_SX320.jpg"
            #m_url = "http://ia.media-imdb.com/images/%s%s" % (m_url[0], extra_imdb_convert)
            print "EMC iMDB: Download Poster - %s" % m_url
            urllib._urlopener = AppURLopener()
            urllib.urlretrieve(m_url, self.path)
            urllib.urlcleanup()
            if os.path.exists(self.path):
                self.poster_resize(self.path, m_title)
                #ptr = LoadPixmap(self.path)
                #if ptr is None:
                #    ptr = LoadPixmap("/usr/lib/enigma2/python/Plugins/Extensions/EnhancedMovieCenter/img/no_poster.png")
                #    print "EMC iMDB: Load default NO Poster."
                #if ptr is not None:
                #    self["poster"].instance.setPixmap(ptr)
                #    print "EMC iMDB: Load Poster - %s" % m_title
            else:
                print "EMC iMDB: No url found for - %s" % m_title
        else:
            print "EMC iMDB: No url found for - %s" % m_title

Author: n3wb13, Project: e2openplugin-EnhancedMovieCenter, Lines: 26, Source: EMCCoverSearch.py
Example 14: song_download

def song_download():
    song = user_input('Enter the name of song: ')
    try:
        query_string = encode({"search_query": song})
        content = urlopen("http://www.youtube.com/results?" + query_string)
        if version == 3:
            ##I hate RE
            search_results = re.findall(r'href=\"\/watch\?v=(.{11})', content.read().decode())
        else:
            ##ok!! if its not going work! I'm gonna kill you!!!
            search_results = re.findall(r'href=\"\/watch\?v=(.{11})', content.read())
        ##finally(Thanks to git)
    except:
        print('Something happened!!')
        exit(1)
    # youtube2mp3 API
    downloadLinkOnly = 'http://www.youtubeinmp3.com/fetch/?video=' + 'http://www.youtube.com/watch?v=' + search_results[0]
    try:
        print('Downloading %s' % song)
        urllib.urlretrieve(downloadLinkOnly, filename='%s.mp3' % song)
        urllib.urlcleanup()
    except:
        print('Error %s' % song)
        exit(1)

Author: kashyap32, Project: Downloader, Lines: 32, Source: songs-apk.py
Example 15: fetch_preferred_team_overview

def fetch_preferred_team_overview(self):
    if not self.is_offday_for_preferred_team():
        urllib.urlcleanup()
        game = self.games[self.game_index_for_preferred_team()]
        game_overview = mlbgame.overview(game.game_id)
        debug.log("Preferred Team's Game Status: {}, {} {}".format(game_overview.status, game_overview.inning_state, game_overview.inning))
        return game_overview

Author: ccrabb, Project: mlb-led-scoreboard, Lines: 7, Source: data.py
Example 16: _get_url

def _get_url(self, url, content=None):
    urllib.urlcleanup()
    headers = {"Accept": "text/plain"}
    rest = self.base_url
    user = self.username
    passwd = self.password
    if not rest or not user or not passwd:
        self.logger.info("Could not contact RT, bad or missing args (host: %s user: %s or passwd)", rest, user)
        return u""
    cj = cookielib.LWPCookieJar()
    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
    urllib2.install_opener(opener)
    if content is None:
        data = {'user': user, 'pass': passwd}
    else:
        data = {'user': user, 'pass': passwd, 'content': content}
    #self.log.info("Data to be sent by RT:\n%r", data)
    ldata = urllib.urlencode(data)
    uri = rest + url
    login = urllib2.Request(uri, ldata)
    response_data = ""
    try:
        response = urllib2.urlopen(login)
        response_data = response.read()
        self.logger.info("RT Connection successful: %r", response_data)
    except urllib2.URLError, exc:
        # could not connect to server
        self.logger.info("RT Connection failed: %r", exc)

Author: helderfernandes1279, Project: intelmq_first_version, Lines: 33, Source: rt_objects.py
Example 17: dowload

def dowload(url):
    try:
        urlretrieve(url, self.image_name(url))
        urlcleanup()
    except:
        return False
    return True

Author: loonyhom, Project: PythonSample, Lines: 7, Source: FatchImageFromDouban.py
Example 18: firmware_update

def firmware_update(self, args):
    if not args.file and not args.url:
        raise Exception("Must provide firmware filename or URL")
    if args.file:
        fp = open(args.file, 'r')
    elif args.url:
        print "Downloading from", args.url
        resp = urllib.urlretrieve(args.url)
        fp = open(resp[0], 'r')
        urllib.urlcleanup()  # We still keep file pointer open
    if fp.read(8) == '54525a52':
        print "Converting firmware to binary"
        fp.seek(0)
        fp_old = fp
        fp = tempfile.TemporaryFile()
        fp.write(binascii.unhexlify(fp_old.read()))
        fp_old.close()
        fp.seek(0)
    if fp.read(4) != 'KPKY':
        raise Exception("KeepKey firmware header expected")
    print "Please confirm action on device..."
    fp.seek(0)
    return self.client.firmware_update(fp=fp)

Author: primehat23, Project: python-keepkey, Lines: 31, Source: cmdkk.py
Example 19: run

def run(self, site=None, flush_lists=True, flush_rpms=True):
    flushdirs = ['rpms', 'rdfs', 'lists']
    if not flush_lists:
        flushdirs.remove('lists')
    if not flush_rpms:
        flushdirs.remove('rpms')
    urllib.urlcleanup()
    if os.path.isdir(self.cachedir):
        for subdir in flushdirs:
            dir = os.path.join(self.cachedir, subdir)
            if site:
                dir = os.path.join(dir, site)
            if os.path.isdir(dir):
                shutil.rmtree(dir)
            if not site:
                os.mkdir(dir, 0755)
        if not site:
            #
            # Now let's recreate the infrastructure:
            #
            for subdir in ['file:', 'http:', 'https:', 'ftp:']:
                for type in ['rpms/', 'rdfs/']:
                    sd = type + subdir
                    dir = os.path.join(self.cachedir, sd)
                    os.mkdir(dir, 0755)

Author: BackupTheBerlios, Project: swup-svn, Lines: 26, Source: download.py
Example 20: refresh_seeds

def refresh_seeds():
    # Get a new batch of random bits from our friends at Fourmilab
    try:
        os.remove(cache_file)
    except:
        pass
    urllib.urlretrieve("http://www.fourmilab.ch/cgi-bin/uncgi/Hotbits?nbytes=240&fmt=hex", cache_file)
    # We know they're good because they're imported from Switzerland
    urllib.urlcleanup()
    # Load the web page source
    bitsFile = open(cache_file)
    lines = bitsFile.readlines()
    bitsFile.close()
    randomLines = []
    # Filter out the good lines
    for line in lines:
        if re.match(r'[0123456789ABCDEF]+', line):
            randomLines.append(line)
    os.remove(cache_file)
    outLine = ""
    # Stuff them into one big line and write it back out
    for line in randomLines:
        outLine = outLine + string.strip(line)
    bitsFile = open(cache_file, "w")
    bitsFile.write(outLine)
    bitsFile.close()

Author: aevernon, Project: triggerman, Lines: 26, Source: hotbits.py
Note: the urllib.urlcleanup examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation hosting platforms. The snippets were selected from open-source projects contributed by many developers; the copyright of each snippet belongs to its original author, and distribution and use are governed by the license of the corresponding project. Do not reproduce without permission.