本文整理汇总了Python中urlgrabber.urlgrab函数的典型用法代码示例。如果您正苦于以下问题:Python urlgrab函数的具体用法?Python urlgrab怎么用?Python urlgrab使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了urlgrab函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: srpm_from_ticket
def srpm_from_ticket(self):
    '''Retrieve the latest SRPM URL from the bugzilla ticket URL.

    Downloads the last SRPM linked on the ticket page into self.tmpDir
    and records its local path in the 'SRPMfile' checklist property.
    Returns silently, setting nothing, when no ticket URL or no SRPM
    link is available.
    '''
    try:
        bugzillaURL = self.checklist.properties['ticketURL'].value
    except KeyError:
        # No ticket URL was given, set nothing
        return
    if not bugzillaURL:
        # Ticket URL property exists but is empty, set nothing
        return
    data = urlgrabber.urlread(bugzillaURL)
    # Raw string avoids invalid-escape warnings; matches quoted
    # http/https/ftp/ftps URLs ending in .src.rpm
    srpmList = re.compile(r'"((ht|f)tp(s)?://.*?\.src\.rpm)"',
                          re.IGNORECASE).findall(data)
    if srpmList == []:
        # No SRPM was found. Just decide not to set anything.
        return
    # Set the srpm to the last SRPM listed on the page
    srpmURL = srpmList[-1][0]
    if not srpmURL:
        # No srpm found. Just decide not to set anything.
        return
    # Download the srpm to the temporary directory.
    urlgrabber.urlgrab(srpmURL, self.tmpDir)
    # Fill the SRPMfile property with the srpm path in the temp directory.
    # os.path.join works whether or not tmpDir ends with a separator
    # (plain concatenation produced a bogus path when it did not).
    self.checklist.properties['SRPMfile'].value = os.path.join(
        self.tmpDir, os.path.basename(srpmURL))
开发者ID:BackupTheBerlios,项目名称:qa-assistant-svn,代码行数:28,代码来源:fedoraus.py
示例2: __download_prop_file
def __download_prop_file(self):
    """Fetch the package prop file (up to 3 attempts) and validate it.

    Raises:
        AgentException: DC_MISSING_PROP_FILE when the file is still
            absent after all attempts, or fails PackageUtil.validateProp.
    """
    attempt = 0
    while attempt < 3:
        attempt += 1
        try:
            # Socket timeout and proxy map come from the pylons config.
            timeout_s = float(pylons.config['download_thread_sotimeout'])
            proxy_map = json.loads(pylons.config['urlgrabber_proxies'])
            urlgrabber.urlgrab(self.__uriDict['propUri'],
                               self.__uriDict['propPath'],
                               keepalive=0,
                               timeout=timeout_s,
                               proxies=proxy_map)
            break
        except Exception:
            # Back off for a random 30-60s before the next attempt.
            time.sleep(randint(30, 60))
    if not os.path.exists(self.__uriDict['propPath']):
        raise AgentException(
            Errors.DC_MISSING_PROP_FILE,
            'Prop file (%s) does not exist' % (self.__uriDict['propPath']))
    if not PackageUtil.validateProp(self.__uriDict['propPath']):
        raise AgentException(
            Errors.DC_MISSING_PROP_FILE,
            'Prop file (%s) failed validation' % (self.__uriDict['propPath']))
开发者ID:cronuspaas,项目名称:cronusagent,代码行数:25,代码来源:download_thread.py
示例3: fetch
def fetch (self):
"""Return value: Fetched file's full path.."""
# import urlgrabber module
try:
import urlgrabber
except ImportError:
raise FetchError(_('Urlgrabber needs to be installed to run this command'))
if not self.url.filename():
raise FetchError(_('Filename error'))
if not os.access(self.destdir, os.W_OK):
raise FetchError(_('Access denied to write to destination directory: "%s"') % (self.destdir))
if os.path.exists(self.archive_file) and not os.access(self.archive_file, os.W_OK):
raise FetchError(_('Access denied to destination file: "%s"') % (self.archive_file))
try:
urlgrabber.urlgrab(self.url.get_uri(),
self.partial_file,
progress_obj = UIHandler(self.progress),
http_headers = self._get_http_headers(),
ftp_headers = self._get_ftp_headers(),
proxies = self._get_proxies(),
throttle = self._get_bandwith_limit(),
reget = self._test_range_support(),
user_agent = 'PiSi Fetcher/' + pisi.__version__)
except urlgrabber.grabber.URLGrabError, e:
raise FetchError(_('Could not fetch destination file "%s": %s') % (self.archive_file, e))
开发者ID:dhirajkhatiwada1,项目名称:uludag,代码行数:30,代码来源:fetcher.py
示例4: _batch_download
def _batch_download(self, uris, local_path=None, throttle=0):
    """Downloads packages from the specified uris. This is a W.I.P!!!

    Args:
        uris (list of strings) - Uris of the packages to download.
        local_path (string) - Full path where the packages are saved.
            Do not include a file name.
        throttle (int) - Number of kilobytes to throttle the bandwidth by.
            If throttle == 0, throttling is disabled.

    Returns:
        True if every package downloaded successfully. False otherwise.
    """
    success = True
    if throttle != 0:
        throttle *= 1024  # urlgrab takes bytes/sec
    for uri in uris:
        try:
            if local_path:
                # Derive the file name from the uri, dropping any query string.
                name = uri.split('/')[-1]
                if '?' in name:
                    name = name.split('?')[0]
                path = os.path.join(local_path, name)
                # BUG FIX: the original computed `path` but never
                # actually downloaded in this branch.
                urlgrab(uri, filename=path, throttle=throttle)
            else:
                urlgrab(uri, throttle=throttle)
        except Exception as e:
            logger.exception(e)
            success = False
    # BUG FIX: the original never returned, despite the documented contract.
    return success
开发者ID:MiguelMoll,项目名称:vFense,代码行数:35,代码来源:packagegrabber.py
示例5: run
def run(self, force=False):
    """
    Download bootloader content for all of the latest bootloaders, since the user
    has chosen to not supply their own. You may ask "why not get this from yum", though
    Fedora has no IA64 repo, for instance, and we also want this to be able to work on Debian and
    further do not want folks to have to install a cross compiler. For those that don't like this approach
    they can still source their cross-arch bootloader content manually.
    """
    content_server = "http://mdehaan.fedorapeople.org/loaders"
    dest = "/var/lib/cobbler/loaders"
    # (remote file name, local file name) pairs to mirror from the server
    file_map = (
        ("README", "README"),
        ("COPYING.elilo", "COPYING.elilo"),
        ("COPYING.yaboot", "COPYING.yaboot"),
        ("COPYING.syslinux", "COPYING.syslinux"),
        ("elilo-3.8-ia64.efi", "elilo-ia64.efi"),
        ("yaboot-1.3.14-12", "yaboot"),
        ("pxelinux.0-3.61", "pxelinux.0"),
        ("menu.c32-3.61", "menu.c32"),
    )
    self.logger.info("downloading content required to netboot all arches")
    for remote_name, local_name in file_map:
        src = "%s/%s" % (content_server, remote_name)
        dst = "%s/%s" % (dest, local_name)
        # Existing files are kept unless the caller forces a refresh.
        if os.path.exists(dst) and not force:
            self.logger.info("path %s already exists, not overwriting existing content, use --force if you wish to update" % dst)
            continue
        self.logger.info("downloading %s to %s" % (src, dst))
        urlgrabber.urlgrab(src, dst)
    return True
开发者ID:GunioRobot,项目名称:cobbler,代码行数:34,代码来源:action_dlcontent.py
示例6: download_file
def download_file(uri, dl_path, throttle):
    """Download uri to dl_path, honoring the bandwidth throttle.

    GitHub API asset URLs need an octet-stream Accept header so the
    binary is returned instead of JSON metadata.
    """
    grab_kwargs = {'filename': dl_path, 'throttle': throttle}
    if uri.startswith('https://api.github.com/'):
        # TODO: handle 200 and 302 response
        grab_kwargs['http_headers'] = (("Accept", "application/octet-stream"),)
    urlgrab(uri, **grab_kwargs)
开发者ID:akaasjager,项目名称:vFense,代码行数:8,代码来源:downloader.py
示例7: downloadPackage
def downloadPackage(self):
    """Fetch the test package plus its .prop companion and verify both landed."""
    prop_suffix = '.prop'
    urlgrabber.urlgrab(self.testPkgUri, self.localPkgName)
    urlgrabber.urlgrab(self.testPkgUri + prop_suffix,
                       self.localPkgName + prop_suffix)
    LOG.debug('localpackagename = %s', self.localPkgName)
    for required in (self.localPkgName + prop_suffix, self.localPkgName):
        assert os.path.exists(required)
开发者ID:cronuspaas,项目名称:cronusagent,代码行数:9,代码来源:test_package.py
示例8: page_download
def page_download(page_url, folder):
page = urllib2.urlopen(page_url)
soup = BeautifulSoup(page)
print len(soup.find_all("a", { "class" : "next" }))
for src in soup.find_all('img'):
if src.get('src').endswith(sfx):
tgt_url = str(src.get('src').replace('small', 'big'))
print "saving : " + tgt_url
tgt_name = os.path.basename(tgt_url)
try:
urlgrabber.urlgrab(tgt_url, "./" + folder + "/" + tgt_name, progress_obj=urlgrabber.progress.TextMeter())
except urlgrabber.grabber.URLGrabError as detail:
print "Error eccours: " + detail
开发者ID:donie,项目名称:playground,代码行数:13,代码来源:gh_downloader.py
示例9: fetch_image_files
def fetch_image_files(layer, opts):
    """Download every image in layer["images"] as <hash>.<ext>.

    Files go into opts.layer (created on demand) or the cwd; in test
    mode the plan is only printed to stderr.
    """
    if opts.layer:
        path = str(opts.layer)
        if not opts.test and not os.path.isdir(path):
            os.makedirs(path)
    else:
        path = "."
    for image in layer["images"]:
        # File extension comes from the source url.
        extension = image["url"].split(".")[-1]
        target = os.path.join(path, "%s.%s" % (image["hash"], extension))
        if opts.test:
            print >>sys.stderr, image["url"], "->", target
        else:
            progress = urlgrabber.progress.text_progress_meter()
            urlgrabber.urlgrab(image["url"], target, progress_obj=progress)
开发者ID:crschmidt,项目名称:oam,代码行数:15,代码来源:oam-fetch.py
示例10: updateLocalDb
def updateLocalDb():
    """Mirror the remote appinfo database to the local path.

    Returns True when urlgrab reports it wrote self.local_db (after
    refreshing the local checksum), False on URLGrabError.

    NOTE(review): `self` and `updateLocalSum` are not parameters - this
    appears to be a closure nested inside a method, captured from the
    enclosing scope; confirm against the original file.
    """
    try:
        # urlgrab returns the destination file name on success.
        if urlgrabber.urlgrab(self.remote_db, self.local_db) == self.local_db:
            updateLocalSum()
            return True
    except urlgrabber.grabber.URLGrabError:
        return False
开发者ID:Pardus-Linux,项目名称:appinfo,代码行数:7,代码来源:client.py
示例11: grab
def grab(url, filename, timeout=120, retry=5, proxy=None, ftpmode=False):
print "Grabbing", url
def grab_fail_callback(data):
# Only print debug here when non fatal retries, debug in other cases
# is already printed
if (data.exception.errno in retrycodes) and (data.tries != data.retry):
print "grabbing retry %d/%d, exception %s"%(
data.tries, data.retry, data.exception)
try:
retrycodes = urlgrabber.grabber.URLGrabberOptions().retrycodes
if 12 not in retrycodes:
retrycodes.append(12)
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
downloaded_file = urlgrabber.urlgrab(
url, filename,timeout=timeout,retry=retry, retrycodes=retrycodes,
progress_obj=SimpleProgress(), failure_callback=grab_fail_callback,
copy_local=True, proxies=proxy, ftp_disable_epsv=ftpmode)
if not downloaded_file:
return False
except urlgrabber.grabber.URLGrabError as e:
warn('URLGrabError %i: %s' % (e.errno, e.strerror))
if os.path.exists(filename):
os.unlink(filename)
return False
return True
开发者ID:oe-lite-bananapi-r1,项目名称:core,代码行数:26,代码来源:url.py
示例12: fetch_jetty
def fetch_jetty(self):
    """Download and unpack the requested version of Jetty into self.home.

    No-op when self.home already exists.  The tarball is downloaded to
    a temporary file, extracted into self.cachedir, then the extracted
    tree is renamed into place; the temp file is always removed.
    """
    if path.exists(self.home):
        return
    url = self.node.config.get('jetty','REPO') + self.version + "/jetty-distribution-" + self.version + ".tar.gz"
    if not path.exists(self.cachedir):
        os.makedirs(self.cachedir)
    # SECURITY FIX: tempfile.mktemp is race-prone (deprecated); mkstemp
    # creates the file atomically.  Close the fd - urlgrabber and tar
    # operate on the file by name.
    fd, f = tempfile.mkstemp(prefix='jetty-' + self.version + '-', suffix='.tar.gz')
    os.close(fd)
    try:
        print("Downloading Jetty from " + url)
        meter = urlgrabber.progress.TextMeter()
        urlgrabber.urlgrab(url, filename=f, progress_obj=meter)
        subprocess.check_call(["tar", "-x", "-C", self.cachedir, "-f", f])
    finally:
        os.remove(f)
    os.rename(path.join(self.cachedir, 'jetty-distribution-' + self.version), self.home)
开发者ID:nla,项目名称:jvmctl,代码行数:16,代码来源:jvmctl.py
示例13: __init__
def __init__(self):
    """Find attached iPods and flash each with its latest firmware.

    Fetches Apple's iTunes version plist, matches each detected device's
    family to a firmware URL, downloads and unzips the firmware, then
    writes it to the raw device with dd (after a dd backup).

    NOTE(review): writes directly to device nodes - destructive; needs
    appropriate privileges.  Reconstructed nesting of the post-extract
    steps should be confirmed against the original file.
    """
    # The version plist is served gzip-compressed; decompress in memory.
    data = StringIO.StringIO(urlgrabber.urlread("http://itunes.com/version"))
    stream = gzip.GzipFile(fileobj=data)
    data = stream.read()
    updates = plistlib.readPlistFromString(data)
    devs = self.findPods()
    for (dev, name, family, firmware) in devs:
        if not family:
            # Fall back to probing the device directly.
            family, firmware = self.getIPodData(dev)
        print "Found %s with family %s and firmware %s" % (name, family, firmware)
        if updates["iPodSoftwareVersions"].has_key(unicode(family)):
            uri = updates["iPodSoftwareVersions"][unicode(family)]["FirmwareURL"]
            print "Latest firmware: %s" % uri
            print "Fetching firmware..."
            # reget="check_timestamp" lets urlgrabber reuse an already
            # downloaded copy when it is still current.
            path = urlgrabber.urlgrab(
                uri, progress_obj=urlgrabber.progress.text_progress_meter(), reget="check_timestamp"
            )
            print "Extracting firmware..."
            zf = zipfile.ZipFile(path)
            # Extract the entry whose name starts with "Firmware".
            for name in zf.namelist():
                if name[:8] == "Firmware":
                    print "Firmware found."
                    outfile = open("Firmware", "wb")
                    outfile.write(zf.read(name))
                    outfile.close()
            # NOTE(review): `infile` is opened but never used below.
            infile = open("Firmware", "rb")
            outfile = open(dev, "wb")
            # FIXME: do the following in pure python?
            print "Making backup..."
            commands.getoutput("dd if=%s of=Backup" % dev)
            print "Uploading firmware..."
            commands.getoutput("dd if=Firmware of=%s" % dev)
            print "Done."
示例14: getRemoteURL
def getRemoteURL(url):
    """Download url into config.localOSMPath, returning None on failure.

    NOTE(review): this excerpt looks truncated - `start` and `fileSize`
    are computed but unused here, and there is no success-path return;
    the original file presumably logs timing/size and returns fileName.
    NOTE(review): `e.strerror` only exists on OSError-family exceptions;
    a generic Exception would raise AttributeError inside this handler -
    consider logging str(e) instead.
    """
    logger.info('downloading %s' % url)
    start = time.time()
    try:
        fileName = urlgrabber.urlgrab(url, config.localOSMPath)
        fileSize = os.path.getsize(fileName)
    except Exception, e:
        logger.warning('urlgrabber: %s' % e.strerror)
        return(None)
开发者ID:gangele397,项目名称:jaunt,代码行数:9,代码来源:fetcher.py
示例15: archive_downloader
def archive_downloader(i):
list_name = i[0]
year = i[1]
month = i[2]
if not list_name or not year or not month:
return
basename = "{0}-{1}.txt.gz".format(year, month)
filename = "http://lists.fedoraproject.org/pipermail/{0}/{1}".format(list_name, basename)
try:
urlgrabber.urlgrab(filename)
pos = str(months.index(month) + 1)
if len(pos) == 1:
pos = "0{0}".format(pos)
newname = "{0}-{1}-{2}-{3}.txt".format(list_name, year, pos, month)
with open(newname, "w") as f:
f.write(gzip.open(basename).read())
print "== {0} downloaded ==".format(filename)
except urlgrabber.grabber.URLGrabError:
pass
开发者ID:pypingou,项目名称:mongomail,代码行数:19,代码来源:get_mbox.py
示例16: fetch_image_files
def fetch_image_files(client, bbox, opts):
    """Download (or just list) the image files intersecting bbox.

    With opts.dest set, each image is fetched there with a progress
    meter; otherwise the targets are only printed.  Returns the list of
    target file names.
    """
    files = []
    # opts.source switches the query to archived imagery.
    args = {"archive": "true"} if opts.source else {}
    for image in client.images_by_bbox(bbox, **args):
        target = image.path.split("/")[-1]
        if opts.dest:
            meter = urlgrabber.progress.text_progress_meter()
            target = os.path.join(opts.dest, target)
            print >>sys.stderr, image.path, "->", target
            urlgrabber.urlgrab(str(image.path), target, progress_obj=meter)
        else:
            print >>sys.stderr, image.path, "->", target
        files.append(target)
    return files
开发者ID:crschmidt,项目名称:mosaic-webui,代码行数:19,代码来源:mosaic.py
示例17: make_floppy
def make_floppy(kickstart):
    """Build a vfat floppy image containing the kickstart file.

    Creates a blank 1.44MB-style image, formats it, loop-mounts it,
    downloads `kickstart` onto it as unattended.txt, unmounts, and
    returns the image path (for virt-install).  Requires root for the
    loop mount.  Raises InfoException when any shell step fails.
    """
    (fd, floppy_path) = tempfile.mkstemp(
        suffix='.floppy', prefix='tmp', dir="/tmp")
    # BUG FIX: the descriptor returned by mkstemp was never closed.
    os.close(fd)
    print("- creating floppy image at %s" % floppy_path)

    def _run(cmd, err):
        # Echo and run one shell step; raise InfoException on failure.
        print("- %s" % cmd)
        if os.system(cmd) != 0:
            raise InfoException(err)

    # create the floppy image file, then put a FAT filesystem on it
    _run("dd if=/dev/zero of=%s bs=1440 count=1024" % floppy_path, "dd failed")
    _run("mkdosfs %s" % floppy_path, "mkdosfs failed")
    # loop-mount the image so the kickstart can be copied in
    mount_path = tempfile.mkdtemp(suffix=".mnt", prefix='tmp', dir="/tmp")
    _run("mount -o loop -t vfat %s %s" % (floppy_path, mount_path),
         "mount failed")
    try:
        # download the kickstart file onto the mounted floppy
        print("- downloading %s" % kickstart)
        save_file = os.path.join(mount_path, "unattended.txt")
        urlgrabber.urlgrab(kickstart, filename=save_file)
    finally:
        # BUG FIX: always unmount, even when the download raises,
        # so the loop mount is not leaked.
        _run("umount %s" % mount_path, "umount failed")
    # return the path to the completed disk image to pass to virt-install
    return floppy_path
开发者ID:Acidburn0zzz,项目名称:cobbler,代码行数:42,代码来源:utils.py
示例18: download_rpms
def download_rpms(pkg, outdir):
    """Download the RPM for `pkg` via the package URL service.

    :param pkg: A dict contains RPM basic information other than url
    :param outdir: Where to save RPM[s]
    :return: Local path of the downloaded RPM (urlgrabber.urlgrab result)
    """
    url = RS.call("packages.getPackageUrl", [pkg["id"]], ["--no-cache"])[0]
    # BUG FIX: `url` is a single string; ', '.join(url) interleaved every
    # character with ", " in the log message.
    logging.info("RPM URL: %s", url)
    return urlgrabber.urlgrab(url, os.path.join(outdir, os.path.basename(url)))
开发者ID:ssato,项目名称:rpmkit,代码行数:11,代码来源:make_rpmdb.py
示例19: get_images
def get_images(active=True, outdir='player_images', outlist='player_names.csv'):
    """Scrape NBA player headshots into outdir and record their names.

    :param active: when True use the active-players directory page,
        otherwise the full player listing.
    :param outdir: directory for downloaded <name>.png images.
    :param outlist: CSV file receiving one player name per line.
    """
    import bs4, urlgrabber, httplib
    # renamed from `list` to avoid shadowing the builtin
    if active:
        list_url = 'http://stats.nba.com/frags/stats-site-page-players-directory-active.html'
    else:
        list_url = 'http://stats.nba.com/players.html'
    # fetch and parse the NBA player list
    player_page = urlgrabber.urlread(list_url)
    soup = bs4.BeautifulSoup(player_page)
    # `with` guarantees the name list is closed even on error
    # (the original leaked the handle if an exception escaped)
    with open(outlist, 'w') as flist:
        flist.write('# name\n')
        # loop through the player list
        for p in soup('a', 'playerlink'):
            phref = str(p['href'])
            # verify that a headshot exists before downloading it
            pname = phref.split('/')[-1]
            conn = httplib.HTTPConnection('i.cdn.turner.com')
            conn.request('HEAD', '/nba/nba/.element/img/2.0/sect/statscube/players/large/'+pname+'.png')
            found = (conn.getresponse().status != 404)
            # BUG FIX: connections were never closed (one per player)
            conn.close()
            if found:
                # download and save player images
                img_link = 'http://i.cdn.turner.com/nba/nba/.element/img/2.0/sect/statscube/players/large/'+pname+'.png'
                urlgrabber.urlgrab(img_link, filename=outdir+'/'+pname+'.png')
                # write player names to list
                flist.write(pname+'\n')
    return
开发者ID:ttal,项目名称:NBA-hair,代码行数:40,代码来源:hair.py
示例20: download
def download(url, progress=False):
    """Download the document at url into the cwd and return its filename.

    Skips the download when the file already exists.  With progress=True
    uses urlgrabber's text meter; otherwise plain urllib.
    """
    filename = get_filename(url)
    if os.path.exists(filename):
        info(filename + " already exists in cwd. Not downloading. ")
        return filename
    debug("Downloading " + url)
    if progress:
        # urlgrabber is only needed for the progress meter path
        import urlgrabber
        from urlgrabber.progress import text_progress_meter
        urlgrabber.urlgrab(url=url, filename=filename,
                           progress_obj=text_progress_meter())
    else:
        urllib.urlretrieve(url=url, filename=filename)
    debug("Finished Downloading " + filename)
    return filename
开发者ID:pombredanne,项目名称:archiver,代码行数:23,代码来源:utils.py
注:本文中的urlgrabber.urlgrab函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论