本文整理汇总了Python中module.network.RequestFactory.getURL函数的典型用法代码示例。如果您正苦于以下问题:Python getURL函数的具体用法?Python getURL怎么用?Python getURL使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了getURL函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: notify
def notify(self,
           event,
           msg="",
           key=None):
    """Send a push notification to NotifyMyAndroid.

    :param event: short event name shown as the notification title.
    :param msg: longer description text (may be empty).
    :param key: NotifyMyAndroid API key; when None, the configured
                'apikey' plugin option is used.

    Rate limiting: at most one notification per 'sendtimewait' seconds,
    and at most 'sendpermin' notifications per rolling minute.
    """
    # BUGFIX: the original declared key=self.getConfig('apikey') as the
    # default value, which is invalid Python (`self` does not exist when
    # the def statement is evaluated). Resolve the config lazily instead.
    if key is None:
        key = self.getConfig('apikey')
    if not key:
        return
    # Optionally stay silent while a pyLoad client is connected.
    if self.core.isClientConnected() and not self.getConfig('ignoreclient'):
        return
    elapsed_time = time.time() - self.last_notify
    if elapsed_time < self.getConf("sendtimewait"):
        return
    if elapsed_time > 60:
        # More than a minute since the last push: reset the per-minute counter.
        self.notifications = 0
    elif self.notifications >= self.getConf("sendpermin"):
        return
    getURL("http://www.notifymyandroid.com/publicapi/notify",
           get={'apikey'     : key,
                'application': "pyLoad",
                'event'      : event,
                'description': msg})
    self.last_notify = time.time()
    self.notifications += 1
开发者ID:kurtiss,项目名称:htpc,代码行数:31,代码来源:AndroidPhoneNotify.py
示例2: periodical
def periodical(self):
    """Scan hd-area.org category pages and queue/collect new releases.

    Scrapes the "top-rls", "movies" and "Old_Stuff" listings (plus
    "Cinedubs" when enabled) via self.get_title(), then fires Pushover /
    Pushbullet notifications for anything that was added.
    """
    self.items_to_queue = []
    self.items_to_collector = []
    for site in ("top-rls", "movies", "Old_Stuff"):
        address = "http://hd-area.org/index.php?s=" + site
        soup = BeautifulSoup(getURL(address))
        self.get_title(soup)
    # Idiom fix: truthiness test instead of "== True".
    if self.get_config("cinedubs"):
        address = "http://hd-area.org/index.php?s=Cinedubs"
        soup = BeautifulSoup(getURL(address))
        self.get_title(soup)
    # Idiom fix: the original abused conditional expressions as statements
    # ("notify(...) if items else True"); plain if-statements are clearer
    # and behave identically. An API key is considered set when len > 2.
    if len(self.get_config("pushoverapi")) > 2:
        if self.items_to_queue:
            notifyPushover(self.get_config("pushoverapi"), self.items_to_queue, "QUEUE")
        if self.items_to_collector:
            notifyPushover(self.get_config("pushoverapi"), self.items_to_collector, "COLLECTOR")
    if len(self.get_config("pushbulletapi")) > 2:
        if self.items_to_queue:
            notifyPushbullet(self.get_config("pushbulletapi"), self.items_to_queue, "QUEUE")
        if self.items_to_collector:
            notifyPushbullet(self.get_config("pushbulletapi"), self.items_to_collector, "COLLECTOR")
开发者ID:kotarokun,项目名称:pyLoad-stuff,代码行数:27,代码来源:HDAreaOrg.py
示例3: getInfo
def getInfo(urls):
    """Yield pyLoad info tuples for FileFactory links.

    A header-only request is made first; when the Location header points
    outside FileFactory's URL pattern the link is a direct download and is
    reported as status 3 without fetching the page.
    """
    for url in urls:
        h = getURL(url, just_header=True)
        m = re.search(r'Location: (.+)\r\n', h)
        # BUGFIX: re.match(pattern, string) — the original passed the
        # arguments swapped (re.match(m.group(1), FilefactoryCom.__pattern__)),
        # using the redirect target as the regex instead of the subject.
        if m and not re.match(FilefactoryCom.__pattern__, m.group(1)):  #: It's a direct link! Skipping
            yield (url, 0, 3, url)
        else:  #: It's a standard html page
            yield parseFileInfo(FilefactoryCom, url, getURL(url))
开发者ID:kurtiss,项目名称:htpc,代码行数:8,代码来源:FilefactoryCom.py
示例4: getInfo
def getInfo(urls):
    """Yield (name, size, status, url) tuples for cloudzer.net links."""
    for link in urls:
        headers = getURL(link, just_header=True)
        if 'Location: http://cloudzer.net/404' in headers:
            # Redirected to the 404 page -> file is offline (status 1).
            yield (link, 0, 1, link)
        else:
            # Parse the full page for name/size/status.
            yield parseFileInfo(CloudzerNet, link, getURL(link, decode=True))
开发者ID:AlexandrePinheiro,项目名称:pyload,代码行数:8,代码来源:CloudzerNet.py
示例5: processCaptcha
def processCaptcha(self, task):
    """Upload the captcha of *task* to the 9kw.eu service and poll for the answer.

    The image is sent base64-encoded via the "usercaptchaupload" action;
    on success the API returns a numeric captcha ID which is then polled
    with "usercaptchacorrectdata" until a non-empty answer arrives.

    :param task: pyLoad captcha task holding the image file and result slot.
    :return: False on a failed upload, otherwise None (result stored on task).
    """
    result = None
    # Read the captcha image and base64-encode it for the upload field.
    with open(task.captchaFile, "rb") as f:
        data = f.read()
    data = b64encode(data)
    self.logDebug("%s : %s" % (task.captchaFile, data))
    # Positional captchas (click coordinates) are flagged for the solver.
    if task.isPositional():
        mouse = 1
    else:
        mouse = 0
    response = getURL(
        self.API_URL,
        post={
            "apikey": self.getConfig("passkey"),
            "prio": self.getConfig("prio"),
            "confirm": self.getConfig("confirm"),
            "captchaperhour": self.getConfig("captchaperhour"),
            "maxtimeout": self.getConfig("timeout"),
            "pyload": "1",
            "source": "pyload",
            "base64": "1",  # payload is base64 text, not a multipart upload
            "mouse": mouse,
            "file-upload-01": data,
            "action": "usercaptchaupload",
        },
    )
    # A purely numeric response is the new captcha ID; anything else is an error.
    if response.isdigit():
        self.logInfo(_("NewCaptchaID from upload: %s : %s" % (response, task.captchaFile)))
        # Poll up to 99 times, 3 seconds apart, for the solved text.
        for i in range(1, 100, 1):
            response2 = getURL(
                self.API_URL,
                get={
                    "apikey": self.getConfig("passkey"),
                    "id": response,
                    "pyload": "1",
                    "source": "pyload",
                    "action": "usercaptchacorrectdata",
                },
            )
            if response2 != "":
                break
            time.sleep(3)
        result = response2
        # Remember the ticket so later feedback (correct/refund) can reference it.
        task.data["ticket"] = response
        self.logInfo("result %s : %s" % (response, result))
        task.setResult(result)
    else:
        self.logError("Bad upload: %s" % response)
        return False
开发者ID:jmcabgam,项目名称:pyload,代码行数:56,代码来源:Captcha9kw.py
示例6: respond
def respond(ticket, value):
conf = join(expanduser("~"), "ct.conf")
f = open(conf, "rb")
try:
getURL("http://captchatrader.com/api/respond",
post={"is_correct": value,
"username": f.readline().strip(),
"password": f.readline().strip(),
"ticket": ticket})
except Exception, e :
print "CT Exception:", e
log(DEBUG, str(e))
开发者ID:4Christopher,项目名称:pyload,代码行数:12,代码来源:PluginTester.py
示例7: getInfo
def getInfo(urls):
    """Yield (name, size, status, url) tuples for cloudzer.net links via the status API."""
    for link in urls:
        headers = getURL(link, just_header=True)
        if 'Location: http://cloudzer.net/404' in headers:
            # 404 redirect -> file is offline (status 1).
            yield (link, 0, 1, link)
            continue
        # Query "<url>/status", taking care not to double the slash.
        if link.endswith('/'):
            api_data = getURL(link + 'status')
        else:
            api_data = getURL(link + '/status')
        # The endpoint answers with two lines: file name, then size text.
        name, raw_size = api_data.splitlines()
        yield (name, parseFileSize(raw_size), 2, link)
开发者ID:jmcabgam,项目名称:pyload,代码行数:14,代码来源:CloudzerNet.py
示例8: getHoster
def getHoster(self):
# If no accounts are available there will be no hosters available
if not self.account or not self.account.canUse():
print "ReloadCc: No accounts available"
return []
# Get account data
(user, data) = self.account.selectAccount()
# Get supported hosters list from reload.cc using the json API v1
query_params = dict(
via='pyload',
v=1,
get_supported='true',
get_traffic='true',
user=user
)
try:
query_params.update(dict(hash=self.account.infos[user]['pwdhash']))
except Exception:
query_params.update(dict(pwd=data['password']))
answer = getURL("http://api.reload.cc/login", get=query_params)
data = json_loads(answer)
# If account is not valid thera are no hosters available
if data['status'] != "ok":
print "ReloadCc: Status is not ok: %s" % data['status']
return []
# Extract hosters from json file
return data['msg']['supportedHosters']
开发者ID:4Christopher,项目名称:pyload,代码行数:34,代码来源:ReloadCc.py
示例9: getRtUpdate
def getRtUpdate(self):
    """Fetch and cache turbobit's anti-bot javascript ("rtUpdate").

    The script is kept in plugin storage and refreshed when the plugin
    version changed or the cached copy is older than 24 h (86400000 ms).

    :return: the (patched) javascript source, or the stale value when a
             refresh was not permitted (plugin goes temp-offline then).
    """
    rtUpdate = self.getStorage("rtUpdate")
    if not rtUpdate:
        # Only (re-)download when the version changed or the cache expired.
        if (
            self.getStorage("version") != self.__version__
            or int(self.getStorage("timestamp", 0)) + 86400000 < timestamp()
        ):
            # that's right, we are even using jdownloader updates
            rtUpdate = getURL("http://update0.jdownloader.org/pluginstuff/tbupdate.js")
            rtUpdate = self.decrypt(rtUpdate.splitlines()[1])
            # but we still need to fix the syntax to work with other engines than rhino
            # Rewrite rhino-only "for each(var x in [...]){" loops into
            # plain indexed for-loops other JS engines understand.
            rtUpdate = re.sub(
                r"for each\(var (\w+) in(\[[^\]]+\])\)\{",
                r"zza=\2;for(var zzi=0;zzi<zza.length;zzi++){\1=zza[zzi];",
                rtUpdate,
            )
            # Declare loop variables: "for(i=" -> "for(var i=".
            rtUpdate = re.sub(r"for\((\w+)=", r"for(var \1=", rtUpdate)
            self.logDebug("rtUpdate")
            self.setStorage("rtUpdate", rtUpdate)
            self.setStorage("timestamp", timestamp())
            self.setStorage("version", self.__version__)
        else:
            self.logError("Unable to download, wait for update...")
            self.tempOffline()
    return rtUpdate
开发者ID:Robbi373,项目名称:pyload,代码行数:27,代码来源:TurbobitNet.py
示例10: getInfo
def getInfo(urls):
    """Resolve Dailymotion video links to pyLoad info tuples.

    :param urls: iterable of video URLs matching DailymotionCom.__pattern__.
    :return: list of (name, size, status, url); size is always 0 since the
             API request does not report one.
    """
    result = []  #: [ .. (name, size, status, url) .. ]
    regex = re.compile(DailymotionCom.__pattern__)
    apiurl = "https://api.dailymotion.com/video/"
    request = {"fields": "access_error,status,title"}
    for url in urls:
        # Renamed from `id` to avoid shadowing the builtin.
        video_id = regex.search(url).group("ID")
        page = getURL(apiurl + video_id, get=request)
        info = json_loads(page)

        # Prefer the API title as file name; fall back to the URL itself.
        if "title" in info:
            name = info["title"] + ".mp4"
        else:
            name = url

        if "error" in info or info["access_error"]:
            status = "offline"
        else:
            # Map the API state onto pyLoad's online/offline vocabulary.
            status = info["status"]
            if status in ("ready", "published"):
                status = "online"
            elif status in ("waiting", "processing"):
                status = "temp. offline"
            else:
                status = "offline"
        result.append((name, 0, statusMap[status], url))
    return result
开发者ID:kurtiss,项目名称:htpc,代码行数:28,代码来源:DailymotionCom.py
示例11: getInfo
def getInfo(urls):
    """Check putlocker.com links and yield one list of (name, size, status, url).

    Status codes: 1 = offline, 2 = online. Size is not parsed (always 0).
    """
    result = []
    for url in urls:
        html = getURL(url)
        if re.search(PutlockerCom.PATTERN_OFFLINE, html):
            result.append((url, 0, 1, url))
            continue
        # Try both known filename patterns in order.
        name = re.search(PutlockerCom.PATTERN_FILENAME_1, html)
        if name is None:
            name = re.search(PutlockerCom.PATTERN_FILENAME_2, html)
        if name is None:
            # No recognizable filename -> treat the link as offline.
            result.append((url, 0, 1, url))
            continue
        result.append((name.group(1), 0, 2, url))
        # size = re.search(PutlockerCom.PATTERN_FILESIZE, html)
        # if size is None:
        #     result.append((url, 0, 1, url))
        #     continue
        # size = size.group(1)
    # BUGFIX: yield the accumulated list once, after all URLs are handled.
    # The original appears to have yielded the (growing) list inside the
    # loop, handing consumers the same entries repeatedly.
    yield result
开发者ID:masterwaster,项目名称:pyload,代码行数:27,代码来源:PutlockerCom.py
示例12: process
def process(self, pyfile):
    """Entry point: download *pyfile* from an XFileSharing-based hoster.

    URLs matching this plugin's pattern are fetched and parsed; URLs of
    other hosters can only be routed through here by premium accounts
    (hoster-override mode).
    """
    self.prepare()
    if not re.match(self.__pattern__, self.pyfile.url):
        if self.premium:
            # Premium accounts may download foreign-hoster links via this one.
            self.handleOverriden()
        else:
            self.fail("Only premium users can download from other hosters with %s" % self.HOSTER_NAME)
    else:
        try:
            # Due to a 0.4.9 core bug self.load would use cookies even if
            # cookies=False. Workaround using getURL to avoid cookies.
            # Can be reverted in 0.5 as the cookies bug has been fixed.
            self.html = getURL(pyfile.url, decode=True)
            self.file_info = self.getFileInfo()
        except PluginParseError:
            # Page could not be parsed; fall back to a direct-link probe.
            self.file_info = None
        self.location = self.getDirectDownloadLink()
        if not self.file_info:
            # Derive a provisional file name from the last URL path segment.
            pyfile.name = html_unescape(
                unquote(urlparse(self.location if self.location else pyfile.url).path.split("/")[-1])
            )
        # Dispatch: direct link > premium flow > free flow.
        if self.location:
            self.startDownload(self.location)
        elif self.premium:
            self.handlePremium()
        else:
            self.handleFree()
开发者ID:wangjun,项目名称:pyload,代码行数:31,代码来源:XFileSharingPro.py
示例13: check_for_new_or_removed_hosters
def check_for_new_or_removed_hosters(self, hosters):
    """Compare the known hoster list against the live one on vipleech4u.com.

    :param hosters: mapping whose keys are the currently known hoster names
                    (assumed lower-case - TODO confirm against caller).
    Logs hosters that appeared or disappeared; logs "still valid" otherwise.
    """
    #get the old hosters
    old_hosters = hosters.keys()
    #load the current hosters from vipleech4u.com (lower-cased for comparison)
    page = getURL('http://vipleech4u.com/hosts.php')
    current_hosters = [x.lower() for x in self.HOSTER_PATTERN.findall(page)]
    #diff in both directions via comprehensions
    new_hosters = [h for h in current_hosters if h not in old_hosters]
    removed_hosters = [h for h in old_hosters if h not in current_hosters]
    if new_hosters:
        self.logDebug('The following new hosters were found on vipleech4u.com: %s' % str(new_hosters))
    if removed_hosters:
        self.logDebug('The following hosters were removed from vipleech4u.com: %s' % str(removed_hosters))
    # BUGFIX: the original tested `not (new_hosters and removed_hosters)`,
    # which claimed the list was "still valid" unless hosters were BOTH
    # added and removed in the same run.
    if not (new_hosters or removed_hosters):
        self.logDebug('The hoster list is still valid.')
开发者ID:3DMeny,项目名称:pyload,代码行数:31,代码来源:Vipleech4uCom.py
示例14: getCredits
def getCredits(self):
    """Query BypassCaptcha for the credits left on the configured passkey.

    :return: remaining credits as an int.
    """
    reply = getURL(self.GETCREDITS_URL,
                   post={"key": self.getConfig("passkey")})
    # The response is "<Name> <value>" per line; pick the "Left" entry.
    fields = dict(line.split(' ', 1) for line in reply.splitlines())
    return int(fields['Left'])
开发者ID:4Christopher,项目名称:pyload,代码行数:7,代码来源:BypassCaptcha.py
示例15: periodical
def periodical(self):
    """Fetch the configured user's trakt.tv watchlist and search new titles.

    Titles already marked "downloaded" in plugin storage are skipped;
    the rest are handed to self.search(). Afterwards Pushover/Pushbullet
    notifications are sent for whatever landed in self.items_to_pyload.
    """
    html_parser = HTMLParser.HTMLParser()
    self.items_to_pyload = []
    address = "https://trakt.tv/users/%s/watchlist" % self.get_config("traktuser")
    soup = BeautifulSoup(getURL(address))
    trakttitles = []
    # Get Trakt Watchlist Titles
    # (loop variable renamed from `all`, which shadowed the builtin)
    for title_block in soup.findAll("div", {"class": "titles"}):
        for heading in title_block.findAll("h3"):
            title = replaceUmlauts(html_parser.unescape(heading.get_text()))
            if self.retrieve(title) == "downloaded":
                self.log_debug(title + ": already found and downloaded")
            else:
                trakttitles.append(title)
    self.search(trakttitles)
    # Pushnotification
    # Idiom fix: the original abused conditional expressions as statements
    # ("notify(...) if items else True"); plain if-statements behave the
    # same. An API key is considered configured when len > 2.
    if len(self.get_config("pushoverapi")) > 2:
        if self.items_to_pyload:
            notifyPushover(self.get_config("pushoverapi"), self.items_to_pyload)
    if len(self.get_config("pushbulletapi")) > 2:
        if self.items_to_pyload:
            notifyPushbullet(self.get_config("pushbulletapi"), self.items_to_pyload)
开发者ID:muschikatz3,项目名称:pyLoad-stuff,代码行数:28,代码来源:TraktFetcher.py
示例16: loadPatterns
def loadPatterns(self):
    """Build this decrypter's URL pattern from linkdecrypter.com's hoster list.

    Scrapes the "Supported(N)" list, drops crypters already covered by
    built-in pyLoad plugins, and installs a combined regexp into this
    plugin's entry in the plugin manager.
    """
    page = getURL("http://linkdecrypter.com/")
    m = re.search(r'<b>Supported\(\d+\)</b>: <i>([^+<]*)', page)
    if not m:
        self.logError(_("Crypter list not found"))
        return

    # Crypters pyLoad already handles natively are excluded.
    builtin = [name.lower() for name in self.core.pluginManager.crypterPlugins.keys()]
    builtin.extend(["downloadserienjunkiesorg"])

    crypter_pattern = re.compile("(\w[\w.-]+)")
    online = []
    for crypter in m.group(1).split(', '):
        # Use a separate match variable instead of reusing `m`.
        cm = re.match(crypter_pattern, crypter)
        if cm and remove_chars(cm.group(1), "-.") not in builtin:
            online.append(cm.group(1).replace(".", "\\."))

    if not online:
        self.logError(_("Crypter list is empty"))
        return

    regexp = r"https?://([^.]+\.)*?(%s)/.*" % "|".join(online)
    # Renamed from `dict` to avoid shadowing the builtin.
    plugin_entry = self.core.pluginManager.crypterPlugins[self.__name__]
    plugin_entry["pattern"] = regexp
    plugin_entry["re"] = re.compile(regexp)
    self.logDebug("REGEXP: " + regexp)
开发者ID:3DMeny,项目名称:pyload,代码行数:28,代码来源:LinkdecrypterCom.py
示例17: getInfo
def getInfo(cls, url="", html=""):
info = cls.apiInfo(url)
online = True if info['status'] is 2 else False
try:
info['pattern'] = re.match(cls.__pattern__, url).groupdict() #: pattern groups will be saved here
except Exception:
info['pattern'] = {}
if not html and not online:
if not url:
info['error'] = "missing url"
info['status'] = 1
elif info['status'] is 3:
try:
html = getURL(url, cookies=cls.COOKIES, decode=not cls.TEXT_ENCODING)
if isinstance(cls.TEXT_ENCODING, basestring):
html = unicode(html, cls.TEXT_ENCODING)
except BadHeader, e:
info['error'] = "%d: %s" % (e.code, e.content)
if e.code is 404:
info['status'] = 1
elif e.code is 503:
info['status'] = 6
except Exception:
pass
开发者ID:kurtiss,项目名称:htpc,代码行数:33,代码来源:SimpleHoster.py
示例18: getInfo
def getInfo(urls):
    """Batch-query the DDLStorage API and yield lists of info tuples.

    File codes are resolved five at a time per signed API request.
    """
    # DDLStorage API Documentation:
    # http://www.ddlstorage.com/cgi-bin/api_req.cgi?req_type=doc
    ids = dict()
    for url in urls:
        m = re.search(DdlstorageCom.__pattern__, url)
        ids[m.group('ID')] = url

    for chunk in chunks(ids.keys(), 5):
        codes = ','.join(chunk)
        # Each request is authenticated with an md5 signature over the payload.
        signature = md5('file_info_free%d%s%s' % (53472, codes,
                        '25JcpU2dPOKg8E2OEoRqMSRu068r0Cv3')).hexdigest()
        raw = getURL('http://www.ddlstorage.com/cgi-bin/api_req.cgi',
                     post={'req_type': 'file_info_free',
                           'client_id': 53472,
                           'file_code': codes,
                           'sign': signature})
        # Strip the <pre> wrapper before decoding the JSON body.
        reply = json_loads(raw.replace('<pre>', '').replace('</pre>', ''))
        result = list()
        for entry in reply:
            if entry['status'] == 'online':
                result.append((entry['file_name'], int(entry['file_size']), 2, ids[entry['file_code']]))
            else:
                result.append((ids[entry['file_code']], 0, 1, ids[entry['file_code']]))
        yield result
开发者ID:ASCIIteapot,项目名称:pyload,代码行数:25,代码来源:DdlstorageCom.py
示例19: getHoster
def getHoster(self):
    """Return the hoster domains supported by unrestrict.li.

    Fetched from the public jdownloader-compatible JSON endpoint.
    """
    raw = getURL("http://unrestrict.li/api/jdownloader/hosts.php?format=json")
    parsed = json_loads(raw)
    return [entry["host"] for entry in parsed["result"]]
开发者ID:B1GPY,项目名称:pyload,代码行数:7,代码来源:UnrestrictLi.py
示例20: _captchaResponse
def _captchaResponse(self, task, correct):
    """Report captcha feedback (correct / refund) back to the 9kw.eu API.

    Retries up to three times, five seconds apart, until the API answers "OK".

    :param task: captcha task whose data["ticket"] holds the 9kw captcha ID.
    :param correct: True confirms the answer; False requests a refund.
    """
    # Renamed from `type` to avoid shadowing the builtin.
    feedback = "correct" if correct else "refund"
    if 'ticket' not in task.data:
        self.logDebug("No CaptchaID for %s request (task: %s)" % (feedback, task))
        return
    passkey = self.getConfig("passkey")
    for _i in xrange(3):
        res = getURL(self.API_URL,
                     get={'action' : "usercaptchacorrectback",
                          'apikey' : passkey,
                          'api_key': passkey,
                          'correct': "1" if correct else "2",
                          'pyload' : "1",
                          'source' : "pyload",
                          'id'     : task.data["ticket"]})
        self.logDebug("Request %s: %s" % (feedback, res))
        if res == "OK":
            break
        sleep(5)
    else:
        # for/else: every attempt finished without an "OK" response.
        self.logDebug("Could not send %s request: %s" % (feedback, res))
开发者ID:kurtiss,项目名称:htpc,代码行数:27,代码来源:Captcha9kw.py
注:本文中的module.network.RequestFactory.getURL函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论