This article collects typical usage examples of the Python function mechanize._rfc3986.urljoin. If you have been wondering what urljoin does, how to call it, or what real-world usage looks like, the hand-picked code examples below should help.
The article presents 20 code examples of the urljoin function, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps our system recommend better Python code examples.
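Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what mechanize._rfc3986.urljoin does: it resolves a possibly relative URI reference against a base URI following RFC 3986, which is exactly how the snippets below combine self.uri with paths such as "test_fixtures/cctest2.txt". The base URL http://example.com/tests/ is hypothetical, and the snippet assumes a Python 2 environment with mechanize installed, since the examples below come from mechanize's Python 2 era code.

from mechanize._rfc3986 import urljoin

# Hypothetical base URI, standing in for the self.uri used in the test snippets.
base = "http://example.com/tests/"

# A relative reference is resolved against the base path.
print urljoin(base, "test_fixtures/cctest2.txt")
# -> http://example.com/tests/test_fixtures/cctest2.txt

# A reference starting with '/' replaces the base path entirely.
print urljoin(base, "/cgi-bin/cookietest.cgi")
# -> http://example.com/cgi-bin/cookietest.cgi

# An absolute reference is returned unchanged.
print urljoin(base, "http://other.example.org/robots.txt")
# -> http://other.example.org/robots.txt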
Example 1: check_no_seek

def check_no_seek(opener):
    r = opener.open(urljoin(self.uri, "test_fixtures/cctest2.txt"))
    self.assert_(not hasattr(r, "seek"))
    try:
        opener.open(urljoin(self.uri, "nonexistent"))
    except mechanize.HTTPError, exc:
        self.assert_(not hasattr(exc, "seek"))

Developer: kovidgoyal, Project: mechanize, Lines: 7, Source: test_functional.py
Example 2: test_robots

def test_robots(self):
    plain_opener = self.build_opener(
        [mechanize.HTTPRobotRulesProcessor])
    browser = self.make_browser()
    for opener in plain_opener, browser:
        opener.open(urljoin(self.uri, "robots"))
        self.assertRaises(
            mechanize.RobotExclusionError,
            opener.open, urljoin(self.uri, "norobots"))

Developer: kovidgoyal, Project: mechanize, Lines: 9, Source: test_functional.py
Example 3: test_robots

def test_robots(self):
    plain_opener = mechanize.build_opener(mechanize.HTTPRobotRulesProcessor)
    browser = mechanize.Browser()
    for opener in plain_opener, browser:
        r = opener.open(urljoin(self.uri, "robots"))
        self.assertEqual(r.code, 200)
        self.assertRaises(
            mechanize.RobotExclusionError,
            opener.open, urljoin(self.uri, "norobots"))

Developer: Almad, Project: Mechanize, Lines: 9, Source: functional_tests.py
Example 4: test_referer

def test_referer(self):
    br = self.make_browser()
    br.set_handle_refresh(True, honor_time=False)
    referer = urljoin(self.uri, "test_fixtures/referertest.html")
    info = urljoin(self.uri, "/cgi-bin/cookietest.cgi")

    r = br.open(info)
    self.assert_(referer not in r.get_data())
    br.open(referer)
    r = br.follow_link(text="Here")
    self.assert_(referer in r.get_data())

Developer: kovidgoyal, Project: mechanize, Lines: 11, Source: test_functional.py
Example 5: check

def check(opener, excs_also):
    r = opener.open(urljoin(self.uri, "test_fixtures/cctest2.txt"))
    data = r.read()
    r.seek(0)
    self.assertEqual(data, r.read(), r.get_data())
    try:
        opener.open(urljoin(self.uri, "nonexistent"))
    except mechanize.HTTPError, exc:
        data = exc.read()
        if excs_also:
            exc.seek(0)
            self.assertEqual(data, exc.read(), exc.get_data())

Developer: kovidgoyal, Project: mechanize, Lines: 12, Source: test_functional.py
Example 6: test_seek_wrapper_class_name

def test_seek_wrapper_class_name(self):
    opener = self.make_user_agent()
    opener.set_seekable_responses(True)
    try:
        opener.open(urljoin(self.uri, "nonexistent"))
    except mechanize.HTTPError, exc:
        self.assert_("HTTPError instance" in repr(exc))

Developer: kovidgoyal, Project: mechanize, Lines: 7, Source: test_functional.py
Example 7: test_reload_read_incomplete

def test_reload_read_incomplete(self):
    browser = self.make_browser()
    r1 = browser.open(urljoin(self.uri,
                              "test_fixtures/mechanize_reload_test.html"))
    # if we don't do anything and go straight to another page, most of the
    # last page's response won't be .read()...
    browser.open(urljoin(self.uri, "mechanize"))
    self.assert_(len(r1.get_data()) < 4097)  # we only .read() a little bit
    # ...so if we then go back, .follow_link() for a link near the end (a
    # few kb in, past the point that always gets read in HTML files because
    # of HEAD parsing) will only work if it causes a .reload()...
    r3 = browser.back()
    browser.follow_link(text="near the end")
    # ... good, no LinkNotFoundError, so we did reload.
    # we have .read() the whole file
    self.assertEqual(len(r3._seek_wrapper__cache.getvalue()), 4202)

Developer: kovidgoyal, Project: mechanize, Lines: 16, Source: test_functional.py
Example 8: test_cookies

def test_cookies(self):
    import urllib2
    # this test page depends on cookies, and an http-equiv refresh
    #cj = CreateBSDDBCookieJar("/home/john/db.db")
    cj = CookieJar()
    handlers = [
        HTTPCookieProcessor(cj),
        HTTPRefreshProcessor(max_time=None, honor_time=False),
        HTTPEquivProcessor(),
        HTTPRedirectHandler(),  # needed for Refresh handling in 2.4.0
        # HTTPHandler(True),
        # HTTPRedirectDebugProcessor(),
        # HTTPResponseDebugProcessor(),
        ]
    o = apply(build_opener, handlers)
    try:
        install_opener(o)
        try:
            r = urlopen(urljoin(self.uri, "/cgi-bin/cookietest.cgi"))
        except urllib2.URLError, e:
            #print e.read()
            raise
        data = r.read()
        #print data
        self.assert_(
            data.find("Your browser supports cookies!") >= 0)
        self.assert_(len(cj) == 1)

        # test response.seek() (added by HTTPEquivProcessor)
        r.seek(0)
        samedata = r.read()
        r.close()
        self.assert_(samedata == data)
    finally:
        install_opener(None)

Developer: Almad, Project: Mechanize, Lines: 35, Source: functional_tests.py
Example 9: test_cookies

def test_cookies(self):
    # this test page depends on cookies, and an http-equiv refresh
    #cj = CreateBSDDBCookieJar("/home/john/db.db")
    cj = CookieJar()
    handlers = [
        HTTPCookieProcessor(cj),
        HTTPRefreshProcessor(max_time=None, honor_time=False),
        HTTPEquivProcessor(),
        HTTPRedirectHandler(),  # needed for Refresh handling in 2.4.0
        # HTTPHandler(True),
        # HTTPRedirectDebugProcessor(),
        # HTTPResponseDebugProcessor(),
        ]
    opener = self.build_opener(handlers)
    r = opener.open(urljoin(self.uri, "/cgi-bin/cookietest.cgi"))
    data = r.read()
    self.assert_(data.find("Your browser supports cookies!") >= 0)
    self.assertEquals(len(cj), 2)

    # test response.seek() (added by HTTPEquivProcessor)
    r.seek(0)
    samedata = r.read()
    r.close()
    self.assertEquals(samedata, data)

Developer: kovidgoyal, Project: mechanize, Lines: 26, Source: test_functional.py
Example 10: test_302_and_404

def test_302_and_404(self):
    # the combination of 302 and 404 (/redirected is configured to redirect
    # to a non-existent URL /nonexistent) has caused problems in the past
    # due to accidental double-wrapping of the error response
    self.assertRaises(
        mechanize.HTTPError,
        self.browser.open, urljoin(self.uri, "/redirected"),
        )

Developer: kovidgoyal, Project: mechanize, Lines: 8, Source: test_functional.py
Example 11: test_seekable_response_opener

def test_seekable_response_opener(self):
    opener = mechanize.OpenerFactory(
        mechanize.SeekableResponseOpener).build_opener()
    r = opener.open(urljoin(self.uri, "bits/cctest2.txt"))
    r.read()
    r.seek(0)
    self.assertEqual(r.read(),
                     r.get_data(),
                     "Hello ClientCookie functional test suite.\n")

Developer: Almad, Project: Mechanize, Lines: 9, Source: functional_tests.py
Example 12: test_retrieve_to_named_file

def test_retrieve_to_named_file(self):
    url = urljoin(self.uri, "/mechanize/")
    test_filename = os.path.join(self.make_temp_dir(), "python.html")
    opener = self.build_opener()
    verif = CallbackVerifier(self)
    filename, headers = opener.retrieve(url, test_filename, verif.callback)
    self.assertEqual(filename, test_filename)
    self._check_retrieve(url, filename, headers)
    self.assert_(os.path.isfile(filename))

Developer: kovidgoyal, Project: mechanize, Lines: 9, Source: test_functional.py
Example 13: test_redirect_with_timeout

def test_redirect_with_timeout(self):
    timeout_log = self._monkey_patch_socket()
    timeout = 10.
    # 301 redirect due to missing final '/'
    req = mechanize.Request(urljoin(self.test_uri, "test_fixtures"),
                            timeout=timeout)
    r = self.browser.open(req)
    self.assert_("GeneralFAQ.html" in r.read(2048))
    timeout_log.verify(timeout)

Developer: kovidgoyal, Project: mechanize, Lines: 9, Source: test_functional.py
Example 14: test_urlretrieve

def test_urlretrieve(self):
    timeout_log = self._monkey_patch_socket()
    timeout = 10.
    url = urljoin(self.uri, "/mechanize/")
    verif = CallbackVerifier(self)
    filename, headers = mechanize.urlretrieve(url,
                                              reporthook=verif.callback,
                                              timeout=timeout)
    timeout_log.stop()
    self._check_retrieve(url, filename, headers)
    timeout_log.verify(timeout)

Developer: kovidgoyal, Project: mechanize, Lines: 11, Source: test_functional.py
Example 15: test_redirect

def test_redirect(self):
    # 301 redirect due to missing final '/'
    codes = []
    class ObservingHandler(mechanize.BaseHandler):
        def http_response(self, request, response):
            codes.append(response.code)
            return response
    self.browser.add_handler(ObservingHandler())
    r = self.browser.open(urljoin(self.uri, "redirected_good"))
    self.assertEqual(r.code, 200)
    self.assertIn(302, codes)
    self.assert_("GeneralFAQ.html" in r.read(2048))

Developer: kovidgoyal, Project: mechanize, Lines: 12, Source: test_functional.py
Example 16: test_retrieve

def test_retrieve(self):
    # not passing an explicit filename downloads to a temporary file
    # using a Request object instead of a URL works
    url = urljoin(self.uri, "/mechanize/")
    opener = self.build_opener()
    verif = CallbackVerifier(self)
    request = mechanize.Request(url)
    filename, headers = opener.retrieve(request, reporthook=verif.callback)
    self.assertEquals(request.visit, False)
    self._check_retrieve(url, filename, headers)
    opener.close()
    # closing the opener removed the temporary file
    self.failIf(os.path.isfile(filename))

Developer: kovidgoyal, Project: mechanize, Lines: 13, Source: test_functional.py
Example 17: setUp

def setUp(self):
    mechanize._testcase.TestCase.setUp(self)
    self.test_uri = urljoin(self.uri, "test_fixtures")
    self.server = self.get_cached_fixture("server")
    if self.no_proxies:
        old_opener_m = mechanize._opener._opener
        old_opener_u = urllib2._opener
        mechanize.install_opener(mechanize.build_opener(
            mechanize.ProxyHandler(proxies={})))
        urllib2.install_opener(urllib2.build_opener(
            urllib2.ProxyHandler(proxies={})))
        def revert_install():
            mechanize.install_opener(old_opener_m)
            urllib2.install_opener(old_opener_u)
        self.add_teardown(revert_install)

Developer: kovidgoyal, Project: mechanize, Lines: 15, Source: test_functional.py
Example 18: test_open_novisit

def test_open_novisit(self):
    def test_state(br):
        self.assert_(br.request is None)
        self.assert_(br.response() is None)
        self.assertRaises(mechanize.BrowserStateError, br.back)
    test_state(self.browser)
    uri = urljoin(self.uri, "test_fixtures")

    # note this involves a redirect, which should itself be non-visiting
    r = self.browser.open_novisit(uri)
    test_state(self.browser)
    self.assert_("GeneralFAQ.html" in r.read(2048))

    # Request argument instead of URL
    r = self.browser.open_novisit(mechanize.Request(uri))
    test_state(self.browser)
    self.assert_("GeneralFAQ.html" in r.read(2048))

Developer: kovidgoyal, Project: mechanize, Lines: 16, Source: test_functional.py
Example 19: _mech_open

def _mech_open(self,
               url,
               data=None,
               update_history=True,
               visit=None,
               timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT,
               headers=None):
    try:
        url.get_full_url
        if ' ' in url._Request__original:
            url._Request__original = url._Request__original.replace(' ', '%20')
    except AttributeError:
        # string URL -- convert to absolute URL if required
        scheme, _authority = _rfc3986.urlsplit(url)[:2]
        if scheme is None:
            # relative URL
            if self._response is None:
                raise BrowserStateError(
                    "can't fetch relative reference: "
                    "not viewing any document")
            url = _rfc3986.urljoin(self._response.geturl(), url)
        if ' ' in url:
            url = url.replace(' ', '%20')

    request = self._request(url, data, visit, timeout)
    request.add_header("User-agent",
                       "Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 6.0)")
    if headers:
        for headerName, headerValue in headers.items():
            request.add_header(headerName, headerValue)
    visit = request.visit
    if visit is None:
        visit = True

    if visit:
        self._visit_request(request, update_history)

    success = True
    try:
        response = UserAgentBase.open(self, request, data)
    except urllib2.HTTPError, error:
        success = False
        if error.fp is None:  # not a response
            raise
        response = error

Developer: orlenko, Project: LovedSongs, Lines: 44, Source: mechanize_crawler.py
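The relative-URL branch in Example 19 is what lets a mechanize Browser accept relative references once a page has been visited. The following sketch, which is not part of the project above and uses hypothetical URLs on example.com, illustrates that user-facing behaviour under the assumption that such a server is reachable.

import mechanize

br = mechanize.Browser()
# Visit an absolute URL first so the browser has a base to resolve against.
br.open("http://example.com/tests/index.html")
# A relative reference is now joined against the current page's URL
# (i.e. resolved to http://example.com/tests/page2.html via _rfc3986.urljoin).
br.open("page2.html")
print br.geturl()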
Example 20: _test_cookiejar

def _test_cookiejar(self, get_cookiejar, commit):
    cookiejar = get_cookiejar()
    br = mechanize.Browser()
    br.set_cookiejar(cookiejar)
    br.set_handle_refresh(False)
    url = urljoin(self.uri, "/cgi-bin/cookietest.cgi")
    # no cookie was set on the first request
    html = br.open(url).read()
    self.assertEquals(html.find("Your browser supports cookies!"), -1)
    self.assertEquals(len(cookiejar), 1)
    # ... but now we have the cookie
    html = br.open(url).read()
    self.assert_("Your browser supports cookies!" in html)
    commit(cookiejar)

    # should still have the cookie when we load afresh
    cookiejar = get_cookiejar()
    br.set_cookiejar(cookiejar)
    html = br.open(url).read()
    self.assert_("Your browser supports cookies!" in html)

Developer: Almad, Project: Mechanize, Lines: 20, Source: functional_tests.py
Note: The mechanize._rfc3986.urljoin examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs and similar platforms. The snippets are taken from open-source projects contributed by many developers, and copyright in the code remains with the original authors; please check each project's license before reusing or redistributing it. Do not republish this article without permission.