This article collects typical usage examples of the w3lib.http.basic_auth_header function in Python. If you have been wondering what basic_auth_header does, how to call it, or what it looks like in real code, the curated examples below should help.
The following 20 code examples of basic_auth_header are sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
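Before the examples, here is a minimal sketch of what the function itself does, using placeholder credentials ('user'/'pass' are not taken from any example below): basic_auth_header(username, password) returns the value of an HTTP Basic authentication header as bytes.

from w3lib.http import basic_auth_header

# The result is b'Basic ' followed by the base64-encoded
# 'username:password' pair; 'user'/'pass' are placeholders.
header = basic_auth_header('user', 'pass')
print(header)  # b'Basic dXNlcjpwYXNz'

The examples below then attach this value to a request's Authorization or Proxy-Authorization header.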
Example 1: test_apikey
def test_apikey(self):
    self.spider.crawlera_enabled = True
    self.settings['CRAWLERA_APIKEY'] = apikey = 'apikey'
    proxyauth = basic_auth_header(apikey, '')
    self._assert_enabled(self.spider, self.settings, proxyauth=proxyauth)

    self.spider.crawlera_apikey = apikey = 'notfromsettings'
    proxyauth = basic_auth_header(apikey, '')
    self._assert_enabled(self.spider, self.settings, proxyauth=proxyauth)
Developer: scrapy-plugins, Project: scrapy-crawlera, Lines: 9, Source: test_crawlera.py
Example 2: _add_auth_header
def _add_auth_header(self, request):
    if self._user_name is not None and self._user_password is not None:
        request.add_header('Authorization', basic_auth_header(self._user_name, self._user_password))
    else:  # try netrc
        try:
            host = urlparse(self._get_server_url()).hostname
            a = netrc().authenticators(host)
            request.add_header('Authorization', basic_auth_header(a[0], a[2]))
        except (NetrcParseError, IOError, TypeError):
            pass
Developer: rm89, Project: scrapy, Lines: 10, Source: scrapydclient.py
Example 3: test_userpass
def test_userpass(self):
    self.spider.use_hubproxy = True
    self.settings['HUBPROXY_USER'] = user = 'other'
    self.settings['HUBPROXY_PASS'] = pass_ = 'secret'
    proxyauth = basic_auth_header(user, pass_)
    self._assert_enabled(self.spider, self.settings, proxyauth=proxyauth)

    self.spider.hubproxy_user = user = 'notfromsettings'
    self.spider.hubproxy_pass = pass_ = 'anothersecret'
    proxyauth = basic_auth_header(user, pass_)
    self._assert_enabled(self.spider, self.settings, proxyauth=proxyauth)
Developer: waytai, Project: scrapylib, Lines: 11, Source: test_hubproxy.py
Example 4: test_userpass
def test_userpass(self):
    self.spider.crawlera_enabled = True
    self.settings["CRAWLERA_USER"] = user = "other"
    self.settings["CRAWLERA_PASS"] = pass_ = "secret"
    proxyauth = basic_auth_header(user, pass_)
    self._assert_enabled(self.spider, self.settings, proxyauth=proxyauth)

    self.spider.crawlera_user = user = "notfromsettings"
    self.spider.crawlera_pass = pass_ = "anothersecret"
    proxyauth = basic_auth_header(user, pass_)
    self._assert_enabled(self.spider, self.settings, proxyauth=proxyauth)
Developer: pombredanne, Project: scrapylib, Lines: 11, Source: test_crawlera.py
Example 5: test_apikey_assignment
def test_apikey_assignment(self):
    self.spider.crawlera_enabled = True
    apikey = 'someapikey'

    self.settings['CRAWLERA_APIKEY'] = None
    self.settings['CRAWLERA_USER'] = apikey
    self.settings['CRAWLERA_PASS'] = ''
    proxyauth = basic_auth_header(apikey, '')
    self._assert_enabled(self.spider, self.settings, proxyauth=proxyauth)

    self.settings['CRAWLERA_USER'] = None
    self.settings['CRAWLERA_APIKEY'] = apikey
    self.settings['CRAWLERA_PASS'] = ''
    proxyauth = basic_auth_header(apikey, '')
    self._assert_enabled(self.spider, self.settings, proxyauth=proxyauth)
Developer: redapple, Project: scrapy-crawlera, Lines: 15, Source: test_crawlera.py
Example 6: __init__
def __init__(self, user, password, maxbans, url, crawler, enabled=False):
    self.url = url
    self.user = user
    self.auth = basic_auth_header(user, password)
    self.crawler = crawler
    self.enabled = enabled
    self.maxbans = maxbans
    self.bans = 0
Developer: mt3, Project: scrapylib, Lines: 8, Source: hubproxy.py
Example 7: __init__
def __init__(self, crawler, splash_base_url, slot_policy):
    self.crawler = crawler
    self.splash_base_url = splash_base_url
    self.slot_policy = slot_policy
    self.splash_auth = None
    user = crawler.settings.get('SPLASH_USER')
    passwd = crawler.settings.get('SPLASH_PASS', '')
    if user:
        self.splash_auth = basic_auth_header(user, passwd)
Developer: CognitiveScale, Project: scrapyjs, Lines: 9, Source: middleware.py
Example 8: open_spider
def open_spider(self, spider):
    try:
        self.enabled = spider.use_hubproxy
        self.user = spider.hubproxy_user
        self.auth = basic_auth_header(spider.hubproxy_user, spider.hubproxy_pass)
    except AttributeError:
        pass

    if self.enabled:
        log.msg("Using hubproxy at %s (user: %s)" % (self.url, self.user),
                spider=spider)
Developer: mt3, Project: scrapylib, Lines: 11, Source: hubproxy.py
Example 9: _configure_js
def _configure_js(self, spec, settings):
    self._job_id = settings.get('JOB', '')
    self.js_enabled = False
    self.SPLASH_HOST = None
    if settings.get('SPLASH_URL'):
        self.SPLASH_HOST = urlparse(settings.get('SPLASH_URL')).hostname
        self.js_enabled = spec.get('js_enabled', False)
    if self.js_enabled and (settings.get('SPLASH_PASS') is not None or
                            settings.get('SPLASH_USER') is not None):
        self.splash_auth = basic_auth_header(
            settings.get('SPLASH_USER', ''),
            settings.get('SPLASH_PASS', ''))
    self._filter_js_urls = self._build_js_url_filter(spec)
Developer: FrankieChan885, Project: portia, Lines: 13, Source: spider.py
Example 10: _assert_enabled
def _assert_enabled(self, spider,
                    settings=None,
                    proxyurl='http://proxy.crawlera.com:8010',
                    proxyauth=basic_auth_header('apikey', ''),
                    maxbans=400,
                    download_timeout=190):
    crawler = self._mock_crawler(spider, settings)
    mw = self.mwcls.from_crawler(crawler)
    mw.open_spider(spider)
    req = Request('http://www.scrapytest.org')
    assert mw.process_request(req, spider) is None
    self.assertEqual(req.meta.get('proxy'), proxyurl)
    self.assertEqual(req.meta.get('download_timeout'), download_timeout)
    self.assertEqual(req.headers.get('Proxy-Authorization'), proxyauth)
    res = Response(req.url)
    assert mw.process_response(req, res, spider) is res

    # disabled if 'dont_proxy=True' is set
    req = Request('http://www.scrapytest.org')
    req.meta['dont_proxy'] = True
    assert mw.process_request(req, spider) is None
    self.assertEqual(req.meta.get('proxy'), None)
    self.assertEqual(req.meta.get('download_timeout'), None)
    self.assertEqual(req.headers.get('Proxy-Authorization'), None)
    res = Response(req.url)
    assert mw.process_response(req, res, spider) is res
    del req.meta['dont_proxy']

    if maxbans > 0:
        # assert ban count is reset after a successful response
        res = Response('http://ban.me', status=self.bancode)
        assert mw.process_response(req, res, spider) is res
        self.assertEqual(crawler.engine.fake_spider_closed_result, None)
        res = Response('http://unban.me')
        assert mw.process_response(req, res, spider) is res
        self.assertEqual(crawler.engine.fake_spider_closed_result, None)
        self.assertEqual(mw._bans[None], 0)

    # check that it does not ban before maxbans responses with bancode
    for x in range(maxbans + 1):
        self.assertEqual(crawler.engine.fake_spider_closed_result, None)
        res = Response(
            'http://ban.me/%d' % x,
            status=self.bancode,
            headers={'X-Crawlera-Error': 'banned'},
        )
        assert mw.process_response(req, res, spider) is res

    # max bans reached and close_spider called
    self.assertEqual(crawler.engine.fake_spider_closed_result, (spider, 'banned'))
Developer: scrapy-plugins, Project: scrapy-crawlera, Lines: 50, Source: test_crawlera.py
Example 11: _assert_enabled
def _assert_enabled(
    self,
    spider,
    settings=None,
    proxyurl="http://proxy.crawlera.com:8010?noconnect",
    proxyauth=basic_auth_header("user", "pass"),
    bancode=503,
    maxbans=20,
    download_timeout=1800,
):
    crawler = self._mock_crawler(settings)
    mw = self.mwcls.from_crawler(crawler)
    mw.open_spider(spider)
    req = Request("http://www.scrapytest.org")
    assert mw.process_request(req, spider) is None
    self.assertEqual(req.meta.get("proxy"), proxyurl)
    self.assertEqual(req.meta.get("download_timeout"), download_timeout)
    self.assertEqual(req.headers.get("Proxy-Authorization"), proxyauth)
    res = Response(req.url)
    assert mw.process_response(req, res, spider) is res

    # disabled if 'dont_proxy' is set
    req = Request("http://www.scrapytest.org")
    req.meta["dont_proxy"] = True
    assert mw.process_request(req, spider) is None
    self.assertEqual(req.meta.get("proxy"), None)
    self.assertEqual(req.meta.get("download_timeout"), None)
    self.assertEqual(req.headers.get("Proxy-Authorization"), None)
    res = Response(req.url)
    assert mw.process_response(req, res, spider) is res

    if maxbans > 0:
        # assert ban count is reset after a successful response
        res = Response("http://ban.me", status=bancode)
        assert mw.process_response(req, res, spider) is res
        self.assertEqual(crawler.engine.fake_spider_closed_result, None)
        res = Response("http://unban.me")
        assert mw.process_response(req, res, spider) is res
        self.assertEqual(crawler.engine.fake_spider_closed_result, None)
        self.assertEqual(mw._bans[None], 0)

    # check that it does not ban before maxbans responses with bancode
    for x in range(maxbans + 1):  # range (not xrange) for Python 3 compatibility
        self.assertEqual(crawler.engine.fake_spider_closed_result, None)
        res = Response("http://ban.me/%d" % x, status=bancode)
        assert mw.process_response(req, res, spider) is res

    # max bans reached and close_spider called
    self.assertEqual(crawler.engine.fake_spider_closed_result, (spider, "banned"))
Developer: pombredanne, Project: scrapylib, Lines: 49, Source: test_crawlera.py
Example 12: __init__
def __init__(self, name, spec, item_schemas, all_extractors, settings=None,
             **kw):
    super(IblSpider, self).__init__(name, **kw)
    self._job_id = settings.get('JOB', '')
    spec = deepcopy(spec)
    for key, val in kw.items():
        if isinstance(val, six.string_types) and key in STRING_KEYS:
            val = val.splitlines()
        spec[key] = val

    self._item_template_pages = sorted(
        ((t['scrapes'], t) for t in spec['templates']
         if t.get('page_type', 'item') == 'item'), key=itemgetter(0))
    self._templates = [templ for _, templ in self._item_template_pages]

    self.plugins = IndexedDict()
    for plugin_class, plugin_name in zip(load_plugins(settings),
                                         load_plugin_names(settings)):
        instance = plugin_class()
        instance.setup_bot(settings, spec, item_schemas, all_extractors)
        self.plugins[plugin_name] = instance

    self.js_enabled = False
    self.SPLASH_HOST = None
    if settings.get('SPLASH_URL'):
        self.SPLASH_HOST = urlparse(settings.get('SPLASH_URL')).hostname
        self.js_enabled = spec.get('js_enabled', False)
    if self.js_enabled and (settings.get('SPLASH_PASS') is not None or
                            settings.get('SPLASH_USER') is not None):
        self.splash_auth = basic_auth_header(
            settings.get('SPLASH_USER', ''),
            settings.get('SPLASH_PASS', ''))
    self._filter_js_urls = self._build_js_url_filter(spec)

    self.login_requests = []
    self.form_requests = []
    self._start_requests = []
    self.generic_form = GenericForm(**kw)
    self._create_init_requests(spec.get("init_requests", []))
    self._process_start_urls(spec)
    self.allowed_domains = spec.get(
        'allowed_domains',
        self._get_allowed_domains(self._templates)
    )
    self.page_actions = spec.get('page_actions', [])
    if not self.allowed_domains:
        self.allowed_domains = None
Developer: codegreencreative, Project: portia, Lines: 47, Source: spider.py
Example 13: _configure_js
def _configure_js(self, spec, settings):
    self.js_enabled = False
    self.SPLASH_HOST = None
    if settings.get('SPLASH_URL'):
        self.SPLASH_HOST = urlparse(settings.get('SPLASH_URL')).hostname
        self.js_enabled = spec.get('js_enabled', False)
    if self.js_enabled and (settings.get('SPLASH_PASS') is not None or
                            settings.get('SPLASH_USER') is not None):
        self.splash_auth = basic_auth_header(
            settings.get('SPLASH_USER', ''),
            settings.get('SPLASH_PASS', ''))
    self.splash_wait = settings.getint('SPLASH_WAIT', 5)
    self.splash_timeout = settings.getint('SPLASH_TIMEOUT', 30)
    self.splash_js_source = settings.get(
        'SPLASH_JS_SOURCE', 'function(){}')
    self.splash_lua_source = settings.get('SPLASH_LUA_SOURCE', '')
    self._filter_js_urls = self._build_js_url_filter(spec)
Developer: NamiStudio, Project: portia, Lines: 17, Source: spider.py
Example 14: _assert_enabled
def _assert_enabled(self, spider,
                    settings=None,
                    proxyurl='http://proxy.scrapinghub.com:8010',
                    proxyauth=basic_auth_header('user', 'pass'),
                    bancode=503,
                    maxbans=20,
                    download_timeout=1800,
                    ):
    crawler = self._mock_crawler(settings)
    mw = self.mwcls.from_crawler(crawler)
    mw.open_spider(spider)
    req = Request('http://www.scrapytest.org')
    assert mw.process_request(req, spider) is None
    self.assertEqual(req.meta.get('proxy'), proxyurl)
    self.assertEqual(req.meta.get('download_timeout'), download_timeout)
    self.assertEqual(req.headers.get('Proxy-Authorization'), proxyauth)
    res = Response(req.url)
    assert mw.process_response(req, res, spider) is res

    if maxbans > 0:
        # assert ban count is reset after a successful response
        res = Response('http://ban.me', status=bancode)
        assert mw.process_response(req, res, spider) is res
        self.assertEqual(crawler.engine.fake_spider_closed_result, None)
        res = Response('http://unban.me')
        assert mw.process_response(req, res, spider) is res
        self.assertEqual(crawler.engine.fake_spider_closed_result, None)

    # check that it does not ban before maxbans responses with bancode
    for x in range(maxbans + 1):  # range (not xrange) for Python 3 compatibility
        self.assertEqual(crawler.engine.fake_spider_closed_result, None)
        res = Response('http://ban.me/%d' % x, status=bancode)
        assert mw.process_response(req, res, spider) is res

    # max bans reached and close_spider called
    self.assertEqual(crawler.engine.fake_spider_closed_result, (spider, 'banned'))
Developer: alexcepoi, Project: scrapylib, Lines: 36, Source: test_hubproxy.py
Example 15: test_basic_auth_header_encoding
def test_basic_auth_header_encoding(self):
    self.assertEqual(b'Basic c29tw6Z1c8Oocjpzw7htZXDDpHNz',
                     basic_auth_header(u'somæusèr', u'sømepäss', encoding='utf8'))
    # default encoding (ISO-8859-1)
    self.assertEqual(b'Basic c29t5nVz6HI6c_htZXDkc3M=',
                     basic_auth_header(u'somæusèr', u'sømepäss'))
Developer: scrapy, Project: w3lib, Lines: 6, Source: test_http.py
Example 16: get_proxyauth
def get_proxyauth(self, spider):
    """Hook to compute Proxy-Authorization header by custom rules."""
    if self.apikey:
        return basic_auth_header(self.apikey, "")
    return basic_auth_header(self.user, getattr(self, "pass"))
Developer: kimg1234, Project: scrapy-crawlera, Lines: 5, Source: scrapy_crawlera.py
Example 17: get_proxyauth
def get_proxyauth(self, spider):
    """Hook to compute Proxy-Authorization header by custom rules."""
    return basic_auth_header(self.user, getattr(self, 'pass'))
Developer: 2014fgq, Project: shixin, Lines: 3, Source: scrapy_crawlera.py
Example 18: get_proxyauth
def get_proxyauth(self, spider):
    """Hook to compute Proxy-Authorization header by custom rules."""
    return basic_auth_header(self.apikey, '')
Developer: scrapy-plugins, Project: scrapy-crawlera, Lines: 3, Source: middleware.py
Example 19: spider_opened
def spider_opened(self, spider):
    usr = getattr(spider, 'http_user', '')
    pwd = getattr(spider, 'http_pass', '')
    if usr or pwd:
        self.auth = basic_auth_header(usr, pwd)
Developer: heartsg, Project: dscrapy, Lines: 5, Source: httpauth.py
Example 20: _authorization
def _authorization(self, spider):
    usr = getattr(spider, 'http_user', '')
    pwd = getattr(spider, 'http_pass', '')
    if usr or pwd:
        return basic_auth_header(usr, pwd)
Developer: dreamfrog, Project: jophiel, Lines: 5, Source: httpauth.py
Note: the w3lib.http.basic_auth_header examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are drawn from open-source projects contributed by many developers; copyright remains with the original authors, and any distribution or use of the code should follow the corresponding project's license. Do not reproduce this article without permission.