本文整理汇总了Python中urllib.parse.unquote_to_bytes函数的典型用法代码示例。如果您正苦于以下问题:Python unquote_to_bytes函数的具体用法?Python unquote_to_bytes怎么用?Python unquote_to_bytes使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了unquote_to_bytes函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: frankiz_auth_check
def frankiz_auth_check(self, request):
    """Validate a Frankiz SSO callback carried in the query string.

    Expects three percent-encoded query parameters:
      - ``response``:  JSON payload describing the student
      - ``timestamp``: UNIX time the handshake was issued
      - ``hash``:      md5(timestamp + FKZ_KEY + response) hex digest

    Returns a DRF ``Response`` whose body is
    ``{'valid': bool, 'student': serialized-student-or-None}``.
    On a valid handshake the matching ``Student`` row is fetched, or
    created from the payload when it does not exist yet.
    """
    import logging
    logger = logging.getLogger(__name__)
    student = None
    response_data = {'valid': True, 'student': student}
    RETURN_PAGE = ('http://' + request.get_host() + '/vaneau/').encode()
    logger.error(RETURN_PAGE)

    params = request.query_params
    # Bail out early when any handshake parameter is absent; the original
    # code only flagged the response invalid and then crashed on
    # unquote_to_bytes(None) / int(None) below.
    if not {"timestamp", "response", "hash"} <= set(params.keys()):
        logger.error('KEYS')
        response_data["valid"] = False
        return Response(response_data, 200)

    response = parse.unquote_to_bytes(params.get("response"))
    ts = parse.unquote_to_bytes(params.get("timestamp"))
    h = parse.unquote_to_bytes(params.get("hash"))

    # Reject stale handshakes (more than 3h of skew). The second clause
    # also rejects timestamps within 30min of the 3h boundary --
    # presumably a safety margin; TODO(review): confirm intended window.
    if abs(int(time.time()) - int(ts)) > 3600 * 3 or \
            abs(int(ts) + 3 * 3600 - int(time.time())) < 30 * 60:
        logger.error('TS')
        response_data["valid"] = False
    # Integrity check: digest over raw (still-encoded) parameter bytes.
    if hashlib.md5(ts + FKZ_KEY + response).hexdigest() != h.decode():
        logger.error('HASH')
        response_data["valid"] = False

    if response_data["valid"]:
        data = json.loads(response.decode())
        try:
            student = Student.objects.get(hruid=data["hruid"])
        except Student.DoesNotExist:
            student = Student.objects.create(
                hruid=data["hruid"], lastname=data["lastname"],
                firstname=data["firstname"], promo=data["promo"])
        finally:
            response_data["student"] = StudentSerializer(student).data
    return Response(response_data, 200)
开发者ID:Binet-JTX,项目名称:vaneau-api,代码行数:35,代码来源:views.py
示例2: parse_qsl_to_bytes
def parse_qsl_to_bytes(qs, keep_blank_values=False, strict_parsing=False):
    """Parse a query string into a list of (name, value) pairs as bytes.

    Mirrors Python 3's ``urllib.parse.parse_qsl`` except that every
    ``unquote(s, encoding, errors)`` call is replaced by
    ``unquote_to_bytes(s)``, so percent-escapes decode to raw bytes.

    Arguments:
        qs: percent-encoded query string to be parsed.
        keep_blank_values: when true, fields with an empty value are kept
            as blank strings instead of being dropped.
        strict_parsing: when true, malformed fields raise ValueError
            instead of being silently skipped.
    """
    qs, _coerce_result = _coerce_args(qs)
    # Fields may be separated by either '&' or ';'.
    fields = []
    for chunk in qs.split('&'):
        fields.extend(chunk.split(';'))
    decoded = []
    for field in fields:
        if not field and not strict_parsing:
            continue
        parts = field.split('=', 1)
        if len(parts) != 2:
            if strict_parsing:
                raise ValueError("bad query field: %r" % (field,))
            # A control-name without any '=' sign.
            if not keep_blank_values:
                continue
            parts.append('')
        if parts[1] or keep_blank_values:
            key = _coerce_result(unquote_to_bytes(parts[0].replace('+', ' ')))
            val = _coerce_result(unquote_to_bytes(parts[1].replace('+', ' ')))
            decoded.append((key, val))
    return decoded
开发者ID:0daybug,项目名称:scrapy,代码行数:48,代码来源:url.py
示例3: verify_handshake
def verify_handshake(token, signature, action):
    """
    Decode a handshake token (X-Siphon-Handshake-Token header) and check
    it against the given signature (X-Siphon-Handshake-Signature header).

    On success the payload's JSON is decoded and a 2-tuple of strings is
    returned: ``(<username>, <app-id>)`` for a development handshake
    (the app ID is guaranteed to be owned by the user), or
    ``(<submission-id>, <app-id>)`` for a production handshake, which is
    not tied to a user. Any verification or decoding failure raises
    HandshakeError.
    """
    # rsa.verify() works out the SHA-256 hashing method by itself.
    payload = base64.b64decode(unquote(token))
    signature_bytes = unquote_to_bytes(signature)
    verified = rsa.verify(payload, signature_bytes, get_public_key())
    if not verified:
        raise HandshakeError()
    try:
        parsed = json.loads(payload.decode('utf8'))
        # An explicit 'action' field must match the expected action.
        if 'action' in parsed and parsed['action'] != action:
            raise HandshakeError()
        if 'user_id' in parsed and 'app_id' in parsed:
            return (parsed['user_id'], parsed['app_id'])
        else:
            return (parsed['submission_id'], parsed['app_id'])
    except (ValueError, KeyError):
        raise HandshakeError()
开发者ID:getsiphon,项目名称:siphon-web,代码行数:34,代码来源:external.py
示例4: get_decoder
def get_decoder(enc):
    """Return a callable decoding *enc*-encoded bytes, or None if unknown.

    'base64'                          -> binascii.a2b_base64
    'url'                             -> percent-decoding, '+' as space
    'lower_hex' / 'upper_hex' / 'hex' -> binascii.a2b_hex
    """
    if enc == 'url':
        # '+' means a space in form encoding; rewrite it to %20 so a
        # single unquote pass handles both.
        return lambda raw: unquote_to_bytes(raw.replace(b'+', b'%20'))
    table = {
        'base64': a2b_base64,
        'lower_hex': a2b_hex,
        'upper_hex': a2b_hex,
        'hex': a2b_hex,
    }
    return table.get(enc)
开发者ID:MagicPwn,项目名称:pentest_utils,代码行数:7,代码来源:padding_oracle.py
示例5: open_data_url
def open_data_url(url):
    """Decode a 'data:' scheme URL and return the payload as bytes.

    Syntax of data URLs (RFC 2397):
        dataurl   := "data:" [ mediatype ] [ ";base64" ] "," data
        mediatype := [ type "/" subtype ] *( ";" parameter )
        data      := *urlchar
        parameter := attribute "=" value

    urllib could handle these in Python 2 but not in Python 3; inspired
    from Python 2.7.2's urllib.py.

    Raises:
        IOError: when the URL lacks the comma separating header and data.
    """
    try:
        header, data = url.split(",", 1)
    except ValueError:
        raise IOError("bad data URL")
    header = header[5:]  # strip the leading "data:" (len("data:") == 5)
    encoding = ""
    if header:
        semi = header.rfind(";")
        # The final ";token" is a transfer encoding only when it is not
        # an "attribute=value" mediatype parameter.
        if semi >= 0 and "=" not in header[semi:]:
            encoding = header[semi + 1:]
    data = unquote_to_bytes(data)
    if encoding == "base64":
        # Restore '=' padding the URL may have dropped. ``-len % 4`` is 0
        # for already-aligned input; the original ``4 - len % 4`` wrongly
        # appended four pad bytes in that case.
        data += b"=" * (-len(data) % 4)
        # base64.decodestring() was removed in Python 3.9; decodebytes()
        # is the supported equivalent.
        return base64.decodebytes(data)
    return data
开发者ID:Borisenkov,项目名称:Printrun,代码行数:33,代码来源:image.py
示例6: parse_querystring
def parse_querystring(query_string):
    """
    Parse a raw query string into a list of (key, value) 2-tuples.

    Leading/trailing '&' around the string and '=' around each parameter
    are stripped first, so degenerate inputs such as '&field&', '=field',
    'field=' or '=field=value' do not produce spurious pairs.

    Example:
        parse_querystring('field1=foo&field2=bar')
        -> [('field1', 'foo'), ('field2', 'bar')]
    """
    pairs = []
    for raw in query_string.strip('&').split('&'):
        pieces = raw.strip('=').split('=', 1)  # at most one split
        # Guarantee a value slot, then decode both halves. The
        # encode / unquote_to_bytes / decode round-trip is a Python 2/3
        # compatibility shim kept from the original.
        key_value = (pieces + [''])[:2]
        pairs.append(tuple(
            unquote_plus(unquote_to_bytes(part.encode('utf-8')).decode('utf-8'))
            for part in key_value
        ))
    return pairs
开发者ID:SWAT4Developers,项目名称:python-julia,代码行数:26,代码来源:parse.py
示例7: url_unescape
def url_unescape(value, encoding='utf-8', plus=True):
    """Decodes the given value from a URL.

    The argument may be either a byte or unicode string. When *encoding*
    is None the result is a byte string; otherwise it is a unicode string
    in the given encoding.

    If ``plus`` is true (the default), plus signs are interpreted as
    spaces (a literal plus must be escaped as "%2B"), which suits query
    strings and form-encoded values but not URL path components. Note
    this default is the reverse of Python's urllib module.

    .. versionadded:: 3.1
       The ``plus`` argument
    """
    if encoding is None:
        if plus:
            # unquote_to_bytes has no "_plus" variant, so translate '+'
            # to a space by hand before unquoting.
            value = to_basestring(value).replace('+', ' ')
        return urllib_parse.unquote_to_bytes(value)
    unquoter = urllib_parse.unquote_plus if plus else urllib_parse.unquote
    return unquoter(to_basestring(value), encoding=encoding)
开发者ID:C4ptainCrunch,项目名称:webalchemy,代码行数:26,代码来源:escape.py
示例8: _dummy_request
def _dummy_request(self, **kwargs):
    # Stub for the outgoing API request layer used by the tests.
    # Verifies the call shape, decodes the request parameters, and --
    # when the request carries a 'fake' marker -- returns canned data
    # instead of letting the real request happen.
    self.assertIn('body', kwargs)
    self.assertIn('uri', kwargs)
    self.assertIn('site', kwargs)
    if kwargs['body'] is None:
        # GET-style call: parameters live in the URI after the
        # "<scriptpath>/api.php?" prefix, which must be present.
        # use uri and remove script path
        parameters = kwargs['uri']
        prefix = kwargs['site'].scriptpath() + '/api.php?'
        self.assertEqual(prefix, parameters[:len(prefix)])
        parameters = parameters[len(prefix):]
    else:
        parameters = kwargs['body']
    parameters = parameters.encode('ascii')  # it should be bytes anyway
    # Extract parameter data from the body, it's ugly but allows us
    # to verify that we actually test the right request
    parameters = [p.split(b'=', 1) for p in parameters.split(b'&')]
    keys = [p[0].decode('ascii') for p in parameters]
    # Values: percent-decode to bytes, decode with the site's encoding,
    # restore '+' to spaces, and turn '|'-separated lists into sets so
    # the comparison below is order-insensitive.
    values = [unquote_to_bytes(p[1]) for p in parameters]
    values = [v.decode(kwargs['site'].encoding()) for v in values]
    values = [v.replace('+', ' ') for v in values]
    values = [set(v.split('|')) for v in values]
    parameters = dict(zip(keys, values))
    if 'fake' not in parameters:
        return False  # do an actual request
    if self.assert_parameters:
        # Each expected parameter must have been sent; when an expected
        # value is given it must be a subset of the sent values.
        for param, value in self.assert_parameters.items():
            self.assertIn(param, parameters)
            if value is not None:
                if isinstance(value, UnicodeType):
                    value = value.split('|')
                self.assertLessEqual(set(value), parameters[param])
    return self.data
开发者ID:magul,项目名称:pywikibot-core,代码行数:33,代码来源:api_tests.py
示例9: parse_qsl_to_bytes
def parse_qsl_to_bytes(query_string, keep_blank_values=False,
                       strict_parsing=False):
    """Parse a percent-encoded query string into (name, value) pairs.

    Same contract as ``urllib.parse.parse_qsl`` except percent-escapes
    are decoded with ``unquote_to_bytes``, so names and values come back
    as bytes.

    Arguments:
        query_string: percent-encoded query string to be parsed.
        keep_blank_values: when true, fields with blank values are kept
            as blank strings; otherwise they are dropped.
        strict_parsing: when true, parsing errors raise ValueError
            instead of being silently ignored.

    Returns a list of 2-tuples.
    """
    query_string, _coerce_result = _coerce_args(query_string)
    # Both '&' and ';' act as field separators.
    fields = [piece
              for segment in query_string.split('&')
              for piece in segment.split(';')]
    out = []
    for field in fields:
        if not field and not strict_parsing:
            continue
        halves = field.split('=', 1)
        if len(halves) == 1:
            if strict_parsing:
                raise ValueError("bad query field: %r" % (field,))
            # Control-name without any '=' sign.
            if not keep_blank_values:
                continue
            halves.append('')
        if halves[1] or keep_blank_values:
            out.append((
                _coerce_result(unquote_to_bytes(halves[0].replace('+', ' '))),
                _coerce_result(unquote_to_bytes(halves[1].replace('+', ' '))),
            ))
    return out
开发者ID:qborg,项目名称:warp-tracker,代码行数:47,代码来源:http_server.py
示例10: visits_for_titles
def visits_for_titles(wiki_title_ptrs, wiki_visits_pagecounts_file, file_index, wikicode, verbosity):
    '''
    Append page visits to the data objects. We expect several of these, so append each to an array, e.g.
    {'Q':Qid1, PGviews:[12,19,203]}
    In the more recent files, missing values indicate <5 hits in that month, so we set these to 0
    Having several values (one per month) allows us to trim off any that show an unusual spike
    NB: see https://dumps.wikimedia.org/other/pagecounts-ez/ for format.
    Pageviews totals files have a wikicode project name in ascii followed by .z for wikipedias (e.g. en.z) followed by space,
    followed by uri-escaped title, followed by space, followed by integer. The format is a very difficult one to parse, as it varies
    e.g. there are multiple differently quoted version of the same title, sometime with spaces not underscores, unicode encoding sometimes fails,
    the bzip file sometimes appears truncated, etc etc. I've found that the best way to do this is to unquote_to_bytes first
    (to remove uri-encoding), then convert to unicode.
    In fact, the encoding is unclear, and sometimes utf-8 encoding seems to fail, so we pass on any utf-8 conversion errors.
    Hopefully this should only affect a few taxa where the page title has odd accents that have not been either uri-escaped,
    or properly encoded in utf-8.
    '''
    from urllib.parse import unquote_to_bytes
    used = 0
    # Lines of interest start with e.g. b"en.z " -- compare as bytes.
    match_project = (wikicode +' ').encode()
    start_char = len(match_project)
    with bz2.open(wiki_visits_pagecounts_file, 'rb') as PAGECOUNTfile:
        try:
            problem_lines = [] #there are apparently some errors in the unicode dumps
            for n, line in enumerate(PAGECOUNTfile):
                # Progress report every 10M lines when verbose.
                if (n % 10000000 == 0) and verbosity:
                    print("Reading pagecount file of number of page views: {} entries read from file {} ({}): mem usage {} Mb".format(n, file_index, wiki_visits_pagecounts_file.name, memory_usage_resource()), file=sys.stderr)
                if line.startswith(match_project):
                    try:
                        # Split off the trailing view count; the title part
                        # is uri-escaped, so unquote to bytes before decoding.
                        info = line[start_char:].rstrip(b'\r\n\\rn').rsplit(b' ', 1)
                        title = unquote_to_bytes(info[0]).decode('UTF-8').replace(" ", "_") #even though most titles should not have spaces, some can sneak in via uri escaping
                        wiki_title_ptrs[title]['PGviews'][file_index] = (wiki_title_ptrs[title]['PGviews'][file_index] or 0) + int(info[1]) #sometimes there are multiple encodings of the same title, with different visit numbers
                        used += 1
                    except UnicodeDecodeError:
                        # Collect line numbers of undecodable titles; reported below.
                        problem_lines.append(str(n))
                    except KeyError:
                        pass #title not in wiki_title_ptrs - this is expected for most entries
                    except ValueError as e:
                        if verbosity:
                            print(e, file=sys.stderr)
                            print(" Problem converting page view to an integer for {}".format(line), file=sys.stderr)
        except EOFError as e:
            #this happens sometimes, dunno why
            if verbosity:
                print(" Problem with end of file: {}. Used {} entries (should be {}: {}%. Skipping to next".format(e.args[-1],used, len(wiki_title_ptrs), used/len(wiki_title_ptrs) * 100), file=sys.stderr)
    # NOTE(review): nesting of the two reporting blocks below was
    # reconstructed from flattened source -- confirm against upstream.
    if len(problem_lines):
        if verbosity>0:
            if verbosity<=2:
                print(" Problem decoding {} lines, but these will be ones with strange accents etc, so should mostly not be taxa.".format(len(problem_lines)), file=sys.stderr)
            else:
                print(" Problem decoding certain lines: the following lines have been ignored:\n{}".format(" \n".join(problem_lines)), file=sys.stderr)
    if verbosity:
        print(" NB: of {} WikiData taxon entries, {} ({:.2f}%) have pageview data for {} in '{}'. mem usage {:.1f} Mb".format(len(wiki_title_ptrs), used, used/len(wiki_title_ptrs) * 100, wikicode, wiki_visits_pagecounts_file if isinstance(wiki_visits_pagecounts_file, str) else wiki_visits_pagecounts_file.name, memory_usage_resource()), file=sys.stderr)
开发者ID:jrosindell,项目名称:OneZoom,代码行数:57,代码来源:OTT_popularity_mapping.py
示例11: download
def download(self, key, category, post, file, filename):
    """Fetch *file* (a URL), resolve a filename for it, save the content
    under ``self.savedir`` and record the download via ``self.dm``.

    Filename resolution order:
      1. the explicit *filename* argument,
      2. the Content-Disposition response header,
      3. the last path segment of the URL (percent-decoded as utf-8,
         falling back to euc-kr),
      4. md5 of the URL as a last resort.

    Returns True when the file was saved and registered; returns None
    when the HTTP request failed or the post/filename pair was already
    recorded.
    """
    l = self.l
    headers = {"User-Agent": "Mozilla AppleWebKit Chrome Safari"}
    # NOTE(review): the UA header is immediately disabled below; keep or
    # drop one of these two lines once the intent is decided.
    headers = None
    response = requests.get(file, headers=headers)
    if not response.ok:
        l.og("Error: requests.get()")
        l.og("\t{}".format(file))
        return None
    content = response.content
    if not filename:
        headers = response.headers
        if "Content-disposition" in headers:
            matched = self.CONTENT_REGEX.match(headers["Content-disposition"])
            if matched:
                filename = matched.group(1)
    if not filename:
        # Fall back to the last path segment of the URL.
        parsed = urlparse(file)
        filename = parsed.path.split('/')[-1]
    if filename:
        # Percent-decoded names are usually utf-8; fall back to euc-kr
        # (Korean sites), then to a hash of the URL. The original used
        # bare ``except:`` here, which also swallowed KeyboardInterrupt
        # and SystemExit; catch only decode failures.
        try:
            filename = unquote_to_bytes(filename).decode("utf-8")
        except UnicodeDecodeError:
            try:
                filename = unquote_to_bytes(filename).decode("euc-kr")
            except UnicodeDecodeError:
                l.og("Error: Can't find filename.")
                filename = Tools.md5(file)
    l.og("\tSave to: {}".format(filename))
    data = self.dm.find(post, filename)
    if not data:
        l.og("[Download] {}".format(filename))
        # ``with`` guarantees the handle is closed even if write() fails;
        # the original leaked it on error.
        with open(os.sep.join((self.savedir, filename)), "wb") as wo:
            wo.write(content)
        self.dm.add(key, category, post, file, filename)
        return True
开发者ID:mrquestion,项目名称:subtitle-crawler,代码行数:44,代码来源:__init__.py
示例12: _get_path
def _get_path(self, parsed):
    """Return the request path (plus any ``;parameters``) from a parsed
    URL, percent-decoded and then re-decoded as ISO-8859-1.

    The ISO-8859-1 round trip replicates how non-ASCII values in the
    WSGI environ are arbitrarily decoded; refs the comment in
    ``get_bytes_from_wsgi()``.
    """
    full_path = parsed.path
    if parsed.params:
        # Re-attach URL parameters when present.
        full_path = full_path + ";" + parsed.params
    raw = unquote_to_bytes(full_path)
    return raw.decode('iso-8859-1')
开发者ID:CodeMonk,项目名称:django,代码行数:10,代码来源:client.py
示例13: createQrcode
def createQrcode(url):
    """POST a fixed device-id payload to *url* (WeChat QR-code endpoint)
    and return the raw response body.

    The JSON is dumped with ensure_ascii=False and pushed through
    unquote_to_bytes, which utf-8-encodes it into the bytes urlopen
    expects as POST data.
    """
    payload = json.dumps(
        {"device_num": "1", "device_id_list": ["koovox_02"]},
        ensure_ascii=False)
    qr_request = req.Request(url, method='POST')
    qr_request.add_header('Content-Type', 'application/json')
    qr_request.add_header('encoding', 'utf-8')
    reply = req.urlopen(qr_request, parse.unquote_to_bytes(payload))
    result = reply.read()
    print(result)
    return result
开发者ID:MosaicHe,项目名称:weixin,代码行数:10,代码来源:wechatDeviceApi_3.4.py
示例14: unescapeRepeatedly
def unescapeRepeatedly(input):
    '''Argument may be str or bytes. Returns bytes (or None for None).

    Keeps percent-decoding until the value stops changing, so doubly (or
    more deeply) escaped input is fully unescaped.
    '''
    if input is None:
        return None
    current = input
    while True:
        decoded = unquote_to_bytes(current)
        if decoded == current:
            # Fixed point reached: nothing left to unescape.
            return decoded
        current = decoded
开发者ID:internetarchive,项目名称:surt,代码行数:10,代码来源:GoogleURLCanonicalizer.py
示例15: createMenu
def createMenu(token, menu):
    """Create a WeChat custom menu by POSTing *menu* (a dict) to the
    cgi-bin/menu/create endpoint authorised by *token*.

    Returns the raw response body (also printed for debugging).
    """
    endpoint = 'https://api.weixin.qq.com/cgi-bin/menu/create?access_token=%s' % token
    body = json.dumps(menu, ensure_ascii=False)
    menu_request = req.Request(endpoint, method='POST')
    menu_request.add_header('Content-Type', 'application/json')
    menu_request.add_header('encoding', 'utf-8')
    # unquote_to_bytes utf-8-encodes the JSON into POST-able bytes.
    reply = req.urlopen(menu_request, parse.unquote_to_bytes(body))
    result = reply.read()
    print(result)
    return result
开发者ID:MosaicHe,项目名称:weixin,代码行数:12,代码来源:wechatDeviceApi_3.4.py
示例16: _qs
def _qs(req, encoding='utf-8', to_unicode=False):
    """Extract the query pairs of a request: from the body for POST,
    otherwise from the part of the URL after '?'.

    On Python 2 the string is unquoted as a native str; on Python 3 the
    percent-escapes decode to bytes, optionally re-decoded to unicode.
    Blank values are kept (``parse_qs(..., True)``).
    """
    raw = req.body if req.method == 'POST' else req.url.partition('?')[2]
    if six.PY2:
        unquoted = unquote(to_native_str(raw, encoding))
    elif six.PY3:
        unquoted = unquote_to_bytes(raw)
        if to_unicode:
            unquoted = unquoted.decode(encoding)
    return parse_qs(unquoted, True)
开发者ID:Parlin-Galanodel,项目名称:scrapy,代码行数:12,代码来源:test_http_request.py
示例17: url_unescape
def url_unescape(value, encoding='utf-8'):
    """Decodes the given value from a URL.

    The argument may be either a byte or unicode string. When *encoding*
    is None the result is a byte string; otherwise it is a unicode
    string in the specified encoding ('+' is treated as a space).
    """
    if encoding is None:
        return urllib_parse.unquote_to_bytes(value)
    return urllib_parse.unquote_plus(to_basestring(value), encoding=encoding)
开发者ID:1stvamp,项目名称:tornado,代码行数:12,代码来源:escape.py
示例18: chainFollow
def chainFollow(nothingValue):
    # Walk the "busynothing" challenge chain: fetch each page, collect the
    # one-character 'info' cookie fragment from every response, and follow
    # the "next busynothing is N" pointer until it runs out. The
    # accumulated cookie string is URL-unquoted ('+' -> ' '),
    # bz2-decompressed and printed as the hidden message.
    # NOTE(review): relies on a module-level `site` URL prefix and on
    # `urllib.urlopen`, which is the Python 2 API -- confirm the
    # surrounding module's imports.
    seen = ''
    print("Working...")
    while True:
        response = urllib.urlopen(site+nothingValue)
        # Pull the value of the `info=` cookie out of the Set-Cookie header.
        cookie = re.findall("(info=)(.*?)(;)",dict(response.info())["Set-Cookie"])
        data = str(response.read())
        found = re.findall("(next busynothing is )([0-9]+)",data)
        seen += cookie[0][1]
        if(len(found) > 0):
            nothingValue = found[0][1]
        else:
            # End of the chain: decode and print the accumulated payload.
            print(bz2.decompress(unquote_to_bytes(seen.replace("+"," "))).decode())
            break
开发者ID:slamDuncan52,项目名称:pythonChallenge,代码行数:14,代码来源:17.py
示例19: frankiz_check
def frankiz_check(request):
import logging
logger = logging.getLogger(__name__)
if not "timestamp" in request.query_params.keys() or not "response" in request.query_params.keys() or not "hash" in request.query_params.keys():
logger.error('KEYS')
raise NotAuthenticated()
response = parse.unquote_to_bytes(request.query_params.get("response"))
ts = parse.unquote_to_bytes(request.query_params.get("timestamp"))
h = parse.unquote_to_bytes(request.query_params.get("hash"))
if abs(int(time.time()) - int(ts)) > 3600*3:
logger.error('TS')
raise NotAuthenticated()
if hashlib.md5(ts + FKZ_KEY + response).hexdigest() != h.decode():
logger.error('HASH')
raise NotAuthenticated()
data = json.loads(response.decode())
return request
开发者ID:Binet-JTX,项目名称:vaneau-api,代码行数:23,代码来源:views.py
示例20: do_GET
def do_GET(self):
    """Handle a BitTorrent tracker announce: pull the ``info_hash`` query
    parameter out of the request path, register it with ``transmission``,
    and reply with a minimal bencoded response advertising a single peer
    at 127.0.0.1:0 with a 3600s interval."""
    tokens = re.split('\?|&|=', self.path)
    # The value follows the 'info_hash' token after splitting on ?, & and =.
    raw_hash = unquote_to_bytes(tokens[tokens.index('info_hash') + 1])
    transmission.add_info_hash(hexlify(raw_hash).decode())
    self.send_response(200)
    self.send_header("Content-type", "text/plain")
    self.end_headers()
    self.wfile.write(b'd8:intervali3600e5:peers6:\x7f\x00\x00\x01\x00\x00e')
开发者ID:bitrab,项目名称:bittorrent-tracker-utils,代码行数:14,代码来源:traptracker.py
注:本文中的urllib.parse.unquote_to_bytes函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论