This article collects typical usage examples of the multidict.MultiDict class in Python. If you have been wondering what MultiDict is for, or how it is used in practice, the hand-picked class examples below should help.
14 code examples of the MultiDict class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the recommendation system surface better Python code samples.
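Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the core multidict operations most of the snippets rely on: add() appends a value without overwriting, getall() retrieves every value stored under a key, and MultiDictProxy wraps a MultiDict in a read-only view.

from multidict import MultiDict, MultiDictProxy

d = MultiDict()
d.add("tag", "python")
d.add("tag", "asyncio")          # duplicate keys are preserved
d.add("lang", "en")

print(d.getall("tag"))           # ['python', 'asyncio']
print(d["tag"])                  # 'python' -- indexing returns the first value only

view = MultiDictProxy(d)         # read-only wrapper around the same data
print(list(view.items()))        # [('tag', 'python'), ('tag', 'asyncio'), ('lang', 'en')]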
Example 1: ask_queries
async def ask_queries(reader, writer):
    """
    This set of commands seems to elicit all of the unique information I can get out of
    my receiver. (Denon AVR-S730H)
    :param reader:
    :param writer:
    :return:
    """
    commands = ["PW?", "MV?", "CV?", "MU?", "SI?"]
    commands += ["ZM?", "SV?", "SD?", "SLP?", "MS?"]
    commands += ["MSQUICK ?", "PSLOM ?"]
    commands += ["PSMULTEQ: ?", "PSDYNEQ ?", "PSREFLEV ?", "PSDYNVOL ?"]
    commands += ["PSEFF ?", "PSDEL ?", "PSSWR ?", "PSRSTR ?"]
    commands += ["Z2?", "Z2MU?", "Z2SLP?", "Z2QUICK ?", "TMAN?"]
    # commands = [b"Z2ON", b"SINET"]
    facts = MultiDict()
    for command in commands:
        writer.write(command.encode("ascii") + b"\r")
        lines = await read_lines_until(reader, 0.1)
        for line in lines:
            facts.add(line.strip(), command)
    return facts
Developer: paulhoule, Project: tentacruel, Lines of code: 25, Source file: __init__.py
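The MultiDict here lets the same response line be recorded once per command that produced it; a plain dict would silently keep only the last command. A brief sketch of what the resulting mapping looks like (the response line "MV50" is hypothetical):

from multidict import MultiDict

facts = MultiDict()
facts.add("MV50", "MV?")
facts.add("MV50", "ZM?")         # the same line was elicited by a second command
print(facts.getall("MV50"))      # ['MV?', 'ZM?'] -- every command that produced it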
Example 2: post
def post(self):
    """Return POST parameters."""
    if self._post is not None:
        return self._post
    if self.method not in self.POST_METHODS:
        self._post = MultiDictProxy(MultiDict())
        return self._post
    content_type = self.content_type
    if (content_type not in ('',
                             'application/x-www-form-urlencoded',
                             'multipart/form-data')):
        self._post = MultiDictProxy(MultiDict())
        return self._post

    body = yield from self.read()
    content_charset = self.charset or 'utf-8'

    environ = {'REQUEST_METHOD': self.method,
               'CONTENT_LENGTH': str(len(body)),
               'QUERY_STRING': '',
               'CONTENT_TYPE': self.headers.get(hdrs.CONTENT_TYPE)}

    fs = cgi.FieldStorage(fp=io.BytesIO(body),
                          environ=environ,
                          keep_blank_values=True,
                          encoding=content_charset)

    supported_transfer_encoding = {
        'base64': binascii.a2b_base64,
        'quoted-printable': binascii.a2b_qp
    }

    out = MultiDict()
    _count = 1
    for field in fs.list or ():
        transfer_encoding = field.headers.get(
            hdrs.CONTENT_TRANSFER_ENCODING, None)
        if field.filename:
            ff = FileField(field.name,
                           field.filename,
                           field.file,  # N.B. file closed error
                           field.type)
            if self._post_files_cache is None:
                self._post_files_cache = {}
            self._post_files_cache[field.name + str(_count)] = field
            _count += 1
            out.add(field.name, ff)
        else:
            value = field.value
            if transfer_encoding in supported_transfer_encoding:
                # binascii accepts bytes
                value = value.encode('utf-8')
                value = supported_transfer_encoding[
                    transfer_encoding](value)
            out.add(field.name, value)

    self._post = MultiDictProxy(out)
    return self._post
Developer: 1st1, Project: aiohttp, Lines of code: 59, Source file: web_reqrep.py
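Note that the cgi module used above was deprecated in Python 3.11 and removed in 3.13; later aiohttp versions (see Examples 11 and 14 below) parse the body themselves. For the simple application/x-www-form-urlencoded case, the same MultiDict result can be built directly with urllib.parse.parse_qsl — a sketch, not aiohttp's actual implementation:

from urllib.parse import parse_qsl
from multidict import MultiDict, MultiDictProxy

body = b"a=1&a=2&b="
out = MultiDict(parse_qsl(body.decode("utf-8"), keep_blank_values=True))
post = MultiDictProxy(out)
print(post.getall("a"))          # ['1', '2'] -- repeated fields are kept
print(post["b"])                 # ''        -- blank values are kept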
Example 3: update_query
def update_query(self, *args, **kwargs):
    """Return a new URL with query part updated."""
    s = self._get_str_query(*args, **kwargs)
    new_query = MultiDict(parse_qsl(s, keep_blank_values=True))
    query = MultiDict(self.query)
    query.update(new_query)
    return URL(self._val._replace(query=self._get_str_query(query)),
               encoded=True)
Developer: asvetlov, Project: yarl, Lines of code: 8, Source file: __init__.py
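The distinction that matters here is between update() and extend(): update() replaces the values of keys that are already present (so the new query parameters win), while extend() would keep both the old and the new entries. A small sketch of the difference, independent of yarl:

from multidict import MultiDict

query = MultiDict([("page", "1"), ("sort", "asc")])
query.update(MultiDict([("page", "2")]))
print(query.getall("page"))      # ['2'] -- existing key replaced

query = MultiDict([("page", "1"), ("sort", "asc")])
query.extend(MultiDict([("page", "2")]))
print(query.getall("page"))      # ['1', '2'] -- both entries kept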
Example 4: __init__
def __init__(self, method, url, *,
             params=None, headers=None, skip_auto_headers=frozenset(),
             data=None, cookies=None,
             auth=None, version=http.HttpVersion11, compress=None,
             chunked=None, expect100=False,
             loop=None, response_class=None,
             proxy=None, proxy_auth=None,
             timer=None, session=None, auto_decompress=True,
             verify_ssl=None, fingerprint=None, ssl_context=None,
             proxy_headers=None):

    if verify_ssl is False and ssl_context is not None:
        raise ValueError(
            "Either disable ssl certificate validation by "
            "verify_ssl=False or specify ssl_context, not both.")

    if loop is None:
        loop = asyncio.get_event_loop()

    assert isinstance(url, URL), url
    assert isinstance(proxy, (URL, type(None))), proxy

    self._session = session
    if params:
        q = MultiDict(url.query)
        url2 = url.with_query(params)
        q.extend(url2.query)
        url = url.with_query(q)
    self.url = url.with_fragment(None)
    self.original_url = url
    self.method = method.upper()
    self.chunked = chunked
    self.compress = compress
    self.loop = loop
    self.length = None
    self.response_class = response_class or ClientResponse
    self._timer = timer if timer is not None else TimerNoop()
    self._auto_decompress = auto_decompress
    self._verify_ssl = verify_ssl
    self._ssl_context = ssl_context

    if loop.get_debug():
        self._source_traceback = traceback.extract_stack(sys._getframe(1))

    self.update_version(version)
    self.update_host(url)
    self.update_headers(headers)
    self.update_auto_headers(skip_auto_headers)
    self.update_cookies(cookies)
    self.update_content_encoding(data)
    self.update_auth(auth)
    self.update_proxy(proxy, proxy_auth, proxy_headers)
    self.update_fingerprint(fingerprint)

    self.update_body_from_data(data)
    if data or self.method not in self.GET_METHODS:
        self.update_transfer_encoding()
    self.update_expect_continue(expect100)
Developer: wwqgtxx, Project: wwqLyParse, Lines of code: 57, Source file: client_reqrep.py
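The params-merging idiom above (copy url.query into a MultiDict, build a second URL from params, then extend) keeps any query string already baked into the URL while appending the explicitly passed parameters, duplicates included. A standalone sketch of the same idea using yarl (the URL and parameter names are made up):

from multidict import MultiDict
from yarl import URL

url = URL("http://example.com/search?q=python")
params = {"page": "2"}

q = MultiDict(url.query)                                   # existing query: q=python
q.extend(URL("http://example.com/").with_query(params).query)
url = url.with_query(q)
print(url)                       # http://example.com/search?q=python&page=2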
Example 5: links
def links(self):
    links_str = ", ".join(self.headers.getall("link", []))
    links = MultiDict()
    if not links_str:
        return MultiDictProxy(links)

    for val in re.split(r",(?=\s*<)", links_str):
        url, params = re.match(r"\s*<(.*)>(.*)", val).groups()
        params = params.split(";")[1:]

        link = MultiDict()
        for param in params:
            key, _, value, _ = re.match(
                r"^\s*(\S*)\s*=\s*(['\"]?)(.*?)(\2)\s*$",
                param, re.M
            ).groups()
            link.add(key, value)

        key = link.get("rel", url)
        link.add("url", self.url.join(URL(url)))
        links.add(key, MultiDictProxy(link))

    return MultiDictProxy(links)
Developer: skytian, Project: apue_pro, Lines of code: 29, Source file: client_reqrep.py
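Each parsed link becomes its own read-only MultiDictProxy, keyed in the outer mapping by its rel parameter (or by the URL when rel is absent), so several links sharing a rel value can coexist. A consumer-side sketch of that nested structure (the "next" relation and the URL are hypothetical):

from multidict import MultiDict, MultiDictProxy

links = MultiDict()
link = MultiDict([("rel", "next"), ("url", "http://example.com/page/2")])
links.add(link.get("rel"), MultiDictProxy(link))

for rel, lnk in links.items():
    print(rel, lnk["url"])       # next http://example.com/page/2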
Example 6: implicit_tvals
def implicit_tvals(self):
    tvals = dict()
    ret = defaultdict(NoClobberDict)
    tls = self.tlayers
    rls = self.rlayers
    tl_n = [t.category for t in tls]

    def _parse_spec(s):
        c, v = s.split('=')
        return (c, v.split(','))

    for c in self.controls:
        tvs = []
        for specs, region in c.tval_specs:
            wsd = MultiDict()
            for w in region:
                # wsd[self.values(w, rls)] = w
                rv = tuple(zip([l.category for l in rls],
                               self.values(w, rls)))
                wsd[rv] = w

            for rl, ws in wsd.items():
                tl_v = map(list,
                           map(set, zip(*[self.values(w, tls)
                                          for w in sorted(ws)])))
                assert len(tl_n) == len(tl_v)
                available = zip(tl_n, tl_v)
                tmp = dict(available +
                           map(_parse_spec, specs.split('*')))
                chunk = [(tl, rl) for tl in
                         list(product(*[tmp[t] for t in tl_n]))]
                tvs.append(chunk)

        from itertools import combinations
        assert 0 == sum(map(len, [set.intersection(set(x), set(y))
                                  for x, y in list(combinations(tvs, 2))]))

        for tv, rv in sorted(set(sum(tvs, []))):
            ret[tv][rv] = c

    return dict([(k, dict(v)) for k, v in ret.items()])
Developer: gberriz, Project: datarail-2.0-old, Lines of code: 47, Source file: dump_well_metadata.py
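The MultiDict in this snippet appears to be a project-local class: the tuple keys and the Python 2 idioms (adding the results of zip() and map(), taking len() of a map) would not work with the PyPI multidict package, whose keys must be strings. The grouping idea — accumulate several wells under one key, then read them back as a group — would be spelled with add() and getall() in the PyPI package; a rough, hypothetical sketch, not the project's code:

from multidict import MultiDict

wsd = MultiDict()
wsd.add("regionA", "well_1")
wsd.add("regionA", "well_2")     # repeated keys accumulate instead of clobbering
wsd.add("regionB", "well_3")

for key in set(wsd.keys()):
    print(key, sorted(wsd.getall(key)))   # each region with all of its wells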
Example 7: __init__
def __init__(self, method, url, *,
             params=None, headers=None, skip_auto_headers=frozenset(),
             data=None, cookies=None,
             auth=None, version=http.HttpVersion11, compress=None,
             chunked=None, expect100=False,
             loop=None, response_class=None,
             proxy=None, proxy_auth=None, proxy_from_env=False,
             timer=None, session=None, auto_decompress=True):

    if loop is None:
        loop = asyncio.get_event_loop()

    assert isinstance(url, URL), url
    assert isinstance(proxy, (URL, type(None))), proxy

    self._session = session
    if params:
        q = MultiDict(url.query)
        url2 = url.with_query(params)
        q.extend(url2.query)
        url = url.with_query(q)
    self.url = url.with_fragment(None)
    self.original_url = url
    self.method = method.upper()
    self.chunked = chunked
    self.compress = compress
    self.loop = loop
    self.length = None
    self.response_class = response_class or ClientResponse
    self._timer = timer if timer is not None else TimerNoop()
    self._auto_decompress = auto_decompress

    if loop.get_debug():
        self._source_traceback = traceback.extract_stack(sys._getframe(1))

    self.update_version(version)
    self.update_host(url)
    self.update_headers(headers)
    self.update_auto_headers(skip_auto_headers)
    self.update_cookies(cookies)
    self.update_content_encoding(data)
    self.update_auth(auth)
    self.update_proxy(proxy, proxy_auth, proxy_from_env)
    self.update_body_from_data(data)
    self.update_transfer_encoding()
    self.update_expect_continue(expect100)
Developer: Eyepea, Project: aiohttp, Lines of code: 46, Source file: client_reqrep.py
Example 8: info_data
def info_data(self, request, **params):
    headers = self.getheaders(request)
    data = {'method': request.method,
            'headers': headers,
            'pulsar': self.pulsar_info(request)}
    if request.method in ENCODE_URL_METHODS:
        data['args'] = as_dict(request.url_data)
    else:
        args, files = request.data_and_files()
        jfiles = MultiDict()
        if files:
            for name, part in files.items():
                try:
                    part = part.string()
                except UnicodeError:
                    part = part.base64()
                jfiles.add(name, part)
        data.update((('args', as_dict(args)),
                     ('files', as_dict(jfiles))))
    data.update(params)
    return data
Developer: quantmind, Project: pulsar, Lines of code: 21, Source file: manage.py
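Collecting the decoded file parts into a MultiDict keeps multiple uploads that share a field name. The as_dict() helper used above is defined elsewhere in the project, but a generic flattening of a MultiDict into something JSON-serializable might look like this (a hypothetical sketch, not pulsar's implementation):

from multidict import MultiDict

def multidict_as_dict(md):
    # Collapse a MultiDict into {key: value} or {key: [values]} for JSON output.
    out = {}
    for key in set(md.keys()):
        values = md.getall(key)
        out[key] = values[0] if len(values) == 1 else values
    return out

files = MultiDict([("upload", "file-a"), ("upload", "file-b"), ("note", "hi")])
print(multidict_as_dict(files))  # {'upload': ['file-a', 'file-b'], 'note': 'hi'} (key order may vary)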
Example 9: parse_mimetype
def parse_mimetype(mimetype: str) -> MimeType:
    """Parses a MIME type into its components.

    mimetype is a MIME type string.

    Returns a MimeType object.

    Example:

    >>> parse_mimetype('text/html; charset=utf-8')
    MimeType(type='text', subtype='html', suffix='',
             parameters={'charset': 'utf-8'})

    """
    if not mimetype:
        return MimeType(type='', subtype='', suffix='',
                        parameters=MultiDictProxy(MultiDict()))

    parts = mimetype.split(';')
    params = MultiDict()  # type: MultiDict[str]
    for item in parts[1:]:
        if not item:
            continue
        key, value = cast(Tuple[str, str],
                          item.split('=', 1) if '=' in item else (item, ''))
        params.add(key.lower().strip(), value.strip(' "'))

    fulltype = parts[0].strip().lower()
    if fulltype == '*':
        fulltype = '*/*'

    mtype, stype = (cast(Tuple[str, str], fulltype.split('/', 1))
                    if '/' in fulltype else (fulltype, ''))
    stype, suffix = (cast(Tuple[str, str], stype.split('+', 1))
                     if '+' in stype else (stype, ''))

    return MimeType(type=mtype, subtype=stype, suffix=suffix,
                    parameters=MultiDictProxy(params))
Developer: KeepSafe, Project: aiohttp, Lines of code: 38, Source file: helpers.py
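A short usage sketch of the function above (in current aiohttp it lives in aiohttp.helpers); the duplicate charset parameter is contrived, but it shows why the parameters are kept in a MultiDict rather than a plain dict:

from aiohttp.helpers import parse_mimetype

mt = parse_mimetype('text/html; charset=utf-8; charset=ascii')
print(mt.type, mt.subtype)               # text html
print(mt.parameters.getall('charset'))   # ['utf-8', 'ascii'] -- both occurrences survive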
Example 10: links
def links(self) -> 'MultiDictProxy[MultiDictProxy[Union[str, URL]]]':
    links_str = ", ".join(self.headers.getall("link", []))

    if not links_str:
        return MultiDictProxy(MultiDict())

    links = MultiDict()  # type: MultiDict[MultiDictProxy[Union[str, URL]]]

    for val in re.split(r",(?=\s*<)", links_str):
        match = re.match(r"\s*<(.*)>(.*)", val)
        if match is None:  # pragma: no cover
            # the check exists to suppress mypy error
            continue
        url, params_str = match.groups()
        params = params_str.split(";")[1:]

        link = MultiDict()  # type: MultiDict[Union[str, URL]]

        for param in params:
            match = re.match(
                r"^\s*(\S*)\s*=\s*(['\"]?)(.*?)(\2)\s*$",
                param, re.M
            )
            if match is None:  # pragma: no cover
                # the check exists to suppress mypy error
                continue
            key, _, value, _ = match.groups()
            link.add(key, value)

        key = link.get("rel", url)  # type: ignore
        link.add("url", self.url.join(URL(url)))
        links.add(key, MultiDictProxy(link))

    return MultiDictProxy(links)
Developer: wwqgtxx, Project: wwqLyParse, Lines of code: 37, Source file: client_reqrep.py
Example 11: post
def post(self):
    """Return POST parameters."""
    if self._post is not None:
        return self._post
    if self._method not in self.POST_METHODS:
        self._post = MultiDictProxy(MultiDict())
        return self._post
    content_type = self.content_type
    if (content_type not in ('',
                             'application/x-www-form-urlencoded',
                             'multipart/form-data')):
        self._post = MultiDictProxy(MultiDict())
        return self._post

    out = MultiDict()

    if content_type == 'multipart/form-data':
        multipart = yield from self.multipart()

        field = yield from multipart.next()
        while field is not None:
            size = 0
            max_size = self._client_max_size
            content_type = field.headers.get(hdrs.CONTENT_TYPE)
            if field.filename:
                # store file in temp file
                tmp = tempfile.TemporaryFile()
                chunk = yield from field.read_chunk(size=2**16)
                while chunk:
                    chunk = field.decode(chunk)
                    tmp.write(chunk)
                    size += len(chunk)
                    if max_size > 0 and size > max_size:
                        raise ValueError(
                            'Maximum request body size exceeded')
                    chunk = yield from field.read_chunk(size=2**16)
                tmp.seek(0)

                ff = FileField(field.name, field.filename,
                               tmp, content_type, field.headers)
                out.add(field.name, ff)
            else:
                value = yield from field.read(decode=True)
                if content_type is None or \
                        content_type.startswith('text/'):
                    charset = field.get_charset(default='utf-8')
                    value = value.decode(charset)
                out.add(field.name, value)
                size += len(value)
                if max_size > 0 and size > max_size:
                    raise ValueError(
                        'Maximum request body size exceeded')

            field = yield from multipart.next()
    else:
        data = yield from self.read()
        if data:
            charset = self.charset or 'utf-8'
            out.extend(
                parse_qsl(
                    data.rstrip().decode(charset),
                    keep_blank_values=True,
                    encoding=charset))

    self._post = MultiDictProxy(out)
    return self._post
Developer: sunghyunzz, Project: aiohttp, Lines of code: 68, Source file: web_request.py
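On the server side, the MultiDictProxy built here is what request.post() hands back to a handler, so repeated form fields (several checkboxes sharing one name, for instance) all survive. A hypothetical handler using the modern async API:

from aiohttp import web

async def handler(request):
    data = await request.post()       # MultiDictProxy assembled as in the example above
    tags = data.getall('tag', [])     # every submitted value for the 'tag' field
    return web.Response(text=', '.join(tags))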
Example 12: __init__
def __init__(self, method: str, url: URL, *,
             params: Optional[Mapping[str, str]]=None,
             headers: Optional[LooseHeaders]=None,
             skip_auto_headers: Iterable[str]=frozenset(),
             data: Any=None,
             cookies: Optional[LooseCookies]=None,
             auth: Optional[BasicAuth]=None,
             version: http.HttpVersion=http.HttpVersion11,
             compress: Optional[str]=None,
             chunked: Optional[bool]=None,
             expect100: bool=False,
             loop: Optional[asyncio.AbstractEventLoop]=None,
             response_class: Optional[Type['ClientResponse']]=None,
             proxy: Optional[URL]=None,
             proxy_auth: Optional[BasicAuth]=None,
             timer: Optional[BaseTimerContext]=None,
             session: Optional['ClientSession']=None,
             ssl: Union[SSLContext, bool, Fingerprint, None]=None,
             proxy_headers: Optional[LooseHeaders]=None,
             traces: Optional[List['Trace']]=None):

    if loop is None:
        loop = asyncio.get_event_loop()

    assert isinstance(url, URL), url
    assert isinstance(proxy, (URL, type(None))), proxy

    # FIXME: session is None in tests only, need to fix tests
    # assert session is not None
    self._session = cast('ClientSession', session)
    if params:
        q = MultiDict(url.query)
        url2 = url.with_query(params)
        q.extend(url2.query)
        url = url.with_query(q)
    self.original_url = url
    self.url = url.with_fragment(None)
    self.method = method.upper()
    self.chunked = chunked
    self.compress = compress
    self.loop = loop
    self.length = None
    if response_class is None:
        real_response_class = ClientResponse
    else:
        real_response_class = response_class
    self.response_class = real_response_class  # type: Type[ClientResponse]
    self._timer = timer if timer is not None else TimerNoop()
    self._ssl = ssl

    if loop.get_debug():
        self._source_traceback = traceback.extract_stack(sys._getframe(1))

    self.update_version(version)
    self.update_host(url)
    self.update_headers(headers)
    self.update_auto_headers(skip_auto_headers)
    self.update_cookies(cookies)
    self.update_content_encoding(data)
    self.update_auth(auth)
    self.update_proxy(proxy, proxy_auth, proxy_headers)

    self.update_body_from_data(data)
    if data or self.method not in self.GET_METHODS:
        self.update_transfer_encoding()
    self.update_expect_continue(expect100)

    if traces is None:
        traces = []
    self._traces = traces
Developer: wwqgtxx, Project: wwqLyParse, Lines of code: 68, Source file: client_reqrep.py
Example 13: request
def request(self, method, path, params=(), auth=None, **kwargs):
    kwargs['auth'] = TokenAuth(auth) if isinstance(auth, dict) else auth
    params = MultiDict(params)
    params.extend(self.params)
    url = urljoin(self.url, path).rstrip('/') + self.trailing
    return super().request(method, url, params=params, **kwargs)
Developer: coady, Project: clients, Lines of code: 6, Source file: aio.py
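Wrapping the per-call params in a MultiDict and then extend()-ing the client-wide defaults keeps both sets of parameters, including repeated keys, instead of letting one overwrite the other. A standalone sketch (the parameter names are made up):

from multidict import MultiDict

default_params = MultiDict([("api_key", "XYZ")])
params = MultiDict([("q", "multidict"), ("q", "aiohttp")])
params.extend(default_params)
print(list(params.items()))
# [('q', 'multidict'), ('q', 'aiohttp'), ('api_key', 'XYZ')]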
Example 14: post
async def post(self) -> MultiDictProxy:
    """Return POST parameters."""
    if self._post is not None:
        return self._post
    if self._method not in self.POST_METHODS:
        self._post = MultiDictProxy(MultiDict())
        return self._post
    content_type = self.content_type
    if (content_type not in ('',
                             'application/x-www-form-urlencoded',
                             'multipart/form-data')):
        self._post = MultiDictProxy(MultiDict())
        return self._post

    out = MultiDict()  # type: MultiDict

    if content_type == 'multipart/form-data':
        multipart = await self.multipart()
        max_size = self._client_max_size

        field = await multipart.next()
        while field is not None:
            size = 0
            content_type = field.headers.get(hdrs.CONTENT_TYPE)

            if field.filename:
                # store file in temp file
                tmp = tempfile.TemporaryFile()
                chunk = await field.read_chunk(size=2**16)
                while chunk:
                    chunk = field.decode(chunk)
                    tmp.write(chunk)
                    size += len(chunk)
                    if 0 < max_size < size:
                        raise HTTPRequestEntityTooLarge(
                            max_size=max_size,
                            actual_size=size
                        )
                    chunk = await field.read_chunk(size=2**16)
                tmp.seek(0)

                ff = FileField(field.name, field.filename,
                               cast(io.BufferedReader, tmp),
                               content_type, field.headers)
                out.add(field.name, ff)
            else:
                value = await field.read(decode=True)
                if content_type is None or \
                        content_type.startswith('text/'):
                    charset = field.get_charset(default='utf-8')
                    value = value.decode(charset)
                out.add(field.name, value)
                size += len(value)
                if 0 < max_size < size:
                    raise HTTPRequestEntityTooLarge(
                        max_size=max_size,
                        actual_size=size
                    )

            field = await multipart.next()
    else:
        data = await self.read()
        if data:
            charset = self.charset or 'utf-8'
            out.extend(
                parse_qsl(
                    data.rstrip().decode(charset),
                    keep_blank_values=True,
                    encoding=charset))

    self._post = MultiDictProxy(out)
    return self._post
Developer: 3lnc, Project: aiohttp, Lines of code: 73, Source file: web_request.py
Note: the multidict.MultiDict class examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright remains with the original authors. Please consult each project's license before redistributing or reusing the code; do not reproduce without permission.