This article collects typical usage examples of the parse function from Python's urllib. If you have been wondering what exactly Python's parse does and how to use it, the curated code samples below may help.
The following presents 20 code examples of the parse function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
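One note before the examples: in the standard library, urllib.parse is a module rather than a callable function, and the bare parse in most of the snippets below is a local alias or a project-specific helper. As a baseline, here is a minimal, self-contained sketch of the module's own core functions (urlparse, parse_qs, urlencode):

import urllib.parse

# Split a URL into its components (scheme, netloc, path, query, ...).
url = urllib.parse.urlparse('https://example.com/search?q=python&page=2')
print(url.netloc)  # 'example.com'
print(url.path)    # '/search'

# Decode the query string into a dict of value lists.
print(urllib.parse.parse_qs(url.query))  # {'q': ['python'], 'page': ['2']}

# Encode a dict back into a query/form string.
print(urllib.parse.urlencode({'q': 'python', 'page': 2}))  # 'q=python&page=2'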
Example 1: main
def main():
    connect = client.rtm_connect()
    if not connect:
        print('Slack RTM Connect Error!')
        return
    print('Slack RTM Connect Success!')
    while True:
        # Handle every message that did not come from a bot.
        for data in client.rtm_read():
            if data['type'] == 'message':
                if 'bot_id' not in data:
                    parse(data['text'])
        time.sleep(0.1)
Developer ID: ahastudio, Project: CodingLife, Lines: 12, Source: bot.py
Example 2: __init__
def __init__(self, url, key=None, secret=None, expiration_days=0, private=False, content_type=None, create=True):
    from boto.s3.connection import S3Connection
    from boto.s3.key import Key
    self.url = parse(url)
    self.expiration_days = expiration_days
    self.buffer = StringIO()
    self.private = private
    self.closed = False
    self._readreq = True
    self._writereq = False
    self.content_type = content_type or mimetypes.guess_type(self.url.path)[0]
    bucket = self.url.netloc
    if bucket.endswith(".s3.amazonaws.com"):
        bucket = bucket[:-17]  # strip the ".s3.amazonaws.com" suffix
    self.client = S3Connection(key, secret)
    self.name = "s3://" + bucket + self.url.path
    if create:
        self.bucket = self.client.create_bucket(bucket)
    else:
        self.bucket = self.client.get_bucket(bucket, validate=False)
    self.key = Key(self.bucket)
    self.key.key = self.url.path.lstrip("/")
    self.buffer.truncate(0)
Developer ID: fusioneng, Project: python-s3file, Lines: 30, Source: s3file.py
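In Example 2, parse evidently behaves like urllib.parse.urlparse (the result exposes .netloc and .path), so it is presumably imported under that name. A sketch of the bucket/key split the constructor performs, on a hypothetical URL:

from urllib.parse import urlparse as parse

u = parse('s3://mybucket/some/dir/file.txt')
bucket = u.netloc         # 'mybucket'
key = u.path.lstrip('/')  # 'some/dir/file.txt'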
Example 3: is_open
def is_open(dpmt, course, crn):
    base = "http://my.illinois.edu"
    page = blogotubes('http://www.courses.illinois.edu')
    if not page:
        print(page); return -1
    url = geturl(page, 'Class Schedule')
    if not url:
        print(url); return -1
    page = blogotubes(base+url)
    if not page:
        print('lol'+page); return -1
    url = geturl(page, dpmt)
    if not url:
        print(url); return -1
    page = blogotubes(base+url)  # Get list of courses in dpmt
    if not page:
        print(page); return -1
    url = geturl(page, course)
    if not url:
        print(url); return -1
    page = blogotubes(base+url)  # Get list of sections in course
    if not page:
        print(page); return -1
    result = parse(page, crn)  # Parse openness of section
    if result:
        return 1
    else:
        return 0
Developer ID: Woodlndcreatures, Project: UIUC_ClassSniffer, Lines: 28, Source: classSniffer.py
Example 4: migrate
def migrate(path, name):
    print('----%s----' % name)
    input_f = open(path, 'r', encoding='utf-8')
    quotes = []
    prev = ''
    for line in input_f.readlines():
        text, page = parse(line, prev)
        if len(page) > 0:
            verifyPage(page, line)
            pair = dict()
            pair['text'] = text.lstrip()
            pair['page'] = page
            quotes += [pair, ]
            prev = ''
        else:
            # No page number yet; carry the text over to the next line.
            prev = text
    input_f.close()
    if len(prev):
        # Leftover text that never received a page number.
        pair['text'] = prev
        pair['page'] = 0
    book = {
        'title': name,
        'quotes': quotes
    }
    return book
Developer ID: Devgrapher, Project: booklib, Lines: 35, Source: migrate-note.py
Example 5: onSend
def onSend(self):
    self.getFields()
    message = self.generateMessage().toLatin1()
    subject = self.subject_.toLatin1()
    # As published, this line called urllib.parse(...) directly; the module
    # itself is not callable, so the form body is presumably meant to be
    # built with urllib.parse.urlencode.
    params = urllib.parse.urlencode(
        {
            "kontakt": "cad",
            "from_name": self.name_,
            "from_mail": self.email_,
            "subject": subject,
            "kommentar": message,
        }
    )
    headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
    conn = http.client.HTTPConnection("www.ipek.uni-karlsruhe.de:80")
    conn.request("POST", "/cms/de/kontakt/kontakt.php", params, headers)
    response = conn.getresponse()
    print(response.status, response.reason)
    data = response.read()
    conn.close()
    self.close()
    return
Developer ID: baumannm, Project: cadstart, Lines: 34, Source: pyCADfeedbackFormuiHandler.py
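For context, a self-contained sketch of the same form-POST pattern, with placeholder host and path; urllib.parse.urlencode produces the application/x-www-form-urlencoded body:

import http.client
import urllib.parse

params = urllib.parse.urlencode({'from_name': 'Jane', 'kommentar': 'Hello'})
headers = {'Content-type': 'application/x-www-form-urlencoded', 'Accept': 'text/plain'}
conn = http.client.HTTPConnection('example.com', 80)
conn.request('POST', '/feedback.php', params, headers)
response = conn.getresponse()
print(response.status, response.reason)
conn.close()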
Example 6: process_response
def process_response(self, r):
    def parse(item, type):
        text = item.xpath('.//td[3]/text()')[0].strip()
        context = item.xpath('.//td[@class="codeContext"]/text()')
        where = item.xpath('.//td[@class="linenumber"]/text()')[0]
        return {
            'type': type,
            'text': text,
            'context': context[0] if context else '',
            'where': where
        }
    doc = html.document_fromstring(r)
    return chain((parse(item, 'Error')
                  for item in doc.xpath('//div[@id="errors"]//tr')),
                 (parse(item, 'Warning')
                  for item in doc.xpath('//div[@id="warnings"]//tr')))
Developer ID: hugoArregui, Project: w3c-validators, Lines: 16, Source: w3c-css-validator.py
Example 7: list_pages
def list_pages(namespace_url=None):
    list_url = namespace_url or INDEX_INDEX
    print('Crawling {}'.format(list_url))
    tree = parse(list_url)
    for a in tree.xpath('//a[@class="twikilink"]'):
        name = a.text.strip()
        url = a.attrib['href']
        if namespace_url:
            yield (name,), url
        else:
            yield ('Main', name), url
    if not namespace_url:
        namespaces = tree.xpath(
            '//a[starts-with(@href, "index_report.php?groupname=")]'
        )
        for a in namespaces:
            namespace = a.text.strip()
            url = urllib.parse.urljoin(
                INDEX_INDEX, a.attrib['href']
            )
            # Recurse into each namespace index.
            for key, value in list_pages(url):
                assert len(key) == 1
                yield (namespace,) + key, value
Developer ID: dahlia, Project: cliche, Lines: 26, Source: crawler.py
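The parse in Example 7 is presumably lxml.html.parse, which accepts a URL directly and returns a tree supporting xpath(); urllib.parse.urljoin then resolves relative hrefs against the index page. A hedged sketch with a placeholder URL:

from lxml.html import parse
from urllib.parse import urljoin

base = 'https://example.com/index.html'
tree = parse(base)
for a in tree.xpath('//a'):
    print(urljoin(base, a.attrib.get('href', '')))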
Example 8: convert
def convert(data, field):
    if isinstance(data, Literal):
        data = data.value
    if isinstance(data, URIRef):
        return str(data)
    if isinstance(field, IndexedLanguageField):
        # Map each language tag to its string value.
        lng = {}
        for d in data:
            lang = d.language
            if not lang:
                lang = 'null'
            lng[lang] = str(d)
        return lng
    if isinstance(data, list):
        return [x for x in [convert(x, field) for x in data] if x]
    elif isinstance(field, IndexedDateTimeField):
        if data is None:
            return None
        if isinstance(data, str):
            data = parse(data)
        return data.strftime('%Y-%m-%dT%H:%M:%S')
    elif data and isinstance(data, FedoraObject):
        return data.id
    return data
Developer ID: mesemus, Project: fedoralink, Lines: 30, Source: elastic.py
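Since the parsed value is immediately formatted with strftime, the parse in Example 8 is presumably dateutil.parser.parse, which turns loosely formatted date strings into datetime objects:

from dateutil.parser import parse

dt = parse('2015-03-07 14:30')  # accepts many input formats
print(dt.strftime('%Y-%m-%dT%H:%M:%S'))  # '2015-03-07T14:30:00'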
Example 9: fetch_full_search_results
def fetch_full_search_results(course_search_url, partial_search_results):
    params = {
        'ICAJAX': '1',
        'ICType': 'Panel',
        'ICElementNum': '0',
        'ICStateNum': '57',
        'ICAction': '$ICField106$hviewall$0',
        'ICXPos': '0',
        'ICYPos': '0',
        'ICFocus': '',
        'ICSaveWarningFilter': '0',
        'ICChanged': '-1',
        'ICResubmit': '0',
        'ICModalWidget': '0',
        'ICZoomGrid': '0',
        'ICZoomGridRt': '0',
        'ICModalLongClosed': '',
        'ICActionPrompt': 'false',
        'ICFind': '',
        'ICAddCount': '',
    }
    # These values change per session, so scrape them from the previous page.
    dynamic_keys = ('ICSID', 'ICStateNum')
    dynamic_params = {}
    for key in dynamic_keys:
        dynamic_params[key] = parse('form[name=win0] input[name=%s]' % key,
                                    partial_search_results)[0]['value']
    params.update(dynamic_params)
    return fetch(course_search_url, params)
Developer ID: jwintersinger, Project: classy, Lines: 30, Source: classy.py
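Here parse appears to be a project-local helper that runs a CSS selector over an HTML page and returns the matching elements. A hypothetical stand-in using BeautifulSoup (not the project's actual implementation):

from bs4 import BeautifulSoup

def parse(selector, page_html):
    # Return the elements matching a CSS selector; attributes read dict-style.
    return BeautifulSoup(page_html, 'html.parser').select(selector)

html = '<form name="win0"><input name="ICSID" value="abc123"></form>'
print(parse('form[name=win0] input[name=ICSID]', html)[0]['value'])  # 'abc123'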
Example 10: main
def main():
    ''' download file and return it as string '''
    cj = http.cookiejar.CookieJar()
    opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj))
    urllib.request.install_opener(opener)
    inputhtml = urllib.request.urlopen(URL1).readlines()
    print(cj)
    imgdata = parse(inputhtml)
    writedata('img.png', imgdata)
    ocrfix()
    password = ocrdecode()
    print(password)
    postdata = post_data(password)
    print(postdata)
    responsehtml = urllib.request.urlopen(URL1, postdata).readlines()
    resultlines = list(map(lambda x: x.decode("utf-8"), responsehtml))
    for r in resultlines:
        print(r)
Developer ID: FloatingGuy, Project: programming-challenges, Lines: 25, Source: solve.py
Example 11: detect_redirect
def detect_redirect(self):
    parse = urllib.request.urlparse
    # the original url
    org_url = self.url_data
    # get an opener doing redirections
    try:
        opener = self._create_fetcher(redirect_handler=False)
        response = opener.open(self.url)
    except:
        raise UnknownHostName(self.url)
    # the new url
    new_url = parse(response.geturl())
    # detect a redirection
    new_loc = new_url.scheme + '://' + new_url.netloc
    org_loc = org_url.scheme + '://' + org_url.netloc
    self.is_redirected = (new_loc != org_loc)
    if self.is_redirected:
        self.printer.print_debug_line('%s redirects to %s' % (org_loc, new_loc), 2)
    else:
        self.printer.print_debug_line('%s does not redirect' % (org_loc, ), 2)
    # create a response object and add it to the cache
    R = _create_response(response)
    self.cache[new_loc] = R
    self.cache[self.url] = R
    return (self.is_redirected, new_loc)
Developer ID: seoanaliz, Project: wig, Lines: 33, Source: request2.py
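The alias works because urllib.request happens to re-export urlparse, but the canonical import is from urllib.parse; the origin comparison above boils down to:

from urllib.parse import urlparse

u = urlparse('https://sub.example.com/path?x=1')
print(u.scheme + '://' + u.netloc)  # 'https://sub.example.com'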
Example 12: iratingchart
def iratingchart(self, custid=None, category=ct.IRATING_ROAD_CHART):
    """ Gets the irating data of a driver using its customer id (custid)
        that generates the chart located in the driver's profile. """
    r = self.__req(ct.URL_STATS_CHART % (custid, category),
                   cookie=self.last_cookie)
    return parse(r)
Developer ID: ArjandeV, Project: ir_webstats, Lines: 7, Source: client.py
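Examples 12, 14 and 19 all come from the same ir_webstats client, where parse is presumably a project-local helper that decodes the JSON payload of a response. A rough, hypothetical stand-in:

import json

def parse(raw):
    # Decode a JSON response body; fall back to the raw text if it isn't JSON.
    try:
        return json.loads(raw)
    except ValueError:
        return raw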
Example 13: parseParms
def parseParms(xfile):
    if debugMode():
        print("parseParms:", xfile)
    pdict = {}
    try:
        statxml = os.stat(xfile)
    except:
        print("Error, file", xfile, "not found")
        return None
    try:
        t = parse(xfile)
    except:
        print("Error, could not parse", xfile)
        return None
    root = t.getroot()
    kids = list(root)
    for k in kids:
        # Map each child element's tag to its text content.
        pdict[k.tag] = k.text
    return pdict
Developer ID: rjohnson8103, Project: WParchive, Lines: 25, Source: wpexp2dita.py
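Given the getroot() call, the parse in Example 13 is presumably xml.etree.ElementTree.parse. A sketch against a hypothetical params.xml of the form <parms><host>example.com</host><port>8080</port></parms>:

from xml.etree.ElementTree import parse

t = parse('params.xml')
pdict = {child.tag: child.text for child in t.getroot()}
print(pdict)  # {'host': 'example.com', 'port': '8080'}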
Example 14: __check_cookie
def __check_cookie(self):
    """ Checks the cookie by testing a request response """
    r = parse(self.__req(ct.URL_DRIVER_COUNTS, cookie=self.last_cookie))
    if isinstance(r, dict):
        return True
    return False
Developer ID: ArjandeV, Project: ir_webstats, Lines: 7, Source: client.py
Example 15: get_article
def get_article(self, candidates, best_candidate):
    # Now that we have the top candidate, look through its siblings for content
    # that might also be related: preambles, content split by ads we removed, etc.
    sibling_score_threshold = max([10, best_candidate['content_score'] * 0.2])
    output = parse("<div/>")
    for sibling in best_candidate['elem'].parent.contents:
        if isinstance(sibling, NavigableString): continue
        append = False
        if sibling is best_candidate['elem']:
            append = True
        sibling_key = HashableElement(sibling)
        if sibling_key in candidates and candidates[sibling_key]['content_score'] >= sibling_score_threshold:
            append = True
        if sibling.name == "p":
            link_density = self.get_link_density(sibling)
            node_content = sibling.string or ""
            node_length = len(node_content)
            if node_length > 80 and link_density < 0.25:
                append = True
            elif node_length < 80 and link_density == 0 and re.search(r'\.( |$)', node_content):
                append = True
        if append:
            output.append(sibling)
    if not output: output.append(best_candidate)
    return output
Developer ID: edd07, Project: python-readability, Lines: 30, Source: readability.py
Example 16: decrypt
def decrypt(self, request, sessionid):
    """ Avoid showing plain sessionids.
        Optionally require that a referer exists and matches the
        whitelist, or reset the session.
    """
    if not sessionid:
        return ""
    # (nonce, sessionid) = sessionid.split(":", 1)
    # sessionid = self.xor(nonce, sessionid.decode("base64"))
    secret = self._secret(request)
    if self.settings.get("HOSTS", []):
        referer = request.META.get("HTTP_REFERER", "None")
        if referer == "None":
            # End session unless a referer is passed
            return ""
        url = parse(referer)
        if url.hostname not in self.settings["HOSTS"]:
            err = "%s is unauthorised" % url.hostname
            raise Exception(err)
    cipher = Fernet(secret)
    session_key = cipher.decrypt(sessionid)
    try:
        return str(session_key, "utf8")
    except:
        return ""
Developer ID: edcrewe, Project: django-cookieless, Lines: 27, Source: utils.py
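The Fernet in Example 16 is cryptography.fernet.Fernet; a minimal encrypt/decrypt round trip for context:

from cryptography.fernet import Fernet

secret = Fernet.generate_key()
cipher = Fernet(secret)
token = cipher.encrypt(b'session-key')
print(str(cipher.decrypt(token), 'utf8'))  # 'session-key'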
Example 17: determine_course_status
def determine_course_status(subject_name, course_name, term):
    pages = {}
    # PeopleSoft stores all pages in an iframe whose src is continually modified.
    # Fetch the page inside this iframe.
    pages['container'] = fetch('https://prdrps2.ehs.ucalgary.ca/psauthent/class-search/public')
    target_content = parse('[name=TargetContent]', pages['container'])[0]
    search_form_url = urllib.parse.unquote(target_content['src'])
    # Fetch class search form.
    pages['course_search'] = fetch(search_form_url)
    course_search_url = 'https://prdrps2.ehs.ucalgary.ca/psc/saprd/' + \
                        'EMPLOYEE/HRMS/c/COMMUNITY_ACCESS.CLASS_SEARCH.GBL'
    # Fetch initial set of search results.
    pages['search_results_partial'] = fetch_initial_search_results(
        course_search_url, term, subject_name, course_name, pages['course_search']
    )
    # Fetch full set of search results.
    # TODO: for classes where all results are on the first page (i.e., those
    # with <= 3 sections), do not perform this query; instead simply use
    # search_results_partial.
    pages['search_results_full'] = fetch_full_search_results(
        course_search_url, pages['search_results_partial']
    )
    return pages['search_results_full']
Developer ID: jwintersinger, Project: classy, Lines: 29, Source: classy.py
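The urllib.parse.unquote call above is the standard percent-decoding helper:

import urllib.parse

print(urllib.parse.unquote('class%2Dsearch%3Fterm%3D2249'))  # 'class-search?term=2249'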
Example 18: load_url
def load_url(parse, url, max_requests=1, timeout=60):
    requests = []
    for i in range(max_requests):
        req = {
            'url': url,
            'date': arrow.now(TZ).format(TIMEFMT)
        }
        requests.append(req)
        try:
            with urllib.request.urlopen(url, timeout=timeout) as conn:
                req['code'] = conn.getcode()
                now = arrow.now(TZ)
                data = parse(conn)
                return LoadUrlResult(data, now, requests)
        except urllib.error.HTTPError as e:
            # Record the failure and retry, up to max_requests times.
            req['code'] = e.code
    now = arrow.now(TZ)
    raise LoadUrlException(now, requests)
Developer ID: aureooms, Project: stib-mivb-api, Lines: 31, Source: __main__.py
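In Example 18, parse is a caller-supplied callback applied to the open connection. A hedged usage sketch with a placeholder URL:

import json

result = load_url(lambda conn: json.loads(conn.read().decode('utf-8')),
                  'https://example.com/api', max_requests=3)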
Example 19: hosted_results
def hosted_results(self, session_host=None, session_name=None,
                   date_range=None, sort=ct.SORT_TIME,
                   order=ct.ORDER_DESC, page=1):
    """ Search hosted race results using various fields. Returns a tuple
        (results, total_results), so if you want all results you should
        request successive pages (using page) until you gather all
        total_results. Each page has 25 (ct.NUM_ENTRIES) results max."""
    lowerbound = ct.NUM_ENTRIES * (page - 1) + 1
    upperbound = lowerbound + ct.NUM_ENTRIES - 1
    data = {'sort': sort, 'order': order, 'lowerbound': lowerbound,
            'upperbound': upperbound}
    if session_host is not None:
        data['sessionhost'] = session_host
    if session_name is not None:
        data['sessionname'] = session_name
    if date_range is not None:
        # Date range, converted to epoch milliseconds (hence the * 1000).
        tc = lambda s: \
            time.mktime(datetime.datetime.strptime(s, "%Y-%m-%d").
                        timetuple()) * 1000
        data['starttime_lowerbound'] = tc(date_range[0])
        data['starttime_upperbound'] = tc(date_range[1])
    r = self.__req(ct.URL_HOSTED_RESULTS, data=data)
    # tofile(r)
    res = parse(r)
    total_results = res['rowcount']
    results = res['rows']  # doesn't need format_results
    return results, total_results
Developer ID: ArjandeV, Project: ir_webstats, Lines: 33, Source: client.py
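As the docstring notes, each call returns one page of at most ct.NUM_ENTRIES rows. A sketch of gathering every page (client is an assumed instance of this class):

all_rows = []
page = 1
while True:
    rows, total = client.hosted_results(session_name='practice', page=page)
    all_rows.extend(rows)
    if len(all_rows) >= total:
        break
    page += 1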
Example 20: inputMove
def inputMove():
    moves = []
    mc.events.clearAll()
    while len(moves) < 2:
        # A chat message can specify the whole move directly.
        try:
            chats = mc.events.pollChatPosts()
            move = parse(chats[0].message)
            for m in moves:
                drawSquare(m[0], m[1])
            return move
        except:
            pass
        # Otherwise, read the move from two block hits on the board.
        hits = mc.events.pollBlockHits()
        if len(hits) > 0:
            c = hits[0].pos
            if (corner.x <= c.x and corner.y - 1 <= c.y and corner.z <= c.z and
                    c.x < corner.x + 64 and c.y < corner.y + MAXHEIGHT and c.z < corner.z + 64):
                m = (c.x - corner.x) / 8, (c.z - corner.z) / 8
                if len(moves) == 0 or m[0] != moves[0][0] or m[1] != moves[0][1]:
                    highlightSquare(m[0], m[1])
                    moves.append(m)
                    time.sleep(0.2)
                    mc.events.clearAll()  # debounce
                    continue
            # Hitting the same square again (or outside the board) cancels.
            for m in moves:
                drawSquare(m[0], m[1])
            moves = []
            mc.postToChat('Canceled. Enter another move.')
            time.sleep(0.2)
            mc.events.clearAll()  # debounce
        time.sleep(0.2)
    for m in moves:
        drawSquare(m[0], m[1])
    return tuple(moves)
Developer ID: Michaelangel007, Project: raspberryjammod, Lines: 34, Source: chess.py
Note: The urllib.parse function examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation hosting platforms. The snippets were selected from open-source projects contributed by their respective authors, who retain copyright; consult each project's License before redistributing or using the code. Do not reproduce without permission.