This article collects typical usage examples of the Python function utils.unescape. If you have been wondering what unescape does, how to call it, or what real-world uses look like, the curated examples below should help.
The following 20 code examples of the unescape function are shown, ordered by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code samples.
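Before the examples: in most of these projects, utils.unescape is a small project-local helper that converts HTML/XML character references (such as &amp; or &quot;) back to plain text, sometimes with an extra decoding step (compare Example 11). Below is a minimal sketch of such a helper, assuming Python 3's standard html module and a hypothetical optional encoding parameter; it is not the implementation used by any project quoted here, each of which defines its own version.

# Minimal sketch, assuming Python 3; the projects below mostly target Python 2
# and ship their own utils.unescape.
import html

def unescape(text, encoding=None):
    """Convert HTML/XML character references back to plain text."""
    if isinstance(text, bytes):
        # Hypothetical convenience: decode bytes first, loosely mirroring the
        # unescape(body, "Windows-1252") call style seen in Example 11.
        text = text.decode(encoding or "utf-8")
    return html.unescape(text)

# Usage:
#   unescape('Tom &amp; Jerry &quot;Best of&quot;')  ->  'Tom & Jerry "Best of"'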
Example 1: parse_video_response
def parse_video_response(self, response):
    data = simplejson.loads(response.read())
    items = list()
    count = 0
    for video in data['videos']:
        vid_url, thumb_url, category_id, dur = self.get_video_urls(video)
        count += 1
        if vid_url is None:
            continue
        meta = video.get('meta')
        items.append({
            'label': unescape(meta.get('title')),
            'thumbnail': thumb_url,
            'info': {
                'plot': unescape(meta.get('preamble') or ''),
                'originaltitle': unescape(meta.get('title') or '???'),
                'tagline': unescape(meta.get('preamble') or ''),
                'aired': self.get_date(meta.get('timePublished')),
                'duration': self.get_duration(meta.get('duration'))
            },
            'stream_info': {
                'video': {
                    'duration': meta.get('duration', 0)
                }
            },
            'path': vid_url,
            'is_playable': True,
        })
    return items, (count < self.PER_PAGE)
Developer: androa | Project: vgtv-xbmc | Lines: 32 | File: api.py
Example 2: csv2po
def csv2po(csv_file):
    """Convert a file-like object `csv_file` to a polib.POFile object"""
    po = polib.POFile()
    # Reset to reading from the beginning of the file
    csv_file.seek(0)
    csv_reader = csv.reader(csv_file)
    for count, row in enumerate(csv_reader):
        # Skip the first two header rows
        if count < len(csv_header_rows):
            continue
        msgid = unescape(row[0])
        msgid_plural = unescape(row[1])
        msgctxt = row[2]
        msgstr, msgstr_plural = undo_plurals(msgid_plural, row[3])
        entry = polib.POEntry()
        entry.msgid = msgid
        if msgid_plural:
            entry.msgid_plural = msgid_plural
        if msgctxt:
            entry.msgctxt = msgctxt
        if msgstr:
            entry.msgstr = msgstr
        if msgstr_plural:
            entry.msgstr_plural = msgstr_plural
        po.append(entry)
    return po
Developer: sleepyjames | Project: ppb | Lines: 33 | File: po2csv.py
Example 3: load
def load(request):
    if not request.user.is_authenticated():
        return HttpResponseRedirect("/accounts/login")
    if request.method == 'POST':
        form = ImportDeliciousForm(request.POST, request.FILES)
        if form.is_valid():
            db = get_database()[Bookmark.collection_name]
            html = request.FILES['exported'].read().decode('utf8')
            soup = BeautifulSoup(html)
            for item in soup.findAll('dt'):
                desc = ''
                next = item.findNextSiblings()
                if next:
                    next = next[0]
                    if 'name' in dir(next) and next.name == 'dd':
                        desc = unescape(u''.join(imap(unicode, next.contents)))
                db.Bookmark({'url': urlSanitize(item.a['href']),
                             'seq': getNextVal('seq'),
                             'tags': item.a['tags'].split(','),
                             'user': unicode(request.user),
                             'created': datetime.fromtimestamp(float(item.a['add_date'])),
                             'private': item.a['private'] == '1',
                             'title': unescape(unicode(item.a.string)),
                             'notes': unicode(desc)}).save()
            return HttpResponseRedirect('/u/%s/' % request.user)
    else:
        form = ImportDeliciousForm()
    return render_to_response('import.html', {'form': form, }, context_instance=RequestContext(request))
Developer: dnet | Project: omnom | Lines: 28 | File: views.py
Example 4: AddDir
def AddDir(name, mode, url=None, image=None, fanart=None, isFolder=False, isPlayable=False, desc='', plot='', contextMenu=None, replaceItems=False, infoLabels=None):
    try:
        name = name.encode('utf-8')
        url = utils.fixUnicode(utils.unescape(url))
    except:
        pass
    try:
        if not validateMode(mode, name):
            return
        if not fanart:
            fanart = FANART
        name = name.replace('_', ' ')
        infoLabels = {'title':name, 'fanart':fanart, 'description':desc, 'plot':plot}
        image = utils.patchImage(mode, image, url, infoLabels)
        u = ''
        u += '?mode=' + str(mode)
        u += '&title=' + urllib.quote_plus(name)
        if image:
            u += '&image=' + urllib.quote_plus(image)
        if url:
            u += '&url=' + urllib.quote_plus(url).replace('%25LB%25', '%')
        APPLICATION.addDir(utils.unescape(name), mode, u, image, isFolder, isPlayable, contextMenu=contextMenu, replaceItems=replaceItems, infoLabels=infoLabels)
    except Exception, e:
        raise
Developer: TheLivebox | Project: TheLiveBox | Lines: 33 | File: livebox.py
Example 5: __reload
def __reload(self, values):
    self.__raw.__dict__.update(values)
    self.firstname = unescape(self.__raw.firstname)
    self.lastname = unescape(self.__raw.lastname)
    self.company = unescape(self.__raw.company)
    self.colleagues = self.__raw.colleagues
    self.id = int(self.__raw.id_user)
    self.lang = LANG_ID[int(self.__raw.lang) + 1]
Developer: magarcia | Project: python-producteev | Lines: 8 | File: users.py
Example 6: fetch_albums
def fetch_albums(url):
    html = urlopen(url)
    found = re.findall(r'<td class="Title".*?<a href="/music/url\?q=(/music/album\?id%3D.*?)".*?>(.*?)</a>', html)
    print '# albums:', len(found), urllib.unquote(url)
    for link, title in found:
        link = 'http://www.google.cn' + link.split('&')[0]
        title = unescape(title)
        print urllib.unquote(link), '|', title
    found = re.findall(r'<td>.*?<a class="imglink" href="/music/url\?q=(.*?)"', html)
    pages = ['http://www.google.cn' + urllib.unquote(i.split('&')[0]) for i in found]
    cache[url] = True
    for page in pages:
        if page not in cache:
            cache[page] = False
    another_page = None
    for page, done in cache.iteritems():
        if not done:
            another_page = page
            break
    if another_page:
        fetch_albums(another_page)
Developer: huanghao | Project: muse | Lines: 26 | File: fetch_searched_albums.py
Example 7: get_categories
def get_categories(self, root_id=0, only_series=False):
    categories = self.get_category_tree()
    root = int(root_id)
    matches = []
    for category in categories:
        id = category.get('id')
        if category.get('showCategory') is False:
            continue
        if only_series is True and category.get('isSeries') is not True:
            continue
        if only_series is False and category.get('parentId') != root:
            continue
        matches.append({
            'label': unescape(category.get('title')),
            'path': self.plugin.url_for(
                'show_category',
                id=str(id),
                mode='all'
            ),
            'id': id
        })
    return matches
Developer: rexxars | Project: vgtv-xbmc | Lines: 28 | File: api.py
Example 8: __init__
def __init__(
    self,
    uid,
    summary,
    dtstamp=None,
    created=None,
    last_modified=None,
    related_to=None,
    completed=None,
    percent_complete=None,
    x_kde_ktimetracker_totalsessiontime=None,
    x_kde_ktimetracker_totaltasktime=None,
    x_kde_ktimetracker_bctype=None,
):
    self.uid = uid
    self.summary = unescape(summary)
    self.dtstamp = dtstamp
    self.created = created
    self.last_modified = last_modified
    self.related_to = related_to
    self.completed = completed
    self.percent_complete = percent_complete
    self.x_kde_ktimetracker_totalsessiontime = x_kde_ktimetracker_totalsessiontime
    self.x_kde_ktimetracker_totaltasktime = x_kde_ktimetracker_totaltasktime
    self.x_kde_ktimetracker_bctype = x_kde_ktimetracker_bctype
    self.todos = {}
Developer: softformance | Project: basecamp.karm | Lines: 27 | File: todo.py
Example 9: gen_solution
def gen_solution(cur, td, num, p_id):
    # import pdb
    # pdb.set_trace()
    global testcase_id
    global testcase_crawled
    if num == 0:
        column_name = 'java'
    elif num == 1:
        column_name = 'cpp'
    elif num == 2:
        column_name = 'csharp'
    else:
        column_name = 'VB'
    cur.execute('select %s from problem where id = %d' % (column_name, p_id))
    if cur.fetchall()[0][0] != None:
        return
    p = compile('"/stat\?c=problem_solution.*?"')
    l = p.findall(td)
    if len(l) == 1:
        url = topcoder_site_url + unescape(l[0][1:-1])
        try:
            page = topcoder.get_page(url)
        except Exception, e:
            print url, e
            return
        p = compile('<TD CLASS="problemText" COLSPAN="8" VALIGN="middle" ALIGN="left">[\d\D]*?</TD>')
        try:
            code = escape_string(p.findall(page)[0])
        except Exception, e:
            print 'No code found:', url, e
            return
Developer: Andimeo | Project: topcoder_crawler | Lines: 32 | File: gen_problem_solution.py
Example 10: insertPicDetail
def insertPicDetail(self, picDetailModel):
    cur = self.con.cursor()
    try:
        sql = '''INSERT INTO admin_picdetail
            (`pid`, `pic_path`, `height`, `width`, `pic_desc`, `categoary_id`, `albunm_name`, `albunm_id`, `user_id`,
            `time`, `taoke_num_iid`, `taoke_title`, `taoke_price`)
            VALUES ('%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s','%s')''' % (picDetailModel.pId
                                                                                            , picDetailModel.picPath
                                                                                            , picDetailModel.height
                                                                                            , picDetailModel.width
                                                                                            , picDetailModel.desc
                                                                                            , picDetailModel.cateId
                                                                                            , picDetailModel.albunmName
                                                                                            , picDetailModel.albunmId
                                                                                            , picDetailModel.userId
                                                                                            , picDetailModel.time
                                                                                            , picDetailModel.taokeNumIID
                                                                                            , picDetailModel.title
                                                                                            , picDetailModel.price)
        sql = utils.unescape(sql).encode('utf-8')
        cur.execute(sql)
        self.con.commit()
    except Exception, what:
        print '========-------=======', what
        # print sql
        pass
Developer: poorevil | Project: MutilRequests | Lines: 31 | File: PicDetailDao_mysql.py
Example 11: fetch_bioguide_page
def fetch_bioguide_page(bioguide, force):
    url = "http://bioguide.congress.gov/scripts/biodisplay.pl?index=%s" % bioguide
    cache = "legislators/bioguide/%s.html" % bioguide
    try:
        body = download(url, cache, force)
        # Fix a problem?
        body = body.replace("Á\xc2\x81", "Á")
        # Entities like &#146; are in Windows-1252 encoding. Normally lxml
        # handles that for us, but we're also parsing HTML. The lxml.html.HTMLParser
        # doesn't support specifying an encoding, and the lxml.etree.HTMLParser doesn't
        # provide a cssselect method on element objects. So we'll just decode ourselves.
        body = utils.unescape(body, "Windows-1252")
        dom = lxml.html.parse(io.StringIO(body)).getroot()
    except lxml.etree.XMLSyntaxError:
        raise Exception("Error parsing: " + url)
    # Sanity check.
    if len(dom.cssselect("title")) == 0:
        raise Exception("No page for bioguide %s!" % bioguide)
    return dom
Developer: TheWalkers | Project: congress-legislators | Lines: 25 | File: bioguide.py
Example 12: settings_to_log
def settings_to_log( self ):
    try:
        utils.log( "Settings" )
        setting_values = self.read_settings_xml()
        for k, v in sorted( setting_values.items() ):
            utils.log( "%30s: %s" % ( k, str( utils.unescape( v.decode('utf-8', 'ignore') ) ) ) )
    except:
        traceback.print_exc()
Developer: Giftie | Project: service.makemkv.rip | Lines: 8 | File: settings.py
Example 13: _locate
def _locate(self, town_name):
    town_name = utils.unescape(town_name.strip())
    if town_name not in self.location_cache:
        try:
            self.location_cache[town_name] = self.geo_locator.geocode(town_name)
        except geopy.exc.GeocoderTimedOut:
            print "Geocoder Timeout."
            return None
    return self.location_cache[town_name]
Developer: hikhvar | Project: jtr_scrapper | Lines: 9 | File: jtr_spider.py
Example 14: getFavourites
def getFavourites(file, limit=10000, validate=True, superSearch=False):
    import xbmcgui
    file = xbmc.translatePath(file)
    xml = '<favourites></favourites>'
    if os.path.exists(file):
        fav = open(file, 'r')
        xml = fav.read()
        fav.close()
    items = []
    faves = re.compile('<favourite(.+?)</favourite>').findall(xml)
    for fave in faves:
        fave = fave.replace('&quot;', '&_quot_;')
        fave = fave.replace('\'', '"')
        fave = utils.unescape(fave)
        fave = fave.replace('name=""', '')
        try: name = re.compile('name="(.+?)"').findall(fave)[0]
        except: name = ''
        try: thumb = re.compile('thumb="(.+?)"').findall(fave)[0]
        except: thumb = ''
        try: cmd = fave.split('>', 1)[-1]
        except: cmd = ''
        #name = utils.Clean(name.replace( '&_quot_;', '"'))
        name = name.replace('&_quot_;', '"')
        thumb = thumb.replace('&_quot_;', '"')
        cmd = cmd.replace('&_quot_;', '"')
        add = False
        if superSearch:
            add = isValid(cmd)
        elif (SHOWUNAVAIL) or (not validate) or isValid(cmd):
            add = True
        if add:
            cmd = upgradeCmd(cmd)
            if cmd.startswith('PlayMedia'):
                option = 'mode'
                try:
                    mode = int(favourite.getOption(cmd, option))
                except:
                    win = xbmcgui.getCurrentWindowId()
                    cmd = updateSFOption(cmd, 'winID', win)
            items.append([name, thumb, cmd])
            if len(items) > limit:
                return items
    return items
Developer: kemalsecer | Project: spoyser-repo | Lines: 57 | File: favourite.py
Example 15: post
def post(self):
    text = self.request.get('text')
    if text:
        conver_url = utils.unescape(self.request.get('url'))
        conver = Conver.get_for_url(conver_url)
        message = Message(author=PermaUser.get_current_permauser(), text=text, conver=conver)
        message.put()
        self.distribute_message(message)
    else:
        logging.error("No message '%S'saved for %s", text, conver_url)
Developer: lehrblogger | Project: Teatime_PageChat | Lines: 11 | File: handlers.py
Example 16: addtoKodiFavorites
def addtoKodiFavorites(command, name, thumbnail):
    import xml.etree.ElementTree
    from utils import unescape
    #adding to favorites involves 3 steps:
    # 1.) add the favorite via jsonrpc (script params not included)
    # 2.) modify the favourites.xml to include script params <-- (kodi18 leia alpha1) i think there is another favourites file or this file is cached until another favorite is added
    # 3.) ??? <-- adding another favorite will delete the first one (until kodi is restarted) need to find a way for kodi to reload the modified favourite.xml
    #http://kodi.wiki/view/JSON-RPC_API/v8#Favourites
    #schema=xbmc.executeJSONRPC('{"jsonrpc": "2.0", "method": "JSONRPC.Introspect", "id": 1}')
    #log(repr(schema))
    favorite_was_found = False
    #add_dummy_favorite()
    temp_command = 'script.reddit.reader'  #can't add script favorites with parameter using jsonrpc
    saved_command = 'RunScript("script.reddit.reader")'
    json_rpc_command = {"jsonrpc": "2.0",
                        "method": "Favourites.AddFavourite",
                        'params': {
                            'title': name,
                            'type': 'script',
                            'path': temp_command,
                            'thumbnail': thumbnail,
                        },
                        'id': '1'
                        }
    a = xbmc.executeJSONRPC(json.dumps(json_rpc_command))
    #log(repr(a))
    a = json.loads(a)
    if a.get('result', '') == "OK":
        log('Favourite added')
        #now that we've created the favorite, we edit it to add parameters
        favorites_xml = xbmc.translatePath(os.path.join(addon.getAddonInfo('profile'), '..', '..', 'favourites.xml'))
        if os.path.exists(favorites_xml):
            #log('{0} exists'.format(favorites_xml) )
            et = xml.etree.ElementTree.parse(favorites_xml)
            root = et.getroot()
            for f in root.findall('favourite'):
                #the name attribute is escape-encoded in the xml file.
                fav_name = unescape(f.get('name'))  #replaces &amp; to & etc.
                fav_cmd = f.text
                #log('*a*'+repr(name) + ' ' + saved_command)
                #log('*b*'+repr(fav_name) + ' ' + fav_cmd )
                #log('---')
                if (fav_name == name) and (fav_cmd == saved_command):
                    log('Favourite entry found {0}'.format(fav_name))
                    favorite_was_found = True
                    f.text = command
            if favorite_was_found:
                et.write(favorites_xml)
                xbmc_notify(translation(32028), fav_name, icon=thumbnail)
Developer: gedisony | Project: script.reddit.reader | Lines: 54 | File: actions.py
Example 17: extract_tweets
def extract_tweets(tweets, cmd_line=False):
    """ prints the tweets from tweets: list of tweet dicts """
    tweet_texts = []
    for tweet in tweets:
        text = get_tweet(tweet)
        if cmd_line:
            text = text.encode('unicode-escape')
        text = ununicode(text)
        text = unescape(text)
        tweet_texts.append(parser(text))
    return tweet_texts
Developer: cshintov | Project: timeline | Lines: 11 | File: tweets.py
Example 18: parse_starting_page
def parse_starting_page(self, response):
    ranking = 0
    for sel in response.xpath('//div[@class="content"]/table/tr'):
        team_link = sel.xpath('td/a/@href').extract_first()
        if team_link is not None:
            team_name = sel.xpath('td/a/text()').extract_first()
            data = sel.xpath('td/text()').extract()
            ranking_item = JtrTeamRankingItem()
            ranking_item['team_name'] = utils.unescape(team_name)
            if len(data) == 4:
                ranking, city, tournaments, points = data
            else:
                city, tournaments, points = data
            ranking_item['ranking'] = int(ranking.split("/")[0].strip().strip("."))
            ranking_item['hometown'] = utils.unescape(city)
            ranking_item['points'] = float(points)
            ranking_item['number_of_tournaments'] = utils.unescape(tournaments)
            ranking_item['crawl_date'] = datetime.datetime.now()
            yield ranking_item
            yield scrapy.Request(response.urljoin(team_link), callback=self.parse_team_site)
Developer: hikhvar | Project: jtr_scrapper | Lines: 20 | File: jtr_spider.py
Example 19: check_url
def check_url(url, geturl=False):
    send = []
    try:
        o = urllib.urlopen(url)
        ctype, clength = o.info().get("Content-Type"), o.info().get("Content-Length")
        if o.info().gettype() == "text/html":
            title = 'Pas de titre'
            html = o.read(1000000)
            try:
                SoupList = BeautifulSoup(utils.unescape(html),
                                         parseOnlyThese=SoupStrainer('title'))
            except UnicodeDecodeError:
                SoupList = BeautifulSoup(utils.unescape(html.decode("latin1", "ignore")),
                                         parseOnlyThese=SoupStrainer('title'))
            try:
                titles = [title for title in SoupList]
                title = utils.xhtml2text(titles[0].renderContents())
            except IndexError:
                title = "Pas de titre"
            except HTMLParseError:
                pass
            if geturl:
                send.append("%s : [Lien] Titre : %s" %
                            (o.geturl(), " ".join(title.split())))
            else:
                send.append("[Lien] Titre : %s" % " ".join(title.split()))
        else:
            send.append("[Lien] Type: %s, Taille : %s octets" % (ctype, clength))
        o.close()
    except IOError as error:
        if error[1] == 401:
            send.append("Je ne peux pas m'authentifier sur %s :'(" % url)
        elif error[1] == 404:
            send.append("%s n'existe pas !" % url)
        elif error[1] == 403:
            send.append("Il est interdit d'accéder à %s !" % url)
        else:
            send.append("Erreur %s sur %s" % (error[1], url))
    except httplib.InvalidURL:
        send.append("L'URL %s n'est pas valide !" % url)
    return send
Developer: alub | Project: pipobot | Lines: 41 | File: utils.py
Example 20: get_musicbrainz_artists
def get_musicbrainz_artists( artist_search, limit=1 ):
    log( "Artist: %s" % artist_search, xbmc.LOGDEBUG )
    score = ""
    name = ""
    id = ""
    sortname = ""
    artists = []
    artist_name = smart_unicode( artist_search.replace( '"', '?' ) )
    url = artist_url % ( server, quote_plus( artist_name.encode("utf-8") ), limit )
    htmlsource = get_html_source( url, "", save_file = False, overwrite = False )
    match = re.findall( '''<artist(.*?)</artist>''', htmlsource )
    if match:
        for item in match:
            artist = {}
            artist["score"] = ""
            artist["name"] = ""
            artist["id"] = ""
            artist["sortname"] = ""
            score_match = re.search( '''score="(.*?)"''', item )
            name_match = re.search( '''<name>(.*?)</name>''', item )
            id_match = re.search( '''id="(.*?)"(?:.*?)>''', item )
            if not id_match:
                id_match = re.search( '''id="(.*?)">''', item )
            sort_name_match = re.search( '''<sort-name>(.*?)</sort-name>''', item )
            if score_match:
                artist["score"] = score_match.group(1)
            if name_match:
                artist["name"] = unescape( smart_unicode( name_match.group(1) ) )
            if id_match:
                artist["id"] = id_match.group(1)
            if sort_name_match:
                artist["sortname"] = unescape( smart_unicode( sort_name_match.group(1) ) )
            log( "Score : %s" % artist["score"], xbmc.LOGDEBUG )
            log( "Id : %s" % artist["id"], xbmc.LOGDEBUG )
            log( "Name : %s" % artist["name"], xbmc.LOGDEBUG )
            log( "Sort Name : %s" % artist["sortname"], xbmc.LOGDEBUG )
            artists.append(artist)
    else:
        log( "No Artist ID found for Artist: %s" % repr( artist_search ), xbmc.LOGDEBUG )
    xbmc.sleep( mb_delay )
    return artists
Developer: Rah85 | Project: script.cdartmanager | Lines: 41 | File: musicbrainz_utils.py
Note: the utils.unescape examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are taken from open-source projects contributed by their respective authors, and copyright of the source code remains with those authors; consult each project's license before reusing or redistributing the code. Do not republish without permission.