
Python urllib2.quote Function Code Examples


This article collects typical usage examples of the urllib2.quote function in Python. If you are wondering what quote does, how to call it, or what real-world uses of it look like, the curated code examples below should help.



The sections below show 20 code examples of the quote function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
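Before diving into the examples, here is a minimal sketch of the function's behaviour (in Python 2, urllib2.quote is the same object as urllib.quote): by default it leaves letters, digits, '_.-' and '/' untouched and percent-encodes everything else, and the optional second argument lists extra characters to treat as safe.

import urllib2

path = "/data/my file (1).txt"
print urllib2.quote(path)             # '/data/my%20file%20%281%29.txt'  ('/' is safe by default)
print urllib2.quote(path, safe="")    # '%2Fdata%2Fmy%20file%20%281%29.txt'  ('/' encoded as well)
print urllib2.quote("a&b=c")          # 'a%26b%3Dc'  ('&' and '=' are not safe by default)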

Example 1: _update_request_uri_query

def _update_request_uri_query(request):
    '''pulls the query string out of the URI and moves it into 
    the query portion of the request object.  If there are already
    query parameters on the request the parameters in the URI will
    appear after the existing parameters'''

    if '?' in request.path:
        request.path, _, query_string = request.path.partition('?')
        if query_string:
            query_params = query_string.split('&')
            for query in query_params:
                if '=' in query:
                    name, _, value = query.partition('=')
                    request.query.append((name, value))

    request.path = urllib2.quote(request.path, '/()$=\',')

    #add encoded queries to request.path. 
    if request.query:
        request.path += '?' 
        for name, value in request.query:
            if value is not None:
                request.path += name + '=' + urllib2.quote(value, '/()$=\',') + '&'
        request.path = request.path[:-1]

    return request.path, request.query
Developer ID: laptopplusheerlen, Project: azure-sdk-for-python, Lines of code: 26, Source file: __init__.py
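As a rough sketch of what this helper does (using a hypothetical bare-bones request object, not the actual Azure SDK request class): the path is split at '?', each name=value pair is appended to request.query, and the path and the query values are percent-encoded with quote before being reassembled.

import urllib2

class FakeRequest(object):   # hypothetical stand-in for the SDK's request object
    def __init__(self, path):
        self.path = path
        self.query = []

req = FakeRequest("/container/my blob.txt?comp=list&restype=container")
path, query = _update_request_uri_query(req)
print path    # '/container/my%20blob.txt?comp=list&restype=container'
print query   # [('comp', 'list'), ('restype', 'container')]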


Example 2: set_language

 def set_language(self):
     "Set the language"
     nextpage = request.params.get('next', None)
     if not nextpage:
         nextpage = request.headers.get('Referer', None)
     if not nextpage:
         nextpage = '/'
     if '://' in nextpage:
         from_url = urlparse(nextpage)
         nextpage = from_url[2]
     lang_code = request.params.get('language', None)
     if lang_code and check_language(lang_code):
         session['lang'] = lang_code
         session.save()
     params = []
     for param in request.params:
         if not param in ['language', 'amp']:
             value = request.params[param]
             if value:
                 if (param == 'came_from' and
                     '://' in urllib2.unquote(value)):
                     urlparts = urlparse(urllib2.unquote(value))
                     value = urlparts[2] or '/'
                 params.append('%s=%s' % (urllib2.quote(param),
                                         urllib2.quote(value)))
     if 'lc=1' not in params:
         params.append('lc=1')
     if params:
         nextpage = "%s?%s" % (nextpage, '&'.join(params))
     redirect(nextpage)
Developer ID: TetraAsh, Project: baruwa2, Lines of code: 30, Source file: accounts.py


Example 3: test_import_to_shape

    def test_import_to_shape(self):
        from gnmvidispine.vs_item import VSItem
        i = VSItem(host=self.fake_host,port=self.fake_port,user=self.fake_user,passwd=self.fake_passwd)

        i.name = "VX-123"
        i.sendAuthorized = MagicMock(return_value=self.MockedResponse(200,  self.import_job_doc))
        
        with self.assertRaises(ValueError):
            i.import_to_shape() #expect ValueError if neither uri nor file ref
        
        fake_uri="file:///path/to/newmedia.mxf"
        quoted_uri=quote(fake_uri,"")   #we are embedding a URI as a parameter with another URL so it must be double-encoded
        
        i.import_to_shape(uri=fake_uri,shape_tag="shapetagname",priority="HIGH")
        i.sendAuthorized.assert_called_with('POST',
                                            '/API/item/VX-123/shape?priority=HIGH&essence=false&tag=shapetagname&thumbnails=true&uri={0}'.format(quoted_uri)
                                            ,"",{'Accept':'application/xml'}, rawData=False)

        fake_uri = "file:///path/to/" + quote("media with spaces.mxf",safe="/")
        quoted_uri = quote(fake_uri,"")  # we are embedding a URI as a parameter with another URL so it must be double-encoded
        
        i.import_to_shape(uri=fake_uri, shape_tag="shapetagname", priority="HIGH")
        i.sendAuthorized.assert_called_with('POST',
                                            '/API/item/VX-123/shape?priority=HIGH&essence=false&tag=shapetagname&thumbnails=true&uri={0}'.format(
                                                quoted_uri)
                                            , "", {'Accept': 'application/xml'}, rawData=False)

        fake_uri = "file:///path/to/" + quote("media+with+plusses.mxf",safe="/+")
        quoted_uri = quote(fake_uri,"")  # we are embedding a URI as a parameter with another URL so it must be double-encoded
        
        i.import_to_shape(uri=fake_uri, shape_tag="shapetagname", priority="HIGH")
        i.sendAuthorized.assert_called_with('POST',
                                            '/API/item/VX-123/shape?priority=HIGH&essence=false&tag=shapetagname&thumbnails=true&uri={0}'.format(
                                                quoted_uri)
                                            , "", {'Accept': 'application/xml'}, rawData=False)
Developer ID: fredex42, Project: gnmvidispine, Lines of code: 35, Source file: test_vsitem.py
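The comments in this test mention double-encoding: because the file URI is itself embedded as the value of the uri= query parameter of another URL, it is quoted with safe="" so that ':' and '/' (and any '%' left by an already-escaped filename) are percent-encoded too. A quick sketch of the effect:

from urllib2 import quote

fake_uri = "file:///path/to/" + quote("media with spaces.mxf", safe="/")
print fake_uri             # 'file:///path/to/media%20with%20spaces.mxf'
print quote(fake_uri, "")  # 'file%3A%2F%2F%2Fpath%2Fto%2Fmedia%2520with%2520spaces.mxf'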


Example 4: get_lyrics

def get_lyrics(entry, db):
    global errors
    global successes

    title = entry['title'].encode('utf-8')
    artist = entry['artist'].encode('utf-8')
    year = entry['year']

    artist_clean = urllib2.quote(sanitize_artist(artist).replace(" ", "_"))
    title_clean = urllib2.quote(sanitize_title(title).replace(" ", "_"))
    url = 'http://lyrics.wikia.com/' + artist_clean + ':' + title_clean
    page = requests.get(url)
    if page.status_code != 200:
        print "404 error getting lyrics for " + title + " by " + artist + ", " + str(year)
        errors += 1
    else:
        page_soup = BeautifulSoup(page.text)
        lyrics = page_soup.select(".lyricbox")
        if len(lyrics) == 0:
            print "Parsing error getting lyrics for " + title + " by " + artist + ", " + str(year)
            errors += 1
            return

        lyrics = lyrics[0]
        [x.extract() for x in lyrics.findAll('script')]
        lyrics = lyrics.get_text(' ', strip=True).encode('utf-8')
        lyrics = santize(lyrics)
        entry['lyrics'] = lyrics
        db.entries.save(entry)
        successes += 1
        print "Successfully extracted lyrics for " + title + " by " + artist
Developer ID: MistaTwista, Project: top-40, Lines of code: 31, Source file: lyrics.py
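One detail worth noting here (and in several of the examples below): in Python 2, urllib2.quote expects a byte string, so unicode values with non-ASCII characters are encoded to UTF-8 before being quoted; passing such a unicode value directly typically raises a KeyError. A minimal illustration:

import urllib2

artist = u"Beyonc\xe9"                        # u"Beyoncé"
print urllib2.quote(artist.encode("utf-8"))   # 'Beyonc%C3%A9'
# urllib2.quote(artist) would raise KeyError: u'\xe9' on Python 2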


Example 5: get

 def get(self,method,args=None):
     """ GET to DeepDetect server """
     u = self.__ddurl
     u += method
     headers = {}
     if args is not None:
         sep = "?"
         for arg,argv in args.iteritems():
             u += sep
             sep = "&"
             u += urllib2.quote(arg)
             u += '='
             if argv is not None:
                 u += urllib2.quote(argv)
                 
     LOG("GET %s"%u)
     response = None
     try:
         req = urllib2.Request(u)
         response = urllib2.urlopen(req, timeout=DD_TIMEOUT)
         jsonresponse=response.read()
     except:
         raise DDCommunicationError(u,"GET",headers,None,response)
     LOG(jsonresponse)
     try:
         return self.__return_format(jsonresponse)
     except:
         raise DDDataError(u,"GET",headers,None,jsonresponse)
Developer ID: nagyistge, Project: deepdetect, Lines of code: 28, Source file: dd_client.py


Example 6: get_lat_lng

def get_lat_lng(address, city, state):
    c = config.load()

    # If address is a PO Box, skip
    if re.search('P(\.)?O(\.)?(\sBox\s)[0-9]+', address) is not None or address == '':
        return None
    else:
        url = 'https://api.smartystreets.com/street-address?'
        url += 'state=' + urllib2.quote(str(state))
        url += '&city=' + urllib2.quote(str(city))
        url += '&auth-id=' + c['ss_id']
        url += '&auth-token=' + c['ss_token']
        url += '&street=' + urllib2.quote(str(address))

        result = json.load(urllib2.urlopen(url))

        if len(result) == 1:
            lat_lng = {'lat': result[0]['metadata']['latitude'], 'lng': result[0]['metadata']['longitude']}
            return lat_lng
        elif len(result) == 0:
            # return generic lat/lng if zero results so we can come back later to fix it
            lat_lng = {'lat': 36.0, 'lng': -76.0}
            return lat_lng
        else:
            print result
            exit(-1)
Developer ID: openchattanooga, Project: open-health-inspection-scraper, Lines of code: 26, Source file: scrapertools.py
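Building the query string by hand with repeated quote calls works, but the same result is usually produced more compactly with urllib.urlencode, which quotes every key and value for you. A sketch using the same variable names as above; note that urlencode uses quote_plus, so spaces become '+' rather than '%20', which is equally valid in a query string:

import urllib

params = {
    'state': str(state),
    'city': str(city),
    'auth-id': c['ss_id'],
    'auth-token': c['ss_token'],
    'street': str(address),
}
url = 'https://api.smartystreets.com/street-address?' + urllib.urlencode(params)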


Example 7: build_query

 def build_query(self):
     """
     Builds query to access to cghub server.
     """
     parts = []
     for key, value in self.query.iteritems():
         if isinstance(value, list) or isinstance(value, tuple):
             value_str = '+OR+'.join([
                     self.escape_query_value(key, v) for v in value])
             value_str = '(%s)' % value_str
         else:
             value_str = self.escape_query_value(key, value)
         parts.append('='.join([key, value_str]))
     if self.offset:
         parts.append('='.join(['start', str(self.offset)]))
     if self.limit:
         parts.append('='.join(['rows', str(self.limit)]))
     if self.sort_by:
         if self.sort_by[0] == '-':
             parts.append('='.join([
                     'sort_by',
                     '%s:desc' % urllib2.quote(self.sort_by[1:])]))
         else:
             parts.append('='.join([
                     'sort_by',
                     '%s:asc' % urllib2.quote(self.sort_by)]))
     return '&'.join(parts)
Developer ID: 42cc, Project: cghub-python-api, Lines of code: 27, Source file: api.py


Example 8: get_SIMBAD_coordinates

def get_SIMBAD_coordinates(name):
    url = VOTABLE_OPTIONS + SIMBAD_VOTABLE_SCRIPT_START + QUERY_VOTABLE_FULLCOORDINATES + SIMBAD_VOTABLE_SCRIPT_MIDDLE + name + SIMBAD_VOTABLE_SCRIPT_END

    try:
        response = urllib2.urlopen(SIMBAD_ROOT_1+NAME_SCRIPT+urllib2.quote(url))
    except urllib2.URLError:
        try:
            response = urllib2.urlopen(SIMBAD_ROOT_2+NAME_SCRIPT+urllib2.quote(url))
        except urllib2.URLError:
            return None

    try:
        response_votable = votable.parse(response.fp)
        first_table = response_votable.get_first_table()
    except:
        return None
    else:
        ra = float(first_table.array[0][0])
        dec = float(first_table.array[0][1])

        try:
            coords, created = AstronomicalCoordinates.objects.get_or_create(right_ascension=ra, declination=dec)
        except MultipleObjectsReturned:
            coords = AstronomicalCoordinates.objects.filter(right_ascension=ra, declination=dec).first()

        return coords
Developer ID: andycasey, Project: arcsecond.io, Lines of code: 26, Source file: simbad.py


Example 9: get_SIMBAD_object_types

def get_SIMBAD_object_types(name):
    url = SIMBAD_BASIC_SCRIPT + QUERY_OTYPES + name

    try:
        response = urllib2.urlopen(SIMBAD_ROOT_1+NAME_SCRIPT+urllib2.quote(url))
    except urllib2.URLError:
        try:
            response = urllib2.urlopen(SIMBAD_ROOT_2+NAME_SCRIPT+urllib2.quote(url))
        except urllib2.URLError:
            return None

    otypes = []
    ok = False

    value_line = None
    for line in response.readlines():
        if ok and len(line.strip()) > 0:
            value_line = line.strip()
        if line.find(QUERY_DATA_DELIMITER) >= 0:
            ok = True

    if value_line is not None and len(value_line) > 0:
        values = value_line.split(",")
        for value in values:
            otype, created = ObjectType.objects.get_or_create(value=value)
            otypes.append(otype)

    return otypes
Developer ID: andycasey, Project: arcsecond.io, Lines of code: 28, Source file: simbad.py


Example 10: decorated_function

            def decorated_function(*args, **kwargs):
                page = int(request.args.get('page', 1))

                # Must be converted to a str here, otherwise a type error is raised
                _path = request.path.encode("utf-8")

                # Non-ASCII URLs need to be URL-encoded
                if quote(_path).count('%25') <= 0:
                    _path = quote(_path)

                _viewkey = 'mobile%s' % _path if request.MOBILE else _path
                cache_key = str(key % _viewkey)

                if page > 1:
                    cache_key = '%s_%s' % (cache_key, page)

                rv = cache.get(cache_key)
                if rv is not None: 
                    return rv
                rv = f(*args, **kwargs)
                _suffix = u"\n<!-- cached at %s -->" % str(datetime.datetime.now())
                if hasattr(rv, "data"):
                    rv.data += _suffix
                if isinstance(rv, unicode):
                    rv += _suffix
                cache.set(cache_key, rv, timeout)
                return rv
Developer ID: hugleecool, Project: wtxlog, Lines of code: 27, Source file: ext.py
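The count('%25') test in this decorator guards against double-encoding: quoting an already percent-encoded path turns every '%' into '%25', so the decorator only quotes paths for which quoting does not produce an escaped percent sign. Roughly:

from urllib2 import quote

encoded = "/tag/caf%C3%A9"                # path that is already percent-encoded
print quote(encoded)                      # '/tag/caf%25C3%25A9' ('%' becomes '%25')
print quote(encoded).count('%25') <= 0    # False, so this path is left as it is

plain = "/tag/caf\xc3\xa9"                # raw UTF-8 bytes, not yet encoded
print quote(plain)                        # '/tag/caf%C3%A9'
print quote(plain).count('%25') <= 0      # True, so this path gets quoted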


Example 11: plos_search

def plos_search(query, query_type = None, rows = 20, more_parameters = None, fq = '''doc_type:full AND article_type:"Research Article"''', output = "json", verbose = False):
    '''
    Accesses the PLOS search API.
    query: the text of your query.
    query_type: subject, author, etc.
    rows: maximum number of results to return.
    more_parameters: an optional dictionary; key-value pairs are parameter names and values for the search api.
    fq: determines what kind of results are returned.
    Set by default to return only full documents that are research articles (almost always what you want).
    output: determines output type. Set to JSON by default, XML is also possible, along with a few others.
    '''
    api_key = "..."

    query_string = ""
    if query_type:
        query_string += query_type + ":"
    query_string += '"' + query + '"'

    params_string = ""
    if more_parameters:
        params_string = "&" + "&".join([key + "=" + quote(value) for key, value in more_parameters.iteritems()])

    fq_string = "&fq=" + quote(fq)

    url = "http://api.plos.org/search?q=" + query_string + params_string + fq_string + "&wt=" + output + "&rows=" + str(rows) + "&api_key=" + api_key
    headers = {'Content-Type': 'application/' + output}
    if verbose:
        print url
    r = requests.get(url, headers=headers)
    r.encoding = "UTF-8" # just to be sure
    return r.json()["response"]["docs"]
Developer ID: mchelen, Project: citation_scripts, Lines of code: 31, Source file: search_and_DOI_utilities.py


Example 12: _generate_url

    def _generate_url(self, options):
        options['Service'] = 'AWSECommerceService'
        options['AWSAccessKeyId'] = self.access_key_id
        options['AssociateTag'] = self.associate_tag
        options['Timestamp'] = self._generate_timestamp()

        # Remove any entries whose value is None.
        for k, v in options.items():
            if v is None:
                del options[k]

        # Build the (version 2) signature.
        keys = sorted(options.keys())
        args = '&'.join('%s=%s' % (key, urllib2.quote(unicode(options[key])
                        .encode('utf-8'), safe='~')) for key in keys)

        msg = 'GET'
        msg += '\n' + self.uri
        msg += '\n' + self.end_point
        msg += '\n' + args

        hmac.new(self.secret_key or '', msg, hashlib.sha256).digest()
        signature = urllib2.quote(
            base64.b64encode(hmac.new(self.secret_key or '', msg, hashlib.sha256).digest()))

        url = "http://%s%s?%s&Signature=%s" % (self.uri, self.end_point, args, signature)

        return url
Developer ID: kkas, Project: amazon-prod-advertise-api, Lines of code: 28, Source file: amazon_prod_api.py
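Two details in this example are easy to overlook: the parameters are sorted by key before signing so that the signed string matches the final query string, and quote is called with safe='~' because the signature scheme expects RFC 3986 style encoding, while Python 2 escapes the tilde by default. A small check of the tilde behaviour:

import urllib2

print urllib2.quote("~user")            # '%7Euser'  (Python 2 escapes '~' by default)
print urllib2.quote("~user", safe="~")  # '~user'    (left intact, as the signature expects)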


Example 13: _get_archived_json_results

    def _get_archived_json_results(self):
        """Download JSON file that only contains test
        name list from test-results server. This is for generating incremental
        JSON so the file generated has info for tests that failed before but
        pass or are skipped from current run.

        Returns (archived_results, error) tuple where error is None if results
        were successfully read.
        """
        results_json = {}
        old_results = None
        error = None

        if not self._test_results_server:
            return {}, None

        results_file_url = (self.URL_FOR_TEST_LIST_JSON %
            (urllib2.quote(self._test_results_server),
             urllib2.quote(self._builder_name),
             self.RESULTS_FILENAME,
             urllib2.quote(self._test_type),
             urllib2.quote(self._master_name)))

        try:
            # FIXME: We should talk to the network via a Host object.
            results_file = urllib2.urlopen(results_file_url)
            info = results_file.info()
            old_results = results_file.read()
        except urllib2.HTTPError, http_error:
            # A non-4xx status code means the bot is hosed for some reason
            # and we can't grab the results.json file off of it.
            if (http_error.code < 400 or http_error.code >= 500):
                error = http_error
Developer ID: venkatarajasekhar, Project: Qt, Lines of code: 33, Source file: json_results_generator.py


Example 14: translate

def translate(phrase, in_lang):
    if in_lang == "en":
        out_lang = "ja"
    else:
        out_lang = "en"

    if True:
        url = (
            "http://api.microsofttranslator.com/V2/Ajax.svc/GetTranslations?appId=F2926FC35C3732CEC3E9C92913745F9C28912821&from="
            + in_lang
            + "&to="
            + out_lang
            + "&maxTranslations=1"
        )
        url += "&text=" + quote(phrase.encode("utf-8"))

        response = urlfetch.fetch(url=url)

        content = re.sub(u"\xEF\xBB\xBF", "", response.content)
        data = json.loads(content)
        translated_text = data["Translations"][0]["TranslatedText"]
        time.sleep(0.1)
    else:
        url = "https://www.googleapis.com/language/translate/v2?"
        url += "&source=" + in_lang
        url += "&target=" + out_lang
        url += "&q=" + quote(phrase.encode("utf-8"))
        url += "&key=" + "AIzaSyAI3PoUAJ_uP0o33EDgUfSEUMALepQAaNA"

        content = urlfetch.fetch(url=url).content
        data = json.loads(content)

        translated_text = data["data"]["translations"][0]["translatedText"]

    return translated_text
Developer ID: unclehighbrow, Project: translationpartyexhibit, Lines of code: 35, Source file: twilio.py


Example 15: searchBook

def searchBook(isbn_num):
    logText("Searching for: ", isbn_num)
    
    query = "AWSAccessKeyId=" + AWSAccessKeyID + "&AssociateTag=abc&Keywords="
    query += isbn_num 
    query += "&Operation=ItemSearch&ResponseGroup=ItemAttributes&SearchIndex=Books&Service=AWSECommerceService"
    query += "&Timestamp=" + urllib2.quote(datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.000Z"))[:-1]
    # query += "&Version=2011-08-01"
    
    data = "GET\n"
    data += "ecs.amazonaws.com\n"
    data += "/onca/xml\n"
    data += query
    
    a = hmac.new(AWSSecret, data, hashlib.sha256)
    signature = urllib2.quote(base64.encodestring(a.digest())[:-1])
    
    url = "http://ecs.amazonaws.com/onca/xml?" + query + "&Signature=" + signature
    
    # print "URL : ", url
    
    url_obj = urllib2.urlopen(url)
    
    
    data = url_obj.read()
    
    book_info = getInfoFromXML(data)
        
    logText( " - Title: ", book_info[0])
    logText( " - Price: ", book_info[1])
    storeInDB( (book_info[0], isbn_num, book_info[1]) )
Developer ID: bshep, Project: ISBNbarcodescanner, Lines of code: 31, Source file: main.py


Example 16: __raw_search_anime

 def __raw_search_anime(self, query):
     h = httplib2.Http()
     resp, content = h.request(self.malapiurl + '/anime/search?q=' + urllib2.quote(query))
     print self.malapiurl + '/anime/search?q=' + urllib2.quote(query)
     if int(resp['status']) != 200:
         return None
     return content
Developer ID: jasonmoofang, Project: animaster, Lines of code: 7, Source file: mal.py


Example 17: sb_search

def sb_search():
    sickbeard = {}
    params = ''

    try:
        params = '&name=%s' % (urllib2.quote(request.args['name']))
    except:
        pass

    try:
        params = '&tvdbid=%s' % (urllib2.quote(request.args['tvdbid']))
    except:
        pass

    try:
        params = '&lang=%s' % (urllib2.quote(request.args['lang']))
    except:
        pass

    if params != '':
        params = '/?cmd=sb.searchtvdb%s' % params

        try:
            sickbeard = sickbeard_api(params)
            sickbeard = sickbeard['data']['results']
        except:
            sickbeard = None

    else:
        sickbeard = None

    return render_template('sickbeard-search.html',
        data=sickbeard,
        sickbeard='results',
    )
Developer ID: DejaVu, Project: maraschino, Lines of code: 35, Source file: sickbeard.py


Example 18: lastfm_info

def lastfm_info(tracktuple, trinfo):
    if tracktuple[0] != '':
        mbid = '&mbid=' + tracktuple[0]
    else: mbid = ''
    artist = urllib2.quote(tracktuple[1].encode('utf-8'))
    songtitle = urllib2.quote(tracktuple[2].encode('utf-8'))
    query = 'http://ws.audioscrobbler.com/2.0/?method=track.getInfo&api_key='\
        + LASTFM_KEY + mbid + '&artist=' + artist + '&track='\
        + songtitle + '&format=json'
    response = json.loads(urllib2.urlopen(query).read())
    result = None
    try:
        result = response['track']
    except KeyError:
        global lastfm_failed
        print '?? No result for', tracktuple, 'on last.fm'
        print '   ', response
        lastfm_failed.append(tracktuple)
    if result != None:
        trinfo['track']['name'] = response['track']['name']
        try:
            album_response = response['track']['album']
            trinfo['track']['album'] = {}
            trinfo['track']['album']['title'] = album_response['title']
            trinfo['track']['album']['url'] = album_response['url']
            trinfo['track']['album']['artist'] = album_response['artist']
            trinfo['track']['album']['mbid'] = album_response['mbid']
        except KeyError:
            print '?? No album for', trinfo['track']['name']
        trinfo['track']['artist'] = response['track']['artist']
        trinfo['track']['toptags'] = response['track']['toptags']
        trinfo['track']['id']['musicbrainz'] = response['track']['mbid']
        trinfo['track']['duration'] = response['track']['duration']
        print trinfo['track']['name'], 'successfully appended'
    return trinfo
Developer ID: aolieman, Project: ii2013-weather-tunes, Lines of code: 35, Source file: music-features.py


Example 19: buildURL

 def buildURL(self, params):
     """RESTリクエストのURLアドレスを構築"""
     params["Service"] = "AWSECommerceService"
     params["AWSAccessKeyId"] = self.access_key
     if self.associate_tag is not None:
         params["AssociateTag"] = self.associate_tag
     params["Timestamp"] = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
     sorted_params = sorted(params.items())
     
     # Expand the params dict into key=value pairs
     request = []
     #print sorted_params
     for p in sorted_params:
         pair = "%s=%s" % (p[0], urllib2.quote(p[1].encode("utf-8")))
         request.append(pair)
     
     # Authentication has been required since 2009/8/15
     # Compute the HMAC-SHA256 using the Secret Access Key
     msg = "GET\nwebservices.amazon.co.jp\n/onca/xml\n%s" % ("&".join(request))
     hmac_digest = hmac.new(self.secret_access_key, msg, hashlib.sha256).digest()
     base64_encoded = base64.b64encode(hmac_digest)
     signature = urllib2.quote(base64_encoded)
     
     # Append the Signature to the request and build the URL
     request.append("Signature=%s" % signature)
     url = self.amazonurl + "?" + "&".join(request)
     
     return url
Developer ID: rysk92, Project: watasync, Lines of code: 28, Source file: Amazon.py


Example 20: baiduMusic

    def baiduMusic(self, musicTitle, musicAuthor):
        baseurl = r"http://box.zhangmen.baidu.com/x?op=12&count=1&title=%s$$%s$$$$" % \
        (urllib2.quote(musicTitle.encode("utf-8")),urllib2.quote(musicAuthor.encode("utf-8")))
        
        resp = urllib2.urlopen(baseurl)
        xml = resp.read()
        
        # .*? grabs only the data between <url> tags (standard-quality URL)
        url = re.findall('<url>.*?</url>',xml)
        # .*? grabs only the data between <durl> tags (high-quality URL)
        durl = re.findall('<durl>.*?</durl>',xml)

        # Get the data inside the encode tag of the first url entry
        url1 = re.findall('<encode>.*?CDATA\[(.*?)\]].*?</encode>',url[0])
        url2 = re.findall('<decode>.*?CDATA\[(.*?)\]].*?</decode>',url[0])
        
        # Take everything in url1 up to and including the last '/' plus everything in url2 before (not including) the last '&'
        urlpath = url1[0][:url1[0].rindex('/')+1] + url2[0][:url2[0].rindex('&')]
        durlpath = ""
        if durl:
            durl1 = re.findall('<encode>.*?CDATA\[(.*?)\]].*?</encode>',durl[0])
            durl2 = re.findall('<decode>.*?CDATA\[(.*?)\]].*?</decode>',durl[0])
            durlpath = durl1[0][:durl1[0].rindex('/')+1] + durl2[0][:durl2[0].rindex('&')]

        return urlpath, durlpath
Developer ID: chenghao, Project: weixin_py, Lines of code: 25, Source file: music.py



Note: The urllib2.quote examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors, and distribution or use should follow each project's License. Please do not reproduce without permission.

