• 设为首页
  • 点击收藏
  • 手机版
    手机扫一扫访问
    迪恩网络手机版
  • 关注官方公众号
    微信扫一扫关注
    公众号

Python ujson.load函数代码示例

原作者: [db:作者] 来自: [db:来源] 收藏 邀请

本文整理汇总了Python中ujson.load函数的典型用法代码示例。如果您正苦于以下问题:Python load函数的具体用法?Python load怎么用?Python load使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。



在下文中一共展示了load函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: test_loadFileArgsError

 def test_loadFileArgsError(self):
     """ujson.load must raise TypeError for a plain string argument
     (it expects a file-like object, not JSON text)."""
     raised = False
     try:
         ujson.load("[]")
     except TypeError:
         raised = True
     if not raised:
         assert False, "expected TypeError"
开发者ID:karanlyons,项目名称:ultrajson,代码行数:7,代码来源:tests.py


示例2: load_cooc_dict

def load_cooc_dict():
    """Load the co-occurrence dictionary and build log-probability tables.

    Populates the module globals:
      cw_dict - log P(x, y), keyed by (word, context) tuples
      c_dict  - log P(y), keyed by context

    When ``using_cache`` is set and both cache files (".pxy.cache" /
    ".py.cache" next to ``cooc_dict_file``) exist, the cached tables are
    loaded instead of being recomputed.
    """
    global cw_dict, c_dict
    liblogger.info("load cooc dict")
    pxy_cache_file = cooc_dict_file + ".pxy.cache"
    py_cache_file = cooc_dict_file + ".py.cache"
    if using_cache and os.path.exists(pxy_cache_file) and os.path.exists(py_cache_file):
        with open(pxy_cache_file) as fid:
            cw_dict = json.load(fid)
        with open(py_cache_file) as fid:
            c_dict = json.load(fid)
        return
    with open(cooc_dict_file) as fid:
        cooc_dict = json.load(fid)
    cw_dict = defaultdict(int)
    c_dict = defaultdict(int)
    for w in cooc_dict:
        for ctx in cooc_dict[w]:
            # Bug fix: the original read cooc_dict[w][ctx] twice in a row;
            # a single read is sufficient.
            count = cooc_dict[w][ctx]
            cw_dict[(w, ctx)] += count
            c_dict[ctx] += count
    liblogger.info("norm cooc dict for P(x, y)")
    # Normalize counts into log probabilities.
    cw_sum = float(sum(cw_dict.values()))
    for cw in cw_dict:
        cw_dict[cw] = math.log(cw_dict[cw] / cw_sum)
    # NOTE(review): cw_dict keys are tuples; stdlib json cannot serialize
    # tuple keys, so "json" here is presumably ujson or similar -- confirm.
    with open(pxy_cache_file, "w") as fid:
        json.dump(cw_dict, fid)
    liblogger.info("ctx dict P(y)")
    c_sum = float(sum(c_dict.values()))
    for c in c_dict:
        c_dict[c] = math.log(c_dict[c] / c_sum)
    with open(py_cache_file, "w") as fid:
        json.dump(c_dict, fid)
开发者ID:tq010or,项目名称:lexsim,代码行数:30,代码来源:weighter.py


示例3: extract_json_data

    def extract_json_data(self, filename, option):
        '''
        Imports .json files from peeringdb and returns a list of dictionaries with all the retrieved IXP information.
        Input:
            a) filename: A .json file name.
            b) option: Flag to download the file.
        Output:
            a) A list of dictionaries (the "data" section of the peeringdb dump).
        '''

        obj = None
        try:
            with open(self.homepath + '/database' + filename) as data_file:
                obj = ujson.load(data_file)
        # Bug fix: narrowed from a bare "except:" -- only missing-file and
        # JSON-parse failures should trigger the download/copy fallback.
        except (IOError, OSError, ValueError):
            print(filename + ' was not found.')

            if not self.downloader.download_peering(option):
                print("Could not download " + filename +
                      ". Copying from the default database.")
                try:
                    copyfile(self.libpath + '/database/Default' + filename,
                             self.homepath + '/database' + filename)
                except (IOError, OSError):
                    print('Could not copy ' + filename +
                          ' from the default database.')

            # Second attempt after the download/copy fallback; give up on failure.
            try:
                with open(self.homepath + '/database' + filename) as data_file:
                    obj = ujson.load(data_file)
            except (IOError, OSError, ValueError):
                print('Could not open ' + filename + '. Exiting.')
                exit(0)
        return (obj['data'])
开发者ID:gnomikos,项目名称:traIXroute,代码行数:35,代码来源:handle_pdb.py


示例4: get_translation_percentage

    def get_translation_percentage(self, locale_path: Text, locale: Text) -> int:
        """Return the integer percentage of translated strings for *locale*,
        aggregating backend (.po), frontend (.json) and mobile statistics."""

        # Backend: count entries in the gettext catalogue.
        po = polib.pofile(self.get_po_filename(locale_path, locale))
        not_translated = len(po.untranslated_entries())
        total = not_translated + len(po.translated_entries())

        # Frontend: every key counts toward the total; empty values are
        # untranslated.
        with open(self.get_json_filename(locale_path, locale)) as reader:
            frontend_strings = ujson.load(reader)
        total += len(frontend_strings)
        not_translated += sum(1 for text in frontend_strings.values() if text == '')

        # Mobile: counts are pre-aggregated per locale in mobile_info.json.
        with open(os.path.join(locale_path, 'mobile_info.json')) as mob:
            mobile_info = ujson.load(mob)
        try:
            info = mobile_info[locale]
        except KeyError:
            if self.strict:
                raise
            info = {'total': 0, 'not_translated': 0}
        total += info['total']
        not_translated += info['not_translated']

        return (total - not_translated) * 100 // total
开发者ID:gnprice,项目名称:zulip,代码行数:28,代码来源:compilemessages.py


示例5: load_place_savers

def load_place_savers(user_dir):
    """
    This function loads the following place saving parameters:
    1. cur_hop - Current hop of collection algorithm
    2. cur_user_list - List of users collected during current hop
    3. next_user_list - List of users to collect on next hop
    4. added_topics_for_cur_hop - Topics added from current hop (if relevant to sampling method)
    5. unavailable_accounts - List of unavailable accounts
    6. finished_users - Users that have already been collected

    :param user_dir: Directory where profile information is saved
    :return place_saver_obj: Python dictionary of the aforementioned fields
    """
    # Load object: prefer the v1 checkpoint; fall back to v2 when v1 exists
    # but holds invalid JSON (ValueError); start fresh when v1 is absent (IOError).
    try:
        jfid = open(os.path.join(user_dir, "place_saver_v1.txt"))
        place_saver_obj = ujson.load(jfid)
        jfid.close()
    except ValueError:
        # NOTE(review): if place_saver_v2.txt is missing or also corrupt,
        # the exception propagates out of this function -- confirm intended.
        jfid = open(os.path.join(user_dir, "place_saver_v2.txt"))
        place_saver_obj = ujson.load(jfid)
        jfid.close()
    except IOError:
        print "The object 'place_saver' does not exist, creating it now"
        place_saver_obj = {}
    # Make all necessary fields in case they don't already exist
    if "cur_user_list" not in place_saver_obj.keys():
        place_saver_obj["cur_user_list"] = set([])
    if "next_user_list" not in place_saver_obj.keys():
        place_saver_obj["next_user_list"] = set([])
    if "cur_hop" not in place_saver_obj.keys():
        place_saver_obj["cur_hop"] = 0
    if "added_topics_for_cur_hop" not in place_saver_obj.keys():
        place_saver_obj["added_topics_for_cur_hop"] = set([])
    if "unavailable_accounts" not in place_saver_obj.keys():
        place_saver_obj["unavailable_accounts"] = set([])
    if "finished_users" not in place_saver_obj.keys():
        place_saver_obj["finished_users"] = {}
    # Sync finished_users with the userInfo_* profile JSONs already on disk,
    # skipping empty files and profiles that are already recorded.
    jsons = filter(lambda k: re.match("userInfo_*", k), os.listdir(user_dir))
    for jj in range(len(jsons)):
        if jj % 200 == 0:
            print "Check profile JSON {} of {}".format(jj + 1, len(jsons))
        try:
            full_filename = os.path.join(user_dir, jsons[jj])
            if os.path.getsize(full_filename) == 0:
                continue
            jfid = open(full_filename)
            profile = ujson.load(jfid)
            jfid.close()
            if profile["id"] in place_saver_obj["finished_users"].keys():
                continue
            else:
                place_saver_obj["finished_users"][profile["id"]] = jsons[jj]
        except ValueError:
            # Unparseable profile file: skip it rather than abort the load.
            continue
    # Ensure that all fields are set objects
    for kk in place_saver_obj.keys():
        if (kk != "finished_users") and (kk != "cur_hop"):
            place_saver_obj[kk] = set(place_saver_obj[kk])
    return place_saver_obj
开发者ID:mitll,项目名称:TweetE,代码行数:60,代码来源:breadth_first_sampling.py


示例6: setUp

 def setUp(self):
     """Load the GeoJSON/TopoJSON fixtures shared by the tests."""
     def _read(path):
         with open(path) as fixture:
             return json.load(fixture)

     self.square_geojson = _read("tests/data/square.geojson")
     self.square_topojson = _read("tests/data/square.topojson")
     self.ref = _read("tests/data/multipolygons_spherical.geojson")
开发者ID:mthh,项目名称:topojson.py,代码行数:7,代码来源:topojson_test.py


示例7: test_orderbook

def test_orderbook():
    """Replay recorded level-3 messages through an order book and compare the
    result against an independently captured ending snapshot."""
    variable_order_book = Book()
    control_order_book = Book()

    def _read_json(path):
        with open(path) as handle:
            return json.load(handle)

    messages = _read_json('testdata/messages.json')
    beginning_level_3 = _read_json('testdata/beginning_level_3.json')
    ending_level_3 = _read_json('testdata/ending_level_3.json')

    # Sanity-check that the snapshots bracket the message stream.
    try:
        assert beginning_level_3['sequence'] + 1 == messages[0]['sequence']
        assert ending_level_3['sequence'] == messages[-1]['sequence']
    except AssertionError:
        print("Problem with sample data sequences")

    variable_order_book.get_level3(beginning_level_3)

    start = time.time()
    for message in messages:
        variable_order_book.process_message(message)
    end = time.time()
    print('messages per sec: {0}'.format(int(len(messages)/(end-start))))

    control_order_book.get_level3(ending_level_3)

    dict_compare(variable_order_book.asks.price_map, control_order_book.asks.price_map, price_map=True)
    dict_compare(variable_order_book.asks.order_map, control_order_book.asks.order_map, order_map=True)
开发者ID:hashtagcpt,项目名称:coinbase-exchange-order-book,代码行数:30,代码来源:orderbooktest.py


示例8: _load

def _load(logger, tests_root, manifest, types=None, meta_filters=None, allow_cached=True):
    """Load a Manifest from a path or an open file-like object.

    Returns None when a path was given but the file is missing or holds
    corrupt JSON.  Successful loads are memoized in __load_cache keyed by
    path when allow_cached is true.
    """
    # "manifest" is a path or file-like object.
    is_path = isinstance(manifest, string_types)
    manifest_path = manifest if is_path else manifest.name

    if allow_cached and manifest_path in __load_cache:
        return __load_cache[manifest_path]

    if not is_path:
        rv = Manifest.from_json(tests_root,
                                fast_json.load(manifest),
                                types=types,
                                meta_filters=meta_filters)
    else:
        if os.path.exists(manifest):
            logger.debug("Opening manifest at %s" % manifest)
        else:
            logger.debug("Creating new manifest at %s" % manifest)
        try:
            with open(manifest) as f:
                rv = Manifest.from_json(tests_root,
                                        fast_json.load(f),
                                        types=types,
                                        meta_filters=meta_filters)
        except IOError:
            return None
        except ValueError:
            logger.warning("%r may be corrupted", manifest)
            return None

    if allow_cached:
        __load_cache[manifest_path] = rv
    return rv
开发者ID:alvestrand,项目名称:web-platform-tests,代码行数:32,代码来源:manifest.py


示例9: __init__

    def __init__(self, path, writer_queue=None):
        """Initialize using path to file and optional thread-safe queue.

        Queue is used for json serializable data to be written to file when
        self.write_queued() is called.

        If the file at 'path' doesn't exist it will be created.

        :param path: location of the persistence file (~ is expanded).
        :param writer_queue: optional Queue.Queue used for queued writes.
        :raises ValueError: if an existing file at 'path' is not valid JSON.
        :raises TypeError: if writer_queue is not a Queue.Queue.
        """

        self.path = os.path.realpath(os.path.expanduser(path))
        if not os.path.exists(self.path):
            # Bug fix: the original message had a %s placeholder but never
            # supplied the path argument.
            print("Persistence file %s does not exist yet, creating it..."
                  % self.path)
            # Use context managers so handles are closed deterministically.
            with open(self.path, 'w') as handle:
                json.dump({}, handle)
        else:
            # check for json-ness
            try:
                with open(self.path) as handle:
                    json.load(handle)
                LOG.debug("Loaded existing persistence file %s.",
                          os.path.relpath(self.path))
            except ValueError as err:
                raise ValueError("The persistence file -> %s is not "
                                 "a valid json file. | %s"
                                 % (os.path.relpath(self.path), err))
        if writer_queue and not isinstance(writer_queue, Queue.Queue):
            raise TypeError('writer_queue should be a Queue.Queue.')
        elif writer_queue:
            self.synq = writer_queue
            # Track which queued items have already been persisted.
            self.synq._persisted = set()
        else:
            self.synq = None
开发者ID:shterrel,项目名称:Waldo,代码行数:30,代码来源:batch.py


示例10: combine_dicts

def combine_dicts():
    """Merge the two title-tag JSON dumps into a single titletagwords.json."""
    with open('title10to100000.json') as tag200:
        lower_range = ujson.load(tag200)
    with open('title100000plus.json') as tag1500:
        upper_range = ujson.load(tag1500)
    merged = dict(chain(lower_range.items(), upper_range.items()))
    with open('titletagwords.json', 'w') as write:
        ujson.dump(merged, write)
开发者ID:slyfocks,项目名称:facebookkaggle,代码行数:7,代码来源:ngramanalysis.py


示例11: reading_vqa_data

def reading_vqa_data(vqa_dir, section):
    """Yield (question_id, image_path, question_text, options, answer) tuples
    for one VQA section (e.g. 'train' or 'val').

    :param vqa_dir: path-like directory holding the VQA JSON files (supports
        the ``/`` operator and ``.open()``, e.g. pathlib.Path).
    :param section: dataset split name used in the annotation/question/image
        file names.
    """
    ans = 'mscoco_%s2014_annotations.json' % section
    with (vqa_dir / ans).open() as file_:
        ans_data = json.load(file_)
    image_by_id = {}
    answers_by_id = {}
    for answer in ans_data['annotations']:
        image = str(answer['image_id'])
        mca = answer['multiple_choice_answer']
        # Zero-pad the image id to 12 digits, matching COCO file naming.
        img = '0'*(12 - len(image)) + image
        s = '/data/%s/images' % section
        s = s + '/COCO_%s2014_' % section + img + '.jpg'
        image_by_id[answer['question_id']] = s
        answers_by_id[answer['question_id']] = mca
    filename = ('MultipleChoice_mscoco_'
                '%s2014_questions.json' % section)
    with (vqa_dir / filename).open() as file_:
        ques_data = json.load(file_)
    for question in ques_data['questions']:
        text = question['question']
        ques_id = question['question_id']
        options = question['multiple_choices']
        image_path = image_by_id[ques_id]
        image = Image.open(image_path)
        # Images smaller than IMAGE_SIZE are replaced with the most recent
        # acceptable image.
        # NOTE(review): prev_image is only assigned after a 'yes' answer on a
        # large-enough image; if the very first question takes this branch,
        # prev_image is unbound and this raises NameError -- confirm inputs
        # guarantee a large 'yes' image appears first.
        if min(image.size) < IMAGE_SIZE:
            image_path = prev_image
            image_by_id[ques_id] = image_path
        else:
            if (answers_by_id[ques_id] == 'yes'):
                prev_image = image_path
        yield ques_id, image_by_id[ques_id], text, options, answers_by_id[ques_id]
开发者ID:trucviennguyen,项目名称:VQA,代码行数:31,代码来源:train_images.py


示例12: addin_dubbed_video_mappings

def addin_dubbed_video_mappings(node_data, lang=en_lang_code):
    """Substitute dubbed-video youtube ids into *node_data* for *lang*.

    Reads build/dubbed_video_mappings.json (generated via main() when absent)
    and build/en_nodes.json, then appends to node_data every English video
    node that has a dubbed version for the language, plus any topic nodes not
    already present.

    :param node_data: list of node dicts from the Khan Academy API.
    :param lang: target language code (defaults to en_lang_code).
    :return: node_data with the dubbed video / missing topic nodes appended.
    """
    # Get the dubbed videos from the spreadsheet and substitute them
    # for the video, and topic attributes of the returned data struct.

    build_path = os.path.join(os.getcwd(), "build")

    # Create a dubbed_video_mappings.json, at build folder.
    if os.path.exists(os.path.join(build_path, "dubbed_video_mappings.json")):
        logging.info("Dubbed videos json already exist at %s" % (DUBBED_VIDEOS_MAPPING_FILEPATH))
    else:
        main()

    # Get the list of video ids from dubbed video mappings
    lang_code = get_lang_name(lang).lower()
    dubbed_videos_path = os.path.join(build_path, "dubbed_video_mappings.json")
    with open(dubbed_videos_path, "r") as f:
        dubbed_videos_load = ujson.load(f)

    dubbed_videos_list = dubbed_videos_load.get(lang_code)
    # If dubbed_videos_list is None it means the language code is not
    # available in the dubbed video mappings; return the input unchanged.
    if not dubbed_videos_list:
        return node_data

    # Get the current youtube_ids, and topic_paths from the khan api node data.
    youtube_ids = []
    topic_paths = []
    for node in node_data:
        node_kind = node.get("kind")
        if node_kind == NodeType.video:
            youtube_ids.append(node.get("youtube_id"))
        if node_kind == NodeType.topic:
            topic_paths.append(node.get("path"))

    en_nodes_path = os.path.join(build_path, "en_nodes.json")
    with open(en_nodes_path, "r") as f:
        en_node_load = ujson.load(f)

    en_node_list = []
    # The en_nodes.json must have the same data structure as the node_data
    # variable from the khan api.
    for node in en_node_load:
        node_kind = node.get("kind")

        # Videos not already present that have a dub: swap in the dubbed
        # youtube id and tag the node with the target language.
        if node_kind == NodeType.video:
            youtube_id = node["youtube_id"]
            if not youtube_id in youtube_ids:
                if youtube_id in dubbed_videos_list:
                    node["youtube_id"] = dubbed_videos_list[youtube_id]
                    node["translated_youtube_lang"] = lang
                    en_node_list.append(node)
                    youtube_ids.append(youtube_id)

        # Append all topics that's not in topic_paths list.
        if node_kind == NodeType.topic:
            if not node["path"] in topic_paths:
                en_node_list.append(node)
                topic_paths.append(node["path"])

    node_data += en_node_list
    return node_data
开发者ID:rtibbles,项目名称:content-pack-maker,代码行数:59,代码来源:khanacademy.py


示例13: main

def main():
    """CLI entry point: score LexNorm predictions against oracle annotations.

    Parses --pred (system output) and --oracle (gold annotations), loads both
    JSON files, and reports OOV detection performance.
    """
    parser = argparse.ArgumentParser(description = "Analysis scripts for LexNorm in W-NUT 2015")
    parser.add_argument("--pred", required = True, help = "A JSON file: Your predictions over test data formatted in JSON as training data")
    parser.add_argument("--oracle", required = True, help = "A JSON file: The oracle annotations of test data formatted in JSON as training data")
    args = parser.parse_args()

    with open(args.pred) as pred_file:
        predicates = json.load(pred_file)
    # Bug fix: the original loaded args.pred a second time here, so the
    # --oracle argument was parsed but never used.
    with open(args.oracle) as oracle_file:
        training_list = json.load(oracle_file)
    oov_detection_performance(training_list, predicates)
开发者ID:cgl,项目名称:CWA-Normalizer,代码行数:9,代码来源:analysis.py


示例14: LoadData

 def LoadData(self):
     """Populate the word lookup tables from the gzipped dictbase files."""
     def _read(path):
         handle = gzip.open(path)
         try:
             return json.load(handle)
         finally:
             handle.close()

     self.word_pos = _read('data/dictbase/word_pos.txt.gz')
     self.word_pos_max = _read('data/dictbase/word_pos_max.txt.gz')
     self.word_tran = _read('data/dictbase/word_trans.txt.gz')
开发者ID:WeiEast,项目名称:chinese_decode,代码行数:10,代码来源:decoder.py


示例15: demo

def demo(config):
    """Load the embedding matrices and test metadata from *config*, then
    launch the interactive demo around a frozen model."""
    def _load_json(path):
        with open(path, "r") as fh:
            return json.load(fh)

    word_mat = np.array(_load_json(config.word_emb_file), dtype=np.float32)
    char_mat = np.array(_load_json(config.char_emb_file), dtype=np.float32)
    meta = _load_json(config.test_meta)

    model = Model(config, None, word_mat, char_mat, trainable=False, demo = True)
    demo = Demo(model, config)
开发者ID:txye,项目名称:QANet,代码行数:10,代码来源:main.py


示例16: main

def main(unused_argv):
  """Read a task from stdin and emit the matching state-of-the-art solution
  (matched on problem id and first source seed) as a JSON list."""
  task = json.load(sys.stdin)
  json_path = os.path.join(
    os.path.dirname(__file__), '..', '..', 'solutions',
    'state-of-the-art.json')
  with open(json_path) as f:
    solutions = json.load(f)
  wanted_id = task['id']
  wanted_seed = task['sourceSeeds'][0]
  for solution in solutions:
    if solution['problemId'] == wanted_id and solution['seed'] == wanted_seed:
      json.dump([solution], sys.stdout)
      sys.stdout.write('\n')
开发者ID:nya3jp,项目名称:icfpc2015,代码行数:12,代码来源:corpus.py


示例17: update_in_resources

def update_in_resources(alias, updates):
    """Apply key/value updates to the movie stored under *alias* in resources.json.

    :param alias: alias of the movie to update.
    :param updates: dict whose keys are a subset of {'alias', 'tag', 'title'};
        'timestamp' is refreshed automatically and must not be supplied.
    :return: a status message string (success or the failure reason).
    """
    # Bug fix: the original tested the undefined name `update` (NameError).
    if not isinstance(updates, dict):
        return("Updates need to be specified as key:value pairs in a dictionnary. Process Aborted.")
    # Bug fix: dict views are not indexable (helper indexes keys[k]); make lists.
    keys = list(updates.keys())
    values = list(updates.values())

    # Bug fix: the original compared the whole keys view for membership in the
    # list (`keys not in [...]`), which is always True and aborted every call.
    if not set(keys) <= {'alias', 'tag', 'title'}:
        return '''The updates' dictionary do not have the right keys; they must all be in ['alias','tag','title'].
        Note: Do not include 'timestamp' when doing updates. Process Aborted'''

    # Bug fix: `is not` compared int identity; use != (kept as a defensive check).
    if len(keys) != len(values):
        return("Number of Keys and Values do not match. Process Aborted.")

    def helper(movie, keys, values):
        # Apply each update pair in order and refresh the timestamp.
        for k in range(len(keys)):
            movie[keys[k]] = values[k]
        movie['timestamp'] = datetime.datetime.now()
        return movie

    if 'resources.json' not in os.listdir('.'):
        return " The file 'resources.json' is not in the current working directory. Process Aborted."

    with open('resources.json') as json_file:
        resource = ujson.load(json_file)

    if not is_in_resources(resource, alias):
        # Bug fix: the original applied % formatting to a string with no
        # placeholder (TypeError); include the alias in the message.
        return "Movie with alias '%s' is not in resource file. Movie must be added first." % (alias)

    # Bug fix: `movie['alias'] in alias` was a substring test; equality intended.
    movie = list(filter((lambda movie: movie['alias'] == alias), resource['movies']))
    if len(movie) != 1:
        return("That's weird...multiple matches for alias given. Process Aborted.")

    updated = helper(movie[0], keys, values)
    del movie
    # Validate the merged record before committing it.
    for required in ('alias', 'tag', 'title', 'timestamp'):
        if required not in updated.keys():
            return("Update has no '%s' key. Process Aborted." % required)

    deleted = delete(alias)
    if deleted is not True:
        return deleted
    del deleted

    with open('resources.json') as json_file:
        resource = ujson.load(json_file)

    message = " '%s' with alias '%s' and tag '%s' was successfully added as an update." % (updated['title'], updated['alias'], updated['tag'])
    resource['movies'].append(updated)
    # NOTE(review): datetime objects are not JSON-serializable by stdlib json;
    # presumably ujson or downstream handling takes care of this -- confirm.
    resource['logs'].append({
        'timestamp': datetime.datetime.now(),
        'type': 'post',
        'message': message

    })

    with open('resources.json', 'w') as outfile:
        ujson.dump(resource, outfile)
    return message
开发者ID:ahmedtadde,项目名称:CBM,代码行数:53,代码来源:resources.py


示例18: update_unesco_regions

    def update_unesco_regions(self):
        """
        This code will create/update unesco regions and update the country -> region mapping

        Region data comes from two JSON files bundled under data_backup/:
          - unesco_regions.json: {region_id: {name, longitude, latitude}}
          - unesco_country_region_mapping.json: list of rows with keys
            "UNESCO Region Code" and "Country ID".
        """
        import os
        import ujson
        from geodata.models import Region
        from iati.models import RegionVocabulary

        base = os.path.dirname(os.path.abspath(__file__))

        location = base + '/data_backup/unesco_regions.json'
        json_data = open(location)
        unesco_regions = ujson.load(json_data)
        json_data.close()

        location_map = base + '/data_backup/unesco_country_region_mapping.json'
        json_data_map = open(location_map)
        unesco_mapping = ujson.load(json_data_map)
        json_data_map.close()

        # save regions and put in list
        regions = []
        # UNESCO regions live under their own RegionVocabulary (code 999).
        region_vocabulary = RegionVocabulary.objects.get_or_create(
            code=999,
            name='UNESCO')[0]

        for region_id, info in unesco_regions.items():

            # Build a WKT point from the JSON's string lon/lat values,
            # georeferenced in WGS84 (srid 4326).
            center_location_string = 'POINT(' + info['longitude'] + ' ' + info['latitude'] + ')'
            center_location = fromstr(
                center_location_string,
                srid=4326)
            region = Region.objects.get_or_create(
                code=region_id,
                defaults={
                    'name': info['name'],
                    'region_vocabulary': region_vocabulary,
                    'parental_region': None,
                    'center_longlat': center_location})[0]
            regions.append(region)

        # save country -> region mapping
        for line in unesco_mapping:

            region_id = line["UNESCO Region Code"]
            country_id = line["Country ID"]
            country = Country.objects.get(code=country_id)
            # NOTE(review): linear scan over regions per country; fine while
            # the number of UNESCO regions stays small.
            for region in regions:
                if region.code == region_id:
                    country.unesco_region = region
                    country.save()
开发者ID:catalpainternational,项目名称:OIPA,代码行数:52,代码来源:updaters.py


示例19: load_tfidf

def load_tfidf(vocab_path, idf_weights_path):
    """Loads tfidf vectorizer from its components.
    :param str vocab_path: path to the vectorizer vocabulary JSON.
    :param str idf_weights_path: path to idf weights JSON.
    :rtype: sklearn.feature_extraction.text.TfidfVectorizer

    """
    with open(vocab_path) as vocab_file:
        vocabulary = json.load(vocab_file)
    with open(idf_weights_path) as idf_file:
        idf_vector = np.array(json.load(idf_file))

    # Input is pre-tokenized, so the analyzer is the identity function.
    tfidf = TfidfVectorizer(analyzer=lambda x: x, vocabulary=vocabulary)
    # Restore the fitted idf diagonal and the learned vocabulary attribute.
    tfidf._tfidf._idf_diag = scipy.sparse.diags([idf_vector], [0])
    tfidf.vocabulary_ = tfidf.vocabulary
    return tfidf
开发者ID:chubbymaggie,项目名称:virus-names,代码行数:13,代码来源:name_generator.py


示例20: insert_classes

def insert_classes(cursor):
    """
    Fetch and insert the classes from classes.json
    :param cursor: open DB-API cursor used for the INSERT statements.
    :return: None
    """
    # Build class_id -> rank from the ranks JSON, which groups ids as
    # {"Rank<N>": {archetype: [class ids, ...]}}.
    ranks = dict()
    with open(RANKS_PATH, encoding='UTF-8') as ranks_file:
        ranks_dict = ujson.load(ranks_file)
        for rank, ranked_archetypes in ranks_dict.items():
            try:
                # NOTE(review): str.strip("Rank") removes any of the chars
                # R/a/n/k from both ends, not the literal prefix "Rank";
                # works for keys like "Rank5" -- confirm for other key forms.
                rank = int(rank.strip("Rank"))
            except ValueError:
                # Non-numeric rank keys fall back to the maximum rank.
                rank = MAX_RANK
            for ranked_classes in ranked_archetypes.values():
                for ranked_class in ranked_classes:
                    ranks[ranked_class] = rank

    with open(CLASSES_PATH, encoding='UTF-8') as classes_file:
        classes_dict = ujson.load(classes_file)
        classes = list()
        # Get list of sorted classes
        # Ids look like "Char<archetype>_<n>"; ids without '_' sort as (0, 0).
        sorted_classes_ids = list()
        for class_id in classes_dict.keys():
            if '_' in class_id:
                splited_class_id = class_id.split("_", 1)
                sorted_classes_ids.append((class_id, int(splited_class_id[0].strip("Char")), int(splited_class_id[-1])))
            else:
                sorted_classes_ids.append((class_id, 0, 0))
        # Two stable sorts: final order is archetype asc, then number asc.
        sorted_classes_ids.sort(key=lambda tup: tup[2])
        sorted_classes_ids.sort(key=lambda tup: tup[1])
        # Start processing them
        for class_id, archetype, char_n in sorted_classes_ids:
            _class = classes_dict[class_id]
            class_info = list()
            # Get Class Name
            class_info.append(get_value(_class, "Class", "name", str))
            # Get Class Archetype
            class_info.append(get_archetype_id(get_value(_class, "Class", "base", str)))
            # Get Rank (0 when the class id is absent from the ranks file)
            class_info.append(ranks.get(class_id, 0))
            # Get Icon
            class_info.append(format_icon(get_value(_class, "Class", "icon", str)))
            # Get Temp ID
            class_info.append(class_id)

            classes.append(tuple(class_info))

        classes = tuple(classes)

        cursor.executemany("INSERT INTO classes (name, archetype, rank, icon, temp_id) VALUES (?, ?, ?, ?, ?)", classes)



注:本文中的ujson.load函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


鲜花

握手

雷人

路过

鸡蛋
该文章已有0人参与评论

请发表评论

全部评论

专题导读
上一篇:
Python ujson.loads函数代码示例发布时间:2022-05-27
下一篇:
Python ujson.encode函数代码示例发布时间:2022-05-27
热门推荐
阅读排行榜

扫描微信二维码

查看手机版网站

随时了解更新最新资讯

139-2527-9053

在线客服(服务时间 9:00~18:00)

在线QQ客服
地址:深圳市南山区西丽大学城创智工业园
电邮:jeky_zhao#qq.com
移动电话:139-2527-9053

Powered by 互联科技 X3.4© 2001-2213 极客世界.|Sitemap