
Python ujson.dump Function Code Examples


This article collects typical usage examples of the ujson.dump function in Python. If you are wondering what exactly dump does, how to call it, or what real-world uses look like, the curated examples below may help.



The following presents 20 code examples of the dump function, ordered by popularity.
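Before the project examples, here is a minimal, self-contained sketch of the basic call pattern; the file name and data are hypothetical:

import ujson

record = {"name": "sensor-1", "readings": [1.5, 2.0, 3.25], "active": True}

# dump serializes straight to an open file handle; dumps would return a string instead.
with open("record.json", "w") as fp:
    ujson.dump(record, fp)

# Round-trip check with the symmetric ujson.load.
with open("record.json") as fp:
    assert ujson.load(fp) == record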

Example 1: _update_local

def _update_local(fname, qparams, replacement):
    """Update a document created using the local framework
    Parameters
    -----------
    fname: str
        Name of the local file against which the query should be run
    qparams: dict
        Query parameters. Similar to online query methods
    replacement: dict
        Fields/value pair to be updated. Beware of disallowed fields
        such as time and uid
    """
    try:
        with open(fname, 'r') as fp:
            local_payload = ujson.load(fp)
        qobj = mongoquery.Query(qparams)
        for _sample in local_payload:
            try:
                if qobj.match(_sample):
                    for k, v in replacement.items():
                        _sample[k] = v
            except mongoquery.QueryError:
                pass
        with open(fname, 'w') as fp:
            ujson.dump(local_payload, fp)
    except FileNotFoundError:
        raise RuntimeWarning('Local file {} does not exist'.format(fname))
Developer: CJ-Wright | Project: amostra | Lines: 27 | Source: local_commands.py
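A hypothetical invocation of the helper above (the file name, query, and replacement are made up for illustration):

# Assumes samples.json holds a JSON list of sample documents,
# e.g. [{"uid": "abc", "name": "old", ...}, ...]
_update_local('samples.json',
              qparams={'uid': 'abc'},
              replacement={'name': 'renamed'})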


Example 2: saveTweets

    def saveTweets(self):
        print "\nDumping tweets to file, contains %s tweets with %s accepted, %s rejected, %s partial matches, and %s irrelevant" % (self.cfg['StopCount'],
                        self.acceptedCount,
                        self.excludedCount,
                        self.partialCount,
                        self.irrelevantCount)
        print '\tJson text dump complete....\n'
                
        meaningful =  self.jsonAccepted*self.cfg['KeepAccepted'] + self.jsonPartial*self.cfg['KeepPartial'] + self.jsonExcluded*self.cfg['KeepExcluded']
        
        if self.cfg['TweetData'] != 'all':
            meaningful = cleanJson(meaningful,self.cfg,self.tweetTypes)
            
        timeStamp = self.startTime
        
        if self.cfg['KeepRaw']:
            with open(self.pathOut+'Raw_'+self.cfg['FileName']+'_'+timeStamp+'.json', 'w') as outFile:
                json.dump(self.jsonRaw,outFile)
            outFile.close()

        with open(self.pathOut+'FilteredTweets_'+self.cfg['FileName']+'_'+timeStamp+'.json', 'w') as outFile:
            json.dump(meaningful,outFile)
        outFile.close()
        giListener.flushTweets(self) 
        print "Updating geoPickle"
        self.geoCache = updateGeoPickle(self.geoCache,self.cfg['Directory']+'caches/'+pickleName) 
Developer: jschlitt84 | Project: ChatterGrabber | Lines: 26 | Source: giListener.py


Example 3: main

def main(argv):
    args = docopt(__doc__, argv=argv)

    params = dict(p.split(':') for p in args['--parameters'])

    # Format sort parameters.
    if args['--sort']:
        for i, field in enumerate(args['--sort']):
            key = 'sort[{0}]'.format(i)
            params[key] = field.strip().replace(':', ' ')

    query = ' '.join(args['<query>'])
    if args['--itemlist']:
        fields = ['identifier']
    else:
        fields = args['--field']
    search = search_items(query, fields=fields, params=params, v2=args['--v2'])
    if args['--number-found']:
        sys.stdout.write('{0}\n'.format(search.num_found))
        sys.exit(0)
    for result in search:
        try:
            if args['--itemlist']:
                sys.stdout.write(result.get('identifier', ''))
            else:
                json.dump(result, sys.stdout)
            sys.stdout.write('\n')
        except IOError:
            sys.exit(0)
Developer: digikeri | Project: internetarchive | Lines: 29 | Source: ia_search.py


Example 4: process

    def process(self, inputs):
        try:
            prov = None
            for x in inputs:
                prov = inputs[x]

            if isinstance(prov, list) and "data" in prov[0]:
                prov = prov[0]["data"]
            elif "_d4p" in prov:
                prov = prov["_d4p"]

            self.bulk.append(prov)
            if len(self.bulk) == 100:
                # Flush the accumulated records to a uniquely named bulk file.
                filep = open(
                    os.environ['PROV_PATH'] +
                    "/bulk_" +
                    getUniqueId(),
                    "w")
                ujson.dump(self.bulk, filep)
                filep.close()
                self.bulk[:] = []
        except Exception:
            self.log(traceback.format_exc())
Developer: aspinuso | Project: VERCE | Lines: 31 | Source: rtxcorr3.py


Example 5: saveDatabase

	def saveDatabase(self):
		self.proxySend("Creating dict from room objects.")
		db = {}
		for vnum, roomObj in iterItems(self.rooms):
			newRoom = {}
			newRoom["name"] = roomObj.name
			newRoom["desc"] = roomObj.desc
			newRoom["dynamicDesc"] = roomObj.dynamicDesc
			newRoom["note"] = roomObj.note
			newRoom["terrain"] = roomObj.terrain
			newRoom["light"] = roomObj.light
			newRoom["align"] = roomObj.align
			newRoom["portable"] = roomObj.portable
			newRoom["ridable"] = roomObj.ridable
			newRoom["mobFlags"] = list(roomObj.mobFlags)
			newRoom["loadFlags"] = list(roomObj.loadFlags)
			newRoom["x"] = roomObj.x
			newRoom["y"] = roomObj.y
			newRoom["z"] = roomObj.z
			newRoom["exits"] = {}
			for direction, exitObj in iterItems(roomObj.exits):
				newExit = {}
				newExit["exitFlags"] = list(exitObj.exitFlags)
				newExit["doorFlags"] = list(exitObj.doorFlags)
				newExit["door"] = exitObj.door
				newExit["to"] = exitObj.to
				newRoom["exits"][direction] = newExit
			db[vnum] = newRoom
		self.proxySend("Saving the database in JSon format.")
		with codecs.open(MAP_FILE, "wb", encoding="utf-8") as fileObj:
			json.dump(db, fileObj)
		self.proxySend("Map Database saved.")
Developer: alex-games | Project: a1 | Lines: 32 | Source: mapper.py


Example 6: run_experiment

def run_experiment():
    http_client = AsyncHTTPClient()
    num_files = len(os.listdir("./urls"))
    for i, url_file in enumerate(os.listdir("./urls")):
        if not url_file.endswith(".json"):
            print "Skilling: ", url_file
            continue
        urls = json.load(open("./urls/" + url_file))
        filtered_urls = filter(data_not_exists, urls)
        random.shuffle(filtered_urls)
        p = PB.ProgressBar(maxval=len(filtered_urls)//10 + 1, widgets=("{} / {}".format(i, num_files), PB.Bar(), PB.ETA())).start()
        for urls_chunk in p(chunk_seq(filtered_urls, 10)):
            try:
                responses = yield [http_client.fetch(url['url']) for url in urls_chunk]
            except:
                print "Failed for some result in: ", urls_chunk
                continue
            for raw, response in izip(urls_chunk, responses):
                url = raw['url']
                data = {"url" : url, "body" : response.body, "desc" : raw['desc']}
                fname = url_to_filename(raw)
                try:
                    os.makedirs(os.path.dirname(fname))
                except OSError:
                    pass
                json.dump(data, open(fname, "w+"))
            time.sleep(.5)
Developer: mynameisfiber | Project: congressional_data | Lines: 27 | Source: download_hearings.py


Example 7: export_uploads_local_helper

def export_uploads_local_helper(realm, output_dir, local_dir):
    # type: (Realm, Path, Path) -> None
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    count = 0
    records = []
    for attachment in Attachment.objects.filter(realm_id=realm.id):
        local_path = os.path.join(local_dir, attachment.path_id)
        output_path = os.path.join(output_dir, attachment.path_id)
        mkdir_p(os.path.dirname(output_path))
        subprocess.check_call(["cp", "-a", local_path, output_path])
        stat = os.stat(local_path)
        record = dict(realm_id=attachment.realm.id,
                      user_profile_id=attachment.owner.id,
                      user_profile_email=attachment.owner.email,
                      s3_path=attachment.path_id,
                      path=attachment.path_id,
                      size=stat.st_size,
                      last_modified=stat.st_mtime,
                      content_type=None)
        records.append(record)

        count += 1

        if (count % 100 == 0):
            logging.info("Finished %s" % (count,))
    with open(os.path.join(output_dir, "records.json"), "w") as records_file:
        ujson.dump(records, records_file, indent=4)
Developer: HKingz | Project: zulip | Lines: 29 | Source: export.py


Example 8: get_processed

def get_processed(a1=18, a2=24, p1=0, p2=8, l=10000, g='top-1,top-10%25,top-15%25,theory'):
  # processor is imported in functions to avoid deadlock when running
  # test_process in processor.py since that imports this module.
  import processor

  if not os.path.exists('cached_data'):
    os.makedirs('cached_data')

  processed = {}
  a1 = int(a1)
  a2 = int(a2)
  p1 = int(p1)
  p2 = int(p2)
  l = int(l)
  g = urllib.unquote(g).decode('utf8')
  goals = g.split(',')
  for goal in goals:
    filename = "cached_data/a1%ia2%ip1%ip2%il%i-%s.json" % (a1, a2, p1, p2, l, goal)
    processed_goal = []

    if os.path.isfile(filename):
      with open(filename) as fhandler:    
        processed_goal = ujson.load(fhandler)
    else:
      compatibilities = get_compatibilities(a1, a2, p1, p2, l)
      processed_goal = list(processor.process(compatibilities, lifetimes=l, goal=goal))
      with open(filename, 'w') as fhandler:
        ujson.dump(processed_goal, fhandler)
    processed[goal] = processed_goal
  return processed
Developer: jaycode | Project: mathematics-of-love | Lines: 30 | Source: data.py
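This example and Example 11 below share the same disk-cache idiom around ujson: load the JSON file if it exists, otherwise compute and dump. A generic sketch of the pattern, with illustrative names:

import os
import ujson

def cached_json(path, compute):
    # Return the cached contents of `path`, computing and dumping on a miss.
    if os.path.isfile(path):
        with open(path) as fh:
            return ujson.load(fh)
    result = compute()
    with open(path, 'w') as fh:
        ujson.dump(result, fh)
    return result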


Example 9: semantic_labeling

def semantic_labeling(train_dataset, test_dataset, train_dataset2=None, evaluate_train_set=False, reuse_rf_model=True):
    """Doing semantic labeling, train on train_dataset, and test on test_dataset.

    train_dataset2 is optionally provided in case train_dataset and test_dataset have no overlapping semantic types.
    For example, if train_dataset covers soccer domains and test_dataset covers weather domains, the system cannot
    recognize the semantic types of test_dataset because there is no overlap. We then need to provide another
    train_dataset2 that has semantic types of the weather domains, so that the system is able to make predictions.

    train_dataset2 defaults to train_dataset (train_dataset is used to train the RandomForest).

    :param train_dataset: str
    :param test_dataset: str
    :param train_dataset2: Optional[str]
    :param evaluate_train_set: bool
    :param reuse_rf_model: bool
    :return:
    """
    logger = get_logger("semantic-labeling-api", format_str='>>>>>> %(asctime)s - %(levelname)s:%(name)s:%(module)s:%(lineno)d:   %(message)s')

    if train_dataset2 is None:
        train_dataset2 = train_dataset
        datasets = [train_dataset, test_dataset]
    else:
        datasets = [train_dataset, test_dataset, train_dataset2]

    semantic_labeler = SemanticLabeler()
    # read data into memory
    logger.info("Read data into memory")
    semantic_labeler.read_data_sources(list(set(datasets)))
    # index datasets that haven't been indexed before

    not_indexed_datasets = list({dataset for dataset in datasets if not is_indexed(dataset)})
    if len(not_indexed_datasets) > 0:
        logger.info("Index not-indexed datasets: %s" % ",".join(not_indexed_datasets))
        semantic_labeler.train_semantic_types(not_indexed_datasets)

    # remove existing file if not reuse previous random forest model
    if not reuse_rf_model and os.path.exists("model/lr.pkl"):
        os.remove("model/lr.pkl")

    # train the model
    logger.info("Train randomforest... with args ([1], [%s]", train_dataset)
    semantic_labeler.train_random_forest([1], [train_dataset])

    # generate semantic typing
    logger.info("Generate semantic typing using: trainset: %s, for testset: %s", train_dataset, test_dataset)
    result = semantic_labeler.test_semantic_types_from_2_sets(train_dataset2, test_dataset)

    if not os.path.exists("output"):
        os.mkdir("output")
    with open("output/%s_result.json" % test_dataset, "w") as f:
        ujson.dump(result, f)

    if evaluate_train_set:
        logger.info("Generate semantic typing for trainset")
        result = semantic_labeler.test_semantic_types_from_2_sets(train_dataset2, train_dataset2)
        with open("output/%s_result.json" % train_dataset2, "w") as f:
            ujson.dump(result, f)

    return result
Developer: minhptx | Project: iswc-2016-semantic-labeling | Lines: 60 | Source: api.py


Example 10: main

def main(args):
    """
    Main method
    Rolling like it's 2006
    """
    conn = boto.connect_s3(
            aws_access_key_id=access_key,
            aws_secret_access_key=secret_key)
    bucket = conn.get_bucket("tweettrack")
    if len(sys.argv) == 4:
        followertable = read_followertable(args[1], bucket)
        assert followertable is not None
        print "followertable is this long: %d, and we're saving it" % (len(followertable),)
        with open("followertable.json", "w") as followertable_file:
            ujson.dump(followertable, followertable_file)
    else:
        print "followerstable..."
        with open(sys.argv[4], "r") as followertable_file:
            followertable = ujson.load(followertable_file)
        print "followerstable done..."
        #print "gammas..."
        #with open(sys.argv[5], "r") as gamma_file:
        #    gammas = ujson.load(gamma_file)
        #    gc.collect()
        #print "gammas done..."
    gammas = get_gammas(args[2], bucket)
    #with open("gammas.json", "w") as gamma_file:
    #    ujson.dump(gammas, gamma_file)
    do_join(args[3], followertable, gammas, bucket)
    conn.close()
Developer: jvictor0 | Project: TweetTracker | Lines: 30 | Source: GammaJoin.py


Example 11: get_compatibilities

def get_compatibilities(a1=18, a2=24, p1=0, p2=8, l=10000):
  compatibilities = []
  a1 = int(a1)
  a2 = int(a2)
  p1 = int(p1)
  p2 = int(p2)
  l = int(l)
  filename = "cached_data/a1%ia2%ip1%ip2%il%i.json" % (a1, a2, p1, p2, l)
  if not os.path.exists('cached_data'):
    os.makedirs('cached_data')
  if os.path.isfile(filename):
    with open(filename) as fhandler:    
      compatibilities = ujson.load(fhandler)
  else:
    for lt in range(1, l+1):
      # Number of candidates met per year should range between p1 and p2.
      yearly_num_candidates = []
      for a in range(0, (a2-a1)):
        yearly_num_candidates.append(random.choice(range(p1, p2)))
      for year, num_candidates in enumerate(yearly_num_candidates):
        # Compatibility scores of candidates should follow a normal distribution.
        scores = np.random.normal(size=num_candidates)
        for score in scores:
          compatibilities.append({
            'lifetime': lt,
            'candidate_score': round(score,3),
            'candidate_age_met': a1+year
          })
    with open(filename, 'w') as fhandler:
      ujson.dump(compatibilities, fhandler)
  return compatibilities
Developer: jaycode | Project: mathematics-of-love | Lines: 31 | Source: data.py


Example 12: process

    def process(self, id: int):
        """Increment offsets from a volume.
        """
        text = Text.query.get(id)

        tokens = text.tokens()

        # Assemble token list.

        rows = [
            dict(
                text_id=id,
                ratio=i/len(tokens),
                offset=i,
                **token._asdict()
            )
            for i, token in enumerate(tokens)
        ]

        # Flush to disk.

        path = os.path.join(self.result_dir, str(uuid.uuid4()))

        with open_makedirs(path, 'w') as fh:
            ujson.dump(rows, fh)
Developer: davidmcclure | Project: literary-interior | Lines: 32 | Source: ext_tokens.py
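The open_makedirs helper is not shown in the excerpt; a plausible sketch of such a context manager (an assumption, not the project's actual code):

import os
from contextlib import contextmanager

@contextmanager
def open_makedirs(path, mode):
    # Ensure the parent directory exists before opening the file.
    os.makedirs(os.path.dirname(path) or ".", exist_ok=True)
    with open(path, mode) as fh:
        yield fh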


Example 13: add_to_resources

def add_to_resources(movie):
    
    if type(movie) is not dict: return "A movie needs to be specified as key:value pairs in a dictionary. Process Aborted."
    
    if 'alias' not in movie.keys(): return "Update has no 'alias' key. Process Aborted."
    if 'tag' not in movie.keys(): return "Update has no 'tag' key. Process Aborted."
    if 'title' not in movie.keys(): return "Update has no 'title' key. Process Aborted."
    
    if 'resources.json' not in os.listdir('.'):
            return " The file 'resources.json' is not in the current working directory. Process Aborted."
    
    with open('resources.json') as json_file:  
        resource = ujson.load(json_file)
    
    if is_in_resources(resource, movie['alias']):
        return "%s with alias '%s' and tag '%s' is already added. Need to update? Use the update function." % (movie['title'], movie['alias'], movie['tag'])
    else:
        movie['timestamp'] = datetime.datetime.now()
        resource['movies'].append(movie)
        
        resource['logs'].append({
        'timestamp': datetime.datetime.now(),
        'type': 'post',
        'message': " '%s' with alias '%s' and tag '%s' was successfully added." %(movie['title'], movie['alias'], movie['tag'])
        })
        
        with open('resources.json', 'w') as outfile:  
            ujson.dump(resource, outfile)
        
        return "%s with alias '%s' and tag '%s' was successfully added." %(movie['title'], movie['alias'], movie['tag'])
Developer: ahmedtadde | Project: CBM | Lines: 30 | Source: resources.py


Example 14: create

    def create(self, name=None, time=None, uid=None, container=None,
               **kwargs):
        """Create a sample locally

        Parameters
        ----------
        name: str
            Name of the sample
        time: float
            Timestamp generated by the client
        uid: str
            Unique identifier for this sample
        container: str, doct.Document
            The container/group sample is contained within

        Returns
        -------
        payload: dict
            Document dict that was inserted
        """
        # TODO: Allow container to be an object
        if container:
            container = doc_or_uid_to_uid(container)
        payload = dict(uid=uid if uid else str(uuid4()),
                       name=name, time=time if time else ttime.time(),
                       container=container if container else 'NULL',
                       **kwargs)
        self.sample_list.append(payload)
        with open(self._samp_fname, 'w+') as fp:
            ujson.dump(self.sample_list, fp)
        return payload
Developer: CJ-Wright | Project: amostra | Lines: 31 | Source: local_commands.py


Example 15: dump_event_queues

def dump_event_queues():
    start = time.time()

    with open(settings.JSON_PERSISTENT_QUEUE_FILENAME, "w") as stored_queues:
        ujson.dump([(qid, client.to_dict()) for (qid, client) in six.iteritems(clients)], stored_queues)

    logging.info("Tornado dumped %d event queues in %.3fs" % (len(clients), time.time() - start))
Developer: RomiPierre | Project: zulip | Lines: 7 | Source: event_queue.py
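The dump above has a natural counterpart at startup. A hedged sketch of what reloading might look like, assuming a from_dict constructor that mirrors client.to_dict(); the class and function names here are assumptions, not the project's actual loader:

def load_event_queues():
    with open(settings.JSON_PERSISTENT_QUEUE_FILENAME) as stored_queues:
        data = ujson.load(stored_queues)
    # Rebuild the qid -> client mapping that was dumped above.
    return {qid: ClientDescriptor.from_dict(qdict) for qid, qdict in data}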


Example 16: save

    def save(self, data):
        """Save data to file.

        Careful, this overwrites any existing data on file.
        Use self.udpate() to perform partial updates.
        """
        json.dump(data, open(self.path, 'w'))
Developer: shterrel | Project: Waldo | Lines: 7 | Source: batch.py


Example 17: run

    def run(self):
        names = collections.defaultdict(set)
        url = "http://www.jstor.org/kbart/collections/all-archive-titles"
        output = shellout("""curl -sL "{url}" > {output} """, url=url)

        with luigi.LocalTarget(output, format=TSV).open() as handle:
            for row in handle.iter_tsv():
                if len(row) < 27:
                    self.logger.warn("short KBART row, skipping: %s", row)
                    continue

                issns = row[1:3]
                parts = [p.strip() for p in row[26].split(";")]

                for issn in [v.strip() for v in issns]:
                    if not issn:
                        continue
                    for name in parts:
                        if not name:
                            continue
                        names[issn].add(name)

        with self.output().open('w') as output:
            import json  # ujson does not support cls keyword
            json.dump(names, output, cls=SetEncoder)
Developer: miku | Project: siskin | Lines: 25 | Source: jstor.py
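The SetEncoder used above is not shown in the excerpt. Since names maps each ISSN to a set, a plausible implementation converts sets to sorted lists (a sketch under that assumption, not necessarily the project's version):

import json

class SetEncoder(json.JSONEncoder):
    # JSON has no set type, so emit sets as sorted lists.
    def default(self, obj):
        if isinstance(obj, set):
            return sorted(obj)
        return super(SetEncoder, self).default(obj)

json.dumps({"0001-0002": {"B", "A"}}, cls=SetEncoder)  # '{"0001-0002": ["A", "B"]}'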


Example 18: __init__

    def __init__(self, path, writer_queue=None):
        """Initialize using path to file and optional thread-safe queue.

        Queue is used for json serializable data to be written to file when
        self.write_queued() is called.

        If the file at 'path' doesn't exist it will be created.
        """

        self.path = os.path.realpath(os.path.expanduser(path))
        if not os.path.exists(self.path):
            print("Persistence file %s does not exist yet, creating it...")
            json.dump({}, open(self.path, 'w'))
        else:
            # check for json-ness
            try:
                json.load(open(self.path))
                LOG.debug("Loaded existing persistence file %s.",
                          os.path.relpath(self.path))
            except ValueError as err:
                raise ValueError("The persistence file -> %s is not "
                                 "a valid json file. | %s"
                                 % (os.path.relpath(self.path), err))
        if writer_queue and not isinstance(writer_queue, Queue.Queue):
            raise TypeError('writer_queue should be a Queue.Queue.')
        elif writer_queue:
            self.synq = writer_queue
            self.synq._persisted = set()
        else:
            self.synq = None
Developer: shterrel | Project: Waldo | Lines: 30 | Source: batch.py


Example 19: savemsgstore

def savemsgstore():
    try:
        with open("generalmessage.json", "w") as f:
            ujson.dump(generalmessagestore, f)
    except Exception:
        pass
Developer: joelhaasnoot | Project: KV78Turbo-OVAPI | Lines: 7 | Source: kv78turbo-backend.py


Example 20: combine_dicts

def combine_dicts():
    with open('title10to100000.json') as tag200, open('title100000plus.json') as tag1500:
        tag200dict = ujson.load(tag200)
        tag500dict = ujson.load(tag1500)
        newdict = dict(chain(tag200dict.items(), tag500dict.items()))
        with open('titletagwords.json', 'w') as write:
            ujson.dump(newdict, write)
Developer: slyfocks | Project: facebookkaggle | Lines: 7 | Source: ngramanalysis.py
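On Python 3.5+, the dict(chain(...)) merge above can be written with dict unpacking; later keys win in both forms. A small equivalent sketch with made-up data:

tag200dict = {"a": 1, "shared": "old"}
tag500dict = {"b": 2, "shared": "new"}
newdict = {**tag200dict, **tag500dict}
assert newdict == {"a": 1, "b": 2, "shared": "new"}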



Note: the ujson.dump examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are excerpted from open-source projects contributed by their developers; copyright remains with the original authors, and redistribution or use must follow each project's license. Please do not reproduce without permission.

