Python persist.load_obj Function Code Examples


This article collects typical usage examples of the Python function neon.util.persist.load_obj. If you are wondering exactly how load_obj is used, how to call it, or what real-world examples look like, the hand-picked code samples below should help.



The sections below show 20 code examples of the load_obj function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps our system recommend better Python code samples.
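
Before working through the examples, a minimal round-trip sketch may be useful. It is only a hedged illustration: the file name and dictionary contents below are hypothetical, and it assumes neon is installed. load_obj simply deserializes a pickled Python object (often a dict of weights or dataset metadata) that was previously written with its counterpart save_obj:

from neon.util.persist import save_obj, load_obj

# hypothetical dict standing in for serialized model weights / dataset metadata
params = {'epoch_index': 3, 'layer_params_states': []}

# write the object to disk (pickled under the hood), then read it back
save_obj(params, 'params.pkl')
restored = load_obj('params.pkl')
assert restored['epoch_index'] == 3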

Example 1: main

def main():
    # parse the command line arguments
    parser = NeonArgparser(__doc__)
    parser.add_argument('--output_path', required=True,
                        help='Output path used when training model')
    parser.add_argument('--w2v_path', required=False, default=None,
                        help='Path to GoogleNews w2v file for vocab expansion.')
    parser.add_argument('--eval_data_path', required=False, default='./SICK_data',
                        help='Path to the SICK dataset for evaluating semantic relatedness')
    parser.add_argument('--max_vocab_size', required=False, default=1000000,
                        help='Limit the vocabulary expansion to fit in GPU memory')
    parser.add_argument('--subset_pct', required=False, default=100,
                        help='subset of training dataset to use (use to retrieve \
                        preprocessed data from training)')
    args = parser.parse_args(gen_be=True)

    # load vocab file from training
    _, vocab_file = load_data(args.data_dir, output_path=args.output_path,
                              subset_pct=float(args.subset_pct))
    vocab, _, _ = load_obj(vocab_file)

    vocab_size = len(vocab)
    neon_logger.display("\nVocab size from the dataset is: {}".format(vocab_size))

    index_from = 2  # 0: padding 1: oov
    vocab_size_layer = vocab_size + index_from
    max_len = 30

    # load trained model
    model_dict = load_obj(args.model_file)

    # Vocabulary expansion trick needs to pass the correct vocab set to evaluate (for tokenization)
    if args.w2v_path:
        neon_logger.display("Performing Vocabulary Expansion... Loading W2V...")
        w2v_vocab, w2v_vocab_size = get_w2v_vocab(args.w2v_path,
                                                  int(args.max_vocab_size), cache=True)

        vocab_size_layer = w2v_vocab_size + index_from
        model = load_sent_encoder(model_dict, expand_vocab=True, orig_vocab=vocab,
                                  w2v_vocab=w2v_vocab, w2v_path=args.w2v_path, use_recur_last=True)
        vocab = w2v_vocab
    else:
        # otherwise stick with original vocab size used to train the model
        model = load_sent_encoder(model_dict, use_recur_last=True)

    model.initialize(dataset=(max_len, 1))

    evaluate(model, vocab=vocab, data_path=args.eval_data_path, evaltest=True,
             vocab_size_layer=vocab_size_layer)
Developer ID: NervanaSystems, Project: neon, Lines of code: 49, Source: eval_sick.py


Example 2: load_vgg_weights

def load_vgg_weights(model, path):
    url = 'https://s3-us-west-1.amazonaws.com/nervana-modelzoo/'
    filename = 'VGG_ILSVRC_16_layers_fc_reduced_fused_conv_bias.p'
    size = 86046032

    workdir, filepath = Dataset._valid_path_append(path, '', filename)
    if not os.path.exists(filepath):
        Dataset.fetch_dataset(url, filename, filepath, size)

    print('De-serializing the pre-trained VGG16 model with dilated convolutions...')
    pdict = load_obj(filepath)

    model_layers = [l for l in model.layers.layers[0].layers]
    # convert source model into dictionary with layer name as keys
    src_layers = {layer['config']['name']: layer for layer in pdict['model']['config']['layers']}

    i = 0
    for layer in model_layers:
        if layer.classnm == 'Convolution_bias' and i < 15:
            # no states in above parameter file
            layer.load_weights(src_layers['Convolution_bias_'+str(i)], load_states=False)
            print('{} loaded from source file'.format(layer.name))
            i += 1
        elif hasattr(layer, 'W'):
            print('Skipping {} layer'.format(layer.name))
Developer ID: NervanaSystems, Project: neon, Lines of code: 25, Source: ssd_container.py


Example 3: __init__

    def __init__(
        self, repo_dir, inner_size, do_transforms=True, rgb=True, multiview=False, set_name="train", subset_pct=100
    ):

        assert subset_pct > 0 and subset_pct <= 100, "subset_pct must be between 0 and 100"
        assert set_name in ["train", "validation"]
        self.set_name = set_name if set_name == "train" else "val"

        self.repo_dir = repo_dir
        self.inner_size = inner_size
        self.minibatch_size = self.be.bsz

        # Load from repo dataset_cache:
        try:
            cache_filepath = os.path.join(repo_dir, "dataset_cache.pkl")
            dataset_cache = load_obj(cache_filepath)
        except:
            raise IOError(
                "Cannot find dataset cache in %s.  Run batch_writer to preprocess the"
                "data and create batch files for imageset" % (repo_dir)
            )

        # Should have following defined:
        req_attributes = [
            "global_mean",
            "nclass",
            "val_start",
            "ntrain",
            "label_names",
            "train_nrec",
            "img_size",
            "nval",
            "train_start",
            "val_nrec",
            "label_dict",
            "batch_prefix",
        ]

        for r in req_attributes:
            if r not in dataset_cache:
                raise ValueError("Dataset cache missing required attribute %s" % (r))

        self.__dict__.update(dataset_cache)
        self.filename = os.path.join(repo_dir, self.batch_prefix)

        self.center = False if do_transforms else True
        self.flip = True if do_transforms else False
        self.rgb = rgb
        self.multiview = multiview
        self.label = "l_id"
        if isinstance(self.nclass, dict):
            self.nclass = self.nclass[self.label]

        # Rough percentage
        self.recs_available = getattr(self, self.set_name + "_nrec")
        self.macro_start = getattr(self, self.set_name + "_start")
        self.macros_available = getattr(self, "n" + self.set_name)
        self.ndata = int(self.recs_available * subset_pct / 100.0)

        self.start = 0
Developer ID: huhoo, Project: neon, Lines of code: 60, Source: image.py


Example 4: load_imagenet_weights

def load_imagenet_weights(model, path):
    # load a pre-trained Alexnet from Neon model zoo to the local
    url = 'https://s3-us-west-1.amazonaws.com/nervana-modelzoo/'
    filename = 'alexnet.p'
    size = 488808400

    workdir, filepath = Dataset._valid_path_append(path, '', filename)
    if not os.path.exists(filepath):
        Dataset.fetch_dataset(url, filename, filepath, size)

    print('De-serializing the pre-trained Alexnet using ImageNet I1K ...')
    pdict = load_obj(filepath)

    param_layers = [l for l in model.layers_to_optimize]
    param_dict_list = pdict['layer_params_states']
    i = 0
    for layer, ps in zip(param_layers, param_dict_list):
        i = i+1
        print(i, layer.name)
        layer.set_params(ps)
        if 'states' in ps:
            layer.set_states(ps)
        if i == 10:
            print('Only load the pre-trained weights up to conv5 layer of Alexnet')
            break
Developer ID: Jicheng-Yan, Project: neon, Lines of code: 25, Source: fast_rcnn_alexnet.py


Example 5: load_vgg_all_weights

def load_vgg_all_weights(model, path):
    # load a pre-trained VGG16 from Neon model zoo to the local
    url = 'https://s3-us-west-1.amazonaws.com/nervana-modelzoo/VGG/'
    filename = 'VGG_D.p'
    size = 554227541

    workdir, filepath = Dataset._valid_path_append(path, '', filename)
    if not os.path.exists(filepath):
        Dataset.fetch_dataset(url, filename, filepath, size)

    print('De-serializing the pre-trained VGG16 model...')
    pdict = load_obj(filepath)

    param_layers = [l for l in model.layers.layers[0].layers]
    param_dict_list = pdict['model']['config']['layers']

    i = 0
    for layer, ps in zip(param_layers, param_dict_list):
        i += 1
        if i == 43:
            break
        layer.load_weights(ps, load_states=True)
        print(layer.name + " <-- " + ps['config']['name'])

    # to load the fc6 and fc7 from caffe into neon fc layers after ROI pooling
    neon_fc_layers = model.layers.layers[2].layers[1].layers[0].layers[2:5] +\
        model.layers.layers[2].layers[1].layers[0].layers[6:9]
    vgg_fc_layers = param_dict_list[44:47] + param_dict_list[48:51]

    for layer, ps in zip(neon_fc_layers, vgg_fc_layers):
        layer.load_weights(ps, load_states=True)
        print(layer.name + " <-- " + ps['config']['name'])
Developer ID: rlugojr, Project: neon, Lines of code: 32, Source: util.py


Example 6: load_imagenet_weights

def load_imagenet_weights(model, path):
    # load a pre-trained Alexnet from Neon model zoo to the local
    url = 'https://s3-us-west-1.amazonaws.com/nervana-modelzoo/alexnet/old/pre_v1.4.0/'
    filename = 'alexnet.p'
    size = 488808400

    workdir, filepath = Dataset._valid_path_append(path, '', filename)
    if not os.path.exists(filepath):
        Dataset.fetch_dataset(url, filename, filepath, size)

    print('Loading the Alexnet pre-trained with ImageNet I1K from: ' + filepath)
    pdict = load_obj(filepath)

    param_layers = [l for l in model.layers.layers]
        
    param_dict_list = pdict['model']['config']['layers']
    skip_loading = False
    for i, layer in enumerate(param_layers):
        if not load_pre_trained_weight(i, layer):
            skip_loading = True
        if not skip_loading:
            ps = param_dict_list[i]
            print "Loading weights for:{} [src: {}]".format(layer.name, ps['config']['name'])
            layer.load_weights(ps, load_states=True)
        else:
            config_name = param_dict_list[i]['config']['name'] if i < len(param_dict_list) else ""
            print "Skipped loading weights for: {} [src: {}]".format(layer.name, config_name)
        
    return
Developer ID: 623401157, Project: ModelZoo, Lines of code: 29, Source: transfer_learning.py


Example 7: deserialize

def deserialize(fn, datasets=None, inference=False):
    """
    Helper function to load all objects from a serialized file,
    this includes callbacks and datasets as well as the model, layers,
    etc.

    Arguments:
        fn (str): path to the file containing the serialized objects
        datasets (DataSet, optional): If the dataset is not serialized
                                      in the file it can be passed in
                                      as an argument.  This will also
                                      override any dataset in the serialized
                                      file
        inference (bool, optional): if true only the weights will be loaded, not
                                    the states
    Returns:
        Model: the model object
        Dataset: the data set object
        Callback: the callbacks
    """
    config_dict = load_obj(fn)

    if datasets is not None:
        logger.warn('Ignoring datasets serialized in archive file %s' % fn)
    elif 'datasets' in config_dict:
        ds_cls = load_class(config_dict['datasets']['type'])
        dataset = ds_cls.gen_class(config_dict['datasets']['config'])
        datasets = dataset.gen_iterators()

    if 'train' in datasets:
        data_iter = datasets['train']
    else:
        key = list(datasets.keys())[0]
        data_iter = datasets[key]
        logger.warn('Could not find training set iterator, '
                    'using %s instead' % key)

    model = Model(config_dict, data_iter)

    callbacks = None
    if 'callbacks' in config_dict:
        # run through the callbacks looking for dataset objects
        # replace them with the corresponding data set above
        cbs = config_dict['callbacks']['callbacks']
        for cb in cbs:
            if 'config' not in cb:
                cb['config'] = {}
            for arg in cb['config']:
                if type(cb['config'][arg]) is dict and 'type' in cb['config'][arg]:
                    if cb['config'][arg]['type'] == 'Data':
                        key = cb['config'][arg]['name']
                        if key in datasets:
                            cb['config'][arg] = datasets[key]
                        else:
                            cb['config'][arg] = None
        # now we can generate the callbacks
        callbacks = Callbacks.load_callbacks(config_dict['callbacks'], model)
    return (model, dataset, callbacks)
Developer ID: AdityoSanjaya, Project: neon, Lines of code: 57, Source: load_experiment.py
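
For orientation, a hedged usage sketch of the helper above follows; the archive name is hypothetical and is assumed to have been written by neon's serialization callback during training:

# full restore: model, data iterators, and callbacks come back together
model, dataset, callbacks = deserialize('experiment_checkpoint.pkl')

# with inference=True only the weights are loaded, not the optimizer states
model_infer, _, _ = deserialize('experiment_checkpoint.pkl', inference=True)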


Example 8: load_callbacks

    def load_callbacks(cls, cdict, model, data=[]):
        if type(cdict) is str:
            cdict = load_obj(cdict)
        callbacks = cls(model, output_file=cdict['output_file'])
        callbacks.epoch_marker = cdict['epoch_marker']
        callbacks.callbacks = []
        for cb in cdict['callbacks']:
            module = load_class(cb['type'])
            callbacks.callbacks.append(module(**cb['config']))
        return callbacks
Developer ID: EnriqueSMarquez, Project: CNNs_RelatedProjects, Lines of code: 10, Source: callbacks.py


Example 9: read_images

    def read_images(self, split):
        """
        Read sentences and image features from pickled dict

        Args:
            split (str): test or train split
        """
        data_path = os.path.join(self.path, "features.pkl.gz")
        self.dataset = load_obj(data_path)
        self.sent_data = self.dataset["sents"][split]
        self.features = self.dataset["feats"]
Developer ID: hkozachkov, Project: neon, Lines of code: 11, Source: imagecaption.py


Example 10: read_images

    def read_images(self, split):
        """
        Read sentences and image features from pickled dict

        Args:
            split (str): test or train split
        """
        data_path = os.path.join(self.path, 'features.pkl.gz')
        from neon.util.persist import load_obj
        self.dataset = load_obj(data_path)
        self.sent_data = self.dataset['sents'][split]
        self.features = self.dataset['feats']
Developer ID: AdityoSanjaya, Project: neon, Lines of code: 12, Source: imagecaption.py


Example 11: load_params

    def load_params(self, param_path):
        """
        Loads the model parameters (per layer weights, epochs run, optimizer
        states) saved in param_path from serialize().

        Arguments:
            param_path (str): File containing serialized python dict with layer
                              weights and states.
        """
        pdict = load_obj(param_path)
        self.deserialize(pdict, weights_only=True)
        logger.info('Model weights loaded from %s', param_path)
Developer ID: maony, Project: neon, Lines of code: 12, Source: model.py
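
As a quick illustration of how this method is normally called, here is a hedged sketch; the layer list and file name are hypothetical, and the weights file is assumed to have been produced earlier by Model.save_params (which writes the dict via save_obj, the counterpart of load_obj):

from neon.backends import gen_backend
from neon.initializers import Gaussian
from neon.layers import Affine
from neon.models import Model

be = gen_backend(backend='cpu', batch_size=128)

# hypothetical network; the layer list must match the one the weights were saved from
layers = [Affine(nout=10, init=Gaussian())]
model = Model(layers=layers)

# restores per-layer weights from the pickled dict produced by save_params()
model.load_params('trained_weights.p')  # hypothetical file name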


Example 12: __init__

    def __init__(self, repo_dir, inner_size,
                 do_transforms=True, rgb=True, multiview=False,
                 set_name='train', subset_pct=100):

        assert(subset_pct > 0 and subset_pct <= 100), "subset_pct must be between 0 and 100"
        assert(set_name in ['train', 'validation'])
        self.set_name = set_name if set_name == 'train' else 'val'

        self.repo_dir = repo_dir
        self.inner_size = inner_size
        self.minibatch_size = self.be.bsz

        # Load from repo dataset_cache:
        try:
            cache_filepath = os.path.join(repo_dir, 'dataset_cache.pkl')
            dataset_cache = load_obj(cache_filepath)
        except IOError:
            raise IOError("Cannot find '%s/dataset_cache.pkl'. Run batch_writer to "
                          "preprocess the data and create batch files for imageset"
                          % (repo_dir))

        # Should have following defined:
        req_attributes = ['global_mean', 'nclass', 'val_start', 'ntrain', 'label_names',
                          'train_nrec', 'img_size', 'nval', 'train_start', 'val_nrec',
                          'label_dict', 'batch_prefix']

        for r in req_attributes:
            if r not in dataset_cache:
                raise ValueError("Dataset cache missing required attribute %s" % (r))

        global_mean = dataset_cache['global_mean']
        if global_mean is not None and global_mean.shape != (3, 1):
            raise ValueError('Dataset cache global mean is not in the proper format. Run '
                             'neon/util/update_dataset_cache.py utility on %s.' % cache_filepath)

        self.__dict__.update(dataset_cache)
        self.filename = os.path.join(repo_dir, self.batch_prefix)

        self.center = False if do_transforms else True
        self.flip = True if do_transforms else False
        self.rgb = rgb
        self.multiview = multiview
        self.label = 'l_id'
        if isinstance(self.nclass, dict):
            self.nclass = self.nclass[self.label]

        # Rough percentage
        self.recs_available = getattr(self, self.set_name + '_nrec')
        self.macro_start = getattr(self, self.set_name + '_start')
        self.macros_available = getattr(self, 'n' + self.set_name)
        self.ndata = int(self.recs_available * subset_pct / 100.)

        self.start = 0
Developer ID: ferenckulcsar, Project: neon, Lines of code: 53, Source: image.py


Example 13: load_caffe_weights

def load_caffe_weights(model, file_path):
    pdict = load_obj(file_path)['params']

    #  we match by name with the caffe blobs
    for (pos, layer) in enumerate(model.layers.layers):
        if pos == 1:  # skip conv4_3
            continue
        load_weights(layer.layers, pdict)

    # we handle the tree-in-tree next
    conv4_3_loc = model.layers.layers[1].layers[1].layers[0].layers
    conv4_3_conf = model.layers.layers[1].layers[1].layers[1].layers
    load_weights(conv4_3_loc, pdict)
    load_weights(conv4_3_conf, pdict)
Developer ID: NervanaSystems, Project: neon, Lines of code: 14, Source: ssd_container.py


Example 14: run

    def run(self):
        load_dir = self.image_dir
        train_tar = os.path.join(load_dir, "ILSVRC2012_img_train.tar")
        validation_tar = os.path.join(load_dir, "ILSVRC2012_img_val.tar")

        for infile in (train_tar, validation_tar):
            if not os.path.exists(infile):
                raise IOError(
                    infile + " not found. Please ensure you have ImageNet downloaded."
                    "More info here: http://www.image-net.org/download-imageurls"
                )
        # download our version of the metadata
        meta_dir = load_i1kmeta(self.out_dir)
        meta_file = os.path.join(meta_dir, "neon_ILSVRC2012_devmeta.pkl")
        self.meta = load_obj(meta_file)
        self.__dict__.update(self.meta)  # get label_dict, label_names, global_mean from meta
        self.global_mean = np.mean(self.global_mean.reshape(3, -1), axis=1).reshape(3, 1)[::-1]

        np.random.seed(0)
        with tarfile.open(train_tar) as tf:
            s_sets = tf.getmembers()
            s_tars = [tarfile.open(fileobj=tf.extractfile(s)) for s in s_sets]
            print("Building trainset list from synset tars.")
            t_jpegfiles = []
            totalsz = len(s_tars)
            for i, st in enumerate(s_tars):
                if i % 100 == 0:
                    print("%d%% ..." % (int(round((100.0 * i) / totalsz))))
                t_jpegfiles += [st.extractfile(m) for m in st.getmembers()]
                st.close()
            print("Done loading")
            np.random.shuffle(t_jpegfiles)
            train_labels = [[self.label_dict[j.name[:9]]] for j in t_jpegfiles]
            self.train_nrec = len(t_jpegfiles)
            self.ntrain = -(-self.train_nrec // self.macro_size)
            self.nclass = {"l_id": 1000}
            self.train_start = 0
            train_labels = {"l_id": np.array(train_labels, dtype=np.int32)}
            self.write_batches("train", self.train_start, train_labels, t_jpegfiles)

        with tarfile.open(validation_tar) as tf:
            jpegfiles = sorted([tf.extractfile(m) for m in tf.getmembers()], key=lambda x: x.name)
            self.val_nrec = len(jpegfiles)
            self.nval = -(-self.val_nrec // self.macro_size)
            self.val_start = 10 ** int(np.log10(self.ntrain) + 1)
            val_labels = {"l_id": np.array(self.val_ground_truth, dtype=np.int32)}
            self.write_batches("val", self.val_start, val_labels, jpegfiles)
        self.save_meta()
Developer ID: hgl888, Project: neon, Lines of code: 48, Source: batch_writer.py


Example 15: load_params

    def load_params(self, param_path, load_states=True):
        """
        Loads the model parameters (per layer weights, epochs run, optimizer
        states) saved in param_path from serialize().

        Arguments:
            param_path (str): File containing serialized python dict with layer
                              weights and states.
            load_states (bool):  if False, then only the weights will be loaded
                                 into a model in which the layers have already been
                                 created, otherwise will (re)create the layers from
                                 the serialized parameters and set the learning
                                 states as well
        """
        self.deserialize(load_obj(param_path), load_states=load_states)
        logger.info('Model weights loaded from %s', param_path)
Developer ID: AdrienAtallah, Project: neon, Lines of code: 16, Source: model.py


Example 16: load_weights

    def load_weights(self, weight_path):
        """
        Loads the layer weights saved in weight_path from serialize().

        Arguments:
            weight_path (str): File containing serialized python dict with layer
                               weights and states.
        """
        pdict = load_obj(weight_path)
        self.epoch_index = pdict["epoch_index"]

        param_layers = [l for l in self.layers_to_optimize]
        param_dict_list = pdict["layer_params_states"]
        for l, ps in zip(param_layers, param_dict_list):
            l.set_params(ps["params"])
            if "states" in ps:
                l.set_states(ps["states"])
Developer ID: huhoo, Project: neon, Lines of code: 17, Source: model.py


Example 17: load_vgg_weights

def load_vgg_weights(model, path):
    # load a pre-trained VGG16 from Neon model zoo to the local
    url = 'https://s3-us-west-1.amazonaws.com/nervana-modelzoo/VGG/'
    filename = 'VGG_D_Conv.p'
    size = 169645138

    workdir, filepath = Dataset._valid_path_append(path, '', filename)
    if not os.path.exists(filepath):
        Dataset.fetch_dataset(url, filename, filepath, size)

    neon_logger.display('De-serializing the pre-trained VGG16 model...')
    pdict = load_obj(filepath)

    param_layers = [l for l in model.layers.layers[0].layers[0].layers]
    param_dict_list = pdict['model']['config']['layers']
    for layer, ps in zip(param_layers, param_dict_list):
        neon_logger.display("{}".format(layer.name, ps['config']['name']))
        layer.load_weights(ps, load_states=True)
Developer ID: JediKoder, Project: neon, Lines of code: 18, Source: util.py


Example 18: load_weights

    def load_weights(self, weight_path):
        """
        Loads the layer weights saved in weight_path from serialize().

        Arguments:
            weight_path (str): File containing serialized python dict with layer
                               weights and states.
        """
        pdict = load_obj(weight_path)

        self.epoch_index = pdict['epoch_index']

        param_layers = [l for l in self.layers_to_optimize]
        param_dict_list = pdict['layer_params_states']
        for l, ps in zip(param_layers, param_dict_list):
            l.set_params(ps['params'])
            if 'states' in ps:
                l.set_states(ps['states'])

        logger.info('Model weights loaded from %s', weight_path)
Developer ID: GerritKlaschke, Project: neon, Lines of code: 20, Source: model.py


Example 19: load_imagenet_weights

def load_imagenet_weights(model, path):
    # load a pre-trained Alexnet from Neon model zoo to the local
    url = 'https://s3-us-west-1.amazonaws.com/nervana-modelzoo/alexnet/'
    filename = 'alexnet.p'
    size = 488808400

    workdir, filepath = Dataset._valid_path_append(path, '', filename)
    if not os.path.exists(filepath):
        Dataset.fetch_dataset(url, filename, filepath, size)

    print('De-serializing the pre-trained Alexnet using ImageNet I1K ...')
    pdict = load_obj(filepath)

    param_layers = [l for l in model.layers.layers[0].layers[0].layers]
    param_dict_list = pdict['model']['config']['layers']
    for layer, ps in zip(param_layers, param_dict_list):
        print(layer.name, ps['config']['name'])
        layer.load_weights(ps, load_states=True)
        if ps['config']['name'] == 'Pooling_2':
            print('Only load the pre-trained weights up to conv5 layer of Alexnet')
            break
Developer ID: AdityoSanjaya, Project: neon, Lines of code: 21, Source: fast_rcnn_alexnet.py


Example 20: load_vgg_weights

def load_vgg_weights(model, path):
    # load a pre-trained VGG16 from Neon model zoo to the local
    url = 'https://s3-us-west-1.amazonaws.com/nervana-modelzoo/VGG/'
    filename = 'VGG_D.p'
    size = 554227541

    workdir, filepath = Dataset._valid_path_append(path, '', filename)
    if not os.path.exists(filepath):
        Dataset.fetch_dataset(url, filename, filepath, size)

    print('De-serializing the pre-trained VGG16 model...')
    pdict = load_obj(filepath)

    param_layers = [l for l in model.layers.layers[0].layers[0].layers]
    param_dict_list = pdict['model']['config']['layers']
    i = 0
    for layer, ps in zip(param_layers, param_dict_list):
        i += 1
        print(layer.name, ps['config']['name'])
        layer.load_weights(ps, load_states=True)
        if i == 43:
            break
Developer ID: AdrienAtallah, Project: neon, Lines of code: 22, Source: util.py



Note: The neon.util.persist.load_obj examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by the community; copyright of the source code remains with the original authors. Please refer to each project's License when distributing or using the code, and do not reproduce this article without permission.

