
Python toolz.get Function Code Examples


This article compiles typical usage examples of the Python toolz.get function. If you have been wondering what get does, how to call it, or what it looks like in real code, the curated examples below should help.



The following 19 code examples of the get function are shown, ordered by popularity by default.
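Before the project snippets, here is a minimal sketch of toolz.get's basic behaviour; the seq and record values are invented purely for illustration. A single index or key returns one element, a list of indices or keys returns a tuple, and default is returned for anything missing.

>>> from toolz import get
>>> seq = ['a', 'b', 'c', 'd']
>>> get(1, seq)                      # single index
'b'
>>> get([0, 2], seq)                 # a list of indices returns a tuple
('a', 'c')
>>> record = {'name': 'Alice', 'id': 1}
>>> get('name', record)              # keys work the same way on dicts
'Alice'
>>> get(['id', 'name'], record)
(1, 'Alice')
>>> get('age', record, default=-1)   # default is used for missing keys/indices
-1

Most of the examples below are variations on these three patterns: selecting columns by position, pulling several fields out of a record at once, or supplying a fallback value.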

Example 1: pandas_read_csv

    def pandas_read_csv(self, usecols=None, **kwargs):
        """ Use pandas.read_csv with the right keyword arguments

        In particular we know what dtypes should be, which columns are dates,
        etc...
        """
        dtypes, dates = dshape_to_pandas(self.schema)

        if usecols:
            if builtins.all(isinstance(c, int) for c in usecols):
                usecols = get(usecols, self.columns)
            dates = [name for name in dates if name in usecols]

        result = pd.read_csv(self.path,
                             names=kwargs.pop('names', self.columns),
                             usecols=usecols,
                             compression={'gz': 'gzip',
                                          'bz2': 'bz2'}.get(ext(self.path)),
                             dtype=kwargs.pop('dtype', dtypes),
                             parse_dates=kwargs.pop('parse_dates', dates),
                             encoding=kwargs.pop('encoding', self.encoding),
                             header=0 if self.header else None,
                             **merge(kwargs, clean_dialect(self.dialect)))

        reorder = get(list(usecols)) if usecols and len(usecols) > 1 else identity

        if isinstance(result, (pd.Series, pd.DataFrame)):
            return reorder(result)
        else:
            return map(reorder, result)
Developer: Casolt, Project: blaze, Lines of code: 30, Source: csv.py


Example 2: test_markov_tables

def test_markov_tables():
  markov_features = ['markov_N', 'markov_R', 'markov_NR', 'markov_RN', 'markov_NN', 'markov_RR']
  assert(t.get(markov_features, f.markov_tables(markovtesttree), str(0.0)) == 
    ('0.809523809524', '0.190476190476', '0.285714285714', '0.285714285714', '0.428571428571', '0.0'))
  assert(t.get(markov_features, f.markov_tables(smalltree), str(0.0)) == 
    ('1.0', '0.0', '0.0', '0.0', '1.0', '0.0'))
  assert(t.get(markov_features, f.markov_tables(f.compress(smalltree)), str(0.0)) == 
    ('1.0', '0.0', '0.0', '0.0', '0.0', '0.0'))
Developer: vshesh, Project: alignment-tree, Lines of code: 8, Source: features_test.py


Example 3: select_permits

def select_permits():
    types = get(permit_type_checkbox.active, permit_types)
    res_non = get(res_non_checkbox.active, res_non_types)
    selected = df[(df.year >= min_year.value) &
                  (df.year <= max_year.value) &
                  (df.permit_value >= min_permit_cost.value) &
                  (df.permit_value <= max_permit_cost.value) &
                  (df.type.isin(types)) &
                  (df.res_non.isin(res_non))]
    return selected
Developer: jcrist, Project: xdata-2016-census, Lines of code: 10, Source: main.py


Example 4: records_to_tuples

def records_to_tuples(ds, data):
    """ Transform records into tuples

    Examples
    --------
    >>> seq = [{'a': 1, 'b': 10}, {'a': 2, 'b': 20}]
    >>> list(records_to_tuples('var * {a: int, b: int}', seq))
    [(1, 10), (2, 20)]

    >>> records_to_tuples('{a: int, b: int}', seq[0])  # single elements
    (1, 10)

    >>> records_to_tuples('var * int', [1, 2, 3])  # pass through on non-records
    [1, 2, 3]

    See Also
    --------

    tuples_to_records
    """
    if isinstance(ds, (str, unicode)):
        ds = dshape(ds)
    if isinstance(ds.measure, Record) and len(ds.shape) == 1:
        return pluck(ds.measure.names, data, default=None)
    if isinstance(ds.measure, Record) and len(ds.shape) == 0:
        return get(ds.measure.names, data)
    if not isinstance(ds.measure, Record):
        return data
    raise NotImplementedError()
Developer: kwin-wang, Project: odo, Lines of code: 29, Source: utils.py


Example 5: post_compute

def post_compute(e, q, d):
    """
    Execute a query using MongoDB's aggregation pipeline

    The compute_up functions operate on Mongo Collection / list-of-dict
    queries.  Once they're done we need to actually execute the query on
    MongoDB.  We do this using the aggregation pipeline framework.

    http://docs.mongodb.org/manual/core/aggregation-pipeline/
    """
    d = {'$project': toolz.merge({'_id': 0},  # remove mongo identifier
                                 dict((col, 1) for col in e.fields))}
    q = q.append(d)

    if not e.dshape.shape:  # not a collection
        result = q.coll.aggregate(list(q.query))['result'][0]
        if isscalar(e.dshape.measure):
            return result[e._name]
        else:
            return get(e.fields, result)

    dicts = q.coll.aggregate(list(q.query))['result']

    if isscalar(e.dshape.measure):
        return list(pluck(e.fields[0], dicts, default=None))  # dicts -> values
    else:
        return list(pluck(e.fields, dicts, default=None))  # dicts -> tuples
Developer: Casolt, Project: blaze, Lines of code: 27, Source: mongo.py


Example 6: partial_reduce

def partial_reduce(func, x, split_every, keepdims=False, dtype=None, name=None):
    """Partial reduction across multiple axes.

    Parameters
    ----------
    func : function
    x : Array
    split_every : dict
        Maximum reduction block sizes in each dimension.

    Example
    -------
    Reduce across axis 0 and 2, merging a maximum of 1 block in the 0th
    dimension, and 3 blocks in the 2nd dimension:

    >>> partial_reduce(np.min, x, {0: 1, 2: 3})    # doctest: +SKIP
    """
    name = name or 'p_reduce-' + tokenize(func, x, split_every, keepdims, dtype)
    parts = [list(partition_all(split_every.get(i, 1), range(n))) for (i, n)
             in enumerate(x.numblocks)]
    keys = product(*map(range, map(len, parts)))
    out_chunks = [tuple(1 for p in partition_all(split_every[i], c)) if i
                  in split_every else c for (i, c) in enumerate(x.chunks)]
    if not keepdims:
        out_axis = [i for i in range(x.ndim) if i not in split_every]
        getter = lambda k: get(out_axis, k)
        keys = map(getter, keys)
        out_chunks = list(getter(out_chunks))
    dsk = {}
    for k, p in zip(keys, product(*parts)):
        decided = dict((i, j[0]) for (i, j) in enumerate(p) if len(j) == 1)
        dummy = dict(i for i in enumerate(p) if i[0] not in decided)
        g = lol_tuples((x.name,), range(x.ndim), decided, dummy)
        dsk[(name,) + k] = (func, g)
    return Array(merge(dsk, x.dask), name, out_chunks, dtype=dtype)
Developer: jcorbin, Project: dask, Lines of code: 35, Source: reductions.py


Example 7: finalize

 def finalize(bases):
     shape = bases[0].shape[:2]
     out = nd.empty(shape, dshape)
     for path, finalizer, inds in zip(paths, finalizers, indices):
         arr = reduce(getattr, path, out)
         np_arr = nd.as_numpy(arr.view_scalars(arr.dtype.value_type))
         np_arr[:] = finalizer(*get(inds, bases))
     return out
Developer: jcrist, Project: datashader, Lines of code: 8, Source: compiler.py


Example 8: __init__

 def __init__(self, lhs, rhs, on_left=None, on_right=None):
     self.lhs = lhs
     self.rhs = rhs
     if not on_left and not on_right:
         on_left = on_right = unpack(list(sorted(
             set(lhs.columns) & set(rhs.columns),
             key=lhs.columns.index)))
     if not on_right:
         on_right = on_left
     if isinstance(on_left, tuple):
         on_left = list(on_left)
     if isinstance(on_right, tuple):
         on_right = list(on_right)
     self._on_left = tuple(on_left) if isinstance(on_left, list) else on_left
     self._on_right = (tuple(on_right) if isinstance(on_right, list)
                         else on_right)
     if get(on_left, lhs.schema[0]) != get(on_right, rhs.schema[0]):
         raise TypeError("Schema's of joining columns do not match")
Developer: B-Rich, Project: blaze, Lines of code: 18, Source: table.py


Example 9: func

        def func(scheduler):
            """ Get CPU and Memory usage on each worker """
            workers = [k for k, v in sorted(scheduler.ncores.items(),
                                            key=lambda x: x[0], reverse=True)]
            nannies = [(ip, scheduler.nannies[(ip, port)])
                       for ip, port in workers]
            dicts = [get(-1, scheduler.resource_logs[w], dict())
                     for w in nannies]

            return {'workers': workers,
                    'cpu': [d.get('cpu_percent', -1) for d in dicts],
                    'memory': [d.get('memory_percent', -1) for d in dicts]}
Developer: lucashtnguyen, Project: distributed, Lines of code: 12, Source: resource_monitor.py


Example 10: rget

def rget(sequence, key, default=None):
    """Get element in a sequence or dict.

    Like toolz.get but with parameters in reverse order.

    Args:
        sequence (sequence or dict): sequence or dict
        key (str or int): key to access in sequence

    Returns:
        object: value behind the key
    """
    return get(key, sequence, default=default)
Developer: MattWellie, Project: chanjo, Lines of code: 13, Source: utils.py
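A quick usage sketch of the rget wrapper above; the dict and list values are invented for illustration. The behaviour simply mirrors toolz.get with the arguments swapped, which reads more naturally when the container comes first.

>>> rget({'chrom': '1', 'pos': 100}, 'pos')        # key lookup, sequence first
100
>>> rget(['exon1', 'exon2'], 0)                    # positional lookup
'exon1'
>>> rget({'chrom': '1'}, 'strand', default='+')    # default for a missing key
'+'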


Example 11: join

def join(lhs, rhs, on_left=None, on_right=None, how='inner'):
    if not on_left and not on_right:
        on_left = on_right = unpack(list(sorted(
            set(lhs.columns) & set(rhs.columns),
            key=lhs.columns.index)))
    if not on_right:
        on_right = on_left
    if isinstance(on_left, tuple):
        on_left = list(on_left)
    if isinstance(on_right, tuple):
        on_right = list(on_right)
    if get(on_left, lhs.schema[0]) != get(on_right, rhs.schema[0]):
        raise TypeError("Schema's of joining columns do not match")
    _on_left = tuple(on_left) if isinstance(on_left, list) else on_left
    _on_right = (tuple(on_right) if isinstance(on_right, list)
                        else on_right)

    how = how.lower()
    if how not in ('inner', 'outer', 'left', 'right'):
        raise ValueError("How parameter should be one of "
                         "\n\tinner, outer, left, right."
                         "\nGot: %s" % how)

    return Join(lhs, rhs, _on_left, _on_right, how)
Developer: ChrisBeaumont, Project: blaze, Lines of code: 24, Source: table.py


Example 12: get_prox_key

def get_prox_key(infos, key, default=None, reduce=None, array=True):
    """ Build array of prox output for each operator and iteration.
    
    Return an {#iterations} by {#prox operators} array (unless a reduction is performed).
    
    reduce is usually, np.mean, np.min, or np.max
    
    Some prox outputs may be `None` or `{}`. In that case,
    return the `default` value.
    """
    g = ([get(key, p, default=default) for p in info['prox_infos']] for info in infos )
    
    if reduce:
        g = (reduce(row) for row in g)
        
    g = list(g)
    if array:
        g = np.array(g)

    return g
Developer: ajfriend, Project: admm, Lines of code: 20, Source: report.py


Example 13: types_of_fields

def types_of_fields(fields, expr):
    """ Get the types of fields in an expression

    Examples
    --------
    >>> from blaze import symbol
    >>> expr = symbol('e', 'var * {x: int64, y: float32}')
    >>> types_of_fields('y', expr)
    ctype("float32")

    >>> types_of_fields(['y', 'x'], expr)
    (ctype("float32"), ctype("int64"))

    >>> types_of_fields('x', expr.x)
    ctype("int64")
    """
    if isinstance(expr.dshape.measure, Record):
        return get(fields, expr.dshape.measure)
    else:
        if isinstance(fields, (tuple, list, set)):
            assert len(fields) == 1
            fields, = fields
        assert fields == expr._name
        return expr.dshape.measure
Developer: CaptainAL, Project: Spyder, Lines of code: 24, Source: collections.py


Example 14: __main__ (features.py)

if __name__ == '__main__':

  markov_features = ['markov_N', 'markov_R', 'markov_NR', 'markov_RN', 'markov_NN', 'markov_RR']
  opts, args = getopt.getopt(sys.argv[1:], 'l:n', ['--language', '--normalize'])
  language = None
  normalize = False
  for o,a in opts:
    if o == '-l' or o == '--language':
      language = a + ','
    if o == '-n' or o == '--normalize':
      normalize = True

  # Generate header for feature values
  print(('language,' if language is not None else '') +
          (','.join(','.join(x[0] + (lambda s: '' if s[0] == '<' else '__' + s)(nf.__name__)
                             for nf in x[2]) for x in features) if normalize
            else ','.join(x[0] for x in features)) + ',' +
          ','.join(markov_features) + ',' +
          ','.join([('compressed_' + f) for f in markov_features]))

  # For each tree, compute its associated feature values
  for line in fileinput.input(args):
    tree = parse_sexp(line)[0]
    print((language or '') + ','.join([str(','.join(str(float(f[1](tree)/float(nf(tree)))) for nf in f[2]) if normalize else f[1](tree)) for f in features] +
      list(t.get(markov_features, markov_tables(tree), str(0.0))) +
      list(t.get(markov_features, markov_tables(compress(tree)), str(0.0)))))

Developer: vshesh, Project: alignment-tree, Lines of code: 29, Source: features.py


Example 15: __getitem__

 def __getitem__(self, key):
     if isinstance(key, list):
         return RecordAggregate(dict(zip(key, get(key, self._data))),
                                self.x_axis, self.y_axis)
     return self._data[key]
Developer: WilfR, Project: datashader, Lines of code: 5, Source: aggregates.py


Example 16: featurize

def featurize(tree, normalize=True):
  markov_features = ['markov_N', 'markov_R', 'markov_NR', 'markov_RN', 'markov_NN', 'markov_RR']
  tree = parse_sexp(tree)[0]
  return (','.join([str(','.join(str(float(f[1](tree)/float(nf(tree)))) for nf in f[2]) if normalize else f[1](tree)) for f in features] +
      list(t.get(markov_features, markov_tables(tree), str(0.0))) +
      list(t.get(markov_features, markov_tables(compress(tree)), str(0.0)))))
Developer: vshesh, Project: alignment-tree, Lines of code: 6, Source: features.py


Example 17: finalize

 def finalize(bases, **kwargs):
     data = {key: finalizer(get(inds, bases), **kwargs)
             for (key, finalizer, inds) in calls}
     return xr.Dataset(data)
Developer: jsignell, Project: datashader, Lines of code: 4, Source: compiler.py


Example 18: combine

 def combine(base_tuples):
     bases = tuple(np.stack(bs) for bs in zip(*base_tuples))
     return tuple(f(*get(inds, bases)) for (f, inds) in calls)
Developer: jcrist, Project: datashader, Lines of code: 3, Source: compiler.py


Example 19: finalize

 def finalize(bases, **kwargs):
     data = {key: finalizer(get(inds, bases), **kwargs)
             for (key, finalizer, inds) in calls}
     return RecordAggregate(data, **kwargs)
Developer: WilfR, Project: datashader, Lines of code: 4, Source: compiler.py



Note: The toolz.get examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and distribution or use should follow the corresponding project's license. Please do not reproduce without permission.

