• 设为首页
  • 点击收藏
  • 手机版
    手机扫一扫访问
    迪恩网络手机版
  • 关注官方公众号
    微信扫一扫关注
    公众号

Python toolz.first函数代码示例

原作者: [db:作者] 来自: [db:来源] 收藏 邀请

本文整理汇总了Python中toolz.first函数的典型用法代码示例。如果您正苦于以下问题:Python first函数的具体用法?Python first怎么用?Python first使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。



在下文中一共展示了first函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: has_next_day

def has_next_day(dates_dict, year, month, day):
    """Return the next day found in nested ``dates_dict``.

    ``dates_dict`` is a nested mapping year -> month -> day -> value,
    with sortable keys.

    Returns a {"year", "month", "day"} dict for the next day, or
    ``False`` when ``day`` is the last day in the structure.
    (The original docstring claimed ``None``; the code returns ``False``.)
    """
    # Check the current month for a later day.
    days = sorted(dates_dict[year][month].keys())
    if day != last(days):
        di = days.index(day)
        next_day = days[di + 1]
        return {"year": year, "month": month, "day": next_day}

    # Check the current year for a later month; take its first day.
    months = sorted(dates_dict[year].keys())
    if month != last(months):
        mi = months.index(month)
        next_month = months[mi + 1]
        next_day = first(sorted(dates_dict[year][next_month].keys()))
        return {"year": year, "month": next_month, "day": next_day}

    # Check for a later year; take its first month's first day.
    years = sorted(dates_dict.keys())
    if year != last(years):
        yi = years.index(year)
        next_year = years[yi + 1]
        next_month = first(sorted(dates_dict[next_year].keys()))
        next_day = first(sorted(dates_dict[next_year][next_month].keys()))
        return {"year": next_year, "month": next_month, "day": next_day}
    return False
开发者ID:jasalt,项目名称:weatherlapse,代码行数:28,代码来源:utils.py


示例2: single_partition_join

def single_partition_join(left, right, **kwargs):
    """Merge two dask dataframes when one side has a single partition.

    If the merge is performed on the index, divisions can be kept;
    otherwise the new index will not necessarily correspond to the
    current divisions.

    Raises NotImplementedError when neither input has exactly one
    partition (previously this fell through to a NameError on ``dsk``).
    """
    meta = pd.merge(left._meta_nonempty, right._meta_nonempty, **kwargs)
    name = 'merge-' + tokenize(left, right, **kwargs)
    if left.npartitions == 1:
        # Broadcast the single left partition against every right partition.
        left_key = first(left.__dask_keys__())
        dsk = {(name, i): (apply, pd.merge, [left_key, right_key], kwargs)
               for i, right_key in enumerate(right.__dask_keys__())}

        if kwargs.get('right_index') or right._contains_index_name(
                kwargs.get('right_on')):
            divisions = right.divisions
        else:
            divisions = [None for _ in right.divisions]

    elif right.npartitions == 1:
        # Broadcast the single right partition against every left partition.
        right_key = first(right.__dask_keys__())
        dsk = {(name, i): (apply, pd.merge, [left_key, right_key], kwargs)
               for i, left_key in enumerate(left.__dask_keys__())}

        if kwargs.get('left_index') or left._contains_index_name(
                kwargs.get('left_on')):
            divisions = left.divisions
        else:
            divisions = [None for _ in left.divisions]
    else:
        # Fail loudly instead of hitting a NameError on `dsk` below.
        raise NotImplementedError(
            "single_partition_join requires an input with exactly "
            "one partition")

    return new_dd_object(toolz.merge(dsk, left.dask, right.dask), name,
                         meta, divisions)
开发者ID:floriango,项目名称:dask,代码行数:30,代码来源:multi.py


示例3: single_partition_join

def single_partition_join(left, right, **kwargs):
    # If the merge is performed on the index, divisions can be kept;
    # otherwise the new index will not necessarily correspond to the
    # current divisions.

    meta = left._meta_nonempty.merge(right._meta_nonempty, **kwargs)
    # NOTE: kwargs is mutated *before* tokenize so the dtype participates
    # in the task name, and before it is captured in the task tuples.
    kwargs['empty_index_dtype'] = meta.index.dtype
    name = 'merge-' + tokenize(left, right, **kwargs)
    # Broadcasting the single left partition is only valid when no
    # unmatched left rows must be produced ('inner'/'right').
    if left.npartitions == 1 and kwargs['how'] in ('inner', 'right'):
        left_key = first(left.__dask_keys__())
        dsk = {(name, i): (apply, merge_chunk, [left_key, right_key], kwargs)
               for i, right_key in enumerate(right.__dask_keys__())}

        if kwargs.get('right_index') or right._contains_index_name(
                kwargs.get('right_on')):
            divisions = right.divisions
        else:
            # Index is rebuilt by the merge, so divisions are unknown.
            divisions = [None for _ in right.divisions]

    # Mirror case: broadcast the single right partition ('inner'/'left').
    elif right.npartitions == 1 and kwargs['how'] in ('inner', 'left'):
        right_key = first(right.__dask_keys__())
        dsk = {(name, i): (apply, merge_chunk, [left_key, right_key], kwargs)
               for i, left_key in enumerate(left.__dask_keys__())}

        if kwargs.get('left_index') or left._contains_index_name(
                kwargs.get('left_on')):
            divisions = left.divisions
        else:
            divisions = [None for _ in left.divisions]
    else:
        raise NotImplementedError("single_partition_join has no fallback for invalid calls")

    graph = HighLevelGraph.from_collections(name, dsk, dependencies=[left, right])
    return new_dd_object(graph, name, meta, divisions)
开发者ID:yliapis,项目名称:dask,代码行数:33,代码来源:multi.py


示例4: has_previous_day

def has_previous_day(dates_dict, year, month, day):
    """Return the previous day found in nested ``dates_dict``.

    ``dates_dict`` is a nested mapping year -> month -> day -> value,
    with sortable keys.

    Returns a {"year", "month", "day"} dict for the previous day, or
    ``False`` when ``day`` is the first day in the structure.
    (The original docstring claimed ``None``; the code returns ``False``.)
    """
    # Check the current month for an earlier day.
    days = sorted(dates_dict[year][month].keys())
    if day != first(days):
        di = days.index(day)
        prev_day = days[di - 1]
        return {"year": year, "month": month, "day": prev_day}

    # Check the current year for an earlier month; take its last day.
    months = sorted(dates_dict[year].keys())
    if month != first(months):
        mi = months.index(month)
        prev_month = months[mi - 1]
        last_day = last(sorted(dates_dict[year][prev_month].keys()))
        return {"year": year, "month": prev_month, "day": last_day}

    # Check for an earlier year; take its last month's last day.
    years = sorted(dates_dict.keys())
    if year != first(years):
        yi = years.index(year)
        prev_year = years[yi - 1]
        prev_month = last(sorted(dates_dict[prev_year].keys()))
        last_day = last(sorted(dates_dict[prev_year][prev_month].keys()))
        return {"year": prev_year, "month": prev_month, "day": last_day}

    return False
开发者ID:jasalt,项目名称:weatherlapse,代码行数:28,代码来源:utils.py


示例5: _get_larger_chroms

def _get_larger_chroms(ref_file):
    """Retrieve larger chromosomes, avoiding the smaller ones for plotting.
    """
    from scipy.cluster.vq import kmeans, vq
    sizes = sorted(float(contig.size) for contig in ref.file_contigs(ref_file))
    # First k-means pass: split contigs into a small and a large cluster.
    # Sizes are sorted ascending, so the first partition of consecutive
    # labels is the cluster holding the smallest contigs.
    codebook, _ = kmeans(np.array(sizes), 2)
    labels, _ = vq(np.array(sizes), codebook)
    small = [size for _, size in
             tz.first(tz.partitionby(lambda pair: pair[0], zip(labels, sizes)))]
    # Second pass within the small cluster: peel off haplotype/random bins.
    codebook2, _ = kmeans(np.array(small), 2)
    labels2, _ = vq(np.array(small), codebook2)
    small2 = [size for _, size in
              tz.first(tz.partitionby(lambda pair: pair[0], zip(labels2, small)))]
    # Anything above the biggest haplotype-sized contig counts as "larger".
    cutoff = max(small2)
    return [contig.name for contig in ref.file_contigs(ref_file)
            if contig.size > cutoff]
开发者ID:Kisun,项目名称:bcbio-nextgen,代码行数:25,代码来源:cnvkit.py


示例6: str_cat_sql

def str_cat_sql(expr, lhs, rhs, **kwargs):
    """Compile string concatenation over the single columns of two selects,
    optionally joined by ``expr.sep``."""
    left = first(lhs.inner_columns)
    right = first(rhs.inner_columns)
    if expr.sep:
        combined = left + expr.sep + right
    else:
        combined = left + right
    return reconstruct_select([combined.label(expr.lhs._name)], lhs)
开发者ID:giangzuzana,项目名称:blaze,代码行数:7,代码来源:sql.py


示例7: test_live_migrate_anti_affinity

    def test_live_migrate_anti_affinity(self):
        """Verify that live migration onto a host holding a member of the
        instance's anti-affinity group fails, leaving the instance on its
        original host.
        """
        data = self.setup_affinities(self.sanity)

        # Wait until both the affinity and anti-affinity instances are up.
        affinity_instance = data["aff_instance"]
        anti_instance = data["anti_instance"]
        smog.nova.poll_status(affinity_instance, "ACTIVE")
        smog.nova.poll_status(anti_instance, "ACTIVE")

        # Locate our instance among the discovered Instance objects by
        # name.  There should be exactly one, so toolz.first of the
        # filtered stream is the one we want (works for any iterable,
        # not just indexable sequences).
        def is_target(item):
            return item.instance.name == "aa-test"

        before = toolz.first(filter(is_target, self.sanity.discover()))
        original_host = before.host

        # Attempt the live migration; it should be rejected, so the
        # instance must still be on the same host afterwards.
        anti_instance.live_migrate()
        after = toolz.first(filter(is_target, self.sanity.discover()))
        self.assertTrue(original_host.hostname == after.host.hostname)
开发者ID:arif29march,项目名称:smog,代码行数:35,代码来源:sanity.py


示例8: binop_sql

def binop_sql(t, lhs, rhs, **kwargs):
    """Apply the binary operation ``t.op`` after unwrapping either side
    from a single-column Select to its underlying column."""
    if isinstance(lhs, Select):
        assert len(lhs.c) == 1, (
            "Select cannot have more than a single column when doing"
            " arithmetic, got %r" % lhs
        )
        lhs = first(lhs.inner_columns)
    if isinstance(rhs, Select):
        assert len(rhs.c) == 1, (
            "Select cannot have more than a single column when doing"
            " arithmetic, got %r" % rhs
        )
        rhs = first(rhs.inner_columns)
    return t.op(lhs, rhs)
开发者ID:jessezwd,项目名称:blaze,代码行数:9,代码来源:sql.py


示例9: coalesce_sql_select

def coalesce_sql_select(expr, lhs, rhs, **kwargs):
    """Compile COALESCE(lhs, rhs), unwrapping whichever argument is a
    Select and rebuilding that select around the labeled result."""
    if isinstance(lhs, Select):
        source = lhs
        lhs = first(lhs.inner_columns)
    else:
        source = rhs
        rhs = first(rhs.inner_columns)
    labeled = sa.sql.functions.coalesce(lhs, rhs).label(expr._name)
    return reconstruct_select([labeled], source)
开发者ID:giangzuzana,项目名称:blaze,代码行数:9,代码来源:sql.py


示例10: compute_up

def compute_up(expr, data, **kwargs):
    """Select the column named ``expr._name`` from a SQLAlchemy select.

    Fast path: the column already exists among ``data``'s inner columns,
    so we can simply restrict the select to it.  Otherwise the column is
    computed (e.g. it is derived), and the resulting select is unified
    with ``data``'s FROMs and WHEREs.
    """
    name = expr._name
    try:
        inner_columns = list(data.inner_columns)
        names = list(c.name for c in data.inner_columns)
        # names.index raises ValueError when the column is absent.
        column = inner_columns[names.index(name)]
    except (KeyError, ValueError):
        # Column not directly present: compute it from the first inner
        # column, then merge FROM clauses and WHERE conditions so the
        # derived column stays consistent with the original query.
        single_column_select = compute(expr, first(data.inner_columns), post_compute=False, return_type="native")
        column = first(single_column_select.inner_columns)
        result = unify_froms(sa.select([column]), data.froms + single_column_select.froms)
        return result.where(unify_wheres([data, single_column_select]))
    else:
        # Success path: restrict the existing select to the one column.
        return data.with_only_columns([column])
开发者ID:kwmsmith,项目名称:blaze,代码行数:13,代码来源:sql.py


示例11: binop_sql

    def binop_sql(t, lhs, rhs, **kwargs):
        """Unwrap single-column Selects on either side, then delegate
        the binary operation to the wrapped function ``f``."""
        message = (
            'Select cannot have more than a single column when doing'
            ' arithmetic, got %r'
        )
        if isinstance(lhs, Select):
            assert len(lhs.c) == 1, message % lhs
            lhs = first(lhs.inner_columns)
        if isinstance(rhs, Select):
            assert len(rhs.c) == 1, message % rhs
            rhs = first(rhs.inner_columns)
        return f(t, lhs, rhs)
开发者ID:giangzuzana,项目名称:blaze,代码行数:15,代码来源:sql.py


示例12: port

 def port(self):
     """The port this server is listening on, discovered lazily from the
     first bound socket and cached in ``self._port``.

     Raises OSError when no sockets are bound yet.
     """
     if not self._port:
         try:
             # Ask the first bound socket for its OS-assigned port number.
             self._port = first(self._sockets.values()).getsockname()[1]
         except StopIteration:
             # first() raises StopIteration when _sockets is empty.
             raise OSError("Server has no port.  Please call .listen first")
     return self._port
开发者ID:sonlia,项目名称:distributed,代码行数:7,代码来源:core.py


示例13: _schema

 def _schema(self):
     """Datashape of the reduction result.

     Unwraps a single-field Record to its sole element type; any other
     schema is passed through unchanged.
     """
     schema = self._child.schema[0]
     if isinstance(schema, Record) and len(schema.types) == 1:
         # Single-field record: the result is just that field's type.
         result = toolz.first(schema.types)
     else:
         result = schema
     return DataShape(result)
开发者ID:testmana2,项目名称:blaze,代码行数:7,代码来源:reductions.py


示例14: test_basic

def test_basic():
    """Profile a background thread and check the sampled call tree.

    The inner functions keep their names (test_f/test_g/test_h) because
    the assertions below match them against node descriptions.
    """
    def test_g():
        time.sleep(0.01)

    def test_h():
        time.sleep(0.02)

    def test_f():
        for _ in range(100):
            test_g()
            test_h()

    worker = threading.Thread(target=test_f)
    worker.daemon = True
    worker.start()

    state = create()

    # Take 100 samples of the worker's stack, ~20ms apart.
    for _ in range(100):
        time.sleep(0.02)
        frame = sys._current_frames()[worker.ident]
        process(frame, None, state)

    assert state['count'] == 100

    # Walk down the single-child spine until the tree branches (test_f).
    node = state
    while len(node['children']) == 1:
        node = first(node['children'].values())

    assert node['count'] == 100
    assert 'test_f' in str(node['description'])
    children = node['children'].values()
    g = [child for child in children if 'test_g' in str(child['description'])][0]
    h = [child for child in children if 'test_h' in str(child['description'])][0]

    # test_h sleeps twice as long as test_g, so it should be sampled more.
    assert g['count'] < h['count']
    assert 95 < g['count'] + h['count'] <= 100
开发者ID:tomMoral,项目名称:distributed,代码行数:35,代码来源:test_profile.py


示例15: test_pre_compute_with_projection_projects_on_data_frames

def test_pre_compute_with_projection_projects_on_data_frames():
    """pre_compute on a projection should keep only the projected columns."""
    csv = CSV(example('iris.csv'))
    s = symbol('s', discover(csv))
    projected = s[['sepal_length', 'sepal_width']].distinct()
    result = pre_compute(projected, csv, comfortable_memory=10)
    expected = set(['sepal_length', 'sepal_width'])
    assert set(first(result).columns) == expected
开发者ID:abudulemusa,项目名称:blaze,代码行数:7,代码来源:test_csv_compute.py


示例16: post_compute

def post_compute(expr, query, scope=None):
    """Execute a SQLAlchemy query against the engines found in *scope*.

    If the result of compute is a SQLAlchemy query then it is likely
    that the data elements are themselves SQL objects which contain
    SQLAlchemy engines.  We find these engines and, if they are all the
    same, run the query against them and return the result.
    """
    # Bail out unless every data element is a SQLAlchemy object.
    if not all(isinstance(val, (MetaData, Engine, Table))
               for val in scope.values()):
        return query

    engines = set(filter(None, map(engine_of, scope.values())))

    if not engines:
        return query

    # All elements must share one engine; compare by URL string.
    if len(set(map(str, engines))) != 1:
        raise NotImplementedError("Expected single SQLAlchemy engine")

    engine = first(engines)

    # Perform the query on a short-lived connection.
    with engine.connect() as conn:
        rows = conn.execute(select(query)).fetchall()

    # Shape the result to match the expression's datashape.
    if isscalar(expr.dshape):
        return rows[0][0]
    if isscalar(expr.dshape.measure):
        return [row[0] for row in rows]
    return rows
开发者ID:leolujuyi,项目名称:blaze,代码行数:29,代码来源:sql.py


示例17: scalar_coerce

def scalar_coerce(rec, val):
    """Coerce *val* via the sole field type of a one-field record dshape;
    reject records with more than one field."""
    if len(rec.fields) != 1:
        raise TypeError("Trying to coerce complex datashape\n"
                "got dshape: %s\n"
                "scalar_coerce only intended for scalar values" % rec)
    # Recurse into the single field's type.
    return scalar_coerce(first(rec.types), val)
开发者ID:ChrisBg,项目名称:blaze,代码行数:7,代码来源:numbers.py


示例18: test_pre_compute_calls_lean_projection

def test_pre_compute_calls_lean_projection():
    """pre_compute should project down to the columns the expression
    actually needs (sort key plus selected column)."""
    csv = CSV(example('iris.csv'))
    s = symbol('s', discover(csv))
    expr = s.sort('sepal_length').species
    result = pre_compute(expr, csv, comfortable_memory=10)
    expected = set(['sepal_length', 'species'])
    assert set(first(result).columns) == expected
开发者ID:abudulemusa,项目名称:blaze,代码行数:7,代码来源:test_csv_compute.py


示例19: execute_string_group_by_find_in_set

def execute_string_group_by_find_in_set(op, needle, haystack, **kwargs):
    """Execute find-in-set when the haystack may hold grouped series.

    `haystack` could contain series, series groupbys, or scalars;
    mixing series and series groupbys is not allowed.
    """
    series_in_haystack = [
        type(piece)
        for piece in haystack
        if isinstance(piece, (pd.Series, SeriesGroupBy))
    ]

    # All-scalar haystack: a plain positional lookup suffices.
    if not series_in_haystack:
        return ibis.util.safe_index(haystack, needle)

    try:
        # Single-element unpacking raises ValueError when more than one
        # distinct entry survives the frozenset, i.e. types were mixed.
        # NOTE(review): series_in_haystack already contains *types*, so
        # map(type, ...) yields their metaclasses — confirm this check
        # behaves as intended upstream.
        collection_type, = frozenset(map(type, series_in_haystack))
    except ValueError:
        raise ValueError('Mixing Series and SeriesGroupBy is not allowed')

    # Work on the underlying Series of each grouped piece (`.obj`),
    # leaving plain pieces untouched.
    pieces = haystack_to_series_of_lists(
        [getattr(piece, 'obj', piece) for piece in haystack]
    )

    # Map each row's list through safe_index with the needle pre-bound.
    result = pieces.map(toolz.flip(ibis.util.safe_index)(needle))
    if issubclass(collection_type, pd.Series):
        return result

    assert issubclass(collection_type, SeriesGroupBy)

    # Re-apply the grouping taken from the first grouped haystack piece.
    return result.groupby(
        toolz.first(
            piece.grouper.groupings
            for piece in haystack
            if hasattr(piece, 'grouper')
        )
    )
开发者ID:cloudera,项目名称:ibis,代码行数:34,代码来源:strings.py


示例20: test_context_manager

    def test_context_manager(self, dt_tb, dt_data):
        """Ensure the Data context manager auto-closes its resources."""
        with Data("{0}::dt".format(dt_tb)) as t:
            resource = first(t._resources().values())
            # Inside the with-block the resource must be open...
            assert resource.isopen
        # ...and closed once the block exits.
        assert not resource.isopen
开发者ID:abudulemusa,项目名称:blaze,代码行数:7,代码来源:test_pytables.py



注:本文中的toolz.first函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


鲜花

握手

雷人

路过

鸡蛋
该文章已有0人参与评论

请发表评论

全部评论

专题导读
上一篇:
Python toolz.flip函数代码示例发布时间:2022-05-27
下一篇:
Python toolz.filter函数代码示例发布时间:2022-05-27
热门推荐
阅读排行榜

扫描微信二维码

查看手机版网站

随时了解更新最新资讯

139-2527-9053

在线客服(服务时间 9:00~18:00)

在线QQ客服
地址:深圳市南山区西丽大学城创智工业园
电邮:jeky_zhao#qq.com
移动电话:139-2527-9053

Powered by 互联科技 X3.4© 2001-2023 极客世界.|Sitemap