This article collects and summarizes typical usage examples of the toolz.curried.filter function in Python. If you have been wondering what the Python filter function does, how to call it, or what real-world uses of it look like, the curated code examples here may help.
The section below presents 20 code examples of the filter function, drawn from open-source projects and ordered by popularity by default.
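Before the project examples, here is a minimal standalone sketch (not taken from any project below) of what distinguishes toolz.curried.filter from the built-in filter: given only a predicate it returns a partially applied function, which is what lets it slot into pipe and compose chains.

from toolz import pipe
from toolz.curried import filter, map  # curried variants shadow the builtins

# With only the predicate supplied, the curried filter returns a function
# that still expects the sequence -- handy inside pipelines.
keep_even = filter(lambda x: x % 2 == 0)

result = pipe(
    range(10),
    keep_even,               # lazily yields 0, 2, 4, 6, 8
    map(lambda x: x * x),    # lazily yields 0, 4, 16, 36, 64
    list,                    # materialize the iterator
)
assert result == [0, 4, 16, 36, 64]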
Example 1: count_predictions
def count_predictions(filtered_predictions_list, target_label):
    return pipe(
        filtered_predictions_list,
        filter(lambda (_, x): x == target_label),
        list,
        len
    )
Developer: RamonAranda | Project: ConfusionMatrix | Lines: 7 | Source: _confusion_matrix_generator.py
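A note on portability: lambda (_, x): ... uses Python 2 tuple-parameter unpacking, which is a syntax error on Python 3. A hedged Python 3 rewrite of the same helper (not from the original project) indexes the pair instead:

from toolz import pipe
from toolz.curried import filter

def count_predictions(filtered_predictions_list, target_label):
    # each item is a 2-tuple; the original compares its second element,
    # so index explicitly instead of unpacking in the lambda signature
    return pipe(
        filtered_predictions_list,
        filter(lambda pair: pair[1] == target_label),
        list,
        len,
    )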
Example 2: rolling_fit_opt_weights
def rolling_fit_opt_weights(df, opt_weights_func, look_ahead_per):
    """applies opt_weights_func to rolling window on pandas df"""
    num_rows = df.shape[0]
    p = pipe(xrange(num_rows),
             filter(lambda x: x + look_ahead_per < num_rows),
             map(lambda x: {df.index[x]: opt_weights_func(df.iloc[x:x+look_ahead_per+1])}))
    return pd.DataFrame(merge(p)).T
Developer: rhouck | Project: nn_port | Lines: 7 | Source: opt_weights.py
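Two things worth noting about this example: xrange makes it Python 2 code (Python 3 would use range), and the pipeline yields one single-entry dict per window, which toolz.merge then collapses into one dict before the transposed DataFrame is built. A tiny standalone illustration of that merge step, with made-up keys and values:

from toolz import merge

windows = ({'day-%d' % i: {'weight': i / 10}} for i in range(1, 4))
print(merge(windows))
# {'day-1': {'weight': 0.1}, 'day-2': {'weight': 0.2}, 'day-3': {'weight': 0.3}}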
Example 3: fancify_summary
def fancify_summary(expr):
    """ Separate a complex summary into two pieces

    Helps pandas compute_by on summaries

    >>> t = symbol('t', 'var * {x: int, y: int}')
    >>> one, two, three = fancify_summary(summary(a=t.x.sum(), b=t.x.sum() + t.y.count() - 1))

    A simpler summary with only raw reductions

    >>> one
    summary(x_sum=sum(t.x), y_count=count(t.y))

    A mapping of those names to new leaves to use in another computation

    >>> two  # doctest: +SKIP
    {'x_sum': x_sum, 'y_count': y_count}

    A mapping of computations to do for each column

    >>> three  # doctest: +SKIP
    {'a': x_sum, 'b': (x_sum + y_count) - 1}

    In this way, ``compute_by`` is able to do simple pandas reductions using
    groups.agg(...) and then do columnwise arithmetic afterwards.
    """
    seen_names.clear()
    name_dict.clear()
    exprs = pipe(expr.values,
                 map(Expr._traverse),
                 concat,
                 filter(lambda x: isinstance(x, Reduction)),
                 set)
    one = summary(**dict((_name(expr), expr) for expr in exprs))
    two = dict((_name(expr), symbol(_name(expr), datashape.var * expr.dshape))
               for expr in exprs)
    d = dict((expr, two[_name(expr)]) for expr in exprs)
    three = dict((name, value._subs(d)) for name, value in zip(expr.names, expr.values))
    return one, two, three
Developer: arvindchari88 | Project: newGitTest | Lines: 34 | Source: pandas.py
Example 4: compute_down
def compute_down(expr, data, **kwargs):
    """ Compile a blaze expression to a sparksql expression"""
    leaves = expr._leaves()

    # make sure we only have a single leaf node
    if len(leaves) != 1:
        raise ValueError('Must compile from exactly one root database')

    leaf, = leaves

    # field expressions on the database are Field instances with a record
    # measure whose immediate child is the database leaf
    tables = pipe(expr._subterms(), filter(istable(leaf)), list)

    # raise if we don't have tables in our database
    if not tables:
        raise ValueError('Expressions not referencing a table cannot be '
                         'compiled')

    # make new symbols for each table
    new_leaves = [symbol(t._name, t.dshape) for t in tables]

    # sub them in the expression
    expr = expr._subs(dict(zip(tables, new_leaves)))

    # compute using sqlalchemy
    scope = dict(zip(new_leaves, map(make_sqlalchemy_table, tables)))
    query = compute(expr, scope)

    # interpolate params
    compiled = literalquery(query, dialect=HiveDialect())
    return data.sql(str(compiled))
Developer: Will-So | Project: blaze | Lines: 32 | Source: sparksql.py
Example 5: get_tenant_metrics
def get_tenant_metrics(tenant_id, scaling_groups, servers, _print=False):
    """
    Produce per-group metrics for all the groups of a tenant

    :param list scaling_groups: Tenant's scaling groups as dict from CASS
    :param dict servers: Servers from Nova grouped based on scaling group ID.
                         Expects only ACTIVE or BUILD servers
    :return: ``list`` of (tenantId, groupId, desired, actual) GroupMetrics
    """
    if _print:
        print('processing tenant {} with groups {} and servers {}'.format(
            tenant_id, len(scaling_groups), len(servers)))
    metrics = []
    for group in scaling_groups:
        group_id = group['groupId']
        create_metrics = partial(GroupMetrics, tenant_id,
                                 group_id, group['desired'])
        if group_id not in servers:
            metrics.append(create_metrics(0, 0))
        else:
            active = len(list(filter(lambda s: s['status'] == 'ACTIVE',
                                     servers[group_id])))
            metrics.append(
                create_metrics(active, len(servers[group_id]) - active))
    return metrics
Developer: glyph | Project: otter | Lines: 25 | Source: metrics.py
Example 6: ccds_to_bed
def ccds_to_bed(ccds_stream):
    """Convert CCDS dump to Chanjo-style BED stream.

    Main entry point for default Chanjo converter (ccds). It converts
    a sorted (start, chrom) CCDS database to the Chanjo BED-format.

    Args:
        ccds_stream (file): file handle to read CCDS lines from

    Yields:
        Interval: interval with merged block and superblock ids
    """
    return pipe(
        ccds_stream,
        filter(grep('Public')),                           # filter out Public tx
        map(text_type.rstrip),                            # strip \n and spaces
        map(split(sep='\t')),                             # split into list
        map(extract_intervals),                           # convert to Interval
        concat,                                           # flatten
        map(rename_sex_interval),                         # rename sex contigs
        partial(lazy_groupby, key=attrgetter('contig')),  # group by contig
        pluck(1),                                         # extract second item
        map(groupby(attrgetter('name'))),                 # non-lazy group by id
        map(valmap(merge_related_elements)),              # group intervals
        map(itervalues),                                  # extract values
        map(partial(sorted, key=attrgetter('start'))),    # sort by start pos
        concat                                            # flatten
    )
Developer: dnil | Project: chanjo | Lines: 28 | Source: core.py
Example 7: get_groups
def get_groups(parsed, store, conf):
    """
    Return groups based on argument provided

    :param Namespace parsed: arguments parsed
    :param store: Otter scaling group collection
    :param dict conf: config
    :return: Deferred fired with list of {"tenantId": .., "groupId": ..} dict
    """
    log = mock_log()
    if parsed.group:
        groups = [g.split(":") for g in parsed.group]
        return succeed(
            [{"tenantId": tid, "groupId": gid} for tid, gid in groups])
    elif parsed.all:
        d = store.get_all_valid_groups()
    elif parsed.tenant_id:
        d = get_groups_of_tenants(log, store, parsed.tenant_id)
    elif parsed.disabled_tenants:
        non_conv_tenants = conf["non-convergence-tenants"]
        d = store.get_all_valid_groups()
        d.addCallback(
            filter(lambda g: g["tenantId"] not in set(non_conv_tenants)))
        d.addCallback(list)
    elif parsed.conf_conv_tenants:
        d = get_groups_of_tenants(log, store, conf["convergence-tenants"])
    else:
        raise SystemExit("Unexpected group selection")
    return d
Developer: rackerlabs | Project: otter | Lines: 30 | Source: trigger_convergence.py
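The parsed.disabled_tenants branch works because filter(pred) with only the predicate is itself a one-argument callable, so it can be passed directly to Deferred.addCallback. Stripped of the Twisted machinery, the shape is roughly this (a sketch with made-up tenant ids):

from toolz.curried import filter

keep_convergence = filter(lambda g: g["tenantId"] not in {"t-disabled"})
groups = [{"tenantId": "t-1"}, {"tenantId": "t-disabled"}, {"tenantId": "t-2"}]
print(list(keep_convergence(groups)))
# [{'tenantId': 't-1'}, {'tenantId': 't-2'}]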
Example 8: test_pipeline_example
def test_pipeline_example():
    from functools import reduce
    import operator as op

    data = range(100)

    result1 = math.sqrt(
        reduce(
            op.add,
            builtins.map(
                lambda x: x ** 2.0,
                builtins.filter(
                    lambda x: x % 2 == 0,
                    data,
                )
            )
        )
    )

    from toolz.curried import filter, map, reduce
    from flowly.tz import chained

    transform = chained(
        filter(lambda x: x % 2 == 0),
        map(lambda x: x ** 2.0),
        reduce(op.add),
        math.sqrt,
    )
    result2 = transform(data)

    assert result1 == result2
Developer: chmp | Project: flowly | Lines: 31 | Source: test_examples.py
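For comparison, the same left-to-right pipeline can be written with toolz alone via toolz.pipe, without flowly's chained. A sketch using only the curried imports already shown in the test (result3 is a hypothetical name):

import math
import operator as op

from toolz import pipe
from toolz.curried import filter, map, reduce

data = range(100)
result3 = pipe(
    data,
    filter(lambda x: x % 2 == 0),   # keep even numbers
    map(lambda x: x ** 2.0),        # square them
    reduce(op.add),                 # sum of squares
    math.sqrt,
)
# result3 equals result1/result2 from the test above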
Example 9: functional
def functional():
    return count_by(itemgetter('hour'),
                    map(json.loads,
                        filter(None,
                               mapcat(lambda output: output.strip().split('\n'),
                                      map(lambda date: logs[date.strftime('%Y/%m/%d')],
                                          map(lambda days_ago: today - timedelta(days=days_ago),
                                              range(1, days_of_logs + 1)))))))
Developer: berrytj | Project: bookends | Lines: 8 | Source: example.py
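The filter(None, ...) call keeps the built-in convention: a None predicate drops falsy items, here the blank lines produced by splitting the log output. The same works in curried form with the predicate alone, for example:

from toolz.curried import filter

drop_falsy = filter(None)
print(list(drop_falsy(['{"hour": 1}', '', None, '{"hour": 2}'])))
# ['{"hour": 1}', '{"hour": 2}']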
Example 10: __dir__
def __dir__(self):
    result = dir(type(self))
    if isrecord(self.dshape.measure) or isinstance(self.dshape.measure, datashape.Map) and self.fields:
        result.extend(map(valid_identifier, self.fields))
    result.extend(toolz.merge(schema_methods(self.dshape.measure),
                              dshape_methods(self.dshape)))
    return sorted(set(filter(isvalid_identifier, result)))
Developer: postelrich | Project: blaze | Lines: 8 | Source: expressions.py
Example 11: load_tgvH_file
def load_tgvH_file():
    json_file_name = 'tgvH.json'
    nodes = read_ast_json_file(json_file_name)
    variables_definitions = filter(
        lambda node: node['type'].startswith('variable_'),
        nodes,
    )
    return variables_definitions
Developer: openfisca | Project: calculette-impots-m-language-parser | Lines: 8 | Source: json_ast_to_data.py
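Because filter is called here with both the predicate and the sequence, it evaluates immediately and returns an ordinary lazy, one-shot iterator rather than a curried function. A hypothetical caller that needs to traverse the definitions more than once would materialize the result first:

definitions = list(load_tgvH_file())   # materialize the lazy filter object once
print(len(definitions))                # the bare iterator could only be consumed once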
Example 12: get_clb_contents
def get_clb_contents():
    """
    Get Rackspace Cloud Load Balancer contents as list of `CLBNode`. CLB
    health monitor information is also returned as a pmap of :obj:`CLB` objects
    mapped on LB ID.

    :return: Effect of (``list`` of :obj:`CLBNode`, `pmap` of :obj:`CLB`)
    :rtype: :obj:`Effect`
    """
    # If we get a CLBNotFoundError while fetching feeds, we should throw away
    # all nodes related to that load balancer, because we don't want to act on
    # data that we know is invalid/outdated (for example, if we can't fetch a
    # feed because CLB was deleted, we don't want to say that we have a node in
    # DRAINING with draining time of 0; we should just say that the node is
    # gone).
    def gone(r):
        return catch(CLBNotFoundError, lambda exc: r)

    lb_ids = [lb['id'] for lb in (yield _retry(get_clbs()))]
    node_reqs = [_retry(get_clb_nodes(lb_id).on(error=gone([])))
                 for lb_id in lb_ids]
    healthmon_reqs = [
        _retry(get_clb_health_monitor(lb_id).on(error=gone(None)))
        for lb_id in lb_ids]
    all_nodes_hms = yield parallel(node_reqs + healthmon_reqs)
    all_nodes, hms = all_nodes_hms[:len(lb_ids)], all_nodes_hms[len(lb_ids):]
    lb_nodes = {
        lb_id: [CLBNode.from_node_json(lb_id, node)
                for node in nodes]
        for lb_id, nodes in zip(lb_ids, all_nodes)}
    clbs = {
        str(lb_id): CLB(bool(health_mon))
        for lb_id, health_mon in zip(lb_ids, hms) if health_mon is not None}
    draining = [n for n in concat(lb_nodes.values())
                if n.description.condition == CLBNodeCondition.DRAINING]
    feeds = yield parallel(
        [_retry(get_clb_node_feed(n.description.lb_id, n.node_id).on(
            error=gone(None)))
         for n in draining]
    )
    nodes_to_feeds = dict(zip(draining, feeds))
    deleted_lbs = set([
        node.description.lb_id
        for (node, feed) in nodes_to_feeds.items() if feed is None])

    def update_drained_at(node):
        feed = nodes_to_feeds.get(node)
        if node.description.lb_id in deleted_lbs:
            return None
        if feed is not None:
            node.drained_at = extract_clb_drained_at(feed)
        return node

    nodes = map(update_drained_at, concat(lb_nodes.values()))
    yield do_return((
        list(filter(bool, nodes)),
        pmap(keyfilter(lambda k: k not in deleted_lbs, clbs))))
Developer: rackerlabs | Project: otter | Lines: 58 | Source: gathering.py
Example 13: visit_ternary_operator
def visit_ternary_operator(node):
    return pipe([
        visit_node(node['value_if_true']),
        visit_node(node['condition']),
        visit_node(node['value_if_false']) if 'value_if_false' in node else None,
        ],
        filter(None),
        concat,
        )
Developer: openfisca | Project: calculette-impots-m-language-parser | Lines: 9 | Source: dependencies_visitors.py
Example 14: discover_jsonlines
def discover_jsonlines(j, n=10, encoding='utf-8', **kwargs):
    with json_lines(j.path, encoding=encoding) as lines:
        data = pipe(lines, filter(nonempty), map(json.loads), take(n), list)

    if len(data) < n:
        ds = discover(data)
    else:
        ds = var * discover(data).subshape[0]
    return date_to_datetime_dshape(ds)
Developer: mrocklin | Project: into | Lines: 9 | Source: json.py
Example 15: piped
def piped():
    return (_| range(1, days_of_logs + 1)
             | map(lambda days_ago: today - timedelta(days=days_ago))
             | map(lambda date: logs[date.strftime('%Y/%m/%d')])
             | mapcat(lambda output: output.strip().split('\n'))
             | filter(None)
             | map(json.loads)
             | count_by(itemgetter('hour'))
             |_)
Developer: berrytj | Project: bookends | Lines: 9 | Source: example.py
Example 16: __dir__
def __dir__(self):
    result = dir(type(self))
    if isrecord(self.dshape.measure) and self.fields:
        result.extend(list(map(valid_identifier, self.fields)))

    d = toolz.merge(schema_methods(self.dshape.measure),
                    dshape_methods(self.dshape))
    result.extend(list(d))

    return sorted(set(filter(isvalid_identifier, result)))
Developer: cournape | Project: blaze | Lines: 10 | Source: expressions.py
Example 17: parse_violations
def parse_violations(do_request):
    """"""
    logger.info('Parsing violations')

    return toolz.compose(
        # filter out meaningless values
        curried.filter(lambda x: x not in ('IME PREDPISA', '')),
        # extract data from each row
        curried.map(lambda tr: pq(tr).find('td').eq(1).text()),
        # get all rows in tables
        curried.mapcat(lambda page: page('table.MsoNormalTable tr')),
        # get all subpages
        curried.map(do_request),
        # let's skip empty urls/strings
        curried.filter(lambda a: a),
        # get menu links
        curried.map(lambda a: pq(a).attr('href')),
        # get menu elements
        lambda doc: doc('.moduletable_menu a'),
        # get main page
        do_request,
    )(VIOLATION_URL + '/index.php')
Developer: domenkozar | Project: zakonodajni-monitor-parser | Lines: 22 | Source: parsers.py
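toolz.compose applies its arguments right to left, which is why the steps above read bottom-up: the last argument, do_request, runs first and the top curried.filter runs last. A minimal illustration of that ordering with unrelated toy data:

from toolz import compose
from toolz import curried

pipeline = compose(
    list,                              # 3. materialize
    curried.filter(lambda s: s),       # 2. drop empty strings
    curried.map(str.strip),            # 1. strip whitespace first
)
assert pipeline(['  a ', '   ', ' b']) == ['a', 'b']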
Example 18: get_label_predictions
def get_label_predictions(predictions_list, all_labels, label):
    def count_predictions(filtered_predictions_list, target_label):
        return pipe(
            filtered_predictions_list,
            filter(lambda (_, x): x == target_label),
            list,
            len
        )

    filtered_predictions = pipe(
        predictions_list,
        filter(lambda (x, _): x == label)
    )
    count_predictions_partial = \
        partial(count_predictions, list(filtered_predictions))
    return pipe(
        all_labels,
        map(lambda target:
            {target: count_predictions_partial(target)}),
        map(pmap),
        merge,
        pmap
    )
Developer: RamonAranda | Project: ConfusionMatrix | Lines: 22 | Source: _confusion_matrix_generator.py
Example 19: process
def process(text):
    """ Replace failures in docstring with results """
    parts = pipe(text, parser.parse,
                       filter(None),
                       map(separate_fence),
                       concat, list)

    scope = dict()  # scope of variables in our executed environment
    state = dict()  # state of pymarkdown traversal

    out_parts = list()
    for part in parts:
        out, scope, state = step(part, scope, state)
        out_parts.extend(out)

    head = '\n'.join(sorted(state.get('headers', set())))
    body = pipe(out_parts, map(render_part),
                           filter(None),
                           '\n'.join)
    foot = '\n\n'.join(state.get('footers', []))

    return '\n\n'.join([head, body, foot]).strip()
Developer: Dannnno | Project: pymarkdown | Lines: 22 | Source: core.py
Example 20: get_scaling_group_servers
def get_scaling_group_servers(tenant_id, authenticator, service_name, region,
                              server_predicate=None, clock=None):
    """
    Return tenant's servers that belong to a scaling group as
    {group_id: [server1, server2]} ``dict``. No specific ordering is guaranteed

    :param server_predicate: `callable` taking single server as arg and returns True
                             if the server should be included, False otherwise
    """

    def has_group_id(s):
        return 'metadata' in s and 'rax:auto_scaling_group_id' in s['metadata']

    def group_id(s):
        return s['metadata']['rax:auto_scaling_group_id']

    server_predicate = server_predicate if server_predicate is not None else lambda s: s
    servers_apply = compose(groupby(group_id),
                            filter(server_predicate),
                            filter(has_group_id))

    d = get_all_server_details(tenant_id, authenticator, service_name, region, clock=clock)
    d.addCallback(servers_apply)
    return d
Developer: zancas | Project: otter | Lines: 22 | Source: convergence.py
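Because compose also runs right to left here, servers are first filtered for the scaling-group metadata key, then filtered by the caller's predicate, and only then grouped. An illustration with made-up server payloads (not real Nova data):

from toolz import compose
from toolz.curried import filter, groupby

servers = [
    {'id': 'a', 'metadata': {'rax:auto_scaling_group_id': 'g1'}},
    {'id': 'b', 'metadata': {}},
    {'id': 'c', 'metadata': {'rax:auto_scaling_group_id': 'g2'}},
]

group_of = lambda s: s['metadata']['rax:auto_scaling_group_id']
has_group = lambda s: 'rax:auto_scaling_group_id' in s.get('metadata', {})

servers_apply = compose(groupby(group_of), filter(has_group))
print(servers_apply(servers))
# {'g1': [{'id': 'a', ...}], 'g2': [{'id': 'c', ...}]}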
Note: The toolz.curried.filter examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. Consult each project's License before distributing or reusing the code, and do not repost without permission.