This article collects and summarizes typical usage examples of the toolz.pipe function in Python. If you have been wondering how exactly pipe is used, or what real-world pipe code looks like, the hand-picked examples below may help.
The following shows 20 code examples of the pipe function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
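Before the examples, a minimal sketch of what pipe does (not taken from any of the projects below): pipe(x, f, g) threads x through each function in turn, i.e. it computes g(f(x)).

from toolz import pipe

# pipe threads a value through single-argument functions, left to right:
# pipe(x, f, g) == g(f(x))
result = pipe('3', int, lambda n: n + 1, str)  # '3' -> 3 -> 4 -> '4'
assert result == '4'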
Example 1: test_parses_figures_to_superpositions
def test_parses_figures_to_superpositions():
    "confirm figures yield expected superpositions"
    superpositions = (Superpositions.of_valid_figures(), Superpositions.of_flawed_figures())
    expected_superpositions = pipe(superpositions, concat, tuple)
    figures = (Figures.valid(), Figures.flawed())
    found_superpositions = pipe(figures, concat, superpositions_from_figures, tuple)
    assert expected_superpositions == found_superpositions
Author: gJigsaw | Project: KataBankOCR | Lines: 7 | Source: test_superpositions_from_figures.py
Example 2: parser
def parser(filename, *args, **kwargs):
    g = nx.DiGraph()
    tz.pipe(filename, c_open(mode='r'),
            c.map(str.strip),
            c.map(c_split(sep=',')),
            g.add_edges_from)
    return g
Author: jni | Project: prin | Lines: 7 | Source: edge_pairs_csv.py
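In the example above, c_open and c_split appear to be project-defined curried wrappers around open and str.split. A self-contained sketch of the same pattern, using only networkx and toolz.curried and assuming a hypothetical file of 'src,dst' lines, might look like:

import networkx as nx
import toolz as tz
from toolz import curried as c

def parse_edge_list(filename):
    # Build a directed graph from a file with one "src,dst" pair per line.
    g = nx.DiGraph()
    tz.pipe(filename,
            open,                                 # filename -> iterable of lines
            c.map(str.strip),                     # drop trailing newlines
            c.map(lambda line: line.split(',')),  # 'a,b' -> ['a', 'b']
            g.add_edges_from)                     # consume the stream into the graph
    return g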
Example 3: process
def process(paths, load_, transform_, filter_, sink_):
    """ Generic pipeline

    :param paths: input paths
    :param load_: data loading function
    :param transform_: transformation function
    :param filter_: filter function
    :param sink_: output function
    :return:
    """
    for path in paths:
        pipe(path, load_, transform_, filter_, sink_)
Author: eliasah | Project: airship-convert | Lines: 12 | Source: __init__.py
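A hedged usage sketch for this generic pipeline; the stage functions and input paths below are illustrative only, not part of the original project:

import json
from toolz import pipe  # process() from the example above must also be in scope

def load_json(path):
    with open(path) as f:
        return json.load(f)          # assume each file holds a list of dicts

def add_seen_flag(records):
    return [dict(r, seen=True) for r in records]

def keep_active(records):
    return [r for r in records if r.get('active')]

# hypothetical input files; each stage is a plain one-argument function
process(['a.json', 'b.json'], load_json, add_seen_flag, keep_active, print)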
Example 4: count_predictions
def count_predictions(filtered_predictions_list, target_label):
    return pipe(
        filtered_predictions_list,
        # Python 3 dropped tuple-unpacking lambdas, so index the pair instead
        # (the original read `lambda (_, x): x == target_label`).
        filter(lambda pair: pair[1] == target_label),
        list,
        len
    )
Author: RamonAranda | Project: ConfusionMatrix | Lines: 7 | Source: _confusion_matrix_generator.py
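A quick, hypothetical check of the helper (with count_predictions and the curried toolz filter in scope, as the original module presumably has):

from toolz import pipe
from toolz.curried import filter

predictions = [(1, 'cat'), (2, 'dog'), (3, 'cat')]  # made-up (id, label) pairs
assert count_predictions(predictions, 'cat') == 2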
Example 5: ghost_internal
def ghost_internal(x, axes):
    """ Share boundaries between neighboring blocks

    Parameters
    ----------
    x: da.Array
        A dask array
    axes: dict
        The size of the shared boundary per axis

    The axes dict informs how many cells to overlap between neighboring blocks
    {0: 2, 2: 5} means share two cells in 0 axis, 5 cells in 2 axis
    """
    dims = list(map(len, x.blockdims))
    expand_key2 = partial(expand_key, dims=dims)
    interior_keys = pipe(x._keys(), flatten,
                         map(expand_key2), map(flatten),
                         concat, list)
    interior_slices = dict((k, fractional_slice(k, axes))
                           for k in interior_keys)

    shape = (3,) * x.ndim
    name = next(ghost_names)
    ghost_blocks = dict(((name,) + k[1:],
                         (rec_concatenate, (concrete, expand_key2(k))))
                        for k in interior_keys)
    blockdims = [[bds[0] + axes.get(i, 0)]
                 + [bd + axes.get(i, 0) * 2 for bd in bds[1:-1]]
                 + [bds[-1] + axes.get(i, 0)]
                 for i, bds in enumerate(x.blockdims)]
    return Array(merge(interior_slices, ghost_blocks, x.dask),
                 name, blockdims=blockdims)
Author: kastnerkyle | Project: dask | Lines: 35 | Source: ghost.py
Example 6: compute_up
def compute_up(expr, data, **kwargs):
    if not valid_grouper(expr):
        raise TypeError("Grouper must have a non-nested record or one "
                        "dimensional collection datashape, "
                        "got %s of type %r with dshape %s" %
                        (expr.grouper, type(expr.grouper).__name__, expr.dshape))

    s = alias_it(data)

    if valid_reducer(expr.apply):
        reduction = compute(expr.apply, s, post_compute=False)
    else:
        raise TypeError('apply must be a Summary expression')

    grouper = get_inner_columns(compute(expr.grouper, s, post_compute=False))
    reduction_columns = pipe(reduction.inner_columns,
                             map(get_inner_columns),
                             concat)
    columns = list(unique(chain(grouper, reduction_columns)))
    if (not isinstance(s, sa.sql.selectable.Alias) or
            (hasattr(s, 'froms') and isinstance(s.froms[0],
                                                sa.sql.selectable.Join))):
        assert len(s.froms) == 1, 'only a single FROM clause supported for now'
        from_obj, = s.froms
    else:
        from_obj = None

    return reconstruct_select(columns,
                              getattr(s, 'element', s),
                              from_obj=from_obj,
                              group_by=grouper)
Author: earney | Project: blaze | Lines: 31 | Source: sql.py
Example 7: main
def main():
    transforms = [
        t.parentdir_expand,
        t.unambiguous_path,
        t.physical_path
    ]
    print(pipe(sys.argv[1], *transforms))
Author: digwanderlust | Project: pathlt | Lines: 7 | Source: __main__.py
Example 8: freq
def freq(tokenset):
    """
    Find the number of occurrences of each value in 'tokenset'.
    """
    return tlz.pipe(tokenset,
                    tlz.frequencies,
                    dict.items)
Author: steven-cutting | Project: SimpleTokenizer | Lines: 7 | Source: utils.py
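For instance (the exact view type returned by dict.items depends on the Python version, so the result is wrapped in dict() here):

>>> dict(freq(['to', 'be', 'or', 'not', 'to', 'be']))
{'to': 2, 'be': 2, 'or': 1, 'not': 1}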
Example 9: streaming_pca
def streaming_pca(samples, n_components=2, batch_size=50):
    ipca = decomposition.IncrementalPCA(n_components=n_components,
                                        batch_size=batch_size)
    _ = list(tz.pipe(samples, curried.partition(batch_size),
                     curried.map(np.array),
                     curried.map(ipca.partial_fit)))
    return ipca
Author: jeromeku | Project: streaming-talk | Lines: 7 | Source: session.py
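A usage sketch under assumed imports (numpy, scikit-learn, toolz); the sample stream and array shapes are illustrative:

import numpy as np
import toolz as tz
from toolz import curried
from sklearn import decomposition

samples = (np.random.rand(10) for _ in range(500))  # stream of 10-d vectors
ipca = streaming_pca(samples, n_components=2, batch_size=50)
reduced = ipca.transform(np.random.rand(5, 10))     # project new data to 2-d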
Example 10: __get_all_metrics_for_each_class
def __get_all_metrics_for_each_class(self):
    def __get_all_metrics_for_class(confusion_table):
        return pmap({
            str(confusion_table.get_class_name()): pmap({
                "Accuracy": confusion_table.accuracy,
                "Precision": confusion_table.precision,
                "Recall": confusion_table.recall,
                "Specificity": confusion_table.specificity,
                "F1score": confusion_table.f1score,
                "Fall Out": confusion_table.fall_out,
                "Miss Rate": confusion_table.miss_rate,
                "False Discovery Rate": confusion_table.FDR,
                "False Omission Rate": confusion_table.FOR,
                "Negative Predictive Value": confusion_table.NPV,
                "Positive Likelihood Ratio": confusion_table.PLR,
                "Negative Likelihood Ratio": confusion_table.NLR,
                "Diagnostic Odds Ratio": confusion_table.DOR,
            })
        })

    return pipe(
        self.__confusion_tables,
        itervalues,
        map(__get_all_metrics_for_class),
        reduce(lambda x, y: x + y),
    )
Author: RamonAranda | Project: ConfusionMatrix | Lines: 25 | Source: _confusion_matrix.py
Example 11: gender_from_bam
def gender_from_bam(bam_path, prefix=''):
    """Predict the gender from a BAM alignment file.

    Args:
        bam_path (path): path to a BAM alignment file
        prefix (str, optional): string to prefix to 'X', 'Y'

    Returns:
        Gender: tuple of X coverage, Y coverage, and sex prediction

    Examples:
        >>> gender_from_bam('alignment.bam', prefix='chr')
        Gender(x_coverage=123.31, y_coverage=0.13, sex='female')
    """
    # setup: connect to a BAM file
    bam = BamFile(bam_path)

    # step 0: fake some BED interval rows (already 1,1-based!)
    fake_bed_rows = [("%sX" % prefix, 1, 59373566),
                     ("%sY" % prefix, 69362, 11375310)]

    # step 1: run the pipeline
    sequence = pipe(
        fake_bed_rows,
        map(lambda interval: bam(*interval)),
        map(average)
    )

    # step 2: make the prediction
    x_coverage, y_coverage = list(sequence)
    sex = predict_gender(x_coverage, y_coverage)
    return Gender(x_coverage, y_coverage, sex)
Author: dnil | Project: chanjo | Lines: 32 | Source: core.py
Example 12: ccds_to_bed
def ccds_to_bed(ccds_stream):
    """Convert CCDS dump to Chanjo-style BED stream.

    Main entry point for default Chanjo converter (ccds). It converts
    a sorted (start, chrom) CCDS database to the Chanjo BED-format.

    Args:
        ccds_stream (file): file handle to read CCDS lines from

    Yields:
        Interval: interval with merged block and superblock ids
    """
    return pipe(
        ccds_stream,
        filter(grep('Public')),       # keep only 'Public' tx
        map(text_type.rstrip),        # strip \n and spaces
        map(split(sep='\t')),         # split into list
        map(extract_intervals),       # convert to Interval
        concat,                       # flatten
        map(rename_sex_interval),     # rename sex contigs
        partial(lazy_groupby, key=attrgetter('contig')),  # group by contig
        pluck(1),                     # extract second item
        map(groupby(attrgetter('name'))),         # non-lazy group by id
        map(valmap(merge_related_elements)),      # group intervals
        map(itervalues),              # extract values
        map(partial(sorted, key=attrgetter('start'))),  # sort by start pos
        concat                        # flatten
    )
Author: dnil | Project: chanjo | Lines: 28 | Source: core.py
Example 13: test__filter_stopwords
def test__filter_stopwords(tokenset, count):
    assert(tlz.pipe(tokenset,
                    utils.filter_stopwords,
                    list,
                    len,
                    lambda length: length == count,
                    ))
Author: steven-cutting | Project: SimpleTokenizer | Lines: 7 | Source: test_utils.py
Example 14: alpino
def alpino(doc, output="raw"):
    """Wrapper around the Alpino (dependency) parser for Dutch.

    Expects an environment variable ALPINO_HOME to point at
    the Alpino installation dir.

    The script uses the 'dependencies' end_hook to generate lemmata and
    the dependency structure.

    Parameters
    ----------
    output : string
        If 'raw', returns the raw output from Alpino itself.
        If 'saf', returns a SAF dictionary.

    References
    ----------
    `Alpino homepage <http://www.let.rug.nl/vannoord/alp/Alpino/>`_
    """
    from ._alpino import tokenize, parse_raw, interpret_parse

    try:
        transf = {"raw": identity, "saf": interpret_parse}[output]
    except KeyError:
        raise ValueError("Unknown output format %r" % output)
    return pipe(doc, fetch, tokenize, parse_raw, transf)
Author: kazoup | Project: xtas | Lines: 27 | Source: single.py
Example 15: get
def get(dsk, keys, optimizations=[fuse], num_workers=cpu_count):
    """ Multiprocessed get function appropriate for Bags """
    pool = _globals['pool']
    if pool is None:
        pool = multiprocessing.Pool(psutil.cpu_count())
        cleanup = True
    else:
        cleanup = False

    manager = multiprocessing.Manager()
    queue = manager.Queue()

    apply_async = dill_apply_async(pool.apply_async)

    # Optimize Dask
    dsk2 = pipe(dsk, partial(cull, keys=keys), *optimizations)

    try:
        # Run
        result = get_async(apply_async, cpu_count, dsk2, keys,
                           queue=queue)
    finally:
        if cleanup:
            pool.close()
    return result
Author: kastnerkyle | Project: dask | Lines: 25 | Source: multiprocessing.py
Example 16: compute_down
def compute_down(expr, data, **kwargs):
    """ Compile a blaze expression to a sparksql expression"""
    leaves = expr._leaves()

    # make sure we only have a single leaf node
    if len(leaves) != 1:
        raise ValueError('Must compile from exactly one root database')

    leaf, = leaves

    # field expressions on the database are Field instances with a record
    # measure whose immediate child is the database leaf
    tables = pipe(expr._subterms(), filter(istable(leaf)), list)

    # raise if we don't have tables in our database
    if not tables:
        raise ValueError('Expressions not referencing a table cannot be '
                         'compiled')

    # make new symbols for each table
    new_leaves = [symbol(t._name, t.dshape) for t in tables]

    # sub them in the expression
    expr = expr._subs(dict(zip(tables, new_leaves)))

    # compute using sqlalchemy
    scope = dict(zip(new_leaves, map(make_sqlalchemy_table, tables)))
    query = compute(expr, scope)

    # interpolate params
    compiled = literalquery(query, dialect=HiveDialect())
    return data.sql(str(compiled))
Author: Will-So | Project: blaze | Lines: 32 | Source: sparksql.py
Example 17: ngram_tuples
def ngram_tuples(n, string, minlen=3, maxlen=25):
    """
    Creates ngram tuples of size 'n' from 'string'.
    Also converts the string to lowercase, removes generic stopwords, and
    splits on all non-alphanumeric characters.

    Ex:
        In [2]: list(ngram_tuples(n=1, string='Just another example text.'))
        Out[2]: [('another',), ('example',), ('text',)]

        In [2]: list(ngram_tuples(n=2, string='Just another example text.'))
        Out[2]: [('another', 'example'), ('example', 'text')]

        In [11]: list(ngram_tuples(3, 'I needed a longer example text for this example.'))
        Out[11]:
        [('needed', 'longer', 'example'),
         ('longer', 'example', 'text'),
         ('example', 'text', 'example')]

    minlen - filter out words that have fewer characters than 'minlen'.
    maxlen - filter out words that have more characters than 'maxlen'.
    """
    return tlz.pipe(string,
                    lower,
                    simple_split,
                    filter_longer_than(maxlen),
                    tlz.compose(tlz.concat, map_c(splitter_of_words)),
                    filter_shorter_than(minlen),
                    filter_stopwords,
                    sliding_window_c(n))
Author: steven-cutting | Project: text2math | Lines: 30 | Source: text2tokens.py
Example 18: cli
def cli(board_source, key, token, to, output, board):
    """Hi, I'm TrelloScribe. I take Trello boards and turn them into documents!"""
    # Compose a sequence of functions based on the options chosen
    # Note toolz.compose() works right to left
    read_phase = {
        'id': download_board(key, token),
        'name': toolz.compose(download_board(key, token), search_boards(key, token)),
        'file': read_board
    }
    convert_phase = {
        'raw': partial(json.dumps, indent=2),
        'md': ast_to_md,
        'html': toolz.compose(md_to_html, ast_to_md)
    }
    toolz.pipe(board, read_phase[board_source], trello_to_ast,
               convert_phase[to], partial(click.echo, file=output))
Author: aulloa | Project: trelloscribe | Lines: 16 | Source: cli.py
Example 19: __get_rows
def __get_rows(data, max_length_per_column):
    return pipe(
        data,
        iterkeys,
        map(lambda key: __get_row(data, key, max_length_per_column)),
        reduce(lambda x, y: x + y)
    )
Author: RamonAranda | Project: ConfusionMatrix | Lines: 7 | Source: _formatter.py
Example 20: load_all_users
def load_all_users():
    ''' Returns a pd.DataFrame with the information of all the users'''
    # Bind a curried map under a new name; the original assigned to `map`
    # itself, which makes the name local and raises UnboundLocalError.
    cmap = tlz.curry(map)
    dataset = tlz.pipe(users, cmap(parse_exp03_filename), cmap(user_pipe),
                       accumulate_users)
    dataset.insert(0, 'user', sorted(users * 3))
    return dataset
Author: VGonPa | Project: datasets-poses2012 | Lines: 7 | Source: user_data_loader.py
Note: the toolz.pipe examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors. Consult the corresponding project's license before distributing or reusing the code; do not reproduce without permission.