This article collects and organizes typical usage examples of the toolz.curried.map function in Python. If you are wondering how the map function is used in practice, how it works, or what real code built on it looks like, the curated examples below should help.
A total of 20 code examples of the map function are shown below, sorted by popularity by default.
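Before the examples themselves, here is a minimal sketch (not drawn from any of the projects below) of what distinguishes toolz.curried.map from the builtin map: called with only a function, the curried version returns a partially applied mapper that can be dropped straight into toolz.pipe.

from toolz import pipe
from toolz.curried import map   # curried: map(f) is a reusable partial until given a sequence

double = map(lambda x: x * 2)                 # no data yet -- just a curried mapper
print(list(double([1, 2, 3])))                # [2, 4, 6]
print(pipe(range(5), map(str), ', '.join))    # '0, 1, 2, 3, 4'

Every example below relies on exactly this behaviour: map(...) appears inside pipe or compose with the data supplied later.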
Example 1: map
def map(self, func, data):  # pylint: disable=no-self-use
    return pipe(
        data,
        map(func),
        map(DummyResult),
        list
    )
Developer: wd15, Project: extremefill2D, Lines of code: 7, Source file: tools.py
Example 2: parser
def parser(filename, *args, **kwargs):
    g = nx.DiGraph()
    tz.pipe(filename, c_open(mode='r'),
            c.map(str.strip),
            c.map(c_split(sep=',')),
            g.add_edges_from)
    return g
Developer: jni, Project: prin, Lines of code: 7, Source file: edge_pairs_csv.py
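A hedged usage sketch for Example 2: c_open and c_split are project helpers not shown in the snippet, so the stand-ins below (curried wrappers around open and str.split) and the file name edges.csv are assumptions made purely for illustration.

import networkx as nx
import toolz as tz
from toolz import curried as c

# write a tiny hypothetical edge list so the sketch is self-contained
with open('edges.csv', 'w') as f:
    f.write('a,b\nb,c\n')

c_open = tz.curry(open)                            # stand-in for the project's c_open helper
c_split = tz.curry(lambda s, sep: s.split(sep))    # stand-in for the project's c_split helper

g = nx.DiGraph()
tz.pipe('edges.csv', c_open(mode='r'),
        c.map(str.strip),
        c.map(c_split(sep=',')),
        g.add_edges_from)
print(sorted(g.edges()))   # [('a', 'b'), ('b', 'c')]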
Example 3: outer_dict
def outer_dict(dict_in):
    """Outer product of dictionary values

    Args:
      dict_in: a dictionary with iterable values

    Returns:
      a list of dictionaries

    >>> assert pipe(
    ...     dict(a=[1], b=[2, 3]),
    ...     curry(outer_dict),
    ...     lambda x: x == [dict(a=1, b=2), dict(a=1, b=3)]
    ... )
    """
    return pipe(
        dict_in.items(),
        lambda x: zip(*x),
        list,
        lambda x: (x[0], product(*x[1])),
        tlam(lambda x, y: zip(repeat(x), y)),
        map(lambda x: zip(*x)),
        map(dict),
        list
    )
Developer: wd15, Project: extremefill2D, Lines of code: 25, Source file: tools.py
Example 4: __str__
def __str__(self):
    labels = self.labels
    if all(map(isvalid_identifier, map(first, labels))):
        rest = ', '.join('%s=%r' % l for l in labels)
    else:
        rest = '{%s}' % ', '.join('%r: %r' % l for l in labels)
    return '%s.relabel(%s)' % (self._child, rest)
Developer: cournape, Project: blaze, Lines of code: 7, Source file: expressions.py
Example 5: parse_people
def parse_people(do_request):
    logger.info('Parsing people')

    def parse_representative(doc):
        doc = doc('div.wpsPortletBody')
        raw_birth_date = doc('fieldset table').eq(0).find('td').eq(1).text().replace(' ', '')
        return {
            'name': doc.find('h3').eq(0).text(),
            'birthDate': arrow.get(raw_birth_date, 'D.M.YYYY') if raw_birth_date else None,
            'image': DZ_RS_URL + doc.find('img').eq(0).attr('src'),
            'group': doc('.panelBox100 a').attr('href'),
            'location': doc(u'*:contains("Volilno okro")').parent().text().split(':')[1].strip(),
            'gender': "F" if 'Poslanka' in str(doc) else "M",
        }

    # get all people
    return toolz.compose(
        # get back metadata
        curried.map(parse_representative),
        # visit person's link
        curried.map(do_request),
        # get a link for each person
        lambda doc: doc("p.podnaslovOsebaLI a").map(lambda i, r: pq(r).attr('href')),
        # get page with a list of people
        do_request,
    )(DZ_RS_PEOPLE_URL)
Developer: domenkozar, Project: zakonodajni-monitor-parser, Lines of code: 26, Source file: parsers.py
Example 6: streaming_pca
def streaming_pca(samples, n_components=2, batch_size=50):
    ipca = decomposition.IncrementalPCA(n_components=n_components,
                                        batch_size=batch_size)
    _ = list(tz.pipe(samples, curried.partition(batch_size),
                     curried.map(np.array),
                     curried.map(ipca.partial_fit)))
    return ipca
Developer: jeromeku, Project: streaming-talk, Lines of code: 7, Source file: session.py
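The pipeline above never materialises the full dataset: curried.partition groups the stream into fixed-size batches and each batch is handed to IncrementalPCA.partial_fit. A hypothetical driver (assuming the snippet's imports -- from sklearn import decomposition, import toolz as tz, from toolz import curried, import numpy as np -- are in scope) might look like:

import numpy as np

# 1,000 random 10-dimensional samples, streamed one at a time
samples = (np.random.random(10) for _ in range(1000))
ipca = streaming_pca(samples, n_components=2, batch_size=50)
print(ipca.components_.shape)   # (2, 10)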
Example 7: ghost_internal
def ghost_internal(x, axes):
    """ Share boundaries between neighboring blocks

    Parameters
    ----------
    x: da.Array
        A dask array
    axes: dict
        The size of the shared boundary per axis

    The axes dict informs how many cells to overlap between neighboring blocks
    {0: 2, 2: 5} means share two cells in 0 axis, 5 cells in 2 axis
    """
    dims = list(map(len, x.blockdims))
    expand_key2 = partial(expand_key, dims=dims)
    interior_keys = pipe(x._keys(), flatten,
                         map(expand_key2), map(flatten),
                         concat, list)
    interior_slices = dict((k, fractional_slice(k, axes))
                           for k in interior_keys)

    shape = (3,) * x.ndim
    name = next(ghost_names)
    ghost_blocks = dict(((name,) + k[1:],
                         (rec_concatenate, (concrete, expand_key2(k))))
                        for k in interior_keys)
    blockdims = [[bds[0] + axes.get(i, 0)]
                 + [bd + axes.get(i, 0) * 2 for bd in bds[1:-1]]
                 + [bds[-1] + axes.get(i, 0)]
                 for i, bds in enumerate(x.blockdims)]
    return Array(merge(interior_slices, ghost_blocks, x.dask),
                 name, blockdims=blockdims)
Developer: kastnerkyle, Project: dask, Lines of code: 35, Source file: ghost.py
Example 8: gender_from_bam
def gender_from_bam(bam_path, prefix=''):
    """Predict the gender from a BAM alignment file.

    Args:
      bam_path (path): path to a BAM alignment file
      prefix (str, optional): string to prefix to 'X', 'Y'

    Returns:
      Gender: tuple of X coverage, Y coverage, and sex prediction

    Examples:
      >>> gender_from_bam('alignment.bam', prefix='chr')
      Gender(x_coverage=123.31, y_coverage=0.13, sex='female')
    """
    # setup: connect to a BAM file
    bam = BamFile(bam_path)

    # step 0: fake some BED interval rows (already 1,1-based!)
    fake_bed_rows = [("%sX" % prefix, 1, 59373566),
                     ("%sY" % prefix, 69362, 11375310)]

    # step 1: run the pipeline
    sequence = pipe(
        fake_bed_rows,
        map(lambda interval: bam(*interval)),
        map(average)
    )

    # step: make the prediction
    x_coverage, y_coverage = list(sequence)
    sex = predict_gender(x_coverage, y_coverage)
    return Gender(x_coverage, y_coverage, sex)
Developer: dnil, Project: chanjo, Lines of code: 32, Source file: core.py
Example 9: get_service_step
def get_service_step(service_recipe):
    """
    Get step timedelta: the smallest duration among service_recipe's periods.
    """
    def diff(start, end):
        return end - start

    res_delta_diffs = compose(map(lambda p: diff(*p)), get('delta_periods'))
    return compose(min, map(min), map(res_delta_diffs))(service_recipe)
Developer: qandobooking, Project: booking-engine, Lines of code: 8, Source file: engine.py
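The composition reads right to left: pull each resource's 'delta_periods', turn every (start, end) pair into a duration, take the per-resource minimum, then the overall minimum. A sketch with entirely made-up data (assuming compose, map and get come from toolz / toolz.curried as in the snippet):

from datetime import datetime, timedelta

t0 = datetime(2024, 1, 1, 9, 0)
service_recipe = [   # hypothetical recipe: two resources, each with (start, end) periods
    {'delta_periods': [(t0, t0 + timedelta(minutes=30)),
                       (t0, t0 + timedelta(minutes=45))]},
    {'delta_periods': [(t0, t0 + timedelta(minutes=60))]},
]
print(get_service_step(service_recipe))   # 0:30:00 -- the shortest period wins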
Example 10: functional
def functional():
    return count_by(itemgetter('hour'),
                    map(json.loads,
                        filter(None,
                               mapcat(lambda output: output.strip().split('\n'),
                                      map(lambda date: logs[date.strftime('%Y/%m/%d')],
                                          map(lambda days_ago: today - timedelta(days=days_ago),
                                              range(1, days_of_logs + 1)))))))
Developer: berrytj, Project: bookends, Lines of code: 8, Source file: example.py
Example 11: piped
def piped():
    return (_| range(1, days_of_logs + 1)
             | map(lambda days_ago: today - timedelta(days=days_ago))
             | map(lambda date: logs[date.strftime('%Y/%m/%d')])
             | mapcat(lambda output: output.strip().split('\n'))
             | filter(None)
             | map(json.loads)
             | count_by(itemgetter('hour'))
             |_)
Developer: berrytj, Project: bookends, Lines of code: 9, Source file: example.py
Example 12: opt_weight_ir_grid
def opt_weight_ir_grid(df, alphas, look_ahead_pers, long_only=True, tilt_weights=None):
    """Exhaustive grid search over alphas, look_ahead_pers and norm_types,
    returning a dataframe of cumulative returns for each optimal portfolio construction."""
    norm_types = [2, ]
    end_date = df.index[-(look_ahead_pers[-1] + 1)]
    p = pipe(product(alphas, norm_types, look_ahead_pers),
             map(lambda x: list(x) + [calc_opt_weight_portfolio_ir(df, x[0], x[1], x[2], long_only, tilt_weights)]),
             map(lambda x: dict(zip(['alpha', 'norm_type', 'look_ahead_per', 'ir'], x))))
    return pd.DataFrame(list(p))
Developer: rhouck, Project: nn_port, Lines of code: 9, Source file: opt_weights.py
Example 13: _iter
def _iter(self, usecols=None):
    from blaze.api.into import into
    dfs = self.pandas_read_csv(usecols=usecols,
                               chunksize=self.chunksize,
                               dtype='O',
                               parse_dates=[])
    return pipe(dfs, map(partial(pd.DataFrame.fillna, value='')),
                map(partial(into, list)),
                concat)
Developer: Casolt, Project: blaze, Lines of code: 9, Source file: csv.py
Example 14: discover_sqlcontext
def discover_sqlcontext(ctx):
    try:
        table_names = list(map(str, ctx.tableNames()))
    except AttributeError:
        java_names = ctx._ssql_ctx.catalog().tables().keySet()
        table_names = list(scala_set_to_set(ctx, java_names))

    table_names.sort()

    dshapes = zip(table_names, map(discover, map(ctx.table, table_names)))
    return datashape.DataShape(datashape.Record(dshapes))
Developer: MoherX, Project: odo, Lines of code: 11, Source file: sparksql.py
Example 15: destruct
def destruct(x):
    """
    Deconstructs a data structure into a 1-D np.ndarray (via multiple dispatch).
    Converts a list of numpy arrays to a single array.
    """
    # make sure the values are all numpy arrays
    list(map(enforce(np.ndarray), x))

    # unravel each array, concatenate them, and return a single 1-D np.array
    return pipe(x, map(np.ravel), concat, list, np.array)
Developer: tedsanders, Project: descent, Lines of code: 11, Source file: utils.py
Example 16: __calculate_max_column_length
def __calculate_max_column_length(column_key):
    max_value_length = pipe(
        data,
        iterkeys,
        map(lambda key: data[key][column_key]),
        pvector,
        map(str),
        map(len),
        max
    )
    return max(max_value_length, len(str(column_key)))
Developer: RamonAranda, Project: ConfusionMatrix, Lines of code: 11, Source file: _formatter.py
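The tail of that pipe (stringify every value, measure it, keep the longest) is a self-contained curried pattern; stripped of the snippet's data dict and pyrsistent's pvector, it reduces to the sketch below.

from toolz import pipe
from toolz.curried import map

values = [3.14159, 'label', None, 1234567]        # made-up column values
print(pipe(values, map(str), map(len), max))      # 7 -- the widest stringified value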
Example 17: compute_up
def compute_up(expr, args, **kwargs):
    from_objs = list(unique(concat(map(get_all_froms, args))))
    if len(from_objs) > 1:
        # TODO: how do you do this in sql? please send help
        raise ValueError('only columns from the same table can be merged')

    cols = list(unique(concat(map(get_unsafe_inner_columns, args, expr.args))))
    sel = sa.select(cols, from_obj=from_objs[0])
    where = unify_wheres(args)
    if where is not None:
        sel = sel.where(where)
    return sel
Developer: blaze, Project: blaze, Lines of code: 12, Source file: sql.py
Example 18: export_intervals
def export_intervals(chanjo_db, include_header=True, bed_score=0):
    r"""Return BED-formatted interval lines from existing ``chanjo_db``.

    BED lines are ready to be printed or written to a file.

    Args:
      chanjo_db (session): ``sqlalchemy.orm.session`` object with a
        ``.query``-method
      include_header (bool, optional): whether to include BED header
      bed_score (int, optional): dummy score (0-1000) to insert at field 5
        to complete the BED format

    Yields:
      str: stringified and tab-delimited interval

    Examples:
      >>> from chanjo import export_intervals, Store
      ... # instantiate a new connection to a Chanjo database
      >>> db = Store('./coverage.sqlite')
      >>> with open('intervals.sorted.bed', 'w') as stream:
      ...     # write intervals in BED-format with appropriate headers
      ...     for bed_line in export_intervals(db):
      ...         stream.write(bed_line + '\n')
    """
    if include_header:
        yield '#chrom\tchromStart\tchromEnd\tname\tscore\tstrand'

    # set up which columns to fetch to make BED file
    # column 5 is just a silly default for the "score" field in BED
    i = Interval  # alias
    columns = (i.contig, i.start - 1, i.end, i.id, i.strand)

    # BED files are tab-delimited
    delimiter = '\t'

    # 1. fetch interval tuples from the database (producer)
    # 2. stringify each item in each subsequence (interval tuple)
    # 3. join lines on tab-character
    # 4. prepend the header
    bed_lines = pipe(
        fetch_records(chanjo_db, columns),
        map(map(str)),                         # convert fields to strings
        map(juxt(compose(list, take(4)),       # keep first 4 fields
                 lambda _: [str(bed_score)],   # insert BED score
                 compose(list, last))),        # keep last field
        map(concat),                           # flatten each item
        map(delimiter.join)                    # join on \t
    )

    for bed_line in bed_lines:
        yield bed_line
Developer: kern3020, Project: chanjo, Lines of code: 51, Source file: core.py
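The first transform, map(map(str)), only reads correctly because both calls are the curried map: the inner map(str) becomes a per-row mapper and the outer map applies it to every fetched row. A standalone illustration with made-up interval tuples:

from toolz import pipe
from toolz.curried import map

rows = [('1', 0, 100, 'int1', '+'), ('X', 5, 250, 'int2', '-')]   # fake interval rows
lines = pipe(rows, map(map(str)), map('\t'.join), list)
print(lines[0])   # fields of the first row, tab-separated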
Example 19: ipython_display
def ipython_display(specs):
    """Run publish_display_data for the JS and HTML.

    Args:
      specs: a list of Vega specs
    """
    pipe(
        specs,
        map(lambda x: (uuid.uuid4(), vega.Vega(x))),
        list,
        do(html_publish_map),
        map(tlam(js_publish)),
        list
    )
Developer: wd15, Project: extremefill2D, Lines of code: 14, Source file: plot.py
Example 20: overlap_internal
def overlap_internal(x, axes):
    """ Share boundaries between neighboring blocks

    Parameters
    ----------
    x: da.Array
        A dask array
    axes: dict
        The size of the shared boundary per axis

    The axes input informs how many cells to overlap between neighboring blocks
    {0: 2, 2: 5} means share two cells in 0 axis, 5 cells in 2 axis
    """
    dims = list(map(len, x.chunks))
    expand_key2 = partial(expand_key, dims=dims, axes=axes)

    # Make keys for each of the surrounding sub-arrays
    interior_keys = pipe(x.__dask_keys__(), flatten, map(expand_key2),
                         map(flatten), concat, list)

    name = 'overlap-' + tokenize(x, axes)
    getitem_name = 'getitem-' + tokenize(x, axes)
    interior_slices = {}
    overlap_blocks = {}
    for k in interior_keys:
        frac_slice = fractional_slice((x.name,) + k, axes)
        if (x.name,) + k != frac_slice:
            interior_slices[(getitem_name,) + k] = frac_slice
        else:
            interior_slices[(getitem_name,) + k] = (x.name,) + k
        overlap_blocks[(name,) + k] = (concatenate3,
                                       (concrete, expand_key2((None,) + k, name=getitem_name)))

    chunks = []
    for i, bds in enumerate(x.chunks):
        if len(bds) == 1:
            chunks.append(bds)
        else:
            left = [bds[0] + axes.get(i, 0)]
            right = [bds[-1] + axes.get(i, 0)]
            mid = []
            for bd in bds[1:-1]:
                mid.append(bd + axes.get(i, 0) * 2)
            chunks.append(left + mid + right)

    dsk = merge(interior_slices, overlap_blocks)
    dsk = sharedict.merge(x.dask, (name, dsk))

    return Array(dsk, name, chunks, dtype=x.dtype)
Developer: martindurant, Project: dask, Lines of code: 50, Source file: overlap.py
Note: The toolz.curried.map function examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective authors; copyright in the source code remains with the original authors, and distribution or use should follow each project's License. Do not reproduce without permission.