This article collects typical usage examples of the Python function toolz.accumulate. If you are wondering what accumulate does, how to call it, or want to see it used in real code, the curated examples below may help.
The article presents 13 code examples of the accumulate function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
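Before looking at the examples, here is a minimal standalone refresher on toolz.accumulate itself. Unlike itertools.accumulate, the binary operator comes first, and an optional initial value is emitted as the first element of the result:

from operator import add
from toolz import accumulate

# Running sums; the operator is the first argument.
print(list(accumulate(add, [1, 2, 3, 4, 5])))        # [1, 3, 6, 10, 15]
# With an initial value, that value is emitted first (similar to seeding reduce).
print(list(accumulate(add, [1, 2, 3], initial=-1)))  # [-1, 0, 2, 5]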
Example 1: accumulate_part
def accumulate_part(binop, seq, initial, is_first=False):
    if initial == no_default:
        res = list(accumulate(binop, seq))
    else:
        res = list(accumulate(binop, seq, initial=initial))
    if is_first:
        return res, res[-1] if res else [], initial
    return res[1:], res[-1]
Author: datastark | Project: dask | Lines: 8 | Source file: core.py
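To see how a helper like this can be chained across partitions, here is a simplified, self-contained sketch. It is not dask's actual driver code: the is_first return value is reduced to a plain (values, carry) pair, and no_default is just a local sentinel defined for the illustration.

from operator import add
from toolz import accumulate

no_default = object()  # local sentinel meaning "no initial value was given"

def accumulate_part_simplified(binop, seq, initial, is_first=False):
    if initial is no_default:
        res = list(accumulate(binop, seq))
    else:
        res = list(accumulate(binop, seq, initial=initial))
    if is_first:
        return res, res[-1] if res else initial
    return res[1:], res[-1]  # drop the carried-in initial, pass the new carry on

# Stitch the per-partition results into one global cumulative sum.
parts = [[1, 2, 3], [4, 5], [6]]
out, carry = accumulate_part_simplified(add, parts[0], no_default, is_first=True)
results = list(out)
for part in parts[1:]:
    out, carry = accumulate_part_simplified(add, part, carry)
    results.extend(out)
print(results)  # [1, 3, 6, 10, 15, 21], same as accumulate(add, [1, 2, 3, 4, 5, 6])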
Example 2: cumdims_label
def cumdims_label(chunks, const):
    """ Internal utility for cumulative sum with label.

    >>> cumdims_label(((5, 3, 3), (2, 2, 1)), 'n')  # doctest: +NORMALIZE_WHITESPACE
    [(('n', 0), ('n', 5), ('n', 8), ('n', 11)),
     (('n', 0), ('n', 2), ('n', 4), ('n', 5))]
    """
    return [tuple(zip((const,) * (1 + len(bds)),
                      list(accumulate(add, (0,) + bds))))
            for bds in chunks]
Author: dougc333 | Project: TestCode | Lines: 8 | Source file: rechunk.py
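The core idiom in cumdims_label is prepending 0 and accumulating with add, which turns per-chunk sizes into cumulative boundary positions. A quick standalone check:

from operator import add
from toolz import accumulate

sizes = (5, 3, 3)
# Cumulative boundaries, including the leading 0 where the first chunk starts.
print(list(accumulate(add, (0,) + sizes)))  # [0, 5, 8, 11]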
Example 3: gradient_descent3
def gradient_descent3(f, df, x):
    return accumulate(
        lambda fx, _: min(
            (partial(gradient_step, df, -alpha)(fx)
             for alpha in [100, 10, 1, 0.7, 0.01, 0.001, 0.0001, 0.00001]),
            key=safe(f),
        ),
        repeat(x),
    )
Author: philiplessner | Project: FunctionalML | Lines: 8 | Source file: func_gradient_descent.py
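The trick in gradient_descent3 is that accumulate(step, repeat(x)) uses repeat(x) only to seed the accumulator and to keep the iteration going forever; each accumulated value is the next iterate. Below is a self-contained sketch of the same pattern on f(x) = x**2, with the line search over alphas and the gradient_step/safe helpers simplified to a fixed step size:

from itertools import islice, repeat
from toolz import accumulate

df = lambda x: 2 * x  # gradient of f(x) = x**2
alpha = 0.1

# Each step ignores the dummy value coming from repeat() and moves the
# current iterate one gradient step downhill.
iterates = accumulate(lambda x, _: x - alpha * df(x), repeat(10.0))
print(list(islice(iterates, 5)))  # [10.0, 8.0, 6.4, 5.12, 4.096]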
Example 4: arg_reduction
def arg_reduction(x, chunk, combine, agg, axis=None, split_every=None, out=None):
    """ Generic function for argreduction.

    Parameters
    ----------
    x : Array
    chunk : callable
        Partialed ``arg_chunk``.
    combine : callable
        Partialed ``arg_combine``.
    agg : callable
        Partialed ``arg_agg``.
    axis : int, optional
    split_every : int or dict, optional
    """
    if axis is None:
        axis = tuple(range(x.ndim))
        ravel = True
    elif isinstance(axis, Integral):
        axis = validate_axis(axis, x.ndim)
        axis = (axis,)
        ravel = x.ndim == 1
    else:
        raise TypeError("axis must be either `None` or int, "
                        "got '{0}'".format(axis))

    for ax in axis:
        chunks = x.chunks[ax]
        if len(chunks) > 1 and np.isnan(chunks).any():
            raise ValueError(
                "Arg-reductions do not work with arrays that have "
                "unknown chunksizes. At some point in your computation "
                "this array lost chunking information"
            )

    # Map chunk across all blocks
    name = 'arg-reduce-{0}'.format(tokenize(axis, x, chunk,
                                            combine, split_every))
    old = x.name
    keys = list(product(*map(range, x.numblocks)))
    offsets = list(product(*(accumulate(operator.add, bd[:-1], 0)
                             for bd in x.chunks)))
    if ravel:
        offset_info = zip(offsets, repeat(x.shape))
    else:
        offset_info = pluck(axis[0], offsets)

    chunks = tuple((1, ) * len(c) if i in axis else c for (i, c)
                   in enumerate(x.chunks))
    dsk = dict(((name,) + k, (chunk, (old,) + k, axis, off)) for (k, off)
               in zip(keys, offset_info))
    # The dtype of `tmp` doesn't actually matter, just need to provide something
    graph = HighLevelGraph.from_collections(name, dsk, dependencies=[x])
    tmp = Array(graph, name, chunks, dtype=x.dtype)
    dtype = np.argmin([1]).dtype
    result = _tree_reduce(tmp, agg, axis, False, dtype, split_every, combine)
    return handle_out(out, result)
Author: yliapis | Project: dask | Lines: 57 | Source file: reductions.py
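The offsets computation above is worth isolating: for each dimension, accumulate(operator.add, bd[:-1], 0) yields the element offset at which every block starts, and the cartesian product pairs these offsets up across dimensions. A standalone illustration with hypothetical chunk sizes:

import operator
from itertools import product
from toolz import accumulate

chunks = ((2, 3, 4), (5, 5))  # hypothetical chunks of a 9 x 10 array
offsets = list(product(*(accumulate(operator.add, bd[:-1], 0) for bd in chunks)))
print(offsets)
# [(0, 0), (0, 5), (2, 0), (2, 5), (5, 0), (5, 5)]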
Example 5: until_nearly_convergence
def until_nearly_convergence(convf, it, tolerance=0.0001):
    '''
    Test for absolute convergence

    Parameters
        it: Lazy sequence of values
        tolerance: Convergence criteria
    Returns
        Continues to add to the sequence of current values if tolerance is not satisfied
        Otherwise it terminates iteration and returns the sequence of values
    '''
    # The order of arguments for toolz.accumulate is opposite to
    # Python 3 itertools.accumulate
    return accumulate(partial(convf, tolerance), it)
Author: philiplessner | Project: FunctionalML | Lines: 13 | Source file: utility.py
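The comment about argument order is the main gotcha this example guards against: toolz.accumulate(binop, seq) versus itertools.accumulate(seq, binop). A quick side-by-side:

import itertools
import operator
import toolz

seq = [1, 2, 3, 4]
print(list(toolz.accumulate(operator.add, seq)))      # [1, 3, 6, 10]
print(list(itertools.accumulate(seq, operator.add)))  # [1, 3, 6, 10]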
Example 6: sgd
def sgd(df, X, y, theta_0, eta=0.1):
    """
    Parameters
        df: Gradient of function f
        X: Matrix of features
        y: Vector of observations
        theta_0: Initial guess; theta is a j-dimensional vector ([theta_01, theta_02,...,theta_0j])
        eta: Learning rate
    Returns
        Generator sequence of [theta_k1, theta_k2,...,theta_kj]
        where k = 0 to ...
    """
    xys = chain([theta_0], in_random_order(zip(X, y)))
    return accumulate(partial(sgd_step, df, eta), xys)
Author: philiplessner | Project: FunctionalML | Lines: 14 | Source file: func_gradient_descent.py
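The pattern here is to prepend the initial guess with chain so that accumulate's first output is theta_0, then fold each (x, y) observation into the running estimate. Below is a self-contained sketch fitting y = 2x with a single scalar weight; sgd_step and in_random_order are replaced with inline stand-ins written for this illustration:

from functools import partial
from itertools import chain, islice
from toolz import accumulate

X = [1.0, 2.0, 3.0, 4.0]
y = [2.0, 4.0, 6.0, 8.0]

def sgd_step(eta, theta, xy):
    # One stochastic step for squared error on a single (x, y) pair.
    x, target = xy
    grad = 2 * (theta * x - target) * x
    return theta - eta * grad

theta_0 = 0.0
thetas = accumulate(partial(sgd_step, 0.05), chain([theta_0], zip(X, y)))
print(list(islice(thetas, 5)))  # [0.0, 0.2, 0.92, 1.892, 2.0648] -- heading toward 2.0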
Example 7: arg_reduction
def arg_reduction(x, chunk, combine, agg, axis=None, split_every=None, out=None):
    """ Generic function for argreduction.

    Parameters
    ----------
    x : Array
    chunk : callable
        Partialed ``arg_chunk``.
    combine : callable
        Partialed ``arg_combine``.
    agg : callable
        Partialed ``arg_agg``.
    axis : int, optional
    split_every : int or dict, optional
    """
    if axis is None:
        axis = tuple(range(x.ndim))
        ravel = True
    elif isinstance(axis, int):
        if axis < 0:
            axis += x.ndim
        if axis < 0 or axis >= x.ndim:
            raise ValueError("axis entry is out of bounds")
        axis = (axis,)
        ravel = x.ndim == 1
    else:
        raise TypeError("axis must be either `None` or int, "
                        "got '{0}'".format(axis))

    # Map chunk across all blocks
    name = 'arg-reduce-chunk-{0}'.format(tokenize(chunk, axis))
    old = x.name
    keys = list(product(*map(range, x.numblocks)))
    offsets = list(product(*(accumulate(operator.add, bd[:-1], 0)
                             for bd in x.chunks)))
    if ravel:
        offset_info = zip(offsets, repeat(x.shape))
    else:
        offset_info = pluck(axis[0], offsets)

    chunks = tuple((1, ) * len(c) if i in axis else c for (i, c)
                   in enumerate(x.chunks))
    dsk = dict(((name,) + k, (chunk, (old,) + k, axis, off)) for (k, off)
               in zip(keys, offset_info))
    # The dtype of `tmp` doesn't actually matter, just need to provide something
    tmp = Array(sharedict.merge(x.dask, (name, dsk)), name, chunks, dtype=x.dtype)
    dtype = np.argmin([1]).dtype
    result = _tree_reduce(tmp, agg, axis, False, dtype, split_every, combine)
    return handle_out(out, result)
Author: togar-nk | Project: dask | Lines: 49 | Source file: reductions.py
Example 8: fromfunction
def fromfunction(func, chunks=None, shape=None, dtype=None):
    if chunks:
        chunks = normalize_chunks(chunks, shape)
    name = 'fromfunction-' + tokenize(func, chunks, shape, dtype)
    keys = list(product([name], *[range(len(bd)) for bd in chunks]))
    aggdims = [list(accumulate(add, (0,) + bd[:-1])) for bd in chunks]
    offsets = list(product(*aggdims))
    shapes = list(product(*chunks))
    values = [(np.fromfunction, offset_func(func, offset), shp)
              for offset, shp in zip(offsets, shapes)]
    dsk = dict(zip(keys, values))
    return Array(dsk, name, chunks, dtype=dtype)
Author: martindurant | Project: dask | Lines: 15 | Source file: creation.py
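Here accumulate(add, (0,) + bd[:-1]) gives the starting offset of each block along one dimension; combining those offsets with the chunk sizes themselves yields an (offset, shape) pair for every block, which is what each per-block np.fromfunction call needs. A quick standalone check with hypothetical chunks:

from itertools import product
from operator import add
from toolz import accumulate

chunks = ((2, 2), (3, 3, 3))  # hypothetical: a 4 x 9 array split into 2 x 3 blocks
aggdims = [list(accumulate(add, (0,) + bd[:-1])) for bd in chunks]
offsets = list(product(*aggdims))  # where each block starts
shapes = list(product(*chunks))    # how big each block is
print(list(zip(offsets, shapes))[:3])
# [((0, 0), (2, 3)), ((0, 3), (2, 3)), ((0, 6), (2, 3))]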
Example 9: fromfunction
def fromfunction(func, chunks='auto', shape=None, dtype=None, **kwargs):
    chunks = normalize_chunks(chunks, shape)
    name = 'fromfunction-' + tokenize(func, chunks, shape, dtype, kwargs)
    keys = list(product([name], *[range(len(bd)) for bd in chunks]))
    aggdims = [list(accumulate(add, (0,) + bd[:-1])) for bd in chunks]
    offsets = list(product(*aggdims))
    shapes = list(product(*chunks))
    dtype = dtype or float
    values = [(_np_fromfunction, func, shp, dtype, offset, kwargs)
              for offset, shp in zip(offsets, shapes)]
    dsk = dict(zip(keys, values))
    return Array(dsk, name, chunks, dtype=dtype)
Author: caseyclements | Project: dask | Lines: 15 | Source file: creation.py
Example 10: keys_to_flush
def keys_to_flush(lengths, fraction=0.1, maxcount=100000):
    """ Which keys to remove

    >>> lengths = {'a': 20, 'b': 10, 'c': 15, 'd': 15,
    ...            'e': 10, 'f': 25, 'g': 5}
    >>> keys_to_flush(lengths, 0.5)
    ['f', 'a']
    """
    top = topk(max(len(lengths) // 2, 1),
               lengths.items(),
               key=1)
    total = sum(lengths.values())
    cutoff = min(maxcount, max(1,
                 bisect(list(accumulate(add, pluck(1, top))),
                        total * fraction)))
    result = [k for k, v in top[:cutoff]]
    assert result
    return result
Author: CaptainAL | Project: Spyder | Lines: 18 | Source file: buffer.py
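The accumulate/bisect pairing computes a running total over the largest entries and then finds how many of them are needed to cover the requested fraction of the grand total. In isolation, using the same numbers as the doctest above:

from bisect import bisect
from operator import add
from toolz import accumulate

top_sizes = [25, 20, 15]                    # sizes of the top keys, largest first
running = list(accumulate(add, top_sizes))  # [25, 45, 60]
total = 100
# How many of the top entries must be flushed to free 50% of the total?
print(bisect(running, total * 0.5))         # 2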
Example 11: _slice_1d
def _slice_1d(dim_shape, lengths, index):
    """Returns a dict of {blocknum: slice}

    This function figures out where each slice should start in each
    block for a single dimension. If the slice won't return any elements
    in the block, that block will not be in the output.

    Parameters
    ----------
    dim_shape - the number of elements in this dimension.
      This should be a positive, non-zero integer
    blocksize - the number of elements per block in this dimension
      This should be a positive, non-zero integer
    index - a description of the elements in this dimension that we want
      This might be an integer, a slice(), or an Ellipsis

    Returns
    -------
    dictionary where the keys are the integer index of the blocks that
      should be sliced and the values are the slices

    Examples
    --------
    100 length array cut into length 20 pieces, slice 0:35

    >>> _slice_1d(100, [20, 20, 20, 20, 20], slice(0, 35))
    {0: slice(None, None, None), 1: slice(0, 15, 1)}

    Support irregular blocks and various slices

    >>> _slice_1d(100, [20, 10, 10, 10, 25, 25], slice(10, 35))
    {0: slice(10, 20, 1), 1: slice(None, None, None), 2: slice(0, 5, 1)}

    Support step sizes

    >>> _slice_1d(100, [15, 14, 13], slice(10, 41, 3))
    {0: slice(10, 15, 3), 1: slice(1, 14, 3), 2: slice(2, 12, 3)}

    >>> _slice_1d(100, [20, 20, 20, 20, 20], slice(0, 100, 40))  # step > blocksize
    {0: slice(0, 20, 40), 2: slice(0, 20, 40), 4: slice(0, 20, 40)}

    Also support indexing single elements

    >>> _slice_1d(100, [20, 20, 20, 20, 20], 25)
    {1: 5}

    And negative slicing

    >>> _slice_1d(100, [20, 20, 20, 20, 20], slice(100, 0, -3))
    {0: slice(-2, -20, -3), 1: slice(-1, -21, -3), 2: slice(-3, -21, -3), 3: slice(-2, -21, -3), 4: slice(-1, -21, -3)}

    >>> _slice_1d(100, [20, 20, 20, 20, 20], slice(100, 12, -3))
    {0: slice(-2, -8, -3), 1: slice(-1, -21, -3), 2: slice(-3, -21, -3), 3: slice(-2, -21, -3), 4: slice(-1, -21, -3)}

    >>> _slice_1d(100, [20, 20, 20, 20, 20], slice(100, -12, -3))
    {4: slice(-1, -12, -3)}
    """
    if isinstance(index, (int, long)):
        i = 0
        ind = index
        lens = list(lengths)
        while ind >= lens[0]:
            i += 1
            ind -= lens.pop(0)
        return {i: ind}

    assert isinstance(index, slice)
    step = index.step or 1
    if step > 0:
        start = index.start or 0
        stop = index.stop if index.stop is not None else dim_shape
    else:
        start = index.start or dim_shape - 1
        start = dim_shape - 1 if start >= dim_shape else start
        stop = -(dim_shape + 1) if index.stop is None else index.stop

    # posify start and stop
    if start < 0:
        start += dim_shape
    if stop < 0:
        stop += dim_shape

    d = dict()
    if step > 0:
        for i, length in enumerate(lengths):
            if start < length and stop > 0:
                d[i] = slice(start, min(stop, length), step)
                start = (start - length) % step
            else:
                start = start - length
            stop -= length
    else:
        rstart = start  # running start
        chunk_boundaries = list(accumulate(add, lengths))
        for i, chunk_stop in reversed(list(enumerate(chunk_boundaries))):
            # create a chunk start and stop
            # ......... the rest of the code is omitted here .........
Author: ankravch | Project: dask | Lines: 101 | Source file: slicing.py
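In the negative-step branch, accumulate(add, lengths) produces the exclusive end boundary of each block, which the omitted code then walks through in reverse. As a standalone illustration of what such boundaries give you (the bisect lookup below is an illustration added here, not part of the omitted dask code), they also let you locate the block containing a global index:

from bisect import bisect
from operator import add
from toolz import accumulate

lengths = [20, 10, 10, 10, 25, 25]
chunk_boundaries = list(accumulate(add, lengths))
print(chunk_boundaries)              # [20, 30, 40, 50, 75, 100]
print(bisect(chunk_boundaries, 47))  # global index 47 falls in block 3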
Example 12: plot_cache
def plot_cache(results, dsk, start_time, metric_name, palette='GnBu',
               label_size=60, **kwargs):
    """Visualize the results of profiling in a bokeh plot.

    Parameters
    ----------
    results : sequence
        Output of CacheProfiler.results
    dsk : dict
        The dask graph being profiled.
    start_time : float
        Start time of the profile.
    metric_name : string
        Metric used to measure cache size
    palette : string, optional
        Name of the bokeh palette to use, must be key in bokeh.palettes.brewer.
    label_size : int (optional)
        Maximum size of output labels in plot, defaults to 60
    **kwargs
        Other keyword arguments, passed to bokeh.figure. These will override
        all defaults set by visualize.

    Returns
    -------
    The completed bokeh plot object.
    """
    defaults = dict(title="Profile Results",
                    tools="hover,save,reset,resize,wheel_zoom,xpan",
                    plot_width=800, plot_height=300)
    defaults.update((k, v) for (k, v) in kwargs.items() if k in
                    bp.Figure.properties())

    if results:
        starts, ends = list(zip(*results))[3:]
        tics = list(sorted(unique(starts + ends)))
        groups = groupby(lambda d: pprint_task(d[1], dsk, label_size), results)
        data = {}
        for k, vals in groups.items():
            cnts = dict.fromkeys(tics, 0)
            for v in vals:
                cnts[v.cache_time] += v.metric
                cnts[v.free_time] -= v.metric
            data[k] = list(accumulate(add, pluck(1, sorted(cnts.items()))))

        tics = [i - start_time for i in tics]
        p = bp.figure(x_range=[0, max(tics)], **defaults)

        for (key, val), color in zip(data.items(), get_colors(palette, data.keys())):
            p.line('x', 'y', line_color=color, line_width=3,
                   source=bp.ColumnDataSource({'x': tics, 'y': val,
                                               'label': [key for i in val]}))
    else:
        p = bp.figure(y_range=[0, 10], x_range=[0, 10], **defaults)
    p.grid.grid_line_color = None
    p.axis.axis_line_color = None
    p.axis.major_tick_line_color = None
    p.yaxis.axis_label = "Cache Size ({0})".format(metric_name)
    p.xaxis.axis_label = "Time (s)"
    hover = p.select(HoverTool)
    hover.tooltips = """
    <div>
        <span style="font-size: 14px; font-weight: bold;">Task:</span>
        <span style="font-size: 10px; font-family: Monaco, monospace;">@label</span>
    </div>
    """
    return p
Author: kerrywatson1 | Project: dask | Lines: 69 | Source file: profile_visualize.py
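The data preparation is an event-sourcing trick: each task adds its metric when its result enters the cache and subtracts it when the result is freed, so accumulate(add, ...) over the time-sorted deltas yields the cache size at every tick. In isolation, with made-up event data:

from operator import add
from toolz import accumulate, pluck

# (time, delta) events: +size when a result enters the cache, -size when it is freed.
cnts = {0.0: 0, 1.0: 5, 2.5: 3, 3.0: -5, 4.0: -3}
running = list(accumulate(add, pluck(1, sorted(cnts.items()))))
print(running)  # [0, 5, 8, 3, 0] -- cache size at each tick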
Example 13: _slice_1d
def _slice_1d(dim_shape, lengths, index):
    """Returns a dict of {blocknum: slice}

    This function figures out where each slice should start in each
    block for a single dimension. If the slice won't return any elements
    in the block, that block will not be in the output.

    Parameters
    ----------
    dim_shape - the number of elements in this dimension.
      This should be a positive, non-zero integer
    blocksize - the number of elements per block in this dimension
      This should be a positive, non-zero integer
    index - a description of the elements in this dimension that we want
      This might be an integer, a slice(), or an Ellipsis

    Returns
    -------
    dictionary where the keys are the integer index of the blocks that
      should be sliced and the values are the slices

    Examples
    --------
    100 length array cut into length 20 pieces, slice 0:35

    >>> _slice_1d(100, [20, 20, 20, 20, 20], slice(0, 35))
    {0: slice(None, None, None), 1: slice(0, 15, 1)}

    Support irregular blocks and various slices

    >>> _slice_1d(100, [20, 10, 10, 10, 25, 25], slice(10, 35))
    {0: slice(10, 20, 1), 1: slice(None, None, None), 2: slice(0, 5, 1)}

    Support step sizes

    >>> _slice_1d(100, [15, 14, 13], slice(10, 41, 3))
    {0: slice(10, 15, 3), 1: slice(1, 14, 3), 2: slice(2, 12, 3)}

    >>> _slice_1d(100, [20, 20, 20, 20, 20], slice(0, 100, 40))  # step > blocksize
    {0: slice(0, 20, 40), 2: slice(0, 20, 40), 4: slice(0, 20, 40)}

    Also support indexing single elements

    >>> _slice_1d(100, [20, 20, 20, 20, 20], 25)
    {1: 5}

    And negative slicing

    >>> _slice_1d(100, [20, 20, 20, 20, 20], slice(100, 0, -3))
    {0: slice(-2, -20, -3), 1: slice(-1, -21, -3), 2: slice(-3, -21, -3), 3: slice(-2, -21, -3), 4: slice(-1, -21, -3)}

    >>> _slice_1d(100, [20, 20, 20, 20, 20], slice(100, 12, -3))
    {0: slice(-2, -8, -3), 1: slice(-1, -21, -3), 2: slice(-3, -21, -3), 3: slice(-2, -21, -3), 4: slice(-1, -21, -3)}

    >>> _slice_1d(100, [20, 20, 20, 20, 20], slice(100, -12, -3))
    {4: slice(-1, -12, -3)}
    """
    if isinstance(index, (int, long)):
        i = 0
        ind = index
        lens = list(lengths)
        while ind >= lens[0]:
            i += 1
            ind -= lens.pop(0)
        return {i: ind}

    assert isinstance(index, slice)
    step = index.step or 1
    if step > 0:
        start = index.start or 0
        stop = index.stop if index.stop is not None else dim_shape
    else:
        start = index.start or dim_shape - 1
        start = dim_shape - 1 if start >= dim_shape else start
        stop = -(dim_shape + 1) if index.stop is None else index.stop

    if start < 0:
        start += dim_shape
    if stop < 0:
        stop += dim_shape

    d = dict()
    if step > 0:
        for i, length in enumerate(lengths):
            if start < length and stop > 0:
                d[i] = slice(start, min(stop, length), step)
                start = (start - length) % step
            else:
                start = start - length
            stop -= length
    else:
        stop -= dim_shape
        tail_index = list(accumulate(add, lengths))
        pos_step = abs(step)  # 11%3==2, 11%-3==-1. Need positive step for %
        offset = 0
        # ......... the rest of the code is omitted here .........
Author: kastnerkyle | Project: dask | Lines: 101 | Source file: slicing.py
Note: The toolz.accumulate examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; copyright of the source code belongs to the original authors, and any distribution or use should follow the license of the corresponding project. Do not reproduce without permission.