This article collects typical usage examples of the Python toolz.concatv function. If you have been wondering what concatv does, how it is called, or what real-world concatv code looks like, the curated examples below should help.
It presents 20 code examples of the concatv function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
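Before the examples, a minimal sketch of the function itself may help: concatv accepts any number of iterables as separate positional arguments and returns a lazy iterator over their elements in order. It is the variadic sibling of toolz.concat, which takes a single iterable of iterables.

from toolz import concat, concatv

# concatv takes the iterables as separate arguments...
list(concatv([1, 2], (3, 4), iter([5])))  # [1, 2, 3, 4, 5]

# ...while concat takes one iterable of iterables.
list(concat([[1, 2], (3, 4)]))            # [1, 2, 3, 4]

# The result is lazy: nothing is consumed until you iterate.
lazy = concatv([1, 2], [3, 4])
next(lazy)                                # 1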
Example 1: test_id_macro_dataset
def test_id_macro_dataset(self):
    """
    input (self.macro_df):

       asof_date  timestamp  value
    0 2014-01-01 2014-01-01      0
    3 2014-01-02 2014-01-02      1
    6 2014-01-03 2014-01-03      2

    output (expected):

                               value
    2014-01-01 Equity(65 [A])      0
               Equity(66 [B])      0
               Equity(67 [C])      0
    2014-01-02 Equity(65 [A])      1
               Equity(66 [B])      1
               Equity(67 [C])      1
    2014-01-03 Equity(65 [A])      2
               Equity(66 [B])      2
               Equity(67 [C])      2
    """
    asset_info = asset_infos[0][0]
    nassets = len(asset_info)
    with tmp_asset_finder() as finder:
        expected = pd.DataFrame(
            list(concatv([0] * nassets, [1] * nassets, [2] * nassets)),
            index=pd.MultiIndex.from_product((
                self.macro_df.timestamp,
                finder.retrieve_all(asset_info.index),
            )),
            columns=("value",),
        )
        self._test_id(
            self.macro_df, self.macro_dshape, expected, finder, ("value",),
        )
Author: easysg | Project: zipline | Lines: 29 | Source: test_blaze.py
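The concatv idiom in this test builds one flat column of repeated values, one run per date. Since concatv yields lazily, the result is wrapped in list() before being handed to pd.DataFrame. A standalone sketch of just that step, with nassets fixed to 3 to match the docstring:

from toolz import concatv

nassets = 3  # three equities, as in the docstring above
values = list(concatv([0] * nassets, [1] * nassets, [2] * nassets))
print(values)  # [0, 0, 0, 1, 1, 1, 2, 2, 2]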
Example 2: path
def path(graph, source, target, excluded_edges=None, ooc_types=ooc_types):
    """ Path of functions between two types """
    if not isinstance(source, type):
        source = type(source)
    if not isinstance(target, type):
        target = type(target)

    for cls in concatv(source.mro(), _virtual_superclasses):
        if cls in graph:
            source = cls
            break

    # If both source and target are Out-Of-Core types then restrict ourselves
    # to the graph of out-of-core types
    if ooc_types:
        oocs = tuple(ooc_types)
        if issubclass(source, oocs) and issubclass(target, oocs):
            graph = graph.subgraph([n for n in graph.nodes()
                                    if issubclass(n, oocs)])

    with without_edges(graph, excluded_edges) as g:
        pth = nx.shortest_path(g, source=source, target=target, weight='cost')
        edge = graph.edge

        def path_part(src, tgt):
            node = edge[src][tgt]
            return PathPart(src, tgt, node['func'], node['cost'])

        return map(path_part, pth, pth[1:])
Author: Curezhang | Project: odo | Lines: 28 | Source: core.py
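Here concatv extends the class's MRO with odo's module-level _virtual_superclasses so the graph lookup also considers virtual bases. A reduced sketch of the same walk, using hypothetical placeholder classes and a placeholder tuple:

from toolz import concatv

class Base:
    pass

class Child(Base):
    pass

_virtual_superclasses = (object,)  # placeholder for odo's real tuple

# Walk the MRO first, then fall back to the virtual superclasses.
for cls in concatv(Child.mro(), _virtual_superclasses):
    print(cls.__name__)  # Child, Base, object, object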
Example 3: merge_ownership_periods
def merge_ownership_periods(mappings):
    """
    Given a dict of mappings where the values are lists of
    OwnershipPeriod objects, returns a dict with the same structure with
    new OwnershipPeriod objects adjusted so that the periods have no
    gaps.

    Orders the periods chronologically, and pushes forward the end date
    of each period to match the start date of the following period. The
    end date of the last period is pushed forward to the max Timestamp.
    """
    return valmap(
        lambda v: tuple(
            OwnershipPeriod(
                a.start,
                b.start,
                a.sid,
                a.value,
            ) for a, b in sliding_window(
                2,
                concatv(
                    sorted(v),
                    # concat with a fake ownership object to make the last
                    # end date be max timestamp
                    [OwnershipPeriod(
                        pd.Timestamp.max.tz_localize('utc'),
                        None,
                        None,
                        None,
                    )],
                ),
            )
        ),
        mappings,
    )
Author: chrisvasquez | Project: zipline | Lines: 35 | Source: assets.py
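The interesting move is the sentinel: concatv appends one fake period so that sliding_window(2, ...) still yields a pair for the last real period, whose end date then becomes pd.Timestamp.max. The same shape with plain numbers (hypothetical data, inf standing in for the max timestamp):

from toolz import concatv, sliding_window

starts = [3, 1, 2]       # unsorted period start points (hypothetical)
SENTINEL = float('inf')  # stands in for pd.Timestamp.max

# Each period runs from its own start to the next period's start;
# the sentinel gives the final period an explicit end.
periods = [(a, b)
           for a, b in sliding_window(2, concatv(sorted(starts), [SENTINEL]))]
print(periods)  # [(1, 2), (2, 3), (3, inf)]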
Example 4: test_id_macro_dataset
def test_id_macro_dataset(self):
    expr = bz.Data(self.macro_df, name='expr', dshape=self.macro_dshape)
    loader = BlazeLoader()
    ds = from_blaze(
        expr,
        loader=loader,
        no_deltas_rule=no_deltas_rules.ignore,
    )
    p = Pipeline()
    p.add(ds.value.latest, 'value')
    dates = self.dates

    asset_info = asset_infos[0][0]
    with tmp_asset_finder(equities=asset_info) as finder:
        result = SimplePipelineEngine(
            loader,
            dates,
            finder,
        ).run_pipeline(p, dates[0], dates[-1])

    nassets = len(asset_info)
    expected = pd.DataFrame(
        list(concatv([0] * nassets, [1] * nassets, [2] * nassets)),
        index=pd.MultiIndex.from_product((
            self.macro_df.timestamp,
            finder.retrieve_all(asset_info.index),
        )),
        columns=('value',),
    )
    assert_frame_equal(result, expected, check_dtype=False)
Author: larssonandreas | Project: zipline | Lines: 30 | Source: test_blaze.py
Example 5: __new__
def __new__(mcls, name, bases, dict_):
    self = super().__new__(mcls, name, bases, dict_)
    if len(bases) and bases[0] is ADT:
        self._typevars = dict_._typevars
        self._constructors = tuple(dict_._constructors.values())

        constructors = set(self._constructors)
        for constructor in constructors:
            types = concatv(
                constructor._args,
                constructor._kwargs.values(),
            )
            for t in types:
                if isinstance(t, RecursiveType) and t._name != name:
                    raise TypeError(
                        'recursive type name must be the same as the type'
                        ' name, %r != %r' % (
                            t._name,
                            name,
                        ),
                    )
                if t in constructors:
                    raise TypeError(
                        'constructor %r has arguments that are other'
                        ' constructors' % constructor,
                    )

        if not self._typevars:
            return adt(self, ())
    return self
Author: llllllllll | Project: adt | Lines: 28 | Source: adt.py
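In this metaclass, concatv lets one loop validate both the positional argument types (_args) and the keyword argument types (_kwargs.values()) of each constructor. The core move in isolation, with illustrative stand-in values:

from toolz import concatv

args = (int, str)        # stand-in for constructor._args
kwargs = {'flag': bool}  # stand-in for constructor._kwargs

# One pass over every declared type, positional and keyword alike.
for t in concatv(args, kwargs.values()):
    print(t.__name__)    # int, str, bool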
Example 6: compute_sorted_frame
def compute_sorted_frame(df, order_by, group_by=(), **kwargs):
    computed_sort_keys = []
    sort_keys = list(toolz.concatv(group_by, order_by))
    ascending = [getattr(key.op(), 'ascending', True) for key in sort_keys]
    new_columns = {}

    for i, key in enumerate(map(operator.methodcaller('op'), sort_keys)):
        computed_sort_key, temporary_column = compute_sort_key(
            key, df, **kwargs
        )
        computed_sort_keys.append(computed_sort_key)
        if temporary_column is not None:
            new_columns[computed_sort_key] = temporary_column

    result = df.assign(**new_columns)
    result = result.sort_values(
        computed_sort_keys, ascending=ascending, kind='mergesort'
    )
    # TODO: we'll eventually need to return this frame with the temporary
    # columns and drop them in the caller (maybe using post_execute?)
    ngrouping_keys = len(group_by)
    return (
        result,
        computed_sort_keys[:ngrouping_keys],
        computed_sort_keys[ngrouping_keys:],
    )
Author: cloudera | Project: ibis | Lines: 27 | Source: util.py
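Note the ordering here: list(toolz.concatv(group_by, order_by)) puts grouping keys ahead of ordering keys, so the stable mergesort orders rows by group first and by the sort keys within each group. In isolation, with hypothetical column names:

import toolz

group_by = ['region']         # hypothetical grouping key
order_by = ['price', 'date']  # hypothetical ordering keys

# Grouping keys come first so rows sort by group, then within each group.
sort_keys = list(toolz.concatv(group_by, order_by))
print(sort_keys)  # ['region', 'price', 'date']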
Example 7: symbol_ownership_map
def symbol_ownership_map(self):
    rows = sa.select(self.equity_symbol_mappings.c).execute().fetchall()

    mappings = {}
    for row in rows:
        mappings.setdefault(
            (row.company_symbol, row.share_class_symbol),
            [],
        ).append(
            SymbolOwnership(
                pd.Timestamp(row.start_date, unit="ns", tz="utc"),
                pd.Timestamp(row.end_date, unit="ns", tz="utc"),
                row.sid,
                row.symbol,
            )
        )

    return valmap(
        lambda v: tuple(
            SymbolOwnership(a.start, b.start, a.sid, a.symbol)
            for a, b in sliding_window(
                2,
                concatv(
                    sorted(v),
                    # concat with a fake ownership object to make the last
                    # end date be max timestamp
                    [SymbolOwnership(pd.Timestamp.max.tz_localize("utc"), None, None, None)],
                ),
            )
        ),
        mappings,
        factory=lambda: mappings,
    )
Author: RoyHsiao | Project: zipline | Lines: 30 | Source: assets.py
Example 8: _collect_variables
def _collect_variables(names, expressions=None):
    """
    Map labels and expressions to registered variables.
    Handles argument matching.

    Example:

        _collect_variables(names=['zones', 'zone_id'],
                           expressions=['parcels.zone_id'])

    Would return a dict representing:

        {'zones': <DataFrameWrapper for zones>,
         'zone_id': <pandas.Series for parcels.zone_id>}

    Parameters
    ----------
    names : list of str
        List of registered variable names and/or labels.
        If mixing names and labels, labels must come at the end.
    expressions : list of str, optional
        List of registered variable expressions for labels defined
        at end of `names`. Length must match the number of labels.

    Returns
    -------
    variables : dict
        Keys match `names`. Values correspond to registered variables,
        which may be wrappers or evaluated functions if appropriate.
    """
    # Map registered variable labels to expressions.
    if not expressions:
        expressions = []
    offset = len(names) - len(expressions)
    labels_map = dict(toolz.concatv(
        toolz.compatibility.zip(names[:offset], names[:offset]),
        toolz.compatibility.zip(names[offset:], expressions)))

    all_variables = toolz.merge(_INJECTABLES, _TABLES)
    variables = {}
    for label, expression in labels_map.items():
        # In the future, more registered variable expressions could be
        # supported. Currently supports names of registered variables
        # and references to table columns.
        if '.' in expression:
            # Registered variable expression refers to column.
            table_name, column_name = expression.split('.')
            table = get_table(table_name)
            variables[label] = table.get_column(column_name)
        else:
            thing = all_variables[expression]
            if isinstance(thing, (_InjectableFuncWrapper, TableFuncWrapper)):
                # Registered variable object is function.
                variables[label] = thing()
            else:
                variables[label] = thing

    return variables
Author: advancedpartnerships | Project: urbansim | Lines: 60 | Source: simulation.py
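The labels_map construction is a compact way to build one dict from two aligned pairings: plain names map to themselves, trailing labels map to their expressions. A standalone sketch; the builtin zip replaces toolz.compatibility.zip, which existed only for Python 2 support:

import toolz

names = ['zones', 'zone_id']            # one plain name, then one label
expressions = ['parcels.zone_id']       # expression for the trailing label
offset = len(names) - len(expressions)  # 1

labels_map = dict(toolz.concatv(
    zip(names[:offset], names[:offset]),  # 'zones' -> 'zones'
    zip(names[offset:], expressions)))    # 'zone_id' -> 'parcels.zone_id'
print(labels_map)  # {'zones': 'zones', 'zone_id': 'parcels.zone_id'}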
Example 9: paginate_url
def paginate_url(url, do_request):
    """Given a DZ_RS_URL crawl through pages using pagination logic"""
    # we can't cache yet cookies and POST requests
    do_request = partial(do_request, use_cache=False)

    def request_page(prefix, url, page_number):
        data = {
            prefix: prefix,
            '{}:menu1'.format(prefix): 'VII',
            '{}:menu2'.format(prefix): 'SEJ_ZAP_KON | MAG | DOK | fa_dokument | fa_sklicSeje | fa_program | fa_sklep',
            '{}:txtQueryString'.format(prefix): '',
            '{}:tableEx1:goto1__pagerGoText'.format(prefix): str(page_number),
            '{}:tableEx1:goto1__pagerGoButton'.format(prefix): 'Go',
            '{}:tableEx1:goto1__pagerGoButton.x'.format(prefix): '8',
            '{}:tableEx1:goto1__pagerGoButton.y'.format(prefix): '10',
            'javax.faces.ViewState': doc('input#javax\.faces\.ViewState').attr('value'),
        }
        return do_request(url, method='post', data=data)

    # get first page
    doc = do_request(url)
    num_pages = int(re.search(r'(\d+)$', doc('.pagerDeluxe_text').text()).groups()[0])
    logger.info('paginating', url=url, num_pages=num_pages)

    # prepare data for pagination
    pagination_form = doc('form')
    prefix = pagination_form.attr('id')
    url = DZ_RS_URL + pagination_form.attr('action')
    request_page = partial(request_page, prefix, url)

    # get the 2nd and the rest of the pages using pagination
    return toolz.concatv([doc], map(request_page, range(2, num_pages + 1)))
Author: domenkozar | Project: zakonodajni-monitor-parser | Lines: 32 | Source: parsers.py
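concatv([doc], map(request_page, ...)) prepends the already-fetched first page to a lazy map of the remaining requests, so later pages are only fetched as the caller iterates. The pattern by itself, with a hypothetical fetch function:

from toolz import concatv

def fetch(page_number):  # hypothetical stand-in for request_page
    print('fetching page', page_number)
    return 'page %d' % page_number

first = 'page 1'  # already fetched, like `doc` above
pages = concatv([first], map(fetch, range(2, 4)))

print(next(pages))  # 'page 1' -- no fetch has happened yet
print(next(pages))  # prints 'fetching page 2', then 'page 2'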
Example 10: test_novel_deltas_macro
def test_novel_deltas_macro(self):
    asset_info = asset_infos[0][0]
    base_dates = pd.DatetimeIndex([
        pd.Timestamp('2014-01-01'),
        pd.Timestamp('2014-01-04')
    ])
    baseline = pd.DataFrame({
        'value': (0, 1),
        'asof_date': base_dates,
        'timestamp': base_dates,
    })
    expr = bz.Data(baseline, name='expr', dshape=self.macro_dshape)
    deltas = bz.Data(baseline, name='deltas', dshape=self.macro_dshape)
    deltas = bz.transform(
        deltas,
        value=deltas.value + 10,
        timestamp=deltas.timestamp + timedelta(days=1),
    )
    nassets = len(asset_info)
    expected_views = keymap(pd.Timestamp, {
        '2014-01-03': repeat_last_axis(
            np.array([10.0, 10.0, 10.0]),
            nassets,
        ),
        '2014-01-06': repeat_last_axis(
            np.array([10.0, 10.0, 11.0]),
            nassets,
        ),
    })
    cal = pd.DatetimeIndex([
        pd.Timestamp('2014-01-01'),
        pd.Timestamp('2014-01-02'),
        pd.Timestamp('2014-01-03'),
        # omitting the 4th and 5th to simulate a weekend
        pd.Timestamp('2014-01-06'),
    ])
    with tmp_asset_finder(equities=asset_info) as finder:
        expected_output = pd.DataFrame(
            list(concatv([10] * nassets, [11] * nassets)),
            index=pd.MultiIndex.from_product((
                sorted(expected_views.keys()),
                finder.retrieve_all(asset_info.index),
            )),
            columns=('value',),
        )
        self._run_pipeline(
            expr,
            deltas,
            expected_views,
            expected_output,
            finder,
            calendar=cal,
            start=cal[2],
            end=cal[-1],
            window_length=3,
            compute_fn=op.itemgetter(-1),
        )
Author: larssonandreas | Project: zipline | Lines: 59 | Source: test_blaze.py
Example 11: choosers_columns_used
def choosers_columns_used(self):
    """
    Columns from the choosers table that are used for filtering.
    """
    return list(toolz.unique(toolz.concatv(
        util.columns_in_filters(self.choosers_predict_filters),
        util.columns_in_filters(self.choosers_fit_filters))))
Author: egeriicw | Project: urbansim | Lines: 8 | Source: lcm.py
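This list(toolz.unique(toolz.concatv(...))) idiom, which recurs in several of the urbansim examples below, computes an order-preserving, de-duplicated union of column lists:

import toolz

predict_cols = ['zone_id', 'price']  # hypothetical filter columns
fit_cols = ['price', 'sqft']

# Order-preserving union: duplicates are dropped, first occurrence wins.
cols = list(toolz.unique(toolz.concatv(predict_cols, fit_cols)))
print(cols)  # ['zone_id', 'price', 'sqft']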
Example 12: execute_selection_dataframe
def execute_selection_dataframe(op, data, scope=None, **kwargs):
    selections = op.selections
    predicates = op.predicates
    sort_keys = op.sort_keys
    result = data

    # Build up the individual pandas structures from column expressions
    if selections:
        data_pieces = []
        for selection in selections:
            pandas_object = compute_projection(
                selection, op, data, scope=scope, **kwargs
            )
            data_pieces.append(pandas_object)

        new_pieces = [
            piece.reset_index(
                level=list(range(1, piece.index.nlevels)), drop=True
            )
            if piece.index.nlevels > 1
            else piece
            for piece in data_pieces
        ]
        result = pd.concat(new_pieces, axis=1)

    if predicates:
        predicates = _compute_predicates(
            op.table.op(), predicates, data, scope, **kwargs
        )
        predicate = functools.reduce(operator.and_, predicates)
        assert len(predicate) == len(
            result
        ), 'Selection predicate length does not match underlying table'
        result = result.loc[predicate]

    if sort_keys:
        result, grouping_keys, ordering_keys = util.compute_sorted_frame(
            result, order_by=sort_keys, scope=scope, **kwargs
        )
    else:
        grouping_keys = ordering_keys = ()

    # return early if we do not have any temporary grouping or ordering columns
    assert not grouping_keys, 'group by should never show up in Selection'
    if not ordering_keys:
        return result

    # create a sequence of columns that we need to drop
    temporary_columns = pd.Index(
        concatv(grouping_keys, ordering_keys)
    ).difference(data.columns)

    # no reason to call drop if we don't need to
    if temporary_columns.empty:
        return result

    # drop every temporary column we created for ordering or grouping
    return result.drop(temporary_columns, axis=1)
Author: cloudera | Project: ibis | Lines: 58 | Source: selection.py
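At the end, pd.Index(concatv(grouping_keys, ordering_keys)).difference(data.columns) collects the synthetic sort columns that were not part of the input frame. The set arithmetic on its own, with hypothetical column names:

import pandas as pd
from toolz import concatv

grouping_keys = ()                        # asserted empty above
ordering_keys = ('_sort_key_0', 'price')  # 'price' is a real data column

data_columns = pd.Index(['price', 'qty'])
temporary = pd.Index(concatv(grouping_keys, ordering_keys)).difference(data_columns)
print(list(temporary))  # ['_sort_key_0'] -- only the synthetic column remains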
Example 13: columns_used
def columns_used(self):
    """
    Returns all the columns used in this model for filtering
    and in the model expression.
    """
    return list(toolz.unique(toolz.concatv(
        util.columns_in_filters(self.fit_filters),
        util.columns_in_filters(self.predict_filters),
        util.columns_in_formula(self.model_expression))))
Author: advancedpartnerships | Project: urbansim | Lines: 10 | Source: regression.py
Example 14: columns_used
def columns_used(self):
    """
    Columns from any table used in the model. May come from either
    the choosers or alternatives tables.
    """
    return list(toolz.unique(toolz.concatv(
        self.choosers_columns_used(),
        self.alts_columns_used(),
        self.interaction_columns_used())))
Author: egeriicw | Project: urbansim | Lines: 10 | Source: lcm.py
Example 15: columns_used
def columns_used(self):
    """
    Returns all the columns used across all models in the group
    for filtering and in the model expression.
    """
    return list(toolz.unique(toolz.concatv(
        util.columns_in_filters(self.fit_filters),
        util.columns_in_filters(self.predict_filters),
        self._group.columns_used())))
Author: egeriicw | Project: urbansim | Lines: 10 | Source: regression.py
Example 16: interaction_columns_used
def interaction_columns_used(self):
    """
    Columns from the interaction dataset used for filtering and in
    the model. These may come originally from either the choosers or
    alternatives tables.
    """
    return list(toolz.unique(toolz.concatv(
        util.columns_in_filters(self.interaction_predict_filters),
        util.columns_in_formula(self.model_expression))))
Author: egeriicw | Project: urbansim | Lines: 10 | Source: lcm.py
Example 17: test_deltas
def test_deltas(self, asset_info):
    expr = bz.Data(self.df, name='expr', dshape=self.dshape)
    deltas = bz.Data(self.df, dshape=self.dshape)
    deltas = bz.Data(
        odo(
            bz.transform(
                deltas,
                value=deltas.value + 10,
                timestamp=deltas.timestamp + timedelta(days=1),
            ),
            pd.DataFrame,
        ),
        name='delta',
        dshape=self.dshape,
    )
    expected_views = keymap(pd.Timestamp, {
        '2014-01-02': np.array([[10.0, 11.0, 12.0],
                                [1.0, 2.0, 3.0]]),
        '2014-01-03': np.array([[11.0, 12.0, 13.0],
                                [2.0, 3.0, 4.0]]),
        '2014-01-04': np.array([[12.0, 13.0, 14.0],
                                [12.0, 13.0, 14.0]]),
    })
    nassets = len(asset_info)
    if nassets == 4:
        expected_views = valmap(
            lambda view: np.c_[view, [np.nan, np.nan]],
            expected_views,
        )
    with tmp_asset_finder(equities=asset_info) as finder:
        expected_output = pd.DataFrame(
            list(concatv([12] * nassets, [13] * nassets, [14] * nassets)),
            index=pd.MultiIndex.from_product((
                sorted(expected_views.keys()),
                finder.retrieve_all(asset_info.index),
            )),
            columns=('value',),
        )
        dates = self.dates
        dates = dates.insert(len(dates), dates[-1] + timedelta(days=1))
        self._run_pipeline(
            expr,
            deltas,
            expected_views,
            expected_output,
            finder,
            calendar=dates,
            start=dates[1],
            end=dates[-1],
            window_length=2,
            compute_fn=np.nanmax,
        )
Author: AlexanderAA | Project: zipline | Lines: 55 | Source: test_blaze.py
Example 18: render_tabular
def render_tabular(api, options=None):
    """Entry point for the tabular reporter interface."""
    # determine separator
    separator = options.get('report.separator', '\t')
    human = options.get('report.human')
    panel = options.get('report.panel')
    samples = options.get('report.samples')
    group = options.get('report.group')

    # read gene panel file if it has been set
    if panel:
        superblock_ids = [line.rstrip() for line in panel]
    else:
        superblock_ids = None

    # get sample ID, group and cutoff from metadata
    sample_query = limit_query(api.samples(), group=group, samples=samples)
    metadata = ((sample.id, sample.group_id, sample.cutoff)
                for sample in sample_query)

    # get the data
    base_query = limit_query(api.average_metrics(superblock_ids=superblock_ids),
                             group=group,
                             samples=samples)

    queries = [metadata,
               base_query,
               api.diagnostic_yield(superblock_ids=superblock_ids,
                                    group_id=group, sample_ids=samples),
               api.sex_checker(group_id=group, sample_ids=samples)]

    # group multiple queries by sample ID (first column)
    key_metrics = groupby(get(0), concat(queries))

    # get the column names dynamically from the query
    headers = concatv(['sample_id', 'group_id', 'cutoff'],
                      (column['name'] for column
                       in base_query.column_descriptions),
                      ['diagnostic yield', 'gender'])
    unique_headers = unique(headers)

    # iterate over all values, concat different query results, and keep
    # only the unique values (excluding second sample_id)
    data = (unique(concat(values)) for values in itervalues(key_metrics))

    if human:
        # export key_metrics in a more human friendly format
        return tabulate(data, unique_headers)

    # yield headers
    return '\n'.join(cons('#' + separator.join(unique_headers),
                          stringify_list(data, separator=separator)))
Author: BadSeby | Project: chanjo-report | Lines: 53 | Source: core.py
Example 19: columns_used
def columns_used(self):
    """
    Columns from any table used in the model. May come from either
    the choosers or alternatives tables.
    """
    return list(toolz.unique(toolz.concatv(
        self.choosers_columns_used(),
        self.alts_columns_used(),
        self.interaction_columns_used(),
        util.columns_in_formula(self.default_model_expr),
        [self.segmentation_col])))
Author: cDoru | Project: urbansim | Lines: 12 | Source: lcm.py
Example 20: iter_enumerations
def iter_enumerations():
    integers_or_symbols = concatv(
        find(children, type='integer'),
        find(children, type='symbol'),
    )
    values = list(pluck('value', integers_or_symbols))
    if values:
        yield make_json_ast_node(
            type='enumeration_values',
            values=values,
        )
    intervals = find_many_or_none(children, type='interval')
    if intervals is not None:
        yield from intervals
Author: openfisca | Project: calculette-impots-m-language-parser | Lines: 14 | Source: m_source_file_to_json_ast.py
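Here concatv merges two filtered node lists before pluck extracts their 'value' fields. A reduced sketch with inline stand-ins for the find() results:

from toolz import concatv, pluck

integers = [{'type': 'integer', 'value': 1}]   # stand-in for find(children, type='integer')
symbols = [{'type': 'symbol', 'value': 'EM'}]  # stand-in for find(children, type='symbol')

values = list(pluck('value', concatv(integers, symbols)))
print(values)  # [1, 'EM']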
Note: The toolz.concatv examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by many developers, and copyright remains with the original authors; consult each project's license before redistributing or reusing the code. Do not reproduce this article without permission.