本文整理汇总了Python中moztelemetry.dataset.Dataset类的典型用法代码示例。如果您正苦于以下问题:Python Dataset类的具体用法?Python Dataset怎么用?Python Dataset使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Dataset类的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: test_where_exact_match
def test_where_exact_match():
    """where() with a literal value returns a new Dataset whose clause matches it."""
    dataset = Dataset('test-bucket', ['dim1', 'dim2'], prefix='prefix/')
    new_dataset = dataset.where(dim1='my-value')
    # where() must not mutate the original dataset
    assert new_dataset is not dataset
    # dict.keys() is a view object on Python 3 and never equals a list;
    # convert to list before comparing (matches test_scan_multiple_params).
    assert list(new_dataset.clauses.keys()) == ['dim1']
    condition = new_dataset.clauses['dim1']
    # The generated clause accepts exactly the value given to where()
    assert condition('my-value')
开发者ID:whd,项目名称:python_moztelemetry,代码行数:7,代码来源:test_dataset.py
示例2: test_select
def test_select():
    """Positional, keyword, and mixed select() arguments all record a selection."""
    positional = Dataset('test-bucket', ['dim1', 'dim2']).select('field1', 'field2')
    mixed = Dataset('test-bucket', ['dim1', 'dim2']).select('field1', field2='field2')
    keyword = Dataset('test-bucket', ['dim1', 'dim2']).select(field1='field1', field2='field2')

    # A bare positional name maps to itself.
    assert positional.selection == {'field1': 'field1', 'field2': 'field2'}
    # All three spellings produce the same selection.
    assert positional.selection == mixed.selection == keyword.selection

    renamed = Dataset('test-bucket', ['dim1', 'dim2']).select('field1', field2='f2', field3='f3')
    assert renamed.selection == {'field1': 'field1', 'field2': 'f2', 'field3': 'f3'}

    # Chained select() calls accumulate on top of the existing selection.
    chained = renamed.select('field4', field5='f5')
    assert chained.selection == {
        'field1': 'field1',
        'field2': 'f2',
        'field3': 'f3',
        'field4': 'field4',
        'field5': 'f5',
    }
开发者ID:mozilla,项目名称:python_moztelemetry,代码行数:29,代码来源:test_dataset.py
示例3: aggregate_metrics
def aggregate_metrics(sc, channels, submission_date, main_ping_fraction=1, fennec_ping_fraction=1, num_reducers=10000):
    """ Returns the build-id and submission date aggregates for a given submission date.

    :param sc: A SparkContext instance
    :param channels: Either the name of a channel or a list/tuple of names
    :param submission_date: The submission date for which the data will be aggregated
    :param main_ping_fraction: Approximate fraction of 'main' pings sampled for aggregation
    :param fennec_ping_fraction: Approximate fraction of Fennec 'saved_session' pings sampled
    :param num_reducers: NOTE(review): accepted but not used in this function body -- confirm intent
    """
    # Normalize a single channel name into a set for the membership tests below.
    if not isinstance(channels, (tuple, list)):
        channels = [channels]
    channels = set(channels)
    # Desktop 'main' pings for the requested channels; Fennec is excluded here
    # because it is handled separately below.
    pings = Dataset.from_source('telemetry') \
        .where(appUpdateChannel=lambda x: x in channels,
               submissionDate=submission_date,
               docType='main',
               sourceVersion='4',
               appName=lambda x: x != 'Fennec') \
        .records(sc, sample=main_ping_fraction)
    # Fennec data arrives as 'saved_session' pings rather than 'main' pings.
    fennec_pings = Dataset.from_source('telemetry') \
        .where(appUpdateChannel=lambda x: x in channels,
               submissionDate=submission_date,
               docType='saved_session',
               sourceVersion='4',
               appName='Fennec') \
        .records(sc, sample=fennec_ping_fraction)
    all_pings = pings.union(fennec_pings)
    return _aggregate_metrics(all_pings)
开发者ID:mozilla,项目名称:python_mozaggregator,代码行数:30,代码来源:aggregator.py
示例4: test_where
def test_where():
    """A callable clause passed to where() is stored verbatim on a new copy."""
    always_true = lambda x: True
    original = Dataset('test-bucket', ['dim1', 'dim2'], prefix='prefix/')
    filtered = original.where(dim1=always_true)
    # where() returns a fresh dataset carrying exactly the supplied clause.
    assert filtered is not original
    assert filtered.clauses == {'dim1': always_true}
开发者ID:whd,项目名称:python_moztelemetry,代码行数:7,代码来源:test_dataset.py
示例5: test_scan_multiple_params
def test_scan_multiple_params():
    """Filtering on one dimension records a single clause accepting the value."""
    base = Dataset('test-bucket', ['dim1', 'dim2'], prefix='prefix/')
    filtered = base.where(dim1='myvalue')
    assert filtered is not base
    # Iterating a dict yields its keys; only dim1 should be constrained.
    assert list(filtered.clauses) == ['dim1']
    predicate = filtered.clauses['dim1']
    assert predicate('myvalue')
开发者ID:mozilla,项目名称:python_moztelemetry,代码行数:7,代码来源:test_dataset.py
示例6: test_where_wrong_dimension
def test_where_wrong_dimension():
    """where() on a dimension absent from the schema raises with a clear message."""
    dataset = Dataset('test-bucket', ['dim1', 'dim2'], prefix='prefix/')
    clause = lambda x: True
    with pytest.raises(Exception) as exc_info:
        # No assignment: where() raises before returning, so binding the
        # result was dead code (previously an unused local).
        dataset.where(dim3=clause)
    assert str(exc_info.value) == 'The dimension dim3 doesn\'t exist'
开发者ID:whd,项目名称:python_moztelemetry,代码行数:8,代码来源:test_dataset.py
示例7: test_scan_multiple_where_params
def test_scan_multiple_where_params(spark_context):
    """summaries() honours clauses on several dimensions at once."""
    store = InMemoryStore('test-bucket')
    store.store['dir1/subdir1/key1'] = 'value1'
    store.store['dir1/another-dir/key2'] = 'value2'
    filtered = Dataset('test-bucket', ['dim1', 'dim2'], store=store) \
        .where(dim1='dir1', dim2='subdir1')
    result = filtered.summaries(spark_context)
    # Only the key matching both dimension clauses survives.
    matching_key = 'dir1/subdir1/key1'
    assert result == [{'key': matching_key, 'size': len(store.store[matching_key])}]
开发者ID:Dexterp37,项目名称:python_moztelemetry,代码行数:9,代码来源:test_dataset.py
示例8: test_where_dupe_dimension
def test_where_dupe_dimension():
    """Adding a second clause for an already-constrained dimension raises."""
    clause = lambda x: True
    dataset = Dataset('test-bucket', ['dim1', 'dim2'], prefix='prefix/',
                      clauses={'dim1': clause})
    with pytest.raises(Exception) as exc_info:
        # No assignment: where() raises before producing a dataset, so the
        # previous `new_dataset = ...` binding was an unused local.
        dataset.where(dim1=clause)
    assert str(exc_info.value) == 'There should be only one clause for dim1'
开发者ID:whd,项目名称:python_moztelemetry,代码行数:9,代码来源:test_dataset.py
示例9: test_records_limit_and_sample
def test_records_limit_and_sample(spark_context):
    """When both limit and sample are given, limit bounds the record count."""
    store = InMemoryStore('test-bucket')
    for index in range(1, 101):
        key = 'dir{}/subdir{}/key{}'.format(index, index, index)
        store.store[key] = 'value{}'.format(index)
    dataset = Dataset('test-bucket', ['dim1', 'dim2'], store=store)
    rdd = dataset.records(spark_context, decode=lambda x: x, limit=5, sample=0.9)
    assert rdd.count() == 5
开发者ID:mozilla,项目名称:python_moztelemetry,代码行数:10,代码来源:test_dataset.py
示例10: test_scan_with_prefix
def test_scan_with_prefix():
    """_scan honours the starting prefix and only descends matching folders."""
    store = InMemoryStore('test-bucket')
    store.store['prefix1/dir1/subdir1/key1'] = 'value1'
    store.store['prefix2/dir2/another-dir/key2'] = 'value2'
    dataset = Dataset('test-bucket', ['dim1', 'dim2'],
                      clauses={'dim1': lambda x: x == 'dir1'}, store=store)
    with futures.ProcessPoolExecutor(1) as executor:
        scanned = dataset._scan(['dim1', 'dim2'], ['prefix1/'], dataset.clauses, executor)
        # Only the folder under prefix1/ that satisfies the dim1 clause remains.
        assert list(scanned) == ['prefix1/dir1/']
开发者ID:whd,项目名称:python_moztelemetry,代码行数:10,代码来源:test_dataset.py
示例11: test_records
def test_records(spark_context):
    """records() fetches every stored value through the supplied decoder."""
    store = InMemoryStore('test-bucket')
    store.store['dir1/subdir1/key1'] = 'value1'
    store.store['dir2/subdir2/key2'] = 'value2'
    dataset = Dataset('test-bucket', ['dim1', 'dim2'], store=store)
    fetched = dataset.records(spark_context, decode=lambda x: x)
    # Raw (undecoded) payloads come back as bytes; order is not guaranteed.
    assert sorted(fetched.collect()) == [b'value1', b'value2']
开发者ID:mozilla,项目名称:python_moztelemetry,代码行数:10,代码来源:test_dataset.py
示例12: test_summaries_with_limit
def test_summaries_with_limit():
    """_summaries(limit) yields at most `limit` entries, each for a real key."""
    store = InMemoryStore('test-bucket')
    store.store['dir1/subdir1/key1'] = 'value1'
    store.store['dir2/subdir2/key2'] = 'value2'
    dataset = Dataset('test-bucket', ['dim1', 'dim2'], store=store)
    limited = list(dataset._summaries(1))
    assert len(limited) == 1
    # Whichever summary was produced, it must point at a stored key.
    assert limited[0]['key'] in store.store
开发者ID:whd,项目名称:python_moztelemetry,代码行数:11,代码来源:test_dataset.py
示例13: test_records_print_output
def test_records_print_output(spark_context, capsys):
    """records() prints a progress line with the total size and file count."""
    store = InMemoryStore('test-bucket')
    for n in range(1, 101):
        store.store['dir{0}/subdir{0}/key{0}'.format(n)] = 'value{}'.format(n)
    dataset = Dataset('test-bucket', ['dim1', 'dim2'], store=store)
    dataset.records(spark_context, decode=lambda x: x)
    captured_out, _ = capsys.readouterr()
    assert captured_out.rstrip() == "fetching 0.00066MB in 100 files..."
开发者ID:Dexterp37,项目名称:python_moztelemetry,代码行数:11,代码来源:test_dataset.py
示例14: test_scan_no_clause
def test_scan_no_clause():
    """Without any clauses, _scan passes the prefixes through untouched."""
    store = InMemoryStore('test-bucket')
    store.store['dir1/dir2/key1'] = 'value1'
    dataset = Dataset('test-bucket', ['dim1', 'dim2'], store=store)
    with futures.ProcessPoolExecutor(1) as executor:
        result = dataset._scan(['dim1', 'subdir'], ['prefix'], {}, executor)
        # An empty clause dict means nothing is filtered out.
        assert list(result) == ['prefix']
开发者ID:mozilla,项目名称:python_moztelemetry,代码行数:11,代码来源:test_dataset.py
示例15: test_records_summaries
def test_records_summaries(spark_context):
    """Explicit summaries restrict records() to just the listed keys."""
    store = InMemoryStore('test-bucket')
    store.store['dir1/subdir1/key1'] = 'value1'
    store.store['dir2/subdir2/key2'] = 'value2'
    dataset = Dataset('test-bucket', ['dim1', 'dim2'], store=store, max_concurrency=1)
    only_first = dataset.records(
        spark_context, decode=lambda x: x,
        summaries=[{'key': 'dir1/subdir1/key1', 'size': len('value1')}])
    # The second stored key is never fetched.
    assert only_first.collect() == [b'value1']
开发者ID:mozilla,项目名称:python_moztelemetry,代码行数:11,代码来源:test_dataset.py
示例16: test_select_keep_state
def test_select_keep_state():
    """select() must change only the selection; all other state carries over."""
    before = Dataset('test-bucket', ['dim1', 'dim2']).where(dim1=True)
    after = before.select('field1', 'field2')
    assert before.selection != after.selection
    # Every other attribute is preserved on the new dataset.
    for attribute in ('bucket', 'schema', 'store', 'prefix', 'clauses'):
        assert getattr(before, attribute) == getattr(after, attribute)
开发者ID:mozilla,项目名称:python_moztelemetry,代码行数:11,代码来源:test_dataset.py
示例17: test_summaries_with_limit
def test_summaries_with_limit(spark_context):
    """summaries() with a limit returns exactly that many entries for real keys."""
    store = InMemoryStore('test-bucket')
    store.store['dir1/subdir1/key1'] = 'value1'
    store.store['dir2/subdir2/key2'] = 'value2'
    dataset = Dataset('test-bucket', ['dim1', 'dim2'], store=store, max_concurrency=1)
    result = dataset.summaries(spark_context, 1)
    assert len(result) == 1
    # Whichever summary was chosen, it must refer to a stored key.
    assert result[0]['key'] in store.store
开发者ID:mozilla,项目名称:python_moztelemetry,代码行数:11,代码来源:test_dataset.py
示例18: test_records_many_groups
def test_records_many_groups(spark_context, monkeypatch):
    """records() returns everything even with more groups than partitions."""
    total = spark_context.defaultParallelism + 1
    store = InMemoryStore('test-bucket')
    for n in range(1, total + 1):
        store.store['dir1/subdir1/key{}'.format(n)] = 'value{}'.format(n)
    # Force one group per file so the group count exceeds default parallelism.
    monkeypatch.setattr(moztelemetry.dataset, '_group_by_size', lambda x: [[y] for y in x])
    dataset = Dataset('test-bucket', ['dim1', 'dim2'], store=store)
    collected = dataset.records(spark_context, decode=lambda x: x).collect()
    assert collected == ['value{}'.format(n) for n in range(1, total + 1)]
开发者ID:whd,项目名称:python_moztelemetry,代码行数:12,代码来源:test_dataset.py
示例19: test_records_object
def test_records_object(spark_context):
    """A JSON-encoded record decodes back into the original dict."""
    expected = {"uid": 1}
    store = InMemoryStore('test-bucket')
    store.store['key'] = json.dumps(expected)
    dataset = Dataset('test-bucket', None, store=store, max_concurrency=1)
    first_row = dataset.records(spark_context, decode=decode).first()
    assert isinstance(first_row, dict)
    assert first_row == expected
开发者ID:mozilla,项目名称:python_moztelemetry,代码行数:12,代码来源:test_dataset.py
示例20: test_select_dupe_properties
def test_select_dupe_properties():
    """Re-selecting an already-selected property raises, positionally or by keyword."""
    dataset = Dataset('test-bucket', ['dim1', 'dim2']).select('field1')
    # Both spellings of the duplicate must fail with the same message.
    for reselect in (lambda: dataset.select('field1'),
                     lambda: dataset.select(field1='keyword_field')):
        with pytest.raises(Exception) as exc_info:
            reselect()
        assert str(exc_info.value) == 'The property field1 has already been selected'
开发者ID:mozilla,项目名称:python_moztelemetry,代码行数:12,代码来源:test_dataset.py
注:本文中的moztelemetry.dataset.Dataset类示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。 |
请发表评论