本文整理汇总了Python中tests.sampledata.SampleData类的典型用法代码示例。如果您正苦于以下问题:Python SampleData类的具体用法?Python SampleData怎么用?Python SampleData使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了SampleData类的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: post_job_data
def post_job_data(
        project, uri, data, status=None, expect_errors=False):
    """POST job *data* for *project* to *uri*, signed with the sample
    OAuth credentials, and return the webtest response."""
    # The caller passes a bare path (not produced by a treeherder request
    # or collection), so it lacks scheme and host — add them here.
    full_uri = 'http://localhost{0}'.format(uri)

    # Register the sample credentials, then fetch the pair for this project.
    OAuthCredentials.set_credentials(SampleData.get_credentials())
    creds = OAuthCredentials.get_credentials(project)

    request = TreeherderRequest(
        protocol='http',
        host='localhost',
        project=project,
        oauth_key=creds['consumer_key'],
        oauth_secret=creds['consumer_secret']
    )
    signed_uri = request.get_signed_uri(json.dumps(data), full_uri)

    return TestApp(application).post_json(
        str(signed_uri),
        params=data,
        status=status,
        expect_errors=expect_errors
    )
开发者ID:klibby,项目名称:treeherder-service,代码行数:31,代码来源:test_utils.py
示例2: post_collection
def post_collection(
        project, th_collection, status=None, expect_errors=False,
        consumer_key=None, consumer_secret=None):
    """
    POST *th_collection* to the test app for *project*, OAuth-signed.

    The sample credentials are registered first; *consumer_key* /
    *consumer_secret* may override them — the only time they should be
    overridden is when a client needs to test authentication failure
    confirmation.
    """
    # Set the credentials
    OAuthCredentials.set_credentials(SampleData.get_credentials())
    credentials = OAuthCredentials.get_credentials(project)
    if consumer_key:
        credentials['consumer_key'] = consumer_key
    if consumer_secret:
        credentials['consumer_secret'] = consumer_secret
    cli = TreeherderClient(
        protocol='http',
        host='localhost',
    )
    jsondata = th_collection.to_json()
    signed_uri = cli._get_uri(project, th_collection.endpoint_base,
                              data=jsondata,
                              oauth_key=credentials['consumer_key'],
                              oauth_secret=credentials['consumer_secret'],
                              method='POST')
    response = TestApp(application).post_json(
        str(signed_uri), params=th_collection.get_collection_data(),
        status=status,
        # Fix: expect_errors was accepted but never forwarded, so callers
        # asking to tolerate error responses still tripped webtest's
        # default status assertion.
        expect_errors=expect_errors
    )
    return response
开发者ID:ccooper,项目名称:treeherder,代码行数:35,代码来源:test_utils.py
示例3: post_collection
def post_collection(
        project, th_collection, status=None, expect_errors=False,
        consumer_key=None, consumer_secret=None):
    """
    POST *th_collection* for *project* using TreeherderAuth signing and
    return the webtest response.

    The sample credentials are registered first; *consumer_key* /
    *consumer_secret* may override them — the only time they should be
    overridden is when a client needs to test authentication failure
    confirmation.
    """
    # Set the credentials
    OAuthCredentials.set_credentials(SampleData.get_credentials())
    credentials = OAuthCredentials.get_credentials(project)
    consumer_key = consumer_key or credentials['consumer_key']
    consumer_secret = consumer_secret or credentials['consumer_secret']
    auth = TreeherderAuth(consumer_key, consumer_secret, project)
    client = TreeherderClient(protocol='http', host='localhost', auth=auth)
    uri = client._get_project_uri(project, th_collection.endpoint_base)
    # Prepare a request purely to obtain the OAuth-signed URL; the actual
    # POST goes through the webtest app below.
    req = Request('POST', uri,
                  json=th_collection.get_collection_data(),
                  auth=auth)
    prepped_request = req.prepare()
    response = TestApp(application).post_json(
        prepped_request.url,
        params=th_collection.get_collection_data(),
        status=status,
        # Fix: forward expect_errors (previously accepted but ignored).
        expect_errors=expect_errors
    )
    return response
开发者ID:kingaki007,项目名称:treeherder,代码行数:30,代码来源:test_utils.py
示例4: test_post_talos_artifact
def test_post_talos_artifact(test_project, test_repository, result_set_stored,
                             mock_post_json):
    """Ingest a completed job carrying a talos_data artifact and check that
    exactly one perf signature and one perf datum are created."""
    test_repository.save()

    guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
    talos_artifact = {
        'blob': {'talos_data': SampleData.get_minimal_talos_perf_data()},
        'type': 'json',
        'name': 'talos_data',
        'job_guid': guid
    }
    collection = client.TreeherderJobCollection()
    collection.add(client.TreeherderJob({
        'project': test_repository.name,
        'revision_hash': result_set_stored[0]['revision_hash'],
        'job': {
            'job_guid': guid,
            'state': 'completed',
            'project': test_repository.name,
            'option_collection': {'opt': True},
            'artifacts': [talos_artifact]
        }
    }))
    post_collection(test_project, collection)

    # Only validate the expected number of results for talos here; the
    # actual data adapters are validated elsewhere.
    assert PerformanceSignature.objects.count() == 1
    assert PerformanceDatum.objects.count() == 1
开发者ID:PratikDhanave,项目名称:treeherder,代码行数:31,代码来源:test_perf_ingestion.py
示例5: post_collection
def post_collection(project, th_collection, status=None, expect_errors=False, consumer_key=None, consumer_secret=None):
    """
    POST *th_collection* to the test app for *project*, OAuth-signed.

    The sample credentials are registered first; *consumer_key* /
    *consumer_secret* may override them — the only time they should be
    overridden is when a client needs to test authentication failure
    confirmation.
    """
    # Set the credentials
    OAuthCredentials.set_credentials(SampleData.get_credentials())
    credentials = OAuthCredentials.get_credentials(project)
    if consumer_key:
        credentials["consumer_key"] = consumer_key
    if consumer_secret:
        credentials["consumer_secret"] = consumer_secret
    tr = TreeherderRequest(
        protocol="http",
        host="localhost",
        project=project,
        oauth_key=credentials["consumer_key"],
        oauth_secret=credentials["consumer_secret"],
    )
    signed_uri = tr.oauth_client.get_signed_uri(
        th_collection.to_json(), tr.get_uri(th_collection.endpoint_base), "POST"
    )
    response = TestApp(application).post_json(
        str(signed_uri), params=th_collection.get_collection_data(), status=status,
        # Fix: expect_errors was accepted but never passed through to
        # post_json, so error-tolerant callers still hit webtest's
        # status assertion.
        expect_errors=expect_errors
    )
    return response
开发者ID:jonasfj,项目名称:treeherder-service,代码行数:32,代码来源:test_utils.py
示例6: _post_json_data
def _post_json_data(url, data):
    """OAuth-sign and POST the collection for ``jm.project`` found in
    *data*; returns the webtest response with a urllib-style getcode()."""
    collection = data[jm.project]

    OAuthCredentials.set_credentials(SampleData.get_credentials())
    creds = OAuthCredentials.get_credentials(jm.project)

    request = TreeherderRequest(
        protocol='http',
        host='localhost',
        project=jm.project,
        oauth_key=creds['consumer_key'],
        oauth_secret=creds['consumer_secret']
    )
    signed_uri = request.oauth_client.get_signed_uri(
        collection.to_json(),
        request.get_uri(collection.endpoint_base),
        "POST"
    )
    response = TestApp(application).post_json(
        str(signed_uri), params=collection.get_collection_data()
    )
    # Emulate the urllib response interface for callers expecting getcode().
    response.getcode = lambda: response.status_int
    return response
开发者ID:AutomatedTester,项目名称:treeherder-service,代码行数:26,代码来源:conftest.py
示例7: check_json
def check_json(self, filename, expected_timestamps):
    """Parse JSON produced by http://graphs.mozilla.org/api/test/runs"""
    # Analyzer configuration
    fore_window = 12
    back_window = 12
    threshold = 7

    payload = SampleData.get_perf_data(os.path.join('graphs', filename))
    analyzer = Analyzer()
    for run in payload['test_runs']:
        analyzer.add_data(run[2], run[3], testrun_id=run[0],
                          revision_id=run[1][2])

    analysis = analyzer.analyze_t(back_window, fore_window, threshold)
    regressions = [
        item.push_timestamp for item in analysis
        if item.state == 'regression'
    ]
    self.assertEqual(regressions, expected_timestamps)
开发者ID:gregarndt,项目名称:treeherder,代码行数:18,代码来源:test_analyze.py
示例8: _send
def _send(th_request, th_collection):
    """Sign *th_collection* with the sample OAuth credentials and POST it
    via *th_request*; returns the response with a urllib-style getcode()."""
    OAuthCredentials.set_credentials(SampleData.get_credentials())
    creds = OAuthCredentials.get_credentials(jm.project)

    # Mutate the request in place so signing uses the sample credentials.
    th_request.oauth_key = creds['consumer_key']
    th_request.oauth_secret = creds['consumer_secret']

    uri = th_request.get_signed_uri(
        th_collection.to_json(), th_request.get_uri(th_collection)
    )
    response = TestApp(application).post_json(
        str(uri), params=th_collection.get_collection_data()
    )
    response.getcode = lambda: response.status_int
    return response
开发者ID:uberj,项目名称:treeherder-service,代码行数:18,代码来源:conftest.py
示例9: test_adapt_and_load
def test_adapt_and_load():
    """Feed every sample talos blob through TalosDataAdapter and verify
    the total number of performance artifact placeholders produced."""
    tda = TalosDataAdapter()
    expected = 0
    for blob in SampleData.get_talos_perf_data():
        entry = {
            "job_guid": 'oqiwy0q847365qiu',
            "name": "test",
            "type": "test",
            "blob": blob
        }
        job_data = {
            "oqiwy0q847365qiu": {
                "id": 1,
                "result_set_id": 1,
                "push_timestamp": 1402692388
            }
        }
        reference_data = {
            "property1": "value1",
            "property2": "value2",
            "property3": "value3"
        }
        # One placeholder per subtest result, plus one for the summary
        # series; counters each add one more.
        expected += len(blob["results"]) + 1
        if 'talos_counters' in blob:
            expected += len(blob["talos_counters"])
        # Mimic the production environment: blobs arrive serialized at
        # the web service.
        entry['blob'] = json.dumps({'talos_data': [blob]})
        tda.adapt_and_load(reference_data, job_data, entry)
    assert expected == len(tda.performance_artifact_placeholders)
开发者ID:TheTeraByte,项目名称:treeherder,代码行数:43,代码来源:test_perf_data_adapters.py
示例10: test_detect_changes_historical_data
def test_detect_changes_historical_data(filename, expected_timestamps):
    """Parse JSON produced by http://graphs.mozilla.org/api/test/runs"""
    # Analyzer configuration
    fore_window = 12
    min_back_window = 12
    max_back_window = 24
    threshold = 7

    payload = SampleData.get_perf_data(os.path.join('graphs', filename))
    data = [RevisionDatum(r[2], r[2], [r[3]]) for r in payload['test_runs']]

    analysed = detect_changes(data,
                              min_back_window=min_back_window,
                              max_back_window=max_back_window,
                              fore_window=fore_window,
                              t_threshold=threshold)
    regressions = [d.push_timestamp for d in analysed if d.change_detected]
    assert regressions == expected_timestamps
开发者ID:MikeLing,项目名称:treeherder,代码行数:20,代码来源:test_analyze.py
示例11: check_json
def check_json(self, filename, expected_timestamps):
    """Parse JSON produced by http://graphs.mozilla.org/api/test/runs"""
    # TalosAnalyzer configuration
    fore_window = 12
    back_window = 12
    threshold = 7
    machine_threshold = 15
    machine_history_size = 5

    payload = SampleData.get_perf_data(os.path.join('graphs', filename))
    data = [
        PerfDatum(r[2], r[3], testrun_id=r[0], machine_id=r[6],
                  testrun_timestamp=r[2], buildid=r[1][1],
                  revision=r[1][2])
        for r in payload['test_runs']
    ]

    analyzer = TalosAnalyzer()
    analyzer.addData(data)
    analysis = analyzer.analyze_t(back_window, fore_window, threshold,
                                  machine_threshold, machine_history_size)
    regressions = [d.testrun_timestamp for d in analysis
                   if d.state == 'regression']
    self.assertEqual(regressions, expected_timestamps)
开发者ID:TheTeraByte,项目名称:treeherder,代码行数:22,代码来源:test_analyze.py
示例12: check_json
def check_json(self, filename, expected_timestamps):
    """Parse JSON produced by http://graphs.mozilla.org/api/test/runs"""
    # Analyzer configuration
    fore_window = 12
    min_back_window = 12
    max_back_window = 24
    threshold = 7

    payload = SampleData.get_perf_data(os.path.join('graphs', filename))
    data = [Datum(r[2], r[3], testrun_id=r[0], revision_id=r[1][2])
            for r in payload['test_runs']]

    results = detect_changes(data, min_back_window=min_back_window,
                             max_back_window=max_back_window,
                             fore_window=fore_window,
                             t_threshold=threshold)
    regressions = [d.push_timestamp for d in results
                   if d.state == 'regression']
    self.assertEqual(regressions, expected_timestamps)
开发者ID:SebastinSanty,项目名称:treeherder,代码行数:22,代码来源:test_analyze.py
示例13: _send
def _send(th_request, endpoint, method=None, data=None):
    """Sign *data* (serialized if needed) for *endpoint* with the sample
    OAuth credentials and issue the request via the given HTTP *method*."""
    OAuthCredentials.set_credentials(SampleData.get_credentials())
    creds = OAuthCredentials.get_credentials(jm.project)
    th_request.oauth_key = creds['consumer_key']
    th_request.oauth_secret = creds['consumer_secret']

    # Serialize only if the caller handed us a non-string payload.
    payload = data
    if payload and not isinstance(payload, str):
        payload = json.dumps(payload)

    signed_uri = th_request.oauth_client.get_signed_uri(
        payload, th_request.get_uri(endpoint), method
    )
    # Dispatch to the matching TestApp verb (get/post/put/...).
    app_call = getattr(TestApp(application), method.lower())
    response = app_call(
        str(signed_uri),
        params=payload,
        content_type='application/json'
    )
    response.getcode = lambda: response.status_int
    return response
开发者ID:AutomatedTester,项目名称:treeherder-service,代码行数:23,代码来源:conftest.py
示例14: test_post_talos_artifact
def test_post_talos_artifact(test_project, test_repository, result_set_stored,
                             mock_post_json):
    """Ingest a completed job carrying a talos_data artifact and check that
    two perf signatures and two perf datums exist afterwards."""
    test_repository.save()

    # Delete any previously-created perf objects until bug 1133273 is fixed
    # https://bugzilla.mozilla.org/show_bug.cgi?id=1133273 (this can be really
    # slow if the local database has a lot of objects in it)
    PerformanceSignature.objects.all().delete()
    PerformanceDatum.objects.all().delete()

    guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
    talos_artifact = {
        'blob': {'talos_data': SampleData.get_minimal_talos_perf_data()},
        'type': 'json',
        'name': 'talos_data',
        'job_guid': guid
    }
    collection = client.TreeherderJobCollection()
    collection.add(client.TreeherderJob({
        'project': test_repository.name,
        'revision_hash': result_set_stored[0]['revision_hash'],
        'job': {
            'job_guid': guid,
            'state': 'completed',
            'project': test_repository.name,
            'option_collection': {'opt': True},
            'artifacts': [talos_artifact]
        }
    }))
    do_post_collection(test_project, collection)

    # Only validate the expected number of results for talos here; the
    # actual data adapters are validated elsewhere.
    assert PerformanceSignature.objects.count() == 2
    assert PerformanceDatum.objects.count() == 2
开发者ID:adusca,项目名称:treeherder,代码行数:37,代码来源:test_perf_ingestion.py
示例15: set_oauth_credentials
def set_oauth_credentials():
    """Register the sample OAuth credentials with OAuthCredentials."""
    sample_credentials = SampleData.get_credentials()
    OAuthCredentials.set_credentials(sample_credentials)
开发者ID:adusca,项目名称:treeherder,代码行数:2,代码来源:conftest.py
示例16: test_adapt_and_load
def test_adapt_and_load(self):
    """
    Run each sample talos blob through TalosDataAdapter.adapt_and_load
    and verify the signatures/series placeholders it accumulates.

    NOTE(review): Python 2 only — relies on dict.iteritems() and on
    filter() returning a list.
    """
    talos_perf_data = SampleData.get_talos_perf_data()

    for talos_datum in talos_perf_data:
        # Fixed fake job/reference metadata; only the blob varies per loop.
        datum = {
            "job_guid": 'oqiwy0q847365qiu',
            "name": "test",
            "type": "test",
            "blob": talos_datum
        }

        job_data = {
            "oqiwy0q847365qiu": {
                "id": 1,
                "result_set_id": 1,
                "push_timestamp": 1402692388
            }
        }

        reference_data = {
            "property1": "value1",
            "property2": "value2",
            "property3": "value3"
        }

        # Mimic production environment, the blobs are serialized
        # when the web service receives them
        datum['blob'] = json.dumps({'talos_data': [datum['blob']]})

        # Fresh adapter per blob so counts below are per-datum.
        tda = TalosDataAdapter()
        tda.adapt_and_load(reference_data, job_data, datum)

        # base: subtests + one extra result for the summary series
        expected_result_count = len(talos_datum["results"]) + 1

        # we create one performance series per counter
        if 'talos_counters' in talos_datum:
            expected_result_count += len(talos_datum["talos_counters"])

        # result count == number of signatures
        self.assertEqual(expected_result_count, len(tda.signatures.keys()))

        # verify that we have signatures for the subtests
        signature_placeholders = copy.copy(
            tda.signature_property_placeholders)
        for (testname, results) in talos_datum["results"].iteritems():
            # Placeholder tuples: index 0 is the signature hash, index 2
            # the test name (inferred from the lookups below).
            signature_placeholder = filter(
                lambda p: p[2] == testname, signature_placeholders)
            self.assertEqual(len(signature_placeholder), 1)

            signature_hash = signature_placeholder[0][0]
            perfdata = tda.signatures[signature_hash][0]
            if talos_datum.get('summary'):
                # if we have a summary, ensure the subtest summary values made
                # it in
                for measure in ['min', 'max', 'std', 'mean', 'median']:
                    self.assertEqual(
                        round(talos_datum['summary']['subtests'][testname][measure], 2),
                        perfdata[measure])
            else:
                # this is an old style talos blob without a summary. these are going
                # away, so I'm not going to bother testing the correctness. however
                # let's at least verify that some values are being generated here
                for measure in ['min', 'max', 'std', 'mean', 'median']:
                    self.assertTrue(perfdata[measure])

            # filter out this signature from data to process
            signature_placeholders = filter(
                lambda p: p[0] != signature_hash, signature_placeholders)

        # if we have counters, verify that the series for them is as expected
        for (counter, results) in talos_datum.get('talos_counters',
                                                  {}).iteritems():
            signature_placeholder = filter(
                lambda p: p[2] == counter, signature_placeholders)
            self.assertEqual(len(signature_placeholder), 1)

            signature_hash = signature_placeholder[0][0]
            perfdata = tda.signatures[signature_hash][0]
            for measure in ['max', 'mean']:
                self.assertEqual(round(float(results[measure]), 2),
                                 perfdata[measure])

            # filter out this signature from data to process
            signature_placeholders = filter(
                lambda p: p[0] != signature_hash, signature_placeholders)

        # we should be left with just summary signature placeholders
        self.assertEqual(len(signature_placeholders), 2)
        perfdata = tda.signatures[signature_placeholders[0][0]][0]
        if talos_datum.get('summary'):
            self.assertEqual(round(talos_datum['summary']['suite'], 2),
                             perfdata['geomean'])
        else:
            # old style talos blob without summary. again, going away,
            # but let's at least test that we have the 'geomean' value
            # generated
            self.assertTrue(perfdata['geomean'])
开发者ID:jgraham,项目名称:treeherder,代码行数:96,代码来源:test_perf_data_adapters.py
示例17: test_adapt_and_load
def test_adapt_and_load(self):
    """
    Run each sample talos blob through TalosDataAdapter.adapt_and_load
    and check the PerformanceSignature / PerformanceDatum rows created.

    NOTE(review): Python 2 only — relies on dict.iteritems().
    """
    talos_perf_data = SampleData.get_talos_perf_data()

    for talos_datum in talos_perf_data:
        # delete any previously-created perf objects
        # FIXME: because of https://bugzilla.mozilla.org/show_bug.cgi?id=1133273
        # this can be really slow if we have a dev database with lots of
        # performance data in it (if the test succeeds, the transaction
        # will be rolled back so at least it won't pollute the production
        # database)
        PerformanceSignature.objects.all().delete()
        PerformanceDatum.objects.all().delete()

        datum = {
            "job_guid": 'oqiwy0q847365qiu',
            "name": "test",
            "type": "test",
            "blob": talos_datum
        }

        job_data = {
            "oqiwy0q847365qiu": {
                "id": 1,
                "result_set_id": 1,
                "push_timestamp": 1402692388
            }
        }

        reference_data = {
            "option_collection_hash": self.OPTION_HASH,
            "machine_platform": self.MACHINE_PLATFORM,
            "property1": "value1",
            "property2": "value2",
            "property3": "value3"
        }

        # Mimic production environment, the blobs are serialized
        # when the web service receives them
        datum['blob'] = json.dumps({'talos_data': [datum['blob']]})

        tda = TalosDataAdapter()
        tda.adapt_and_load(self.REPO_NAME, reference_data, job_data, datum)

        # base: subtests + one extra result for the summary series
        expected_result_count = len(talos_datum["results"]) + 1

        # we create one performance series per counter
        if 'talos_counters' in talos_datum:
            expected_result_count += len(talos_datum["talos_counters"])

        # result count == number of signatures
        self.assertEqual(expected_result_count,
                         PerformanceSignature.objects.all().count())

        # verify that we have signatures for the subtests
        for (testname, results) in talos_datum["results"].iteritems():
            signature = PerformanceSignature.objects.get(test=testname)
            # NOTE(review): this rebinds `datum` (the dict above) to a
            # model instance for the rest of the loop body.
            datum = PerformanceDatum.objects.get(signature=signature)
            if talos_datum.get('summary'):
                # if we have a summary, ensure the subtest summary values made
                # it in
                self.assertEqual(
                    round(talos_datum['summary']['subtests'][testname]['filtered'], 2),
                    datum.value)
            else:
                # this is an old style talos blob without a summary. these are going
                # away, so I'm not going to bother testing the correctness. however
                # let's at least verify that some values are being generated here
                self.assertTrue(datum.value)

        # if we have counters, verify that the series for them is as expected
        for (counter, results) in talos_datum.get('talos_counters',
                                                  {}).iteritems():
            signature = PerformanceSignature.objects.get(test=counter)
            datum = PerformanceDatum.objects.get(signature=signature)
            self.assertEqual(round(float(results['mean']), 2),
                             datum.value)

        # we should be left with just the summary series
        signature = PerformanceSignature.objects.get(
            test='',
            suite=talos_datum['testrun']['suite'])
        datum = PerformanceDatum.objects.get(signature=signature)
        if talos_datum.get('summary'):
            self.assertEqual(round(talos_datum['summary']['suite'], 2),
                             datum.value)
        else:
            # old style talos blob without summary. again, going away,
            # but let's at least test that we have the value
            self.assertTrue(datum.value)
开发者ID:adusca,项目名称:treeherder,代码行数:89,代码来源:test_perf_data_adapters.py
示例18: test_adapt_and_load
def test_adapt_and_load():
    """
    Run every sample talos blob through TalosDataAdapter and check both
    the total placeholder count and (for summarized blobs) the values
    stored in the zlib-compressed performance-series artifacts.
    """
    talos_perf_data = SampleData.get_talos_perf_data()
    tda = TalosDataAdapter()

    # Running total of expected placeholders across all blobs.
    result_count = 0
    for datum in talos_perf_data:
        datum = {
            "job_guid": 'oqiwy0q847365qiu',
            "name": "test",
            "type": "test",
            "blob": datum
        }

        job_data = {
            "oqiwy0q847365qiu": {
                "id": 1,
                "result_set_id": 1,
                "push_timestamp": 1402692388
            }
        }

        reference_data = {
            "property1": "value1",
            "property2": "value2",
            "property3": "value3"
        }

        # one extra result for the summary series
        result_count += len(datum['blob']["results"]) + 1

        # we create one performance series per counter
        if 'talos_counters' in datum['blob']:
            result_count += len(datum['blob']["talos_counters"])

        # Mimic production environment, the blobs are serialized
        # when the web service receives them
        datum['blob'] = json.dumps({'talos_data': [datum['blob']]})

        tda.adapt_and_load(reference_data, job_data, datum)

        # we upload a summary with a suite and subtest values, +1 for suite
        # NOTE(review): datum['blob'] is a JSON *string* here, so this is a
        # substring check on the serialized payload, not a dict-key lookup.
        if 'summary' in datum['blob']:
            # Placeholder column 4 holds the zlib-compressed JSON artifact;
            # the last placeholder belongs to the suite summary.
            results = json.loads(zlib.decompress(tda.performance_artifact_placeholders[-1][4]))
            data = json.loads(datum['blob'])['talos_data'][0]
            assert results["blob"]["performance_series"]["geomean"] == data['summary']['suite']

            # deal with the subtests now
            for i in range(0, len(data['summary']['subtests'])):
                subresults = json.loads(zlib.decompress(tda.performance_artifact_placeholders[-1 - i][4]))
                if 'subtest_signatures' in subresults["blob"]['signature_properties']:
                    # ignore summary signatures
                    continue

                subdata = data['summary']['subtests'][subresults["blob"]['signature_properties']['test']]
                for datatype in ['min', 'max', 'mean', 'median', 'std']:
                    assert subdata[datatype] == subresults["blob"]["performance_series"][datatype]
                if 'value' in subdata.keys():
                    assert subdata['value'] == subresults["blob"]["performance_series"]['value']
        else:
            # FIXME: the talos data blob we're currently using contains datums with summaries and those without
            # we should probably test non-summarized data as well
            pass

    assert result_count == len(tda.performance_artifact_placeholders)
开发者ID:vaishalitekale,项目名称:treeherder,代码行数:66,代码来源:test_perf_data_adapters.py
示例19: test_load_talos_data
def test_load_talos_data(self):
    """
    Ingest each sample talos blob via load_talos_artifacts and verify the
    PerformanceSignature / PerformanceDatum rows (values, lowerIsBetter,
    push timestamps), cleaning up perf objects between iterations.

    NOTE(review): Python 2 only — relies on dict.iteritems().
    """
    PerformanceFramework.objects.get_or_create(name='talos')

    talos_perf_data = SampleData.get_talos_perf_data()

    for talos_datum in talos_perf_data:
        (job_data, reference_data) = self._get_job_and_reference_data()

        datum = {
            "job_guid": self.JOB_GUID,
            "name": "test",
            "type": "test",
            "blob": talos_datum
        }

        # Mimic production environment, the blobs are serialized
        # when the web service receives them
        datum['blob'] = json.dumps({'talos_data': [datum['blob']]})

        load_talos_artifacts(self.REPO_NAME, reference_data, job_data, datum)

        # base: subtests + one extra result for the summary series
        expected_result_count = len(talos_datum["results"]) + 1

        # we create one performance series per counter
        if 'talos_counters' in talos_datum:
            expected_result_count += len(talos_datum["talos_counters"])

        # result count == number of signatures
        self.assertEqual(expected_result_count,
                         PerformanceSignature.objects.all().count())

        # verify that we have signatures for the subtests
        for (testname, results) in talos_datum["results"].iteritems():
            signature = PerformanceSignature.objects.get(test=testname)
            # NOTE(review): rebinds `datum` to the model instance.
            datum = PerformanceDatum.objects.get(signature=signature)
            if talos_datum.get('summary'):
                # if we have a summary, ensure the subtest summary values made
                # it in and that we ingested lowerIsBetter ok (if it was there)
                subtest = talos_datum['summary']['subtests'][testname]
                self.assertEqual(
                    round(subtest['filtered'], 2), datum.value)
                self.assertEqual(signature.lower_is_better,
                                 subtest.get('lowerIsBetter', True))
            else:
                # this is an old style talos blob without a summary. these are going
                # away, so I'm not going to bother testing the correctness. however
                # let's at least verify that some values are being generated here
                self.assertTrue(datum.value)
            self.assertEqual(datum.push_timestamp,
                             datetime.datetime.fromtimestamp(
                                 self.PUSH_TIMESTAMP))

        # if we have counters, verify that the series for them is as expected
        for (counter, results) in talos_datum.get('talos_counters',
                                                  {}).iteritems():
            signature = PerformanceSignature.objects.get(test=counter)
            datum = PerformanceDatum.objects.get(signature=signature)
            self.assertEqual(round(float(results['mean']), 2),
                             datum.value)
            self.assertEqual(datum.push_timestamp,
                             datetime.datetime.fromtimestamp(
                                 self.PUSH_TIMESTAMP))

        # we should be left with just the summary series
        signature = PerformanceSignature.objects.get(
            test='',
            suite=talos_datum['testrun']['suite'])
        datum = PerformanceDatum.objects.get(signature=signature)
        if talos_datum.get('summary'):
            self.assertEqual(round(talos_datum['summary']['suite'], 2),
                             datum.value)
        else:
            # old style talos blob without summary. again, going away,
            # but let's at least test that we have the value
            self.assertTrue(datum.value)
        self.assertEqual(datum.push_timestamp,
                         datetime.datetime.fromtimestamp(
                             self.PUSH_TIMESTAMP))

        # delete perf objects for next iteration
        PerformanceSignature.objects.all().delete()
        PerformanceDatum.objects.all().delete()
开发者ID:klahnakoski,项目名称:treeherder,代码行数:82,代码来源:test_perf_data_adapters.py
示例20: test_load_talos_data
def test_load_talos_data(test_project, test_repository,
                         perf_option_collection, perf_platform,
                         perf_job_data, perf_reference_data):
    """
    Pytest variant: ingest each sample talos blob via load_talos_artifacts
    and verify PerformanceSignature / PerformanceDatum rows (values,
    lowerIsBetter, push timestamps), cleaning up between iterations.

    NOTE(review): Python 2 only — relies on dict.iteritems().
    """
    PerformanceFramework.objects.create(name='talos')

    talos_perf_data = SampleData.get_talos_perf_data()

    for talos_datum in talos_perf_data:
        datum = {
            "job_guid": "fake_job_guid",
            "name": "test",
            "type": "test",
            "blob": talos_datum
        }

        # Mimic production environment, the blobs are serialized
        # when the web service receives them
        datum['blob'] = json.dumps({'talos_data': [datum['blob']]})

        load_talos_artifacts(test_repository.name, perf_reference_data,
                             perf_job_data, datum)

        # base: subtests + one extra result for the summary series
        expected_result_count = len(talos_datum["results"]) + 1

        # we create one performance series per counter
        if 'talos_counters' in talos_datum:
            expected_result_count += len(talos_datum["talos_counters"])

        # result count == number of signatures
        assert expected_result_count == PerformanceSignature.objects.all().count()

        expected_push_timestamp = datetime.datetime.fromtimestamp(
            perf_job_data['fake_job_guid']['push_timestamp'])

        # verify that we have signatures for the subtests
        for (testname, results) in talos_datum["results"].iteritems():
            signature = PerformanceSignature.objects.get(test=testname)
            # NOTE(review): rebinds `datum` to the model instance.
            datum = PerformanceDatum.objects.get(signature=signature)
            if talos_datum.get('summary'):
                # if we have a summary, ensure the subtest summary values made
                # it in and that we ingested lowerIsBetter ok (if it was there)
                subtest = talos_datum['summary']['subtests'][testname]
                assert round(subtest['filtered'], 2) == datum.value
                assert signature.lower_is_better == subtest.get('lowerIsBetter', True)
            else:
                # this is an old style talos blob without a summary. these are
                # going away, so I'm not going to bother testing the
                # correctness. however let's at least verify that some values
                # are being generated here
                assert datum.value
            assert datum.push_timestamp == expected_push_timestamp

        # if we have counters, verify that the series for them is as expected
        for (counter, results) in talos_datum.get('talos_counters',
                                                  {}).iteritems():
            signature = PerformanceSignature.objects.get(test=counter)
            datum = PerformanceDatum.objects.get(signature=signature)
            assert round(float(results['mean']), 2) == datum.value
            assert datum.push_timestamp == expected_push_timestamp

        # we should be left with just the summary series
        signature = PerformanceSignature.objects.get(
            test='',
            suite=talos_datum['testrun']['suite'])
        datum = PerformanceDatum.objects.get(signature=signature)
        if talos_datum.get('summary'):
            assert round(talos_datum['summary']['suite'], 2) == datum.value
        else:
            # old style talos blob without summary. again, going away,
            # but let's at least test that we have the value
            assert datum.value
        assert datum.push_timestamp == expected_push_timestamp

        # delete perf objects for next iteration
        PerformanceSignature.objects.all().delete()
        PerformanceDatum.objects.all().delete()
开发者ID:optionalg,项目名称:treeherder,代码行数:77,代码来源:test_perf_data_adapters.py
注:本文中的tests.sampledata.SampleData类示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论