This page collects typical usage examples of the Python function treeherder.etl.buildbot.extract_platform_info. If you have been wondering what extract_platform_info does, how to call it, or what real uses of it look like, the curated examples below should help.
Ten code examples of the extract_platform_info function are shown below, ordered by popularity by default.
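Before the examples, here is a minimal orientation sketch of calling the function. The builder name is made up, and the exact key set of the returned dict is an assumption inferred from how the examples below consume it (they read 'os', 'os_platform', 'arch', and 'vm'):

from treeherder.etl import buildbot

# Hypothetical builder name; real buildbot builder names follow this
# general shape, but this specific string is illustrative only.
buildername = "WINNT 6.1 mozilla-central debug test mochitest-1"

platform_info = buildbot.extract_platform_info(buildername)

# Keys inferred from the examples below; the commented values are
# illustrative guesses, not guaranteed outputs.
print(platform_info["os"])           # operating system family, e.g. "win"
print(platform_info["os_platform"])  # finer-grained platform string
print(platform_info["arch"])         # architecture, e.g. "x86"
print(platform_info["vm"])           # whether the platform is a VM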
Example 1: test_buildername_translation
def test_buildername_translation(buildername, exp_result):
    """
    Test extraction of platform, job type, build type and name info
    from the buildername.
    """
    assert buildbot.extract_platform_info(buildername) == exp_result["platform"]
    assert buildbot.extract_job_type(buildername, default="not found") == exp_result["job_type"]
    assert buildbot.extract_build_type(buildername) == exp_result["build_type"]
    assert buildbot.extract_name_info(buildername) == exp_result["name"]
Author: un33k | Project: treeherder-service | Lines: 9 | Source: test_buildbot.py
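Since the test receives buildername and exp_result as parameters, it is presumably driven by pytest parametrization (in the real project the data may well come from a fixture file instead). A hedged sketch of what one parametrized case could look like; every concrete value below is a made-up assumption, not taken from the real fixture:

import pytest

# Entirely hypothetical test data -- builder name, platform keys and
# expected symbols are illustrative assumptions only.
CASES = [
    ("Rev4 MacOSX Lion 10.7 mozilla-central debug test mochitest-1",
     {"platform": {"os": "mac", "os_platform": "osx-10-7",
                   "arch": "x86_64", "vm": False},
      "job_type": "unittest",
      "build_type": "debug",
      "name": {"name": "mochitest-1", "job_symbol": "1",
               "group_name": "Mochitest", "group_symbol": "M"}}),
]

@pytest.mark.parametrize("buildername,exp_result", CASES)
def test_buildername_translation(buildername, exp_result):
    ...  # body as in Example 1 above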
Example 2: get_platform_regex_misses
def get_platform_regex_misses(self, analysis_type, build, buildername, job_guid):
    if not buildername:
        return

    # Match platforms
    platform_target = buildbot.extract_platform_info(buildername)

    if platform_target["os"] == "unknown":
        self._load_missed_buildername(analysis_type, buildername, job_guid)
Author: un33k | Project: treeherder-service | Lines: 10 | Source: buildapi.py
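The detail worth noting in Example 2 is the sentinel value: when no platform regex matches, extract_platform_info evidently reports "unknown" for the os rather than raising. A small sketch relying on that contract (the builder name is made up):

# Relies on the fallback behaviour implied by Example 2: an unmatched
# buildername yields os == "unknown" instead of an exception.
platform = buildbot.extract_platform_info("totally-unrecognized-builder")
if platform["os"] == "unknown":
    print("no platform regex matched; consider adding one")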
Example 3: get_buildername_data
def get_buildername_data(self, attr, value, data):
    """Callback function for the buildername property in the pulse stream"""
    # set buildername
    data[attr] = value

    # extend data with platform attributes
    platform_info = buildbot.extract_platform_info(value)
    data.update(platform_info)

    # extend data with build type attributes
    data['buildtype'] = buildbot.extract_build_type(value)

    # extend data with job type data
    data['jobtype'] = buildbot.extract_job_type(value)

    job_name_info = buildbot.extract_name_info(value)
    data['test_name'] = job_name_info["name"]
    data.update(job_name_info)

    return data
Author: AutomatedTester | Project: treeherder-service | Lines: 21 | Source: pulse.py
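Because the callback both stores the raw property and merges the extracted attributes into the same dict, calling it looks roughly like this (the consumer object and builder name are hypothetical stand-ins):

# Hypothetical invocation; 'consumer' stands in for whatever pulse
# consumer instance the method above is bound to.
data = {}
data = consumer.get_buildername_data(
    "buildername",                              # attr
    "Linux mozilla-inbound opt test xpcshell",  # value: made-up builder name
    data,
)
# data now contains 'buildername' plus the merged platform keys,
# 'buildtype', 'jobtype', 'test_name' and the name-info keys.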
Example 4: transform
def transform(self, data, project_filter=None, revision_filter=None,
              job_group_filter=None):
    """
    Transform the builds4h structure into something we can ingest via
    our RESTful API.
    """
    revisions = defaultdict(list)
    missing_resultsets = defaultdict(set)
    valid_projects = set(x.project for x in Datasource.objects.cached())

    for build in data['builds']:
        try:
            prop = build['properties']
            project = prop['branch']
            buildername = prop['buildername']
            if common.should_skip_project(project, valid_projects, project_filter):
                continue
            if common.should_skip_revision(prop['revision'], revision_filter):
                continue
            if common.is_blacklisted_buildername(buildername):
                continue
            prop['short_revision'] = prop['revision'][0:12]
        except KeyError as e:
            logger.warning("skipping builds-4hr job %s since missing property: %s", build['id'], str(e))
            continue

        revisions[project].append(prop['short_revision'])

    revisions_lookup = common.lookup_revisions(revisions)
    job_ids_seen_last_time = cache.get(CACHE_KEYS['complete'], set())
    job_ids_seen_now = set()

    # Holds one collection per unique branch/project
    th_collections = {}

    for build in data['builds']:
        try:
            prop = build['properties']
            project = prop['branch']
            buildername = prop['buildername']
            if common.should_skip_project(project, valid_projects, project_filter):
                continue
            if common.should_skip_revision(prop['revision'], revision_filter):
                continue
            if common.is_blacklisted_buildername(buildername):
                continue
            # todo: Continue using short revisions until Bug 1199364
            resultset = common.get_resultset(project,
                                             revisions_lookup,
                                             prop['short_revision'],
                                             missing_resultsets,
                                             logger)
        except KeyError:
            # There was no matching resultset, skip the job.
            continue

        # We record the id here rather than at the start of the loop, since we
        # must not count jobs whose revisions were not yet imported as processed,
        # or we'll never process them once we've ingested their associated revision.
        job_ids_seen_now.add(build['id'])

        # Don't process jobs that were already present in builds-4hr
        # the last time this task completed successfully.
        if build['id'] in job_ids_seen_last_time:
            continue

        platform_info = buildbot.extract_platform_info(buildername)
        job_name_info = buildbot.extract_name_info(buildername)

        if (job_group_filter and job_name_info.get('group_symbol', '').lower() !=
                job_group_filter.lower()):
            continue

        treeherder_data = {
            'revision_hash': resultset['revision_hash'],
            'resultset_id': resultset['id'],
            'project': project,
            'coalesced': []
        }

        log_reference = []
        if 'log_url' in prop:
            log_reference.append({
                'url': prop['log_url'],
                'name': 'buildbot_text'
            })

        # add structured logs to the list of log references
        if 'blobber_files' in prop:
            try:
                blobber_files = json.loads(prop['blobber_files'])
                for bf, url in blobber_files.items():
                    if bf and url and bf.endswith('_raw.log'):
                        # ... (remainder of this method omitted) ...
Author: EricRahm | Project: treeherder | Lines: 101 | Source: buildapi.py
Example 5: transform
def transform(self, data, filter_to_project=None, filter_to_revision=None,
              filter_to_job_group=None):
    """
    Transform the builds4h structure into something we can ingest via
    our RESTful API.
    """
    revisions = defaultdict(list)
    missing_resultsets = defaultdict(set)
    projects = set(x.project for x in Datasource.objects.cached())

    for build in data['builds']:
        prop = build['properties']

        if 'buildername' not in prop:
            logger.warning("skipping builds-4hr job since no buildername found")
            continue

        if 'branch' not in prop:
            logger.warning("skipping builds-4hr job since no branch found: %s", prop['buildername'])
            continue

        if prop['branch'] not in projects:
            # Fuzzer jobs specify a branch of 'idle', and we intentionally don't display them.
            if prop['branch'] != 'idle':
                logger.warning("skipping builds-4hr job on unknown branch %s: %s", prop['branch'], prop['buildername'])
            continue

        if filter_to_project and prop['branch'] != filter_to_project:
            continue

        prop['revision'] = prop.get('revision',
                                    prop.get('got_revision',
                                             prop.get('sourcestamp', None)))

        if not prop['revision']:
            logger.warning("skipping builds-4hr job since no revision found: %s", prop['buildername'])
            continue

        prop['revision'] = prop['revision'][0:12]

        if prop['revision'] == prop.get('l10n_revision', None):
            # Some l10n jobs specify the l10n repo revision under 'revision', rather
            # than the gecko revision. If we did not skip these, it would result in
            # fetch_missing_resultsets requests that were guaranteed to 404.
            # This needs to be fixed upstream in builds-4hr by bug 1125433.
            logger.warning("skipping builds-4hr job since revision refers to wrong repo: %s", prop['buildername'])
            continue

        revisions[prop['branch']].append(prop['revision'])

    revisions_lookup = common.lookup_revisions(revisions)
    job_ids_seen_last_time = cache.get(CACHE_KEYS['complete'], set())
    job_ids_seen_now = set()

    # Holds one collection per unique branch/project
    th_collections = {}

    for build in data['builds']:
        try:
            prop = build['properties']
            project = prop['branch']
            resultset = common.get_resultset(project,
                                             revisions_lookup,
                                             prop['revision'],
                                             missing_resultsets,
                                             logger)
        except KeyError:
            # skip this job, at least at this point
            continue

        if filter_to_revision and filter_to_revision != resultset['revision']:
            continue

        # We record the id here rather than at the start of the loop, since we
        # must not count jobs whose revisions were not yet imported as processed,
        # or we'll never process them once we've ingested their associated revision.
        job_ids_seen_now.add(build['id'])

        # Don't process jobs that were already present in builds-4hr
        # the last time this task completed successfully.
        if build['id'] in job_ids_seen_last_time:
            continue

        platform_info = buildbot.extract_platform_info(prop['buildername'])
        job_name_info = buildbot.extract_name_info(prop['buildername'])

        if (filter_to_job_group and job_name_info.get('group_symbol', '').lower() !=
                filter_to_job_group.lower()):
            continue

        treeherder_data = {
            'revision_hash': resultset['revision_hash'],
            'resultset_id': resultset['id'],
            'project': project,
            'coalesced': []
        }

        device_name = buildbot.get_device_or_unknown(
            job_name_info.get('name', ''),
            # ... (remainder of this method omitted) ...
Author: EdgarChen | Project: treeherder | Lines: 101 | Source: buildapi.py
Example 6: transform
def transform(self, data, source, filter_to_revision=None, filter_to_project=None,
              filter_to_job_group=None):
    """
    Transform the buildapi structure into something we can ingest via
    our RESTful API.
    """
    projects = set(x.project for x in Datasource.objects.cached())
    revision_dict = defaultdict(list)
    missing_resultsets = defaultdict(set)

    # loop to catch all the revisions
    for project, revisions in data[source].iteritems():
        # this skips those projects we don't care about
        if project not in projects:
            continue

        if filter_to_project and project != filter_to_project:
            continue

        for rev, jobs in revisions.items():
            revision_dict[project].append(rev)

    # retrieving the revision->resultset lookups
    revisions_lookup = common.lookup_revisions(revision_dict)

    th_collections = {}

    for project, revisions in data[source].iteritems():
        for revision, jobs in revisions.items():
            try:
                resultset = common.get_resultset(project,
                                                 revisions_lookup,
                                                 revision,
                                                 missing_resultsets,
                                                 logger)
            except KeyError:
                # skip this job, at least at this point
                continue

            if filter_to_revision and filter_to_revision != resultset['revision']:
                continue

            # using project and revision from the revision lookups
            # to filter those jobs with unmatched revision
            for job in jobs:
                treeherder_data = {
                    'revision_hash': resultset['revision_hash'],
                    'resultset_id': resultset['id'],
                    'project': project,
                }

                platform_info = buildbot.extract_platform_info(job['buildername'])
                job_name_info = buildbot.extract_name_info(job['buildername'])

                if (filter_to_job_group and job_name_info.get('group_symbol', '').lower() !=
                        filter_to_job_group.lower()):
                    continue

                if source == 'pending':
                    request_id = job['id']
                elif source == 'running':
                    # The last element in request_ids corresponds to the request id of this job,
                    # the others are for the requests that were coalesced into this one.
                    request_id = job['request_ids'][-1]

                device_name = buildbot.get_device_or_unknown(
                    job_name_info.get('name', ''),
                    platform_info['vm']
                )

                new_job = {
                    'job_guid': common.generate_job_guid(
                        request_id,
                        job['buildername']
                    ),
                    'name': job_name_info.get('name', ''),
                    'job_symbol': job_name_info.get('job_symbol', ''),
                    'group_name': job_name_info.get('group_name', ''),
                    'group_symbol': job_name_info.get('group_symbol', ''),
                    'reference_data_name': job['buildername'],
                    'state': source,
                    'submit_timestamp': job['submitted_at'],
                    'build_platform': {
                        'os_name': platform_info['os'],
                        'platform': platform_info['os_platform'],
                        'architecture': platform_info['arch'],
                        'vm': platform_info['vm']
                    },
                    # where are we going to get this data from?
                    'machine_platform': {
                        'os_name': platform_info['os'],
                        'platform': platform_info['os_platform'],
                        'architecture': platform_info['arch'],
                        'vm': platform_info['vm']
                    },
                    'device_name': device_name,
                    'who': 'unknown',
                    'option_collection': {
                        # ... (remainder of this method omitted) ...
Author: TheTeraByte | Project: treeherder | Lines: 101 | Source: buildapi.py
Example 7: transform
def transform(self, data):
    """
    Transform the builds4h structure into something we can ingest via
    our RESTful API.
    """
    revisions = defaultdict(list)
    missing_resultsets = defaultdict(set)
    projects = set(x.project for x in Datasource.objects.cached())

    for build in data['builds']:
        prop = build['properties']

        if 'branch' not in prop:
            logger.warning("property 'branch' not found in build4h")
            continue

        if prop['branch'] not in projects:
            logger.warning("skipping job on unsupported branch {0}".format(prop['branch']))
            continue

        prop['revision'] = prop.get('revision',
                                    prop.get('got_revision',
                                             prop.get('sourcestamp', None)))

        if not prop['revision']:
            logger.warning("property 'revision' not found in build4h")
            continue

        prop['revision'] = prop['revision'][0:12]
        revisions[prop['branch']].append(prop['revision'])

    revisions_lookup = common.lookup_revisions(revisions)

    # Holds one collection per unique branch/project
    th_collections = {}

    for build in data['builds']:
        try:
            prop = build['properties']
            project = prop['branch']
            artifact_build = copy.deepcopy(build)
            resultset = common.get_resultset(project,
                                             revisions_lookup,
                                             prop['revision'],
                                             missing_resultsets,
                                             logger)
        except KeyError:
            # skip this job, at least at this point
            continue

        treeherder_data = {
            'revision_hash': resultset['revision_hash'],
            'resultset_id': resultset['id'],
            'project': project,
            'coalesced': []
        }

        platform_info = buildbot.extract_platform_info(prop['buildername'])
        job_name_info = buildbot.extract_name_info(prop['buildername'])
        device_name = buildbot.get_device_or_unknown(
            job_name_info.get('name', ''),
            platform_info['vm']
        )

        if 'log_url' in prop:
            log_reference = [{
                'url': prop['log_url'],
                'name': 'builds-4h'
            }]
        else:
            log_reference = []

        # request_id and request_time are mandatory
        # and they can be found in a couple of different places
        try:
            job_guid_data = self.find_job_guid(build)
            request_ids = build['properties'].get('request_ids',
                                                  build['request_ids'])
        except KeyError:
            continue

        treeherder_data['coalesced'] = job_guid_data['coalesced']

        def prop_remove(field):
            try:
                del artifact_build['properties'][field]
            except KeyError:
                pass

        prop_remove("product")
        prop_remove("project")
        prop_remove("buildername")
        prop_remove("slavename")
        prop_remove("build_url")
        prop_remove("log_url")
        prop_remove("slavebuilddir")
        prop_remove("branch")
        prop_remove("repository")
        # ... (remainder of this method omitted) ...
Author: asutherland | Project: treeherder-service | Lines: 101 | Source: buildapi.py
Example 8: transform
def transform(self, data, source, revision_filter=None, project_filter=None,
              job_group_filter=None):
    """
    Transform the buildapi structure into something we can ingest via
    our RESTful API.
    """
    valid_projects = set(Repository.objects.values_list('name', flat=True))
    revision_dict = defaultdict(list)

    # loop to catch all the revisions
    for project, revisions in data[source].iteritems():
        if common.should_skip_project(project, valid_projects, project_filter):
            continue
        for rev in revisions.iterkeys():
            if common.should_skip_revision(rev, revision_filter):
                continue
            revision_dict[project].append(rev)

    job_ids_seen_last_time = cache.get(CACHE_KEYS[source], set())
    job_ids_seen_now = set()

    th_collections = {}

    for project, revisions in data[source].iteritems():
        if common.should_skip_project(project, valid_projects, project_filter):
            continue

        revisions_seen_now_for_project = set()

        for revision, jobs in revisions.items():
            if common.should_skip_revision(revision, revision_filter):
                continue

            # it should be quite rare for a job to be ingested before a
            # revision, but it could happen
            if revision not in revisions_seen_now_for_project and \
               not Push.objects.filter(repository__name=project,
                                       revision__startswith=revision).exists():
                logger.warning("skipping jobs since %s revision %s "
                               "not yet ingested", project, revision)
                continue
            revisions_seen_now_for_project.add(revision)

            # using project and revision from the revision lookups
            # to filter those jobs with unmatched revision
            for job in jobs:
                job_ids_seen_now.add(job['id'])

                # Don't process jobs that we saw the last time this task
                # completed successfully.
                if job['id'] in job_ids_seen_last_time:
                    continue

                treeherder_data = {
                    'revision': revision,
                    'project': project,
                }

                buildername = job['buildername']
                platform_info = buildbot.extract_platform_info(buildername)
                job_name_info = buildbot.extract_name_info(buildername)

                if (job_group_filter and job_name_info.get('group_symbol', '').lower() !=
                        job_group_filter.lower()):
                    continue

                if source == 'pending':
                    request_id = job['id']
                elif source == 'running':
                    # The last element in request_ids corresponds to the request id of this job,
                    # the others are for the requests that were coalesced into this one.
                    request_id = job['request_ids'][-1]

                new_job = {
                    'job_guid': common.generate_job_guid(
                        request_id,
                        buildername
                    ),
                    'name': job_name_info.get('name', ''),
                    'job_symbol': job_name_info.get('job_symbol', ''),
                    'group_name': job_name_info.get('group_name', ''),
                    'group_symbol': job_name_info.get('group_symbol', ''),
                    'reference_data_name': buildername,
                    'state': source,
                    'submit_timestamp': job['submitted_at'],
                    'build_platform': {
                        'os_name': platform_info['os'],
                        'platform': platform_info['os_platform'],
                        'architecture': platform_info['arch'],
                    },
                    # where are we going to get this data from?
                    'machine_platform': {
                        'os_name': platform_info['os'],
                        'platform': platform_info['os_platform'],
                        'architecture': platform_info['arch'],
                    },
                    'who': 'unknown',
                    'option_collection': {
                        # build_type contains an option name, eg. PGO
                        # ... (remainder of this method omitted) ...
Author: SebastinSanty | Project: treeherder | Lines: 101 | Source: buildapi.py
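Example 8 cuts off right at the option_collection entry. The following is not the elided source, but Example 10 below builds the same field in full, which suggests the missing piece looks roughly like this sketch:

# Sketch only -- mirrors the completed 'option_collection' field in
# Example 10, not the elided lines of Example 8 itself.
buildername = "Linux mozilla-inbound pgo-build"  # illustrative builder name
option_collection = {
    # extract_build_type yields an option name such as 'opt', 'debug'
    # or 'pgo' (per the comments in Examples 8 and 10).
    buildbot.extract_build_type(buildername): True,
}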
Example 9: transform
def transform(self, data, project_filter=None, revision_filter=None, job_group_filter=None):
    """
    Transform the builds4h structure into something we can ingest via
    our RESTful API.
    """
    revisions = defaultdict(list)
    valid_projects = set(x.project for x in Datasource.objects.cached())

    for build in data["builds"]:
        try:
            prop = build["properties"]
            project = prop["branch"]
            if common.should_skip_project(project, valid_projects, project_filter):
                continue
            if common.should_skip_revision(prop["revision"], revision_filter):
                continue
        except KeyError as e:
            logger.warning("skipping builds-4hr job %s since missing property: %s", build["id"], str(e))
            continue

        revisions[project].append(prop["revision"])

    revisions_lookup = common.lookup_revisions(revisions)
    job_ids_seen_last_time = cache.get(CACHE_KEYS["complete"], set())
    job_ids_seen_now = set()

    # Holds one collection per unique branch/project
    th_collections = {}

    for build in data["builds"]:
        try:
            prop = build["properties"]
            project = prop["branch"]
            buildername = prop["buildername"]
            if common.should_skip_project(project, valid_projects, project_filter):
                continue
            if common.should_skip_revision(prop["revision"], revision_filter):
                continue
        except KeyError:
            continue

        try:
            resultset = revisions_lookup[project][prop["revision"]]
        except KeyError:
            logger.warning(
                "skipping builds-4hr job %s since %s revision %s not yet ingested",
                build["id"],
                project,
                prop["revision"],
            )
            continue

        # We record the id here rather than at the start of the loop, since we
        # must not count jobs whose revisions were not yet imported as processed,
        # or we'll never process them once we've ingested their associated revision.
        job_ids_seen_now.add(build["id"])

        # Don't process jobs that were already present in builds-4hr
        # the last time this task completed successfully.
        if build["id"] in job_ids_seen_last_time:
            continue

        platform_info = buildbot.extract_platform_info(buildername)
        job_name_info = buildbot.extract_name_info(buildername)

        if job_group_filter and job_name_info.get("group_symbol", "").lower() != job_group_filter.lower():
            continue

        treeherder_data = {
            "revision": prop["revision"],
            "resultset_id": resultset["id"],
            "project": project,
            "coalesced": [],
        }

        log_reference = []
        if "log_url" in prop:
            log_reference.append({"url": prop["log_url"], "name": "buildbot_text"})

        # add structured logs to the list of log references
        if "blobber_files" in prop:
            try:
                blobber_files = json.loads(prop["blobber_files"])
                for bf, url in blobber_files.items():
                    if bf and url and bf.endswith("_errorsummary.log"):
                        log_reference.append({"url": url, "name": "errorsummary_json"})
            except Exception as e:
                logger.warning("invalid blobber_files json for build id %s (%s): %s", build["id"], buildername, e)

        try:
            job_guid_data = self.find_job_guid(build)
            # request_ids is mandatory, but can be found in several places.
            request_ids = prop.get("request_ids", build["request_ids"])
            # The last element in request_ids corresponds to the request id of this job,
            # the others are for the requests that were coalesced into this one.
            # ... (remainder of this method omitted) ...
Author: samh12 | Project: treeherder | Lines: 101 | Source: buildapi.py
Example 10: transform
def transform(self, data):
    """
    Transform the builds4h structure into something we can ingest via
    our RESTful API.
    """
    revisions = defaultdict(list)
    projects = set(x.project for x in Datasource.objects.cached())

    for build in data["builds"]:
        prop = build["properties"]

        if "branch" not in prop:
            logger.warning("property 'branch' not found in build4h")
            continue

        if prop["branch"] not in projects:
            logger.warning("skipping job on branch {0}".format(prop["branch"]))
            continue

        prop["revision"] = prop.get("revision", prop.get("got_revision", prop.get("sourcestamp", None)))

        if not prop["revision"]:
            logger.warning("property 'revision' not found in build4h")
            continue

        prop["revision"] = prop["revision"][0:12]
        revisions[prop["branch"]].append(prop["revision"])

    revisions_lookup = common.lookup_revisions(revisions)

    # Holds one collection per unique branch/project
    th_collections = {}

    for build in data["builds"]:
        prop = build["properties"]

        try:
            resultset = revisions_lookup[prop["branch"]][prop["revision"]]
        except KeyError:
            # this branch is not one of those we care about
            continue

        project = prop["branch"]

        treeherder_data = {
            "revision_hash": resultset["revision_hash"],
            "resultset_id": resultset["id"],
            "project": project,
            "coalesced": [],
        }

        platform_info = buildbot.extract_platform_info(prop["buildername"])
        job_name_info = buildbot.extract_name_info(prop["buildername"])

        if "log_url" in prop:
            log_reference = [{"url": prop["log_url"], "name": "builds-4h"}]
        else:
            log_reference = []

        job_guid_data = self.find_job_guid(build)
        treeherder_data["coalesced"] = job_guid_data["coalesced"]

        job = {
            "job_guid": job_guid_data["job_guid"],
            "name": job_name_info.get("name", ""),
            "job_symbol": job_name_info.get("job_symbol", ""),
            "group_name": job_name_info.get("group_name", ""),
            "group_symbol": job_name_info.get("group_symbol", ""),
            "buildername": prop["buildername"],
            "product_name": prop.get("product", ""),
            "state": "completed",
            "result": buildbot.RESULT_DICT[build["result"]],
            "reason": build["reason"],
            # scheduler, if 'who' property is not present
            "who": prop.get("who", prop.get("scheduler", "")),
            "submit_timestamp": build["requesttime"],
            "start_timestamp": build["starttime"],
            "end_timestamp": build["endtime"],
            "machine": prop.get("slavename", "unknown"),
            # build_url not present in all builds
            "build_url": prop.get("build_url", ""),
            # build_platform same as machine_platform
            "build_platform": {
                # platform attributes sometimes parse without results
                "os_name": platform_info.get("os", ""),
                "platform": platform_info.get("os_platform", ""),
                "architecture": platform_info.get("arch", ""),
            },
            "machine_platform": {
                "os_name": platform_info.get("os", ""),
                "platform": platform_info.get("os_platform", ""),
                "architecture": platform_info.get("arch", ""),
            },
            # pgo or non-pgo dependent on buildername parsing
            "option_collection": {buildbot.extract_build_type(prop["buildername"]): True},
            "log_references": log_reference,
            "artifact": {"type": "", "name": "", "log_urls": [], "blob": ""},
        }
        # ... (remainder of this method omitted) ...
Author: un33k | Project: treeherder-service | Lines: 101 | Source: buildapi.py
Note: The treeherder.etl.buildbot.extract_platform_info examples on this page were collected by 纯净天空 from GitHub, MSDocs, and other source-code and documentation hosting platforms. The snippets are drawn from open-source projects contributed by their respective authors; copyright remains with the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.