This article collects typical usage examples of the Python function treeherder.etl.common.fetch_json. If you are unsure what fetch_json does, how to call it, or where it is used in practice, the hand-picked examples below should help. Twenty code examples are shown, ordered by popularity by default.
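None of the examples below show fetch_json itself, so here is a minimal sketch of what such a helper presumably looks like, inferred purely from the call sites on this page (a URL, optional query params, JSON decoding, and raising on HTTP errors). It is an assumption for orientation, not Treeherder's actual implementation; the headers and timeout in particular are made up.

import requests

def fetch_json(url, params=None):
    # Sketch only: the real treeherder.etl.common.fetch_json may differ.
    response = requests.get(url,
                            params=params,
                            headers={'Accept': 'application/json'},  # assumed
                            timeout=30)  # assumed
    # The examples below catch requests.exceptions.HTTPError, so the
    # helper must raise on non-2xx responses:
    response.raise_for_status()
    return response.json()
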
Example 1: extract

def extract(self, url, revision):
    logger.info("extracting missing resultsets: {0}".format(url))
    try:
        return fetch_json(url)
    except requests.exceptions.HTTPError as e:
        status_code = e.response.status_code
        if status_code == 404:
            # we will sometimes get here because builds4hr/pending/running have a
            # job with a resultset that json-pushes doesn't know about. So far
            # I have only found this to be the case when it uses a revision from
            # the wrong repo. For example: mozilla-central, but l10n. The l10n
            # is a separate repo, but buildbot shows it as the same. So we
            # create this dummy resultset with ``active_status`` of ``onhold``.
            #
            # The effect of this is that we won't keep trying to re-fetch
            # the bogus pushlog, but the jobs are (correctly) not shown in the
            # UI, since they're bad data.
            logger.warning(("no pushlog in json-pushes. generating a dummy"
                            " onhold placeholder: {0}").format(url))
            # we want to make a "dummy" resultset that is "onhold",
            # because json-pushes doesn't know about it.
            # This is, in effect, what TBPL does.
            # These won't show in the UI, because they only fetch "active"
            # resultsets
            return get_not_found_onhold_push(url, revision)
        logger.warning("HTTPError %s fetching: %s", status_code, url)
        raise

Author: anurag619 | Project: treeherder | Source: pushlog.py

Example 2: fetch_resultset

def fetch_resultset(self, url, repository, sha=None):
    params = {"sha": sha} if sha else {}
    params.update(self.CREDENTIALS)
    logger.info("Fetching resultset details: {}".format(url))
    try:
        commits = self.get_cleaned_commits(fetch_json(url, params))
        head_commit = commits[-1]
        resultset = {
            "revision": head_commit["sha"],
            "push_timestamp": to_timestamp(
                head_commit["commit"]["author"]["date"]),
            "author": head_commit["commit"]["author"]["email"],
        }
        revisions = []
        for commit in commits:
            revisions.append({
                "comment": commit["commit"]["message"],
                "repository": repository,
                "author": "{} <{}>".format(
                    commit["commit"]["author"]["name"],
                    commit["commit"]["author"]["email"]),
                "revision": commit["sha"]
            })
        resultset["revisions"] = revisions
        return resultset
    except Exception as ex:
        logger.exception("Error fetching commits", exc_info=ex)
        newrelic.agent.record_exception(ex, params={
            "url": url, "repository": repository, "sha": sha
        })

Author: akhileshpillai | Project: treeherder | Source: resultset_loader.py

Example 3: _query_latest_gecko_decision_task_id

def _query_latest_gecko_decision_task_id(self, repo_name):
    url = self.tc_index_url % repo_name
    logger.info('Fetching {}'.format(url))
    latest_task = fetch_json(url)
    task_id = latest_task['taskId']
    logger.info('For {} we found the task id: {}'.format(repo_name, task_id))
    return task_id

Author: SebastinSanty | Project: treeherder | Source: runnable_jobs.py

Example 4: fetch_push

def fetch_push(self, url, repository, sha=None):
    newrelic.agent.add_custom_parameter("sha", sha)
    logger.info("fetching for {} {}".format(repository, url))
    # there will only ever be one, with this url
    # (note: indexing dict.values() works here because this is Python 2;
    # on Python 3 it would need list(...)[0])
    push = fetch_json(url)["pushes"].values()[0]
    commits = []
    # TODO: Remove this when bug 1257602 is addressed
    rev_hash_components = []
    # we only want to ingest the last 200 commits for each push,
    # to protect against the 5000+ commit merges on release day uplift.
    for commit in push['changesets'][-200:]:
        commits.append({
            "revision": commit["node"],
            "author": commit["author"],
            "comment": commit["desc"],
        })
        rev_hash_components.append(commit['node'])
        rev_hash_components.append(commit['branch'])
    return {
        "revision": commits[-1]["revision"],
        'revision_hash': generate_revision_hash(rev_hash_components),
        "author": push["user"],
        "push_timestamp": push["date"],
        "revisions": commits,
    }

Author: MikeLing | Project: treeherder | Source: push_loader.py

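The generate_revision_hash helper is not shown on this page. A plausible sketch, assuming it simply hashes the concatenated node/branch components collected above; the real Treeherder helper may use a different algorithm or encoding:

import hashlib

def generate_revision_hash(components):
    # Assumption: SHA-1 over the concatenated components. Illustrative only.
    return hashlib.sha1(''.join(components).encode('utf-8')).hexdigest()
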
Example 5: fetch_push

def fetch_push(self, url, repository):
    params = {}
    params.update(self.CREDENTIALS)
    logger.info("Fetching push details: %s", url)
    commits = self.get_cleaned_commits(fetch_json(url, params))
    head_commit = commits[-1]
    push = {
        "revision": head_commit["sha"],
        "push_timestamp": to_timestamp(
            head_commit["commit"]["author"]["date"]),
        "author": head_commit["commit"]["author"]["email"],
    }
    revisions = []
    for commit in commits:
        revisions.append({
            "comment": commit["commit"]["message"],
            "author": u"{} <{}>".format(
                commit["commit"]["author"]["name"],
                commit["commit"]["author"]["email"]),
            "revision": commit["sha"]
        })
    push["revisions"] = revisions
    return push

Author: bclary | Project: treeherder | Source: push_loader.py

Example 6: get_bugs_for_search_term

def get_bugs_for_search_term(search, base_uri):
    """
    Fetch the base_uri endpoint filtering on search and status.
    Status must be either 'open' or 'closed'
    """
    from treeherder.etl.common import fetch_json
    params = {
        'search': search
    }
    return fetch_json(base_uri, params=params)

Author: arpan98 | Project: treeherder | Source: error_summary.py

Example 7: run

def run(self, revision_filter=None, project_filter=None, job_group_filter=None):
    """ Returns True if new completed jobs were loaded, False otherwise. """
    builds_4hr = common.fetch_json(settings.BUILDAPI_BUILDS4H_URL)
    job_collections, job_ids_seen = self.transform(builds_4hr,
                                                   revision_filter=revision_filter,
                                                   project_filter=project_filter,
                                                   job_group_filter=job_group_filter)
    if job_collections:
        th_publisher.post_treeherder_collections(job_collections,
                                                 chunk_size=settings.BUILDAPI_BUILDS4H_CHUNK_SIZE)
    cache.set(CACHE_KEYS['complete'], job_ids_seen)
    return bool(job_collections)

Author: vakila | Project: treeherder | Source: buildapi.py

Example 8: fetch_intermittent_bugs

def fetch_intermittent_bugs(offset, limit):
    url = settings.BZ_API_URL + '/rest/bug'
    params = {
        'keywords': 'intermittent-failure',
        'chfieldfrom': '-1y',
        'include_fields': ('id,summary,status,resolution,op_sys,cf_crash_signature,'
                           'keywords,last_change_time,whiteboard'),
        'offset': offset,
        'limit': limit,
    }
    response = fetch_json(url, params=params)
    return response.get('bugs', [])

Author: edmorley | Project: treeherder | Source: bugzilla.py

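A caller would typically page through all intermittent bugs by advancing offset until a short page comes back. A usage sketch built on the function above (the page size of 500 is an assumption, not from the original):

def fetch_all_intermittent_bugs(page_size=500):
    bugs = []
    offset = 0
    while True:
        page = fetch_intermittent_bugs(offset, page_size)
        bugs.extend(page)
        if len(page) < page_size:
            # A short page means Bugzilla has no further results.
            break
        offset += page_size
    return bugs
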
Example 9: run

def run(self, revision_filter=None, project_filter=None, job_group_filter=None):
    """ Returns True if new completed jobs were loaded, False otherwise. """
    builds_4hr = common.fetch_json(BUILDS4H_URL)
    job_collections, job_ids_seen = self.transform(builds_4hr,
                                                   revision_filter=revision_filter,
                                                   project_filter=project_filter,
                                                   job_group_filter=job_group_filter)
    if job_collections:
        store_jobs(job_collections, chunk_size=500)
    cache.set(CACHE_KEYS['complete'], job_ids_seen, FOUR_HOURS_IN_SECONDS)
    return bool(job_collections)

Author: git-srinivas | Project: treeherder | Source: buildapi.py

Example 10: run

def run(self, revision_filter=None, project_filter=None, job_group_filter=None):
    """ Returns True if new running jobs were loaded, False otherwise. """
    builds_running = common.fetch_json(settings.BUILDAPI_RUNNING_URL)
    job_collections, job_ids_seen = self.transform(builds_running,
                                                   'running',
                                                   revision_filter=revision_filter,
                                                   project_filter=project_filter,
                                                   job_group_filter=job_group_filter)
    if job_collections:
        store_jobs(job_collections,
                   chunk_size=settings.BUILDAPI_RUNNING_CHUNK_SIZE)
    cache.set(CACHE_KEYS['running'], job_ids_seen)
    return bool(job_collections)

Author: AnthonyMeaux | Project: treeherder | Source: buildapi.py

Example 11: _taskcluster_runnable_jobs_gz

def _taskcluster_runnable_jobs_gz(tc_graph_url):
    try:
        # `force_gzip_decompression` works around Taskcluster not setting `Content-Encoding: gzip`:
        # https://bugzilla.mozilla.org/show_bug.cgi?id=1423215
        tc_graph = fetch_json(tc_graph_url, force_gzip_decompression=True)
    except ValidationError:
        logger.warning('Failed to validate %s', tc_graph_url)
        return []
    except requests.exceptions.HTTPError as e:
        logger.info('HTTPError %s when getting taskgraph at %s',
                    e.response.status_code, tc_graph_url)
        return []
    return tc_graph

Author: bclary | Project: treeherder | Source: runnable_jobs.py

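The force_gzip_decompression flag exists because these artifacts are gzip-compressed but served without a Content-Encoding: gzip header, so requests does not decompress them automatically. A rough sketch of how a fetch helper could honour such a flag, under the assumption that it gunzips the raw body by hand (the helper name here is hypothetical):

import gzip
import io
import json

import requests

def fetch_json_gz(url, params=None):
    # Hypothetical sketch, not Treeherder's implementation.
    response = requests.get(url, params=params, timeout=30)
    response.raise_for_status()
    # Decompress manually, since the missing Content-Encoding header
    # means requests leaves the body as raw gzip bytes:
    raw = gzip.GzipFile(fileobj=io.BytesIO(response.content)).read()
    return json.loads(raw.decode('utf-8'))
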
Example 12: _taskcluster_runnable_jobs

def _taskcluster_runnable_jobs(project, decision_task_id):
    ret = []
    tc_graph = {}
    if not decision_task_id:
        decision_task_id = query_latest_gecko_decision_task_id(project)
    # Some trees (e.g. comm-central) don't have a decision task, which means there are no taskcluster runnable jobs
    if not decision_task_id:
        return ret
    tc_graph_url = settings.TASKCLUSTER_TASKGRAPH_URL.format(task_id=decision_task_id)
    validate = URLValidator()
    try:
        validate(tc_graph_url)
        tc_graph = fetch_json(tc_graph_url)
    except ValidationError:
        logger.warning('Failed to validate {}'.format(tc_graph_url))
        return []
    except requests.exceptions.HTTPError as e:
        logger.info('HTTPError {} when getting taskgraph at {}'.format(
            e.response.status_code, tc_graph_url))
        return []
    # .iteritems() marks this as Python 2 code; on Python 3 use .items()
    for label, node in tc_graph.iteritems():
        if not ('extra' in node['task'] and 'treeherder' in node['task']['extra']):
            # some tasks don't have the treeherder information we need
            # to be able to display them (and are not intended to be
            # displayed). skip.
            continue
        treeherder_options = node['task']['extra']['treeherder']
        task_metadata = node['task']['metadata']
        platform_option = ' '.join(treeherder_options.get('collection', {}).keys())
        ret.append({
            'build_platform': treeherder_options.get('machine', {}).get('platform', ''),
            'build_system_type': 'taskcluster',
            'job_group_name': treeherder_options.get('groupName', ''),
            'job_group_symbol': treeherder_options.get('groupSymbol', ''),
            'job_type_description': task_metadata['description'],
            'job_type_name': task_metadata['name'],
            'job_type_symbol': treeherder_options['symbol'],
            'platform': treeherder_options.get('machine', {}).get('platform', ''),
            'platform_option': platform_option,
            'ref_data_name': label,
            'state': 'runnable',
            'result': 'runnable',
            'job_coalesced_to_guid': None
        })
    return ret

Author: kapy2010 | Project: treeherder | Source: runnable_jobs.py

Example 13: query_latest_gecko_decision_task_id

def query_latest_gecko_decision_task_id(project):
    url = TASKCLUSTER_INDEX_URL % project
    logger.info('Fetching %s', url)
    try:
        latest_task = fetch_json(url)
        task_id = latest_task['taskId']
        logger.info('For %s we found the task id: %s', project, task_id)
    except requests.exceptions.HTTPError as e:
        # Specifically handle 404 errors, as it means there's no decision task on this push
        if e.response.status_code == 404:
            logger.info('For %s we did not find a task id', project)
            task_id = None
        else:
            raise
    return task_id

Author: mozilla | Project: treeherder | Source: runnable_jobs.py

Example 14: _taskcluster_runnable_jobs

def _taskcluster_runnable_jobs(project, decision_task_id):
    ret = []
    tc_graph = {}
    if not decision_task_id:
        decision_task_id = query_latest_gecko_decision_task_id(project)
    # Some trees (e.g. comm-central) don't have a decision task, which means there are no taskcluster runnable jobs
    if not decision_task_id:
        return ret
    tc_graph_url = RUNNABLE_JOBS_URL.format(task_id=decision_task_id)
    validate = URLValidator()
    try:
        validate(tc_graph_url)
        tc_graph = fetch_json(tc_graph_url)
    except ValidationError:
        logger.warning('Failed to validate %s', tc_graph_url)
        return []
    except requests.exceptions.HTTPError as e:
        logger.info('HTTPError %s when getting uncompressed taskgraph at %s',
                    e.response.status_code, tc_graph_url)
        # TODO: Remove this fallback once all .gz artifacts have expired
        logger.info('Attempting to fall back to the compressed taskgraph...')
        newrelic.agent.record_custom_event(
            "runnable_jobs_fallback",
            {
                "message": "runnable-jobs.json artifact not found, falling back to gz version",
                "project": project,
                "url": tc_graph_url
            }
        )
        tc_graph = _taskcluster_runnable_jobs_gz(tc_graph_url + ".gz")
    for label, node in iteritems(tc_graph):
        ret.append({
            'build_platform': node.get('platform', ''),
            'build_system_type': 'taskcluster',
            'job_group_name': node.get('groupName', ''),
            'job_group_symbol': node.get('groupSymbol', ''),
            'job_type_name': label,
            'job_type_symbol': node['symbol'],
            'platform': node.get('platform'),
            'platform_option': ' '.join(node.get('collection', {}).keys()),
            'ref_data_name': label,
            'state': 'runnable',
            'result': 'runnable',
        })
    return ret

Author: bclary | Project: treeherder | Source: runnable_jobs.py

Example 15: _taskcluster_runnable_jobs

def _taskcluster_runnable_jobs(project):
    decision_task_id = query_latest_gecko_decision_task_id(project)
    # Some trees (e.g. comm-central) don't have a decision task, which means there are no taskcluster runnable jobs
    if not decision_task_id:
        return []
    for run_number in range(0, 5):
        tc_graph_url = RUNNABLE_JOBS_URL.format(task_id=decision_task_id, run_number=run_number)
        validate = URLValidator()
        try:
            validate(tc_graph_url)
        except ValidationError:
            logger.warning('Failed to validate %s', tc_graph_url)
            return []
        try:
            tc_graph = fetch_json(tc_graph_url)
        except requests.exceptions.HTTPError as e:
            logger.info('HTTPError %s when getting taskgraph at %s',
                        e.response.status_code, tc_graph_url)
            continue
        return [
            {
                'build_platform': node.get('platform', ''),
                'build_system_type': 'taskcluster',
                'job_group_name': node.get('groupName', ''),
                'job_group_symbol': node.get('groupSymbol', ''),
                'job_type_name': label,
                'job_type_symbol': node['symbol'],
                'platform': node.get('platform'),
                'platform_option': ' '.join(node.get('collection', {}).keys()),
                'ref_data_name': label,
                'state': 'runnable',
                'result': 'runnable',
            }
            for label, node in tc_graph.items()
        ]
    return []

Author: mozilla | Project: treeherder | Source: runnable_jobs.py

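Note the design change relative to Example 14: instead of falling back to a .gz artifact on failure, this version probes up to five task runs (run_number 0 through 4) for a runnable-jobs artifact, skipping runs whose fetch returns an HTTP error and giving up with an empty list only after all five miss.
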
Example 16: _taskcluster_runnable_jobs

def _taskcluster_runnable_jobs(project, decision_task_id):
    ret = []
    tc_graph = {}
    if not decision_task_id:
        decision_task_id = query_latest_gecko_decision_task_id(project)
    # Some trees (e.g. comm-central) don't have a decision task, which means there are no taskcluster runnable jobs
    if not decision_task_id:
        return ret
    tc_graph_url = settings.TASKCLUSTER_RUNNABLE_JOBS_URL.format(task_id=decision_task_id)
    validate = URLValidator()
    try:
        validate(tc_graph_url)
        # `force_gzip_decompression` works around Taskcluster not setting `Content-Encoding: gzip`:
        # https://bugzilla.mozilla.org/show_bug.cgi?id=1423215
        tc_graph = fetch_json(tc_graph_url, force_gzip_decompression=True)
    except ValidationError:
        logger.warning('Failed to validate {}'.format(tc_graph_url))
        return []
    except requests.exceptions.HTTPError as e:
        logger.info('HTTPError {} when getting taskgraph at {}'.format(
            e.response.status_code, tc_graph_url))
        return []
    for label, node in tc_graph.iteritems():
        ret.append({
            'build_platform': node.get('platform', ''),
            'build_system_type': 'taskcluster',
            'job_group_name': node.get('groupName', ''),
            'job_group_symbol': node.get('groupSymbol', ''),
            'job_type_name': label,
            'job_type_symbol': node['symbol'],
            'platform': node.get('platform'),
            'platform_option': ' '.join(node.get('collection', {}).keys()),
            'ref_data_name': label,
            'state': 'runnable',
            'result': 'runnable',
        })
    return ret

Author: SJasoria | Project: treeherder | Source: runnable_jobs.py

Example 17: run

def run(self):
    all_the_things = fetch_json(settings.ALLTHETHINGS_URL)
    jobs_per_branch = self.transform(all_the_things)
    self.load(jobs_per_branch)

Author: KWierso | Project: treeherder | Source: allthethings.py

Example 18: list

def list(self, request, project):
    """
    GET method implementation for list of all runnable buildbot jobs
    """
    decision_task_id = request.query_params.get('decisionTaskID')
    if decision_task_id:
        tc_graph_url = settings.TASKCLUSTER_TASKGRAPH_URL.format(task_id=decision_task_id)
        tc_graph = None
        validate = URLValidator()
        try:
            validate(tc_graph_url)
            tc_graph = fetch_json(tc_graph_url)
        except ValidationError:
            # We pass here as we still want to schedule BuildBot jobs
            pass
        except Exception as ex:
            return Response("Exception: {0}".format(ex), status=HTTP_500_INTERNAL_SERVER_ERROR)
    else:
        tc_graph = {}

    repository = models.Repository.objects.get(name=project)
    options_by_hash = models.OptionCollection.objects.all().select_related(
        'option').values_list('option__name', 'option_collection_hash')
    runnable_jobs = models.RunnableJob.objects.filter(
        repository=repository
    ).select_related('build_platform', 'machine_platform',
                     'job_type', 'job_type__job_group')

    ret = []

    # Adding buildbot jobs
    for datum in runnable_jobs:
        options = ' '.join(option_name for (option_name, col_hash) in options_by_hash
                           if col_hash == datum.option_collection_hash)
        ret.append({
            'build_platform_id': datum.build_platform.id,
            'build_platform': datum.build_platform.platform,
            'build_os': datum.build_platform.os_name,
            'build_architecture': datum.build_platform.architecture,
            'machine_platform_id': datum.machine_platform.id,
            'platform': datum.machine_platform.platform,
            'machine_platform_os': datum.machine_platform.os_name,
            'machine_platform_architecture': datum.machine_platform.architecture,
            'job_group_id': datum.job_type.job_group.id,
            'job_group_name': datum.job_type.job_group.name,
            'job_group_symbol': datum.job_type.job_group.symbol,
            'job_group_description': datum.job_type.job_group.description,
            'job_type_id': datum.job_type.id,
            'job_type_name': datum.job_type.name,
            'job_type_symbol': datum.job_type.symbol,
            'job_type_description': datum.job_type.description,
            'option_collection_hash': datum.option_collection_hash,
            'ref_data_name': datum.ref_data_name,
            'build_system_type': datum.build_system_type,
            'platform_option': options,
            'job_coalesced_to_guid': None,
            'state': 'runnable',
            'result': 'runnable'})

    for label, node in tc_graph.iteritems():
        extra = node['task'].get('extra')
        if not extra or not extra.get('treeherder'):
            # some tasks don't have the treeherder information we need
            # to be able to display them (and are not intended to be
            # displayed). skip.
            continue
        treeherder_options = extra['treeherder']
        task_metadata = node['task']['metadata']
        build_platform = treeherder_options.get('machine', {}).get('platform', '')
        # Not all tasks have a group name
        job_group_name = treeherder_options.get('groupName', '')
        # Not all tasks have a group symbol
        job_group_symbol = treeherder_options.get('groupSymbol', '')
        # Not all tasks have a collection
        if 'collection' in treeherder_options:
            platform_option = ' '.join(treeherder_options['collection'].keys())
        else:
            platform_option = ""
        ret.append({
            'build_platform': build_platform,
            'platform': build_platform,
            'job_group_name': job_group_name,
            'job_group_symbol': job_group_symbol,
            'job_type_name': task_metadata['name'],
            'job_type_symbol': treeherder_options['symbol'],
            'job_type_description': task_metadata['description'],
            'ref_data_name': label,
            'build_system_type': 'taskcluster',
            'platform_option': platform_option,
            'job_coalesced_to_guid': None,
            'state': 'runnable',
            'result': 'runnable'})
    # ... (the rest of this method is omitted in the source)

Author: akhileshpillai | Project: treeherder | Source: runnable_jobs.py

Example 19: extract

def extract(self, url):
    try:
        return fetch_json(url)
    except requests.exceptions.HTTPError as e:
        logger.warning("HTTPError %s fetching: %s", e.response.status_code, url)
        raise

Author: SJasoria | Project: treeherder | Source: pushlog.py

Example 20: last_push_id_from_server

def last_push_id_from_server(repo):
    """Obtain the last push ID from a ``Repository`` instance."""
    url = '%s/json-pushes/?version=2' % repo.url
    data = fetch_json(url)
    return data['lastpushid']

Author: SJasoria | Project: treeherder | Source: pushlog.py

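A hedged usage sketch for the helper above, assuming repo is a Repository model instance like the one looked up in Example 18, and that requests and logger are available as in the other examples:

try:
    last_push_id = last_push_id_from_server(repo)
except requests.exceptions.HTTPError as e:
    # Tolerate a transient pushlog failure rather than aborting ingestion.
    logger.warning("Could not fetch last push id for %s: %s", repo.name, e)
    last_push_id = None
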
Note: The treeherder.etl.common.fetch_json examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are drawn from open-source projects by various contributors, and copyright remains with the original authors; consult each project's license before reusing or redistributing the code.