Python jobs.store_job_data Function Code Examples


This article collects typical usage examples of the Python function treeherder.etl.jobs.store_job_data. If you are trying to work out what store_job_data does, how to call it, or what real-world uses look like, the selected code examples below may help.



The following presents 20 code examples of the store_job_data function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
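Before the individual examples, a quick orientation: judging from the examples below, store_job_data takes a Repository model instance and a list of job blobs, where each blob references the revision of an already-stored push and carries a nested job dict. The following is only a minimal sketch assembled from those examples; the repository name, revision, and job field values are placeholders, not values from any real project.

from treeherder.etl.jobs import store_job_data
from treeherder.model.models import Job, Repository

# Placeholder lookups: in the tests below these come from pytest fixtures
# such as test_repository and push_stored.
repository = Repository.objects.get(name="test-repository")
push_revision = "4f8c76a1e3b2d5c0a9b8e7f6d5c4b3a2d1e0f9a8"  # revision of a stored push

job_blob = {
    'project': repository.name,
    'revision': push_revision,
    'job': {
        'job_guid': 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33',
        'state': 'completed',
        'result': 'success',
    },
}

# store_job_data expects a list of blobs, so a single job is wrapped in a list.
store_job_data(repository, [job_blob])

# The ingested job can then be looked up by its guid.
job = Job.objects.get(guid=job_blob['job']['job_guid'])

Which additional job fields (timestamps, log_references, group and type names, and so on) are needed depends on the ingestion path; the examples below show the combinations that the Treeherder tests actually exercise.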

Example 1: test_jobs

def test_jobs(eleven_job_blobs, failure_classifications, test_repository):
    job_blobs = [j for j in eleven_job_blobs if 'superseded' not in j]
    for i, job in enumerate(JOB_DATA):
        job_blobs[i]['job'].update(job)
        print(job_blobs[i])
    store_job_data(test_repository, job_blobs[0:len(JOB_DATA)])
    return [Job.objects.get(id=i) for i in range(1, len(JOB_DATA) + 1)]
Developer: rwood-moz, Project: treeherder, Lines of code: 7, Source file: test_filter_jobs_by_result_status.py


Example 2: test_ingest_running_to_retry_to_success_sample_job

def test_ingest_running_to_retry_to_success_sample_job(test_repository,
                                                       failure_classifications,
                                                       sample_data,
                                                       sample_push,
                                                       mock_log_parser,
                                                       ingestion_cycles):
    # verifies that retries to success work, no matter how jobs are batched
    store_push_data(test_repository, sample_push)

    job_datum = copy.deepcopy(sample_data.job_data[0])
    job_datum['revision'] = sample_push[0]['revision']

    job = job_datum['job']
    job_guid_root = job['job_guid']

    job_data = []
    for (state, result, job_guid) in [
            ('running', 'unknown', job_guid_root),
            ('completed', 'retry',
             job_guid_root + "_" + str(job['end_timestamp'])[-5:]),
            ('completed', 'success', job_guid_root)]:
        new_job_datum = copy.deepcopy(job_datum)
        new_job_datum['job']['state'] = state
        new_job_datum['job']['result'] = result
        new_job_datum['job']['job_guid'] = job_guid
        job_data.append(new_job_datum)

    for (i, j) in ingestion_cycles:
        store_job_data(test_repository, job_data[i:j])

    assert Job.objects.count() == 2
    assert Job.objects.get(id=1).result == 'retry'
    assert Job.objects.get(id=2).result == 'success'
    assert JobLog.objects.count() == 2
Developer: edmorley, Project: treeherder, Lines of code: 34, Source file: test_job_ingestion.py


Example 3: test_ingest_job_with_updated_job_group

def test_ingest_job_with_updated_job_group(test_repository, failure_classifications,
                                           sample_data, mock_log_parser,
                                           result_set_stored):
    """
    When a job_type is associated with a job group on data ingestion,
    that association will not be updated when ingesting a new job with the
    same job_type but a different job_group.
    """
    first_job_datum = sample_data.job_data[0]
    first_job_datum["job"]["group_name"] = "first group name"
    first_job_datum["job"]["group_symbol"] = "1"
    first_job_datum["revision"] = result_set_stored[0]["revision"]
    store_job_data(test_repository, [first_job_datum])

    second_job_datum = copy.deepcopy(first_job_datum)
    # create a new guid to ingest the job again
    second_job_guid = "a-unique-job-guid"
    second_job_datum["job"]["job_guid"] = second_job_guid
    second_job_datum["job"]["group_name"] = "second group name"
    second_job_datum["job"]["group_symbol"] = "2"
    second_job_datum["revision"] = result_set_stored[0]["revision"]

    store_job_data(test_repository, [second_job_datum])

    second_job = Job.objects.get(guid=second_job_guid)

    first_job_group_name = first_job_datum["job"]["group_name"]
    second_job_group_name = second_job.job_type.job_group.name

    assert first_job_group_name == second_job_group_name

    # make sure also we didn't create a new job group
    with pytest.raises(JobGroup.DoesNotExist):
        JobGroup.objects.get(name="second group name")
Developer: SebastinSanty, Project: treeherder, Lines of code: 34, Source file: test_job_ingestion.py


Example 4: test_ingest_job_with_updated_job_group

def test_ingest_job_with_updated_job_group(test_repository, failure_classifications,
                                           sample_data, mock_log_parser,
                                           push_stored):
    """
    The job_type and job_group for a job is independent of any other job_type
    and job_group combination.
    """
    first_job_datum = sample_data.job_data[0]
    first_job_datum["job"]["group_name"] = "first group name"
    first_job_datum["job"]["group_symbol"] = "1"
    first_job_guid = "first-unique-job-guid"
    first_job_datum["job"]["job_guid"] = first_job_guid
    first_job_datum["revision"] = push_stored[0]["revision"]
    store_job_data(test_repository, [first_job_datum])
    first_job = Job.objects.get(guid=first_job_guid)

    second_job_datum = copy.deepcopy(first_job_datum)
    # create a new guid to ingest the job again
    second_job_guid = "second-unique-job-guid"
    second_job_datum["job"]["job_guid"] = second_job_guid
    second_job_datum["job"]["group_name"] = "second group name"
    second_job_datum["job"]["group_symbol"] = "2"
    second_job_datum["revision"] = push_stored[0]["revision"]

    store_job_data(test_repository, [second_job_datum])

    second_job = Job.objects.get(guid=second_job_guid)

    assert second_job.job_group.name == second_job_datum["job"]["group_name"]
    assert first_job.job_group.name == first_job_datum["job"]["group_name"]
Developer: edmorley, Project: treeherder, Lines of code: 30, Source file: test_job_ingestion.py


Example 5: process_job_list

    def process_job_list(self, all_jobs_list):
        if not isinstance(all_jobs_list, list):
            all_jobs_list = [all_jobs_list]

        validated_jobs = self._get_validated_jobs_by_project(all_jobs_list)

        for project, job_list in validated_jobs.items():
            newrelic.agent.add_custom_parameter("project", project)
            try:
                repository = Repository.objects.get(name=project)

                storeable_job_list = []
                for pulse_job in job_list:
                    if pulse_job["state"] != "unscheduled":
                        try:
                            self.clean_revision(repository, pulse_job)
                            storeable_job_list.append(
                                self.transform(pulse_job)
                            )
                        except AttributeError:
                            logger.warn("Skipping job due to bad attribute",
                                        exc_info=1)

                store_job_data(repository, storeable_job_list)

            except Repository.DoesNotExist:
                logger.info("Job with unsupported project: {}".format(project))
Developer: kapy2010, Project: treeherder, Lines of code: 27, Source file: job_loader.py


Example 6: test_store_job_with_parsed_log

def test_store_job_with_parsed_log(test_repository, push_stored,
                                   failure_classifications,
                                   monkeypatch):
    """
    Test that submitting a job with a pre-parsed log gets a job_log_url
    parse_status of "parsed" and is not parsed again, even though no
    text_log_summary exists.

    This is for the case where they may want to submit it at a later time.
    """

    mock_parse = MagicMock(name="parse_line")
    monkeypatch.setattr(StepParser, 'parse_line', mock_parse)

    job_guid = 'd22c74d4aa6d2a1dcba96d95dccbd5fdca70cf33'
    job_data = {
        'project': test_repository.name,
        'revision': push_stored[0]['revision'],
        'job': {
            'job_guid': job_guid,
            'state': 'completed',
            'log_references': [{
                'url': 'http://ftp.mozilla.org/pub/mozilla.org/spidermonkey/...',
                'name': 'buildbot_text',
                'parse_status': 'parsed'
            }]
        }
    }

    store_job_data(test_repository, [job_data])

    # ensure the parsing didn't happen
    assert mock_parse.called is False
Developer: edmorley, Project: treeherder, Lines of code: 33, Source file: test_job_ingestion.py


Example 7: push_with_three_jobs

def push_with_three_jobs(sample_data, sample_resultset, test_repository):
    """
    Stores a number of jobs in the same resultset.
    """
    num_jobs = 3
    resultset = sample_resultset[0]
    jobs = copy.deepcopy(sample_data.job_data[0:num_jobs])

    # Only store data for the first resultset....
    store_result_set_data(test_repository, [resultset])

    blobs = []
    for index, blob in enumerate(jobs):
        # Modify job structure to sync with the resultset sample data
        if 'sources' in blob:
            del blob['sources']

        # Skip log references since they do not work correctly in pending state.
        if 'log_references' in blob['job']:
            del blob['job']['log_references']

        blob['revision'] = resultset['revision']
        blob['job']['state'] = 'pending'
        blobs.append(blob)

    # Store and process the jobs so they are present in the tables.
    store_job_data(test_repository, blobs)
    return Push.objects.get(repository=test_repository,
                            revision=resultset['revision'])
Developer: SebastinSanty, Project: treeherder, Lines of code: 29, Source file: conftest.py


Example 8: test_create_error_summary

def test_create_error_summary(failure_classifications,
                              jobs_with_local_log, sample_resultset,
                              test_repository):
    """
    check that a bug suggestions artifact gets inserted when running
    a parse_log task for a failed job, and that the number of
    bug search terms/suggestions matches the number of error lines.
    """
    store_result_set_data(test_repository, sample_resultset)

    jobs = jobs_with_local_log
    for job in jobs:
        job['job']['result'] = "testfailed"
        job['revision'] = sample_resultset[0]['revision']

    store_job_data(test_repository, jobs)

    bug_suggestions = get_error_summary(Job.objects.get(id=1))

    # we must have one bugs item per error in bug_suggestions.
    # errors with no bug suggestions will just have an empty
    # bugs list
    assert TextLogError.objects.count() == len(bug_suggestions)

    # We really need to add some tests that check the values of each entry
    # in bug_suggestions, but for now this is better than nothing.
    expected_keys = set(["search", "search_terms", "bugs"])
    for failure_line in bug_suggestions:
        assert set(failure_line.keys()) == expected_keys
Developer: SebastinSanty, Project: treeherder, Lines of code: 29, Source file: test_tasks.py


Example 9: test_ingest_buildbot_tier1_job

def test_ingest_buildbot_tier1_job(test_repository, sample_data, sample_push,
                                   failure_classifications, mock_log_parser):
    """Tier is set to 1 if no lower_tier_signatures is used (ie: TaskCluster)"""
    job_data = sample_data.job_data[:1]
    store_push_data(test_repository, sample_push)
    store_job_data(test_repository, job_data)
    job = Job.objects.all().first()
    assert job.tier == 1
Developer: SJasoria, Project: treeherder, Lines of code: 8, Source file: test_job_ingestion.py


Example 10: test_ingest_job_default_tier

def test_ingest_job_default_tier(test_repository, sample_data, sample_push,
                                 failure_classifications, mock_log_parser):
    """Tier is set to 1 by default"""
    job_data = sample_data.job_data[:1]
    store_push_data(test_repository, sample_push)
    store_job_data(test_repository, job_data)
    job = Job.objects.all().first()
    assert job.tier == 1
Developer: edmorley, Project: treeherder, Lines of code: 8, Source file: test_job_ingestion.py


Example 11: test_ingesting_skip_existing

def test_ingesting_skip_existing(test_repository, failure_classifications, sample_data,
                                 sample_push, mock_log_parser):
    """Remove single existing job prior to loading"""
    job_data = sample_data.job_data[:1]
    test_utils.do_job_ingestion(test_repository, job_data, sample_push)

    store_job_data(test_repository, sample_data.job_data[:2])

    assert Job.objects.count() == 2
Developer: edmorley, Project: treeherder, Lines of code: 9, Source file: test_job_ingestion.py


Example 12: pending_jobs_stored

def pending_jobs_stored(
        test_repository, failure_classifications, pending_job,
        push_stored):
    """
    stores a list of buildapi pending jobs into the jobs store
    """
    pending_job.update(push_stored[0])
    pending_job.update({'project': test_repository.name})
    store_job_data(test_repository, [pending_job])
Developer: edmorley, Project: treeherder, Lines of code: 9, Source file: conftest.py


Example 13: completed_jobs_stored

def completed_jobs_stored(
        test_repository, failure_classifications, completed_job,
        push_stored):
    """
    stores a list of buildapi completed jobs
    """
    completed_job['revision'] = push_stored[0]['revision']
    completed_job.update({'project': test_repository.name})
    store_job_data(test_repository, [completed_job])
Developer: edmorley, Project: treeherder, Lines of code: 9, Source file: conftest.py


Example 14: running_jobs_stored

def running_jobs_stored(
        test_repository, failure_classifications, running_job,
        push_stored):
    """
    stores a list of buildapi running jobs
    """
    running_job.update(push_stored[0])
    running_job.update({'project': test_repository.name})
    store_job_data(test_repository, [running_job])
Developer: edmorley, Project: treeherder, Lines of code: 9, Source file: conftest.py


Example 15: retriggered_job

def retriggered_job(test_job, eleven_job_blobs):
    # a copy of test_job with a different guid, representing a "retrigger"
    from treeherder.model.models import Job
    original = eleven_job_blobs[0]
    retrigger = copy.deepcopy(original)
    retrigger['job']['job_guid'] = "f1c75261017c7c5ce3000931dce4c442fe0a129a"

    store_job_data(test_job.repository, [retrigger])

    return Job.objects.get(guid=retrigger['job']['job_guid'])
Developer: SebastinSanty, Project: treeherder, Lines of code: 10, Source file: conftest.py


Example 16: test_bad_date_value_ingestion

def test_bad_date_value_ingestion(test_repository, failure_classifications,
                                  sample_push, mock_log_parser):
    """
    Test ingesting a job blob with bad date value

    """
    blob = job_data(start_timestamp="foo",
                    revision=sample_push[0]['revision'])

    store_push_data(test_repository, sample_push[:1])
    store_job_data(test_repository, [blob])
Developer: edmorley, Project: treeherder, Lines of code: 11, Source file: test_job_ingestion.py


Example 17: create

    def create(self, request, project):
        """
        This method adds a job to a given push.
        """
        try:
            repository = Repository.objects.get(name=project)
        except ObjectDoesNotExist:
            return Response("No repository with name: {0}".format(project),
                            status=HTTP_404_NOT_FOUND)

        store_job_data(repository, request.data)

        return Response({'message': 'Job successfully updated'})
Developer: MikeLing, Project: treeherder, Lines of code: 13, Source file: jobs.py


Example 18: running_jobs_stored

def running_jobs_stored(
        test_repository, failure_classifications, running_jobs,
        push_stored):
    """
    stores a list of buildapi running jobs
    """
    running_jobs.update(push_stored[0])
    running_jobs.update({'project': test_repository.name})

    tjc = TreeherderJobCollection()
    tj = tjc.get_job(running_jobs)
    tjc.add(tj)

    store_job_data(test_repository, tjc.get_collection_data())
Developer: bclary, Project: treeherder, Lines of code: 14, Source file: conftest.py


Example 19: completed_jobs_stored

def completed_jobs_stored(
        test_repository, failure_classifications, completed_jobs,
        push_stored):
    """
    stores a list of buildapi completed jobs
    """
    completed_jobs['revision'] = push_stored[0]['revision']
    completed_jobs.update({'project': test_repository.name})

    tjc = TreeherderJobCollection()
    tj = tjc.get_job(completed_jobs)
    tjc.add(tj)

    store_job_data(test_repository, tjc.get_collection_data())
Developer: bclary, Project: treeherder, Lines of code: 14, Source file: conftest.py


Example 20: test_ingest_buildbot_tier2_job

def test_ingest_buildbot_tier2_job(test_repository, sample_data, sample_push,
                                   failure_classifications, mock_log_parser):
    """Tier is set to 2 if it matches the signature object"""
    job_data = sample_data.job_data[:1]
    test_utils.do_job_ingestion(test_repository, job_data, sample_push)
    job = Job.objects.all().first()
    lower_tier_signatures = {
            job.signature.signature: 2
    }
    job_data_2 = copy.deepcopy(job_data)
    job_data_2[0]['job']['job_guid'] = "foo"
    store_job_data(test_repository, job_data_2, lower_tier_signatures)
    job2 = Job.objects.get(guid="foo")
    assert job2.tier == 2
Developer: SJasoria, Project: treeherder, Lines of code: 14, Source file: test_job_ingestion.py



Note: The treeherder.etl.jobs.store_job_data examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and distribution and use should follow the corresponding project's license. Please do not reproduce without permission.

