
Python util.run_benchmark Function Code Examples


This article collects typical usage examples of the Python function util.run_benchmark. If you have been wondering what util.run_benchmark does, how to call it, or what real-world calls look like, the curated examples below should help.



The following sections present 20 code examples of the run_benchmark function, ordered by popularity. Each snippet is excerpted from an open-source project, with its source attribution listed after the code.
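Before diving in, it helps to know the contract these examples share. The util module is the helper shipped with the benchmark suite each file belongs to; it is not reproduced on this page. The sketch below is an assumption inferred from the call sites in the examples, not the suite's actual implementation: add_standard_options_to registers at least a --num_runs option, and run_benchmark(options, num_runs, bench_func, *args) invokes bench_func and reports the list of per-iteration timings it returns.

# A minimal sketch of the util.run_benchmark contract, inferred from the
# call sites on this page. This is NOT the suite's real implementation.
import optparse
import time


def add_standard_options_to(parser):
    # Assumption: the suites register at least --num_runs (options.num_runs
    # is used by every example below).
    parser.add_option("-n", "--num_runs", type="int", default=5,
                      help="number of times to run the benchmark")


def run_benchmark(options, num_runs, bench_func, *args):
    # bench_func(iterations, *args) returns one duration per iteration.
    # (Some suite versions also pass a timer callable, e.g. time.time.)
    times = bench_func(num_runs, *args)
    for i, t in enumerate(times):
        print("run %d: %.6f s" % (i + 1, t))


if __name__ == "__main__":
    parser = optparse.OptionParser(usage="%prog [options]")
    add_standard_options_to(parser)
    options, args = parser.parse_args()

    def bench_noop(iterations):
        # Trivial stand-in workload so the sketch is runnable end to end.
        times = []
        for _ in range(iterations):
            t0 = time.time()
            sum(range(100000))
            t1 = time.time()
            times.append(t1 - t0)
        return times

    run_benchmark(options, options.num_runs, bench_noop)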

Example 1: run

import io
import os

import util  # benchmark-suite helper module, defined alongside this file

# test_html5lib is defined earlier in bm_html5lib.py; Example 18 shows a variant.
def run(num_runs=1, geo_mean=True):
    # Get all our IO over with early.
    data_dir = os.path.join(os.path.dirname(__file__), "data")
    spec_filename = os.path.join(data_dir, "html5lib_spec.html")
    with open(spec_filename) as spec_fh:
        spec_data = io.StringIO(spec_fh.read())

    util.run_benchmark(geo_mean, num_runs, test_html5lib, spec_data)
Developer: BrythonServer | Project: brython | Lines: 8 | Source: bm_html5lib.py
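Note the signature here: this brython variant (like Example 14 below) passes geo_mean and num_runs positionally, while the remaining examples pass an optparse options object as the first argument.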


Example 2: entry_point

def entry_point(argv):
    import optparse
    import util

    def parse_depths(option, opt_str, value, parser):
        parser.values.depths = [v for v in value.split(',') if v]
    
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description="Test the performance of the garbage collector benchmark")
    util.add_standard_options_to(parser)
    parser.add_option('--threads', default=0, action="store", type="int",
                      help="number of threads to use (default 0)")
    parser.add_option('--depths', default=DEFAULT_DEPTHS, type="string",
                      action="callback", callback=parse_depths,
                      help='tree depths')
    parser.add_option('--debug', default=False, action='store_true',
                      help="enable debugging")
    options, args = parser.parse_args(argv)
    util.run_benchmark(options, options.num_runs, main,
                       options.depths, options.threads, options.debug)
Developer: kmod | Project: icbd | Lines: 21 | Source: gcbench.py


Example 3: test_list_unpacking

import optparse

import util

# do_unpacking and test_tuple_unpacking are defined earlier in
# bm_unpack_sequence.py (not shown in this excerpt).
def test_list_unpacking(iterations, timer):
    x = list(range(10))

    return do_unpacking(iterations, timer, x)


def test_all(iterations, timer):
    tuple_data = test_tuple_unpacking(iterations, timer)
    list_data = test_list_unpacking(iterations, timer)
    return [x + y for (x, y) in zip(tuple_data, list_data)]


if __name__ == "__main__":
    parser = optparse.OptionParser(
        usage="%prog [options] [test]",
        description=("Test the performance of sequence unpacking."))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    tests = {"tuple": test_tuple_unpacking, "list": test_list_unpacking}

    if len(args) > 1:
        parser.error("Can only specify one test")
    elif len(args) == 1:
        func = tests.get(args[0])
        if func is None:
            parser.error("Invalid test name")
        util.run_benchmark(options, options.num_runs, func)
    else:
        util.run_benchmark(options, options.num_runs, test_all)
Developer: bennn | Project: retic_performance | Lines: 30 | Source: bm_unpack_sequence.py


Example 4: test_mako

# Tail of the MAKO_TMPL = Template("""...) definition; its opening is elided.
    % endfor
</tr>
% endfor
</table>
""")

def test_mako(count, timer):
    table = [xrange(150) for _ in xrange(150)]

    # Warm up Mako.
    MAKO_TMPL.render(table=table)
    MAKO_TMPL.render(table=table)

    times = []
    for _ in xrange(count):
        t0 = timer()
        MAKO_TMPL.render(table=table)
        t1 = timer()
        times.append(t1 - t0)
    return times


if __name__ == "__main__":
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description=("Test the performance of Mako templates."))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    util.run_benchmark(options, options.num_runs, test_mako)
Developer: bennn | Project: retic_performance | Lines: 30 | Source: bm_mako.py


Example 5: RuntimeError

    if not args:
        bench_func = bench_parse
    elif args[0] not in benchmarks:
        raise RuntimeError("invalid benchmark requested")
    else:
        bench_func = globals()['bench_%s' % args[0]]

    if options.no_accelerator and sys.version_info >= (3, 3):
        # prevent C accelerator from being used in 3.3
        sys.modules['_elementtree'] = None
        import xml.etree.ElementTree as et
        if et.SubElement.__module__ != 'xml.etree.ElementTree':
            raise RuntimeError("Unexpected C accelerator for ElementTree")

    try:
        from importlib import import_module
    except ImportError:
        def import_module(module_name):
            __import__(module_name)
            return sys.modules[module_name]

    try:
        etree_module = import_module(options.etree_module)
    except ImportError:
        if options.etree_module != default_etmodule:
            raise
        etree_module = import_module(fallback_etmodule)

    util.run_benchmark(options, options.num_runs,
                       run_etree_benchmark, etree_module, bench_func)
Developer: bennn | Project: retic_performance | Lines: 30 | Source: bm_elementtree.py


Example 6: test_richards

# Python imports
import optparse
import time

# Local imports
import richards
import util


def test_richards(iterations):
    # Warm-up
    r = richards.Richards()
    r.run(iterations=2)

    times = []
    for _ in xrange(iterations):
        t0 = time.time()
        r.run(iterations=1)
        t1 = time.time()
        times.append(t1 - t0)
    return times

if __name__ == "__main__":
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description="Test the performance of the Richards benchmark")
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    util.run_benchmark(options, options.num_runs, test_richards)
Developer: kmod | Project: icbd | Lines: 30 | Source: bm_richards.py


Example 7: time

    # Tail of the demo() function; network construction above is elided.
    # train it with some patterns
    n.train(pat, 5000)
    # test it
    #n.test(pat)

def time(fn, *args):
    import time, traceback
    begin = time.time()
    result = fn(*args)
    end = time.time()
    return result, end-begin

def test_bpnn(iterations):
    times = []
    for _ in range(iterations):
        result, t = time(demo)
        times.append(t)
    return times

main = test_bpnn

if __name__ == "__main__":
    import optparse
    import util
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description=("Test the performance of a neural network."))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    util.run_benchmark(options, options.num_runs, test_bpnn)
Developer: 13steinj | Project: cython | Lines: 30 | Source: bpnn3.py


Example 8: runtest

import optparse
import sys
from StringIO import StringIO  # Python 2-era benchmark

import util
import bigtable

# bummer, timeit module is stupid
from bigtable import test_python_cstringio, test_spitfire_o4, test_spitfire


def runtest(n, benchmark):
    times = []
    for i in range(n):
        sys.stdout = StringIO()
        bigtable.run([benchmark], 100)
        times.append(float(sys.stdout.getvalue().split(" ")[-2]))
        sys.stdout = sys.__stdout__
    return times


if __name__ == "__main__":
    parser = optparse.OptionParser(
        usage="%prog [options]", description="Test the performance of the spitfire benchmark"
    )
    parser.add_option(
        "--benchmark",
        type="choice",
        choices=["python_cstringio", "spitfire_o4"],
        default="spitfire_o4",
        help="choose between cstringio and spitfire_o4",
    )
    util.add_standard_options_to(parser)
    options, args = parser.parse_args(sys.argv)
    util.run_benchmark(options, options.num_runs, runtest, options.benchmark)
Developer: kmod | Project: icbd | Lines: 30 | Source: spitfire.py


Example 9: test_regex_effbot

        # Tail of run_benchmarks(); the preceding loop body is elided.
        re.search(regexs[id], string_tables[n][id])
        re.search(regexs[id], string_tables[n][id])


def test_regex_effbot(iterations):
    sizes = init_benchmarks()

    # Warm up.
    for size in sizes:
        run_benchmarks(size)

    times = []
    for i in xrange(iterations):
        t0 = time.time()
        for size in sizes:
            run_benchmarks(size)
        t1 = time.time()
        times.append(t1 - t0)
    return times


if __name__ == '__main__':
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description=("Test the performance of regexps using Fredik Lundh's "
                     "benchmarks."))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    util.run_benchmark(options, options.num_runs, test_regex_effbot)
Developer: kmod | Project: icbd | Lines: 30 | Source: bm_regex_effbot.py


Example 10: test_regex_compile

        # Tail of capture_regexes(): re.compile/search/sub were patched above
        # (elided) to record every pattern test_regex_v8 exercises.
        bm_regex_v8.test_regex_v8(1)
    finally:
        re.compile = real_compile
        re.search = real_search
        re.sub = real_sub
    return regexes


def test_regex_compile(count):
    re._cache = EmptyCache()
    regexes = capture_regexes()
    times = []

    for _ in xrange(count):
        t0 = time.time()
        for regex, flags in regexes:
            re.compile(regex, flags)
        t1 = time.time()
        times.append(t1 - t0)
    return times


if __name__ == "__main__":
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description=("Test regex compilation performance"))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    util.run_benchmark(options, options.num_runs, test_regex_compile)
Developer: kmod | Project: icbd | Lines: 30 | Source: bm_regex_compile.py


Example 11: test_django

# Tail of the DJANGO_TMPL = Template("""...) definition; its opening is elided.
<tr>{% for col in row %}<td>{{ col|escape }}</td>{% endfor %}</tr>
{% endfor %}
</table>
""")

def test_django(count, timer):
    table = [xrange(150) for _ in xrange(150)]
    context = Context({"table": table})

    # Warm up Django.
    DJANGO_TMPL.render(context)
    DJANGO_TMPL.render(context)

    times = []
    for _ in xrange(count):
        t0 = timer()
        data = DJANGO_TMPL.render(context)
        t1 = timer()
        times.append(t1 - t0)
    return times


if __name__ == "__main__":
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description=("Test the performance of Django templates."))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    util.run_benchmark(options, options.num_runs, test_django)
Developer: bennn | Project: retic_performance | Lines: 30 | Source: bm_django_v2.py


Example 12: make_http_server

    host, port = make_http_server(loop, make_application())
    url = "http://%s:%s/" % (host, port)
    times = []

    @coroutine
    def main():
        client = AsyncHTTPClient()
        for i in xrange(count):
            t0 = timer()
            futures = [client.fetch(url) for j in xrange(CONCURRENCY)]
            for fut in futures:
                resp = yield fut
                buf = resp.buffer
                buf.seek(0, 2)
                assert buf.tell() == len(CHUNK) * NCHUNKS
            t1 = timer()
            times.append(t1 - t0)

    loop.run_sync(main)
    return times


if __name__ == "__main__":
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description=("Test the performance of HTTP requests with Tornado."))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    util.run_benchmark(options, options.num_runs, test_tornado)
Developer: bennn | Project: retic_performance | Lines: 30 | Source: bm_tornado_http.py


Example 13: timer

        # Tail of a test_* benchmark body: joining worker threads inside the
        # timed loop; thread creation above is elided.
        for thread in threads:
            thread.join()
        t1 = timer()
        times.append(t1 - t0)
    return times


if __name__ == "__main__":
    parser = optparse.OptionParser(
        usage="%prog [options] benchmark_name",
        description="Test the performance of Python's threads.")
    parser.add_option("--num_threads", action="store", type="int", default=2,
                      dest="num_threads", help="Number of threads to test.")
    parser.add_option("--check_interval", action="store", type="int",
                      default=sys.getcheckinterval(),
                      dest="check_interval",
                      help="Value to pass to sys.setcheckinterval().")
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    if len(args) != 1:
        parser.error("incorrect number of arguments")

    bm_name = args[0].lower()
    func = globals().get("test_" + bm_name)
    if not func:
        parser.error("unknown benchmark: %s" % bm_name)

    sys.setcheckinterval(options.check_interval)
    util.run_benchmark(options, options.num_runs, func, options.num_threads)
Developer: bennn | Project: retic_performance | Lines: 30 | Source: bm_threading.py


Example 14: run

def run(geo_mean, num_runs):
    return util.run_benchmark(geo_mean, num_runs, test_regex_effbot)
Developer: BrythonServer | Project: brython | Lines: 2 | Source: bm_regex_effbot.py


Example 15: timer

            # Tail of the json_load benchmark's timing loop; the loop body is
            # deliberately unrolled, and its earlier lines are elided.
            json.loads(json_dict_group)
            json.loads(json_dict_group)
            json.loads(json_dict_group)
            json.loads(json_dict_group)
            json.loads(json_dict_group)
            json.loads(json_dict_group)
        t1 = timer()
        times.append(t1 - t0)
    return times


if __name__ == "__main__":
    parser = optparse.OptionParser(
        usage="%prog [json_dump|json_load] [options]",
        description=("Test the performance of JSON (de)serializing."))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    benchmarks = ["json_dump", "json_load"]
    for bench_name in benchmarks:
        if bench_name in args:
            benchmark = globals()["test_" + bench_name]
            break
    else:
        raise RuntimeError("Need to specify one of %s" % benchmarks)

    num_obj_copies = 8000
    import json

    util.run_benchmark(options, num_obj_copies, benchmark, json, options)
Developer: bennn | Project: retic_performance | Lines: 30 | Source: bm_json.py


Example 16: test_spambayes

import optparse
import os

# hammie and mboxutils come from the SpamBayes package (import path assumed)
from spambayes import hammie, mboxutils

import util

def test_spambayes(iterations, timer, messages, ham_classifier):
    # Prime the pump. This still leaves some hot functions uncompiled; these
    # will be noticed as hot during the timed loops below.
    for msg in messages:
        ham_classifier.score(msg)

    times = []
    for _ in xrange(iterations):
        t0 = timer()
        for msg in messages:
            ham_classifier.score(msg)
        t1 = timer()
        times.append(t1 - t0)
    return times


if __name__ == "__main__":
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description=("Run the SpamBayes benchmark."))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    data_dir = os.path.join(os.path.dirname(__file__), "data")
    mailbox = os.path.join(data_dir, "spambayes_mailbox")
    ham_data = os.path.join(data_dir, "spambayes_hammie.pkl")
    msgs = list(mboxutils.getmbox(mailbox))
    ham_classifier = hammie.open(ham_data, "pickle", "r")
    util.run_benchmark(options, options.num_runs, test_spambayes,
                       msgs, ham_classifier)
Developer: bennn | Project: retic_performance | Lines: 30 | Source: bm_spambayes.py


Example 17: xrange

    times = []
    for _ in xrange(max(1, count // 2)):
        t0 = timer()
        # Do something simple with each path.
        for p in base_path:
            p.st_mtime
        for p in base_path.glob("*.py"):
            p.st_mtime
        for p in base_path:
            p.st_mtime
        for p in base_path.glob("*.py"):
            p.st_mtime
        t1 = timer()
        times.append(t1 - t0)
    return times


if __name__ == "__main__":
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description=("Test the performance of pathlib operations."))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    setup()
    try:
        util.run_benchmark(options, options.num_runs, test_pathlib)
    finally:
        teardown()
Developer: bennn | Project: retic_performance | Lines: 29 | Source: bm_pathlib.py


Example 18: test_html5lib


import optparse
import os
import time
import StringIO  # Python 2 (this excerpt uses StringIO.StringIO and xrange)

import html5lib

import util

def test_html5lib(count, spec_data):
    # No warm-up runs for this benchmark; in real life, the parser doesn't get
    # to warm up (this isn't a daemon process).

    times = []
    for _ in xrange(count):
        spec_data.seek(0)
        t0 = time.time()
        html5lib.parse(spec_data)
        t1 = time.time()
        times.append(t1 - t0)
    return times


if __name__ == "__main__":
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description=("Test the performance of the html5lib parser."))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    # Get all our IO over with early.
    data_dir = os.path.join(os.path.dirname(__file__), "data")
    spec_filename = os.path.join(data_dir, "html5lib_spec.html")
    with open(spec_filename) as spec_fh:
        spec_data = StringIO.StringIO(spec_fh.read())

    util.run_benchmark(options, options.num_runs, test_html5lib, spec_data)
Developer: kmod | Project: icbd | Lines: 28 | Source: bm_html5lib.py


Example 19: list

    """
    cols = list(range(queen_count))
    for vec in permutations(cols):
        if (queen_count == len({ vec[i]+i for i in cols })
                        == len({ vec[i]-i for i in cols })):
            yield vec


def test_n_queens(iterations):
    # Warm-up runs.
    list(n_queens(8))
    list(n_queens(8))

    times = []
    for _ in _xrange(iterations):
        t0 = time()
        list(n_queens(8))
        t1 = time()
        times.append(t1 - t0)
    return times


if __name__ == "__main__":
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description=("Test the performance of an N-Queens solvers."))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    util.run_benchmark(options, options.num_runs, test_n_queens)
Developer: MarkLodato | Project: cython | Lines: 30 | Source: nqueens.py


Example 20: globals

        usage="%prog [no_output|simple_output|formatted_output] [options]",
        description=("Test the performance of logging."))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    benchmarks = ["no_output", "simple_output", "formatted_output"]
    for bench_name in benchmarks:
        if bench_name in args:
            benchmark = globals()["test_" + bench_name]
            break
    else:
        raise RuntimeError("Need to specify one of %s" % benchmarks)

    # NOTE: StringIO performance will impact the results...
    if sys.version_info >= (3,):
        sio = io.StringIO()
    else:
        sio = io.BytesIO()
    handler = logging.StreamHandler(stream=sio)
    logger = logging.getLogger("benchlogger")
    logger.propagate = False
    logger.addHandler(handler)
    logger.setLevel(logging.WARNING)

    util.run_benchmark(options, options.num_runs, benchmark, logger)

    if benchmark is not test_no_output:
        assert len(sio.getvalue()) > 0
    else:
        assert len(sio.getvalue()) == 0
Developer: bennn | Project: retic_performance | Lines: 30 | Source: bm_logging.py
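Taken together, the benchmark bodies above all follow one skeleton: optional warm-up calls, then a timed loop that appends one duration per iteration and returns the list for util.run_benchmark to report. Here is that recurring shape as a template; setup_workload and run_workload are hypothetical placeholders, not functions from any of the projects above.

import time


def setup_workload():
    # Hypothetical: build whatever data the benchmark operates on.
    return list(range(100000))


def run_workload(data):
    # Hypothetical: the operation being measured.
    return sorted(data, reverse=True)


def test_my_benchmark(iterations, timer=time.time):
    data = setup_workload()

    # Warm up first, as most examples above do.
    run_workload(data)
    run_workload(data)

    times = []
    for _ in range(iterations):
        t0 = timer()
        run_workload(data)
        t1 = timer()
        times.append(t1 - t0)
    return times  # util.run_benchmark consumes this list of durations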



Note: the util.run_benchmark examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets are drawn from open-source projects and remain the copyright of their original authors; consult each project's license before redistributing or reusing them. Please do not repost this compilation without permission.

