"Das U-Boot" Source Tree
0
fork

Configure Feed

Select the types of activity you want to include in your feed.

test/py: Add a report showing test durations

Execution time varies widely with the existing tests. Provide a way to
produce a summary of the time taken for each test, along with a
histogram.

This is enabled with the --timing flag.

Enable it for sandbox in CI.

Example:

Duration : Number of tests
======== : ========================================
<1ms : 1
<8ms : 1
<20ms : # 20
<30ms : ######## 127
<50ms : ######################################## 582
<75ms : ####### 102
<100ms : ## 39
<200ms : ##### 86
<300ms : # 29
<500ms : ## 42
<750ms : # 16
<1.0s : # 15
<2.0s : # 23
<3.0s : 13
<5.0s : 9
<7.5s : 1
<10.0s : 6
<20.0s : 12

Signed-off-by: Simon Glass <sjg@chromium.org>
Reviewed-by: Tom Rini <trini@konsulko.com>

authored by

Simon Glass and committed by
Tom Rini
5e46a069 9f5d1863

+124 -2
+2 -1
.azure-pipelines.yml
··· 295 295 export PATH=/opt/qemu/bin:/tmp/uboot-test-hooks/bin:\${PATH} 296 296 export PYTHONPATH=/tmp/uboot-test-hooks/py/travis-ci 297 297 # "\${var:+"-k \$var"}" expands to "" if \$var is empty, "-k \$var" if not 298 - ./test/py/test.py -ra -o cache_dir="\$UBOOT_TRAVIS_BUILD_DIR"/.pytest_cache --bd \${TEST_PY_BD} \${TEST_PY_ID} \${TEST_PY_TEST_SPEC:+"-k \${TEST_PY_TEST_SPEC}"} --build-dir "\$UBOOT_TRAVIS_BUILD_DIR" --report-dir "\$UBOOT_TRAVIS_BUILD_DIR" --junitxml=\$(System.DefaultWorkingDirectory)/results.xml 298 + ./test/py/test.py -ra -o cache_dir="\$UBOOT_TRAVIS_BUILD_DIR"/.pytest_cache --bd \${TEST_PY_BD} \${TEST_PY_ID} \${TEST_PY_EXTRA} \${TEST_PY_TEST_SPEC:+"-k \${TEST_PY_TEST_SPEC}"} --build-dir "\$UBOOT_TRAVIS_BUILD_DIR" --report-dir "\$UBOOT_TRAVIS_BUILD_DIR" --junitxml=\$(System.DefaultWorkingDirectory)/results.xml 299 299 # the below corresponds to .gitlab-ci.yml "after_script" 300 300 rm -rf /tmp/uboot-test-hooks /tmp/venv 301 301 EOF ··· 318 318 matrix: 319 319 sandbox: 320 320 TEST_PY_BD: "sandbox" 321 + TEST_PY_EXTRA: "--timing" 321 322 sandbox_asan: 322 323 TEST_PY_BD: "sandbox" 323 324 OVERRIDE: "-a ASAN"
+2 -1
.gitlab-ci.yml
··· 96 96 # "${var:+"-k $var"}" expands to "" if $var is empty, "-k $var" if not 97 97 - export PATH=/opt/qemu/bin:/tmp/uboot-test-hooks/bin:${PATH}; 98 98 export PYTHONPATH=/tmp/uboot-test-hooks/py/travis-ci; 99 - ./test/py/test.py -ra --bd ${TEST_PY_BD} ${TEST_PY_ID} 99 + ./test/py/test.py -ra --bd ${TEST_PY_BD} ${TEST_PY_ID} ${TEST_PY_EXTRA} 100 100 ${TEST_PY_TEST_SPEC:+"-k ${TEST_PY_TEST_SPEC}"} 101 101 --build-dir "$UBOOT_TRAVIS_BUILD_DIR" 102 102 --junitxml=/tmp/${TEST_PY_BD}/results.xml ··· 234 234 - ${DEFAULT_AMD64_TAG} 235 235 variables: 236 236 TEST_PY_BD: "sandbox" 237 + TEST_PY_EXTRA: "--timing" 237 238 <<: *buildman_and_testpy_dfn 238 239 239 240 sandbox with clang test.py:
+33
doc/develop/py_testing.rst
··· 246 246 sets the directory used to store persistent test data. This is test data that 247 247 may be re-used across test runs, such as file-system images. 248 248 249 + --timing 250 + shows a histogram of test duration, at the end of the run. The columns are: 251 + 252 + Duration 253 + the duration-bucket that this test was in 254 + 255 + Total 256 + total time of all tests in this bucket 257 + 258 + Number of tests 259 + graph showing the number of tests in this bucket, with the actual number 260 + shown at the end 261 + 262 + Example:: 263 + 264 + Duration : Total | Number of tests 265 + ======== : ======= |======================================== 266 + <20ms : 418ms |## 23 267 + <30ms : 9.1s |######################################## 347 268 + <40ms : 10.0s |################################# 294 269 + <50ms : 3.1s |####### 69 270 + <75ms : 2.6s |#### 43 271 + <100ms : 1.7s |## 19 272 + <200ms : 3.0s |## 22 273 + <300ms : 1.7s | 7 274 + <400ms : 675ms | 2 275 + <500ms : 2.2s | 5 276 + <750ms : 8.3s |# 13 277 + <1.0s : 1.6s | 2 278 + <2.0s : 9.4s | 7 279 + <3.0s : 2.4s | 1 280 + <7.5s : 6.1s | 1 281 + 249 282 `pytest` also implements a number of its own command-line options. Commonly used 250 283 options are mentioned below. Please see `pytest` documentation for complete 251 284 details. Execute `py.test --version` for a brief summary. Note that U-Boot's
+87
test/py/conftest.py
··· 25 25 from _pytest.runner import runtestprotocol 26 26 import subprocess 27 27 import sys 28 + import time 28 29 from u_boot_spawn import BootFail, Timeout, Unexpected, handle_exception 29 30 30 31 # Globals: The HTML log file, and the connection to the U-Boot console. ··· 91 92 parser.addoption('--role', help='U-Boot board role (for Labgrid-sjg)') 92 93 parser.addoption('--use-running-system', default=False, action='store_true', 93 94 help="Assume that U-Boot is ready and don't wait for a prompt") 95 + parser.addoption('--timing', default=False, action='store_true', 96 + help='Show info on test timing') 97 + 94 98 95 99 def run_build(config, source_dir, build_dir, board_type, log): 96 100 """run_build: Build U-Boot ··· 322 326 ubconfig.use_running_system = config.getoption('use_running_system') 323 327 ubconfig.dtb = build_dir + '/arch/sandbox/dts/test.dtb' 324 328 ubconfig.connection_ok = True 329 + ubconfig.timing = config.getoption('timing') 325 330 326 331 env_vars = ( 327 332 'board_type', ··· 515 520 tests_skipped = [] 516 521 tests_warning = [] 517 522 tests_passed = [] 523 + 524 + # Duration of each test: 525 + # key (string): test name 526 + # value (float): duration in ms 527 + test_durations = {} 528 + 518 529 519 530 def pytest_itemcollected(item): 520 531 """pytest hook: Called once for each test found during collection. ··· 531 542 532 543 tests_not_run.append(item.name) 533 544 545 + 546 + def show_timings(): 547 + """Write timings for each test, along with a histogram""" 548 + 549 + def get_time_delta(msecs): 550 + """Convert milliseconds into a user-friendly string""" 551 + if msecs >= 1000: 552 + return f'{msecs / 1000:.1f}s' 553 + else: 554 + return f'{msecs:.0f}ms' 555 + 556 + def show_bar(key, msecs, value): 557 + """Show a single bar (line) of the histogram 558 + 559 + Args: 560 + key (str): Key to write on the left 561 + value (int): Value to display, i.e. 
the relative length of the bar 562 + """ 563 + if value: 564 + bar_length = int((value / max_count) * max_bar_length) 565 + print(f"{key:>8} : {get_time_delta(msecs):>7} |{'#' * bar_length} {value}", file=buf) 566 + 567 + # Create the buckets we will use, each has a count and a total time 568 + bucket = {} 569 + for power in range(5): 570 + for i in [1, 2, 3, 4, 5, 7.5]: 571 + bucket[i * 10 ** power] = {'count': 0, 'msecs': 0.0} 572 + max_dur = max(bucket.keys()) 573 + 574 + # Collect counts for each bucket; if outside the range, add to too_long 575 + # Also show a sorted list of test timings from longest to shortest 576 + too_long = 0 577 + too_long_msecs = 0.0 578 + max_count = 0 579 + with log.section('Timing Report', 'timing_report'): 580 + for name, dur in sorted(test_durations.items(), key=lambda kv: kv[1], 581 + reverse=True): 582 + log.info(f'{get_time_delta(dur):>8} {name}') 583 + greater = [k for k in bucket.keys() if dur <= k] 584 + if greater: 585 + buck = bucket[min(greater)] 586 + buck['count'] += 1 587 + max_count = max(max_count, buck['count']) 588 + buck['msecs'] += dur 589 + else: 590 + too_long += 1 591 + too_long_msecs += dur 592 + 593 + # Set the maximum length of a histogram bar, in characters 594 + max_bar_length = 40 595 + 596 + # Show a a summary with histogram 597 + buf = io.StringIO() 598 + with log.section('Timing Summary', 'timing_summary'): 599 + print('Duration : Total | Number of tests', file=buf) 600 + print(f'{"=" * 8} : {"=" * 7} |{"=" * max_bar_length}', file=buf) 601 + for dur, buck in bucket.items(): 602 + if buck['count']: 603 + label = get_time_delta(dur) 604 + show_bar(f'<{label}', buck['msecs'], buck['count']) 605 + if too_long: 606 + show_bar(f'>{get_time_delta(max_dur)}', too_long_msecs, too_long) 607 + log.info(buf.getvalue()) 608 + if ubconfig.timing: 609 + print(buf.getvalue(), end='') 610 + 611 + 534 612 def cleanup(): 535 613 """Clean up all global state. 
536 614 ··· 580 658 for test in tests_not_run: 581 659 anchor = anchors.get(test, None) 582 660 log.status_fail('... ' + test, anchor) 661 + show_timings() 583 662 log.close() 584 663 atexit.register(cleanup) 585 664 ··· 713 792 log.get_and_reset_warning() 714 793 ihook = item.ihook 715 794 ihook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location) 795 + start = time.monotonic() 716 796 reports = runtestprotocol(item, nextitem=nextitem) 797 + duration = round((time.monotonic() - start) * 1000, 1) 717 798 ihook.pytest_runtest_logfinish(nodeid=item.nodeid, location=item.location) 718 799 was_warning = log.get_and_reset_warning() 719 800 ··· 726 807 start_test_section(item) 727 808 728 809 failure_cleanup = False 810 + record_duration = True 729 811 if not was_warning: 730 812 test_list = tests_passed 731 813 msg = 'OK' ··· 756 838 test_list = tests_skipped 757 839 msg = 'SKIPPED:\n' + str(report.longrepr) 758 840 msg_log = log.status_skipped 841 + record_duration = False 842 + 843 + msg += f' {duration} ms' 844 + if record_duration: 845 + test_durations[item.name] = duration 759 846 760 847 if failure_cleanup: 761 848 console.drain_console()