Hash: 33d17b7f
Author:
Date: 2022-01-17T10:29:28

Fix perf test runner script. For Python3, and a couple other tweaks.

Bug: angleproject:5707
Change-Id: I64365bf9f7834c78dd8cded277a13c35811e0f9d
Reviewed-on: https://chromium-review.googlesource.com/c/angle/angle/+/3395812
Reviewed-by: Shahbaz Youssefi <syoussefi@chromium.org>
Commit-Queue: Jamie Madill <jmadill@chromium.org>
#!/usr/bin/python3
#
# Copyright 2015 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# perf_test_runner.py:
#   Helper script for running and analyzing perftest results. Runs the
#   test for a fixed number of experiments, printing the mean and
#   coefficient of variation of the accumulated scores after each run.
#
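# Example invocation (the test name shown is the script's non-Windows
# default; both arguments are optional):
#
#   python3 perf_test_runner.py --experiments 20 DrawCallPerfBenchmark.Run/gl
#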
import argparse
import glob
import logging
import os
import re
import subprocess
import sys
base_path = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
# We look in these paths for a recent build.
TEST_SUITE_SEARCH_PATH = glob.glob('out/*')
DEFAULT_METRIC = 'wall_time'
DEFAULT_EXPERIMENTS = 10
DEFAULT_TEST_SUITE = 'angle_perftests'
if sys.platform == 'win32':
    DEFAULT_TEST_NAME = 'DrawCallPerfBenchmark.Run/d3d11_null'
else:
    DEFAULT_TEST_NAME = 'DrawCallPerfBenchmark.Run/gl'
EXIT_SUCCESS = 0
EXIT_FAILURE = 1
scores = []
# Thanks to http://stackoverflow.com/a/27758326
def mean(data):
    """Return the sample arithmetic mean of data."""
    n = len(data)
    if n < 1:
        raise ValueError('mean requires at least one data point')
    return float(sum(data)) / float(n)

def sum_of_square_deviations(data, c):
    """Return the sum of square deviations of sequence data."""
    ss = sum((float(x) - c)**2 for x in data)
    return ss

def coefficient_of_variation(data):
    """Calculate the population coefficient of variation."""
    n = len(data)
    if n < 2:
        raise ValueError('variance requires at least two data points')
    c = mean(data)
    ss = sum_of_square_deviations(data, c)
    pvar = ss / n  # the population variance
    stddev = pvar**0.5  # the population standard deviation
    return stddev / c
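# A quick worked example (values chosen only to illustrate the arithmetic):
# for data = [10, 12], the mean is 11, the population variance is
# ((10 - 11)**2 + (12 - 11)**2) / 2 = 1, the standard deviation is 1, and so
# coefficient_of_variation returns 1 / 11, about 9.09%.
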
def truncated_list(data, n):
    """Return data sorted with the n smallest and n largest values removed."""
    if len(data) < n * 2:
        raise ValueError('list not large enough to truncate')
    return sorted(data)[n:-n]


def truncated_mean(data, n):
    """Compute a truncated mean; n is the truncation size at each end."""
    return mean(truncated_list(data, n))


def truncated_cov(data, n):
    """Compute a truncated coefficient of variation; n is the truncation size."""
    return coefficient_of_variation(truncated_list(data, n))
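# For example (an illustrative call, not from a real run): with eight samples
# and n = 1, truncated_list([5, 1, 9, 3, 7, 2, 8, 4], 1) sorts the data to
# [1, 2, 3, 4, 5, 7, 8, 9] and drops one value from each end, yielding
# [2, 3, 4, 5, 7, 8]; truncated_mean and truncated_cov then operate on that
# trimmed list.
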
def main(raw_args):
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--suite',
        help='Test suite binary. Default is "%s".' % DEFAULT_TEST_SUITE,
        default=DEFAULT_TEST_SUITE)
    parser.add_argument(
        '-m',
        '--metric',
        help='Test metric. Default is "%s".' % DEFAULT_METRIC,
        default=DEFAULT_METRIC)
    parser.add_argument(
        '--experiments',
        help='Number of experiments to run. Default is %d.' % DEFAULT_EXPERIMENTS,
        default=DEFAULT_EXPERIMENTS,
        type=int)
    parser.add_argument('-v', '--verbose', help='Extra verbose logging.', action='store_true')
    # nargs='?' makes the positional optional so the default actually applies.
    parser.add_argument('test_name', nargs='?', help='Test to run', default=DEFAULT_TEST_NAME)
    args, extra_args = parser.parse_known_args(raw_args)

    if args.verbose:
        logging.basicConfig(level='DEBUG')

    if sys.platform == 'win32':
        args.suite += '.exe'

    # Find the most recent binary.
    newest_binary = None
    newest_mtime = None
    for path in TEST_SUITE_SEARCH_PATH:
        binary_path = os.path.join(base_path, path, args.suite)
        if os.path.exists(binary_path):
            binary_mtime = os.path.getmtime(binary_path)
            if (newest_binary is None) or (binary_mtime > newest_mtime):
                newest_binary = binary_path
                newest_mtime = binary_mtime

    perftests_path = newest_binary
    if perftests_path is None or not os.path.exists(perftests_path):
        print('Cannot find %s in %s!' % (args.suite, TEST_SUITE_SEARCH_PATH))
        return EXIT_FAILURE

    print('Using test executable: %s' % perftests_path)
    print('Test name: %s' % args.test_name)

    def get_results(metric, extra_args=None):
        run = [perftests_path, '--gtest_filter=%s' % args.test_name] + (extra_args or [])
        logging.info('running %s' % str(run))
        process = subprocess.Popen(
            run, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf8')
        output, err = process.communicate()

        m = re.search(r'Running (\d+) tests', output)
        if m and int(m.group(1)) > 1:
            print(output)
            raise Exception('Found more than one test result in output')

        # Results are reported in the format:
        # name_backend.metric: story= value units.
        pattern = r'\.' + metric + r':.*= ([0-9.]+)'
        logging.debug('searching for %s in output' % pattern)
        m = re.findall(pattern, output)
        if not m:
            print(output)
            raise Exception('Did not find the metric "%s" in the test output' % metric)

        return [float(value) for value in m]
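    # For instance, an output line such as the following (illustrative, not
    # captured from a real run):
    #   DrawCallPerfBenchmark.run/gl.wall_time: story= 0.25 ns
    # would make get_results return [0.25] for the metric 'wall_time'.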
    # Calibrate the number of steps. int() because the metric parses as a float.
    steps = int(get_results("steps_to_run", ["--calibration"] + extra_args)[0])
    print("running with %d steps." % steps)

    # Loop 'args.experiments' times, running the tests.
    for experiment in range(args.experiments):
        experiment_scores = get_results(args.metric,
                                        ["--steps-per-trial", str(steps)] + extra_args)

        for score in experiment_scores:
            sys.stdout.write("%s: %.2f" % (args.metric, score))
            scores.append(score)

            if len(scores) > 1:
                sys.stdout.write(", mean: %.2f" % mean(scores))
                sys.stdout.write(", variation: %.2f%%" %
                                 (coefficient_of_variation(scores) * 100.0))

            if len(scores) > 7:
                # Truncate an eighth of the samples from each end of the sorted list.
                truncation_n = len(scores) >> 3
                sys.stdout.write(", truncated mean: %.2f" % truncated_mean(scores, truncation_n))
                sys.stdout.write(", variation: %.2f%%" %
                                 (truncated_cov(scores, truncation_n) * 100.0))

            print("")

    return EXIT_SUCCESS
if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))