Ivan committed 2022-04-05 11:42:28 +03:00
commit 6dc0eb0fcf
5565 changed files with 1200500 additions and 0 deletions

View File

@@ -0,0 +1,19 @@
load("@py_deps//:requirements.bzl", "requirement")
py_library(
name = "gbench",
srcs = glob(["gbench/*.py"]),
deps = [
requirement("numpy"),
requirement("scipy"),
],
)
py_binary(
name = "compare",
srcs = ["compare.py"],
    python_version = "PY3",  # scipy>=1.5 (see requirements.txt) requires Python 3
deps = [
":gbench",
],
)

View File

@@ -0,0 +1,429 @@
#!/usr/bin/env python
"""
compare.py - versatile benchmark output compare tool
"""
import argparse
from argparse import ArgumentParser
import json
import os
import sys
import unittest
import gbench
from gbench import util, report
from gbench.util import *
def check_inputs(in1, in2, flags):
"""
Perform checking on the user provided inputs and diagnose any abnormalities
"""
in1_kind, in1_err = classify_input_file(in1)
in2_kind, in2_err = classify_input_file(in2)
output_file = find_benchmark_flag('--benchmark_out=', flags)
output_type = find_benchmark_flag('--benchmark_out_format=', flags)
if in1_kind == IT_Executable and in2_kind == IT_Executable and output_file:
print(("WARNING: '--benchmark_out=%s' will be passed to both "
"benchmarks causing it to be overwritten") % output_file)
if in1_kind == IT_JSON and in2_kind == IT_JSON and len(flags) > 0:
print("WARNING: passing optional flags has no effect since both "
"inputs are JSON")
if output_type is not None and output_type != 'json':
print(("ERROR: passing '--benchmark_out_format=%s' to 'compare.py`"
" is not supported.") % output_type)
sys.exit(1)
def create_parser():
parser = ArgumentParser(
description='versatile benchmark output compare tool')
parser.add_argument(
'-a',
'--display_aggregates_only',
dest='display_aggregates_only',
action="store_true",
help="If there are repetitions, by default, we display everything - the"
" actual runs, and the aggregates computed. Sometimes, it is "
"desirable to only view the aggregates. E.g. when there are a lot "
"of repetitions. Do note that only the display is affected. "
"Internally, all the actual runs are still used, e.g. for U test.")
parser.add_argument(
'--no-color',
dest='color',
default=True,
action="store_false",
help="Do not use colors in the terminal output"
)
parser.add_argument(
'-d',
'--dump_to_json',
dest='dump_to_json',
help="Additionally, dump benchmark comparison output to this file in JSON format.")
utest = parser.add_argument_group()
utest.add_argument(
'--no-utest',
dest='utest',
default=True,
action="store_false",
help="The tool can do a two-tailed Mann-Whitney U test with the null hypothesis that it is equally likely that a randomly selected value from one sample will be less than or greater than a randomly selected value from a second sample.\nWARNING: requires **LARGE** (no less than {}) number of repetitions to be meaningful!\nThe test is being done by default, if at least {} repetitions were done.\nThis option can disable the U Test.".format(report.UTEST_OPTIMAL_REPETITIONS, report.UTEST_MIN_REPETITIONS))
alpha_default = 0.05
utest.add_argument(
"--alpha",
dest='utest_alpha',
default=alpha_default,
type=float,
help=("significance level alpha. if the calculated p-value is below this value, then the result is said to be statistically significant and the null hypothesis is rejected.\n(default: %0.4f)") %
alpha_default)
subparsers = parser.add_subparsers(
help='This tool has multiple modes of operation:',
dest='mode')
parser_a = subparsers.add_parser(
'benchmarks',
        help='The simplest use case: compare all the output of these two benchmarks')
baseline = parser_a.add_argument_group(
'baseline', 'The benchmark baseline')
baseline.add_argument(
'test_baseline',
metavar='test_baseline',
type=argparse.FileType('r'),
nargs=1,
help='A benchmark executable or JSON output file')
contender = parser_a.add_argument_group(
'contender', 'The benchmark that will be compared against the baseline')
contender.add_argument(
'test_contender',
metavar='test_contender',
type=argparse.FileType('r'),
nargs=1,
help='A benchmark executable or JSON output file')
parser_a.add_argument(
'benchmark_options',
metavar='benchmark_options',
nargs=argparse.REMAINDER,
help='Arguments to pass when running benchmark executables')
parser_b = subparsers.add_parser(
        'filters', help='Compare one filter of a benchmark against another filter of the same benchmark')
baseline = parser_b.add_argument_group(
'baseline', 'The benchmark baseline')
baseline.add_argument(
'test',
metavar='test',
type=argparse.FileType('r'),
nargs=1,
help='A benchmark executable or JSON output file')
baseline.add_argument(
'filter_baseline',
metavar='filter_baseline',
type=str,
nargs=1,
        help='The first filter, which will be used as the baseline')
contender = parser_b.add_argument_group(
'contender', 'The benchmark that will be compared against the baseline')
contender.add_argument(
'filter_contender',
metavar='filter_contender',
type=str,
nargs=1,
        help='The second filter, which will be compared against the baseline')
parser_b.add_argument(
'benchmark_options',
metavar='benchmark_options',
nargs=argparse.REMAINDER,
help='Arguments to pass when running benchmark executables')
parser_c = subparsers.add_parser(
'benchmarksfiltered',
        help='Compare one filter of the first benchmark against another filter of the second benchmark')
baseline = parser_c.add_argument_group(
'baseline', 'The benchmark baseline')
baseline.add_argument(
'test_baseline',
metavar='test_baseline',
type=argparse.FileType('r'),
nargs=1,
help='A benchmark executable or JSON output file')
baseline.add_argument(
'filter_baseline',
metavar='filter_baseline',
type=str,
nargs=1,
        help='The first filter, which will be used as the baseline')
contender = parser_c.add_argument_group(
'contender', 'The benchmark that will be compared against the baseline')
contender.add_argument(
'test_contender',
metavar='test_contender',
type=argparse.FileType('r'),
nargs=1,
        help='The second benchmark executable or JSON output file, which will be compared against the baseline')
contender.add_argument(
'filter_contender',
metavar='filter_contender',
type=str,
nargs=1,
        help='The second filter, which will be compared against the baseline')
parser_c.add_argument(
'benchmark_options',
metavar='benchmark_options',
nargs=argparse.REMAINDER,
help='Arguments to pass when running benchmark executables')
return parser
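# Illustrative invocations for the three modes defined above (the paths are
# placeholders, not files from this commit):
#   compare.py benchmarks <baseline_exe_or_json> <contender_exe_or_json>
#   compare.py filters <exe_or_json> <baseline_filter> <contender_filter>
#   compare.py benchmarksfiltered <baseline> <filter_1> <contender> <filter_2>
# Anything after the positional arguments (optionally separated by '--') is
# forwarded to the benchmark executables as benchmark_options.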
def main():
# Parse the command line flags
parser = create_parser()
args, unknown_args = parser.parse_known_args()
if args.mode is None:
parser.print_help()
exit(1)
assert not unknown_args
benchmark_options = args.benchmark_options
if args.mode == 'benchmarks':
test_baseline = args.test_baseline[0].name
test_contender = args.test_contender[0].name
filter_baseline = ''
filter_contender = ''
# NOTE: if test_baseline == test_contender, you are analyzing the stdev
description = 'Comparing %s to %s' % (test_baseline, test_contender)
elif args.mode == 'filters':
test_baseline = args.test[0].name
test_contender = args.test[0].name
filter_baseline = args.filter_baseline[0]
filter_contender = args.filter_contender[0]
# NOTE: if filter_baseline == filter_contender, you are analyzing the
# stdev
description = 'Comparing %s to %s (from %s)' % (
filter_baseline, filter_contender, args.test[0].name)
elif args.mode == 'benchmarksfiltered':
test_baseline = args.test_baseline[0].name
test_contender = args.test_contender[0].name
filter_baseline = args.filter_baseline[0]
filter_contender = args.filter_contender[0]
# NOTE: if test_baseline == test_contender and
# filter_baseline == filter_contender, you are analyzing the stdev
description = 'Comparing %s (from %s) to %s (from %s)' % (
filter_baseline, test_baseline, filter_contender, test_contender)
else:
# should never happen
print("Unrecognized mode of operation: '%s'" % args.mode)
parser.print_help()
exit(1)
check_inputs(test_baseline, test_contender, benchmark_options)
if args.display_aggregates_only:
benchmark_options += ['--benchmark_display_aggregates_only=true']
options_baseline = []
options_contender = []
if filter_baseline and filter_contender:
options_baseline = ['--benchmark_filter=%s' % filter_baseline]
options_contender = ['--benchmark_filter=%s' % filter_contender]
# Run the benchmarks and report the results
json1 = json1_orig = gbench.util.sort_benchmark_results(gbench.util.run_or_load_benchmark(
test_baseline, benchmark_options + options_baseline))
json2 = json2_orig = gbench.util.sort_benchmark_results(gbench.util.run_or_load_benchmark(
test_contender, benchmark_options + options_contender))
# Now, filter the benchmarks so that the difference report can work
if filter_baseline and filter_contender:
replacement = '[%s vs. %s]' % (filter_baseline, filter_contender)
json1 = gbench.report.filter_benchmark(
json1_orig, filter_baseline, replacement)
json2 = gbench.report.filter_benchmark(
json2_orig, filter_contender, replacement)
diff_report = gbench.report.get_difference_report(
json1, json2, args.utest)
output_lines = gbench.report.print_difference_report(
diff_report,
args.display_aggregates_only,
args.utest, args.utest_alpha, args.color)
print(description)
for ln in output_lines:
print(ln)
# Optionally, diff and output to JSON
if args.dump_to_json is not None:
with open(args.dump_to_json, 'w') as f_json:
json.dump(diff_report, f_json)
class TestParser(unittest.TestCase):
def setUp(self):
self.parser = create_parser()
testInputs = os.path.join(
os.path.dirname(
os.path.realpath(__file__)),
'gbench',
'Inputs')
self.testInput0 = os.path.join(testInputs, 'test1_run1.json')
self.testInput1 = os.path.join(testInputs, 'test1_run2.json')
def test_benchmarks_basic(self):
parsed = self.parser.parse_args(
['benchmarks', self.testInput0, self.testInput1])
self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, 'benchmarks')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertFalse(parsed.benchmark_options)
def test_benchmarks_basic_without_utest(self):
parsed = self.parser.parse_args(
['--no-utest', 'benchmarks', self.testInput0, self.testInput1])
self.assertFalse(parsed.display_aggregates_only)
self.assertFalse(parsed.utest)
self.assertEqual(parsed.utest_alpha, 0.05)
self.assertEqual(parsed.mode, 'benchmarks')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertFalse(parsed.benchmark_options)
def test_benchmarks_basic_display_aggregates_only(self):
parsed = self.parser.parse_args(
['-a', 'benchmarks', self.testInput0, self.testInput1])
self.assertTrue(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, 'benchmarks')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertFalse(parsed.benchmark_options)
def test_benchmarks_basic_with_utest_alpha(self):
parsed = self.parser.parse_args(
['--alpha=0.314', 'benchmarks', self.testInput0, self.testInput1])
self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.utest_alpha, 0.314)
self.assertEqual(parsed.mode, 'benchmarks')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertFalse(parsed.benchmark_options)
def test_benchmarks_basic_without_utest_with_utest_alpha(self):
parsed = self.parser.parse_args(
['--no-utest', '--alpha=0.314', 'benchmarks', self.testInput0, self.testInput1])
self.assertFalse(parsed.display_aggregates_only)
self.assertFalse(parsed.utest)
self.assertEqual(parsed.utest_alpha, 0.314)
self.assertEqual(parsed.mode, 'benchmarks')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertFalse(parsed.benchmark_options)
def test_benchmarks_with_remainder(self):
parsed = self.parser.parse_args(
['benchmarks', self.testInput0, self.testInput1, 'd'])
self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, 'benchmarks')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertEqual(parsed.benchmark_options, ['d'])
def test_benchmarks_with_remainder_after_doubleminus(self):
parsed = self.parser.parse_args(
['benchmarks', self.testInput0, self.testInput1, '--', 'e'])
self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, 'benchmarks')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertEqual(parsed.benchmark_options, ['e'])
def test_filters_basic(self):
parsed = self.parser.parse_args(
['filters', self.testInput0, 'c', 'd'])
self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, 'filters')
self.assertEqual(parsed.test[0].name, self.testInput0)
self.assertEqual(parsed.filter_baseline[0], 'c')
self.assertEqual(parsed.filter_contender[0], 'd')
self.assertFalse(parsed.benchmark_options)
def test_filters_with_remainder(self):
parsed = self.parser.parse_args(
['filters', self.testInput0, 'c', 'd', 'e'])
self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, 'filters')
self.assertEqual(parsed.test[0].name, self.testInput0)
self.assertEqual(parsed.filter_baseline[0], 'c')
self.assertEqual(parsed.filter_contender[0], 'd')
self.assertEqual(parsed.benchmark_options, ['e'])
def test_filters_with_remainder_after_doubleminus(self):
parsed = self.parser.parse_args(
['filters', self.testInput0, 'c', 'd', '--', 'f'])
self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, 'filters')
self.assertEqual(parsed.test[0].name, self.testInput0)
self.assertEqual(parsed.filter_baseline[0], 'c')
self.assertEqual(parsed.filter_contender[0], 'd')
self.assertEqual(parsed.benchmark_options, ['f'])
def test_benchmarksfiltered_basic(self):
parsed = self.parser.parse_args(
['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e'])
self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, 'benchmarksfiltered')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.filter_baseline[0], 'c')
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertEqual(parsed.filter_contender[0], 'e')
self.assertFalse(parsed.benchmark_options)
def test_benchmarksfiltered_with_remainder(self):
parsed = self.parser.parse_args(
['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e', 'f'])
self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, 'benchmarksfiltered')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.filter_baseline[0], 'c')
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertEqual(parsed.filter_contender[0], 'e')
self.assertEqual(parsed.benchmark_options[0], 'f')
def test_benchmarksfiltered_with_remainder_after_doubleminus(self):
parsed = self.parser.parse_args(
['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e', '--', 'g'])
self.assertFalse(parsed.display_aggregates_only)
self.assertTrue(parsed.utest)
self.assertEqual(parsed.mode, 'benchmarksfiltered')
self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
self.assertEqual(parsed.filter_baseline[0], 'c')
self.assertEqual(parsed.test_contender[0].name, self.testInput1)
self.assertEqual(parsed.filter_contender[0], 'e')
self.assertEqual(parsed.benchmark_options[0], 'g')
if __name__ == '__main__':
# unittest.main()
main()
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off;
# kate: indent-mode python; remove-trailing-spaces modified;
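
For reference, a minimal sketch of the pipeline that main() above wires together, using the fixture paths that TestParser.setUp constructs; this is illustrative only and assumes those JSON files exist relative to the working directory:

from gbench import util, report

# Load (or run) both inputs and sort them exactly as main() does.
json1 = util.sort_benchmark_results(
    util.run_or_load_benchmark('gbench/Inputs/test1_run1.json', []))
json2 = util.sort_benchmark_results(
    util.run_or_load_benchmark('gbench/Inputs/test1_run2.json', []))

# Compute the diff and emit the same textual report compare.py prints.
diff_report = report.get_difference_report(json1, json2, True)
for line in report.print_difference_report(diff_report, False, True, 0.05, True):
    print(line)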

View File

@@ -0,0 +1,119 @@
{
"context": {
"date": "2016-08-02 17:44:46",
"num_cpus": 4,
"mhz_per_cpu": 4228,
"cpu_scaling_enabled": false,
"library_build_type": "release"
},
"benchmarks": [
{
"name": "BM_SameTimes",
"iterations": 1000,
"real_time": 10,
"cpu_time": 10,
"time_unit": "ns"
},
{
"name": "BM_2xFaster",
"iterations": 1000,
"real_time": 50,
"cpu_time": 50,
"time_unit": "ns"
},
{
"name": "BM_2xSlower",
"iterations": 1000,
"real_time": 50,
"cpu_time": 50,
"time_unit": "ns"
},
{
"name": "BM_1PercentFaster",
"iterations": 1000,
"real_time": 100,
"cpu_time": 100,
"time_unit": "ns"
},
{
"name": "BM_1PercentSlower",
"iterations": 1000,
"real_time": 100,
"cpu_time": 100,
"time_unit": "ns"
},
{
"name": "BM_10PercentFaster",
"iterations": 1000,
"real_time": 100,
"cpu_time": 100,
"time_unit": "ns"
},
{
"name": "BM_10PercentSlower",
"iterations": 1000,
"real_time": 100,
"cpu_time": 100,
"time_unit": "ns"
},
{
"name": "BM_100xSlower",
"iterations": 1000,
"real_time": 100,
"cpu_time": 100,
"time_unit": "ns"
},
{
"name": "BM_100xFaster",
"iterations": 1000,
"real_time": 10000,
"cpu_time": 10000,
"time_unit": "ns"
},
{
"name": "BM_10PercentCPUToTime",
"iterations": 1000,
"real_time": 100,
"cpu_time": 100,
"time_unit": "ns"
},
{
"name": "BM_ThirdFaster",
"iterations": 1000,
"real_time": 100,
"cpu_time": 100,
"time_unit": "ns"
},
{
"name": "MyComplexityTest_BigO",
"run_name": "MyComplexityTest",
"run_type": "aggregate",
"aggregate_name": "BigO",
"cpu_coefficient": 4.2749856294592886e+00,
"real_coefficient": 6.4789275289789780e+00,
"big_o": "N",
"time_unit": "ns"
},
{
"name": "MyComplexityTest_RMS",
"run_name": "MyComplexityTest",
"run_type": "aggregate",
"aggregate_name": "RMS",
"rms": 4.5097802512472874e-03
},
{
"name": "BM_NotBadTimeUnit",
"iterations": 1000,
"real_time": 0.4,
"cpu_time": 0.5,
"time_unit": "s"
},
{
"name": "BM_DifferentTimeUnit",
"iterations": 1,
"real_time": 1,
"cpu_time": 1,
"time_unit": "s"
}
]
}

View File

@@ -0,0 +1,119 @@
{
"context": {
"date": "2016-08-02 17:44:46",
"num_cpus": 4,
"mhz_per_cpu": 4228,
"cpu_scaling_enabled": false,
"library_build_type": "release"
},
"benchmarks": [
{
"name": "BM_SameTimes",
"iterations": 1000,
"real_time": 10,
"cpu_time": 10,
"time_unit": "ns"
},
{
"name": "BM_2xFaster",
"iterations": 1000,
"real_time": 25,
"cpu_time": 25,
"time_unit": "ns"
},
{
"name": "BM_2xSlower",
"iterations": 20833333,
"real_time": 100,
"cpu_time": 100,
"time_unit": "ns"
},
{
"name": "BM_1PercentFaster",
"iterations": 1000,
"real_time": 98.9999999,
"cpu_time": 98.9999999,
"time_unit": "ns"
},
{
"name": "BM_1PercentSlower",
"iterations": 1000,
"real_time": 100.9999999,
"cpu_time": 100.9999999,
"time_unit": "ns"
},
{
"name": "BM_10PercentFaster",
"iterations": 1000,
"real_time": 90,
"cpu_time": 90,
"time_unit": "ns"
},
{
"name": "BM_10PercentSlower",
"iterations": 1000,
"real_time": 110,
"cpu_time": 110,
"time_unit": "ns"
},
{
"name": "BM_100xSlower",
"iterations": 1000,
"real_time": 1.0000e+04,
"cpu_time": 1.0000e+04,
"time_unit": "ns"
},
{
"name": "BM_100xFaster",
"iterations": 1000,
"real_time": 100,
"cpu_time": 100,
"time_unit": "ns"
},
{
"name": "BM_10PercentCPUToTime",
"iterations": 1000,
"real_time": 110,
"cpu_time": 90,
"time_unit": "ns"
},
{
"name": "BM_ThirdFaster",
"iterations": 1000,
"real_time": 66.665,
"cpu_time": 66.664,
"time_unit": "ns"
},
{
"name": "MyComplexityTest_BigO",
"run_name": "MyComplexityTest",
"run_type": "aggregate",
"aggregate_name": "BigO",
"cpu_coefficient": 5.6215779594361486e+00,
"real_coefficient": 5.6288314793554610e+00,
"big_o": "N",
"time_unit": "ns"
},
{
"name": "MyComplexityTest_RMS",
"run_name": "MyComplexityTest",
"run_type": "aggregate",
"aggregate_name": "RMS",
"rms": 3.3128901852342174e-03
},
{
"name": "BM_NotBadTimeUnit",
"iterations": 1000,
"real_time": 0.04,
"cpu_time": 0.6,
"time_unit": "s"
},
{
"name": "BM_DifferentTimeUnit",
"iterations": 1,
"real_time": 1,
"cpu_time": 1,
"time_unit": "ns"
}
]
}
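
These two fixtures form the baseline/contender pair exercised by compare.py's tests. For intuition, a small sketch of the relative-change arithmetic such a comparison is built around (assuming the usual (new - old) / |old| convention; the actual reporting logic lives in gbench/report.py):

def relative_change(old_ns, new_ns):
    # Assumed convention: negative means the contender got faster.
    return (new_ns - old_ns) / abs(old_ns)

print(relative_change(50, 25))    # BM_2xFaster:        -0.5 (twice as fast)
print(relative_change(100, 110))  # BM_10PercentSlower: +0.1 (10% slower)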

View File

@@ -0,0 +1,81 @@
{
"context": {
"date": "2016-08-02 17:44:46",
"num_cpus": 4,
"mhz_per_cpu": 4228,
"cpu_scaling_enabled": false,
"library_build_type": "release"
},
"benchmarks": [
{
"name": "BM_Hi",
"iterations": 1234,
"real_time": 42,
"cpu_time": 24,
"time_unit": "ms"
},
{
"name": "BM_Zero",
"iterations": 1000,
"real_time": 10,
"cpu_time": 10,
"time_unit": "ns"
},
{
"name": "BM_Zero/4",
"iterations": 4000,
"real_time": 40,
"cpu_time": 40,
"time_unit": "ns"
},
{
"name": "Prefix/BM_Zero",
"iterations": 2000,
"real_time": 20,
"cpu_time": 20,
"time_unit": "ns"
},
{
"name": "Prefix/BM_Zero/3",
"iterations": 3000,
"real_time": 30,
"cpu_time": 30,
"time_unit": "ns"
},
{
"name": "BM_One",
"iterations": 5000,
"real_time": 5,
"cpu_time": 5,
"time_unit": "ns"
},
{
"name": "BM_One/4",
"iterations": 2000,
"real_time": 20,
"cpu_time": 20,
"time_unit": "ns"
},
{
"name": "Prefix/BM_One",
"iterations": 1000,
"real_time": 10,
"cpu_time": 10,
"time_unit": "ns"
},
{
"name": "Prefix/BM_One/3",
"iterations": 1500,
"real_time": 15,
"cpu_time": 15,
"time_unit": "ns"
},
{
"name": "BM_Bye",
"iterations": 5321,
"real_time": 11,
"cpu_time": 63,
"time_unit": "ns"
}
]
}

View File

@@ -0,0 +1,65 @@
{
"context": {
"date": "2016-08-02 17:44:46",
"num_cpus": 4,
"mhz_per_cpu": 4228,
"cpu_scaling_enabled": false,
"library_build_type": "release"
},
"benchmarks": [
{
"name": "BM_One",
"run_type": "aggregate",
"iterations": 1000,
"real_time": 10,
"cpu_time": 100,
"time_unit": "ns"
},
{
"name": "BM_Two",
"iterations": 1000,
"real_time": 9,
"cpu_time": 90,
"time_unit": "ns"
},
{
"name": "BM_Two",
"iterations": 1000,
"real_time": 8,
"cpu_time": 86,
"time_unit": "ns"
},
{
"name": "short",
"run_type": "aggregate",
"iterations": 1000,
"real_time": 8,
"cpu_time": 80,
"time_unit": "ns"
},
{
"name": "short",
"run_type": "aggregate",
"iterations": 1000,
"real_time": 8,
"cpu_time": 77,
"time_unit": "ns"
},
{
"name": "medium",
"run_type": "iteration",
"iterations": 1000,
"real_time": 8,
"cpu_time": 80,
"time_unit": "ns"
},
{
"name": "medium",
"run_type": "iteration",
"iterations": 1000,
"real_time": 9,
"cpu_time": 82,
"time_unit": "ns"
}
]
}

View File

@@ -0,0 +1,65 @@
{
"context": {
"date": "2016-08-02 17:44:46",
"num_cpus": 4,
"mhz_per_cpu": 4228,
"cpu_scaling_enabled": false,
"library_build_type": "release"
},
"benchmarks": [
{
"name": "BM_One",
"iterations": 1000,
"real_time": 9,
"cpu_time": 110,
"time_unit": "ns"
},
{
"name": "BM_Two",
"run_type": "aggregate",
"iterations": 1000,
"real_time": 10,
"cpu_time": 89,
"time_unit": "ns"
},
{
"name": "BM_Two",
"iterations": 1000,
"real_time": 7,
"cpu_time": 72,
"time_unit": "ns"
},
{
"name": "short",
"run_type": "aggregate",
"iterations": 1000,
"real_time": 7,
"cpu_time": 75,
"time_unit": "ns"
},
{
"name": "short",
"run_type": "aggregate",
"iterations": 762,
"real_time": 4.54,
"cpu_time": 66.6,
"time_unit": "ns"
},
{
"name": "short",
"run_type": "iteration",
"iterations": 1000,
"real_time": 800,
"cpu_time": 1,
"time_unit": "ns"
},
{
"name": "medium",
"run_type": "iteration",
"iterations": 1200,
"real_time": 5,
"cpu_time": 53,
"time_unit": "ns"
}
]
}

View File

@@ -0,0 +1,96 @@
{
"benchmarks": [
{
"name": "99 family 0 instance 0 repetition 0",
"run_type": "iteration",
"family_index": 0,
"per_family_instance_index": 0,
"repetition_index": 0
},
{
"name": "98 family 0 instance 0 repetition 1",
"run_type": "iteration",
"family_index": 0,
"per_family_instance_index": 0,
"repetition_index": 1
},
{
"name": "97 family 0 instance 0 aggregate",
"run_type": "aggregate",
"family_index": 0,
"per_family_instance_index": 0,
"aggregate_name": "9 aggregate"
},
{
"name": "96 family 0 instance 1 repetition 0",
"run_type": "iteration",
"family_index": 0,
"per_family_instance_index": 1,
"repetition_index": 0
},
{
"name": "95 family 0 instance 1 repetition 1",
"run_type": "iteration",
"family_index": 0,
"per_family_instance_index": 1,
"repetition_index": 1
},
{
"name": "94 family 0 instance 1 aggregate",
"run_type": "aggregate",
"family_index": 0,
"per_family_instance_index": 1,
"aggregate_name": "9 aggregate"
},
{
"name": "93 family 1 instance 0 repetition 0",
"run_type": "iteration",
"family_index": 1,
"per_family_instance_index": 0,
"repetition_index": 0
},
{
"name": "92 family 1 instance 0 repetition 1",
"run_type": "iteration",
"family_index": 1,
"per_family_instance_index": 0,
"repetition_index": 1
},
{
"name": "91 family 1 instance 0 aggregate",
"run_type": "aggregate",
"family_index": 1,
"per_family_instance_index": 0,
"aggregate_name": "9 aggregate"
},
{
"name": "90 family 1 instance 1 repetition 0",
"run_type": "iteration",
"family_index": 1,
"per_family_instance_index": 1,
"repetition_index": 0
},
{
"name": "89 family 1 instance 1 repetition 1",
"run_type": "iteration",
"family_index": 1,
"per_family_instance_index": 1,
"repetition_index": 1
},
{
"name": "88 family 1 instance 1 aggregate",
"run_type": "aggregate",
"family_index": 1,
"per_family_instance_index": 1,
"aggregate_name": "9 aggregate"
}
]
}

View File

@@ -0,0 +1,21 @@
{
"context": {
"date": "2016-08-02 17:44:46",
"num_cpus": 4,
"mhz_per_cpu": 4228,
"cpu_scaling_enabled": false,
"library_build_type": "release"
},
"benchmarks": [
{
"name": "whocares",
"run_type": "aggregate",
"aggregate_name": "zz",
"aggregate_unit": "percentage",
"iterations": 1000,
"real_time": 0.01,
"cpu_time": 0.10,
"time_unit": "ns"
}
]
}

View File

@@ -0,0 +1,21 @@
{
"context": {
"date": "2016-08-02 17:44:46",
"num_cpus": 4,
"mhz_per_cpu": 4228,
"cpu_scaling_enabled": false,
"library_build_type": "release"
},
"benchmarks": [
{
"name": "whocares",
"run_type": "aggregate",
"aggregate_name": "zz",
"aggregate_unit": "percentage",
"iterations": 1000,
"real_time": 0.005,
"cpu_time": 0.15,
"time_unit": "ns"
}
]
}

View File

@@ -0,0 +1,8 @@
"""Google Benchmark tooling"""
__author__ = 'Eric Fiselier'
__email__ = 'eric@efcs.ca'
__versioninfo__ = (0, 5, 0)
__version__ = '.'.join(str(v) for v in __versioninfo__) + 'dev'
__all__ = []

File diff suppressed because it is too large

View File

@@ -0,0 +1,181 @@
"""util.py - General utilities for running, loading, and processing benchmarks
"""
import json
import os
import tempfile
import subprocess
import sys
import functools
# Input file type enumeration
IT_Invalid = 0
IT_JSON = 1
IT_Executable = 2
_num_magic_bytes = 2 if sys.platform.startswith('win') else 4
def is_executable_file(filename):
"""
    Return 'True' if 'filename' names a valid file which is likely
    an executable. A file is considered an executable if it starts with the
    magic bytes for an EXE, Mach-O, or ELF file.
"""
if not os.path.isfile(filename):
return False
with open(filename, mode='rb') as f:
magic_bytes = f.read(_num_magic_bytes)
if sys.platform == 'darwin':
return magic_bytes in [
b'\xfe\xed\xfa\xce', # MH_MAGIC
b'\xce\xfa\xed\xfe', # MH_CIGAM
b'\xfe\xed\xfa\xcf', # MH_MAGIC_64
b'\xcf\xfa\xed\xfe', # MH_CIGAM_64
b'\xca\xfe\xba\xbe', # FAT_MAGIC
b'\xbe\xba\xfe\xca' # FAT_CIGAM
]
elif sys.platform.startswith('win'):
return magic_bytes == b'MZ'
else:
return magic_bytes == b'\x7FELF'
def is_json_file(filename):
"""
Returns 'True' if 'filename' names a valid JSON output file.
'False' otherwise.
"""
try:
with open(filename, 'r') as f:
json.load(f)
return True
except BaseException:
pass
return False
def classify_input_file(filename):
"""
Return a tuple (type, msg) where 'type' specifies the classified type
of 'filename'. If 'type' is 'IT_Invalid' then 'msg' is a human readable
    string representing the error.
"""
ftype = IT_Invalid
err_msg = None
if not os.path.exists(filename):
err_msg = "'%s' does not exist" % filename
elif not os.path.isfile(filename):
err_msg = "'%s' does not name a file" % filename
elif is_executable_file(filename):
ftype = IT_Executable
elif is_json_file(filename):
ftype = IT_JSON
else:
err_msg = "'%s' does not name a valid benchmark executable or JSON file" % filename
return ftype, err_msg
def check_input_file(filename):
"""
Classify the file named by 'filename' and return the classification.
If the file is classified as 'IT_Invalid' print an error message and exit
the program.
"""
ftype, msg = classify_input_file(filename)
if ftype == IT_Invalid:
print("Invalid input file: %s" % msg)
sys.exit(1)
return ftype
def find_benchmark_flag(prefix, benchmark_flags):
"""
Search the specified list of flags for a flag matching `<prefix><arg>` and
if it is found return the arg it specifies. If specified more than once the
last value is returned. If the flag is not found None is returned.
"""
assert prefix.startswith('--') and prefix.endswith('=')
result = None
for f in benchmark_flags:
if f.startswith(prefix):
result = f[len(prefix):]
return result
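# For example (illustrative values only):
#   find_benchmark_flag('--benchmark_out=',
#                       ['--benchmark_out=a.json', '--benchmark_out=b.json'])
# returns 'b.json' (the last occurrence wins), and it returns None when no
# flag with the given prefix is present.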
def remove_benchmark_flags(prefix, benchmark_flags):
"""
Return a new list containing the specified benchmark_flags except those
with the specified prefix.
"""
assert prefix.startswith('--') and prefix.endswith('=')
return [f for f in benchmark_flags if not f.startswith(prefix)]
def load_benchmark_results(fname):
"""
Read benchmark output from a file and return the JSON object.
REQUIRES: 'fname' names a file containing JSON benchmark output.
"""
with open(fname, 'r') as f:
return json.load(f)
def sort_benchmark_results(result):
benchmarks = result['benchmarks']
# From inner key to the outer key!
    benchmarks = sorted(
        benchmarks, key=lambda benchmark: benchmark.get('repetition_index', -1))
    benchmarks = sorted(
        benchmarks, key=lambda benchmark: 1 if benchmark.get('run_type') == "aggregate" else 0)
    benchmarks = sorted(
        benchmarks, key=lambda benchmark: benchmark.get('per_family_instance_index', -1))
    benchmarks = sorted(
        benchmarks, key=lambda benchmark: benchmark.get('family_index', -1))
result['benchmarks'] = benchmarks
return result
def run_benchmark(exe_name, benchmark_flags):
"""
Run a benchmark specified by 'exe_name' with the specified
'benchmark_flags'. The benchmark is run directly as a subprocess to preserve
real time console output.
RETURNS: A JSON object representing the benchmark output
"""
output_name = find_benchmark_flag('--benchmark_out=',
benchmark_flags)
is_temp_output = False
if output_name is None:
is_temp_output = True
thandle, output_name = tempfile.mkstemp()
os.close(thandle)
benchmark_flags = list(benchmark_flags) + \
['--benchmark_out=%s' % output_name]
cmd = [exe_name] + benchmark_flags
print("RUNNING: %s" % ' '.join(cmd))
exitCode = subprocess.call(cmd)
if exitCode != 0:
print('TEST FAILED...')
sys.exit(exitCode)
json_res = load_benchmark_results(output_name)
if is_temp_output:
os.unlink(output_name)
return json_res
def run_or_load_benchmark(filename, benchmark_flags):
"""
Get the results for a specified benchmark. If 'filename' specifies
an executable benchmark then the results are generated by running the
benchmark. Otherwise 'filename' must name a valid JSON output file,
which is loaded and the result returned.
"""
ftype = check_input_file(filename)
if ftype == IT_JSON:
return load_benchmark_results(filename)
if ftype == IT_Executable:
return run_benchmark(filename, benchmark_flags)
raise ValueError('Unknown file type %s' % ftype)
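
The ordering contract of sort_benchmark_results above is easiest to see on a tiny hand-made input; a minimal, illustrative sketch (the records below are not from any fixture in this commit):

from gbench.util import sort_benchmark_results

result = {'benchmarks': [
    {'name': 'agg',  'family_index': 0, 'per_family_instance_index': 0,
     'run_type': 'aggregate'},
    {'name': 'rep1', 'family_index': 0, 'per_family_instance_index': 0,
     'run_type': 'iteration', 'repetition_index': 1},
    {'name': 'rep0', 'family_index': 0, 'per_family_instance_index': 0,
     'run_type': 'iteration', 'repetition_index': 0},
]}
names = [b['name'] for b in sort_benchmark_results(result)['benchmarks']]
print(names)  # ['rep0', 'rep1', 'agg'] - repetitions in order, aggregates last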

View File

@@ -0,0 +1 @@
scipy>=1.5.0

View File

@@ -0,0 +1,151 @@
#!/usr/bin/env python
"""
strip_asm.py - Clean up ASM output for the specified file
"""
from argparse import ArgumentParser
import sys
import os
import re
def find_used_labels(asm):
found = set()
    label_re = re.compile(r"\s*j[a-z]+\s+\.L([a-zA-Z0-9][a-zA-Z0-9_]*)")
for l in asm.splitlines():
m = label_re.match(l)
if m:
found.add('.L%s' % m.group(1))
return found
def normalize_labels(asm):
decls = set()
label_decl = re.compile("^[.]{0,1}L([a-zA-Z0-9][a-zA-Z0-9_]*)(?=:)")
for l in asm.splitlines():
m = label_decl.match(l)
if m:
decls.add(m.group(0))
if len(decls) == 0:
return asm
needs_dot = next(iter(decls))[0] != '.'
if not needs_dot:
return asm
for ld in decls:
        asm = re.sub(r"(^|\s+)" + ld + r"(?=:|\s)", '\\1.' + ld, asm)
return asm
def transform_labels(asm):
asm = normalize_labels(asm)
used_decls = find_used_labels(asm)
new_asm = ''
    label_decl = re.compile(r"^\.L([a-zA-Z0-9][a-zA-Z0-9_]*)(?=:)")
for l in asm.splitlines():
m = label_decl.match(l)
if not m or m.group(0) in used_decls:
new_asm += l
new_asm += '\n'
return new_asm
def is_identifier(tk):
if len(tk) == 0:
return False
first = tk[0]
if not first.isalpha() and first != '_':
return False
for i in range(1, len(tk)):
c = tk[i]
if not c.isalnum() and c != '_':
return False
return True
def process_identifiers(l):
"""
process_identifiers - process all identifiers and modify them to have
consistent names across all platforms; specifically across ELF and MachO.
    For example, MachO inserts an additional underscore at the beginning of
names. This function removes that.
"""
parts = re.split(r'([a-zA-Z0-9_]+)', l)
new_line = ''
for tk in parts:
if is_identifier(tk):
if tk.startswith('__Z'):
tk = tk[1:]
elif tk.startswith('_') and len(tk) > 1 and \
tk[1].isalpha() and tk[1] != 'Z':
tk = tk[1:]
new_line += tk
return new_line
def process_asm(asm):
"""
Strip the ASM of unwanted directives and lines
"""
new_contents = ''
asm = transform_labels(asm)
# TODO: Add more things we want to remove
    discard_regexes = [
        re.compile(r"\s+\..*$"),  # directive
        re.compile(r"\s*#(NO_APP|APP)$"),  # inline ASM
        re.compile(r"\s*#.*$"),  # comment line
        re.compile(r"\s*\.globa?l\s*([.a-zA-Z_][a-zA-Z0-9$_.]*)"),  # global directive
        re.compile(r"\s*\.(string|asciz|ascii|[1248]?byte|short|word|long|quad|value|zero)"),
    ]
keep_regexes = [
]
fn_label_def = re.compile("^[a-zA-Z_][a-zA-Z0-9_.]*:")
for l in asm.splitlines():
# Remove Mach-O attribute
l = l.replace('@GOTPCREL', '')
add_line = True
for reg in discard_regexes:
if reg.match(l) is not None:
add_line = False
break
for reg in keep_regexes:
if reg.match(l) is not None:
add_line = True
break
if add_line:
if fn_label_def.match(l) and len(new_contents) != 0:
new_contents += '\n'
l = process_identifiers(l)
new_contents += l
new_contents += '\n'
return new_contents
def main():
parser = ArgumentParser(
description='generate a stripped assembly file')
parser.add_argument(
'input', metavar='input', type=str, nargs=1,
help='An input assembly file')
parser.add_argument(
'out', metavar='output', type=str, nargs=1,
help='The output file')
args, unknown_args = parser.parse_known_args()
input = args.input[0]
output = args.out[0]
if not os.path.isfile(input):
print(("ERROR: input file '%s' does not exist") % input)
sys.exit(1)
contents = None
with open(input, 'r') as f:
contents = f.read()
new_contents = process_asm(contents)
with open(output, 'w') as f:
f.write(new_contents)
if __name__ == '__main__':
main()
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off;
# kate: indent-mode python; remove-trailing-spaces modified;
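
To see what process_asm above keeps and drops, a quick illustrative check (the snippet is hand-written sample input, not a file from this commit):

from strip_asm import process_asm

SAMPLE = (
    "foo:\n"
    "\t.cfi_startproc\n"
    "\tpushq %rbp\n"
    "\tjmp .L2\n"
    ".L1:\n"
    "\tnop\n"
    ".L2:\n"
    "\tret\n"
)

# Directives such as .cfi_startproc are stripped, the unreferenced label
# .L1: is dropped, and the jump target .L2: is kept.
print(process_asm(SAMPLE))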