benchmarkRunner.py

#!/usr/bin/env python3
"""Run a Catch benchmark binary several times and aggregate per-test durations."""
import subprocess, os, sys
import xml.etree.ElementTree as ET
from collections import defaultdict
from statistics import median, stdev
from datetime import datetime


def get_commit_hash():
    res = subprocess.run('git rev-parse HEAD'.split(), check=True,
                         stdout=subprocess.PIPE, universal_newlines=True)
    return res.stdout.strip()

if len(sys.argv) < 2:
    print('Usage: {} benchmark-binary'.format(sys.argv[0]))
    sys.exit(1)

num_runs = 10
data = defaultdict(list)  # test case name -> list of measured durations


def parse_file(file):
    # Walk the XML report, collecting the duration of every test case.
    def recursive_search(node):
        if node.tag == 'TestCase':
            results = node.find('OverallResult')
            time = results.get('durationInSeconds')
            data[node.get('name')].append(float(time))
        elif node.tag in ('Group', 'Catch'):
            for child in node:
                recursive_search(child)

    tree = ET.parse(file)
    recursive_search(tree.getroot())


def run_benchmarks(binary):
    # '-d yes' makes Catch report durations, '-r xml -o <file>' writes the
    # XML report to <file>.
    call = [binary] + '-d yes -r xml -o'.split()
    for i in range(num_runs):
        file = 'temp{}.xml'.format(i)
        print('Run number {}'.format(i))
        subprocess.run(call + [file])
        parse_file(file)
        # Remove the file right after parsing, because benchmark output can be big
        os.remove(file)


# Run benchmarks
run_benchmarks(sys.argv[1])

# Name the results file after the current time and commit hash.
result_file = '{:%Y-%m-%dT%H-%M-%S}-{}.result'.format(datetime.now(), get_commit_hash())
print('Writing results to {}'.format(result_file))
with open(result_file, 'w') as file:
    for k in sorted(data):
        file.write('{}: median: {} (s), stddev: {} (s)\n'.format(k, median(data[k]), stdev(data[k])))
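
For reference, here is a minimal sketch of the report shape the parser above walks. The element names (Catch, Group, TestCase, OverallResult) and the durationInSeconds attribute are exactly the ones the script reads; the test case name and the duration value below are made up for illustration.

import xml.etree.ElementTree as ET

sample = '''
<Catch name="benchmark-binary">
  <Group name="benchmark-binary">
    <TestCase name="sort 1000 ints">
      <OverallResult success="true" durationInSeconds="0.004"/>
    </TestCase>
  </Group>
</Catch>
'''

# parse_file reads files on disk; fromstring builds the same tree from a string.
root = ET.fromstring(sample)
case = root.find('./Group/TestCase')
print(case.get('name'), case.find('OverallResult').get('durationInSeconds'))
# prints: sort 1000 ints 0.004

Because recursive_search recurses into any Group or Catch node, it still finds the test cases if the reporter nests groups differently.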