# benchmarkCompile.py -- benchmarks Catch's compile/link times on synthetic test files
  1. #!/usr/bin/env python3
  2. from __future__ import print_function
  3. import time, subprocess, sys, os, shutil, glob, random
  4. import argparse
  5. def median(lst):
  6. lst = sorted(lst)
  7. mid, odd = divmod(len(lst), 2)
  8. if odd:
  9. return lst[mid]
  10. else:
  11. return (lst[mid - 1] + lst[mid]) / 2.0
  12. def mean(lst):
  13. return float(sum(lst)) / max(len(lst), 1)
# Globals filled in by the CLI driver below before any compile_*/link_* helper runs.
compiler_path = ''
flags = []
# Translation unit providing Catch's main() -- compiled on its own so the
# one-time cost of CATCH_CONFIG_MAIN can be measured separately.
main_file = r'''
#define CATCH_CONFIG_MAIN
#include "catch.hpp"
'''
main_name = 'catch-main.cpp'
# Scratch directory that all generated sources and object files live in.
dir_name = 'benchmark-dir'
# Size of the synthetic test suite: files x test cases x sections x assertions.
files = 20
test_cases_in_file = 20
sections_in_file = 4
assertions_per_section = 5
# Pool of boolean expressions randomly sampled into CHECK() assertions.
# All of them hold for the values a=0, b=1, c=2, d=3, e=4 emitted by generate_file.
checks = [
'a != b', 'a != c', 'a != d', 'a != e', 'b != c', 'b != d', 'b != e', 'c != d', 'c != e', 'd != e', 'a + a == a',
'a + b == b', 'a + c == c', 'a + d == d', 'a + e == e', 'b + a == b', 'b + b == c', 'b + c == d',
'b + d == e', 'c + a == c', 'c + b == d', 'c + c == e', 'd + a == d', 'd + b == e', 'e + a == e',
'a + a + a == a', 'b + c == a + d', 'c + a + a == a + b + b + a',
'a < b', 'b < c', 'c < d', 'd < e', 'a >= a', 'd >= b',
]
  33. def create_temp_dir():
  34. if os.path.exists(dir_name):
  35. shutil.rmtree(dir_name)
  36. os.mkdir(dir_name)
  37. def copy_catch(path_to_catch):
  38. shutil.copy(path_to_catch, dir_name)
  39. def create_catch_main():
  40. with open(main_name, 'w') as f:
  41. f.write(main_file)
  42. def compile_main():
  43. start_t = time.time()
  44. subprocess.check_call([compiler_path, main_name, '-c'] + flags)
  45. end_t = time.time()
  46. return end_t - start_t
  47. def compile_files():
  48. cpp_files = glob.glob('tests*.cpp')
  49. start_t = time.time()
  50. subprocess.check_call([compiler_path, '-c'] + flags + cpp_files)
  51. end_t = time.time()
  52. return end_t - start_t
  53. def link_files():
  54. obj_files = glob.glob('*.o')
  55. start_t = time.time()
  56. subprocess.check_call([compiler_path] + flags + obj_files)
  57. end_t = time.time()
  58. return end_t - start_t
  59. def benchmark(func):
  60. results = [func() for i in range(10)]
  61. return mean(results), median(results)
  62. def char_range(start, end):
  63. for c in range(ord(start), ord(end)):
  64. yield chr(c)
  65. def generate_sections(fd):
  66. for i in range(sections_in_file):
  67. fd.write(' SECTION("Section {}") {{\n'.format(i))
  68. fd.write('\n'.join(' CHECK({});'.format(check) for check in random.sample(checks, assertions_per_section)))
  69. fd.write(' }\n')
  70. def generate_file(file_no):
  71. with open('tests{}.cpp'.format(file_no), 'w') as f:
  72. f.write('#include "catch.hpp"\n\n')
  73. for i in range(test_cases_in_file):
  74. f.write('TEST_CASE("File {} test {}", "[.compile]"){{\n'.format(file_no, i))
  75. for i, c in enumerate(char_range('a', 'f')):
  76. f.write(' int {} = {};\n'.format(c, i))
  77. generate_sections(f)
  78. f.write('}\n\n')
  79. def generate_files():
  80. create_catch_main()
  81. for i in range(files):
  82. generate_file(i)
  83. options = ['all', 'main', 'files', 'link']
  84. parser = argparse.ArgumentParser(description='Benchmarks Catch\'s compile times against some synthetic tests')
  85. # Add first arg -- benchmark type
  86. parser.add_argument('benchmark_kind', nargs='?', default='all', choices=options, help='What kind of benchmark to run, default: all')
  87. # Args to allow changing header/compiler
  88. parser.add_argument('-I', '--catch-header', default='catch.hpp', help = 'Path to catch.hpp, default: catch.hpp')
  89. parser.add_argument('-c', '--compiler', default='g++', help = 'Compiler to use, default: g++')
  90. parser.add_argument('-f', '--flags', help = 'Flags to be passed to the compiler. Pass as "," separated list')
  91. # Allow creating files only, without running the whole thing
  92. parser.add_argument('-g', '--generate-files', action='store_true', help='Generate test files and quit')
  93. args = parser.parse_args()
  94. compiler_path = args.compiler
  95. catch_path = args.catch_header
  96. if args.generate_files:
  97. create_temp_dir()
  98. copy_catch(catch_path)
  99. os.chdir(dir_name)
  100. # now create the fake test files
  101. generate_files()
  102. # Early exit
  103. print('Finished generating files')
  104. exit(1)
  105. os.chdir(dir_name)
  106. if args.flags:
  107. flags = args.flags.split(',')
  108. print('Time needed for ...')
  109. if args.benchmark_kind in ('all', 'main'):
  110. print(' ... compiling main, mean: {:.2f}, median: {:.2f} s'.format(*benchmark(compile_main)))
  111. if args.benchmark_kind in ('all', 'files'):
  112. print(' ... compiling test files, mean: {:.2f}, median: {:.2f} s'.format(*benchmark(compile_files)))
  113. if args.benchmark_kind in ('all', 'link'):
  114. print(' ... linking everything, mean: {:.2f}, median: {:.2f} s'.format(*benchmark(link_files)))