From ec9cfc79942b87c0e7be5a0a79712c7835610d84 Mon Sep 17 00:00:00 2001
From: v-kaywon
Date: Thu, 10 Aug 2017 11:02:49 -0700
Subject: [PATCH] changed README

---
 test/Performance/README.md         | 4 ++--
 test/Performance/run-perf_tests.py | 1 +
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/test/Performance/README.md b/test/Performance/README.md
index 0fb305c7..1b2976e1 100644
--- a/test/Performance/README.md
+++ b/test/Performance/README.md
@@ -24,11 +24,11 @@ PHPBench is used to run the benchmarks. Visit http://phpbench.readthedocs.io/en/
 ### 3. The number of iterations for each test can be modified in the test itself (e.g., in test/Performance/benchmark/sqlsrv). Each test has an @Iterations(n) annotation; changing the number in this annotation changes the number of iterations run for that test. By default, most tests are set to 1000 iterations.
 ### 4. Execute run-perf_tests.py.
 ### Windows
-    py.exe run-perf_tests.py -platform >> run-perf_output.txt
+    py.exe run-perf_tests.py -platform | tee run-perf_output.txt
 ### Linux and Mac
 On Linux and Mac, the script must be executed with `sudo python3` because, to enable pooling, it needs to modify the odbcinst.ini system file. As an improvement, the location of the odbcinst.ini file could be changed so that sudo is not required.
-    python3 run-perf_tests.py -platform >> run-perf_output.txt
+    python3 run-perf_tests.py -platform | tee run-perf_output.txt
 
 `-platform` - The platform that the tests are run on. Must be one of the following: Windows10, WindowsServer2016, WindowsServer2012, Ubuntu16, RedHat7, Sierra
 `-php-driver` (optional) - The driver that the tests are run on. Must be one of the following: sqlsrv, pdo_sqlsrv, or both. Default is both.
 
diff --git a/test/Performance/run-perf_tests.py b/test/Performance/run-perf_tests.py
index b2f45e32..0a04e2ab 100644
--- a/test/Performance/run-perf_tests.py
+++ b/test/Performance/run-perf_tests.py
@@ -656,6 +656,7 @@ def parse_results( dump_file ):
     # If the benchmark was run successfully, parse the results. This is where you would add code to parse more details about the benchmark.
     else:
         xml_result.success = 1
+        # convert microseconds to seconds
         xml_result.duration = int( round( int( benchmark[0][0].find( 'stats' ).get( 'sum' )) / 1000000 ))
         iterations = benchmark[0][0].findall( 'iteration' )
         xml_result.iterations = len( iterations )
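
For reference: PHPBench's XML dump reports timing stats in microseconds, which is why parse_results divides the 'sum' attribute by 1000000. Below is a minimal sketch of that extraction step, assuming the element layout implied by the hunk's benchmark[0][0] indexing and the 'stats'/'iteration' names it uses; the summarize_variant helper and the variant name are illustrative, not part of the actual script.

    import xml.etree.ElementTree as ET

    def summarize_variant(variant):
        # 'sum' is the variant's total run time across all iterations,
        # in microseconds; round it to whole seconds.
        duration = int(round(int(variant.find('stats').get('sum')) / 1000000))
        # Each recorded run appears as one <iteration> element.
        iterations = len(variant.findall('iteration'))
        return duration, iterations

With the indexing used in the hunk, variant would correspond to benchmark[0][0] after loading the dump with ET.parse(dump_file) and locating the benchmark element.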