
[v6,12/15] scripts/simplebench: support iops

Message ID: 20200918181951.21752-13-vsementsov@virtuozzo.com
State: Superseded
Series: preallocate filter

Commit Message

Vladimir Sementsov-Ogievskiy Sept. 18, 2020, 6:19 p.m. UTC
Support benchmarks that return iops instead of seconds. We'll use it
for a further new test.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
---
 scripts/simplebench/simplebench.py | 35 +++++++++++++++++++++++-------
 1 file changed, 27 insertions(+), 8 deletions(-)
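For context, a minimal sketch (not part of this patch) of what a test_func reporting iops could look like under the protocol described in the patch below; the function name and the dummy workload are hypothetical:

    import time

    def guest_write_iops(env, case):
        """Hypothetical benchmark: count operations completed in a fixed interval."""
        ops = 0
        deadline = time.time() + 1.0      # measure for roughly one second
        while time.time() < deadline:
            ops += 1                      # stands in for one real I/O operation
        # On success return 'iops' (and optionally 'seconds');
        # on failure return {'error': str}.
        return {'iops': ops}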

Comments

Max Reitz Sept. 25, 2020, 8:54 a.m. UTC | #1
On 18.09.20 20:19, Vladimir Sementsov-Ogievskiy wrote:
> Support benchmarks that return iops instead of seconds. We'll use it
> for a further new test.
> 
> Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
> ---
>  scripts/simplebench/simplebench.py | 35 +++++++++++++++++++++++-------
>  1 file changed, 27 insertions(+), 8 deletions(-)
> 
> diff --git a/scripts/simplebench/simplebench.py b/scripts/simplebench/simplebench.py
> index 59e7314ff6..716d7fe9b2 100644
> --- a/scripts/simplebench/simplebench.py
> +++ b/scripts/simplebench/simplebench.py

[...]

> @@ -34,6 +37,7 @@ def bench_one(test_func, test_env, test_case, count=5, initial_run=True):
>  
>      Returns dict with the following fields:
>          'runs':     list of test_func results
> +        'dimension': dimension of results, may be 'seconds' or 'iops'
>          'average':  average seconds per run (exists only if at least one run

s/seconds/value/ (or something like that)

>                      succeeded)
>          'delta':    maximum delta between test_func result and the average
> @@ -54,11 +58,20 @@ def bench_one(test_func, test_env, test_case, count=5, initial_run=True):
>  
>      result = {'runs': runs}
>  
> -    successed = [r for r in runs if ('seconds' in r)]
> +    successed = [r for r in runs if ('seconds' in r or 'iops' in r)]
>      if successed:

((Pre-existing, but I feel the urge to point out that it should be
“succeeded”.  (Or perhaps “successes”.)

Sorry, not something that should be fixed here, but I just couldn’t
contain myself.))

> -        avg = sum(r['seconds'] for r in successed) / len(successed)
> +        dim = 'iops' if ('iops' in successed[0]) else 'seconds'

I think this line should be dropped, because it’s obsoleted by the
if-else that follows.

> +        if 'iops' in successed[0]:
> +            assert all('iops' in r for r in successed)
> +            dim = 'iops'
> +        else:
> +            assert all('seconds' in r for r in successed)
> +            assert all('iops' not in r for r in successed)
> +            dim = 'seconds'

Max
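For illustration, a rough sketch (not an actual respin) of how the hunk could look with the redundant pre-assignment of dim dropped, as suggested above; this is an excerpt of bench_one()'s body, not standalone code:

    successed = [r for r in runs if ('seconds' in r or 'iops' in r)]
    if successed:
        # Pick the main dimension from the first successful run and check
        # that all runs agree on it.
        if 'iops' in successed[0]:
            assert all('iops' in r for r in successed)
            dim = 'iops'
        else:
            assert all('seconds' in r for r in successed)
            assert all('iops' not in r for r in successed)
            dim = 'seconds'
        avg = sum(r[dim] for r in successed) / len(successed)
        result['dimension'] = dim
        result['average'] = avg
        result['delta'] = max(abs(r[dim] - avg) for r in successed)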

Patch

diff --git a/scripts/simplebench/simplebench.py b/scripts/simplebench/simplebench.py
index 59e7314ff6..716d7fe9b2 100644
--- a/scripts/simplebench/simplebench.py
+++ b/scripts/simplebench/simplebench.py
@@ -24,9 +24,12 @@  def bench_one(test_func, test_env, test_case, count=5, initial_run=True):
 
     test_func   -- benchmarking function with prototype
                    test_func(env, case), which takes test_env and test_case
-                   arguments and returns {'seconds': int} (which is benchmark
-                   result) on success and {'error': str} on error. Returned
-                   dict may contain any other additional fields.
+                   arguments and on success returns dict with 'seconds' or
+                   'iops' (or both) fields, specifying the benchmark result.
+                   If both 'iops' and 'seconds' provided, the 'iops' is
+                   considered the main, and 'seconds' is just an additional
+                   info. On failure test_func should return {'error': str}.
+                   Returned dict may contain any other additional fields.
     test_env    -- test environment - opaque first argument for test_func
     test_case   -- test case - opaque second argument for test_func
     count       -- how many times to call test_func, to calculate average
@@ -34,6 +37,7 @@  def bench_one(test_func, test_env, test_case, count=5, initial_run=True):
 
     Returns dict with the following fields:
         'runs':     list of test_func results
+        'dimension': dimension of results, may be 'seconds' or 'iops'
         'average':  average seconds per run (exists only if at least one run
                     succeeded)
         'delta':    maximum delta between test_func result and the average
@@ -54,11 +58,20 @@  def bench_one(test_func, test_env, test_case, count=5, initial_run=True):
 
     result = {'runs': runs}
 
-    successed = [r for r in runs if ('seconds' in r)]
+    successed = [r for r in runs if ('seconds' in r or 'iops' in r)]
     if successed:
-        avg = sum(r['seconds'] for r in successed) / len(successed)
+        dim = 'iops' if ('iops' in successed[0]) else 'seconds'
+        if 'iops' in successed[0]:
+            assert all('iops' in r for r in successed)
+            dim = 'iops'
+        else:
+            assert all('seconds' in r for r in successed)
+            assert all('iops' not in r for r in successed)
+            dim = 'seconds'
+        avg = sum(r[dim] for r in successed) / len(successed)
+        result['dimension'] = dim
         result['average'] = avg
-        result['delta'] = max(abs(r['seconds'] - avg) for r in successed)
+        result['delta'] = max(abs(r[dim] - avg) for r in successed)
 
     if len(successed) < count:
         result['n-failed'] = count - len(successed)
@@ -118,11 +131,17 @@  def ascii(results):
     """Return ASCII representation of bench() returned dict."""
     from tabulate import tabulate
 
+    dim = None
     tab = [[""] + [c['id'] for c in results['envs']]]
     for case in results['cases']:
         row = [case['id']]
         for env in results['envs']:
-            row.append(ascii_one(results['tab'][case['id']][env['id']]))
+            res = results['tab'][case['id']][env['id']]
+            if dim is None:
+                dim = res['dimension']
+            else:
+                assert dim == res['dimension']
+            row.append(ascii_one(res))
         tab.append(row)
 
-    return tabulate(tab)
+    return f'All results are in {dim}\n\n' + tabulate(tab)
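A hedged usage sketch of the updated interface: the import path and the dummy workload are assumptions for illustration (assuming scripts/simplebench is on the Python path), not part of the patch:

    from simplebench import bench_one

    def dummy_bench(env, case):
        # A real test would run a workload (e.g. qemu-img bench) and parse
        # its output; here we just pretend we measured 1000 ops per second.
        return {'iops': 1000}

    result = bench_one(dummy_bench, test_env={}, test_case={}, count=3)
    # With this patch applied, the result carries the dimension of the values.
    assert result['dimension'] == 'iops'
    print(result['average'], '+/-', result['delta'], result['dimension'])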