
[v2] benchtests: Add malloc microbenchmark

Message ID 1397737835-15868-1-git-send-email-will.newton@linaro.org
State Superseded

Commit Message

Will Newton April 17, 2014, 12:30 p.m. UTC
Add a microbenchmark for measuring malloc and free performance. The
benchmark allocates and frees buffers of random sizes in a random
order and measures the overall execution time and RSS. Variants of the
benchmark are run with 8, 32 and 64 threads to measure the effect of
concurrency on allocator performance.

The random block sizes used follow an inverse square distribution
which is intended to mimic the behaviour of real applications which
tend to allocate many more small blocks than large ones.
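Concretely, the sizes are drawn by inverse transform sampling: a uniform
r in [0, 1] is mapped through the inverse CDF of a truncated power law.
A minimal sketch of the technique (the patch's get_block_size below
implements exactly this):

#include <math.h>
#include <stdlib.h>

/* Sketch: draw a block size from p(x) ~ x^-2 on [min, max] using
   inverse transform sampling.  */
static unsigned int
sample_inverse_square (float min, float max)
{
  const float exponent = -2;
  float min_pow = powf (min, exponent + 1);
  float max_pow = powf (max, exponent + 1);
  float r = (float) rand () / RAND_MAX;
  return (unsigned int) powf ((max_pow - min_pow) * r + min_pow,
                              1 / (exponent + 1));
}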

ChangeLog:

2014-04-15  Will Newton  <will.newton@linaro.org>

	* benchtests/Makefile (benchset): Add malloc benchmarks.
	Link threaded malloc benchmarks with libpthread.
	* benchtests/bench-malloc-threads-32.c: New file.
	* benchtests/bench-malloc-threads-64.c: Likewise.
	* benchtests/bench-malloc-threads-8.c: Likewise.
	* benchtests/bench-malloc.c: Likewise.
---
 benchtests/Makefile                  |   7 +-
 benchtests/bench-malloc-threads-32.c |  20 +++
 benchtests/bench-malloc-threads-64.c |  20 +++
 benchtests/bench-malloc-threads-8.c  |  20 +++
 benchtests/bench-malloc.c            | 236 +++++++++++++++++++++++++++++++++++
 5 files changed, 302 insertions(+), 1 deletion(-)
 create mode 100644 benchtests/bench-malloc-threads-32.c
 create mode 100644 benchtests/bench-malloc-threads-64.c
 create mode 100644 benchtests/bench-malloc-threads-8.c
 create mode 100644 benchtests/bench-malloc.c

Changes in v2:
 - Move random number generation out of the loop and use arrays of random
   values. This reduces the overhead of the benchmark loop to 10% or less.

Comments

Siddhesh Poyarekar May 30, 2014, 9:45 a.m. UTC | #1
On Thu, Apr 17, 2014 at 01:30:35PM +0100, Will Newton wrote:
> Add a microbenchmark for measuring malloc and free performance. The
> benchmark allocates and frees buffers of random sizes in a random
> order and measures the overall execution time and RSS. Variants of the
> benchmark are run with 8, 32 and 64 threads to measure the effect of
> concurrency on allocator performance.
> 
> The random block sizes used follow an inverse square distribution
> which is intended to mimic the behaviour of real applications which
> tend to allocate many more small blocks than large ones.
> 
> ChangeLog:
> 
> 2014-04-15  Will Newton  <will.newton@linaro.org>
> 
> 	* benchtests/Makefile (benchset): Add malloc benchmarks.
> 	Link threaded malloc benchmarks with libpthread.
> 	* benchtests/bench-malloc-threads-32.c: New file.
> 	* benchtests/bench-malloc-threads-64.c: Likewise.
> 	* benchtests/bench-malloc-threads-8.c: Likewise.
> 	* benchtests/bench-malloc.c: Likewise.
> ---
>  benchtests/Makefile                  |   7 +-
>  benchtests/bench-malloc-threads-32.c |  20 +++
>  benchtests/bench-malloc-threads-64.c |  20 +++
>  benchtests/bench-malloc-threads-8.c  |  20 +++
>  benchtests/bench-malloc.c            | 236 +++++++++++++++++++++++++++++++++++
>  5 files changed, 302 insertions(+), 1 deletion(-)
>  create mode 100644 benchtests/bench-malloc-threads-32.c
>  create mode 100644 benchtests/bench-malloc-threads-64.c
>  create mode 100644 benchtests/bench-malloc-threads-8.c
>  create mode 100644 benchtests/bench-malloc.c
> 
> Changes in v2:
>  - Move random number generation out of the loop and use arrays of random
>    values. This reduces the overhead of the benchmark loop to 10% or less.
> 
> diff --git a/benchtests/Makefile b/benchtests/Makefile
> index a0954cd..f38380d 100644
> --- a/benchtests/Makefile
> +++ b/benchtests/Makefile
> @@ -37,9 +37,11 @@ string-bench := bcopy bzero memccpy memchr memcmp memcpy memmem memmove \
>  		strspn strstr strcpy_chk stpcpy_chk memrchr strsep strtok
>  string-bench-all := $(string-bench)
>  
> +malloc-bench := malloc malloc-threads-8 malloc-threads-32 malloc-threads-64
> +
>  stdlib-bench := strtod
>  
> -benchset := $(string-bench-all) $(stdlib-bench)
> +benchset := $(string-bench-all) $(stdlib-bench) $(malloc-bench)

The ideal output here would be to have a single bench-malloc.out that
has the number of threads as variants.

>  
>  CFLAGS-bench-ffs.c += -fno-builtin
>  CFLAGS-bench-ffsll.c += -fno-builtin
> @@ -47,6 +49,9 @@ CFLAGS-bench-ffsll.c += -fno-builtin
>  $(addprefix $(objpfx)bench-,$(bench-math)): $(common-objpfx)math/libm.so
>  $(addprefix $(objpfx)bench-,$(bench-pthread)): \
>  	$(common-objpfx)nptl/libpthread.so
> +$(objpfx)bench-malloc-threads-8: $(common-objpfx)nptl/libpthread.so
> +$(objpfx)bench-malloc-threads-32: $(common-objpfx)nptl/libpthread.so
> +$(objpfx)bench-malloc-threads-64: $(common-objpfx)nptl/libpthread.so
>  

$(addprefix $(objpfx)bench-,$(malloc-bench)): $(common-objpfx)nptl/libpthread.so

>  
>  
> diff --git a/benchtests/bench-malloc-threads-32.c b/benchtests/bench-malloc-threads-32.c
> new file mode 100644
> index 0000000..463ceb7
> --- /dev/null
> +++ b/benchtests/bench-malloc-threads-32.c
> @@ -0,0 +1,20 @@
> +/* Measure malloc and free functions with threads.
> +   Copyright (C) 2014 Free Software Foundation, Inc.
> +   This file is part of the GNU C Library.
> +
> +   The GNU C Library is free software; you can redistribute it and/or
> +   modify it under the terms of the GNU Lesser General Public
> +   License as published by the Free Software Foundation; either
> +   version 2.1 of the License, or (at your option) any later version.
> +
> +   The GNU C Library is distributed in the hope that it will be useful,
> +   but WITHOUT ANY WARRANTY; without even the implied warranty of
> +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> +   Lesser General Public License for more details.
> +
> +   You should have received a copy of the GNU Lesser General Public
> +   License along with the GNU C Library; if not, see
> +   <http://www.gnu.org/licenses/>.  */
> +
> +#define NUM_THREADS 32
> +#include "bench-malloc.c"
> diff --git a/benchtests/bench-malloc-threads-64.c b/benchtests/bench-malloc-threads-64.c
> new file mode 100644
> index 0000000..61d8c10
> --- /dev/null
> +++ b/benchtests/bench-malloc-threads-64.c
> @@ -0,0 +1,20 @@
> +/* Measure malloc and free functions with threads.
> +   Copyright (C) 2014 Free Software Foundation, Inc.
> +   This file is part of the GNU C Library.
> +
> +   The GNU C Library is free software; you can redistribute it and/or
> +   modify it under the terms of the GNU Lesser General Public
> +   License as published by the Free Software Foundation; either
> +   version 2.1 of the License, or (at your option) any later version.
> +
> +   The GNU C Library is distributed in the hope that it will be useful,
> +   but WITHOUT ANY WARRANTY; without even the implied warranty of
> +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> +   Lesser General Public License for more details.
> +
> +   You should have received a copy of the GNU Lesser General Public
> +   License along with the GNU C Library; if not, see
> +   <http://www.gnu.org/licenses/>.  */
> +
> +#define NUM_THREADS 64
> +#include "bench-malloc.c"
> diff --git a/benchtests/bench-malloc-threads-8.c b/benchtests/bench-malloc-threads-8.c
> new file mode 100644
> index 0000000..ac4ff79
> --- /dev/null
> +++ b/benchtests/bench-malloc-threads-8.c
> @@ -0,0 +1,20 @@
> +/* Measure malloc and free functions with threads.
> +   Copyright (C) 2014 Free Software Foundation, Inc.
> +   This file is part of the GNU C Library.
> +
> +   The GNU C Library is free software; you can redistribute it and/or
> +   modify it under the terms of the GNU Lesser General Public
> +   License as published by the Free Software Foundation; either
> +   version 2.1 of the License, or (at your option) any later version.
> +
> +   The GNU C Library is distributed in the hope that it will be useful,
> +   but WITHOUT ANY WARRANTY; without even the implied warranty of
> +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> +   Lesser General Public License for more details.
> +
> +   You should have received a copy of the GNU Lesser General Public
> +   License along with the GNU C Library; if not, see
> +   <http://www.gnu.org/licenses/>.  */
> +
> +#define NUM_THREADS 8
> +#include "bench-malloc.c"
> diff --git a/benchtests/bench-malloc.c b/benchtests/bench-malloc.c
> new file mode 100644
> index 0000000..dc4fe17
> --- /dev/null
> +++ b/benchtests/bench-malloc.c
> @@ -0,0 +1,236 @@
> +/* Benchmark malloc and free functions.
> +   Copyright (C) 2013-2014 Free Software Foundation, Inc.
> +   This file is part of the GNU C Library.
> +
> +   The GNU C Library is free software; you can redistribute it and/or
> +   modify it under the terms of the GNU Lesser General Public
> +   License as published by the Free Software Foundation; either
> +   version 2.1 of the License, or (at your option) any later version.
> +
> +   The GNU C Library is distributed in the hope that it will be useful,
> +   but WITHOUT ANY WARRANTY; without even the implied warranty of
> +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> +   Lesser General Public License for more details.
> +
> +   You should have received a copy of the GNU Lesser General Public
> +   License along with the GNU C Library; if not, see
> +   <http://www.gnu.org/licenses/>.  */
> +
> +#include <math.h>
> +#include <pthread.h>
> +#include <stdio.h>
> +#include <stdlib.h>
> +#include <sys/time.h>
> +#include <sys/resource.h>
> +
> +#include "bench-timing.h"
> +#include "json-lib.h"
> +
> +#define BENCHMARK_ITERATIONS	40000000
> +#define RAND_SEED		88
> +
> +#ifndef NUM_THREADS
> +#define NUM_THREADS 1

# define...

> +#endif
> +
> +/* Maximum memory that can be allocated at any one time is:
> +
> +   NUM_THREADS * WORKING_SET_SIZE * MAX_ALLOCATION_SIZE
> +
> +   However due to the distribution of the random block sizes
> +   the typical amount allocated will be much smaller.  */
> +#define WORKING_SET_SIZE	1024
> +
> +#define MIN_ALLOCATION_SIZE	4
> +#define MAX_ALLOCATION_SIZE	32768

A maximum of 32K only tests arena allocation performance.  This is
fine for now since malloc+mmap performance is as interesting.  What is
interesting though is the dynamic threshold management which brings
allocations into the arena for larger sizes and what kind of
performance improvement it provides, but that is a different
benchmark.
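If a future variant does want to exercise that behaviour
deterministically, one option (a sketch, not part of this patch;
mallopt and M_MMAP_THRESHOLD are the documented glibc knobs) is to pin
the threshold so the dynamic adjustment cannot move mid-benchmark:

#include <malloc.h>
#include <stdlib.h>

/* Sketch: fix the mmap threshold at its documented 128 KiB starting
   value so glibc's dynamic threshold adjustment does not move the
   arena/mmap boundary during the run.  mallopt returns 1 on success.  */
static void
pin_mmap_threshold (void)
{
  if (mallopt (M_MMAP_THRESHOLD, 128 * 1024) != 1)
    abort ();
}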

> +
> +/* Get a random block size with an inverse square distribution.  */
> +static unsigned int
> +get_block_size (unsigned int rand_data)
> +{
> +  /* Inverse square.  */
> +  float exponent = -2;

Mark as const.

> +  /* Minimum value of distribution.  */
> +  float dist_min = MIN_ALLOCATION_SIZE;
> +  /* Maximum value of distribution.  */
> +  float dist_max = MAX_ALLOCATION_SIZE;
> +
> +  float min_pow = powf (dist_min, exponent + 1);
> +  float max_pow = powf (dist_max, exponent + 1);
> +
> +  float r = (float) rand_data / RAND_MAX;
> +
> +  return (unsigned int) powf ((max_pow - min_pow) * r + min_pow, 1 / (exponent + 1));
> +}
> +
> +#define NUM_BLOCK_SIZES	8000
> +#define NUM_OFFSETS	((WORKING_SET_SIZE) * 4)
> +
> +static unsigned int random_block_sizes[NUM_BLOCK_SIZES];
> +static unsigned int random_offsets[NUM_OFFSETS];
> +
> +static void
> +init_random_values (void)
> +{
> +  size_t i;
> +
> +  for (i = 0; i < NUM_BLOCK_SIZES; i++)

You can collapse this to:

  for (size_t i = 0; i < NUM_BLOCK_SIZES; i++)

> +    random_block_sizes[i] = get_block_size (rand ());
> +
> +  for (i = 0; i < NUM_OFFSETS; i++)

Likewise.

> +    random_offsets[i] = rand () % WORKING_SET_SIZE;
> +}
> +
> +static unsigned int
> +get_random_block_size (unsigned int *state)
> +{
> +  unsigned int idx = *state;
> +
> +  if (idx >= NUM_BLOCK_SIZES - 1)
> +    idx = 0;
> +  else
> +    idx++;
> +
> +  *state = idx;
> +
> +  return random_block_sizes[idx];
> +}
> +
> +static unsigned int
> +get_random_offset (unsigned int *state)
> +{
> +  unsigned int idx = *state;
> +
> +  if (idx >= NUM_OFFSETS - 1)
> +    idx = 0;
> +  else
> +    idx++;
> +
> +  *state = idx;
> +
> +  return random_offsets[idx];
> +}
> +
> +/* Allocate and free blocks in a random order.  */
> +static void
> +malloc_benchmark_loop (size_t iters, void **ptr_arr)
> +{
> +  size_t i;
> +  unsigned int offset_state = 0, block_state = 0;
> +
> +  for (i = 0; i < iters; i++)

You can collapse this to:

  for (size_t i = 0; i < iters; i++)

> +    {
> +      unsigned int next_idx = get_random_offset (&offset_state);
> +      unsigned int next_block = get_random_block_size (&block_state);
> +
> +      free (ptr_arr[next_idx]);
> +
> +      ptr_arr[next_idx] = malloc (next_block);
> +    }
> +}
> +
> +static void *working_set[NUM_THREADS][WORKING_SET_SIZE];
> +
> +#if NUM_THREADS > 1
> +static pthread_t threads[NUM_THREADS];
> +
> +struct thread_args
> +{
> +  size_t iters;
> +  void **working_set;
> +};
> +
> +static void *
> +benchmark_thread (void *arg)
> +{
> +  struct thread_args *args = (struct thread_args *) arg;
> +  size_t iters = args->iters;
> +  void *thread_set = args->working_set;
> +
> +  malloc_benchmark_loop (iters, thread_set);
> +
> +  return NULL;
> +}
> +#endif
> +
> +static void
> +do_benchmark (size_t iters)
> +{
> +#if NUM_THREADS == 1
> +  malloc_benchmark_loop (iters, working_set[0]);
> +#else
> +  struct thread_args args[NUM_THREADS];
> +
> +  size_t i;
> +
> +  for (i = 0; i < NUM_THREADS; i++)
> +    {
> +      args[i].iters = iters;
> +      args[i].working_set = working_set[i];
> +      pthread_create(&threads[i], NULL, benchmark_thread, &args[i]);
> +    }
> +
> +  for (i = 0; i < NUM_THREADS; i++)
> +    pthread_join(threads[i], NULL);
> +#endif
> +}
> +
> +int
> +main (int argc, char **argv)
> +{
> +  timing_t start, stop, cur;
> +  size_t iters = BENCHMARK_ITERATIONS;
> +  unsigned long res;
> +  json_ctx_t json_ctx;
> +  double d_total_s, d_total_i;
> +
> +  init_random_values ();
> +
> +  json_init (&json_ctx, 0, stdout);
> +
> +  json_document_begin (&json_ctx);
> +
> +  json_attr_string (&json_ctx, "timing_type", TIMING_TYPE);
> +
> +  json_attr_object_begin (&json_ctx, "functions");
> +
> +  json_attr_object_begin (&json_ctx, "malloc");
> +
> +  json_attr_object_begin (&json_ctx, "");
> +
> +  TIMING_INIT (res);
> +
> +  (void) res;
> +
> +  TIMING_NOW (start);
> +  do_benchmark (iters);
> +  TIMING_NOW (stop);
> +
> +  struct rusage usage;
> +  getrusage(RUSAGE_SELF, &usage);
> +
> +  TIMING_DIFF (cur, start, stop);
> +
> +  d_total_s = cur;
> +  d_total_i = iters * NUM_THREADS;
> +
> +  json_attr_double (&json_ctx, "duration", d_total_s);
> +  json_attr_double (&json_ctx, "iterations", d_total_i);
> +  json_attr_double (&json_ctx, "mean", d_total_s / d_total_i);
> +  json_attr_double (&json_ctx, "max_rss", usage.ru_maxrss);

I don't know how useful max_rss would be since we're only doing a
malloc and never really writing anything to the allocated memory.
Smaller sizes may well result in actual page allocation since we
write to the chunk headers, but probably not so for larger sizes.

Overcommit status of the system on which the benchmark was run would
also be a useful thing to know here because the memory reclamation for
non-main arenas is different when overcommit_memory is set to 2 and
that could have performance implications.  That would be
Linux-specific though, so I'm not sure how to accommodate it here.  It
could be done as a separate change I guess.
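A sketch of what recording it could look like (Linux-only; the proc
path is standard, the -1 fallback is my own convention):

#include <stdio.h>

/* Sketch: read /proc/sys/vm/overcommit_memory so the overcommit policy
   can be reported next to the benchmark results; returns -1 if the
   file cannot be read (e.g. on non-Linux systems).  */
static int
get_overcommit_policy (void)
{
  int policy = -1;
  FILE *f = fopen ("/proc/sys/vm/overcommit_memory", "r");
  if (f != NULL)
    {
      if (fscanf (f, "%d", &policy) != 1)
        policy = -1;
      fclose (f);
    }
  return policy;
}

/* ... and in main:
   json_attr_double (&json_ctx, "overcommit_memory",
                     get_overcommit_policy ());  */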

> +
> +  json_attr_double (&json_ctx, "threads", NUM_THREADS);
> +  json_attr_double (&json_ctx, "min_size", MIN_ALLOCATION_SIZE);
> +  json_attr_double (&json_ctx, "max_size", MAX_ALLOCATION_SIZE);
> +  json_attr_double (&json_ctx, "random_seed", RAND_SEED);
> +
> +  json_attr_object_end (&json_ctx);
> +
> +  json_attr_object_end (&json_ctx);
> +
> +  json_attr_object_end (&json_ctx);
> +
> +  json_document_end (&json_ctx);
> +
> +  return 0;
> +}
> -- 
> 1.8.1.4
> 

This looks good to me barring a few nits I mentioned above.  Have you
seen if the non-main arena needs to extend/reduce itself with the
number of iterations and working set you have defined?  That is
another overhead since there are a few mprotect/mmap calls happening
there that could be expensive.

Siddhesh
Will Newton June 9, 2014, 3:14 p.m. UTC | #2
On 30 May 2014 10:45, Siddhesh Poyarekar <siddhesh@redhat.com> wrote:

Hi Siddhesh,

Thanks for the review!

> On Thu, Apr 17, 2014 at 01:30:35PM +0100, Will Newton wrote:
>> Add a microbenchmark for measuring malloc and free performance. The
>> benchmark allocates and frees buffers of random sizes in a random
>> order and measures the overall execution time and RSS. Variants of the
>> benchmark are run with 8, 32 and 64 threads to measure the effect of
>> concurrency on allocator performance.
>>
>> The random block sizes used follow an inverse square distribution
>> which is intended to mimic the behaviour of real applications which
>> tend to allocate many more small blocks than large ones.
>>
>> ChangeLog:
>>
>> 2014-04-15  Will Newton  <will.newton@linaro.org>
>>
>>       * benchtests/Makefile (benchset): Add malloc benchmarks.
>>       Link threaded malloc benchmarks with libpthread.
>>       * benchtests/bench-malloc-threads-32.c: New file.
>>       * benchtests/bench-malloc-threads-64.c: Likewise.
>>       * benchtests/bench-malloc-threads-8.c: Likewise.
>>       * benchtests/bench-malloc.c: Likewise.
>> ---
>>  benchtests/Makefile                  |   7 +-
>>  benchtests/bench-malloc-threads-32.c |  20 +++
>>  benchtests/bench-malloc-threads-64.c |  20 +++
>>  benchtests/bench-malloc-threads-8.c  |  20 +++
>>  benchtests/bench-malloc.c            | 236 +++++++++++++++++++++++++++++++++++
>>  5 files changed, 302 insertions(+), 1 deletion(-)
>>  create mode 100644 benchtests/bench-malloc-threads-32.c
>>  create mode 100644 benchtests/bench-malloc-threads-64.c
>>  create mode 100644 benchtests/bench-malloc-threads-8.c
>>  create mode 100644 benchtests/bench-malloc.c
>>
>> Changes in v2:
>>  - Move random number generation out of the loop and use arrays of random
>>    values. This reduces the overhead of the benchmark loop to 10% or less.
>>
>> diff --git a/benchtests/Makefile b/benchtests/Makefile
>> index a0954cd..f38380d 100644
>> --- a/benchtests/Makefile
>> +++ b/benchtests/Makefile
>> @@ -37,9 +37,11 @@ string-bench := bcopy bzero memccpy memchr memcmp memcpy memmem memmove \
>>               strspn strstr strcpy_chk stpcpy_chk memrchr strsep strtok
>>  string-bench-all := $(string-bench)
>>
>> +malloc-bench := malloc malloc-threads-8 malloc-threads-32 malloc-threads-64
>> +
>>  stdlib-bench := strtod
>>
>> -benchset := $(string-bench-all) $(stdlib-bench)
>> +benchset := $(string-bench-all) $(stdlib-bench) $(malloc-bench)
>
> The ideal output here would be to have a single bench-malloc.out that
> has the number of threads as variants.

Yes, I think you're right. It would also be useful to have all the
data in one file when displaying the results graphically. I'll
refactor the code in that direction.
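A sketch of the shape that refactor could take, reusing the timing and
JSON helpers from the patch (do_benchmark taking a thread count is a
hypothetical signature; this fragment would live in main):

/* Sketch: emit one JSON object per thread-count variant from a single
   binary, rather than building one binary per thread count.  */
static const size_t thread_counts[] = { 1, 8, 32, 64 };

for (size_t t = 0; t < sizeof (thread_counts) / sizeof (thread_counts[0]); t++)
  {
    char variant[16];
    snprintf (variant, sizeof (variant), "%zu", thread_counts[t]);
    json_attr_object_begin (&json_ctx, variant);

    TIMING_NOW (start);
    do_benchmark (thread_counts[t], iters);
    TIMING_NOW (stop);
    TIMING_DIFF (cur, start, stop);

    json_attr_double (&json_ctx, "duration", (double) cur);
    json_attr_object_end (&json_ctx);
  }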

>>
>>  CFLAGS-bench-ffs.c += -fno-builtin
>>  CFLAGS-bench-ffsll.c += -fno-builtin
>> @@ -47,6 +49,9 @@ CFLAGS-bench-ffsll.c += -fno-builtin
>>  $(addprefix $(objpfx)bench-,$(bench-math)): $(common-objpfx)math/libm.so
>>  $(addprefix $(objpfx)bench-,$(bench-pthread)): \
>>       $(common-objpfx)nptl/libpthread.so
>> +$(objpfx)bench-malloc-threads-8: $(common-objpfx)nptl/libpthread.so
>> +$(objpfx)bench-malloc-threads-32: $(common-objpfx)nptl/libpthread.so
>> +$(objpfx)bench-malloc-threads-64: $(common-objpfx)nptl/libpthread.so
>>
>
> $(addprefix $(objpfx)bench-,$(malloc-bench)): $(common-objpfx)nptl/libpthread.so

Fixed.

>>
>>
>> diff --git a/benchtests/bench-malloc-threads-32.c b/benchtests/bench-malloc-threads-32.c
>> new file mode 100644
>> index 0000000..463ceb7
>> --- /dev/null
>> +++ b/benchtests/bench-malloc-threads-32.c
>> @@ -0,0 +1,20 @@
>> +/* Measure malloc and free functions with threads.
>> +   Copyright (C) 2014 Free Software Foundation, Inc.
>> +   This file is part of the GNU C Library.
>> +
>> +   The GNU C Library is free software; you can redistribute it and/or
>> +   modify it under the terms of the GNU Lesser General Public
>> +   License as published by the Free Software Foundation; either
>> +   version 2.1 of the License, or (at your option) any later version.
>> +
>> +   The GNU C Library is distributed in the hope that it will be useful,
>> +   but WITHOUT ANY WARRANTY; without even the implied warranty of
>> +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
>> +   Lesser General Public License for more details.
>> +
>> +   You should have received a copy of the GNU Lesser General Public
>> +   License along with the GNU C Library; if not, see
>> +   <http://www.gnu.org/licenses/>.  */
>> +
>> +#define NUM_THREADS 32
>> +#include "bench-malloc.c"
>> diff --git a/benchtests/bench-malloc-threads-64.c b/benchtests/bench-malloc-threads-64.c
>> new file mode 100644
>> index 0000000..61d8c10
>> --- /dev/null
>> +++ b/benchtests/bench-malloc-threads-64.c
>> @@ -0,0 +1,20 @@
>> +/* Measure malloc and free functions with threads.
>> +   Copyright (C) 2014 Free Software Foundation, Inc.
>> +   This file is part of the GNU C Library.
>> +
>> +   The GNU C Library is free software; you can redistribute it and/or
>> +   modify it under the terms of the GNU Lesser General Public
>> +   License as published by the Free Software Foundation; either
>> +   version 2.1 of the License, or (at your option) any later version.
>> +
>> +   The GNU C Library is distributed in the hope that it will be useful,
>> +   but WITHOUT ANY WARRANTY; without even the implied warranty of
>> +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
>> +   Lesser General Public License for more details.
>> +
>> +   You should have received a copy of the GNU Lesser General Public
>> +   License along with the GNU C Library; if not, see
>> +   <http://www.gnu.org/licenses/>.  */
>> +
>> +#define NUM_THREADS 64
>> +#include "bench-malloc.c"
>> diff --git a/benchtests/bench-malloc-threads-8.c b/benchtests/bench-malloc-threads-8.c
>> new file mode 100644
>> index 0000000..ac4ff79
>> --- /dev/null
>> +++ b/benchtests/bench-malloc-threads-8.c
>> @@ -0,0 +1,20 @@
>> +/* Measure malloc and free functions with threads.
>> +   Copyright (C) 2014 Free Software Foundation, Inc.
>> +   This file is part of the GNU C Library.
>> +
>> +   The GNU C Library is free software; you can redistribute it and/or
>> +   modify it under the terms of the GNU Lesser General Public
>> +   License as published by the Free Software Foundation; either
>> +   version 2.1 of the License, or (at your option) any later version.
>> +
>> +   The GNU C Library is distributed in the hope that it will be useful,
>> +   but WITHOUT ANY WARRANTY; without even the implied warranty of
>> +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
>> +   Lesser General Public License for more details.
>> +
>> +   You should have received a copy of the GNU Lesser General Public
>> +   License along with the GNU C Library; if not, see
>> +   <http://www.gnu.org/licenses/>.  */
>> +
>> +#define NUM_THREADS 8
>> +#include "bench-malloc.c"
>> diff --git a/benchtests/bench-malloc.c b/benchtests/bench-malloc.c
>> new file mode 100644
>> index 0000000..dc4fe17
>> --- /dev/null
>> +++ b/benchtests/bench-malloc.c
>> @@ -0,0 +1,236 @@
>> +/* Benchmark malloc and free functions.
>> +   Copyright (C) 2013-2014 Free Software Foundation, Inc.
>> +   This file is part of the GNU C Library.
>> +
>> +   The GNU C Library is free software; you can redistribute it and/or
>> +   modify it under the terms of the GNU Lesser General Public
>> +   License as published by the Free Software Foundation; either
>> +   version 2.1 of the License, or (at your option) any later version.
>> +
>> +   The GNU C Library is distributed in the hope that it will be useful,
>> +   but WITHOUT ANY WARRANTY; without even the implied warranty of
>> +   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
>> +   Lesser General Public License for more details.
>> +
>> +   You should have received a copy of the GNU Lesser General Public
>> +   License along with the GNU C Library; if not, see
>> +   <http://www.gnu.org/licenses/>.  */
>> +
>> +#include <math.h>
>> +#include <pthread.h>
>> +#include <stdio.h>
>> +#include <stdlib.h>
>> +#include <sys/time.h>
>> +#include <sys/resource.h>
>> +
>> +#include "bench-timing.h"
>> +#include "json-lib.h"
>> +
>> +#define BENCHMARK_ITERATIONS 40000000
>> +#define RAND_SEED            88
>> +
>> +#ifndef NUM_THREADS
>> +#define NUM_THREADS 1
>
> # define...

Fixed.

>> +#endif
>> +
>> +/* Maximum memory that can be allocated at any one time is:
>> +
>> +   NUM_THREADS * WORKING_SET_SIZE * MAX_ALLOCATION_SIZE
>> +
>> +   However due to the distribution of the random block sizes
>> +   the typical amount allocated will be much smaller.  */
>> +#define WORKING_SET_SIZE     1024
>> +
>> +#define MIN_ALLOCATION_SIZE  4
>> +#define MAX_ALLOCATION_SIZE  32768
>
> A maximum of 32K only tests arena allocation performance.  This is
> fine for now since malloc+mmap performance is as interesting.  What is
> interesting though is the dynamic threshold management which brings
> allocations into the arena for larger sizes and what kind of
> performance improvement it provides, but that is a different
> benchmark.

There are at least two axes we are interested in - how performance
scales with the number of threads and how performance scales with the
allocation size. For thread performance (which this benchmark is
about) the larger allocations are not so interesting - typically their
locking overhead is in the kernel rather than userland and in terms of
real-world application performance it's just not as likely to be a
bottleneck as small allocations. We have to be pragmatic in which
choices we make as the full matrix of threads versus allocation sizes
would be pretty huge.

So I guess I should probably also write a benchmark for allocation
size for glibc as well...
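Something along these lines, perhaps (a sketch only; the sizes are
arbitrary, and a real version must stop the compiler from eliding the
malloc/free pair):

/* Sketch of a size-axis benchmark: time malloc/free pairs at a few
   fixed sizes instead of random ones.  */
static const size_t bench_sizes[] = { 16, 256, 4096, 65536, 1048576 };
static void *volatile sink;  /* Keeps the allocations observable.  */

for (size_t s = 0; s < sizeof (bench_sizes) / sizeof (bench_sizes[0]); s++)
  {
    TIMING_NOW (start);
    for (size_t i = 0; i < iters; i++)
      {
        sink = malloc (bench_sizes[s]);
        free (sink);
      }
    TIMING_NOW (stop);
    TIMING_DIFF (cur, start, stop);
    /* Report cur / iters as the mean cost at bench_sizes[s].  */
  }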

>> +
>> +/* Get a random block size with an inverse square distribution.  */
>> +static unsigned int
>> +get_block_size (unsigned int rand_data)
>> +{
>> +  /* Inverse square.  */
>> +  float exponent = -2;
>
> Mark as const.

Ok, although I don't believe it affects code generation.

>> +  /* Minimum value of distribution.  */
>> +  float dist_min = MIN_ALLOCATION_SIZE;
>> +  /* Maximum value of distribution.  */
>> +  float dist_max = MAX_ALLOCATION_SIZE;
>> +
>> +  float min_pow = powf (dist_min, exponent + 1);
>> +  float max_pow = powf (dist_max, exponent + 1);
>> +
>> +  float r = (float) rand_data / RAND_MAX;
>> +
>> +  return (unsigned int) powf ((max_pow - min_pow) * r + min_pow, 1 / (exponent + 1));
>> +}
>> +
>> +#define NUM_BLOCK_SIZES      8000
>> +#define NUM_OFFSETS  ((WORKING_SET_SIZE) * 4)
>> +
>> +static unsigned int random_block_sizes[NUM_BLOCK_SIZES];
>> +static unsigned int random_offsets[NUM_OFFSETS];
>> +
>> +static void
>> +init_random_values (void)
>> +{
>> +  size_t i;
>> +
>> +  for (i = 0; i < NUM_BLOCK_SIZES; i++)
>
> You can collapse this to:
>
>   for (size_t i = 0; i < NUM_BLOCK_SIZES; i++)
>
>> +    random_block_sizes[i] = get_block_size (rand ());
>> +
>> +  for (i = 0; i < NUM_OFFSETS; i++)
>
> Likewise.
>
>> +    random_offsets[i] = rand () % WORKING_SET_SIZE;
>> +}
>> +
>> +static unsigned int
>> +get_random_block_size (unsigned int *state)
>> +{
>> +  unsigned int idx = *state;
>> +
>> +  if (idx >= NUM_BLOCK_SIZES - 1)
>> +    idx = 0;
>> +  else
>> +    idx++;
>> +
>> +  *state = idx;
>> +
>> +  return random_block_sizes[idx];
>> +}
>> +
>> +static unsigned int
>> +get_random_offset (unsigned int *state)
>> +{
>> +  unsigned int idx = *state;
>> +
>> +  if (idx >= NUM_OFFSETS - 1)
>> +    idx = 0;
>> +  else
>> +    idx++;
>> +
>> +  *state = idx;
>> +
>> +  return random_offsets[idx];
>> +}
>> +
>> +/* Allocate and free blocks in a random order.  */
>> +static void
>> +malloc_benchmark_loop (size_t iters, void **ptr_arr)
>> +{
>> +  size_t i;
>> +  unsigned int offset_state = 0, block_state = 0;
>> +
>> +  for (i = 0; i < iters; i++)
>
> You can collapse this to:
>
>   for (size_t i = 0; i < iters; i++)

Done.

>> +    {
>> +      unsigned int next_idx = get_random_offset (&offset_state);
>> +      unsigned int next_block = get_random_block_size (&block_state);
>> +
>> +      free (ptr_arr[next_idx]);
>> +
>> +      ptr_arr[next_idx] = malloc (next_block);
>> +    }
>> +}
>> +
>> +static void *working_set[NUM_THREADS][WORKING_SET_SIZE];
>> +
>> +#if NUM_THREADS > 1
>> +static pthread_t threads[NUM_THREADS];
>> +
>> +struct thread_args
>> +{
>> +  size_t iters;
>> +  void **working_set;
>> +};
>> +
>> +static void *
>> +benchmark_thread (void *arg)
>> +{
>> +  struct thread_args *args = (struct thread_args *) arg;
>> +  size_t iters = args->iters;
>> +  void *thread_set = args->working_set;
>> +
>> +  malloc_benchmark_loop (iters, thread_set);
>> +
>> +  return NULL;
>> +}
>> +#endif
>> +
>> +static void
>> +do_benchmark (size_t iters)
>> +{
>> +#if NUM_THREADS == 1
>> +  malloc_benchmark_loop (iters, working_set[0]);
>> +#else
>> +  struct thread_args args[NUM_THREADS];
>> +
>> +  size_t i;
>> +
>> +  for (i = 0; i < NUM_THREADS; i++)
>> +    {
>> +      args[i].iters = iters;
>> +      args[i].working_set = working_set[i];
>> +      pthread_create(&threads[i], NULL, benchmark_thread, &args[i]);
>> +    }
>> +
>> +  for (i = 0; i < NUM_THREADS; i++)
>> +    pthread_join(threads[i], NULL);
>> +#endif
>> +}
>> +
>> +int
>> +main (int argc, char **argv)
>> +{
>> +  timing_t start, stop, cur;
>> +  size_t iters = BENCHMARK_ITERATIONS;
>> +  unsigned long res;
>> +  json_ctx_t json_ctx;
>> +  double d_total_s, d_total_i;
>> +
>> +  init_random_values ();
>> +
>> +  json_init (&json_ctx, 0, stdout);
>> +
>> +  json_document_begin (&json_ctx);
>> +
>> +  json_attr_string (&json_ctx, "timing_type", TIMING_TYPE);
>> +
>> +  json_attr_object_begin (&json_ctx, "functions");
>> +
>> +  json_attr_object_begin (&json_ctx, "malloc");
>> +
>> +  json_attr_object_begin (&json_ctx, "");
>> +
>> +  TIMING_INIT (res);
>> +
>> +  (void) res;
>> +
>> +  TIMING_NOW (start);
>> +  do_benchmark (iters);
>> +  TIMING_NOW (stop);
>> +
>> +  struct rusage usage;
>> +  getrusage(RUSAGE_SELF, &usage);
>> +
>> +  TIMING_DIFF (cur, start, stop);
>> +
>> +  d_total_s = cur;
>> +  d_total_i = iters * NUM_THREADS;
>> +
>> +  json_attr_double (&json_ctx, "duration", d_total_s);
>> +  json_attr_double (&json_ctx, "iterations", d_total_i);
>> +  json_attr_double (&json_ctx, "mean", d_total_s / d_total_i);
>> +  json_attr_double (&json_ctx, "max_rss", usage.ru_maxrss);
>
> I don't know how useful max_rss would be since we're only doing a
> malloc and never really writing anything to the allocated memory.
> Smaller sizes may well result in actual page allocation since we
> write to the chunk headers, but probably not so for larger sizes.

Yes, it is slightly problematic. What you probably want to do is
zero all the memory and measure RSS at that point but it would slow
down the benchmark and spend lots of time in memset instead. At the
moment it tells you how many pages are taken up by book-keeping but
not how many of those pages your application would touch anyway.
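For comparison, the touching variant is only a small change to the loop
body (a sketch; it deliberately trades loop overhead for an RSS number
that reflects pages a real application would fault in):

#include <string.h>

/* Sketch: touch every allocated byte so max_rss counts pages an
   application writing to its buffers would actually use.  */
free (ptr_arr[next_idx]);
ptr_arr[next_idx] = malloc (next_block);
if (ptr_arr[next_idx] != NULL)
  memset (ptr_arr[next_idx], 0, next_block);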

> Overcommit status of the system on which the benchmark was run would
> also be a useful thing to know here because the memory reclamation for
> non-main arenas is different when overcommit_memory is set to 2 and
> that could have performance implications.  That would be
> Linux-specific though, so I'm not sure how to accommodate it here.  It
> could be done as a separate change I guess.

I'll have a think about that...

>> +
>> +  json_attr_double (&json_ctx, "threads", NUM_THREADS);
>> +  json_attr_double (&json_ctx, "min_size", MIN_ALLOCATION_SIZE);
>> +  json_attr_double (&json_ctx, "max_size", MAX_ALLOCATION_SIZE);
>> +  json_attr_double (&json_ctx, "random_seed", RAND_SEED);
>> +
>> +  json_attr_object_end (&json_ctx);
>> +
>> +  json_attr_object_end (&json_ctx);
>> +
>> +  json_attr_object_end (&json_ctx);
>> +
>> +  json_document_end (&json_ctx);
>> +
>> +  return 0;
>> +}
>> --
>> 1.8.1.4
>>
>
> This looks good to me barring a few nits I mentioned above.  Have you
> seen if the non-main arena needs to extend/reduce itself with the
> number of iterations and working set you have defined?  That is
> another overhead since there are a few mprotect/mmap calls happening
> there that could be expensive.

No, I haven't looked into that; so far I have been treating malloc as a
black box and I'm hoping not to tailor the benchmark too far to one
implementation or another.

I'll rework the patches and hopefully get a graphing script to go with it...
Siddhesh Poyarekar June 9, 2014, 4:37 p.m. UTC | #3
On Mon, Jun 09, 2014 at 04:14:35PM +0100, Will Newton wrote:
> > A maximum of 32K only tests arena allocation performance.  This is
> > fine for now since malloc+mmap performance is as interesting.  What is
<snip>
> 
> There are at least two axes we are interested in - how performance
> scales with the number of threads and how performance scales with the
> allocation size. For thread performance (which this benchmark is
> about) the larger allocations are not so interesting - typically their
> locking overhead is in the kernel rather than userland and in terms of
> real-world application performance it's just not as likely to be a
> bottleneck as small allocations. We have to be pragmatic in which
> choices we make as the full matrix of threads versus allocation sizes
> would be pretty huge.

Heh, I noticed my typo now - I meant to say that malloc+mmap
performance is *not* as interesting :)

> So I guess I should probably also write a benchmark for allocation
> size for glibc as well...

Yes, it would be a separate benchmark and probably would need some
specific allocation patterns rather than random sizes.  Of course
choosing allocation patterns is not going to be easy.

> > Mark as const.
> 
> Ok, although I don't believe it affects code generation.

Right, it's just pedantry.

> > I don't know how useful max_rss would be since we're only doing a
> > malloc and never really writing anything to the allocated memory.
> > Smaller sizes may well result in actual page allocation since we
> > write to the chunk headers, but probably not so for larger sizes.
> 
> Yes, it is slightly problematic. What you probably want to do is
> zero all the memory and measure RSS at that point but it would slow
> down the benchmark and spend lots of time in memset instead. At the
> moment it tells you how many pages are taken up by book-keeping but
> not how many of those pages your application would touch anyway.

Oh I didn't mean to imply that we zero pages and try to get a more
accurate RSS value.  My point was that we could probably just do away
with it completely because it doesn't really tell us much - I can't
see how pages taken up by book-keeping would be useful.

However if you do want to show resource usage, then address space
usage (VSZ) might show scary numbers due to the per-thread arenas, but
they would be much more representative.  Also, it might be useful to
see how address space usage scales with threads, especially for
32-bit.

> No, I haven't looked into that; so far I have been treating malloc as a
> black box and I'm hoping not to tailor the benchmark too far to one
> implementation or another.

I agree that the benchmark should not be tailored to the current
implementation, but then this behaviour would essentially be another
set of inputs.  Simply increasing the maximum size from 32K to about
128K (that's the initial threshold for mmap anyway) might result in
that behaviour being triggered more frequently.

> I'll rework the patches and hopefully get a graphing script to go
> with it...

Thanks!  I have marked this patch as Accepted in patchwork as I think
it could go in as an initial revision for the test with nits fixed, so
you can push the benchmark and then work on improvements to it.  Or
you can do your improvements and post a new version - your choice.

Siddhesh
Ondřej Bílka June 9, 2014, 8:33 p.m. UTC | #4
On Mon, Jun 09, 2014 at 10:07:53PM +0530, Siddhesh Poyarekar wrote:
> On Mon, Jun 09, 2014 at 04:14:35PM +0100, Will Newton wrote:
> > > A maximum of 32K only tests arena allocation performance.  This is
> > > fine for now since malloc+mmap performance is as interesting.  What is
> <snip>
> > 
> > There are at least two axes we are interested in - how performance
> > scales with the number of threads and how performance scales with the
> > allocation size. For thread performance (which this benchmark is
> > about) the larger allocations are not so interesting - typically their
> > locking overhead is in the kernel rather than userland and in terms of
> > real-world application performance it's just not as likely to be a
> > bottleneck as small allocations. We have to be pragmatic in which
> > choices we make as the full matrix of threads versus allocation sizes
> > would be pretty huge.
> 
> Heh, I noticed my typo now - I meant to say that malloc+mmap
> performance is *not* as interesting :)
>
The problem is that this benchmark does not measure multithread
performance well. Just spawning many threads does not say much; my guess
is that locking will quickly cause convergence to a state where at each
core a thread with a separate arena is running. Also it does not measure
the hard case when you allocate memory in one thread.

I looked at the multithread benchmark and it has additional flaws:

Big variance: running time varies by around 10% across iterations,
depending on how the kernel schedules these. Running threads and
measuring time after you join them measures the slowest thread, so at
the end some cores are idle.

Bad units: when I run the benchmark with one thread the mean is:
"mean": 91.605,
However when we run 32 threads then it looks as if malloc speeds up
around three times:
 "mean": 28.5883,
 
> > So I guess I should probably also write a benchmark for allocation
> > size for glibc as well...
> 
> Yes, it would be a separate benchmark and probably would need some
> specific allocation patterns rather than random sizes.  Of course
> choosing allocation patterns is not going to be easy.
>
No, that was a benchmark that I posted which measured exactly what
happens at given sizes.
 
> 
> > > I don't know how useful max_rss would be since we're only doing a
> > > malloc and never really writing anything to the allocated memory.
> > > Smaller sizes may well result in actual page allocation since we
> > > write to the chunk headers, but probably not so for larger sizes.
> > 
> > Yes, it is slightly problematic. What you probably want to do is
> > zero all the memory and measure RSS at that point but it would slow
> > down the benchmark and spend lots of time in memset instead. At the
> > moment it tells you how many pages are taken up by book-keeping but
> > not how many of those pages your application would touch anyway.
> 
> Oh I didn't mean to imply that we zero pages and try to get a more
> accurate RSS value.  My point was that we could probably just do away
> with it completely because it doesn't really tell us much - I can't
> see how pages taken up by book-keeping would be useful.
> 
> However if you do want to show resource usage, then address space
> usage (VSZ) might show scary numbers due to the per-thread arenas, but
> they would be much more representative.  Also, it might be useful to
> see how address space usage scales with threads, especially for
> 32-bit.
>
Still, this would be worse than useless as it would vary wildly from real
behaviour (for example it is typical that allocations made in quick
succession will likely also be deallocated in quick succession) and that
would cause us to implement something that actually increases memory
usage. It happened in the '70s, so do not repeat this mistake.

> > No, I haven't looked into that; so far I have been treating malloc as a
> > black box and I'm hoping not to tailor the benchmark too far to one
> > implementation or another.
> 
> I agree that the benchmark should not be tailored to the current
> implementation, but then this behaviour would essentially be another
> set of inputs.  Simply increasing the maximum size from 32K to about
> 128K (that's the initial threshold for mmap anyway) might result in
> that behaviour being triggered more frequently.
>
For malloc you need benchmarks to satisfy some conditions to be
meaningful. When you compare different implementations one could use a
different memory allocation pattern. That could cause additional cache
misses that dominate performance, but you do not measure that in the
benchmark. Treating malloc as a black box kind of defeats the purpose.
Siddhesh Poyarekar June 10, 2014, midnight UTC | #5
On Mon, Jun 09, 2014 at 10:33:26PM +0200, Ondřej Bílka wrote:
> The problem is that this benchmark does not measure multithread
> performance well. Just spawning many threads does not say much; my guess
> is that locking will quickly cause convergence to a state where at each
> core a thread with a separate arena is running.

How is that a bad thing?

> Also it does not measure the hard case when you allocate memory in one
> thread.

It does that in bench-malloc.  Or maybe I don't understand what you
mean.

> I looked at the multithread benchmark and it has additional flaws:
> 
> Big variance: running time varies by around 10% across iterations,
> depending on how the kernel schedules these.

Kernel scheduling may not be the most important decider on variance.
The major factor would be points at which the arena would have to be
extended and then the performance of those syscalls.

> Running threads and measuring time after you join them measures the
> slowest thread, so at the end some cores are idle.

How does that matter?

> Bad units: when I run the benchmark with one thread the mean is:
> "mean": 91.605,
> However when we run 32 threads then it looks as if malloc speeds up
> around three times:
>  "mean": 28.5883,

Why do you think the units are bad?  Mean time for allocation of a
single block in a single thread being slower than that of multiple
threads may have something to do with the difference between
performance on the main arena vs non-main arenas.  Performance
difference between mprotect and brk or even their frequency or the
difference in logic to extend heaps or finally, defaulting to mmap for
the main arena when extension fails could be some factors.

That said, it may be useful to see how each thread performs
separately.  For all we know, the pattern of allocation may somehow be
favouring the multithreaded scenario.
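A sketch of per-thread measurement, assuming an elapsed field is added
to the patch's struct thread_args:

/* Sketch: time each thread's loop individually, so the spread across
   threads and the slowest thread become visible instead of only the
   wall-clock time up to the final join.  */
static void *
benchmark_thread (void *arg)
{
  struct thread_args *args = (struct thread_args *) arg;
  timing_t start, stop;

  TIMING_NOW (start);
  malloc_benchmark_loop (args->iters, args->working_set);
  TIMING_NOW (stop);

  TIMING_DIFF (args->elapsed, start, stop);
  return NULL;
}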

> No, that was a benchmark that I posted which measured exactly what
> happens at given sizes.

Post it again and we can discuss it?  IIRC it was similar to this
benchmark with random sizes, but maybe I misremember.

> > However if you do want to show resource usage, then address space
> > usage (VSZ) might show scary numbers due to the per-thread arenas, but
> > they would be much more representative.  Also, it might be useful to
> > see how address space usage scales with threads, especially for
> > 32-bit.
> >
> Still this would be worse than useless as it would vary wildly from real
> behaviour (for example it is typical that when there are allocations in
> quick succession then they will likely also deallocated in quick
> sucession.)  and that would cause us implement something that actually
> increases memory usage.

It would be a concern if we were measuring memory usage over time.
Looking at just maximum usage does not have that problem.

Siddhesh
Ondřej Bílka June 10, 2014, 12:25 a.m. UTC | #6
On Tue, Jun 10, 2014 at 05:30:15AM +0530, Siddhesh Poyarekar wrote:
> On Mon, Jun 09, 2014 at 10:33:26PM +0200, Ondřej Bílka wrote:
> > The problem is that this benchmark does not measure multithread
> > performance well. Just spawning many threads does not say much; my guess
> > is that locking will quickly cause convergence to a state where at each
> > core a thread with a separate arena is running.
> 
> How is that a bad thing?
> 
> > Also it does not measure the hard case when you allocate memory in one
> > thread.
> 
> It does that in bench-malloc.  Or maybe I don't understand what you
> mean.
>
Thread A allocates memory, thread B deallocates. In the current
implementation both will contend for the same lock.

> > I looked at the multithread benchmark and it has additional flaws:
> > 
> > Big variance: running time varies by around 10% across iterations,
> > depending on how the kernel schedules these.
> 
> Kernel scheduling may not be the most important decider on variance.
> The major factor would be points at which the arena would have to be
> extended and then the performance of those syscalls.
>
It would be if you measured the correct thing, which you do not. Did you
profile this benchmark so you are sure about that? Anyway, you need a
smaller variance here.
 
> > Running threads and measuring time after you join them measures the
> > slowest thread, so at the end some cores are idle.
> 
> How does that matter?
>
Because scheduling could make a difference. Simple case: you have three
threads and two cores, and each thread takes unit time. If you run two
threads in parallel then the total time is two units. If you run half of
A and B, then half of B and C, and then half of A and C, you could
finish in 1.5 units.

> > Bad units: when I run the benchmark with one thread the mean is:
> > "mean": 91.605,
> > However when we run 32 threads then it looks as if malloc speeds up
> > around three times:
> >  "mean": 28.5883,
> 
> Why do you think the units are bad?  Mean time for allocation of a
> single block in a single thread being slower than that of multiple
> threads may have something to do with the difference between
> performance on the main arena vs non-main arenas.  Performance
> difference between mprotect and brk or even their frequency or the
> difference in logic to extend heaps or finally, defaulting to mmap for
> the main arena when extension fails could be some factors.
> 
> That said, it may be useful to see how each thread performs
> separately.  For all we know, the pattern of allocation may somehow be
> favouring the multithreaded scenario.
> 
No, there is a simple reason for that. If you run a multithread program
you need to take the number of cores into account.


> > No, that was a benchmark that I posted which measured exactly what
> > happens at given sizes.
> 
> Post it again and we can discuss it?  IIRC it was similar to this
> benchmark with random sizes, but maybe I misremember.
> 
> > > However if you do want to show resource usage, then address space
> > > usage (VSZ) might show scary numbers due to the per-thread arenas, but
> > > they would be much more representative.  Also, it might be useful to
> > > see how address space usage scales with threads, especially for
> > > 32-bit.
> > >
> > Still, this would be worse than useless as it would vary wildly from real
> > behaviour (for example it is typical that allocations made in quick
> > succession will likely also be deallocated in quick succession) and that
> > would cause us to implement something that actually increases memory
> > usage.
> 
> It would be a concern if we were measuring memory usage over time.
> Looking at just maximum usage does not have that problem.
>
No, it's a problem even with maximum usage; why do you think it is
different?

When you do a hundred allocations of size A, then a hundred of size B,
then free all of A and do a hundred allocations of size C, it is more
memory friendly than if you mixed allocations of A and B with frees
from A.
Siddhesh Poyarekar June 10, 2014, 12:55 a.m. UTC | #7
On Tue, Jun 10, 2014 at 02:25:39AM +0200, Ondřej Bílka wrote:
> > It does that in bench-malloc.  Or maybe I don't understand what you
> > mean.
> >
> Thread A allocates memory, thread B deallocates. In the current
> implementation both will contend for the same lock.

OK, yes this is not measured, but it could be added on top of the
current benchmark.

> > > Running threads and measuring time after you join them measures a
> > > slowest thread so at end some cores are idle.
> > 
> > How does that matter?
> >
> Because scheduling could make a difference. Simple case: you have three
> threads and two cores, and each thread takes unit time. If you run two
> threads in parallel then the total time is two units. If you run half of
> A and B, then half of B and C, and then half of A and C, you could
> finish in 1.5 units.

OK, measuring at individual threads should alleviate this.

> No, there is a simple reason for that. If you run a multithread program
> you need to take the number of cores into account.

Yes, the number of cores is in fact very important, thanks for pointing
that out.  Simply recording it in the results should be a good start.

> > It would be a concern if we were measuring memory usage over time.
> > Looking at just maximum usage does not have that problem.
> >
> No, it's a problem even with maximum usage; why do you think it is
> different?
> 
> When you do a hundred allocations of size A, then a hundred of size B,
> then free all of A and do a hundred allocations of size C, it is more
> memory friendly than if you mixed allocations of A and B with frees from A.

OK, I misunderstood your point again, or maybe you can tell me if I
did.  I was referring to maximum usage as a general measure given that
the allocation pattern is fixed, while you're referring to a comparison
of maximum usage given different allocation patterns.  Given that the
random number generator is not seeded, the test should create a constant
allocation pattern.
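One nit on that point: the patch defines RAND_SEED (and reports it as
"random_seed") but as posted never appears to call srand, so rand() runs
from its default seed of 1. The pattern is still constant either way,
but seeding explicitly would make the reported value truthful. A sketch:

static void
init_random_values (void)
{
  /* Seed explicitly so the reported random_seed matches the seed that
     was actually used.  */
  srand (RAND_SEED);
  for (size_t i = 0; i < NUM_BLOCK_SIZES; i++)
    random_block_sizes[i] = get_block_size (rand ());
  for (size_t i = 0; i < NUM_OFFSETS; i++)
    random_offsets[i] = rand () % WORKING_SET_SIZE;
}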

Siddhesh
Will Newton June 10, 2014, 7:47 a.m. UTC | #8
On 9 June 2014 21:33, Ondřej Bílka <neleai@seznam.cz> wrote:
> On Mon, Jun 09, 2014 at 10:07:53PM +0530, Siddhesh Poyarekar wrote:
>> On Mon, Jun 09, 2014 at 04:14:35PM +0100, Will Newton wrote:
>> > > A maximum of 32K only tests arena allocation performance.  This is
>> > > fine for now since malloc+mmap performance is as interesting.  What is
>> <snip>
>> >
>> > There are at least two axes we are interested in - how performance
>> > scales with the number of threads and how performance scales with the
>> > allocation size. For thread performance (which this benchmark is
>> > about) the larger allocations are not so interesting - typically their
>> > locking overhead is in the kernel rather than userland and in terms of
>> > real-world application performance it's just not as likely to be a
>> > bottleneck as small allocations. We have to be pragmatic in which
>> > choices we make as the full matrix of threads versus allocation sizes
>> > would be pretty huge.
>>
>> Heh, I noticed my typo now - I meant to say that malloc+mmap
>> performance is *not* as interesting :)
>>
> The problem is that this benchmark does not measure multithread
> performance well. Just spawning many threads does not say much; my guess
> is that locking will quickly cause convergence to a state where at each
> core a thread with a separate arena is running. Also it does not measure
> the hard case when you allocate memory in one thread.
>
> I looked at the multithread benchmark and it has additional flaws:
>
> Big variance: running time varies by around 10% across iterations,
> depending on how the kernel schedules these. Running threads and
> measuring time after you join them measures the slowest thread, so at
> the end some cores are idle.

Thanks for the suggestion, I will look into this.

> Bad units: when I run the benchmark with one thread the mean is:
> "mean": 91.605,
> However when we run 32 threads then it looks as if malloc speeds up
> around three times:
>  "mean": 28.5883,

What is wrong with that? I assume you have a multi-core system, would
you not expect more threads to have higher throughput?

>> > So I guess I should probably also write a benchmark for allocation
>> > size for glibc as well...
>>
>> Yes, it would be a separate benchmark and probably would need some
>> specific allocation patterns rather than random sizes.  Of course
>> choosing allocation patterns is not going to be easy.
>>
> No, that was a benchmark that I posted which measured exactly what
> happens at given sizes.
>
>>
>> > > I don't know how useful max_rss would be since we're only doing a
>> > > malloc and never really writing anything to the allocated memory.
>> > > Smaller sizes may well result in actual page allocation since we
>> > > write to the chunk headers, but probably not so for larger sizes.
>> >
>> > Yes, it is slightly problematic. What you probably want to do is
>> > zero all the memory and measure RSS at that point but it would slow
>> > down the benchmark and spend lots of time in memset instead. At the
>> > moment it tells you how many pages are taken up by book-keeping but
>> > not how many of those pages your application would touch anyway.
>>
>> Oh I didn't mean to imply that we zero pages and try to get a more
>> accurate RSS value.  My point was that we could probably just do away
>> with it completely because it doesn't really tell us much - I can't
>> see how pages taken up by book-keeping would be useful.
>>
>> However if you do want to show resource usage, then address space
>> usage (VSZ) might show scary numbers due to the per-thread arenas, but
>> they would be much more representative.  Also, it might be useful to
>> see how address space usage scales with threads, especially for
>> 32-bit.
>>
> Still, this would be worse than useless as it would vary wildly from real
> behaviour (for example it is typical that allocations made in quick
> succession will likely also be deallocated in quick succession) and that
> would cause us to implement something that actually increases memory
> usage. It happened in the '70s, so do not repeat this
> mistake.
>
>> > No, I haven't looked into that; so far I have been treating malloc as a
>> > black box and I'm hoping not to tailor the benchmark too far to one
>> > implementation or another.
>>
>> I agree that the benchmark should not be tailored to the current
>> implementation, but then this behaviour would essentially be another
>> set of inputs.  Simply increasing the maximum size from 32K to about
>> 128K (that's the initial threshold for mmap anyway) might result in
>> that behaviour being triggered more frequently.
>>
> For malloc you need benchmarks to satisfy some conditions to be
> meaningful. When you compare different implementations one could use a
> different memory allocation pattern. That could cause additional cache
> misses that dominate performance, but you do not measure that in the
> benchmark. Treating malloc as a black box kind of defeats the purpose.
>
Ondřej Bílka June 10, 2014, 4:48 p.m. UTC | #9
On Tue, Jun 10, 2014 at 08:47:36AM +0100, Will Newton wrote:
> On 9 June 2014 21:33, Ondřej Bílka <neleai@seznam.cz> wrote:
> > On Mon, Jun 09, 2014 at 10:07:53PM +0530, Siddhesh Poyarekar wrote:
> >> On Mon, Jun 09, 2014 at 04:14:35PM +0100, Will Newton wrote:
> >> > > A maximum of 32K only tests arena allocation performance.  This is
> >> > > fine for now since malloc+mmap performance is as interesting.  What is
> >> <snip>
> >> >
> >> > There are at least two axes we are interested in - how performance
> >> > scales with the number of threads and how performance scales with the
> >> > allocation size. For thread performance (which this benchmark is
> >> > about) the larger allocations are not so interesting - typically their
> >> > locking overhead is in the kernel rather than userland and in terms of
> >> > real-world application performance it's just not as likely to be a
> >> > bottleneck as small allocations. We have to be pragmatic in which
> >> > choices we make as the full matrix of threads versus allocation sizes
> >> > would be pretty huge.
> >>
> >> Heh, I noticed my typo now - I meant to say that malloc+mmap
> >> performance is *not* as interesting :)
> >>
> > The problem is that this benchmark does not measure multithread
> > performance well. Just spawning many threads does not say much; my guess
> > is that locking will quickly cause convergence to a state where at each
> > core a thread with a separate arena is running. Also it does not measure
> > the hard case when you allocate memory in one thread.
> >
> > I looked at the multithread benchmark and it has additional flaws:
> >
> > Big variance: running time varies by around 10% across iterations,
> > depending on how the kernel schedules these. Running threads and
> > measuring time after you join them measures the slowest thread, so at
> > the end some cores are idle.
> 
> Thanks for the suggestion, I will look into this.
> 
> > Bad units: when I run the benchmark with one thread the mean is:
> > "mean": 91.605,
> > However when we run 32 threads then it looks as if malloc speeds up
> > around three times:
> >  "mean": 28.5883,
> 
> What is wrong with that? I assume you have a multi-core system, would
> you not expect more threads to have higher throughput?
> 
It does say mean, which is the mean execution time of the function, not
throughput. You are more interested in the overhead caused by parallelism
than in throughput, so you need to divide by the number of cores.
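
To make the units concrete (the core count here is illustrative, not
measured): the patch reports duration / (iters * NUM_THREADS), so on,
say, an eight-core machine that is busy the whole run, a 32-thread
"mean" of 28.6 cycles corresponds to roughly 28.6 * 8, about 229 cycles
of CPU time per call, rather than a three-times-faster malloc. A
per-thread timing sketch, assuming the bench-timing.h macros already
used by the patch and a thread_args struct extended with a hypothetical
elapsed field, avoids both this and the slowest-thread problem:

struct thread_args
{
  size_t iters;
  void **working_set;
  timing_t elapsed;   /* Hypothetical extra field for per-thread time.  */
};

static void *
benchmark_thread_timed (void *arg)
{
  struct thread_args *args = (struct thread_args *) arg;
  timing_t start, stop;

  TIMING_NOW (start);
  malloc_benchmark_loop (args->iters, args->working_set);
  TIMING_NOW (stop);

  /* Record this thread's own elapsed time; the caller can then report
     a per-call mean that does not shrink just because more threads ran
     in parallel, and idle cores at the end no longer skew it.  */
  TIMING_DIFF (args->elapsed, start, stop);
  return NULL;
}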
Ondřej Bílka June 10, 2014, 5:23 p.m. UTC | #10
On Tue, Jun 10, 2014 at 06:25:44AM +0530, Siddhesh Poyarekar wrote:
> On Tue, Jun 10, 2014 at 02:25:39AM +0200, Ondřej Bílka wrote:
> > The problem is that this benchmark does not measure multithreaded
> > performance well. Just spawning many threads does not say much; my guess
> > is that locking will quickly cause convergence to a state where each
> > core runs one thread with its own arena.
>
> How is that a bad thing?

That does not happen in the wild: if you have many threads which mostly
wait for IO then you will not get that convergence, and there could be
conflicts.

> > > It does that in bench-malloc.  Or maybe I don't understand what you
> > > mean.
> > >
> > Thread A allocates memory, thread B deallocates it. In the current
> > implementation both will contend for the same lock.
> 
> OK, yes this is not measured, but it could be added on top of the
> current benchmark.
> 
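(For reference, a minimal sketch of what adding that on top could look
like: one thread that only allocates and another that only frees. All
names and constants here are illustrative, and the busy-wait ring is only
to keep the sketch short; a real benchmark would want a proper bounded
queue.)

#include <pthread.h>
#include <stdlib.h>

#define QUEUE_LEN   1024
#define CROSS_ITERS 1000000

/* Single-producer/single-consumer ring: every free releases a block
   that was allocated by the other thread.  Assumes malloc succeeds.  */
static void *queue[QUEUE_LEN];

static void *
producer (void *arg)
{
  for (size_t i = 0; i < CROSS_ITERS; i++)
    {
      size_t slot = i % QUEUE_LEN;
      while (__atomic_load_n (&queue[slot], __ATOMIC_ACQUIRE) != NULL)
        ;  /* Wait for the consumer to drain this slot.  */
      __atomic_store_n (&queue[slot], malloc (42), __ATOMIC_RELEASE);
    }
  return NULL;
}

static void *
consumer (void *arg)
{
  for (size_t i = 0; i < CROSS_ITERS; i++)
    {
      size_t slot = i % QUEUE_LEN;
      void *p;
      while ((p = __atomic_load_n (&queue[slot], __ATOMIC_ACQUIRE)) == NULL)
        ;  /* Wait for the producer to fill this slot.  */
      free (p);
      __atomic_store_n (&queue[slot], NULL, __ATOMIC_RELEASE);
    }
  return NULL;
}

int
main (void)
{
  pthread_t prod, cons;
  pthread_create (&prod, NULL, producer, NULL);
  pthread_create (&cons, NULL, consumer, NULL);
  pthread_join (prod, NULL);
  pthread_join (cons, NULL);
  return 0;
}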
There is a limited set of criteria that you consider for comparison;
some are more important than others, and this one is not very important.

A more pressing criterion is the running time of gcc compilation.
Currently, if you replace the allocator with tcmalloc, the running time
improves by 10%, which is huge. Decreasing gaps like that should be the
top priority.

Then there are various pathological cases; for example, the following is
twice as slow when the threads run in parallel as when they run
sequentially.

#include <pthread.h>
#include <stdlib.h>

/* Repeatedly realloc a small block so that the allocator is entered
   on every iteration.  */
void *
reloc (void *x)
{
  int i;
  for (i = 0; i < 1000000; i++)
    x = realloc (x, 42);
  return x;
}

pthread_t thread;

int
main (int argc, char **argv)
{
  void *x = malloc (42);
  void *y = malloc (42);

  pthread_create (&thread, NULL, reloc, x);
#ifdef CONTENDED
  /* Both realloc loops run at the same time.  */
  reloc (y);
  pthread_join (thread, NULL);
#else
  /* The loops run one after the other.  */
  pthread_join (thread, NULL);
  reloc (y);
#endif
  return 0;
}
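
Assuming the file is saved as realloc-contend.c (a hypothetical name),
the two cases can be compared with something like:

gcc -O2 -pthread realloc-contend.c -o sequential && time ./sequential
gcc -O2 -pthread -DCONTENDED realloc-contend.c -o contended && time ./contended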

Then there are possible optimizations of cache locality, prefetching
and so on, for whose evaluation you need a separate benchmark.

For patches there will almost always be an overriding concern like this,
and numbers from uncontended performance would be rightfully ignored.


> > > > Starting threads and measuring time after you join them measures
> > > > the slowest thread, so at the end some cores are idle.
> > > 
> > > How does that matter?
> > >
> > Because scheduling could make a difference. Simple case: you have three
> > threads and two cores, and each thread takes one unit of time. If you
> > run two threads in parallel and then the third alone, the total time is
> > two units. If you run halves of A and B, then halves of B and C, and
> > then halves of A and C, you could finish in 1.5 units.
> 
> OK, measuring at individual threads should alleviate this.
> 
> > No, there is a simple reason for that: if you run a multithreaded
> > program you need to take the number of cores into account.
> 
> Yes, the number of cores is in fact very important, thanks for pointing
> out.  Simply recording it in the results should be a good start.
>
The point is that you should report the correct mean time. The number
above should raise a red flag: ~30 cycles is barely enough, since a
single compare-and-swap takes at least 20 cycles and malloc now does
three of them for fastbins.

> > > It would be a concern if we were measuring memory usage over time.
> > > Looking at just maximum usage does not have that problem.
> > >
> > No, it's a problem even with maximum usage; why do you think it is
> > different?
> > 
> > When you do a hundred allocations of size A, then a hundred of size B,
> > then free all of A and do a hundred allocations of size C, it is more
> > memory friendly than if you mixed the allocations of A and B with the
> > frees of A.
> 
> OK, I misunderstood your point again, or maybe you can tell me if I
> did.  I was referring to maximum usage as a general measure given that
> the allocation pattern is fixed, while you're referring to a comparison
> of maximum usage given different allocation patterns.  Given that the
> random number generator is not seeded, the test should create a constant
> allocation pattern.
> 
You do benchmarks to compare memory usage. When the allocation pattern
stays fixed and the algorithm that determines addresses is also fixed,
then you will get a constant, as you said. But then you should omit it,
as it does not matter.

The natural question is: when you change the algorithm in a way that
changes the allocation pattern, how would you check that? Here
specialized benchmarks are necessary, rather than one that does the
comparison badly and would be ignored in most cases.
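
To illustrate the kind of specialized benchmark meant here (a sketch
only, with arbitrary sizes and a hypothetical GROUPED flag, not a
proposed patch): the two patterns from the example above can be compared
directly on max RSS by building each variant and diffing the output.

#include <stdio.h>
#include <stdlib.h>
#include <sys/resource.h>

#define N 100

/* Grouped pattern: all A, all B, free all A, all C.
   Interleaved pattern: A, B, free A, C, repeated.  */
int
main (void)
{
  void *a[N], *b[N], *c[N];
  int i;

#ifdef GROUPED
  for (i = 0; i < N; i++) a[i] = malloc (128);   /* size A */
  for (i = 0; i < N; i++) b[i] = malloc (4096);  /* size B */
  for (i = 0; i < N; i++) free (a[i]);
  for (i = 0; i < N; i++) c[i] = malloc (512);   /* size C */
#else
  for (i = 0; i < N; i++)
    {
      a[i] = malloc (128);
      b[i] = malloc (4096);
      free (a[i]);
      c[i] = malloc (512);
    }
#endif

  struct rusage usage;
  getrusage (RUSAGE_SELF, &usage);
  printf ("max_rss: %ld kB\n", usage.ru_maxrss);

  /* Keep b and c alive until here so their space cannot be reused.  */
  for (i = 0; i < N; i++) { free (b[i]); free (c[i]); }
  return 0;
}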
Siddhesh Poyarekar June 11, 2014, 8:42 a.m. UTC | #11
On Tue, Jun 10, 2014 at 07:23:01PM +0200, Ondřej Bílka wrote:
> There is a limited set of criteria that you consider for comparison;
> some are more important than others, and this one is not very important.
> 
> A more pressing criterion is the running time of gcc compilation.
> Currently, if you replace the allocator with tcmalloc, the running time
> improves by 10%, which is huge. Decreasing gaps like that should be the
> top priority.

I really don't think gcc compilation is a representative case
for malloc performance, but I'll pass on that since we won't really
reach agreement on what kinds of applications can be considered
representative.

> Then there are possible optimizations of cache locality, prefetching
> and so on, for whose evaluation you need a separate benchmark.
> 
> For patches there will almost always be an overriding concern like this,
> and numbers from uncontended performance would be rightfully ignored.

I don't think there's any disagreement on the fact that there are a
large number of cases to consider when benchmarking malloc (the same
point holds for string benchmarks).  The question here though is
whether this is a decent starting point.  Work on the malloc benchmark
doesn't end here; it's about to begin.

> You do benchmarks to compare memory usage. When the allocation pattern
> stays fixed and the algorithm that determines addresses is also fixed,
> then you will get a constant, as you said. But then you should omit it,
> as it does not matter.
> 
> The natural question is: when you change the algorithm in a way that
> changes the allocation pattern, how would you check that? Here
> specialized benchmarks are necessary, rather than one that does the
> comparison badly and would be ignored in most cases.

Agreed, but I don't think it makes sense to wait to commit the first
iteration till all cases raised by everyone are addressed.  Let's take
this as a starting point and build on it.  In fact it might even be a
good idea to get the malloc benchmarks into their own directory within
benchtests.  Likewise for the string benchmarks.

Siddhesh
diff mbox

Patch

diff --git a/benchtests/Makefile b/benchtests/Makefile
index a0954cd..f38380d 100644
--- a/benchtests/Makefile
+++ b/benchtests/Makefile
@@ -37,9 +37,11 @@  string-bench := bcopy bzero memccpy memchr memcmp memcpy memmem memmove \
 		strspn strstr strcpy_chk stpcpy_chk memrchr strsep strtok
 string-bench-all := $(string-bench)
 
+malloc-bench := malloc malloc-threads-8 malloc-threads-32 malloc-threads-64
+
 stdlib-bench := strtod
 
-benchset := $(string-bench-all) $(stdlib-bench)
+benchset := $(string-bench-all) $(stdlib-bench) $(malloc-bench)
 
 CFLAGS-bench-ffs.c += -fno-builtin
 CFLAGS-bench-ffsll.c += -fno-builtin
@@ -47,6 +49,9 @@  CFLAGS-bench-ffsll.c += -fno-builtin
 $(addprefix $(objpfx)bench-,$(bench-math)): $(common-objpfx)math/libm.so
 $(addprefix $(objpfx)bench-,$(bench-pthread)): \
 	$(common-objpfx)nptl/libpthread.so
+$(objpfx)bench-malloc-threads-8: $(common-objpfx)nptl/libpthread.so
+$(objpfx)bench-malloc-threads-32: $(common-objpfx)nptl/libpthread.so
+$(objpfx)bench-malloc-threads-64: $(common-objpfx)nptl/libpthread.so
 
 
 
diff --git a/benchtests/bench-malloc-threads-32.c b/benchtests/bench-malloc-threads-32.c
new file mode 100644
index 0000000..463ceb7
--- /dev/null
+++ b/benchtests/bench-malloc-threads-32.c
@@ -0,0 +1,20 @@ 
+/* Measure malloc and free functions with threads.
+   Copyright (C) 2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#define NUM_THREADS 32
+#include "bench-malloc.c"
diff --git a/benchtests/bench-malloc-threads-64.c b/benchtests/bench-malloc-threads-64.c
new file mode 100644
index 0000000..61d8c10
--- /dev/null
+++ b/benchtests/bench-malloc-threads-64.c
@@ -0,0 +1,20 @@ 
+/* Measure malloc and free functions with threads.
+   Copyright (C) 2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#define NUM_THREADS 64
+#include "bench-malloc.c"
diff --git a/benchtests/bench-malloc-threads-8.c b/benchtests/bench-malloc-threads-8.c
new file mode 100644
index 0000000..ac4ff79
--- /dev/null
+++ b/benchtests/bench-malloc-threads-8.c
@@ -0,0 +1,20 @@ 
+/* Measure malloc and free functions with threads.
+   Copyright (C) 2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#define NUM_THREADS 8
+#include "bench-malloc.c"
diff --git a/benchtests/bench-malloc.c b/benchtests/bench-malloc.c
new file mode 100644
index 0000000..dc4fe17
--- /dev/null
+++ b/benchtests/bench-malloc.c
@@ -0,0 +1,236 @@ 
+/* Benchmark malloc and free functions.
+   Copyright (C) 2013-2014 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <math.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+
+#include "bench-timing.h"
+#include "json-lib.h"
+
+#define BENCHMARK_ITERATIONS	40000000
+#define RAND_SEED		88
+
+#ifndef NUM_THREADS
+#define NUM_THREADS 1
+#endif
+
+/* Maximum memory that can be allocated at any one time is:
+
+   NUM_THREADS * WORKING_SET_SIZE * MAX_ALLOCATION_SIZE
+
+   However due to the distribution of the random block sizes
+   the typical amount allocated will be much smaller.  */
+#define WORKING_SET_SIZE	1024
+
+#define MIN_ALLOCATION_SIZE	4
+#define MAX_ALLOCATION_SIZE	32768
+
+/* Get a random block size with an inverse square distribution.  */
+static unsigned int
+get_block_size (unsigned int rand_data)
+{
+  /* Inverse square.  */
+  float exponent = -2;
+  /* Minimum value of distribution.  */
+  float dist_min = MIN_ALLOCATION_SIZE;
+  /* Maximum value of distribution.  */
+  float dist_max = MAX_ALLOCATION_SIZE;
+
+  float min_pow = powf (dist_min, exponent + 1);
+  float max_pow = powf (dist_max, exponent + 1);
+
+  float r = (float) rand_data / RAND_MAX;
+
+  return (unsigned int) powf ((max_pow - min_pow) * r + min_pow, 1 / (exponent + 1));
+}
+
+#define NUM_BLOCK_SIZES	8000
+#define NUM_OFFSETS	((WORKING_SET_SIZE) * 4)
+
+static unsigned int random_block_sizes[NUM_BLOCK_SIZES];
+static unsigned int random_offsets[NUM_OFFSETS];
+
+static void
+init_random_values (void)
+{
+  size_t i;
+
+  for (i = 0; i < NUM_BLOCK_SIZES; i++)
+    random_block_sizes[i] = get_block_size (rand ());
+
+  for (i = 0; i < NUM_OFFSETS; i++)
+    random_offsets[i] = rand () % WORKING_SET_SIZE;
+}
+
+static unsigned int
+get_random_block_size (unsigned int *state)
+{
+  unsigned int idx = *state;
+
+  if (idx >= NUM_BLOCK_SIZES - 1)
+    idx = 0;
+  else
+    idx++;
+
+  *state = idx;
+
+  return random_block_sizes[idx];
+}
+
+static unsigned int
+get_random_offset (unsigned int *state)
+{
+  unsigned int idx = *state;
+
+  if (idx >= NUM_OFFSETS - 1)
+    idx = 0;
+  else
+    idx++;
+
+  *state = idx;
+
+  return random_offsets[idx];
+}
+
+/* Allocate and free blocks in a random order.  */
+static void
+malloc_benchmark_loop (size_t iters, void **ptr_arr)
+{
+  size_t i;
+  unsigned int offset_state = 0, block_state = 0;
+
+  for (i = 0; i < iters; i++)
+    {
+      unsigned int next_idx = get_random_offset (&offset_state);
+      unsigned int next_block = get_random_block_size (&block_state);
+
+      free (ptr_arr[next_idx]);
+
+      ptr_arr[next_idx] = malloc (next_block);
+    }
+}
+
+static void *working_set[NUM_THREADS][WORKING_SET_SIZE];
+
+#if NUM_THREADS > 1
+static pthread_t threads[NUM_THREADS];
+
+struct thread_args
+{
+  size_t iters;
+  void **working_set;
+};
+
+static void *
+benchmark_thread (void *arg)
+{
+  struct thread_args *args = (struct thread_args *) arg;
+  size_t iters = args->iters;
+  void **thread_set = args->working_set;
+
+  malloc_benchmark_loop (iters, thread_set);
+
+  return NULL;
+}
+#endif
+
+static void
+do_benchmark (size_t iters)
+{
+#if NUM_THREADS == 1
+  malloc_benchmark_loop (iters, working_set[0]);
+#else
+  struct thread_args args[NUM_THREADS];
+
+  size_t i;
+
+  for (i = 0; i < NUM_THREADS; i++)
+    {
+      args[i].iters = iters;
+      args[i].working_set = working_set[i];
+      pthread_create (&threads[i], NULL, benchmark_thread, &args[i]);
+    }
+
+  for (i = 0; i < NUM_THREADS; i++)
+    pthread_join (threads[i], NULL);
+#endif
+}
+
+int
+main (int argc, char **argv)
+{
+  timing_t start, stop, cur;
+  size_t iters = BENCHMARK_ITERATIONS;
+  unsigned long res;
+  json_ctx_t json_ctx;
+  double d_total_s, d_total_i;
+
+  init_random_values ();
+
+  json_init (&json_ctx, 0, stdout);
+
+  json_document_begin (&json_ctx);
+
+  json_attr_string (&json_ctx, "timing_type", TIMING_TYPE);
+
+  json_attr_object_begin (&json_ctx, "functions");
+
+  json_attr_object_begin (&json_ctx, "malloc");
+
+  json_attr_object_begin (&json_ctx, "");
+
+  TIMING_INIT (res);
+
+  (void) res;
+
+  TIMING_NOW (start);
+  do_benchmark (iters);
+  TIMING_NOW (stop);
+
+  struct rusage usage;
+  getrusage (RUSAGE_SELF, &usage);
+
+  TIMING_DIFF (cur, start, stop);
+
+  d_total_s = cur;
+  d_total_i = iters * NUM_THREADS;
+
+  json_attr_double (&json_ctx, "duration", d_total_s);
+  json_attr_double (&json_ctx, "iterations", d_total_i);
+  json_attr_double (&json_ctx, "mean", d_total_s / d_total_i);
+  json_attr_double (&json_ctx, "max_rss", usage.ru_maxrss);
+
+  json_attr_double (&json_ctx, "threads", NUM_THREADS);
+  json_attr_double (&json_ctx, "min_size", MIN_ALLOCATION_SIZE);
+  json_attr_double (&json_ctx, "max_size", MAX_ALLOCATION_SIZE);
+  json_attr_double (&json_ctx, "random_seed", RAND_SEED);
+
+  json_attr_object_end (&json_ctx);
+
+  json_attr_object_end (&json_ctx);
+
+  json_attr_object_end (&json_ctx);
+
+  json_document_end (&json_ctx);
+
+  return 0;
+}