
[API-NEXT,1/5] linux-gen: _ishm: adding buddy allocation

Message ID 1482319702-5766-2-git-send-email-christophe.milard@linaro.org
State New

Commit Message

Christophe Milard Dec. 21, 2016, 11:28 a.m. UTC
_ishm now provides a function to create a pool for buddy memory allocation,
as well as functions to allocate/release memory from the created pool.
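
A minimal usage sketch (illustrative only: the calls below use the
prototypes added by this patch, error handling is omitted, and a flags
value of 0 is assumed):

	_odpdrv_shm_bpool_t *pool;
	void *ptr;

	/* 1MB pool with no allocation smaller than 64 bytes: */
	pool = _odp_ishmbud_pool_create("example_pool", 1024 * 1024, 64, 0);

	ptr = _odp_ishmbud_alloc(pool, 100); /* rounded up to 128 bytes */
	_odp_ishmbud_free(pool, ptr);

	_odp_ishmbud_pool_destroy(pool);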

Signed-off-by: Christophe Milard <christophe.milard@linaro.org>

---
 platform/linux-generic/Makefile.am                 |   2 +
 platform/linux-generic/_ishmbuddy.c                | 491 +++++++++++++++++++++
 .../linux-generic/include/_ishmbuddy_internal.h    |  44 ++
 3 files changed, 537 insertions(+)
 create mode 100644 platform/linux-generic/_ishmbuddy.c
 create mode 100644 platform/linux-generic/include/_ishmbuddy_internal.h

-- 
2.7.4

Patch

diff --git a/platform/linux-generic/Makefile.am b/platform/linux-generic/Makefile.am
index 999a7f5..8ae8f39 100644
--- a/platform/linux-generic/Makefile.am
+++ b/platform/linux-generic/Makefile.am
@@ -126,6 +126,7 @@  odpdrvplatinclude_HEADERS = \
 noinst_HEADERS = \
 		  ${srcdir}/include/_fdserver_internal.h \
 		  ${srcdir}/include/_ishm_internal.h \
+		  ${srcdir}/include/_ishmbuddy_internal.h \
 		  ${srcdir}/include/_ishmphy_internal.h \
 		  ${srcdir}/include/odp_align_internal.h \
 		  ${srcdir}/include/odp_atomic_internal.h \
@@ -170,6 +171,7 @@  noinst_HEADERS = \
 __LIB__libodp_linux_la_SOURCES = \
 			   _fdserver.c \
 			   _ishm.c \
+			   _ishmbuddy.c \
 			   _ishmphy.c \
 			   odp_atomic.c \
 			   odp_barrier.c \
diff --git a/platform/linux-generic/_ishmbuddy.c b/platform/linux-generic/_ishmbuddy.c
new file mode 100644
index 0000000..417c732
--- /dev/null
+++ b/platform/linux-generic/_ishmbuddy.c
@@ -0,0 +1,491 @@ 
+/* Copyright (c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ */
+
+/* This file gathers the buddy allocation functionality provided by _ishm.
+ * _odp_ishmbud_pool_create() can be used to create a pool for buddy allocation.
+ * _odp_ishmbud_pool_create() will allocate a memory area using ishm_reserve()
+ * for both the control part (needed for tracking allocation/free...) and the
+ * user memory itself (part of which will be given at each ishmbud_alloc()).
+ * When creating the pool, the pool order N (i.e. the log2 of its size)
+ * and its minimum order M (i.e. the log2 of the minimum granularity of
+ * the pool) are given.
+ * Any allocation of less than 2^M bytes will result in 2^M bytes being
+ * allocated.
+ * All allocations are rounded up to the next power of 2.
+ *
+ * The implementation of this buddy allocator is very traditional: it
+ * maintains N lists of free buffers.
+ * The control part actually contains these N queue heads (of which only
+ * those from min to max order are used), the free buffers themselves
+ * being used for chaining (the chaining info is in the buffers: as they
+ * are "free" they should not be touched by the user). The control part
+ * also contains an array of bytes remembering the size (actually the
+ * order) of the allocated buffers:
+ * There are 2^(N-M) such bytes, this being the maximum number of
+ * allocated buffers (reached when all allocations have the minimum
+ * size of 2^M bytes).
+ * Buddy allocators handle fragmentation by splitting or merging blocks
+ * in powers of 2: they guarantee a minimum efficiency of 50% at
+ * worst-case fragmentation.
+ *
+ * The reason for not using malloc() is that malloc does not guarantee
+ * memory sharability between ODP threads (regardless of their
+ * implementation), which ishm_reserve() does. See the comments around
+ * _odp_ishmbud_pool_create() and ishm_reserve() for more details.
+ */
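+
+/* Example: in a pool of order 10 (1024 bytes) with min_order 6,
+ * allocating 100 bytes rounds up to 128 bytes (order 7): the single free
+ * 1024-byte block is split into two 512-byte halves, one half into two
+ * 256-byte quarters, and one quarter into two 128-byte blocks, one of
+ * which is returned; the three unused halves land on the order 9, 8 and
+ * 7 free lists respectively.
+ */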
+
+#include <odp_posix_extensions.h>
+#include <odp_internal.h>
+#include <odp/api/spinlock.h>
+#include <odp/api/align.h>
+#include <odp/api/debug.h>
+#include <odp/drv/shm.h>
+#include <odp_shm_internal.h>
+#include <odp_debug_internal.h>
+#include <odp_align_internal.h>
+#include <_ishm_internal.h>
+#include <_ishmbuddy_internal.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <string.h>
+#include <inttypes.h>
+
+typedef _odpdrv_shm_bpool_t bpool_t; /* for shorter writing          */
+
+/* free buddy blocks contain the following structure, used to link the
+ * free blocks together.
+ */
+typedef struct bblock_t {
+	struct bblock_t *next;
+	uint32_t order;
+} bblock_t;
+
+/* value set in the 'order' table when the block is not allocated:   */
+#define BBLOCK_FREE 0
+
+/* compute ceil(log2(size)) */
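+/* (e.g. clog2(4096) == 12, clog2(4097) == 13) */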
+static uint8_t clog2(uint64_t size)
+{
+	uint64_t sz;
+	uint32_t bit;
+	uint8_t res;
+
+	sz = size;	/* we start by computing res = log2(sz)...   */
+	res = 0;
+	for (bit = 32; bit ; bit >>= 1) {
+		if (sz >= ((uint64_t)1 << bit)) {
+			sz >>= bit;
+			res += bit;
+		}
+	}
+	if (((uint64_t)1 << res) < size) /* ...and then ceil(x)      */
+		res++;
+
+	return res;
+}
+
+/*
+ * given a bblock address, and an order value, returns the address
+ * of the buddy bblock (the other "half")
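+ * (the buddy's offset from user_addr differs only in bit 'order': e.g.
+ * for order 12, the blocks at offsets 0x0000 and 0x1000 are buddies)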
+ */
+static inline bblock_t *get_bblock_buddy(bpool_t *bpool, bblock_t *addr,
+					 uint8_t order)
+{
+	uintptr_t b;
+
+	b = ((uintptr_t)addr - (uintptr_t)bpool->ctrl.user_addr);
+	b ^= (uintptr_t)1 << order;
+	return (void *)(b + (uintptr_t)bpool->ctrl.user_addr);
+}
+
+/*
+ * given a bblock address, return its block number (used as index in the
+ * alloced_order array):
+ */
+static inline uintptr_t get_bblock_nr(bpool_t *bpool, void *addr)
+{
+	uintptr_t b;
+	uint8_t min_order;
+
+	min_order = bpool->ctrl.min_order;
+	b = ((uintptr_t)addr - (uintptr_t)bpool->ctrl.user_addr) >> min_order;
+	return b;
+}
+
+/* remove bblock from the list for bblocks of rank order. The bblock to be
+ * removed is really expected to be on the list: not finding it is an error */
+static inline void remove_from_list(bpool_t *bpool, uint8_t order,
+				    bblock_t *bblock)
+{
+	bblock_t *curr;       /* current bblock (when parsing list) */
+	bblock_t *prev;       /* previous bblock (when parsing list) */
+
+	curr = bpool->ctrl.free_heads[order];
+	if (!curr)
+		goto remove_from_list_error;
+
+	if (curr == bblock) {
+		bpool->ctrl.free_heads[order] = curr->next;
+		return;
+	}
+
+	while (curr) {
+		if (curr == bblock) {
+			prev->next = curr->next;
+			return;
+		}
+		prev = curr;
+		curr = curr->next;
+	}
+
+remove_from_list_error:
+	ODP_ERR("List corrupted\n");
+}
+
+/*
+ * create a buddy memory pool of the given size (rounded up to the next
+ * power of 2), where allocations will never be smaller than min_alloc.
+ * returns a pointer to the created buddy pool.
+ * The allocated area contains:
+ * - The _odpdrv_shm_bpool_ctrl_t structure
+ * - The array of (order + 1) free list heads (only min_order..order used)
+ * - The array of 'order' values, remembering sizes of allocated bblocks
+ * - alignment to cache line
+ * - The user memory
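+ * For example (assuming 64-byte cache lines and 8-byte pointers), a pool
+ * of order 20 (1MB) with min_order 6 needs 21 list heads (168 bytes,
+ * padded to 192) and 2^(20-6) = 16384 order bytes before the 1MB user
+ * area.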
+ */
+bpool_t *_odp_ishmbud_pool_create(const char *pool_name, uint64_t size,
+				  uint64_t min_alloc, int flags)
+{
+	uint8_t  order;          /* pool order = ceil(log2(size))         */
+	uint8_t  min_order;      /* pool min_order = ceil(log2(min_alloc))*/
+	uint32_t max_nb_bblock;  /* max number of bblock, when smallest   */
+	uint32_t control_sz;     /* size of control area                  */
+	uint32_t free_head_sz;   /* mem area needed for list heads        */
+	uint32_t saved_order_sz; /* mem area to remember given sizes      */
+	uint64_t user_sz;        /* 2^order bytes		          */
+	uint64_t total_sz;	 /* total size to request                 */
+	int	 blk_idx;	 /* as returned by _odp_ishm_reserve()    */
+	bpool_t *bpool;
+	int i;
+	bblock_t *first_block;
+
+	/* a bblock_t must fit in the buffers for linked chain! */
+	if (min_alloc < sizeof(bblock_t))
+		min_alloc = sizeof(bblock_t);
+
+	/* pool order is such that 2^order = size. same for min_order   */
+	order = clog2(size);
+	min_order = clog2(min_alloc);
+
+	/* basic sanity checks on the parameters: */
+	if (order >= 64)
+		return NULL;
+	if (order < min_order)
+		return NULL;
+
+	/* at worst case, all bblocks have smallest (2^min_order) size  */
+	max_nb_bblock = (1 << (order - min_order));
+
+	/* space needed for the control area (padded to cache line size)*/
+	control_sz =
+		ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(_odpdrv_shm_bpool_ctrl_t));
+
+	/* space needed for 'order' free bblock list heads:             */
+	/* Note that only lists from min_order to order are really used.*/
+	free_head_sz = ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(void *) *
+						   (order + 1));
+
+	/* space needed to remember the order -i.e. size- of alloc'd bblocks:*/
+	saved_order_sz = ODP_CACHE_LINE_SIZE_ROUNDUP(max_nb_bblock *
+						     sizeof(uint8_t));
+
+	/* space needed for user area is 2^order bytes: */
+	user_sz = (uint64_t)1 << order;
+
+	total_sz = control_sz +
+		   free_head_sz +
+		   saved_order_sz +
+		   user_sz;
+
+	/* allocate required memory: */
+	blk_idx = _odp_ishm_reserve(pool_name, total_sz, -1,
+				    ODP_CACHE_LINE_SIZE, flags, 0);
+	if (blk_idx < 0) {
+		ODP_ERR("_odp_ishm_reserve failed.");
+		return NULL;
+	}
+
+	bpool = _odp_ishm_address(blk_idx);
+	if (bpool == NULL) {
+		ODP_ERR("_odp_ishm_address failed.");
+		return NULL;
+	}
+
+	/* remember block index, needed when pool is destroyed */
+	bpool->ctrl.ishm_blk_idx = blk_idx;
+
+	/* prepare mutex: */
+	odp_spinlock_init(&bpool->ctrl.lock);
+
+	/* initialise pointers and things... */
+	bpool->ctrl.order = order;
+	bpool->ctrl.min_order = min_order;
+	bpool->ctrl.free_heads =
+		(void *)((uintptr_t)bpool + control_sz);
+	bpool->ctrl.alloced_order =
+		(uint8_t *)((uintptr_t)bpool->ctrl.free_heads + free_head_sz);
+	bpool->ctrl.user_addr =
+		(void *)((uintptr_t)bpool->ctrl.alloced_order + saved_order_sz);
+
+	/* initialise all free list heads to NULL, except the biggest one: */
+	for (i = 0; i < order; i++)
+		bpool->ctrl.free_heads[i] = NULL;
+	bpool->ctrl.free_heads[order] = bpool->ctrl.user_addr;
+	first_block = (bblock_t *)bpool->ctrl.user_addr;
+	first_block->next = NULL;
+	first_block->order = order;
+
+	/* set all 'order' of allocated bblocks to free: */
+	memset(bpool->ctrl.alloced_order, BBLOCK_FREE, saved_order_sz);
+
+	return bpool;
+}
+
+/* destroy a buddy pool. everything goes away: no operation on the pool
+ * should follow. */
+int _odp_ishmbud_pool_destroy(bpool_t *bpool)
+{
+	return _odp_ishm_free_by_index(bpool->ctrl.ishm_blk_idx);
+}
+
+/* allocate memory from the given pool: */
+void *_odp_ishmbud_alloc(bpool_t *bpool, uint64_t size)
+{
+	uint32_t rq_order; /* requested order */
+	uint32_t try_order;
+	bblock_t *bblock;
+	bblock_t *buddy;
+	uintptr_t nr;
+
+	/* if size is zero or too big, reject: */
+	if ((!size) || (size > ((uint64_t)1 << bpool->ctrl.order))) {
+		ODP_ERR("Invalid alloc size (0 or larger than whole pool)\n");
+		return NULL;
+	}
+
+	/* compute ceil(log2(size)), to get the requested block order:    */
+	rq_order = clog2(size);
+
+	/* make sure the requested order is at least the pool minimum:   */
+	if (rq_order < bpool->ctrl.min_order)
+		rq_order = bpool->ctrl.min_order;
+
+	/* mutex from here: */
+	odp_spinlock_lock(&bpool->ctrl.lock);
+
+	/* now, start trying to allocate a bblock of rq_order. If that
+	 * fails keep trying larger orders until pool order is reached    */
+	bblock = NULL;
+	for (try_order = rq_order; try_order <= bpool->ctrl.order;
+	     try_order++) {
+		if (bpool->ctrl.free_heads[try_order]) {
+			/* remove from list: */
+			bblock =
+				(bblock_t *)(bpool->ctrl.free_heads[try_order]);
+			bpool->ctrl.free_heads[try_order] = bblock->next;
+			break;
+		}
+	}
+
+	if (!bblock) {
+		odp_spinlock_unlock(&bpool->ctrl.lock);
+		ODP_ERR("Out of memory. (Buddy pool full)\n");
+		return NULL;
+	}
+
+	/* OK: we got a block, but possibly too large (if try_order>rq_order):
+	 * return the extra halves to the pool, hence splitting the bblock at
+	 * each 'extra' order: */
+	while (try_order-- > rq_order) {
+		/* split: */
+		buddy = (bblock_t *)((uintptr_t)bblock +
+				     ((uintptr_t)1 << try_order));
+		buddy->order = try_order;
+		/* add to list: */
+		buddy->next = bpool->ctrl.free_heads[try_order];
+		bpool->ctrl.free_heads[try_order] = buddy;
+		/* mark as free (non-allocated blocks get order 0): */
+		nr = get_bblock_nr(bpool, buddy);
+		bpool->ctrl.alloced_order[nr] = BBLOCK_FREE;
+	}
+
+	/* remember the size of the allocated block: */
+	nr = get_bblock_nr(bpool, bblock);
+	bpool->ctrl.alloced_order[nr] = rq_order;
+
+	/* and return the allocated block! */
+	odp_spinlock_unlock(&bpool->ctrl.lock);
+	return (void *)bblock;
+}
+
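+/* free memory previously allocated from the pool: */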
+int _odp_ishmbud_free(bpool_t *bpool, void *addr)
+{
+	uintptr_t user_start; /* start of user area */
+	uintptr_t user_stop;  /* stop  of user area */
+	uintptr_t mask;	      /* 2^min_order - 1    */
+	bblock_t *bblock;     /* bblock being freed */
+	bblock_t *buddy;      /* buddy bblock of bblock being freed */
+	uint8_t order;	      /* order of block being freed */
+	uintptr_t nr;	      /* block number */
+
+	/* freeing NULL is regarded as OK, though without any effect:   */
+	if (!addr)
+		return 0;
+
+	user_start = (uintptr_t)bpool->ctrl.user_addr;
+	user_stop  = user_start + ((uintptr_t)1 << bpool->ctrl.order);
+	mask = ((uintptr_t)1 << bpool->ctrl.min_order) - 1;
+
+	/* some sanity checks: check that the given address is within the pool
+	 * and that the relative address has 2^min_order granularity:   */
+	if (((uintptr_t)addr < user_start) ||
+	    ((uintptr_t)addr >= user_stop) ||
+	    (((uintptr_t)addr - user_start) & mask)) {
+		ODP_ERR("Invalid address to be freed\n");
+		return -1;
+	}
+
+	/* mutex from here: */
+	odp_spinlock_lock(&bpool->ctrl.lock);
+
+	/* collect saved block order and make sure bblock was allocated */
+	bblock = (bblock_t *)addr;
+	nr = get_bblock_nr(bpool, bblock);
+	order = bpool->ctrl.alloced_order[nr];
+	if (order == BBLOCK_FREE) {
+		ODP_ERR("Double free error\n");
+		odp_spinlock_unlock(&bpool->ctrl.lock);
+		return -1;
+	}
+
+	/* this looks like a valid free, mark at least this as free:   */
+	bpool->ctrl.alloced_order[nr] = BBLOCK_FREE;
+
+	/* go up in orders, trying to merge buddies... */
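+	/* (e.g. a freed order-6 block whose order-6 buddy is also free
+	 * becomes one free order-7 block, which may in turn merge with
+	 * its own order-7 buddy, and so on) */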
+	while (order < bpool->ctrl.order) {
+		buddy = get_bblock_buddy(bpool, bblock, order);
+		/* if buddy is not free: no further merge possible */
+		nr = get_bblock_nr(bpool, buddy);
+		if (bpool->ctrl.alloced_order[nr] != BBLOCK_FREE)
+			break;
+		/* merge only bblocks of the same order: */
+		if (buddy->order != order)
+			break;
+		/* merge: remove buddy from its free list: */
+		remove_from_list(bpool, order, buddy);
+		/* merge: make sure we point at the start of the merged block: */
+		if (bblock > buddy)
+			bblock = buddy;
+		/* merge: the block size has doubled: increase order: */
+		order++;
+	}
+
+	/* insert the bblock into its correct free block list: */
+	bblock->next = bpool->ctrl.free_heads[order];
+	bpool->ctrl.free_heads[order] = bblock;
+
+	/* remember the (possibly now merged) block order: */
+	bblock->order = order;
+
+	odp_spinlock_unlock(&bpool->ctrl.lock);
+	return 0;
+}
+
+/* print buffer status and perform sanity checks: */
+int _odp_ishmbud_status(bpool_t *bpool)
+{
+	uint8_t order, pool_order, pool_min_order;
+	uint64_t free_q_nb_bblocks[64];
+	uint64_t allocated_nb_bblocks[64];
+	uint64_t free_q_nb_bblocks_bytes[64];
+	uint64_t allocated_nb_bblocks_bytes[64];
+	uint64_t total_bytes_free;
+	uint64_t total_bytes_allocated;
+	uint64_t nr;
+	bblock_t *bblock;
+	int res = 0;
+
+	odp_spinlock_lock(&bpool->ctrl.lock);
+
+	pool_order = bpool->ctrl.order;
+	pool_min_order = bpool->ctrl.min_order;
+
+	ODP_DBG("\nBuddy pool status for POOL\n");
+	ODP_DBG("pool size: %" PRIu64 " (bytes)\n",
+		((uint64_t)1 << pool_order));
+	ODP_DBG("pool order: %d\n", (int)pool_order);
+	ODP_DBG("pool min_order: %d\n", (int)pool_min_order);
+
+	/* a pool whose order is 64 or more cannot even be reached on 64
+	 * bit machines, and would overflow the arrays above! */
+	if (pool_order >= 64) {
+		odp_spinlock_unlock(&bpool->ctrl.lock);
+		return -1;
+	}
+
+	total_bytes_free = 0;
+	total_bytes_allocated = 0;
+
+	/* for each queue */
+	for (order = pool_min_order; order <= pool_order; order++) {
+		free_q_nb_bblocks[order] = 0;
+		free_q_nb_bblocks_bytes[order] = 0;
+		allocated_nb_bblocks[order] = 0;
+		allocated_nb_bblocks_bytes[order] = 0;
+
+		/* get the number of buffs in the free queue for this order: */
+		bblock = bpool->ctrl.free_heads[order];
+		while (bblock) {
+			free_q_nb_bblocks[order]++;
+			free_q_nb_bblocks_bytes[order] +=
+				((uint64_t)1 << order);
+			bblock = bblock->next;
+		}
+
+		total_bytes_free += free_q_nb_bblocks_bytes[order];
+
+		/* get the number of allocated buffers of this order */
+		for (nr = 0;
+		     nr < ((uint64_t)1 << (pool_order - pool_min_order));
+		     nr++) {
+			if (bpool->ctrl.alloced_order[nr] == order)
+				allocated_nb_bblocks[order]++;
+		}
+
+		allocated_nb_bblocks_bytes[order] =
+			allocated_nb_bblocks[order] * ((uint64_t)1 << order);
+
+		total_bytes_allocated += allocated_nb_bblocks_bytes[order];
+
+		ODP_DBG("Order %d => Free: %" PRIu64 " buffers "
+			"(%" PRIu64" bytes)   "
+			"Allocated %" PRIu64 " buffers (%" PRIu64 "  bytes)   "
+			"Total: %" PRIu64 "  bytes\n",
+			(int)order, free_q_nb_bblocks[order],
+			free_q_nb_bblocks_bytes[order],
+			allocated_nb_bblocks[order],
+			allocated_nb_bblocks_bytes[order],
+			free_q_nb_bblocks_bytes[order] +
+			allocated_nb_bblocks_bytes[order]);
+	}
+
+	ODP_DBG("Allocated space: %" PRIu64 " (bytes)\n",
+		total_bytes_allocated);
+	ODP_DBG("Free space: %" PRIu64 " (bytes)\n", total_bytes_free);
+
+	if (total_bytes_free + total_bytes_allocated !=
+	    ((uint64_t)1 << pool_order)) {
+		ODP_DBG("Lost bytes on this pool!\n");
+		res = -1;
+	}
+
+	if (res)
+		ODP_DBG("Pool inconsistent!\n");
+
+	odp_spinlock_unlock(&bpool->ctrl.lock);
+	return res;
+}
diff --git a/platform/linux-generic/include/_ishmbuddy_internal.h b/platform/linux-generic/include/_ishmbuddy_internal.h
new file mode 100644
index 0000000..a57d051
--- /dev/null
+++ b/platform/linux-generic/include/_ishmbuddy_internal.h
@@ -0,0 +1,44 @@ 
+/* Copyright (c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ */
+
+#ifndef ODP_ISHMBUDDY_INTERNAL_H_
+#define ODP_ISHMBUDDY_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+#include <odp/api/spinlock.h>
+
+typedef struct _odpdrv_shm_bpool_ctrl_t {
+	int ishm_blk_idx;       /* block index returned by _odp_ishm_reserve()*/
+	odp_spinlock_t  lock;   /* for pool access mutex		     */
+	uint8_t order;		/* pool is 2^order bytes long		     */
+	uint8_t min_order;	/* allocations won't go below 2^min_order    */
+	void **free_heads;	/* (order + 1) free list heads.              */
+	uint8_t *alloced_order;	/* saved size of allocated blocks, 0 if free */
+	void *user_addr;	/* user pool area ('real user pool')         */
+} _odpdrv_shm_bpool_ctrl_t;
+
+typedef struct _odpdrv_shm_bpool_t {
+	_odpdrv_shm_bpool_ctrl_t ctrl;	/* control part			     */
+	uint8_t mem[1];		/* area for heads, saved alloc'd orders, data*/
+} _odpdrv_shm_bpool_t;
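+
+/* A bpool is thus one single ishm block laid out as:
+ * [ctrl (padded)] [free list heads] [alloced_order bytes] [user memory]
+ */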
+
+_odpdrv_shm_bpool_t *_odp_ishmbud_pool_create(const char *pool_name,
+					      uint64_t size,
+					      uint64_t min_alloc, int flags);
+int _odp_ishmbud_pool_destroy(_odpdrv_shm_bpool_t *bpool);
+void *_odp_ishmbud_alloc(_odpdrv_shm_bpool_t *bpool, uint64_t size);
+int _odp_ishmbud_free(_odpdrv_shm_bpool_t *bpool, void *addr);
+int _odp_ishmbud_status(_odpdrv_shm_bpool_t *bpool);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif