diff mbox

[PATCHv2,3/3] linux-generic: buffers: add lock-based allocator

Message ID 1420998638-23980-4-git-send-email-bill.fischofer@linaro.org
State New
Headers show

Commit Message

Bill Fischofer Jan. 11, 2015, 5:50 p.m. UTC
To permit performance scalability testing, add the USE_BUFFER_POOL_LOCKS
compile-time switch to select lock-based vs. lockless buffer allocation.
The default (0) is lockless allocation; set this to 1 to enable locks.

Signed-off-by: Bill Fischofer <bill.fischofer@linaro.org>
---
 .../include/odp_buffer_pool_internal.h             | 120 +++++++++++++++++++++
 platform/linux-generic/odp_buffer_pool.c           |  12 +++
 2 files changed, 132 insertions(+)
diff mbox

Patch

diff --git a/platform/linux-generic/include/odp_buffer_pool_internal.h b/platform/linux-generic/include/odp_buffer_pool_internal.h
index 169e02d..a5a1864 100644
--- a/platform/linux-generic/include/odp_buffer_pool_internal.h
+++ b/platform/linux-generic/include/odp_buffer_pool_internal.h
@@ -64,6 +64,8 @@  typedef struct local_cache_t {
 /* Extra error checks */
 /* #define POOL_ERROR_CHECK */
 
+/* Control use of locks vs. lockless allocation */
+#define USE_BUFFER_POOL_LOCKS 0
 
 #ifdef POOL_USE_TICKETLOCK
 #include <odp_ticketlock.h>
@@ -80,7 +82,15 @@  typedef struct local_cache_t {
 struct pool_entry_s {
 #ifdef POOL_USE_TICKETLOCK
 	odp_ticketlock_t        lock ODP_ALIGNED_CACHE;
+#if USE_BUFFER_POOL_LOCKS
+	odp_ticketlock_t        buf_lock;
+	odp_ticketlock_t        blk_lock;
+#endif
 #else
+#if USE_BUFFER_POOL_LOCKS
+	odp_spinlock_t          buf_lock;
+	odp_spinlock_t          blk_lock;
+#endif
 	odp_spinlock_t          lock ODP_ALIGNED_CACHE;
 #endif
 
@@ -107,8 +117,13 @@  struct pool_entry_s {
 	size_t                  pool_size;
 	uint32_t                buf_align;
 	uint32_t                buf_stride;
+#if USE_BUFFER_POOL_LOCKS
+	odp_buffer_hdr_t       *buf_freelist;
+	void                   *blk_freelist;
+#else
 	odp_atomic_u64_t        buf_freelist;
 	odp_atomic_u64_t        blk_freelist;
+#endif
 	odp_atomic_u32_t        bufcount;
 	odp_atomic_u32_t        blkcount;
 	odp_atomic_u64_t        bufallocs;
@@ -142,6 +157,109 @@  extern void *pool_entry_ptr[];
 #define pool_is_secure(pool) 0
 #endif
 
+#if USE_BUFFER_POOL_LOCKS
+
+/* Allocate one raw block from the pool's block freelist, or return
+ * NULL if no blocks are available.  Lock-based variant: the freelist
+ * pop is serialized by blk_lock; the statistics counters are atomics
+ * updated after the lock is dropped, so counts may briefly lag the
+ * list state.
+ */
+static inline void *get_blk(struct pool_entry_s *pool)
+{
+	void *block;
+
+	POOL_LOCK(&pool->blk_lock);
+
+	/* Pop the head of the singly-linked block freelist */
+	block = pool->blk_freelist;
+	if (block != NULL)
+		pool->blk_freelist = ((odp_buf_blk_t *)block)->next;
+
+	POOL_UNLOCK(&pool->blk_lock);
+
+	if (odp_unlikely(block == NULL)) {
+		odp_atomic_inc_u64(&pool->blkempty);
+	} else {
+		odp_atomic_inc_u64(&pool->blkallocs);
+		odp_atomic_dec_u32(&pool->blkcount);
+	}
+
+	return block;
+}
+
+/* Return one raw block to the pool by pushing it at the head of the
+ * block freelist.  Counter updates are atomic and performed outside
+ * blk_lock.
+ */
+static inline void ret_blk(struct pool_entry_s *pool, void *block)
+{
+	POOL_LOCK(&pool->blk_lock);
+
+	/* Push onto the head of the singly-linked freelist */
+	((odp_buf_blk_t *)block)->next = pool->blk_freelist;
+	pool->blk_freelist = block;
+
+	POOL_UNLOCK(&pool->blk_lock);
+
+	odp_atomic_inc_u32(&pool->blkcount);
+	odp_atomic_inc_u64(&pool->blkfrees);
+}
+
+/* Allocate one buffer header from the pool, or return NULL if the
+ * pool is empty.  The freelist pop is guarded by buf_lock; counter
+ * maintenance and the low-watermark check run after the lock is
+ * released.
+ */
+static inline odp_buffer_hdr_t *get_buf(struct pool_entry_s *pool)
+{
+	odp_buffer_hdr_t *buf;
+
+	POOL_LOCK(&pool->buf_lock);
+
+	/* Pop the head of the buffer freelist */
+	buf = pool->buf_freelist;
+	if (buf != NULL)
+		pool->buf_freelist = buf->next;
+
+	POOL_UNLOCK(&pool->buf_lock);
+
+	if (odp_unlikely(buf == NULL)) {
+		odp_atomic_inc_u64(&pool->bufempty);
+	} else {
+		/* fetch_sub returns the pre-decrement value; subtract 1
+		 * to get the count after this allocation.
+		 */
+		uint64_t bufcount =
+			odp_atomic_fetch_sub_u32(&pool->bufcount, 1) - 1;
+
+		/* Check for low watermark condition.
+		 * NOTE(review): low_wm_assert is tested and set without
+		 * a lock, so two concurrent allocators could both record
+		 * the event — presumably acceptable for statistics;
+		 * confirm intent.
+		 */
+		if (bufcount == pool->low_wm && !pool->low_wm_assert) {
+			pool->low_wm_assert = 1;
+			odp_atomic_inc_u64(&pool->low_wm_count);
+		}
+
+		odp_atomic_inc_u64(&pool->bufallocs);
+		/* Mark buffer allocated */
+		buf->allocator = odp_thread_id();
+	}
+
+	return buf;
+}
+
+/* Free a buffer back to the pool: release any segment blocks it
+ * holds, push the header onto the buffer freelist under buf_lock,
+ * then update statistics and deassert the low-watermark state once
+ * the buffer count recovers to high_wm.
+ */
+static inline void ret_buf(struct pool_entry_s *pool, odp_buffer_hdr_t *buf)
+{
+	buf->allocator = ODP_FREEBUF;  /* Mark buffer free */
+
+	/* Buffers with header-resident data and RAW-type buffers own
+	 * no separate segment blocks; everything else returns its
+	 * segments to the block freelist, scrubbing each one first
+	 * when the buffer or pool is flagged secure.
+	 */
+	if (!buf->flags.hdrdata && buf->type != ODP_BUFFER_TYPE_RAW) {
+		while (buf->segcount > 0) {
+			if (buffer_is_secure(buf) || pool_is_secure(pool))
+				memset(buf->addr[buf->segcount - 1],
+				       0, buf->segsize);
+			ret_blk(pool, buf->addr[--buf->segcount]);
+		}
+		buf->size = 0;
+	}
+
+	POOL_LOCK(&pool->buf_lock);
+
+	/* Push the header onto the head of the buffer freelist */
+	buf->next = pool->buf_freelist;
+	pool->buf_freelist = buf;
+
+	POOL_UNLOCK(&pool->buf_lock);
+
+	/* fetch_add returns the pre-increment value; add 1 for the
+	 * count after this free.
+	 */
+	uint64_t bufcount = odp_atomic_fetch_add_u32(&pool->bufcount, 1) + 1;
+
+	/* Check if low watermark condition should be deasserted */
+	if (bufcount == pool->high_wm && pool->low_wm_assert) {
+		pool->low_wm_assert = 0;
+		odp_atomic_inc_u64(&pool->high_wm_count);
+	}
+
+	odp_atomic_inc_u64(&pool->buffrees);
+}
+
+#else
+
 #define OFFSET_BITS ODP_BITSIZE((uint64_t)ODP_BUFFER_MAX_BUFFERS * \
 				(uint32_t)ODP_CONFIG_PACKET_BUF_LEN_MAX)
 
@@ -292,6 +410,8 @@  static inline void ret_buf(struct pool_entry_s *pool, odp_buffer_hdr_t *buf)
 	odp_atomic_inc_u64(&pool->buffrees);
 }
 
+#endif
+
 static inline void *get_local_buf(local_cache_t *buf_cache,
 				  struct pool_entry_s *pool,
 				  size_t totsize)
diff --git a/platform/linux-generic/odp_buffer_pool.c b/platform/linux-generic/odp_buffer_pool.c
index 5d181bb..b905cdd 100644
--- a/platform/linux-generic/odp_buffer_pool.c
+++ b/platform/linux-generic/odp_buffer_pool.c
@@ -82,6 +82,10 @@  int odp_buffer_pool_init_global(void)
 		/* init locks */
 		pool_entry_t *pool = &pool_tbl->pool[i];
 		POOL_LOCK_INIT(&pool->s.lock);
+#if USE_BUFFER_POOL_LOCKS
+		POOL_LOCK_INIT(&pool->s.buf_lock);
+		POOL_LOCK_INIT(&pool->s.blk_lock);
+#endif
 		pool->s.pool_hdl = pool_index_to_handle(i);
 		pool->s.pool_id = i;
 		pool_entry_ptr[i] = pool;
@@ -91,6 +95,9 @@  int odp_buffer_pool_init_global(void)
 	ODP_DBG("  pool_entry_s size     %zu\n", sizeof(struct pool_entry_s));
 	ODP_DBG("  pool_entry_t size     %zu\n", sizeof(pool_entry_t));
 	ODP_DBG("  odp_buffer_hdr_t size %zu\n", sizeof(odp_buffer_hdr_t));
+#if !USE_BUFFER_POOL_LOCKS
+	ODP_DBG("  offset_bits = %d, tag_bits = %d\n", OFFSET_BITS, TAG_BITS);
+#endif
 	ODP_DBG("\n");
 	return 0;
 }
@@ -287,8 +294,13 @@  odp_buffer_pool_t odp_buffer_pool_create(const char *name,
 
 		pool->s.buf_stride = buf_stride;
 
+#if USE_BUFFER_POOL_LOCKS
+		pool->s.buf_freelist = NULL;
+		pool->s.blk_freelist = NULL;
+#else
 		odp_atomic_store_u64(&pool->s.buf_freelist, NULL_OFFSET);
 		odp_atomic_store_u64(&pool->s.blk_freelist, NULL_OFFSET);
+#endif
 
 		/* Initialization will increment these to their target vals */
 		odp_atomic_store_u32(&pool->s.bufcount, 0);