diff mbox

[v3,7/9] Keystone2: Add initial HW queues support

Message ID 1398246839-15554-8-git-send-email-taras.kondratiuk@linaro.org
State Superseded
Headers show

Commit Message

Taras Kondratiuk April 23, 2014, 9:53 a.m. UTC
Each odp_queue maps to hw queue for now.
odp_queue_enq/deq() translates to hw queue enq/deq.

Signed-off-by: Taras Kondratiuk <taras.kondratiuk@linaro.org>
---
 .../linux-keystone2/include/odp_buffer_internal.h  |    2 +-
 .../include/odp_buffer_pool_internal.h             |    2 +-
 .../linux-keystone2/include/odp_queue_internal.h   |  140 ++++++++++++++++
 platform/linux-keystone2/source/odp_buffer_pool.c  |   11 +-
 platform/linux-keystone2/source/odp_queue.c        |  173 +++++++++-----------
 5 files changed, 226 insertions(+), 102 deletions(-)
 create mode 100644 platform/linux-keystone2/include/odp_queue_internal.h
diff mbox

Patch

diff --git a/platform/linux-keystone2/include/odp_buffer_internal.h b/platform/linux-keystone2/include/odp_buffer_internal.h
index 2e0c2a4..b830e12 100644
--- a/platform/linux-keystone2/include/odp_buffer_internal.h
+++ b/platform/linux-keystone2/include/odp_buffer_internal.h
@@ -58,7 +58,7 @@  typedef union odp_buffer_bits_t {
 typedef struct odp_buffer_hdr_t {
 	Cppi_HostDesc   desc;
 	void		*buf_vaddr;
-	odp_queue_t	free_queue;
+	uint32_t	free_queue;
 	int type;
 	struct odp_buffer_hdr_t *next;       /* next buf in a list */
 	odp_buffer_bits_t        handle;     /* handle */
diff --git a/platform/linux-keystone2/include/odp_buffer_pool_internal.h b/platform/linux-keystone2/include/odp_buffer_pool_internal.h
index 6ee3eb0..a77331c 100644
--- a/platform/linux-keystone2/include/odp_buffer_pool_internal.h
+++ b/platform/linux-keystone2/include/odp_buffer_pool_internal.h
@@ -57,7 +57,7 @@  struct pool_entry_s {
 	size_t                  payload_size;
 	size_t                  payload_align;
 	int                     buf_type;
-	odp_queue_t             free_queue;
+	uint32_t		free_queue;
 
 	uintptr_t               buf_base;
 	size_t                  buf_size;
diff --git a/platform/linux-keystone2/include/odp_queue_internal.h b/platform/linux-keystone2/include/odp_queue_internal.h
new file mode 100644
index 0000000..c7c84d6
--- /dev/null
+++ b/platform/linux-keystone2/include/odp_queue_internal.h
@@ -0,0 +1,140 @@ 
+/* Copyright (c) 2013, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ */
+
+
+/**
+ * @file
+ *
+ * ODP queue - implementation internal
+ */
+
+#ifndef ODP_QUEUE_INTERNAL_H_
+#define ODP_QUEUE_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp_queue.h>
+#include <odp_buffer_internal.h>
+#include <odp_packet_io.h>
+#include <odp_align.h>
+#include <configs/odp_config_platform.h>
+
+
+#define USE_TICKETLOCK
+
+#ifdef USE_TICKETLOCK
+#include <odp_ticketlock.h>
+#else
+#include <odp_spinlock.h>
+#endif
+
+#define QUEUE_MULTI_MAX 8
+
+#define QUEUE_STATUS_FREE     0
+#define QUEUE_STATUS_READY    1
+#define QUEUE_STATUS_NOTSCHED 2
+#define QUEUE_STATUS_SCHED    3
+
+/* forward declaration */
+union queue_entry_u;
+
+typedef int (*enq_func_t)(union queue_entry_u *, odp_buffer_hdr_t *);
+typedef	odp_buffer_hdr_t *(*deq_func_t)(union queue_entry_u *);
+
+typedef int (*enq_multi_func_t)(union queue_entry_u *,
+				odp_buffer_hdr_t **, int);
+typedef	int (*deq_multi_func_t)(union queue_entry_u *,
+				odp_buffer_hdr_t **, int);
+
+struct queue_entry_s {
+#ifdef USE_TICKETLOCK
+	odp_ticketlock_t  lock ODP_ALIGNED_CACHE;
+#else
+	odp_spinlock_t    lock ODP_ALIGNED_CACHE;
+#endif
+
+	odp_buffer_hdr_t *head;
+	odp_buffer_hdr_t *tail;
+	int               status;
+
+	enq_func_t       enqueue ODP_ALIGNED_CACHE;
+	deq_func_t       dequeue;
+	enq_multi_func_t enqueue_multi;
+	deq_multi_func_t dequeue_multi;
+
+	odp_queue_t       handle;
+	odp_buffer_t      sched_buf;
+	odp_queue_type_t  type;
+	odp_queue_param_t param;
+	odp_pktio_t       pktin;
+	odp_pktio_t       pktout;
+	uint32_t          hw_queue;
+	char              name[ODP_QUEUE_NAME_LEN];
+};
+
+typedef union queue_entry_u {
+	struct queue_entry_s s;
+	uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(struct queue_entry_s))];
+} queue_entry_t;
+
+
+queue_entry_t *get_qentry(uint32_t queue_id);
+
+int queue_enq(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr);
+odp_buffer_hdr_t *queue_deq(queue_entry_t *queue);
+
+int queue_enq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num);
+int queue_deq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num);
+
+void queue_lock(queue_entry_t *queue);
+void queue_unlock(queue_entry_t *queue);
+
+odp_buffer_t queue_sched_buf(odp_queue_t queue);
+int queue_sched_atomic(odp_queue_t handle);
+
+static inline uint32_t queue_to_id(odp_queue_t handle)
+{
+	return handle - 1;
+}
+
+static inline odp_queue_t queue_from_id(uint32_t queue_id)
+{
+	return queue_id + 1;
+}
+
+static inline queue_entry_t *queue_to_qentry(odp_queue_t handle)
+{
+	uint32_t queue_id;
+
+	queue_id = queue_to_id(handle);
+	return get_qentry(queue_id);
+}
+
+static inline void _ti_hw_queue_push_desc(uint32_t hw_queue,
+						odp_buffer_hdr_t *buf_hdr)
+{
+	ti_em_osal_hw_queue_push_size(hw_queue,
+				      (void *)&buf_hdr->desc,
+				      sizeof(Cppi_HostDesc),
+				      TI_EM_MEM_PUBLIC_DESC);
+}
+
+static inline odp_buffer_hdr_t *_ti_hw_queue_pop_desc(uint32_t hw_queue)
+{
+	return ti_em_osal_hw_queue_pop(hw_queue,
+					TI_EM_MEM_PUBLIC_DESC);
+}
+
+odp_queue_t _odp_queue_create(const char *name, odp_queue_type_t type,
+			     odp_queue_param_t *param, uint32_t hw_queue);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-keystone2/source/odp_buffer_pool.c b/platform/linux-keystone2/source/odp_buffer_pool.c
index 7e10689..9a2f6cb 100644
--- a/platform/linux-keystone2/source/odp_buffer_pool.c
+++ b/platform/linux-keystone2/source/odp_buffer_pool.c
@@ -18,6 +18,7 @@ 
 #include <odp_hints.h>
 #include <odp_debug.h>
 #include <odp_sync.h>
+#include <odp_queue_internal.h>
 
 #include <string.h>
 #include <stdlib.h>
@@ -218,10 +219,7 @@  static int link_bufs(pool_entry_t *pool)
 				   &tag);
 
 		odp_sync_stores();
-		ti_em_osal_hw_queue_push_size(pool->s.free_queue,
-					      (void *)hdr,
-					      sizeof(Cppi_HostDesc),
-					      TI_EM_MEM_PUBLIC_DESC);
+		_ti_hw_queue_push_desc(pool->s.free_queue, hdr);
 		buf_addr.v += buf_size;
 		buf_addr.p += buf_size;
 		desc_index++;
@@ -303,10 +301,7 @@  odp_buffer_t odp_buffer_alloc(odp_buffer_pool_t pool_id)
 void odp_buffer_free(odp_buffer_t buf)
 {
 	odp_buffer_hdr_t *hdr = odp_buf_to_hdr(buf);
-	ti_em_osal_hw_queue_push_size(hdr->free_queue,
-				      (void *)hdr,
-				      sizeof(Cppi_HostDesc),
-				      TI_EM_MEM_PUBLIC_DESC);
+	_ti_hw_queue_push_desc(hdr->free_queue, hdr);
 }
 
 void odp_buffer_pool_print(odp_buffer_pool_t pool_id)
diff --git a/platform/linux-keystone2/source/odp_queue.c b/platform/linux-keystone2/source/odp_queue.c
index 6248edb..8e6c2fe 100644
--- a/platform/linux-keystone2/source/odp_queue.c
+++ b/platform/linux-keystone2/source/odp_queue.c
@@ -15,6 +15,7 @@ 
 #include <odp_shared_memory.h>
 #include <odp_schedule_internal.h>
 #include <odp_config.h>
+#include <configs/odp_config_platform.h>
 #include <odp_packet_io_internal.h>
 #include <odp_packet_io_queue.h>
 #include <odp_debug.h>
@@ -110,6 +111,12 @@  int odp_queue_init_global(void)
 		queue_entry_t *queue = get_qentry(i);
 		LOCK_INIT(&queue->s.lock);
 		queue->s.handle = queue_from_id(i);
+		queue->s.status = QUEUE_STATUS_FREE;
+		/*
+		 * TODO: HW queue is mapped directly to a queue_entry_t
+		 * instance. It may be worth allocating the HW queue on open.
+		 */
+		queue->s.hw_queue = TI_ODP_PUBLIC_QUEUE_BASE_IDX + i;
 	}
 
 	ODP_DBG("done\n");
@@ -141,8 +148,8 @@  odp_schedule_sync_t odp_queue_sched_type(odp_queue_t handle)
 	return queue->s.param.sched.sync;
 }
 
-odp_queue_t odp_queue_create(const char *name, odp_queue_type_t type,
-			     odp_queue_param_t *param)
+odp_queue_t _odp_queue_create(const char *name, odp_queue_type_t type,
+			      odp_queue_param_t *param, uint32_t hw_queue)
 {
 	uint32_t i;
 	queue_entry_t *queue;
@@ -156,6 +163,18 @@  odp_queue_t odp_queue_create(const char *name, odp_queue_type_t type,
 
 		LOCK(&queue->s.lock);
 		if (queue->s.status == QUEUE_STATUS_FREE) {
+			if (hw_queue)
+				queue->s.hw_queue = hw_queue;
+			/*
+			 * Don't open hw queue if its number is specified
+			 * as it is most probably opened by Linux kernel
+			 */
+			else if (ti_em_osal_hw_queue_open(queue->s.hw_queue)
+				 != EM_OK) {
+				UNLOCK(&queue->s.lock);
+				continue;
+			}
+
 			queue_init(queue, name, type, param);
 
 			if (type == ODP_QUEUE_TYPE_SCHED ||
@@ -188,6 +207,12 @@  odp_queue_t odp_queue_create(const char *name, odp_queue_type_t type,
 	return handle;
 }
 
+odp_queue_t odp_queue_create(const char *name, odp_queue_type_t type,
+			     odp_queue_param_t *param)
+{
+	return _odp_queue_create(name, type, param, 0);
+}
+
 
 odp_buffer_t queue_sched_buf(odp_queue_t handle)
 {
@@ -232,65 +257,51 @@  odp_queue_t odp_queue_lookup(const char *name)
 
 int queue_enq(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr)
 {
-	int sched = 0;
+	_ti_hw_queue_push_desc(queue->s.hw_queue, buf_hdr);
 
-	LOCK(&queue->s.lock);
-	if (queue->s.head == NULL) {
-		/* Empty queue */
-		queue->s.head = buf_hdr;
-		queue->s.tail = buf_hdr;
-		buf_hdr->next = NULL;
-	} else {
-		queue->s.tail->next = buf_hdr;
-		queue->s.tail = buf_hdr;
-		buf_hdr->next = NULL;
-	}
-
-	if (queue->s.status == QUEUE_STATUS_NOTSCHED) {
-		queue->s.status = QUEUE_STATUS_SCHED;
-		sched = 1; /* retval: schedule queue */
+	if (queue->s.type == ODP_QUEUE_TYPE_SCHED) {
+		int sched = 0;
+		LOCK(&queue->s.lock);
+		if (queue->s.status == QUEUE_STATUS_NOTSCHED) {
+			queue->s.status = QUEUE_STATUS_SCHED;
+			sched = 1;
+		}
+		UNLOCK(&queue->s.lock);
+		/* Add queue to scheduling */
+		if (sched)
+			odp_schedule_queue(queue->s.handle,
+					   queue->s.param.sched.prio);
 	}
-	UNLOCK(&queue->s.lock);
-
-	/* Add queue to scheduling */
-	if (sched == 1)
-		odp_schedule_queue(queue->s.handle, queue->s.param.sched.prio);
-
 	return 0;
 }
 
 
 int queue_enq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num)
 {
-	int sched = 0;
 	int i;
-	odp_buffer_hdr_t *tail;
 
-	for (i = 0; i < num - 1; i++)
-		buf_hdr[i]->next = buf_hdr[i+1];
-
-	tail = buf_hdr[num-1];
-	buf_hdr[num-1]->next = NULL;
-
-	LOCK(&queue->s.lock);
-	/* Empty queue */
-	if (queue->s.head == NULL)
-		queue->s.head = buf_hdr[0];
-	else
-		queue->s.tail->next = buf_hdr[0];
-
-	queue->s.tail = tail;
-
-	if (queue->s.status == QUEUE_STATUS_NOTSCHED) {
-		queue->s.status = QUEUE_STATUS_SCHED;
-		sched = 1; /* retval: schedule queue */
+	/*
+	 * TODO: Should this series of buffers be enqueued atomically?
+	 * Can another buffer be pushed into this queue in the middle?
+	 */
+	for (i = 0; i < num; i++) {
+		/* TODO: Implement multi enqueue at a lower level */
+		_ti_hw_queue_push_desc(queue->s.hw_queue, buf_hdr[i]);
 	}
-	UNLOCK(&queue->s.lock);
-
-	/* Add queue to scheduling */
-	if (sched == 1)
-		odp_schedule_queue(queue->s.handle, queue->s.param.sched.prio);
 
+	if (queue->s.type == ODP_QUEUE_TYPE_SCHED) {
+		int sched = 0;
+		LOCK(&queue->s.lock);
+		if (queue->s.status == QUEUE_STATUS_NOTSCHED) {
+			queue->s.status = QUEUE_STATUS_SCHED;
+			sched = 1;
+		}
+		UNLOCK(&queue->s.lock);
+		/* Add queue to scheduling */
+		if (sched)
+			odp_schedule_queue(queue->s.handle,
+					   queue->s.param.sched.prio);
+	}
 	return 0;
 }
 
@@ -327,63 +338,41 @@  int odp_queue_enq(odp_queue_t handle, odp_buffer_t buf)
 
 odp_buffer_hdr_t *queue_deq(queue_entry_t *queue)
 {
-	odp_buffer_hdr_t *buf_hdr = NULL;
+	odp_buffer_hdr_t *buf_hdr;
 
-	LOCK(&queue->s.lock);
+	buf_hdr = (odp_buffer_hdr_t *)ti_em_osal_hw_queue_pop(queue->s.hw_queue,
+			TI_EM_MEM_PUBLIC_DESC);
 
-	if (queue->s.head == NULL) {
-		/* Already empty queue */
-		if (queue->s.status == QUEUE_STATUS_SCHED &&
-		    queue->s.type != ODP_QUEUE_TYPE_PKTIN)
+	if (!buf_hdr && queue->s.type == ODP_QUEUE_TYPE_SCHED) {
+		LOCK(&queue->s.lock);
+		if (!buf_hdr && queue->s.status == QUEUE_STATUS_SCHED)
 			queue->s.status = QUEUE_STATUS_NOTSCHED;
-	} else {
-		buf_hdr       = queue->s.head;
-		queue->s.head = buf_hdr->next;
-		buf_hdr->next = NULL;
-
-		if (queue->s.head == NULL) {
-			/* Queue is now empty */
-			queue->s.tail = NULL;
-		}
+		UNLOCK(&queue->s.lock);
 	}
 
-	UNLOCK(&queue->s.lock);
-
 	return buf_hdr;
 }
 
 
 int queue_deq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num)
 {
-	int i = 0;
-
-	LOCK(&queue->s.lock);
-
-	if (queue->s.head == NULL) {
-		/* Already empty queue */
-		if (queue->s.status == QUEUE_STATUS_SCHED &&
-		    queue->s.type != ODP_QUEUE_TYPE_PKTIN)
-			queue->s.status = QUEUE_STATUS_NOTSCHED;
-	} else {
-		odp_buffer_hdr_t *hdr = queue->s.head;
-
-		for (; i < num && hdr; i++) {
-			buf_hdr[i]       = hdr;
-			/* odp_prefetch(hdr->addr); */
-			hdr              = hdr->next;
-			buf_hdr[i]->next = NULL;
-		}
-
-		queue->s.head = hdr;
-
-		if (hdr == NULL) {
-			/* Queue is now empty */
-			queue->s.tail = NULL;
+	int i;
+	for (i = 0; i < num; i++) {
+		/* TODO: Implement multi dequeue at a lower level */
+		buf_hdr[i] = (odp_buffer_hdr_t *)ti_em_osal_hw_queue_pop(
+				     queue->s.hw_queue,
+				     TI_EM_MEM_PUBLIC_DESC);
+		if (!buf_hdr[i]) {
+			if (queue->s.type != ODP_QUEUE_TYPE_SCHED)
+				break;
+			LOCK(&queue->s.lock);
+			if (queue->s.status == QUEUE_STATUS_SCHED)
+				queue->s.status = QUEUE_STATUS_NOTSCHED;
+			UNLOCK(&queue->s.lock);
+			break;
 		}
 	}
 
-	UNLOCK(&queue->s.lock);
-
 	return i;
 }