diff mbox

[v3,6/9] Keystone2: Add initial HW buffer management

Message ID 1398246839-15554-7-git-send-email-taras.kondratiuk@linaro.org
State Superseded
Headers show

Commit Message

Taras Kondratiuk April 23, 2014, 9:53 a.m. UTC
Buffer pools are managed as a hw queue filled with free buffers.
Allocating buffer -> dequeue from pool's free queue.
Freeing buffer    -> enqueue to pool's free queue.

This approach has an important limitation - all free buffers are
linked to a CPPI descriptor, so the max number of free buffers is
limited to the max number of descriptors. If fast internal linking
RAM is used, then the max number of descriptors is 16k.
Applications may create pools with much more buffers.
As a temporary solution max pool size is limited to 2000 buffers,
but buffer management should be made more flexible.

Signed-off-by: Taras Kondratiuk <taras.kondratiuk@linaro.org>
---
 .../include/configs/odp_config_platform.h          |    1 -
 .../linux-keystone2/include/odp_buffer_internal.h  |   96 +++++
 .../include/odp_buffer_pool_internal.h             |   80 ++++
 platform/linux-keystone2/include/plat/odp_buffer.h |   11 +
 platform/linux-keystone2/source/odp_buffer.c       |   91 ++++
 platform/linux-keystone2/source/odp_buffer_pool.c  |  315 ++++++++++++++
 platform/linux-keystone2/source/odp_init.c         |    4 +-
 platform/linux-keystone2/source/odp_packet.c       |  337 +++++++++++++++
 platform/linux-keystone2/source/odp_queue.c        |  435 ++++++++++++++++++++
 9 files changed, 1368 insertions(+), 2 deletions(-)
 create mode 100644 platform/linux-keystone2/include/odp_buffer_internal.h
 create mode 100644 platform/linux-keystone2/include/odp_buffer_pool_internal.h
 create mode 100644 platform/linux-keystone2/include/plat/odp_buffer.h
 create mode 100644 platform/linux-keystone2/source/odp_buffer.c
 create mode 100644 platform/linux-keystone2/source/odp_buffer_pool.c
 create mode 100644 platform/linux-keystone2/source/odp_packet.c
 create mode 100644 platform/linux-keystone2/source/odp_queue.c
diff mbox

Patch

diff --git a/platform/linux-keystone2/include/configs/odp_config_platform.h b/platform/linux-keystone2/include/configs/odp_config_platform.h
index bcad278..3a7da8a 100644
--- a/platform/linux-keystone2/include/configs/odp_config_platform.h
+++ b/platform/linux-keystone2/include/configs/odp_config_platform.h
@@ -40,7 +40,6 @@ 
 #error "platform not defined or unsupported!"
 #endif
 
-#define TI_ODP_PUBLIC_DESC_SIZE		(64u)
 #define TI_ODP_PUBLIC_DESC_NUM		(4096u)
 #define TI_ODP_REGION_NUM		(2)  /* local regions are not used on Linux */
 
diff --git a/platform/linux-keystone2/include/odp_buffer_internal.h b/platform/linux-keystone2/include/odp_buffer_internal.h
new file mode 100644
index 0000000..2e0c2a4
--- /dev/null
+++ b/platform/linux-keystone2/include/odp_buffer_internal.h
@@ -0,0 +1,96 @@ 
+/* Copyright (c) 2013, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ */
+
+
+/**
+ * @file
+ *
+ * ODP buffer descriptor - implementation internal
+ */
+
+#ifndef ODP_BUFFER_INTERNAL_H_
+#define ODP_BUFFER_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp_std_types.h>
+#include <odp_atomic.h>
+#include <odp_buffer_pool.h>
+#include <odp_buffer.h>
+#include <odp_queue.h>
+#include <odp_debug.h>
+#include <odp_align.h>
+
+#include <event_machine_macros.h>
+#include <event_machine_types.h>
+#include <event_machine_group.h>
+#include <event_machine_hw_macros.h>
+#include <event_machine_hw_types.h>
+#include <event_machine_hw_ti_macros.h>
+#include <event_machine_hw_ti_types.h>
+#include <ti_em_osal_cppi.h>
+#include <src/event_machine_hwpform.h>
+
+/* TODO: move these to correct files */
+
+typedef uintptr_t odp_phys_addr_t;
+
+#define ODP_BUFFER_POOL_BITS   4
+#define ODP_BUFFER_INDEX_BITS  (32 - ODP_BUFFER_POOL_BITS)
+#define ODP_BUFFER_MAX_POOLS   (1 << ODP_BUFFER_POOL_BITS)
+#define ODP_BUFFER_MAX_BUFFERS (1 << ODP_BUFFER_INDEX_BITS)
+
+typedef union odp_buffer_bits_t {
+	uint32_t     u32;
+	odp_buffer_t handle;
+
+	struct {
+		uint32_t pool:ODP_BUFFER_POOL_BITS;
+		uint32_t index:ODP_BUFFER_INDEX_BITS;
+	};
+} odp_buffer_bits_t;
+
+typedef struct odp_buffer_hdr_t {
+	Cppi_HostDesc   desc;
+	void		*buf_vaddr;
+	odp_queue_t	free_queue;
+	int type;
+	struct odp_buffer_hdr_t *next;       /* next buf in a list */
+	odp_buffer_bits_t        handle;     /* handle */
+} odp_buffer_hdr_t;
+
+
+/*
+ * Chunk of buffers (in single pool)
+ */
+
+ODP_ASSERT(sizeof(odp_buffer_hdr_t) <= ODP_CACHE_LINE_SIZE*2,
+	   ODP_BUFFER_HDR_T__SIZE_ERROR);
+
+static inline odp_buffer_hdr_t *odp_buf_to_hdr(odp_buffer_t buf)
+{
+	return (odp_buffer_hdr_t *)buf;
+}
+static inline odp_buffer_t hdr_to_odp_buf(odp_buffer_hdr_t *hdr)
+{
+	return (odp_buffer_t)hdr;
+}
+
+extern odp_buffer_pool_t odp_buf_to_pool(odp_buffer_t buf);
+
+
+int odp_buffer_snprint(char *str, size_t n, odp_buffer_t buf);
+
+void odp_buffer_copy_scatter(odp_buffer_t buf_dst, odp_buffer_t buf_src);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-keystone2/include/odp_buffer_pool_internal.h b/platform/linux-keystone2/include/odp_buffer_pool_internal.h
new file mode 100644
index 0000000..6ee3eb0
--- /dev/null
+++ b/platform/linux-keystone2/include/odp_buffer_pool_internal.h
@@ -0,0 +1,80 @@ 
+/* Copyright (c) 2013, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ */
+
+
+/**
+ * @file
+ *
+ * ODP buffer pool - internal header
+ */
+
+#ifndef ODP_BUFFER_POOL_INTERNAL_H_
+#define ODP_BUFFER_POOL_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp_std_types.h>
+#include <odp_buffer_pool.h>
+#include <odp_buffer_internal.h>
+#include <odp_align.h>
+#include <odp_hints.h>
+#include <odp_config.h>
+#include <odp_debug.h>
+
+/* Use ticketlock instead of spinlock */
+#define POOL_USE_TICKETLOCK
+
+/* Extra error checks */
+/* #define POOL_ERROR_CHECK */
+
+
+#ifdef POOL_USE_TICKETLOCK
+#include <odp_ticketlock.h>
+#else
+#include <odp_spinlock.h>
+#endif
+
+struct pool_entry_s {
+#ifdef POOL_USE_TICKETLOCK
+	odp_ticketlock_t        lock ODP_ALIGNED_CACHE;
+#else
+	odp_spinlock_t          lock ODP_ALIGNED_CACHE;
+#endif
+
+	uint64_t                free_bufs;
+	char                    name[ODP_BUFFER_POOL_NAME_LEN];
+
+	odp_buffer_pool_t       pool ODP_ALIGNED_CACHE;
+	uint64_t                num_bufs;
+	void                   *pool_base_addr;
+	uintptr_t               pool_base_paddr;
+	uint64_t                pool_size;
+	size_t                  payload_size;
+	size_t                  payload_align;
+	int                     buf_type;
+	odp_queue_t             free_queue;
+
+	uintptr_t               buf_base;
+	size_t                  buf_size;
+	size_t                  buf_offset;
+	size_t                  hdr_size;
+};
+
+extern void *pool_entry_ptr[];
+
+
+static inline void *get_pool_entry(odp_buffer_pool_t pool_id)
+{
+	return pool_entry_ptr[pool_id];
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-keystone2/include/plat/odp_buffer.h b/platform/linux-keystone2/include/plat/odp_buffer.h
new file mode 100644
index 0000000..10790c8
--- /dev/null
+++ b/platform/linux-keystone2/include/plat/odp_buffer.h
@@ -0,0 +1,11 @@ 
+/*
+ * No include guard.
+ * This file should be included only in one corresponding top level header.
+ */
+
+/**
+ * ODP buffer
+ */
+typedef uint32_t odp_buffer_t;
+
+#define ODP_BUFFER_INVALID (0) /**< Invalid buffer */
diff --git a/platform/linux-keystone2/source/odp_buffer.c b/platform/linux-keystone2/source/odp_buffer.c
new file mode 100644
index 0000000..7a50aa2
--- /dev/null
+++ b/platform/linux-keystone2/source/odp_buffer.c
@@ -0,0 +1,91 @@ 
+/* Copyright (c) 2013, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ */
+
+#include <odp_buffer.h>
+#include <odp_buffer_internal.h>
+#include <odp_buffer_pool_internal.h>
+#include <ti_em_rh.h>
+
+void *odp_buffer_addr(odp_buffer_t buf)
+{
+	return odp_buf_to_hdr(buf)->buf_vaddr;
+}
+
+size_t odp_buffer_size(odp_buffer_t buf)
+{
+	return (size_t)odp_buf_to_hdr(buf)->desc.origBufferLen;
+}
+
+int odp_buffer_type(odp_buffer_t buf)
+{
+	return odp_buf_to_hdr(buf)->type;
+}
+
+int odp_buffer_is_scatter(odp_buffer_t buf)
+{
+	return (odp_buf_to_hdr(buf)->desc.nextBDPtr) ? 1 : 0;
+}
+
+
+int odp_buffer_is_valid(odp_buffer_t buf)
+{
+	return (buf != ODP_BUFFER_INVALID);
+}
+
+
+int odp_buffer_snprint(char *str, size_t n, odp_buffer_t buf)
+{
+	odp_buffer_hdr_t *desc;
+	int len = 0;
+
+	if (!odp_buffer_is_valid(buf)) {
+		printf("Buffer is not valid.\n");
+		return len;
+	}
+
+	desc = odp_buf_to_hdr(buf);
+
+	len += snprintf(&str[len], n-len,
+			"Buffer\n");
+	len += snprintf(&str[len], n-len,
+			"  desc_vaddr  %p\n",      desc);
+	len += snprintf(&str[len], n-len,
+			"  buf_vaddr   %p\n",      desc->buf_vaddr);
+	len += snprintf(&str[len], n-len,
+			"  buf_paddr_o 0x%x\n",    desc->desc.origBuffPtr);
+	len += snprintf(&str[len], n-len,
+			"  buf_paddr   0x%x\n",    desc->desc.buffPtr);
+	len += snprintf(&str[len], n-len,
+			"  pool        %i\n",      odp_buf_to_pool(buf));
+	len += snprintf(&str[len], n-len,
+			"  free_queue  %u\n",      desc->free_queue);
+
+	len += snprintf(&str[len], n-len, "\n");
+
+	ti_em_rh_dump_mem(desc, sizeof(*desc), "Descriptor dump");
+	ti_em_rh_dump_mem(desc->buf_vaddr, 64, "Buffer start");
+
+	return len;
+}
+
+
+void odp_buffer_print(odp_buffer_t buf)
+{
+	int max_len = 512;
+	char str[max_len];
+	int len;
+
+	len = odp_buffer_snprint(str, max_len-1, buf);
+	str[len] = 0;
+
+	printf("\n%s\n", str);
+}
+
+void odp_buffer_copy_scatter(odp_buffer_t buf_dst, odp_buffer_t buf_src)
+{
+	(void)buf_dst;
+	(void)buf_src;
+}
diff --git a/platform/linux-keystone2/source/odp_buffer_pool.c b/platform/linux-keystone2/source/odp_buffer_pool.c
new file mode 100644
index 0000000..7e10689
--- /dev/null
+++ b/platform/linux-keystone2/source/odp_buffer_pool.c
@@ -0,0 +1,315 @@ 
+/* Copyright (c) 2013, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ */
+
+#include <odp_std_types.h>
+#include <odp_buffer_pool.h>
+#include <odp_buffer_pool_internal.h>
+#include <odp_buffer_internal.h>
+#include <odp_packet_internal.h>
+#include <odp_shared_memory.h>
+#include <odp_shared_memory_internal.h>
+#include <odp_align.h>
+#include <odp_internal.h>
+#include <odp_config.h>
+#include <configs/odp_config_platform.h>
+#include <odp_hints.h>
+#include <odp_debug.h>
+#include <odp_sync.h>
+
+#include <string.h>
+#include <stdlib.h>
+#include <ti_em_rh.h>
+
+#ifdef POOL_USE_TICKETLOCK
+#include <odp_ticketlock.h>
+#define LOCK(a)      odp_ticketlock_lock(a)
+#define UNLOCK(a)    odp_ticketlock_unlock(a)
+#define LOCK_INIT(a) odp_ticketlock_init(a)
+#else
+#include <odp_spinlock.h>
+#define LOCK(a)      odp_spinlock_lock(a)
+#define UNLOCK(a)    odp_spinlock_unlock(a)
+#define LOCK_INIT(a) odp_spinlock_init(a)
+#endif
+
+
+#define NULL_INDEX ((uint32_t)-1)
+
+
+
+
+typedef union pool_entry_u {
+	struct pool_entry_s s;
+
+	uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(struct pool_entry_s))];
+
+} pool_entry_t;
+
+
+typedef struct pool_table_t {
+	pool_entry_t pool[ODP_CONFIG_BUFFER_POOLS];
+
+} pool_table_t;
+
+typedef struct {
+	uintptr_t p;
+	uintptr_t v;
+} pvaddr_t;
+
+/* The pool table */
+static pool_table_t *pool_tbl;
+
+/* Pool entry pointers (for inlining) */
+void *pool_entry_ptr[ODP_CONFIG_BUFFER_POOLS];
+
+static uint32_t ti_odp_alloc_public_desc(uint32_t num)
+{
+	static uint32_t free_desc_id;
+	uint32_t tmp;
+
+	if (free_desc_id + num > TI_ODP_PUBLIC_DESC_NUM)
+		return -1;
+
+	tmp = __sync_fetch_and_add(&free_desc_id, num);
+
+	if (tmp + num > TI_ODP_PUBLIC_DESC_NUM) {
+		__sync_fetch_and_sub(&free_desc_id, num);
+		return -1;
+	}
+	return tmp;
+}
+
+odp_buffer_pool_t odp_buf_to_pool(odp_buffer_t buf)
+{
+	odp_buffer_hdr_t *hdr = odp_buf_to_hdr(buf);
+	pool_entry_t *pool = get_pool_entry(0);
+	return hdr->free_queue - pool->s.free_queue;
+}
+
+int odp_buffer_pool_init_global(void)
+{
+	odp_buffer_pool_t i;
+
+	pool_tbl = odp_shm_reserve("odp_buffer_pools",
+				   sizeof(pool_table_t),
+				   sizeof(pool_entry_t));
+
+	if (pool_tbl == NULL)
+		return -1;
+
+	memset(pool_tbl, 0, sizeof(pool_table_t));
+
+	for (i = 0; i < ODP_CONFIG_BUFFER_POOLS; i++) {
+		/* init locks */
+		pool_entry_t *pool = &pool_tbl->pool[i];
+		LOCK_INIT(&pool->s.lock);
+		pool->s.pool = i;
+		pool_entry_ptr[i] = pool;
+		pool->s.free_queue = TI_ODP_FREE_QUEUE_BASE_IDX + i;
+	}
+
+	ODP_DBG("\nBuffer pool init global\n");
+	ODP_DBG("  pool_entry_s size     %zu\n", sizeof(struct pool_entry_s));
+	ODP_DBG("  pool_entry_t size     %zu\n", sizeof(pool_entry_t));
+	ODP_DBG("\n");
+	return 0;
+}
+
+#define MAX_BUFS_PER_POOL	2000
+
+static int link_bufs(pool_entry_t *pool)
+{
+	size_t buf_size, buf_align;
+	uint64_t pool_size;
+	uintptr_t pool_base;
+	pvaddr_t buf_addr, desc_addr;
+	uint32_t desc_index;
+	uint32_t num_bufs, i;
+
+	buf_align  = pool->s.payload_align;
+	buf_size   = ODP_ALIGN_ROUNDUP(pool->s.payload_size, buf_align);
+	pool_size  = pool->s.pool_size;
+	pool_base  = (uintptr_t) pool->s.pool_base_addr;
+	/* First buffer */
+	buf_addr.v = ODP_ALIGN_ROUNDUP(pool_base, buf_align);
+	buf_addr.p = _odp_shm_get_paddr((void *)buf_addr.v);
+	pool->s.buf_base = buf_addr.v;
+
+	num_bufs   = (pool_size - (buf_addr.v - pool_base)) / buf_size;
+	/*
+	 * FIXME: Currently the number of HW descriptors is limited,
+	 *        so temporarily limit the max number of buffers per pool
+	 *        to be able to run ODP example apps.
+	 *        Descriptor management has to be made more intelligent
+	 *        to remove this limitation.
+	 */
+	if (num_bufs > MAX_BUFS_PER_POOL) {
+		ODP_DBG("Limiting number of buffer in %s from %d to %d\n",
+			pool->s.name, num_bufs, MAX_BUFS_PER_POOL);
+		num_bufs = MAX_BUFS_PER_POOL;
+	}
+
+	desc_index = ti_odp_alloc_public_desc(num_bufs);
+
+	ODP_DBG("%s: buf_size: %zu, buf_align: %zu\n", __func__,
+		buf_size, buf_align);
+	ODP_DBG("%s: pool_size: %llu, pool_base: 0x%p\n", __func__,
+		pool_size, (void *)pool_base);
+	ODP_DBG("%s: buf_addr.v: 0x%p, buf_addr.p: 0x%p\n", __func__,
+		(void *)buf_addr.v, (void *)buf_addr.p);
+	ODP_DBG("%s: num_bufs: %u, desc_index: %u\n", __func__,
+		num_bufs, desc_index);
+
+	/* FIXME: Need to define error codes somewhere */
+	if (desc_index == (uint32_t)-1) {
+		ODP_ERR("Failed to allocate %u descriptors for pool %s\n",
+			num_bufs, pool->s.name);
+		return -1;
+	}
+
+	if (ti_em_osal_hw_queue_open(pool->s.free_queue) != EM_OK) {
+		ODP_ERR("Failed to open HW queue %u\n", pool->s.free_queue);
+		return -1;
+	}
+
+	for (i = 0; i < num_bufs; i++) {
+		Cppi_DescTag     tag;
+		odp_buffer_hdr_t *hdr;
+
+		/*
+		 * TODO: Need to get descriptor size here and shift
+		 * descriptor address, but not query it on every iteration.
+		 */
+		desc_addr.v = (uintptr_t)ti_em_rh_public_desc_addr(desc_index,
+				&desc_addr.p);
+		hdr = (odp_buffer_hdr_t *)desc_addr.v;
+		memset((void *)hdr, 0, sizeof(*hdr));
+
+		hdr->free_queue = pool->s.free_queue;
+		hdr->buf_vaddr  = (void *)buf_addr.v;
+
+		/* Set defaults in descriptor */
+		hdr->desc.descInfo = (Cppi_DescType_HOST << 30) |
+				     (Cppi_PSLoc_PS_IN_DESC << 22) |
+				     (buf_size & 0xFFFF);
+		hdr->desc.packetInfo =
+			(((uint32_t) Cppi_EPIB_EPIB_PRESENT) << 31) |
+			(0x2 << 16) |
+			(((uint32_t) Cppi_ReturnPolicy_RETURN_BUFFER) << 15) |
+			(pool->s.free_queue & 0x3FFF);
+		hdr->desc.origBuffPtr   = buf_addr.p;
+		hdr->desc.buffPtr       = buf_addr.p;
+		hdr->desc.origBufferLen = buf_size;
+		hdr->desc.buffLen       = buf_size;
+
+		/* TODO: pslen is set to 0, but should be configurable */
+		ti_em_cppi_set_pslen(Cppi_DescType_HOST,
+				     (Cppi_Desc *)(hdr), 0);
+
+		tag.srcTagHi  = 0x00;
+		tag.srcTagLo  = 0xFF;
+		tag.destTagHi = 0x00;
+		tag.destTagLo = 0x00;
+		ti_em_cppi_set_tag(Cppi_DescType_HOST,
+				   (Cppi_Desc *)(hdr),
+				   &tag);
+
+		odp_sync_stores();
+		ti_em_osal_hw_queue_push_size(pool->s.free_queue,
+					      (void *)hdr,
+					      sizeof(Cppi_HostDesc),
+					      TI_EM_MEM_PUBLIC_DESC);
+		buf_addr.v += buf_size;
+		buf_addr.p += buf_size;
+		desc_index++;
+	}
+
+	return 0;
+}
+
+odp_buffer_pool_t odp_buffer_pool_create(const char *name,
+		void *base_addr, uint64_t size,
+		size_t buf_size, size_t buf_align,
+		int buf_type)
+{
+	odp_buffer_pool_t i;
+	pool_entry_t *pool;
+	odp_buffer_pool_t pool_id = ODP_BUFFER_POOL_INVALID;
+
+	for (i = 0; i < ODP_CONFIG_BUFFER_POOLS; i++) {
+		pool = get_pool_entry(i);
+
+		LOCK(&pool->s.lock);
+
+		if (pool->s.buf_base == 0) {
+			/* found free pool */
+			ODP_DBG("%s: found free pool id: %u for %s\n", __func__,
+				i, name);
+			strncpy(pool->s.name, name,
+				ODP_BUFFER_POOL_NAME_LEN - 1);
+			pool->s.name[ODP_BUFFER_POOL_NAME_LEN - 1] = 0;
+			pool->s.pool_base_addr = base_addr;
+			pool->s.pool_size      = size;
+			pool->s.payload_size   = buf_size;
+			pool->s.payload_align  = buf_align;
+			pool->s.buf_type       = buf_type;
+			pool->s.buf_base = (uintptr_t)ODP_ALIGN_ROUNDUP_PTR(
+						   base_addr, buf_align);
+
+			if (link_bufs(pool) != -1)
+				pool_id = i;
+			UNLOCK(&pool->s.lock);
+			break;
+		}
+
+		UNLOCK(&pool->s.lock);
+	}
+
+	return pool_id;
+}
+
+odp_buffer_pool_t odp_buffer_pool_lookup(const char *name)
+{
+	odp_buffer_pool_t i;
+	pool_entry_t *pool;
+
+	for (i = 0; i < ODP_CONFIG_BUFFER_POOLS; i++) {
+		pool = get_pool_entry(i);
+
+		LOCK(&pool->s.lock);
+		if (strcmp(name, pool->s.name) == 0) {
+			/* found it */
+			UNLOCK(&pool->s.lock);
+			return i;
+		}
+		UNLOCK(&pool->s.lock);
+	}
+
+	return ODP_BUFFER_POOL_INVALID;
+}
+
+
+odp_buffer_t odp_buffer_alloc(odp_buffer_pool_t pool_id)
+{
+	pool_entry_t *pool = get_pool_entry(pool_id);
+	return (odp_buffer_t)ti_em_osal_hw_queue_pop(pool->s.free_queue,
+			TI_EM_MEM_PUBLIC_DESC);
+}
+
+
+void odp_buffer_free(odp_buffer_t buf)
+{
+	odp_buffer_hdr_t *hdr = odp_buf_to_hdr(buf);
+	ti_em_osal_hw_queue_push_size(hdr->free_queue,
+				      (void *)hdr,
+				      sizeof(Cppi_HostDesc),
+				      TI_EM_MEM_PUBLIC_DESC);
+}
+
+void odp_buffer_pool_print(odp_buffer_pool_t pool_id)
+{
+	(void)pool_id;
+}
diff --git a/platform/linux-keystone2/source/odp_init.c b/platform/linux-keystone2/source/odp_init.c
index b466e67..0b36960 100644
--- a/platform/linux-keystone2/source/odp_init.c
+++ b/platform/linux-keystone2/source/odp_init.c
@@ -12,6 +12,7 @@ 
 #include <ti_em_osal_queue.h>
 #include <ti_em_rh.h>
 #include <odp_config.h>
+#include <odp_buffer_internal.h>
 
 /*
  * Make region_configs[] global, because hw_config is saved in
@@ -47,7 +48,8 @@  static int ti_init_hw_config(void)
 	/* Define descriptor regions */
 	reg_config = &region_configs[TI_EM_RH_PUBLIC];
 	reg_config->region_idx   = TI_ODP_PUBLIC_REGION_IDX;
-	reg_config->desc_size    = TI_ODP_PUBLIC_DESC_SIZE;
+	reg_config->desc_size    =
+		ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(odp_buffer_hdr_t));
 	reg_config->desc_num     = TI_ODP_PUBLIC_DESC_NUM;
 	reg_config->desc_base    = TI_ODP_PUBLIC_DESC_BASE;
 	reg_config->desc_vbase   = TI_ODP_PUBLIC_DESC_VBASE;
diff --git a/platform/linux-keystone2/source/odp_packet.c b/platform/linux-keystone2/source/odp_packet.c
new file mode 100644
index 0000000..f03d849
--- /dev/null
+++ b/platform/linux-keystone2/source/odp_packet.c
@@ -0,0 +1,337 @@ 
+/* Copyright (c) 2013, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ */
+
+#include <odp_packet.h>
+#include <odp_packet_internal.h>
+#include <odp_hints.h>
+#include <odp_byteorder.h>
+
+#include <helper/odp_eth.h>
+#include <helper/odp_ip.h>
+
+#include <string.h>
+#include <stdio.h>
+
+static inline uint8_t parse_ipv4(odp_packet_hdr_t *pkt_hdr, odp_ipv4hdr_t *ipv4,
+				size_t *offset_out);
+static inline uint8_t parse_ipv6(odp_packet_hdr_t *pkt_hdr, odp_ipv6hdr_t *ipv6,
+				size_t *offset_out);
+
+void odp_packet_init(odp_packet_t pkt)
+{
+	odp_packet_hdr_t *const pkt_hdr = odp_packet_hdr(pkt);
+	const size_t start_offset = ODP_FIELD_SIZEOF(odp_packet_hdr_t, buf_hdr);
+	uint8_t *start;
+	size_t len;
+
+	start = (uint8_t *)pkt_hdr + start_offset;
+	len = ODP_OFFSETOF(odp_packet_hdr_t, payload) - start_offset;
+	memset(start, 0, len);
+
+	pkt_hdr->l2_offset = (uint32_t) ODP_PACKET_OFFSET_INVALID;
+	pkt_hdr->l3_offset = (uint32_t) ODP_PACKET_OFFSET_INVALID;
+	pkt_hdr->l4_offset = (uint32_t) ODP_PACKET_OFFSET_INVALID;
+}
+
+odp_packet_t odp_packet_from_buffer(odp_buffer_t buf)
+{
+	return (odp_packet_t)buf;
+}
+
+odp_buffer_t odp_buffer_from_packet(odp_packet_t pkt)
+{
+	return (odp_buffer_t)pkt;
+}
+
+void odp_packet_set_len(odp_packet_t pkt, size_t len)
+{
+	odp_packet_hdr(pkt)->frame_len = len;
+}
+
+size_t odp_packet_get_len(odp_packet_t pkt)
+{
+	return odp_packet_hdr(pkt)->frame_len;
+}
+
+uint8_t *odp_packet_buf_addr(odp_packet_t pkt)
+{
+	return odp_buffer_addr(odp_buffer_from_packet(pkt));
+}
+
+uint8_t *odp_packet_start(odp_packet_t pkt)
+{
+	return odp_packet_buf_addr(pkt) + odp_packet_hdr(pkt)->frame_offset;
+}
+
+
+uint8_t *odp_packet_l2(odp_packet_t pkt)
+{
+	const size_t offset = odp_packet_l2_offset(pkt);
+
+	if (odp_unlikely(offset == ODP_PACKET_OFFSET_INVALID))
+		return NULL;
+
+	return odp_packet_buf_addr(pkt) + offset;
+}
+
+size_t odp_packet_l2_offset(odp_packet_t pkt)
+{
+	return odp_packet_hdr(pkt)->l2_offset;
+}
+
+void odp_packet_set_l2_offset(odp_packet_t pkt, size_t offset)
+{
+	odp_packet_hdr(pkt)->l2_offset = offset;
+}
+
+uint8_t *odp_packet_l3(odp_packet_t pkt)
+{
+	const size_t offset = odp_packet_l3_offset(pkt);
+
+	if (odp_unlikely(offset == ODP_PACKET_OFFSET_INVALID))
+		return NULL;
+
+	return odp_packet_buf_addr(pkt) + offset;
+}
+
+size_t odp_packet_l3_offset(odp_packet_t pkt)
+{
+	return odp_packet_hdr(pkt)->l3_offset;
+}
+
+void odp_packet_set_l3_offset(odp_packet_t pkt, size_t offset)
+{
+	odp_packet_hdr(pkt)->l3_offset = offset;
+}
+
+uint8_t *odp_packet_l4(odp_packet_t pkt)
+{
+	const size_t offset = odp_packet_l4_offset(pkt);
+
+	if (odp_unlikely(offset == ODP_PACKET_OFFSET_INVALID))
+		return NULL;
+
+	return odp_packet_buf_addr(pkt) + offset;
+}
+
+size_t odp_packet_l4_offset(odp_packet_t pkt)
+{
+	return odp_packet_hdr(pkt)->l4_offset;
+}
+
+void odp_packet_set_l4_offset(odp_packet_t pkt, size_t offset)
+{
+	odp_packet_hdr(pkt)->l4_offset = offset;
+}
+
+/**
+ * Simple packet parser: eth, VLAN, IP, TCP/UDP/ICMP
+ *
+ * Internal function: caller is responsible for passing only valid packet
+ * handles, lengths and offsets (usually done and called in packet input).
+ *
+ * @param pkt        Packet handle
+ * @param len        Packet length in bytes
+ * @param frame_offset  Byte offset to L2 header
+ */
+void odp_packet_parse(odp_packet_t pkt, size_t len, size_t frame_offset)
+{
+	odp_packet_hdr_t *const pkt_hdr = odp_packet_hdr(pkt);
+	odp_ethhdr_t *eth;
+	odp_vlanhdr_t *vlan;
+	odp_ipv4hdr_t *ipv4;
+	odp_ipv6hdr_t *ipv6;
+	uint16_t ethtype;
+	size_t offset = 0;
+	uint8_t ip_proto = 0;
+
+	pkt_hdr->input_flags.eth = 1;
+	pkt_hdr->frame_offset = frame_offset;
+	pkt_hdr->frame_len = len;
+
+	if (odp_unlikely(len < ODP_ETH_LEN_MIN)) {
+		pkt_hdr->error_flags.frame_len = 1;
+		return;
+	} else if (len > ODP_ETH_LEN_MAX) {
+		pkt_hdr->input_flags.jumbo = 1;
+	}
+
+	/* Assume valid L2 header, no CRC/FCS check in SW */
+	pkt_hdr->input_flags.l2 = 1;
+	pkt_hdr->l2_offset = frame_offset;
+
+	eth = (odp_ethhdr_t *)odp_packet_start(pkt);
+	ethtype = odp_be_to_cpu_16(eth->type);
+	vlan = (odp_vlanhdr_t *)&eth->type;
+
+	if (ethtype == ODP_ETHTYPE_VLAN_OUTER) {
+		pkt_hdr->input_flags.vlan_qinq = 1;
+		ethtype = odp_be_to_cpu_16(vlan->tpid);
+		offset += sizeof(odp_vlanhdr_t);
+		vlan = &vlan[1];
+	}
+
+	if (ethtype == ODP_ETHTYPE_VLAN) {
+		pkt_hdr->input_flags.vlan = 1;
+		ethtype = odp_be_to_cpu_16(vlan->tpid);
+		offset += sizeof(odp_vlanhdr_t);
+	}
+
+	/* Set l3_offset+flag only for known ethtypes */
+	switch (ethtype) {
+	case ODP_ETHTYPE_IPV4:
+		pkt_hdr->input_flags.ipv4 = 1;
+		pkt_hdr->input_flags.l3 = 1;
+		pkt_hdr->l3_offset = frame_offset + ODP_ETHHDR_LEN + offset;
+		ipv4 = (odp_ipv4hdr_t *)odp_packet_l3(pkt);
+		ip_proto = parse_ipv4(pkt_hdr, ipv4, &offset);
+		break;
+	case ODP_ETHTYPE_IPV6:
+		pkt_hdr->input_flags.ipv6 = 1;
+		pkt_hdr->input_flags.l3 = 1;
+		pkt_hdr->l3_offset = frame_offset + ODP_ETHHDR_LEN + offset;
+		ipv6 = (odp_ipv6hdr_t *)odp_packet_l3(pkt);
+		ip_proto = parse_ipv6(pkt_hdr, ipv6, &offset);
+		break;
+	case ODP_ETHTYPE_ARP:
+		pkt_hdr->input_flags.arp = 1;
+		/* fall through */
+	default:
+		ip_proto = 0;
+		break;
+	}
+
+	switch (ip_proto) {
+	case ODP_IPPROTO_UDP:
+		pkt_hdr->input_flags.udp = 1;
+		pkt_hdr->input_flags.l4 = 1;
+		pkt_hdr->l4_offset = pkt_hdr->l3_offset + offset;
+		break;
+	case ODP_IPPROTO_TCP:
+		pkt_hdr->input_flags.tcp = 1;
+		pkt_hdr->input_flags.l4 = 1;
+		pkt_hdr->l4_offset = pkt_hdr->l3_offset + offset;
+		break;
+	case ODP_IPPROTO_SCTP:
+		pkt_hdr->input_flags.sctp = 1;
+		pkt_hdr->input_flags.l4 = 1;
+		pkt_hdr->l4_offset = pkt_hdr->l3_offset + offset;
+		break;
+	case ODP_IPPROTO_ICMP:
+		pkt_hdr->input_flags.icmp = 1;
+		pkt_hdr->input_flags.l4 = 1;
+		pkt_hdr->l4_offset = pkt_hdr->l3_offset + offset;
+		break;
+	default:
+		/* 0 or unhandled IP protocols, don't set L4 flag+offset */
+		if (pkt_hdr->input_flags.ipv6) {
+			/* IPv6 next_hdr is not L4, mark as IP-option instead */
+			pkt_hdr->input_flags.ipopt = 1;
+		}
+		break;
+	}
+}
+
+static inline uint8_t parse_ipv4(odp_packet_hdr_t *pkt_hdr, odp_ipv4hdr_t *ipv4,
+				size_t *offset_out)
+{
+	uint8_t ihl;
+	uint16_t frag_offset;
+
+	ihl = ODP_IPV4HDR_IHL(ipv4->ver_ihl);
+	if (odp_unlikely(ihl < ODP_IPV4HDR_IHL_MIN)) {
+		pkt_hdr->error_flags.ip_err = 1;
+		return 0;
+	}
+
+	if (odp_unlikely(ihl > ODP_IPV4HDR_IHL_MIN)) {
+		pkt_hdr->input_flags.ipopt = 1;
+		return 0;
+	}
+
+	/* A packet is a fragment if:
+	*  "more fragments" flag is set (all fragments except the last)
+	*     OR
+	*  "fragment offset" field is nonzero (all fragments except the first)
+	*/
+	frag_offset = odp_be_to_cpu_16(ipv4->frag_offset);
+	if (odp_unlikely(ODP_IPV4HDR_IS_FRAGMENT(frag_offset))) {
+		pkt_hdr->input_flags.ipfrag = 1;
+		return 0;
+	}
+
+	if (ipv4->proto == ODP_IPPROTO_ESP ||
+	    ipv4->proto == ODP_IPPROTO_AH) {
+		pkt_hdr->input_flags.ipsec = 1;
+		return 0;
+	}
+
+	/* Set pkt_hdr->input_flags.ipopt when checking L4 hdrs after return */
+
+	*offset_out = sizeof(uint32_t) * ihl;
+	return ipv4->proto;
+}
+
+static inline uint8_t parse_ipv6(odp_packet_hdr_t *pkt_hdr, odp_ipv6hdr_t *ipv6,
+				size_t *offset_out)
+{
+	if (ipv6->next_hdr == ODP_IPPROTO_ESP ||
+	    ipv6->next_hdr == ODP_IPPROTO_AH) {
+		pkt_hdr->input_flags.ipopt = 1;
+		pkt_hdr->input_flags.ipsec = 1;
+		return 0;
+	}
+
+	if (odp_unlikely(ipv6->next_hdr == ODP_IPPROTO_FRAG)) {
+		pkt_hdr->input_flags.ipopt = 1;
+		pkt_hdr->input_flags.ipfrag = 1;
+		return 0;
+	}
+
+	/* Don't step through more extensions */
+	*offset_out = ODP_IPV6HDR_LEN;
+	return ipv6->next_hdr;
+}
+
+void odp_packet_print(odp_packet_t pkt)
+{
+	int max_len = 512;
+	char str[max_len];
+	int len = 0;
+	int n = max_len-1;
+	odp_packet_hdr_t *hdr = odp_packet_hdr(pkt);
+
+	len += snprintf(&str[len], n-len, "Packet ");
+	len += odp_buffer_snprint(&str[len], n-len, (odp_buffer_t) pkt);
+	len += snprintf(&str[len], n-len,
+			"  input_flags  0x%x\n", hdr->input_flags.all);
+	len += snprintf(&str[len], n-len,
+			"  error_flags  0x%x\n", hdr->error_flags.all);
+	len += snprintf(&str[len], n-len,
+			"  output_flags 0x%x\n", hdr->output_flags.all);
+	len += snprintf(&str[len], n-len,
+			"  frame_offset %u\n", hdr->frame_offset);
+	len += snprintf(&str[len], n-len,
+			"  l2_offset    %u\n", hdr->l2_offset);
+	len += snprintf(&str[len], n-len,
+			"  l3_offset    %u\n", hdr->l3_offset);
+	len += snprintf(&str[len], n-len,
+			"  l4_offset    %u\n", hdr->l4_offset);
+	len += snprintf(&str[len], n-len,
+			"  frame_len    %u\n", hdr->frame_len);
+	len += snprintf(&str[len], n-len,
+			"  input        %u\n", hdr->input);
+	str[len] = '\0';
+
+	printf("\n%s\n", str);
+}
+
+int odp_packet_copy(odp_packet_t pkt_dst, odp_packet_t pkt_src)
+{
+	(void) pkt_dst;
+	(void) pkt_src;
+	return -1;
+}
diff --git a/platform/linux-keystone2/source/odp_queue.c b/platform/linux-keystone2/source/odp_queue.c
new file mode 100644
index 0000000..6248edb
--- /dev/null
+++ b/platform/linux-keystone2/source/odp_queue.c
@@ -0,0 +1,435 @@ 
+/* Copyright (c) 2013, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ */
+
+#include <odp_queue.h>
+#include <odp_queue_internal.h>
+#include <odp_std_types.h>
+#include <odp_align.h>
+#include <odp_buffer.h>
+#include <odp_buffer_internal.h>
+#include <odp_buffer_pool_internal.h>
+#include <odp_internal.h>
+#include <odp_shared_memory.h>
+#include <odp_schedule_internal.h>
+#include <odp_config.h>
+#include <odp_packet_io_internal.h>
+#include <odp_packet_io_queue.h>
+#include <odp_debug.h>
+#include <odp_hints.h>
+
+#ifdef USE_TICKETLOCK
+#include <odp_ticketlock.h>
+#define LOCK(a)      odp_ticketlock_lock(a)
+#define UNLOCK(a)    odp_ticketlock_unlock(a)
+#define LOCK_INIT(a) odp_ticketlock_init(a)
+#else
+#include <odp_spinlock.h>
+#define LOCK(a)      odp_spinlock_lock(a)
+#define UNLOCK(a)    odp_spinlock_unlock(a)
+#define LOCK_INIT(a) odp_spinlock_init(a)
+#endif
+
+#include <string.h>
+
+
+/* Table holding every queue entry; reserved from shared memory in
+ * odp_queue_init_global() so all ODP threads see the same table. */
+typedef struct queue_table_t {
+	queue_entry_t  queue[ODP_CONFIG_QUEUES];
+} queue_table_t;
+
+/* Pointer to the shared queue table (set by odp_queue_init_global) */
+static queue_table_t *queue_tbl;
+
+
+/* Map a queue index to its table entry. No bounds checking: the
+ * caller must pass a valid id (< ODP_CONFIG_QUEUES). */
+queue_entry_t *get_qentry(uint32_t queue_id)
+{
+	return &queue_tbl->queue[queue_id];
+}
+
+/* Initialize one queue entry: store name and parameters (or defaults)
+ * and select the enqueue/dequeue method table by queue type. The
+ * caller holds the entry lock (see odp_queue_create()). */
+static void queue_init(queue_entry_t *queue, const char *name,
+		       odp_queue_type_t type, odp_queue_param_t *param)
+{
+	strncpy(queue->s.name, name, ODP_QUEUE_NAME_LEN - 1);
+	/* strncpy() leaves the destination unterminated when 'name' is
+	 * ODP_QUEUE_NAME_LEN - 1 chars or longer; terminate explicitly
+	 * instead of relying on the table being zeroed. */
+	queue->s.name[ODP_QUEUE_NAME_LEN - 1] = 0;
+	queue->s.type = type;
+
+	if (param) {
+		memcpy(&queue->s.param, param, sizeof(odp_queue_param_t));
+	} else {
+		/* Defaults */
+		memset(&queue->s.param, 0, sizeof(odp_queue_param_t));
+		queue->s.param.sched.prio  = ODP_SCHED_PRIO_DEFAULT;
+		queue->s.param.sched.sync  = ODP_SCHED_SYNC_DEFAULT;
+		queue->s.param.sched.group = ODP_SCHED_GROUP_DEFAULT;
+	}
+
+	/* Packet I/O queues go straight to the pktio layer; everything
+	 * else uses the generic linked-list implementation below. */
+	switch (type) {
+	case ODP_QUEUE_TYPE_PKTIN:
+		queue->s.enqueue = pktin_enqueue;
+		queue->s.dequeue = pktin_dequeue;
+		queue->s.enqueue_multi = pktin_enq_multi;
+		queue->s.dequeue_multi = pktin_deq_multi;
+		break;
+	case ODP_QUEUE_TYPE_PKTOUT:
+		queue->s.enqueue = pktout_enqueue;
+		queue->s.dequeue = pktout_dequeue;
+		queue->s.enqueue_multi = pktout_enq_multi;
+		queue->s.dequeue_multi = pktout_deq_multi;
+		break;
+	default:
+		queue->s.enqueue = queue_enq;
+		queue->s.dequeue = queue_deq;
+		queue->s.enqueue_multi = queue_enq_multi;
+		queue->s.dequeue_multi = queue_deq_multi;
+		break;
+	}
+
+	queue->s.head = NULL;
+	queue->s.tail = NULL;
+	queue->s.sched_buf = ODP_BUFFER_INVALID;
+}
+
+
+/* Reserve and initialize the global queue table. Returns 0 on
+ * success, -1 if the shared memory reservation fails. */
+int odp_queue_init_global(void)
+{
+	uint32_t idx;
+
+	ODP_DBG("Queue init ... ");
+
+	queue_tbl = odp_shm_reserve("odp_queues",
+				    sizeof(queue_table_t),
+				    sizeof(queue_entry_t));
+	if (queue_tbl == NULL)
+		return -1;
+
+	memset(queue_tbl, 0, sizeof(queue_table_t));
+
+	/* Per-entry setup: lock and handle */
+	for (idx = 0; idx < ODP_CONFIG_QUEUES; idx++) {
+		queue_entry_t *qentry = &queue_tbl->queue[idx];
+
+		LOCK_INIT(&qentry->s.lock);
+		qentry->s.handle = queue_from_id(idx);
+	}
+
+	ODP_DBG("done\n");
+	ODP_DBG("Queue init global\n");
+	ODP_DBG("  struct queue_entry_s size %zu\n",
+		sizeof(struct queue_entry_s));
+	ODP_DBG("  queue_entry_t size        %zu\n",
+		sizeof(queue_entry_t));
+	ODP_DBG("\n");
+
+	return 0;
+}
+
+/* Return the type the queue was created with. */
+odp_queue_type_t odp_queue_type(odp_queue_t handle)
+{
+	return queue_to_qentry(handle)->s.type;
+}
+
+/* Return the scheduler synchronization mode from the creation params. */
+odp_schedule_sync_t odp_queue_sched_type(odp_queue_t handle)
+{
+	return queue_to_qentry(handle)->s.param.sched.sync;
+}
+
+/* Create a queue: claim a free table slot, initialize it and, for
+ * scheduled/pktin queues, attach a scheduler buffer. Returns the
+ * queue handle or ODP_QUEUE_INVALID on failure. */
+odp_queue_t odp_queue_create(const char *name, odp_queue_type_t type,
+			     odp_queue_param_t *param)
+{
+	uint32_t i;
+	queue_entry_t *queue;
+	odp_queue_t handle = ODP_QUEUE_INVALID;
+
+	/* Unlocked scan for a candidate; status re-checked under lock */
+	for (i = 0; i < ODP_CONFIG_QUEUES; i++) {
+		queue = &queue_tbl->queue[i];
+
+		if (queue->s.status != QUEUE_STATUS_FREE)
+			continue;
+
+		LOCK(&queue->s.lock);
+		if (queue->s.status == QUEUE_STATUS_FREE) {
+			queue_init(queue, name, type, param);
+
+			if (type == ODP_QUEUE_TYPE_SCHED ||
+			    type == ODP_QUEUE_TYPE_PKTIN)
+				queue->s.status = QUEUE_STATUS_NOTSCHED;
+			else
+				queue->s.status = QUEUE_STATUS_READY;
+
+			handle = queue->s.handle;
+			UNLOCK(&queue->s.lock);
+			break;
+		}
+		UNLOCK(&queue->s.lock);
+	}
+
+	if (handle != ODP_QUEUE_INVALID &&
+	    (type == ODP_QUEUE_TYPE_SCHED || type == ODP_QUEUE_TYPE_PKTIN)) {
+		odp_buffer_t buf;
+
+		buf = odp_schedule_buffer_alloc(handle);
+		if (buf == ODP_BUFFER_INVALID) {
+			ODP_ERR("queue_init: sched buf alloc failed\n");
+			/* Return the claimed slot to the free pool so a
+			 * failed create does not leak a table entry. */
+			LOCK(&queue->s.lock);
+			queue->s.status = QUEUE_STATUS_FREE;
+			UNLOCK(&queue->s.lock);
+			return ODP_QUEUE_INVALID;
+		}
+
+		queue->s.sched_buf = buf;
+		odp_schedule_mask_set(handle, queue->s.param.sched.prio);
+	}
+
+	return handle;
+}
+
+
+/* Return the scheduler buffer attached to the queue at creation. */
+odp_buffer_t queue_sched_buf(odp_queue_t handle)
+{
+	return queue_to_qentry(handle)->s.sched_buf;
+}
+
+
+/* Non-zero when the queue was created with atomic scheduler sync. */
+int queue_sched_atomic(odp_queue_t handle)
+{
+	queue_entry_t *qentry = queue_to_qentry(handle);
+
+	return ODP_SCHED_SYNC_ATOMIC == qentry->s.param.sched.sync;
+}
+
+
+/* Find a queue by name. Returns its handle, or ODP_QUEUE_INVALID if
+ * no in-use entry matches. */
+odp_queue_t odp_queue_lookup(const char *name)
+{
+	uint32_t idx;
+
+	for (idx = 0; idx < ODP_CONFIG_QUEUES; idx++) {
+		queue_entry_t *qentry = get_qentry(idx);
+		int match;
+
+		if (qentry->s.status == QUEUE_STATUS_FREE)
+			continue;
+
+		/* Compare under the entry lock; the handle itself is
+		 * immutable after init so it can be read outside it. */
+		LOCK(&qentry->s.lock);
+		match = (strcmp(name, qentry->s.name) == 0);
+		UNLOCK(&qentry->s.lock);
+
+		if (match)
+			return qentry->s.handle;
+	}
+
+	return ODP_QUEUE_INVALID;
+}
+
+
+/* Append one buffer to the queue's linked list. If the queue was not
+ * scheduled, hand it to the scheduler after dropping the lock. */
+int queue_enq(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr)
+{
+	int need_sched;
+
+	LOCK(&queue->s.lock);
+	buf_hdr->next = NULL;
+	if (queue->s.head == NULL)
+		queue->s.head = buf_hdr;	/* was empty */
+	else
+		queue->s.tail->next = buf_hdr;
+	queue->s.tail = buf_hdr;
+
+	need_sched = (queue->s.status == QUEUE_STATUS_NOTSCHED);
+	if (need_sched)
+		queue->s.status = QUEUE_STATUS_SCHED;
+	UNLOCK(&queue->s.lock);
+
+	/* Scheduler call is made outside the lock */
+	if (need_sched)
+		odp_schedule_queue(queue->s.handle, queue->s.param.sched.prio);
+
+	return 0;
+}
+
+
+/* Append 'num' buffers to the queue in one lock acquisition.
+ * Always returns 0. */
+int queue_enq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num)
+{
+	int sched = 0;
+	int i;
+	odp_buffer_hdr_t *tail;
+
+	/* Guard num < 1: buf_hdr[num - 1] below would read out of
+	 * bounds (undefined behaviour) for an empty batch. */
+	if (num < 1)
+		return 0;
+
+	/* Chain the buffers before taking the lock */
+	for (i = 0; i < num - 1; i++)
+		buf_hdr[i]->next = buf_hdr[i+1];
+
+	tail = buf_hdr[num-1];
+	buf_hdr[num-1]->next = NULL;
+
+	LOCK(&queue->s.lock);
+	/* Empty queue */
+	if (queue->s.head == NULL)
+		queue->s.head = buf_hdr[0];
+	else
+		queue->s.tail->next = buf_hdr[0];
+
+	queue->s.tail = tail;
+
+	if (queue->s.status == QUEUE_STATUS_NOTSCHED) {
+		queue->s.status = QUEUE_STATUS_SCHED;
+		sched = 1; /* retval: schedule queue */
+	}
+	UNLOCK(&queue->s.lock);
+
+	/* Add queue to scheduling */
+	if (sched == 1)
+		odp_schedule_queue(queue->s.handle, queue->s.param.sched.prio);
+
+	return 0;
+}
+
+
+/* Enqueue a batch of buffers; the batch is silently capped at
+ * QUEUE_MULTI_MAX. */
+int odp_queue_enq_multi(odp_queue_t handle, odp_buffer_t buf[], int num)
+{
+	odp_buffer_hdr_t *hdrs[QUEUE_MULTI_MAX];
+	queue_entry_t *queue = queue_to_qentry(handle);
+	int count = (num > QUEUE_MULTI_MAX) ? QUEUE_MULTI_MAX : num;
+	int idx;
+
+	/* Convert handles to headers, then hand off in one call */
+	for (idx = 0; idx < count; idx++)
+		hdrs[idx] = odp_buf_to_hdr(buf[idx]);
+
+	return queue->s.enqueue_multi(queue, hdrs, count);
+}
+
+
+/* Enqueue one buffer via the queue's type-specific method. */
+int odp_queue_enq(odp_queue_t handle, odp_buffer_t buf)
+{
+	queue_entry_t *queue = queue_to_qentry(handle);
+
+	return queue->s.enqueue(queue, odp_buf_to_hdr(buf));
+}
+
+
+/* Pop the head buffer, or NULL if empty. An empty scheduled queue
+ * (except pktin) is moved back to NOTSCHED state. */
+odp_buffer_hdr_t *queue_deq(queue_entry_t *queue)
+{
+	odp_buffer_hdr_t *hdr;
+
+	LOCK(&queue->s.lock);
+
+	hdr = queue->s.head;
+	if (hdr == NULL) {
+		/* Nothing queued: leave scheduling unless pktin */
+		if (queue->s.status == QUEUE_STATUS_SCHED &&
+		    queue->s.type != ODP_QUEUE_TYPE_PKTIN)
+			queue->s.status = QUEUE_STATUS_NOTSCHED;
+	} else {
+		queue->s.head = hdr->next;
+		hdr->next = NULL;
+		if (queue->s.head == NULL)
+			queue->s.tail = NULL;	/* drained */
+	}
+
+	UNLOCK(&queue->s.lock);
+
+	return hdr;
+}
+
+
+/* Pop up to 'num' buffers into buf_hdr[]; returns the count actually
+ * dequeued (0 when empty). An empty scheduled queue (except pktin)
+ * is moved back to NOTSCHED state. */
+int queue_deq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num)
+{
+	int count = 0;
+
+	LOCK(&queue->s.lock);
+
+	if (queue->s.head != NULL) {
+		odp_buffer_hdr_t *cur = queue->s.head;
+
+		while (count < num && cur != NULL) {
+			odp_buffer_hdr_t *next = cur->next;
+
+			buf_hdr[count] = cur;
+			cur->next = NULL;
+			cur = next;
+			count++;
+		}
+
+		queue->s.head = cur;
+		if (cur == NULL)
+			queue->s.tail = NULL;	/* drained */
+	} else if (queue->s.status == QUEUE_STATUS_SCHED &&
+		   queue->s.type != ODP_QUEUE_TYPE_PKTIN) {
+		/* Already empty: leave scheduling unless pktin */
+		queue->s.status = QUEUE_STATUS_NOTSCHED;
+	}
+
+	UNLOCK(&queue->s.lock);
+
+	return count;
+}
+
+
+/* Dequeue up to 'num' (capped at QUEUE_MULTI_MAX) buffers; returns
+ * how many were stored into buf[]. */
+int odp_queue_deq_multi(odp_queue_t handle, odp_buffer_t buf[], int num)
+{
+	odp_buffer_hdr_t *hdrs[QUEUE_MULTI_MAX];
+	queue_entry_t *queue = queue_to_qentry(handle);
+	int count;
+	int idx;
+
+	if (num > QUEUE_MULTI_MAX)
+		num = QUEUE_MULTI_MAX;
+
+	count = queue->s.dequeue_multi(queue, hdrs, num);
+
+	/* Convert dequeued headers back to buffer handles */
+	for (idx = 0; idx < count; idx++)
+		buf[idx] = hdr_to_odp_buf(hdrs[idx]);
+
+	return count;
+}
+
+
+/* Dequeue one buffer; an empty queue yields ODP_BUFFER_INVALID. */
+odp_buffer_t odp_queue_deq(odp_queue_t handle)
+{
+	queue_entry_t *queue = queue_to_qentry(handle);
+	odp_buffer_hdr_t *hdr = queue->s.dequeue(queue);
+
+	return hdr ? hdr_to_odp_buf(hdr) : ODP_BUFFER_INVALID;
+}
+
+
+/* Acquire the per-queue lock (ticketlock or spinlock per build config). */
+void queue_lock(queue_entry_t *queue)
+{
+	LOCK(&queue->s.lock);
+}
+
+
+/* Release the per-queue lock taken by queue_lock(). */
+void queue_unlock(queue_entry_t *queue)
+{
+	UNLOCK(&queue->s.lock);
+}