new file mode 100644
@@ -0,0 +1,32 @@
+PROJECT_NAME = "API Reference Manual"
+PROJECT_LOGO = ../../doc/images/ODP-Logo-HQ.png
+QUIET = YES
+OUTPUT_DIRECTORY = ./doc
+FULL_PATH_NAMES = NO
+JAVADOC_AUTOBRIEF = YES
+OPTIMIZE_OUTPUT_FOR_C = YES
+TYPEDEF_HIDES_STRUCT = YES
+EXTRACT_STATIC = YES
+SORT_MEMBER_DOCS = NO
+WARN_NO_PARAMDOC = YES
+INPUT = ../../include ../../test
+FILE_PATTERNS = odp*.h odp*.c
+RECURSIVE = YES
+SOURCE_BROWSER = YES
+REFERENCED_BY_RELATION = YES
+REFERENCES_RELATION = YES
+ALPHABETICAL_INDEX = NO
+QHP_NAMESPACE =
+GENERATE_TREEVIEW = YES
+PAPER_TYPE = a4wide
+CLASS_DIAGRAMS = NO
+HAVE_DOT = YES
+CALL_GRAPH = YES
+DOT_MULTI_TARGETS = NO
+EXAMPLE_PATH = ../../test
+EXAMPLE_PATTERNS = *.c
+EXAMPLE_RECURSIVE = YES
+IMAGE_PATH = ../../doc/images
+HTML_EXTRA_STYLESHEET = ../../doc/odpdoxygen.css
+PREDEFINED = __GNUC__
+INTERNAL_DOCS = YES
new file mode 100644
@@ -0,0 +1,141 @@
+## Copyright (c) 2013, Linaro Limited
+## All rights reserved.
+##
+## Redistribution and use in source and binary forms, with or without
+## modification, are permitted provided that the following conditions are met:
+##
+## * Redistributions of source code must retain the above copyright notice, this
+## list of conditions and the following disclaimer.
+##
+## * Redistributions in binary form must reproduce the above copyright notice, this
+## list of conditions and the following disclaimer in the documentation and/or
+## other materials provided with the distribution.
+##
+## * Neither the name of Linaro Limited nor the names of its contributors may be
+## used to endorse or promote products derived from this software without specific
+## prior written permission.
+##
+## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+## ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+## WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+## DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+## FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+## DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+## SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+## CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+## OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+.DEFAULT_GOAL := libs
+
+ODP_ROOT = ../..
+LIB_DIR = ./lib
+DOC_DIR = ./doc
+
+EXTRA_CFLAGS += -I$(ODP_ROOT)/include
+EXTRA_CFLAGS += -I./include
+EXTRA_CFLAGS += -I./include/api
+EXTRA_CFLAGS += -fPIC
+
+ifeq ($(ODP_HAVE_NETMAP),yes)
+EXTRA_CFLAGS += -DODP_HAVE_NETMAP
+endif
+
+include $(ODP_ROOT)/Makefile.inc
+STATIC_LIB = ./lib/libodp.a
+
+#
+# Object files
+#
+OBJS =
+OBJS += $(OBJ_DIR)/odp_barrier.o
+OBJS += $(OBJ_DIR)/odp_buffer.o
+OBJS += $(OBJ_DIR)/odp_buffer_pool.o
+OBJS += $(OBJ_DIR)/odp_coremask.o
+OBJS += $(OBJ_DIR)/odp_init.o
+OBJS += $(OBJ_DIR)/odp_linux.o
+OBJS += $(OBJ_DIR)/odp_packet.o
+OBJS += $(OBJ_DIR)/odp_packet_flags.o
+OBJS += $(OBJ_DIR)/odp_packet_io.o
+OBJS += $(OBJ_DIR)/odp_packet_socket.o
+OBJS += $(OBJ_DIR)/odp_queue.o
+OBJS += $(OBJ_DIR)/odp_schedule.o
+OBJS += $(OBJ_DIR)/odp_shared_memory.o
+OBJS += $(OBJ_DIR)/odp_spinlock.o
+OBJS += $(OBJ_DIR)/odp_system_info.o
+OBJS += $(OBJ_DIR)/odp_thread.o
+OBJS += $(OBJ_DIR)/odp_ticketlock.o
+OBJS += $(OBJ_DIR)/odp_time.o
+OBJS += $(OBJ_DIR)/odp_timer.o
+OBJS += $(OBJ_DIR)/odp_ring.o
+OBJS += $(OBJ_DIR)/odp_rwlock.o
+ifeq ($(ODP_HAVE_NETMAP),yes)
+OBJS += $(OBJ_DIR)/odp_packet_netmap.o
+endif
+
+DEPS = $(OBJS:.o=.d)
+
+.PHONY: all
+all: libs docs
+
+-include $(DEPS)
+
+#$(OBJ_DIR):
+# $(MKDIR) $(OBJ_DIR)
+
+$(LIB_DIR):
+ $(MKDIR) $(LIB_DIR)
+
+$(DOC_DIR):
+ $(MKDIR) $(DOC_DIR)/html
+ $(MKDIR) $(DOC_DIR)/latex
+
+#
+# Compile rules
+#
+# -MD makes the compiler emit a .d dependency file alongside each object;
+# those files are read back via "-include $(DEPS)" so that header changes
+# trigger rebuilds.
+$(OBJ_DIR)/%.o: ./source/%.c
+	$(ECHO) Compiling $<
+	$(CC) -c -MD $(EXTRA_CFLAGS) $(CFLAGS) -o $@ $<
+
+#
+# Lib rule
+#
+$(STATIC_LIB): $(OBJS)
+ $(AR) -cr $@ $(OBJS)
+
+# Remove everything the build creates (objects, libs, docs, generated
+# Doxyfile). Declared .PHONY, consistently with the other command
+# targets in this file, so a stray file named "clean" cannot mask it.
+.PHONY: clean
+clean:
+	$(RMDIR) $(OBJ_DIR)
+	$(RMDIR) $(LIB_DIR)
+	$(RMDIR) $(DOC_DIR)
+	$(RM) Doxyfile
+
+Doxyfile: Doxyfile.in
+ doxygen -u - < $< > $@
+
+.PHONY: docs
+docs: $(DOC_DIR) Doxyfile ./include/odp*.h
+ doxygen
+
+.PHONY: docs_install
+docs_install: docs
+ $(COPY) doc $(DESTDIR)
+
+.PHONY: pdf
+# Build the PDF reference manual from the doxygen-generated LaTeX.
+# Use $(MAKE), not a literal "make", so that -j/-n and the jobserver
+# are propagated to the sub-make.
+pdf: docs
+	$(MAKE) --directory doc/latex refman.pdf 1> /dev/null
+
+.PHONY: libs
+libs: $(OBJ_DIR) $(LIB_DIR) $(STATIC_LIB)
+
+.PHONY: lib_install
+lib_install: libs
+ install -d $(DESTDIR)/lib
+ install -m 0644 ${STATIC_LIB} $(DESTDIR)/lib/
+
+.PHONY: headers_install
+headers_install: libs
+ $(ECHO) Installing headers to $(DESTDIR)/include
+ $(COPY) $(ODP_ROOT)/include $(DESTDIR)
+ $(COPY) include/api/* $(DESTDIR)/include/
+
+# Top-level install: libraries plus headers. Declared .PHONY like the
+# other command targets so a file named "install" cannot shadow it.
+.PHONY: install
+install: lib_install headers_install
new file mode 100644
@@ -0,0 +1,107 @@
+/* Copyright (c) 2013, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+
+/**
+ * @file
+ *
+ * ODP buffer descriptor
+ */
+
+#ifndef ODP_BUFFER_H_
+#define ODP_BUFFER_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+
+#include <odp_std_types.h>
+
+
+
+
+
+/**
+ * ODP buffer
+ */
+typedef uint32_t odp_buffer_t;
+
+#define ODP_BUFFER_INVALID (0xffffffff) /**< Invalid buffer */
+
+
+/**
+ * Buffer start address
+ *
+ * @param buf Buffer handle
+ *
+ * @return Buffer start address
+ */
+void *odp_buffer_addr(odp_buffer_t buf);
+
+/**
+ * Buffer maximum data size
+ *
+ * @param buf Buffer handle
+ *
+ * @return Buffer maximum data size
+ */
+size_t odp_buffer_size(odp_buffer_t buf);
+
+/**
+ * Buffer type
+ *
+ * @param buf Buffer handle
+ *
+ * @return Buffer type
+ */
+int odp_buffer_type(odp_buffer_t buf);
+
+#define ODP_BUFFER_TYPE_INVALID (-1) /**< Buffer type invalid */
+#define ODP_BUFFER_TYPE_RAW 0 /**< Raw buffer */
+#define ODP_BUFFER_TYPE_PACKET 1 /**< Packet buffer */
+#define ODP_BUFFER_TYPE_TIMER 2 /**< Timer buffer */
+
+/**
+ * Tests if buffer is part of a scatter/gather list
+ *
+ * @param buf Buffer handle
+ *
+ * @return 1 if belongs to a scatter list, otherwise 0
+ */
+int odp_buffer_is_scatter(odp_buffer_t buf);
+
+/**
+ * Tests if buffer is valid
+ *
+ * @param buf Buffer handle
+ *
+ * @return 1 if valid, otherwise 0
+ */
+int odp_buffer_is_valid(odp_buffer_t buf);
+
+/**
+ * Print buffer metadata to STDOUT
+ *
+ * @param buf Buffer handle
+ *
+ */
+void odp_buffer_print(odp_buffer_t buf);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
+
+
+
+
+
+
new file mode 100644
@@ -0,0 +1,22 @@
+
+/* Copyright (c) 2013, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PKTIO_NETMAP_H
+#define ODP_PKTIO_NETMAP_H
+
+#include <odp_pktio_types.h>
+
+#define ODP_NETMAP_MODE_HW 0
+#define ODP_NETMAP_MODE_SW 1
+
+typedef struct {
+ odp_pktio_type_t type;
+ int netmap_mode;
+ uint16_t ringid;
+} netmap_params_t;
+
+#endif
new file mode 100644
@@ -0,0 +1,25 @@
+/* Copyright (c) 2013, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PKTIO_SOCKET_H
+#define ODP_PKTIO_SOCKET_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp_pktio_types.h>
+
+typedef struct {
+ odp_pktio_type_t type;
+ int fanout;
+} socket_params_t;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
new file mode 100644
@@ -0,0 +1,43 @@
+/* Copyright (c) 2013, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PKTIO_TYPES_H
+#define ODP_PKTIO_TYPES_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* We should ensure that future enum values will never overlap, otherwise
+ * applications that want netmap support might get in trouble if the odp lib
+ * was not built with netmap support and there are more types defined below
+ */
+
+typedef enum {
+ ODP_PKTIO_TYPE_SOCKET_BASIC = 0x1,
+ ODP_PKTIO_TYPE_SOCKET_MMSG,
+ ODP_PKTIO_TYPE_SOCKET_MMAP,
+ ODP_PKTIO_TYPE_NETMAP,
+} odp_pktio_type_t;
+
+#include <odp_pktio_socket.h>
+#ifdef ODP_HAVE_NETMAP
+#include <odp_pktio_netmap.h>
+#endif
+
+typedef union odp_pktio_params_t {
+ odp_pktio_type_t type;
+ socket_params_t sock_params;
+#ifdef ODP_HAVE_NETMAP
+ netmap_params_t nm_params;
+#endif
+} odp_pktio_params_t;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
new file mode 100644
@@ -0,0 +1,124 @@
+/* Copyright (c) 2013, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+
+/**
+ * @file
+ *
+ * ODP buffer descriptor - implementation internal
+ */
+
+#ifndef ODP_BUFFER_INTERNAL_H_
+#define ODP_BUFFER_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp_std_types.h>
+#include <odp_atomic.h>
+#include <odp_buffer_pool.h>
+#include <odp_buffer.h>
+#include <odp_debug.h>
+#include <odp_align.h>
+
+/* TODO: move these to correct files */
+
+typedef uint64_t odp_phys_addr_t;
+
+#define ODP_BUFFER_MAX_INDEX (ODP_BUFFER_MAX_BUFFERS - 2)
+#define ODP_BUFFER_INVALID_INDEX (ODP_BUFFER_MAX_BUFFERS - 1)
+
+#define ODP_BUFS_PER_CHUNK 16
+#define ODP_BUFS_PER_SCATTER 4
+
+#define ODP_BUFFER_TYPE_CHUNK 0xffff
+
+
+#define ODP_BUFFER_POOL_BITS 4
+#define ODP_BUFFER_INDEX_BITS (32 - ODP_BUFFER_POOL_BITS)
+#define ODP_BUFFER_MAX_POOLS (1 << ODP_BUFFER_POOL_BITS)
+#define ODP_BUFFER_MAX_BUFFERS (1 << ODP_BUFFER_INDEX_BITS)
+
+typedef union odp_buffer_bits_t {
+ uint32_t u32;
+ odp_buffer_t handle;
+
+ struct {
+ uint32_t pool:ODP_BUFFER_POOL_BITS;
+ uint32_t index:ODP_BUFFER_INDEX_BITS;
+ };
+} odp_buffer_bits_t;
+
+
+/* forward declaration */
+struct odp_buffer_hdr_t;
+
+
+/*
+ * Scatter/gather list of buffers
+ */
+typedef struct odp_buffer_scatter_t {
+ /* buffer pointers */
+ struct odp_buffer_hdr_t *buf[ODP_BUFS_PER_SCATTER];
+ int num_bufs; /* num buffers */
+ int pos; /* position on the list */
+ size_t total_len; /* Total length */
+} odp_buffer_scatter_t;
+
+
+/*
+ * Chunk of buffers (in single pool)
+ */
+typedef struct odp_buffer_chunk_t {
+ uint32_t num_bufs; /* num buffers */
+ uint32_t buf_index[ODP_BUFS_PER_CHUNK]; /* buffers */
+} odp_buffer_chunk_t;
+
+
+typedef struct odp_buffer_hdr_t {
+ struct odp_buffer_hdr_t *next; /* next buf in a list */
+ odp_buffer_bits_t handle; /* handle */
+ odp_phys_addr_t phys_addr; /* physical data start address */
+ void *addr; /* virtual data start address */
+ uint32_t index; /* buf index in the pool */
+ size_t size; /* max data size */
+ size_t cur_offset; /* current offset */
+ odp_atomic_int_t ref_count; /* reference count */
+ odp_buffer_scatter_t scatter; /* Scatter/gather list */
+ int type; /* type of next header */
+ odp_buffer_pool_t pool; /* buffer pool */
+
+ uint8_t payload[]; /* next header or data */
+} odp_buffer_hdr_t;
+
+ODP_ASSERT(sizeof(odp_buffer_hdr_t) == ODP_OFFSETOF(odp_buffer_hdr_t, payload),
+ ODP_BUFFER_HDR_T__SIZE_ERROR);
+
+
+typedef struct odp_buffer_chunk_hdr_t {
+ odp_buffer_hdr_t buf_hdr;
+ odp_buffer_chunk_t chunk;
+} odp_buffer_chunk_hdr_t;
+
+
+int odp_buffer_snprint(char *str, size_t n, odp_buffer_t buf);
+
+void odp_buffer_copy_scatter(odp_buffer_t buf_dst, odp_buffer_t buf_src);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
+
+
+
+
+
+
new file mode 100644
@@ -0,0 +1,115 @@
+/* Copyright (c) 2013, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+
+/**
+ * @file
+ *
+ * ODP buffer pool - internal header
+ */
+
+#ifndef ODP_BUFFER_POOL_INTERNAL_H_
+#define ODP_BUFFER_POOL_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp_std_types.h>
+#include <odp_buffer_pool.h>
+#include <odp_buffer_internal.h>
+#include <odp_align.h>
+#include <odp_hints.h>
+#include <odp_config.h>
+#include <odp_debug.h>
+
+/* Use ticketlock instead of spinlock */
+#define POOL_USE_TICKETLOCK
+
+/* Extra error checks */
+/* #define POOL_ERROR_CHECK */
+
+
+#ifdef POOL_USE_TICKETLOCK
+#include <odp_ticketlock.h>
+#else
+#include <odp_spinlock.h>
+#endif
+
+
+struct pool_entry_s {
+#ifdef POOL_USE_TICKETLOCK
+ odp_ticketlock_t lock ODP_ALIGNED_CACHE;
+#else
+ odp_spinlock_t lock ODP_ALIGNED_CACHE;
+#endif
+
+ odp_buffer_chunk_hdr_t *head;
+ uint64_t free_bufs;
+ char name[ODP_BUFFER_POOL_NAME_LEN];
+
+ odp_buffer_pool_t pool ODP_ALIGNED_CACHE;
+ uintptr_t buf_base;
+ size_t buf_size;
+ size_t buf_offset;
+ uint64_t num_bufs;
+ void *pool_base_addr;
+ uint64_t pool_size;
+ size_t payload_size;
+ size_t payload_align;
+ int buf_type;
+ size_t hdr_size;
+};
+
+
+extern void *pool_entry_ptr[];
+
+
+/* Return the pool table entry for a pool id.
+ *
+ * No bounds checking: the caller must pass a valid id
+ * (0..ODP_CONFIG_BUFFER_POOLS-1, the size of pool_entry_ptr[]). */
+static inline void *get_pool_entry(odp_buffer_pool_t pool_id)
+{
+	return pool_entry_ptr[pool_id];
+}
+
+
+/* Translate a buffer handle into a pointer to its buffer header.
+ *
+ * The handle packs a pool id and a buffer index (odp_buffer_bits_t);
+ * the header lives at pool buf_base + index * buf_size. With
+ * POOL_ERROR_CHECK enabled, a bad pool id or buffer index logs an
+ * error and returns NULL. */
+static inline odp_buffer_hdr_t *odp_buf_to_hdr(odp_buffer_t buf)
+{
+	odp_buffer_bits_t handle;
+	uint32_t pool_id;
+	uint32_t index;
+	struct pool_entry_s *pool;
+	odp_buffer_hdr_t *hdr;
+
+	handle.u32 = buf;
+	pool_id = handle.pool;
+	index = handle.index;
+
+#ifdef POOL_ERROR_CHECK
+	/* Valid pool ids are 0..ODP_CONFIG_BUFFER_POOLS-1, so use '>='
+	 * here: the previous '>' check let pool_id equal to
+	 * ODP_CONFIG_BUFFER_POOLS (one past the table) slip through. */
+	if (odp_unlikely(pool_id >= ODP_CONFIG_BUFFER_POOLS)) {
+		ODP_ERR("odp_buf_to_hdr: Bad pool id\n");
+		return NULL;
+	}
+#endif
+
+	pool = get_pool_entry(pool_id);
+
+#ifdef POOL_ERROR_CHECK
+	if (odp_unlikely(index > pool->num_bufs - 1)) {
+		ODP_ERR("odp_buf_to_hdr: Bad buffer index\n");
+		return NULL;
+	}
+#endif
+
+	hdr = (odp_buffer_hdr_t *)(pool->buf_base + index * pool->buf_size);
+
+	return hdr;
+}
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
new file mode 100644
@@ -0,0 +1,53 @@
+/* Copyright (c) 2013, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+
+/**
+ * @file
+ *
+ * ODP HW system information
+ */
+
+#ifndef ODP_INTERNAL_H_
+#define ODP_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+int odp_system_info_init(void);
+
+void odp_thread_init_global(void);
+void odp_thread_init_local(int thr_id);
+
+int odp_shm_init_global(void);
+int odp_shm_init_local(void);
+
+int odp_buffer_pool_init_global(void);
+
+int odp_pktio_init_global(void);
+int odp_pktio_init_local(void);
+
+int odp_queue_init_global(void);
+
+int odp_schedule_init_global(void);
+int odp_schedule_init_local(void);
+
+int odp_timer_init_global(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
+
+
+
+
+
+
new file mode 100644
@@ -0,0 +1,145 @@
+/* Copyright (c) 2013, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+
+/**
+ * @file
+ *
+ * ODP packet descriptor - implementation internal
+ */
+
+#ifndef ODP_PACKET_INTERNAL_H_
+#define ODP_PACKET_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp_align.h>
+#include <odp_debug.h>
+#include <odp_buffer_internal.h>
+#include <odp_buffer_pool_internal.h>
+#include <odp_packet.h>
+#include <odp_packet_io.h>
+
+/**
+ * Packet input & protocol flags
+ */
+typedef union {
+ /* All input flags */
+ uint32_t all;
+
+ struct {
+ /* Bitfield flags for each protocol */
+ uint32_t l2:1; /**< known L2 protocol present */
+ uint32_t l3:1; /**< known L3 protocol present */
+ uint32_t l4:1; /**< known L4 protocol present */
+
+ uint32_t eth:1; /**< Ethernet */
+ uint32_t jumbo:1; /**< Jumbo frame */
+ uint32_t vlan:1; /**< VLAN hdr found */
+ uint32_t vlan_qinq:1; /**< Stacked VLAN found, QinQ */
+
+ uint32_t arp:1; /**< ARP */
+
+ uint32_t ipv4:1; /**< IPv4 */
+ uint32_t ipv6:1; /**< IPv6 */
+ uint32_t ipfrag:1; /**< IP fragment */
+ uint32_t ipopt:1; /**< IP optional headers */
+ uint32_t ipsec:1; /**< IPSec decryption may be needed */
+
+ uint32_t udp:1; /**< UDP */
+ uint32_t tcp:1; /**< TCP */
+ uint32_t sctp:1; /**< SCTP */
+ uint32_t icmp:1; /**< ICMP */
+ };
+} input_flags_t;
+
+ODP_ASSERT(sizeof(input_flags_t) == sizeof(uint32_t), INPUT_FLAGS_SIZE_ERROR);
+
+/**
+ * Packet error flags
+ */
+typedef union {
+ /* All error flags */
+ uint32_t all;
+
+ struct {
+ /* Bitfield flags for each detected error */
+ uint32_t frame_len:1; /**< Frame length error */
+ uint32_t l2_chksum:1; /**< L2 checksum error, checks TBD */
+ uint32_t ip_err:1; /**< IP error, checks TBD */
+ uint32_t tcp_err:1; /**< TCP error, checks TBD */
+ uint32_t udp_err:1; /**< UDP error, checks TBD */
+ };
+} error_flags_t;
+
+ODP_ASSERT(sizeof(error_flags_t) == sizeof(uint32_t), ERROR_FLAGS_SIZE_ERROR);
+
+/**
+ * Packet output flags
+ */
+typedef union {
+ /* All output flags */
+ uint32_t all;
+
+ struct {
+ /* Bitfield flags for each output option */
+ uint32_t l4_chksum:1; /**< Request L4 checksum calculation */
+ };
+} output_flags_t;
+
+ODP_ASSERT(sizeof(output_flags_t) == sizeof(uint32_t), OUTPUT_FLAGS_SIZE_ERROR);
+
+/**
+ * Internal Packet header
+ */
+typedef struct {
+ /* common buffer header */
+ odp_buffer_hdr_t buf_hdr;
+
+ input_flags_t input_flags;
+ error_flags_t error_flags;
+ output_flags_t output_flags;
+
+ uint32_t frame_offset; /**< offset to start of frame, even on error */
+ uint32_t l2_offset; /**< offset to L2 hdr, e.g. Eth */
+ uint32_t l3_offset; /**< offset to L3 hdr, e.g. IPv4, IPv6 */
+ uint32_t l4_offset; /**< offset to L4 hdr (TCP, UDP, SCTP, also ICMP) */
+
+ uint32_t frame_len;
+
+ odp_pktio_t input;
+
+ uint32_t pad;
+ uint8_t payload[];
+
+} odp_packet_hdr_t;
+
+ODP_ASSERT(sizeof(odp_packet_hdr_t) == ODP_OFFSETOF(odp_packet_hdr_t, payload),
+ ODP_PACKET_HDR_T__SIZE_ERR);
+ODP_ASSERT(sizeof(odp_packet_hdr_t) % sizeof(uint64_t) == 0,
+ ODP_PACKET_HDR_T__SIZE_ERR2);
+
+/**
+ * Return the packet header
+ */
+static inline odp_packet_hdr_t *odp_packet_hdr(odp_packet_t pkt)
+{
+	/* Valid because odp_packet_hdr_t begins with an odp_buffer_hdr_t
+	 * (buf_hdr is its first member), so the buffer header address is
+	 * also the packet header address. */
+	return (odp_packet_hdr_t *)odp_buf_to_hdr((odp_buffer_t)pkt);
+}
+
+/**
+ * Parse packet and set internal metadata
+ */
+void odp_packet_parse(odp_packet_t pkt, size_t len, size_t l2_offset);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
new file mode 100644
@@ -0,0 +1,50 @@
+/* Copyright (c) 2013, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+
+/**
+ * @file
+ *
+ * ODP packet IO - implementation internal
+ */
+
+#ifndef ODP_PACKET_IO_INTERNAL_H_
+#define ODP_PACKET_IO_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp_spinlock.h>
+#include <odp_packet_socket.h>
+#ifdef ODP_HAVE_NETMAP
+#include <odp_packet_netmap.h>
+#endif
+
+struct pktio_entry {
+ odp_spinlock_t lock; /**< entry spinlock */
+ int taken; /**< is entry taken(1) or free(0) */
+ odp_queue_t inq_default; /**< default input queue, if set */
+ odp_queue_t outq_default; /**< default out queue */
+ odp_pktio_params_t params; /**< pktio parameters */
+ pkt_sock_t pkt_sock; /**< using socket API for IO */
+ pkt_sock_mmap_t pkt_sock_mmap; /**< using socket mmap API for IO */
+#ifdef ODP_HAVE_NETMAP
+ pkt_netmap_t pkt_nm; /**< using netmap API for IO */
+#endif
+};
+
+typedef union {
+ struct pktio_entry s;
+ uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(struct pktio_entry))];
+} pktio_entry_t;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
new file mode 100644
@@ -0,0 +1,50 @@
+/* Copyright (c) 2013, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+
+/**
+ * @file
+ *
+ * ODP packet IO - implementation internal
+ */
+
+#ifndef ODP_PACKET_IO_QUEUE_H_
+#define ODP_PACKET_IO_QUEUE_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp_queue_internal.h>
+#include <odp_buffer_internal.h>
+
+/** Max nbr of pkts to receive in one burst (keep same as QUEUE_MULTI_MAX) */
+#define ODP_PKTIN_QUEUE_MAX_BURST 16
+/* pktin_deq_multi() depends on the condition: */
+ODP_ASSERT(ODP_PKTIN_QUEUE_MAX_BURST >= QUEUE_MULTI_MAX,
+ ODP_PKTIN_DEQ_MULTI_MAX_ERROR);
+
+int pktin_enqueue(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr);
+odp_buffer_hdr_t *pktin_dequeue(queue_entry_t *queue);
+
+int pktin_enq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num);
+int pktin_deq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num);
+
+
+int pktout_enqueue(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr);
+odp_buffer_hdr_t *pktout_dequeue(queue_entry_t *queue);
+
+int pktout_enq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[],
+ int num);
+int pktout_deq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[],
+ int num);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
new file mode 100644
@@ -0,0 +1,67 @@
+/* Copyright (c) 2013, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PACKET_NETMAP_H
+#define ODP_PACKET_NETMAP_H
+
+#include <stdint.h>
+
+#include <net/if.h>
+#include <net/netmap.h>
+#include <net/netmap_user.h>
+
+#include <odp_align.h>
+#include <odp_debug.h>
+#include <odp_buffer_pool.h>
+#include <odp_packet.h>
+
+#include <odp_pktio_netmap.h>
+
+#define ODP_NETMAP_MODE_HW 0
+#define ODP_NETMAP_MODE_SW 1
+
+#define NETMAP_BLOCKING_IO
+
+/** Packet socket using netmap mmaped rings for both Rx and Tx */
+typedef struct {
+ odp_buffer_pool_t pool;
+ size_t max_frame_len; /**< max frame len = buf_size - sizeof(pkt_hdr) */
+ size_t frame_offset; /**< frame start offset from start of pkt buf */
+ size_t buf_size; /**< size of buffer payload in 'pool' */
+ int netmap_mode;
+ struct nm_desc_t *nm_desc;
+ uint32_t begin;
+ uint32_t end;
+ struct netmap_ring *rxring;
+ struct netmap_ring *txring;
+ odp_queue_t tx_access; /* Used for exclusive access to send packets */
+ uint32_t if_flags;
+ char ifname[32];
+} pkt_netmap_t;
+
+/**
+ * Configure an interface to work in netmap mode
+ */
+int setup_pkt_netmap(pkt_netmap_t * const pkt_nm, char *netdev,
+ odp_buffer_pool_t pool, netmap_params_t *nm_params);
+
+/**
+ * Switch interface from netmap mode to normal mode
+ */
+int close_pkt_netmap(pkt_netmap_t * const pkt_nm);
+
+/**
+ * Receive packets using netmap
+ */
+int recv_pkt_netmap(pkt_netmap_t * const pkt_nm, odp_packet_t pkt_table[],
+ unsigned len);
+
+/**
+ * Send packets using netmap
+ */
+int send_pkt_netmap(pkt_netmap_t * const pkt_nm, odp_packet_t pkt_table[],
+ unsigned len);
+#endif
new file mode 100644
@@ -0,0 +1,114 @@
+/* Copyright (c) 2013, Linaro Limited
+ * Copyright (c) 2013, Nokia Solutions and Networks
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PACKET_SOCKET_H
+#define ODP_PACKET_SOCKET_H
+
+#include <linux/if_packet.h>
+#include <linux/if_ether.h>
+#include <sys/socket.h>
+
+#include <odp_align.h>
+#include <odp_buffer.h>
+#include <odp_debug.h>
+#include <odp_buffer_pool.h>
+#include <odp_packet.h>
+
+#include <linux/version.h>
+
+/*
+ * Packet socket config:
+ */
+
+/** Max receive (Rx) burst size*/
+#define ODP_PACKET_SOCKET_MAX_BURST_RX 32
+/** Max transmit (Tx) burst size*/
+#define ODP_PACKET_SOCKET_MAX_BURST_TX 32
+
+typedef struct {
+ int sockfd; /**< socket descriptor */
+ odp_buffer_pool_t pool; /**< buffer pool to alloc packets from */
+ size_t buf_size; /**< size of buffer payload in 'pool' */
+ size_t max_frame_len; /**< max frame len = buf_size - sizeof(pkt_hdr) */
+ size_t frame_offset; /**< frame start offset from start of pkt buf */
+ unsigned char if_mac[ETH_ALEN]; /**< IF eth mac addr */
+} pkt_sock_t;
+
+/** packet mmap ring */
+struct ring {
+ struct iovec *rd;
+ unsigned frame_num;
+ int rd_num;
+
+ int sock;
+ int type;
+ int version;
+ uint8_t *mm_space;
+ size_t mm_len;
+ size_t rd_len;
+ int flen;
+
+ struct tpacket_req req;
+};
+ODP_ASSERT(offsetof(struct ring, mm_space) <= ODP_CACHE_LINE_SIZE,
+ ERR_STRUCT_RING);
+
+/** Packet socket using mmap rings for both Rx and Tx */
+typedef struct {
+ /** Packet mmap ring for Rx */
+ struct ring rx_ring ODP_ALIGNED_CACHE;
+ /** Packet mmap ring for Tx */
+ struct ring tx_ring ODP_ALIGNED_CACHE;
+
+ int sockfd ODP_ALIGNED_CACHE;
+ odp_buffer_pool_t pool;
+ size_t frame_offset; /**< frame start offset from start of pkt buf */
+ uint8_t *mmap_base;
+ unsigned mmap_len;
+ unsigned char if_mac[ETH_ALEN];
+ struct sockaddr_ll ll;
+} pkt_sock_mmap_t;
+
+/**
+ * Open & configure a raw packet socket
+ */
+int setup_pkt_sock(pkt_sock_t * const pkt_sock, char *netdev,
+ odp_buffer_pool_t pool);
+
+int setup_pkt_sock_mmap(pkt_sock_mmap_t * const pkt_sock, char *netdev,
+ odp_buffer_pool_t pool, int fanout);
+
+/**
+ * Close a packet socket
+ */
+int close_pkt_sock(pkt_sock_t * const pkt_sock);
+
+int close_pkt_sock_mmap(pkt_sock_mmap_t * const pkt_sock);
+
+/**
+ * Receive packets from the packet socket
+ */
+int recv_pkt_sock_basic(pkt_sock_t * const pkt_sock, odp_packet_t pkt_table[],
+ unsigned len);
+
+int recv_pkt_sock_mmsg(pkt_sock_t * const pkt_sock, odp_packet_t pkt_table[],
+ unsigned len);
+
+int recv_pkt_sock_mmap(pkt_sock_mmap_t * const pkt_sock,
+ odp_packet_t pkt_table[], unsigned len);
+/**
+ * Send packets through the packet socket
+ */
+int send_pkt_sock_basic(pkt_sock_t * const pkt_sock, odp_packet_t pkt_table[],
+ unsigned len);
+
+int send_pkt_sock_mmsg(pkt_sock_t * const pkt_sock, odp_packet_t pkt_table[],
+ unsigned len);
+
+int send_pkt_sock_mmap(pkt_sock_mmap_t * const pkt_sock,
+ odp_packet_t pkt_table[], unsigned len);
+#endif
new file mode 100644
@@ -0,0 +1,120 @@
+/* Copyright (c) 2013, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+
+/**
+ * @file
+ *
+ * ODP queue - implementation internal
+ */
+
+#ifndef ODP_QUEUE_INTERNAL_H_
+#define ODP_QUEUE_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp_queue.h>
+#include <odp_buffer_internal.h>
+#include <odp_packet_io.h>
+#include <odp_align.h>
+
+
+#define USE_TICKETLOCK
+
+#ifdef USE_TICKETLOCK
+#include <odp_ticketlock.h>
+#else
+#include <odp_spinlock.h>
+#endif
+
+#define QUEUE_MULTI_MAX 8
+
+#define QUEUE_STATUS_FREE 0
+#define QUEUE_STATUS_READY 1
+#define QUEUE_STATUS_NOTSCHED 2
+#define QUEUE_STATUS_SCHED 3
+
+/* forward declaration */
+union queue_entry_u;
+
+typedef int (*enq_func_t)(union queue_entry_u *, odp_buffer_hdr_t *);
+typedef odp_buffer_hdr_t *(*deq_func_t)(union queue_entry_u *);
+
+typedef int (*enq_multi_func_t)(union queue_entry_u *,
+ odp_buffer_hdr_t **, int);
+typedef int (*deq_multi_func_t)(union queue_entry_u *,
+ odp_buffer_hdr_t **, int);
+
+struct queue_entry_s {
+#ifdef USE_TICKETLOCK
+ odp_ticketlock_t lock ODP_ALIGNED_CACHE;
+#else
+ odp_spinlock_t lock ODP_ALIGNED_CACHE;
+#endif
+
+ odp_buffer_hdr_t *head;
+ odp_buffer_hdr_t *tail;
+ int status;
+
+ enq_func_t enqueue ODP_ALIGNED_CACHE;
+ deq_func_t dequeue;
+ enq_multi_func_t enqueue_multi;
+ deq_multi_func_t dequeue_multi;
+
+ odp_queue_t handle;
+ odp_buffer_t sched_buf;
+ odp_queue_type_t type;
+ odp_queue_param_t param;
+ odp_pktio_t pktin;
+ odp_pktio_t pktout;
+ char name[ODP_QUEUE_NAME_LEN];
+};
+
+typedef union queue_entry_u {
+ struct queue_entry_s s;
+ uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(struct queue_entry_s))];
+} queue_entry_t;
+
+
+queue_entry_t *get_qentry(uint32_t queue_id);
+
+int queue_enq(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr);
+odp_buffer_hdr_t *queue_deq(queue_entry_t *queue);
+
+int queue_enq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num);
+int queue_deq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num);
+
+void queue_lock(queue_entry_t *queue);
+void queue_unlock(queue_entry_t *queue);
+
+odp_buffer_t queue_sched_buf(odp_queue_t queue);
+int queue_sched_atomic(odp_queue_t handle);
+
+/* Convert a queue handle into the internal queue table index.
+ * Handles are index + 1 (see queue_from_id); presumably so handle
+ * value 0 can serve as "invalid" -- confirm against the public API. */
+static inline uint32_t queue_to_id(odp_queue_t handle)
+{
+	return handle - 1;
+}
+
+/* Convert an internal queue table index into a queue handle.
+ * Inverse of queue_to_id(): handle = index + 1. */
+static inline odp_queue_t queue_from_id(uint32_t queue_id)
+{
+	return queue_id + 1;
+}
+
+/* Resolve a queue handle to its queue entry.
+ * No validation here: get_qentry() is called with whatever index the
+ * handle decodes to. */
+static inline queue_entry_t *queue_to_qentry(odp_queue_t handle)
+{
+	uint32_t queue_id;
+
+	queue_id = queue_to_id(handle);
+	return get_qentry(queue_id);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
new file mode 100644
@@ -0,0 +1,31 @@
+/* Copyright (c) 2013, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+
+
+#ifndef ODP_SCHEDULE_INTERNAL_H_
+#define ODP_SCHEDULE_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#include <odp_buffer.h>
+#include <odp_queue.h>
+
+void odp_schedule_mask_set(odp_queue_t queue, int prio);
+
+odp_buffer_t odp_schedule_buffer_alloc(odp_queue_t queue);
+
+void odp_schedule_queue(odp_queue_t queue, int prio);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
new file mode 100644
@@ -0,0 +1,67 @@
+/* Copyright (c) 2013, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+
+
+#ifndef ODP_SPIN_INTERNAL_H_
+#define ODP_SPIN_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/**
+ * GCC memory barrier for ODP internal use
+ */
+static inline void odp_mem_barrier(void)
+{
+	/* Empty asm with a "memory" clobber: a compiler-only barrier.
+	 * It prevents the compiler from reordering memory accesses across
+	 * this point but emits no hardware fence instruction. */
+	asm __volatile__ ("" : : : "memory");
+}
+
+
+/**
+ * Spin loop for ODP internal use
+ */
+static inline void odp_spin(void)
+{
+#if defined __x86_64__ || defined __i386__
+
+	#ifdef __SSE2__
+	/* "pause" hints the CPU that this is a spin-wait loop */
+	asm __volatile__ ("pause");
+	#else
+	/* "rep; nop" is the same encoding, for pre-SSE2 assemblers */
+	asm __volatile__ ("rep; nop");
+	#endif
+
+#elif defined __arm__
+
+	#if __ARM_ARCH == 7
+	/* a few nops to back off; ARMv7 has no direct "pause" equivalent
+	 * used here -- NOTE(review): "yield" may be preferable, confirm */
+	asm __volatile__ ("nop");
+	asm __volatile__ ("nop");
+	asm __volatile__ ("nop");
+	asm __volatile__ ("nop");
+	#endif
+
+#elif defined __OCTEON__
+
+	/* back off with nops on Octeon MIPS */
+	asm __volatile__ ("nop");
+	asm __volatile__ ("nop");
+	asm __volatile__ ("nop");
+	asm __volatile__ ("nop");
+	asm __volatile__ ("nop");
+	asm __volatile__ ("nop");
+	asm __volatile__ ("nop");
+	asm __volatile__ ("nop");
+
+#endif
+}
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
new file mode 100644
@@ -0,0 +1,48 @@
+/* Copyright (c) 2013, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_barrier.h>
+#include <odp_sync.h>
+#include <odp_spin_internal.h>
+
+/* Initialize a barrier for 'count' participating callers.
+ * The barrier value cycles 0..2*count-1 (see odp_barrier_sync). */
+void odp_barrier_init_count(odp_barrier_t *barrier, int count)
+{
+	barrier->count = count;
+	barrier->bar = 0;
+	odp_sync_stores();	/* presumably a store fence so other threads
+				 * observe the init -- confirm semantics */
+}
+
+/*
+ * Efficient barrier_sync -
+ *
+ * Barriers are initialized with a count of the number of callers
+ * that must sync on the barrier before any may proceed.
+ *
+ * To avoid race conditions and to permit the barrier to be fully
+ * reusable, the barrier value cycles between 0..2*count-1. When
+ * synchronizing the wasless variable simply tracks which half of
+ * the cycle the barrier was in upon entry. Exit is when the
+ * barrier crosses to the other half of the cycle.
+ */
+
+void odp_barrier_sync(odp_barrier_t *barrier)
+{
+	int count;
+	int wasless;
+
+	odp_sync_stores();	/* publish this thread's stores before syncing */
+	/* record which half of the 0..2*count-1 cycle we entered in */
+	wasless = barrier->bar < barrier->count;
+	count = odp_atomic_fetch_inc_int(&barrier->bar);
+
+	if (count == 2*barrier->count-1) {
+		/* last arrival of the second half-cycle: wrap back to 0 */
+		barrier->bar = 0;
+	} else {
+		/* exit once the barrier crosses into the other half-cycle */
+		while ((barrier->bar < barrier->count) == wasless)
+			odp_spin();
+	}
+
+	odp_mem_barrier();	/* keep post-barrier accesses from being
+				 * reordered above the wait (compiler barrier) */
+}
new file mode 100644
@@ -0,0 +1,119 @@
+/* Copyright (c) 2013, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_buffer.h>
+#include <odp_buffer_internal.h>
+#include <odp_buffer_pool_internal.h>
+
+#include <string.h>
+#include <stdio.h>
+
+
+void *odp_buffer_addr(odp_buffer_t buf)
+{
+	/* Virtual data start address stored in the buffer header */
+	return odp_buf_to_hdr(buf)->addr;
+}
+
+
+size_t odp_buffer_size(odp_buffer_t buf)
+{
+	/* Maximum data size recorded in the buffer header */
+	return odp_buf_to_hdr(buf)->size;
+}
+
+
+int odp_buffer_type(odp_buffer_t buf)
+{
+	/* Buffer type (ODP_BUFFER_TYPE_*) from the buffer header */
+	return odp_buf_to_hdr(buf)->type;
+}
+
+
+int odp_buffer_is_scatter(odp_buffer_t buf)
+{
+	/* A buffer is part of a scatter/gather list iff its header's
+	 * scatter list is non-empty. Returns 1 or 0. */
+	return odp_buf_to_hdr(buf)->scatter.num_bufs != 0;
+}
+
+
+int odp_buffer_is_valid(odp_buffer_t buf)
+{
+	odp_buffer_bits_t handle;
+
+	/* Decode the handle; only the reserved invalid index marks an
+	 * invalid buffer. Returns 1 if valid, 0 otherwise. */
+	handle.u32 = buf;
+
+	return handle.index != ODP_BUFFER_INVALID_INDEX;
+}
+
+
+int odp_buffer_snprint(char *str, size_t n, odp_buffer_t buf)
+{
+ odp_buffer_hdr_t *hdr;
+ int len = 0;
+
+ if (!odp_buffer_is_valid(buf)) {
+ printf("Buffer is not valid.\n");
+ return len;
+ }
+
+ hdr = odp_buf_to_hdr(buf);
+
+ len += snprintf(&str[len], n-len,
+ "Buffer\n");
+ len += snprintf(&str[len], n-len,
+ " pool %i\n", hdr->pool);
+ len += snprintf(&str[len], n-len,
+ " index %"PRIu32"\n", hdr->index);
+ len += snprintf(&str[len], n-len,
+ " phy_addr %"PRIu64"\n", hdr->phys_addr);
+ len += snprintf(&str[len], n-len,
+ " addr %p\n", hdr->addr);
+ len += snprintf(&str[len], n-len,
+ " size %zu\n", hdr->size);
+ len += snprintf(&str[len], n-len,
+ " cur_offset %zu\n", hdr->cur_offset);
+ len += snprintf(&str[len], n-len,
+ " ref_count %i\n", hdr->ref_count);
+ len += snprintf(&str[len], n-len,
+ " type %i\n", hdr->type);
+ len += snprintf(&str[len], n-len,
+ " Scatter list\n");
+ len += snprintf(&str[len], n-len,
+ " num_bufs %i\n", hdr->scatter.num_bufs);
+ len += snprintf(&str[len], n-len,
+ " pos %i\n", hdr->scatter.pos);
+ len += snprintf(&str[len], n-len,
+ " total_len %zu\n", hdr->scatter.total_len);
+
+ return len;
+}
+
+
+/* Print buffer metadata to stdout.
+ *
+ * odp_buffer_snprint() follows snprintf semantics and may return a
+ * length larger than the size passed in when output is truncated, so
+ * clamp len before using it as an index: the old code could write
+ * str[len] past the end of the array. */
+void odp_buffer_print(odp_buffer_t buf)
+{
+	int max_len = 512;
+	char str[max_len];
+	int len;
+
+	len = odp_buffer_snprint(str, max_len-1, buf);
+	if (len < 0)
+		len = 0;
+	else if (len > max_len-1)
+		len = max_len-1;
+	str[len] = 0;
+
+	printf("\n%s\n", str);
+}
+
+/* Copy the scatter/gather list from buf_src to buf_dst.
+ * Currently an unimplemented stub: arguments are consumed only to
+ * silence unused-parameter warnings and nothing is copied. */
+void odp_buffer_copy_scatter(odp_buffer_t buf_dst, odp_buffer_t buf_src)
+{
+	(void)buf_dst;
+	(void)buf_src;
+}
new file mode 100644
@@ -0,0 +1,511 @@
+/* Copyright (c) 2013, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_std_types.h>
+#include <odp_buffer_pool.h>
+#include <odp_buffer_pool_internal.h>
+#include <odp_buffer_internal.h>
+#include <odp_packet_internal.h>
+#include <odp_shared_memory.h>
+#include <odp_align.h>
+#include <odp_internal.h>
+#include <odp_config.h>
+#include <odp_hints.h>
+#include <odp_debug.h>
+
+#include <string.h>
+#include <stdlib.h>
+
+
+#ifdef POOL_USE_TICKETLOCK
+#include <odp_ticketlock.h>
+#define LOCK(a) odp_ticketlock_lock(a)
+#define UNLOCK(a) odp_ticketlock_unlock(a)
+#define LOCK_INIT(a) odp_ticketlock_init(a)
+#else
+#include <odp_spinlock.h>
+#define LOCK(a) odp_spinlock_lock(a)
+#define UNLOCK(a) odp_spinlock_unlock(a)
+#define LOCK_INIT(a) odp_spinlock_init(a)
+#endif
+
+
+#if ODP_CONFIG_BUFFER_POOLS > ODP_BUFFER_MAX_POOLS
+#error ODP_CONFIG_BUFFER_POOLS > ODP_BUFFER_MAX_POOLS
+#endif
+
+#define NULL_INDEX ((uint32_t)-1)
+
+
/* Pool entry padded to a whole number of cache lines so that adjacent
 * entries in the pool table never share a cache line (no false sharing
 * between pool locks). */
typedef union pool_entry_u {
	struct pool_entry_s s;

	uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(struct pool_entry_s))];

} pool_entry_t;


/* Table holding every buffer pool, reserved from shared memory at init. */
typedef struct pool_table_t {
	pool_entry_t pool[ODP_CONFIG_BUFFER_POOLS];

} pool_table_t;


/* The pool table */
static pool_table_t *pool_tbl;

/* Pool entry pointers (for inlining) */
void *pool_entry_ptr[ODP_CONFIG_BUFFER_POOLS];


/* Per-thread allocation cache: the chunk each thread currently draws
 * buffers from, one slot per pool. */
static __thread odp_buffer_chunk_hdr_t *local_chunk[ODP_CONFIG_BUFFER_POOLS];
+
+
+static inline void set_handle(odp_buffer_hdr_t *hdr,
+ pool_entry_t *pool, uint32_t index)
+{
+ uint32_t pool_id = (uint32_t) pool->s.pool;
+
+ if (pool_id > ODP_CONFIG_BUFFER_POOLS)
+ ODP_ERR("set_handle: Bad pool id\n");
+
+ if (index > ODP_BUFFER_MAX_INDEX)
+ ODP_ERR("set_handle: Bad buffer index\n");
+
+ hdr->handle.pool = pool_id;
+ hdr->handle.index = index;
+}
+
+
+int odp_buffer_pool_init_global(void)
+{
+ odp_buffer_pool_t i;
+
+ pool_tbl = odp_shm_reserve("odp_buffer_pools",
+ sizeof(pool_table_t),
+ sizeof(pool_entry_t));
+
+ if (pool_tbl == NULL)
+ return -1;
+
+ memset(pool_tbl, 0, sizeof(pool_table_t));
+
+ for (i = 0; i < ODP_CONFIG_BUFFER_POOLS; i++) {
+ /* init locks */
+ pool_entry_t *pool = &pool_tbl->pool[i];
+ LOCK_INIT(&pool->s.lock);
+ pool->s.pool = i;
+
+ pool_entry_ptr[i] = pool;
+ }
+
+ ODP_DBG("\nBuffer pool init global\n");
+ ODP_DBG(" pool_entry_s size %zu\n", sizeof(struct pool_entry_s));
+ ODP_DBG(" pool_entry_t size %zu\n", sizeof(pool_entry_t));
+ ODP_DBG(" odp_buffer_hdr_t size %zu\n", sizeof(odp_buffer_hdr_t));
+ ODP_DBG("\n");
+ return 0;
+}
+
+
+static odp_buffer_hdr_t *index_to_hdr(pool_entry_t *pool, uint32_t index)
+{
+ odp_buffer_hdr_t *hdr;
+
+ hdr = (odp_buffer_hdr_t *)(pool->s.buf_base + index * pool->s.buf_size);
+ return hdr;
+}
+
+
+static void add_buf_index(odp_buffer_chunk_hdr_t *chunk_hdr, uint32_t index)
+{
+ uint32_t i = chunk_hdr->chunk.num_bufs;
+ chunk_hdr->chunk.buf_index[i] = index;
+ chunk_hdr->chunk.num_bufs++;
+}
+
+
+static uint32_t rem_buf_index(odp_buffer_chunk_hdr_t *chunk_hdr)
+{
+ uint32_t index;
+ uint32_t i;
+
+ i = chunk_hdr->chunk.num_bufs - 1;
+ index = chunk_hdr->chunk.buf_index[i];
+ chunk_hdr->chunk.num_bufs--;
+ return index;
+}
+
+
+static odp_buffer_chunk_hdr_t *next_chunk(pool_entry_t *pool,
+ odp_buffer_chunk_hdr_t *chunk_hdr)
+{
+ uint32_t index;
+
+ index = chunk_hdr->chunk.buf_index[ODP_BUFS_PER_CHUNK-1];
+ if (index == NULL_INDEX)
+ return NULL;
+ else
+ return (odp_buffer_chunk_hdr_t *)index_to_hdr(pool, index);
+}
+
+
+static odp_buffer_chunk_hdr_t *rem_chunk(pool_entry_t *pool)
+{
+ odp_buffer_chunk_hdr_t *chunk_hdr;
+
+ chunk_hdr = pool->s.head;
+ if (chunk_hdr == NULL) {
+ /* Pool is empty */
+ return NULL;
+ }
+
+ pool->s.head = next_chunk(pool, chunk_hdr);
+ pool->s.free_bufs -= ODP_BUFS_PER_CHUNK;
+
+ /* unlink */
+ rem_buf_index(chunk_hdr);
+ return chunk_hdr;
+}
+
+
+static void add_chunk(pool_entry_t *pool, odp_buffer_chunk_hdr_t *chunk_hdr)
+{
+ if (pool->s.head) {
+ /* link pool head to the chunk */
+ add_buf_index(chunk_hdr, pool->s.head->buf_hdr.index);
+ } else
+ add_buf_index(chunk_hdr, NULL_INDEX);
+
+ pool->s.head = chunk_hdr;
+ pool->s.free_bufs += ODP_BUFS_PER_CHUNK;
+}
+
+
+static void check_align(pool_entry_t *pool, odp_buffer_hdr_t *hdr)
+{
+ if (!ODP_ALIGNED_CHECK_POWER_2(hdr->addr, pool->s.payload_align)) {
+ ODP_ERR("check_align: payload align error %p, align %zu\n",
+ hdr->addr, pool->s.payload_align);
+ exit(0);
+ }
+
+ if (!ODP_ALIGNED_CHECK_POWER_2(hdr, ODP_CACHE_LINE_SIZE)) {
+ ODP_ERR("check_align: hdr align error %p, align %i\n",
+ hdr, ODP_CACHE_LINE_SIZE);
+ exit(0);
+ }
+}
+
+
/* Initialize one buffer header in place.
 *
 * ptr points to the header area of buffer 'index' inside the pool;
 * buf_type is the type written into the header (a chunk header for
 * every ODP_BUFS_PER_CHUNK:th buffer, otherwise the pool's type).
 * Only the header region is cleared; the payload is left untouched.
 */
static void fill_hdr(void *ptr, pool_entry_t *pool, uint32_t index,
		     int buf_type)
{
	odp_buffer_hdr_t *hdr = (odp_buffer_hdr_t *)ptr;
	size_t size = pool->s.hdr_size;
	uint8_t *payload = hdr->payload;

	/* chunk headers clear their own (larger) header size */
	if (buf_type == ODP_BUFFER_TYPE_CHUNK)
		size = sizeof(odp_buffer_chunk_hdr_t);

	/* packet headers keep their payload member at a different offset */
	if (pool->s.buf_type == ODP_BUFFER_TYPE_PACKET) {
		odp_packet_hdr_t *packet_hdr = ptr;
		payload = packet_hdr->payload;
	}

	memset(hdr, 0, size);

	set_handle(hdr, pool, index);

	/* payload starts buf_offset bytes after the header start;
	 * 'payload' already points hdr_size bytes in, hence the delta */
	hdr->addr = &payload[pool->s.buf_offset - pool->s.hdr_size];
	hdr->index = index;
	hdr->size = pool->s.payload_size;
	hdr->pool = pool->s.pool;
	hdr->type = buf_type;

	check_align(pool, hdr);
}
+
+
+static void link_bufs(pool_entry_t *pool)
+{
+ odp_buffer_chunk_hdr_t *chunk_hdr;
+ size_t hdr_size;
+ size_t payload_size;
+ size_t payload_align;
+ size_t size;
+ size_t offset;
+ size_t min_size;
+ uint64_t pool_size;
+ uintptr_t buf_base;
+ uint32_t index;
+ uintptr_t pool_base;
+ int buf_type;
+
+ buf_type = pool->s.buf_type;
+ payload_size = pool->s.payload_size;
+ payload_align = pool->s.payload_align;
+ pool_size = pool->s.pool_size;
+ pool_base = (uintptr_t) pool->s.pool_base_addr;
+
+ if (buf_type == ODP_BUFFER_TYPE_RAW)
+ hdr_size = sizeof(odp_buffer_hdr_t);
+ else if (buf_type == ODP_BUFFER_TYPE_PACKET)
+ hdr_size = sizeof(odp_packet_hdr_t);
+ else {
+ ODP_ERR("odp_buffer_pool_create: Bad type %i\n",
+ buf_type);
+ exit(0);
+ }
+
+ /* Chunk must fit into buffer payload.*/
+ min_size = sizeof(odp_buffer_chunk_hdr_t) - hdr_size;
+ if (payload_size < min_size)
+ payload_size = min_size;
+
+ /* Roundup payload size to full cachelines */
+ payload_size = ODP_CACHE_LINE_SIZE_ROUNDUP(payload_size);
+
+ /* Min cacheline alignment for buffer header and payload */
+ payload_align = ODP_CACHE_LINE_SIZE_ROUNDUP(payload_align);
+ offset = ODP_CACHE_LINE_SIZE_ROUNDUP(hdr_size);
+
+ /* Multiples of cacheline size */
+ if (payload_size > payload_align)
+ size = payload_size + offset;
+ else
+ size = payload_align + offset;
+
+ /* First buffer */
+ buf_base = ODP_ALIGN_ROUNDUP(pool_base + offset, payload_align)
+ - offset;
+
+ pool->s.hdr_size = hdr_size;
+ pool->s.buf_base = buf_base;
+ pool->s.buf_size = size;
+ pool->s.buf_offset = offset;
+ index = 0;
+
+ chunk_hdr = (odp_buffer_chunk_hdr_t *)index_to_hdr(pool, index);
+ pool->s.head = NULL;
+ pool_size -= buf_base - pool_base;
+
+ while (pool_size > ODP_BUFS_PER_CHUNK * size) {
+ int i;
+
+ fill_hdr(chunk_hdr, pool, index, ODP_BUFFER_TYPE_CHUNK);
+
+ index++;
+
+ for (i = 0; i < ODP_BUFS_PER_CHUNK - 1; i++) {
+ odp_buffer_hdr_t *hdr = index_to_hdr(pool, index);
+
+ fill_hdr(hdr, pool, index, buf_type);
+
+ add_buf_index(chunk_hdr, index);
+ index++;
+ }
+
+ add_chunk(pool, chunk_hdr);
+
+ chunk_hdr = (odp_buffer_chunk_hdr_t *)index_to_hdr(pool,
+ index);
+ pool->s.num_bufs += ODP_BUFS_PER_CHUNK;
+ pool_size -= ODP_BUFS_PER_CHUNK * size;
+ }
+}
+
+
+odp_buffer_pool_t odp_buffer_pool_create(const char *name,
+ void *base_addr, uint64_t size,
+ size_t buf_size, size_t buf_align,
+ int buf_type)
+{
+ odp_buffer_pool_t i;
+ pool_entry_t *pool;
+ odp_buffer_pool_t pool_id = ODP_BUFFER_POOL_INVALID;
+
+ for (i = 0; i < ODP_CONFIG_BUFFER_POOLS; i++) {
+ pool = get_pool_entry(i);
+
+ LOCK(&pool->s.lock);
+
+ if (pool->s.buf_base == 0) {
+ /* found free pool */
+
+ strncpy(pool->s.name, name,
+ ODP_BUFFER_POOL_NAME_LEN - 1);
+ pool->s.name[ODP_BUFFER_POOL_NAME_LEN - 1] = 0;
+ pool->s.pool_base_addr = base_addr;
+ pool->s.pool_size = size;
+ pool->s.payload_size = buf_size;
+ pool->s.payload_align = buf_align;
+ pool->s.buf_type = buf_type;
+
+ link_bufs(pool);
+
+ UNLOCK(&pool->s.lock);
+
+ pool_id = i;
+ break;
+ }
+
+ UNLOCK(&pool->s.lock);
+ }
+
+ return pool_id;
+}
+
+
+odp_buffer_pool_t odp_buffer_pool_lookup(const char *name)
+{
+ odp_buffer_pool_t i;
+ pool_entry_t *pool;
+
+ for (i = 0; i < ODP_CONFIG_BUFFER_POOLS; i++) {
+ pool = get_pool_entry(i);
+
+ LOCK(&pool->s.lock);
+ if (strcmp(name, pool->s.name) == 0) {
+ /* found it */
+ UNLOCK(&pool->s.lock);
+ return i;
+ }
+ UNLOCK(&pool->s.lock);
+ }
+
+ return ODP_BUFFER_POOL_INVALID;
+}
+
+
/* Allocate one buffer from the pool.
 *
 * A per-thread chunk cache (local_chunk) avoids taking the pool lock on
 * every allocation: the lock is only needed when a fresh chunk has to
 * be pulled from the shared pool. Returns ODP_BUFFER_INVALID when the
 * pool is exhausted.
 */
odp_buffer_t odp_buffer_alloc(odp_buffer_pool_t pool_id)
{
	pool_entry_t *pool;
	odp_buffer_chunk_hdr_t *chunk;
	odp_buffer_bits_t handle;

	pool = get_pool_entry(pool_id);
	chunk = local_chunk[pool_id];

	if (chunk == NULL) {
		/* local cache empty: refill from the shared pool */
		LOCK(&pool->s.lock);
		chunk = rem_chunk(pool);
		UNLOCK(&pool->s.lock);

		if (chunk == NULL)
			return ODP_BUFFER_INVALID;

		local_chunk[pool_id] = chunk;
	}

	if (chunk->chunk.num_bufs == 0) {
		/* give the chunk buffer itself away; restore its real
		 * buffer type first (it was marked as a chunk header) */
		local_chunk[pool_id] = NULL;
		chunk->buf_hdr.type = pool->s.buf_type;

		handle = chunk->buf_hdr.handle;
	} else {
		odp_buffer_hdr_t *hdr;
		uint32_t index;
		/* pop one buffer index from the cached chunk */
		index = rem_buf_index(chunk);
		hdr = index_to_hdr(pool, index);

		handle = hdr->handle;
	}

	return handle.u32;
}
+
+
/* Return a buffer to its pool.
 *
 * Mirrors odp_buffer_alloc: freed buffers are first gathered into the
 * per-thread chunk; only a full chunk is pushed back to the shared pool
 * under the lock. The freed buffer itself becomes the new chunk header
 * when no local chunk is open.
 */
void odp_buffer_free(odp_buffer_t buf)
{
	odp_buffer_hdr_t *hdr;
	odp_buffer_pool_t pool_id;
	pool_entry_t *pool;
	odp_buffer_chunk_hdr_t *chunk_hdr;

	hdr = odp_buf_to_hdr(buf);
	pool_id = hdr->pool;
	pool = get_pool_entry(pool_id);
	chunk_hdr = local_chunk[pool_id];

	if (chunk_hdr && chunk_hdr->chunk.num_bufs == ODP_BUFS_PER_CHUNK - 1) {
		/* Current chunk is full. Push back to the pool */
		LOCK(&pool->s.lock);
		add_chunk(pool, chunk_hdr);
		UNLOCK(&pool->s.lock);
		chunk_hdr = NULL;
	}

	if (chunk_hdr == NULL) {
		/* Use this buffer as the new local chunk header */
		chunk_hdr = (odp_buffer_chunk_hdr_t *)hdr;
		local_chunk[pool_id] = chunk_hdr;
		chunk_hdr->chunk.num_bufs = 0;
	} else {
		/* Add to current chunk */
		add_buf_index(chunk_hdr, hdr->index);
	}
}
+
+
+void odp_buffer_pool_print(odp_buffer_pool_t pool_id)
+{
+ pool_entry_t *pool;
+ odp_buffer_chunk_hdr_t *chunk_hdr;
+ uint32_t i;
+
+ pool = get_pool_entry(pool_id);
+
+ printf("Pool info\n");
+ printf("---------\n");
+ printf(" pool %i\n", pool->s.pool);
+ printf(" name %s\n", pool->s.name);
+ printf(" pool base %p\n", pool->s.pool_base_addr);
+ printf(" buf base 0x%"PRIxPTR"\n", pool->s.buf_base);
+ printf(" pool size 0x%"PRIx64"\n", pool->s.pool_size);
+ printf(" buf size %zu\n", pool->s.payload_size);
+ printf(" buf align %zu\n", pool->s.payload_align);
+ printf(" hdr size %zu\n", pool->s.hdr_size);
+ printf(" alloc size %zu\n", pool->s.buf_size);
+ printf(" offset to hdr %zu\n", pool->s.buf_offset);
+ printf(" num bufs %"PRIu64"\n", pool->s.num_bufs);
+ printf(" free bufs %"PRIu64"\n", pool->s.free_bufs);
+
+ /* first chunk */
+ chunk_hdr = pool->s.head;
+
+ if (chunk_hdr == NULL) {
+ ODP_ERR(" POOL EMPTY\n");
+ return;
+ }
+
+ printf("\n First chunk\n");
+
+ for (i = 0; i < chunk_hdr->chunk.num_bufs - 1; i++) {
+ uint32_t index;
+ odp_buffer_hdr_t *hdr;
+
+ index = chunk_hdr->chunk.buf_index[i];
+ hdr = index_to_hdr(pool, index);
+
+ printf(" [%i] addr %p, id %"PRIu32"\n", i, hdr->addr, index);
+ }
+
+ printf(" [%i] addr %p, id %"PRIu32"\n", i, chunk_hdr->buf_hdr.addr,
+ chunk_hdr->buf_hdr.index);
+
+ /* next chunk */
+ chunk_hdr = next_chunk(pool, chunk_hdr);
+
+ if (chunk_hdr) {
+ printf(" Next chunk\n");
+ printf(" addr %p, id %"PRIu32"\n", chunk_hdr->buf_hdr.addr,
+ chunk_hdr->buf_hdr.index);
+ }
+
+ printf("\n");
+}
new file mode 100644
@@ -0,0 +1,109 @@
+/* Copyright (c) 2013, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_coremask.h>
+#include <odp_debug.h>
+
+#include <stdlib.h>
+#include <string.h>
+
+#define MAX_CORE_NUM 64
+
+
+void odp_coremask_from_str(const char *str, odp_coremask_t *mask)
+{
+ uint64_t mask_u64;
+
+ if (strlen(str) > 18) {
+ /* more than 64 bits */
+ return;
+ }
+
+ mask_u64 = strtoull(str, NULL, 16);
+
+ odp_coremask_from_u64(&mask_u64, 1, mask);
+}
+
+
+void odp_coremask_to_str(char *str, int len, const odp_coremask_t *mask)
+{
+ int ret;
+
+ ret = snprintf(str, len, "0x%"PRIx64"", mask->_u64[0]);
+
+ if (ret >= 0 && ret < len) {
+ /* force trailing zero */
+ str[len-1] = '\0';
+ }
+}
+
+
+void odp_coremask_from_u64(const uint64_t *u64, int num, odp_coremask_t *mask)
+{
+ int i;
+
+ if (num > ODP_COREMASK_SIZE_U64) {
+ /* force max size */
+ num = ODP_COREMASK_SIZE_U64;
+ }
+
+ for (i = 0; i < num; i++)
+ mask->_u64[0] |= u64[i];
+}
+
+void odp_coremask_set(int core, odp_coremask_t *mask)
+{
+ /* should not be more than 63
+ * core no. should be from 0..63= 64bit
+ */
+ if (core >= MAX_CORE_NUM) {
+ ODP_ERR("invalid core count\n");
+ return;
+ }
+
+ mask->_u64[0] |= (1 << core);
+}
+
+void odp_coremask_clr(int core, odp_coremask_t *mask)
+{
+ /* should not be more than 63
+ * core no. should be from 0..63= 64bit
+ */
+ if (core >= MAX_CORE_NUM) {
+ ODP_ERR("invalid core count\n");
+ return;
+ }
+
+ mask->_u64[0] &= ~(1 << core);
+}
+
+
+int odp_coremask_isset(int core, const odp_coremask_t *mask)
+{
+ /* should not be more than 63
+ * core no. should be from 0..63= 64bit
+ */
+ if (core >= MAX_CORE_NUM) {
+ ODP_ERR("invalid core count\n");
+ return -1;
+ }
+
+ return (mask->_u64[0] >> core) & 1;
+}
+
+int odp_coremask_count(const odp_coremask_t *mask)
+{
+ uint64_t coremask = mask->_u64[0];
+ int cnt = 0;
+
+ while (coremask != 0) {
+ coremask >>= 1;
+ if (coremask & 1)
+ cnt++;
+ }
+
+ return cnt;
+}
new file mode 100644
@@ -0,0 +1,67 @@
+/* Copyright (c) 2013, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_init.h>
+#include <odp_internal.h>
+#include <odp_debug.h>
+
+
/* Global ODP initialization: call once, before any other ODP API.
 *
 * The sequence is order dependent (shared memory must exist before the
 * subsystems that reserve from it; queues before the scheduler).
 * Returns 0 on success, -1 as soon as any subsystem fails.
 */
int odp_init_global(void)
{
	/* NOTE(review): the return values of these two calls are ignored —
	 * confirm they cannot fail, or propagate their status. */
	odp_thread_init_global();

	odp_system_info_init();

	if (odp_shm_init_global()) {
		ODP_ERR("ODP shm init failed.\n");
		return -1;
	}

	if (odp_buffer_pool_init_global()) {
		ODP_ERR("ODP buffer pool init failed.\n");
		return -1;
	}

	if (odp_queue_init_global()) {
		ODP_ERR("ODP queue init failed.\n");
		return -1;
	}

	if (odp_schedule_init_global()) {
		ODP_ERR("ODP schedule init failed.\n");
		return -1;
	}

	if (odp_pktio_init_global()) {
		ODP_ERR("ODP packet io init failed.\n");
		return -1;
	}

	if (odp_timer_init_global()) {
		ODP_ERR("ODP timer init failed.\n");
		return -1;
	}

	return 0;
}
+
+
/* Per-thread ODP initialization: call once in every thread (with its
 * ODP thread id) before using thread-local ODP services.
 * Returns 0 on success, -1 on failure.
 */
int odp_init_local(int thr_id)
{
	odp_thread_init_local(thr_id);

	if (odp_pktio_init_local()) {
		ODP_ERR("ODP packet io local init failed.\n");
		return -1;
	}

	if (odp_schedule_init_local()) {
		ODP_ERR("ODP schedule local init failed.\n");
		return -1;
	}

	return 0;
}
new file mode 100644
@@ -0,0 +1,90 @@
+/* Copyright (c) 2013, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+
+#define _GNU_SOURCE
+#include <sched.h>
+
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+#include <assert.h>
+
+#include <helper/odp_linux.h>
+#include <odp_internal.h>
+#include <odp_thread.h>
+#include <odp_init.h>
+#include <odp_system_info.h>
+
+
/* Arguments handed to each new pthread: the ODP thread id (consumed by
 * odp_init_local) plus the user's start routine and its argument.
 * Heap-allocated per thread by odp_linux_pthread_create(). */
typedef struct {
	int thr_id;                      /* ODP thread id */
	void *(*start_routine) (void *); /* user entry point */
	void *arg;                       /* argument for start_routine */

} odp_start_args_t;
+
+
+static void *odp_run_start_routine(void *arg)
+{
+ odp_start_args_t *start_args = arg;
+
+ /* ODP thread local init */
+ odp_init_local(start_args->thr_id);
+
+ return start_args->start_routine(start_args->arg);
+}
+
+
+void odp_linux_pthread_create(odp_linux_pthread_t *thread_tbl, int num,
+ int first_core, void *(*start_routine) (void *), void *arg)
+{
+ int i;
+ cpu_set_t cpu_set;
+ odp_start_args_t *start_args;
+ int core_count;
+ int cpu;
+
+ core_count = odp_sys_core_count();
+
+ assert((first_core >= 0) && (first_core < core_count));
+ assert((num >= 0) && (num <= core_count));
+
+ memset(thread_tbl, 0, num * sizeof(odp_linux_pthread_t));
+
+ for (i = 0; i < num; i++) {
+ pthread_attr_init(&thread_tbl[i].attr);
+
+ CPU_ZERO(&cpu_set);
+
+ cpu = (first_core + i) % core_count;
+ CPU_SET(cpu, &cpu_set);
+
+ pthread_attr_setaffinity_np(&thread_tbl[i].attr,
+ sizeof(cpu_set_t), &cpu_set);
+
+ start_args = malloc(sizeof(odp_start_args_t));
+ memset(start_args, 0, sizeof(odp_start_args_t));
+ start_args->start_routine = start_routine;
+ start_args->arg = arg;
+
+ start_args->thr_id = odp_thread_create(cpu);
+
+ pthread_create(&thread_tbl[i].thread, &thread_tbl[i].attr,
+ odp_run_start_routine, start_args);
+ }
+}
+
+
+void odp_linux_pthread_join(odp_linux_pthread_t *thread_tbl, int num)
+{
+ int i;
+
+ for (i = 0; i < num; i++) {
+ /* Wait thread to exit */
+ pthread_join(thread_tbl[i].thread, NULL);
+ }
+}
new file mode 100644
@@ -0,0 +1,368 @@
+/* Copyright (c) 2013, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_packet.h>
+#include <odp_packet_internal.h>
+#include <odp_hints.h>
+#include <odp_byteorder.h>
+
+#include <helper/odp_eth.h>
+#include <helper/odp_ip.h>
+
+#include <string.h>
+#include <stdio.h>
+
+static inline uint8_t parse_ipv4(odp_packet_hdr_t *pkt_hdr, odp_ipv4hdr_t *ipv4,
+ size_t *offset_out);
+static inline uint8_t parse_ipv6(odp_packet_hdr_t *pkt_hdr, odp_ipv6hdr_t *ipv6,
+ size_t *offset_out);
+
/* Reset packet metadata to a known state.
 *
 * Clears only the region between the end of the embedded buffer header
 * and the start of the payload field (i.e. the packet-specific
 * metadata), preserving the buffer header itself, then marks the
 * L2/L3/L4 offsets invalid.
 */
void odp_packet_init(odp_packet_t pkt)
{
	odp_packet_hdr_t *const pkt_hdr = odp_packet_hdr(pkt);
	const size_t start_offset = ODP_FIELD_SIZEOF(odp_packet_hdr_t, buf_hdr);
	uint8_t *start;
	size_t len;

	/* zero [end of buf_hdr, start of payload) */
	start = (uint8_t *)pkt_hdr + start_offset;
	len = ODP_OFFSETOF(odp_packet_hdr_t, payload) - start_offset;
	memset(start, 0, len);

	pkt_hdr->l2_offset = (uint32_t) ODP_PACKET_OFFSET_INVALID;
	pkt_hdr->l3_offset = (uint32_t) ODP_PACKET_OFFSET_INVALID;
	pkt_hdr->l4_offset = (uint32_t) ODP_PACKET_OFFSET_INVALID;
}
+
+odp_packet_t odp_packet_from_buffer(odp_buffer_t buf)
+{
+ return (odp_packet_t)buf;
+}
+
+odp_buffer_t odp_buffer_from_packet(odp_packet_t pkt)
+{
+ return (odp_buffer_t)pkt;
+}
+
+void odp_packet_set_len(odp_packet_t pkt, size_t len)
+{
+ odp_packet_hdr(pkt)->frame_len = len;
+}
+
+size_t odp_packet_get_len(odp_packet_t pkt)
+{
+ return odp_packet_hdr(pkt)->frame_len;
+}
+
+uint8_t *odp_packet_buf_addr(odp_packet_t pkt)
+{
+ return odp_buffer_addr(odp_buffer_from_packet(pkt));
+}
+
+uint8_t *odp_packet_start(odp_packet_t pkt)
+{
+ return odp_packet_buf_addr(pkt) + odp_packet_hdr(pkt)->frame_offset;
+}
+
+
+uint8_t *odp_packet_l2(odp_packet_t pkt)
+{
+ const size_t offset = odp_packet_l2_offset(pkt);
+
+ if (odp_unlikely(offset == ODP_PACKET_OFFSET_INVALID))
+ return NULL;
+
+ return odp_packet_buf_addr(pkt) + offset;
+}
+
+size_t odp_packet_l2_offset(odp_packet_t pkt)
+{
+ return odp_packet_hdr(pkt)->l2_offset;
+}
+
+void odp_packet_set_l2_offset(odp_packet_t pkt, size_t offset)
+{
+ odp_packet_hdr(pkt)->l2_offset = offset;
+}
+
+uint8_t *odp_packet_l3(odp_packet_t pkt)
+{
+ const size_t offset = odp_packet_l3_offset(pkt);
+
+ if (odp_unlikely(offset == ODP_PACKET_OFFSET_INVALID))
+ return NULL;
+
+ return odp_packet_buf_addr(pkt) + offset;
+}
+
+size_t odp_packet_l3_offset(odp_packet_t pkt)
+{
+ return odp_packet_hdr(pkt)->l3_offset;
+}
+
+void odp_packet_set_l3_offset(odp_packet_t pkt, size_t offset)
+{
+ odp_packet_hdr(pkt)->l3_offset = offset;
+}
+
+uint8_t *odp_packet_l4(odp_packet_t pkt)
+{
+ const size_t offset = odp_packet_l4_offset(pkt);
+
+ if (odp_unlikely(offset == ODP_PACKET_OFFSET_INVALID))
+ return NULL;
+
+ return odp_packet_buf_addr(pkt) + offset;
+}
+
+size_t odp_packet_l4_offset(odp_packet_t pkt)
+{
+ return odp_packet_hdr(pkt)->l4_offset;
+}
+
+void odp_packet_set_l4_offset(odp_packet_t pkt, size_t offset)
+{
+ odp_packet_hdr(pkt)->l4_offset = offset;
+}
+
+/**
+ * Simple packet parser: eth, VLAN, IP, TCP/UDP/ICMP
+ *
+ * Internal function: caller is resposible for passing only valid packet handles
+ * , lengths and offsets (usually done&called in packet input).
+ *
+ * @param pkt Packet handle
+ * @param len Packet length in bytes
+ * @param frame_offset Byte offset to L2 header
+ */
+void odp_packet_parse(odp_packet_t pkt, size_t len, size_t frame_offset)
+{
+ odp_packet_hdr_t *const pkt_hdr = odp_packet_hdr(pkt);
+ odp_ethhdr_t *eth;
+ odp_vlanhdr_t *vlan;
+ odp_ipv4hdr_t *ipv4;
+ odp_ipv6hdr_t *ipv6;
+ uint16_t ethtype;
+ size_t offset = 0;
+ uint8_t ip_proto = 0;
+
+ pkt_hdr->input_flags.eth = 1;
+ pkt_hdr->frame_offset = frame_offset;
+ pkt_hdr->frame_len = len;
+
+ if (odp_unlikely(len < ODP_ETH_LEN_MIN)) {
+ pkt_hdr->error_flags.frame_len = 1;
+ return;
+ } else if (len > ODP_ETH_LEN_MAX) {
+ pkt_hdr->input_flags.jumbo = 1;
+ }
+
+ /* Assume valid L2 header, no CRC/FCS check in SW */
+ pkt_hdr->input_flags.l2 = 1;
+ pkt_hdr->l2_offset = frame_offset;
+
+ eth = (odp_ethhdr_t *)odp_packet_start(pkt);
+ ethtype = odp_be_to_cpu_16(eth->type);
+ vlan = (odp_vlanhdr_t *)ð->type;
+
+ if (ethtype == ODP_ETHTYPE_VLAN_OUTER) {
+ pkt_hdr->input_flags.vlan_qinq = 1;
+ ethtype = odp_be_to_cpu_16(vlan->tpid);
+ offset += sizeof(odp_vlanhdr_t);
+ vlan = &vlan[1];
+ }
+
+ if (ethtype == ODP_ETHTYPE_VLAN) {
+ pkt_hdr->input_flags.vlan = 1;
+ ethtype = odp_be_to_cpu_16(vlan->tpid);
+ offset += sizeof(odp_vlanhdr_t);
+ }
+
+ /* Set l3_offset+flag only for known ethtypes */
+ switch (ethtype) {
+ case ODP_ETHTYPE_IPV4:
+ pkt_hdr->input_flags.ipv4 = 1;
+ pkt_hdr->input_flags.l3 = 1;
+ pkt_hdr->l3_offset = frame_offset + ODP_ETHHDR_LEN + offset;
+ ipv4 = (odp_ipv4hdr_t *)odp_packet_l3(pkt);
+ ip_proto = parse_ipv4(pkt_hdr, ipv4, &offset);
+ break;
+ case ODP_ETHTYPE_IPV6:
+ pkt_hdr->input_flags.ipv6 = 1;
+ pkt_hdr->input_flags.l3 = 1;
+ pkt_hdr->l3_offset = frame_offset + ODP_ETHHDR_LEN + offset;
+ ipv6 = (odp_ipv6hdr_t *)odp_packet_l3(pkt);
+ ip_proto = parse_ipv6(pkt_hdr, ipv6, &offset);
+ break;
+ case ODP_ETHTYPE_ARP:
+ pkt_hdr->input_flags.arp = 1;
+ /* fall through */
+ default:
+ ip_proto = 0;
+ break;
+ }
+
+ switch (ip_proto) {
+ case ODP_IPPROTO_UDP:
+ pkt_hdr->input_flags.udp = 1;
+ pkt_hdr->input_flags.l4 = 1;
+ pkt_hdr->l4_offset = pkt_hdr->l3_offset + offset;
+ break;
+ case ODP_IPPROTO_TCP:
+ pkt_hdr->input_flags.tcp = 1;
+ pkt_hdr->input_flags.l4 = 1;
+ pkt_hdr->l4_offset = pkt_hdr->l3_offset + offset;
+ break;
+ case ODP_IPPROTO_SCTP:
+ pkt_hdr->input_flags.sctp = 1;
+ pkt_hdr->input_flags.l4 = 1;
+ pkt_hdr->l4_offset = pkt_hdr->l3_offset + offset;
+ break;
+ case ODP_IPPROTO_ICMP:
+ pkt_hdr->input_flags.icmp = 1;
+ pkt_hdr->input_flags.l4 = 1;
+ pkt_hdr->l4_offset = pkt_hdr->l3_offset + offset;
+ break;
+ default:
+ /* 0 or unhandled IP protocols, don't set L4 flag+offset */
+ if (pkt_hdr->input_flags.ipv6) {
+ /* IPv6 next_hdr is not L4, mark as IP-option instead */
+ pkt_hdr->input_flags.ipopt = 1;
+ }
+ break;
+ }
+}
+
/* Parse an IPv4 header and classify the packet.
 *
 * Sets error/input flags for bad IHL, IP options, fragments and IPsec
 * (ESP/AH). When L4 parsing should continue, writes the IPv4 header
 * length in bytes to *offset_out and returns the IP protocol number;
 * otherwise returns 0 and leaves *offset_out untouched.
 */
static inline uint8_t parse_ipv4(odp_packet_hdr_t *pkt_hdr, odp_ipv4hdr_t *ipv4,
				 size_t *offset_out)
{
	uint8_t ihl;
	uint16_t frag_offset;

	ihl = ODP_IPV4HDR_IHL(ipv4->ver_ihl);
	if (odp_unlikely(ihl < ODP_IPV4HDR_IHL_MIN)) {
		/* header shorter than the minimum is malformed */
		pkt_hdr->error_flags.ip_err = 1;
		return 0;
	}

	if (odp_unlikely(ihl > ODP_IPV4HDR_IHL_MIN)) {
		/* IP options present: stop, don't attempt L4 parsing */
		pkt_hdr->input_flags.ipopt = 1;
		return 0;
	}

	/* A packet is a fragment if:
	* "more fragments" flag is set (all fragments except the last)
	* OR
	* "fragment offset" field is nonzero (all fragments except the first)
	*/
	frag_offset = odp_be_to_cpu_16(ipv4->frag_offset);
	if (odp_unlikely(ODP_IPV4HDR_IS_FRAGMENT(frag_offset))) {
		pkt_hdr->input_flags.ipfrag = 1;
		return 0;
	}

	if (ipv4->proto == ODP_IPPROTO_ESP ||
	    ipv4->proto == ODP_IPPROTO_AH) {
		pkt_hdr->input_flags.ipsec = 1;
		return 0;
	}

	/* Set pkt_hdr->input_flags.ipopt when checking L4 hdrs after return */

	/* IHL counts 32-bit words */
	*offset_out = sizeof(uint32_t) * ihl;
	return ipv4->proto;
}
+
/* Parse an IPv6 header and classify the packet.
 *
 * Flags IPsec (ESP/AH) and fragment extension headers, in which case it
 * returns 0. Otherwise writes the fixed IPv6 header length to
 * *offset_out and returns next_hdr; extension header chains beyond the
 * first are deliberately not walked.
 */
static inline uint8_t parse_ipv6(odp_packet_hdr_t *pkt_hdr, odp_ipv6hdr_t *ipv6,
				 size_t *offset_out)
{
	if (ipv6->next_hdr == ODP_IPPROTO_ESP ||
	    ipv6->next_hdr == ODP_IPPROTO_AH) {
		pkt_hdr->input_flags.ipopt = 1;
		pkt_hdr->input_flags.ipsec = 1;
		return 0;
	}

	if (odp_unlikely(ipv6->next_hdr == ODP_IPPROTO_FRAG)) {
		pkt_hdr->input_flags.ipopt = 1;
		pkt_hdr->input_flags.ipfrag = 1;
		return 0;
	}

	/* Don't step through more extensions */
	*offset_out = ODP_IPV6HDR_LEN;
	return ipv6->next_hdr;
}
+
+void odp_packet_print(odp_packet_t pkt)
+{
+ int max_len = 512;
+ char str[max_len];
+ int len = 0;
+ int n = max_len-1;
+ odp_packet_hdr_t *hdr = odp_packet_hdr(pkt);
+
+ len += snprintf(&str[len], n-len, "Packet ");
+ len += odp_buffer_snprint(&str[len], n-len, (odp_buffer_t) pkt);
+ len += snprintf(&str[len], n-len,
+ " input_flags 0x%x\n", hdr->input_flags.all);
+ len += snprintf(&str[len], n-len,
+ " error_flags 0x%x\n", hdr->error_flags.all);
+ len += snprintf(&str[len], n-len,
+ " output_flags 0x%x\n", hdr->output_flags.all);
+ len += snprintf(&str[len], n-len,
+ " frame_offset %u\n", hdr->frame_offset);
+ len += snprintf(&str[len], n-len,
+ " l2_offset %u\n", hdr->l2_offset);
+ len += snprintf(&str[len], n-len,
+ " l3_offset %u\n", hdr->l3_offset);
+ len += snprintf(&str[len], n-len,
+ " l4_offset %u\n", hdr->l4_offset);
+ len += snprintf(&str[len], n-len,
+ " frame_len %u\n", hdr->frame_len);
+ len += snprintf(&str[len], n-len,
+ " input %u\n", hdr->input);
+ str[len] = '\0';
+
+ printf("\n%s\n", str);
+}
+
/* Copy packet metadata and frame payload from pkt_src to pkt_dst.
 *
 * The destination buffer must hold the source frame at its frame
 * offset. Returns 0 on success, -1 on an invalid handle or a too-small
 * destination. The scatter list is copied via odp_buffer_copy_scatter()
 * (currently a no-op placeholder in this backend).
 */
int odp_packet_copy(odp_packet_t pkt_dst, odp_packet_t pkt_src)
{
	odp_packet_hdr_t *const pkt_hdr_dst = odp_packet_hdr(pkt_dst);
	odp_packet_hdr_t *const pkt_hdr_src = odp_packet_hdr(pkt_src);
	const size_t start_offset = ODP_FIELD_SIZEOF(odp_packet_hdr_t, buf_hdr);
	uint8_t *start_src;
	uint8_t *start_dst;
	size_t len;

	if (pkt_dst == ODP_PACKET_INVALID || pkt_src == ODP_PACKET_INVALID)
		return -1;

	/* destination must fit offset + frame of the source */
	if (pkt_hdr_dst->buf_hdr.size <
		pkt_hdr_src->frame_len + pkt_hdr_src->frame_offset)
		return -1;

	/* Copy packet header: only the metadata between the embedded
	 * buffer header and the payload field */
	start_dst = (uint8_t *)pkt_hdr_dst + start_offset;
	start_src = (uint8_t *)pkt_hdr_src + start_offset;
	len = ODP_OFFSETOF(odp_packet_hdr_t, payload) - start_offset;
	memcpy(start_dst, start_src, len);

	/* Copy frame payload */
	start_dst = (uint8_t *)odp_packet_start(pkt_dst);
	start_src = (uint8_t *)odp_packet_start(pkt_src);
	len = pkt_hdr_src->frame_len;
	memcpy(start_dst, start_src, len);

	/* Copy useful things from the buffer header */
	pkt_hdr_dst->buf_hdr.cur_offset = pkt_hdr_src->buf_hdr.cur_offset;

	/* Create a copy of the scatter list */
	odp_buffer_copy_scatter(odp_buffer_from_packet(pkt_dst),
				odp_buffer_from_packet(pkt_src));

	return 0;
}
new file mode 100644
@@ -0,0 +1,115 @@
+/* Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_packet_flags.h>
+#include <odp_packet_internal.h>
+
+
+int odp_packet_error(odp_packet_t pkt)
+{
+ return (odp_packet_hdr(pkt)->error_flags.all != 0);
+}
+
+/* Get Error Flags */
+
+int odp_packet_errflag_frame_len(odp_packet_t pkt)
+{
+ return odp_packet_hdr(pkt)->error_flags.frame_len;
+}
+
+/* Get Input Flags */
+
+int odp_packet_inflag_l2(odp_packet_t pkt)
+{
+ return odp_packet_hdr(pkt)->input_flags.l2;
+}
+
+int odp_packet_inflag_l3(odp_packet_t pkt)
+{
+ return odp_packet_hdr(pkt)->input_flags.l3;
+}
+
+int odp_packet_inflag_l4(odp_packet_t pkt)
+{
+ return odp_packet_hdr(pkt)->input_flags.l4;
+}
+
+int odp_packet_inflag_eth(odp_packet_t pkt)
+{
+ return odp_packet_hdr(pkt)->input_flags.eth;
+}
+
+int odp_packet_inflag_jumbo(odp_packet_t pkt)
+{
+ return odp_packet_hdr(pkt)->input_flags.jumbo;
+}
+
+int odp_packet_inflag_vlan(odp_packet_t pkt)
+{
+ return odp_packet_hdr(pkt)->input_flags.vlan;
+}
+
+int odp_packet_inflag_vlan_qinq(odp_packet_t pkt)
+{
+ return odp_packet_hdr(pkt)->input_flags.vlan_qinq;
+}
+
+int odp_packet_inflag_arp(odp_packet_t pkt)
+{
+ return odp_packet_hdr(pkt)->input_flags.arp;
+}
+
+int odp_packet_inflag_ipv4(odp_packet_t pkt)
+{
+ return odp_packet_hdr(pkt)->input_flags.ipv4;
+}
+
+int odp_packet_inflag_ipv6(odp_packet_t pkt)
+{
+ return odp_packet_hdr(pkt)->input_flags.ipv6;
+}
+
+int odp_packet_inflag_ipfrag(odp_packet_t pkt)
+{
+ return odp_packet_hdr(pkt)->input_flags.ipfrag;
+}
+
+int odp_packet_inflag_ipopt(odp_packet_t pkt)
+{
+ return odp_packet_hdr(pkt)->input_flags.ipopt;
+}
+
+int odp_packet_inflag_ipsec(odp_packet_t pkt)
+{
+ return odp_packet_hdr(pkt)->input_flags.ipsec;
+}
+
+int odp_packet_inflag_udp(odp_packet_t pkt)
+{
+ return odp_packet_hdr(pkt)->input_flags.udp;
+}
+
+int odp_packet_inflag_tcp(odp_packet_t pkt)
+{
+ return odp_packet_hdr(pkt)->input_flags.tcp;
+}
+
+int odp_packet_inflag_sctp(odp_packet_t pkt)
+{
+ return odp_packet_hdr(pkt)->input_flags.sctp;
+}
+
+int odp_packet_inflag_icmp(odp_packet_t pkt)
+{
+ return odp_packet_hdr(pkt)->input_flags.icmp;
+}
+
+/* Set Output Flags */
+
+void odp_packet_outflag_l4_chksum(odp_packet_t pkt)
+{
+ odp_packet_hdr(pkt)->output_flags.l4_chksum = 1;
+}
new file mode 100644
@@ -0,0 +1,537 @@
+/* Copyright (c) 2013, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_packet_io.h>
+#include <odp_packet_io_internal.h>
+#include <odp_packet_io_queue.h>
+#include <odp_packet.h>
+#include <odp_packet_internal.h>
+#include <odp_internal.h>
+#include <odp_spinlock.h>
+#include <odp_shared_memory.h>
+#include <odp_packet_socket.h>
+#ifdef ODP_HAVE_NETMAP
+#include <odp_packet_netmap.h>
+#endif
+#include <odp_hints.h>
+#include <odp_config.h>
+#include <odp_queue_internal.h>
+#include <odp_schedule_internal.h>
+#include <odp_debug.h>
+
+#include <odp_pktio_socket.h>
+#ifdef ODP_HAVE_NETMAP
+#include <odp_pktio_netmap.h>
+#endif
+
+#include <string.h>
+
+/* Global table of all packet I/O entries. Reserved from shared memory in
+ * odp_pktio_init_global(); pktio handles are 1-based indexes into it
+ * (see get_entry()). */
+typedef struct {
+ pktio_entry_t entries[ODP_CONFIG_PKTIO_ENTRIES];
+} pktio_table_t;
+
+static pktio_table_t *pktio_tbl;
+
+
+static pktio_entry_t *get_entry(odp_pktio_t id)
+{
+ if (odp_unlikely(id == ODP_PKTIO_INVALID ||
+ id > ODP_CONFIG_PKTIO_ENTRIES))
+ return NULL;
+
+ return &pktio_tbl->entries[id - 1];
+}
+
+/* One-time global initialisation of the pktio subsystem: reserves the
+ * shared-memory entry table, zeroes it, and creates one default PKTOUT
+ * queue per entry. Returns 0 on success, -1 on shm or queue failure.
+ * NOTE(review): on a mid-loop queue-creation failure, queues created for
+ * earlier entries are not destroyed before returning. */
+int odp_pktio_init_global(void)
+{
+ char name[ODP_QUEUE_NAME_LEN];
+ pktio_entry_t *pktio_entry;
+ queue_entry_t *queue_entry;
+ odp_queue_t qid;
+ int id;
+
+ pktio_tbl = odp_shm_reserve("odp_pktio_entries",
+ sizeof(pktio_table_t),
+ sizeof(pktio_entry_t));
+ if (pktio_tbl == NULL)
+ return -1;
+
+ memset(pktio_tbl, 0, sizeof(pktio_table_t));
+
+ /* IDs are 1-based; get_entry() subtracts one */
+ for (id = 1; id <= ODP_CONFIG_PKTIO_ENTRIES; ++id) {
+ pktio_entry = get_entry(id);
+
+ odp_spinlock_init(&pktio_entry->s.lock);
+
+ /* Create a default output queue for each pktio resource */
+ snprintf(name, sizeof(name), "%i-pktio_outq_default", (int)id);
+ name[ODP_QUEUE_NAME_LEN-1] = '\0';
+
+ qid = odp_queue_create(name, ODP_QUEUE_TYPE_PKTOUT, NULL);
+ if (qid == ODP_QUEUE_INVALID)
+ return -1;
+ pktio_entry->s.outq_default = qid;
+
+ /* Back-link the queue to this pktio so pktout_enqueue()
+ can find the interface to transmit on */
+ queue_entry = queue_to_qentry(qid);
+ queue_entry->s.pktout = id;
+ }
+
+ return 0;
+}
+
+/* Per-thread pktio initialisation. Nothing to do for this
+ * implementation; present to satisfy the init API. Always returns 0. */
+int odp_pktio_init_local(void)
+{
+ return 0;
+}
+
+/* Entry state/lock helpers. 'taken' marks an entry as allocated; reads of
+ * it outside the lock are re-checked under the lock by the caller (see
+ * alloc_lock_pktio_entry()). */
+
+static int is_free(pktio_entry_t *entry)
+{
+ return (entry->s.taken == 0);
+}
+
+static void set_free(pktio_entry_t *entry)
+{
+ entry->s.taken = 0;
+}
+
+static void set_taken(pktio_entry_t *entry)
+{
+ entry->s.taken = 1;
+}
+
+/* Per-entry spinlock; serialises open/close/recv/send on one interface */
+static void lock_entry(pktio_entry_t *entry)
+{
+ odp_spinlock_lock(&entry->s.lock);
+}
+
+static void unlock_entry(pktio_entry_t *entry)
+{
+ odp_spinlock_unlock(&entry->s.lock);
+}
+
+/* Initialise a freshly claimed entry for the transport selected in
+ * 'params'. Called with the entry locked (from alloc_lock_pktio_entry());
+ * marks it taken and clears the per-transport state. */
+static void init_pktio_entry(pktio_entry_t *entry, odp_pktio_params_t *params)
+{
+ set_taken(entry);
+ entry->s.inq_default = ODP_QUEUE_INVALID;
+ switch (params->type) {
+ case ODP_PKTIO_TYPE_SOCKET_BASIC:
+ case ODP_PKTIO_TYPE_SOCKET_MMSG:
+ case ODP_PKTIO_TYPE_SOCKET_MMAP:
+ /* both socket variants share the entry; clear both states */
+ memset(&entry->s.pkt_sock, 0, sizeof(entry->s.pkt_sock));
+ memset(&entry->s.pkt_sock_mmap, 0,
+ sizeof(entry->s.pkt_sock_mmap));
+ break;
+#ifdef ODP_HAVE_NETMAP
+ case ODP_PKTIO_TYPE_NETMAP:
+ memset(&entry->s.pkt_nm, 0, sizeof(entry->s.pkt_nm));
+ break;
+#endif
+ default:
+ ODP_ERR("Packet I/O type not supported. Please recompile\n");
+ break;
+ }
+ /* Save pktio parameters, type is the most useful */
+ memcpy(&entry->s.params, params, sizeof(*params));
+}
+
+/* Claim the first free entry in the table. Uses an unlocked pre-check
+ * followed by a re-check under the entry lock to avoid taking the lock on
+ * busy entries. On success the 1-based handle is returned and THE ENTRY
+ * REMAINS LOCKED — the caller (odp_pktio_open) must unlock it. Returns
+ * ODP_PKTIO_INVALID when the table is full. */
+static odp_pktio_t alloc_lock_pktio_entry(odp_pktio_params_t *params)
+{
+ odp_pktio_t id;
+ pktio_entry_t *entry;
+ int i;
+
+ for (i = 0; i < ODP_CONFIG_PKTIO_ENTRIES; ++i) {
+ entry = &pktio_tbl->entries[i];
+ if (is_free(entry)) {
+ lock_entry(entry);
+ /* re-check: another thread may have taken it */
+ if (is_free(entry)) {
+ init_pktio_entry(entry, params);
+ id = i + 1;
+ return id; /* return with entry locked! */
+ }
+ unlock_entry(entry);
+ }
+ }
+
+ return ODP_PKTIO_INVALID;
+}
+
+static int free_pktio_entry(odp_pktio_t id)
+{
+ pktio_entry_t *entry = get_entry(id);
+
+ if (entry == NULL)
+ return -1;
+
+ set_free(entry);
+
+ return 0;
+}
+
+odp_pktio_t odp_pktio_open(char *dev, odp_buffer_pool_t pool,
+ odp_pktio_params_t *params)
+{
+ odp_pktio_t id;
+ pktio_entry_t *pktio_entry;
+ int res;
+
+ if (params == NULL) {
+ ODP_ERR("Invalid pktio params\n");
+ return ODP_PKTIO_INVALID;
+ }
+
+ switch (params->type) {
+ case ODP_PKTIO_TYPE_SOCKET_BASIC:
+ case ODP_PKTIO_TYPE_SOCKET_MMSG:
+ case ODP_PKTIO_TYPE_SOCKET_MMAP:
+ ODP_DBG("Allocating socket pktio\n");
+ break;
+#ifdef ODP_HAVE_NETMAP
+ case ODP_PKTIO_TYPE_NETMAP:
+ ODP_DBG("Allocating netmap pktio\n");
+ break;
+#endif
+ default:
+ ODP_ERR("Invalid pktio type: %02x\n", params->type);
+ return ODP_PKTIO_INVALID;
+ }
+
+ id = alloc_lock_pktio_entry(params);
+ if (id == ODP_PKTIO_INVALID) {
+ ODP_ERR("No resources available.\n");
+ return ODP_PKTIO_INVALID;
+ }
+ /* if successful, alloc_pktio_entry() returns with the entry locked */
+
+ pktio_entry = get_entry(id);
+
+ switch (params->type) {
+ case ODP_PKTIO_TYPE_SOCKET_BASIC:
+ case ODP_PKTIO_TYPE_SOCKET_MMSG:
+ res = setup_pkt_sock(&pktio_entry->s.pkt_sock, dev, pool);
+ if (res == -1) {
+ close_pkt_sock(&pktio_entry->s.pkt_sock);
+ free_pktio_entry(id);
+ id = ODP_PKTIO_INVALID;
+ }
+ break;
+ case ODP_PKTIO_TYPE_SOCKET_MMAP:
+ res = setup_pkt_sock_mmap(&pktio_entry->s.pkt_sock_mmap, dev,
+ pool, params->sock_params.fanout);
+ if (res == -1) {
+ close_pkt_sock_mmap(&pktio_entry->s.pkt_sock_mmap);
+ free_pktio_entry(id);
+ id = ODP_PKTIO_INVALID;
+ }
+ break;
+#ifdef ODP_HAVE_NETMAP
+ case ODP_PKTIO_TYPE_NETMAP:
+
+ res = setup_pkt_netmap(&pktio_entry->s.pkt_nm, dev,
+ pool, ¶ms->nm_params);
+ if (res == -1) {
+ close_pkt_netmap(&pktio_entry->s.pkt_nm);
+ free_pktio_entry(id);
+ id = ODP_PKTIO_INVALID;
+ }
+ break;
+#endif
+ default:
+ free_pktio_entry(id);
+ id = ODP_PKTIO_INVALID;
+ ODP_ERR("This type of I/O is not supported. Please recompile.\n");
+ break;
+ }
+
+ unlock_entry(pktio_entry);
+ return id;
+}
+
+int odp_pktio_close(odp_pktio_t id)
+{
+ pktio_entry_t *entry;
+ int res = -1;
+
+ entry = get_entry(id);
+ if (entry == NULL)
+ return -1;
+
+ lock_entry(entry);
+ if (!is_free(entry)) {
+ switch (entry->s.params.type) {
+ case ODP_PKTIO_TYPE_SOCKET_BASIC:
+ case ODP_PKTIO_TYPE_SOCKET_MMSG:
+ res = close_pkt_sock(&entry->s.pkt_sock);
+ break;
+ case ODP_PKTIO_TYPE_SOCKET_MMAP:
+ res = close_pkt_sock_mmap(&entry->s.pkt_sock_mmap);
+ break;
+#ifdef ODP_HAVE_NETMAP
+ case ODP_PKTIO_TYPE_NETMAP:
+ res = close_pkt_netmap(&entry->s.pkt_nm);
+ break;
+#endif
+ default:
+ break;
+ res |= free_pktio_entry(id);
+ }
+ }
+ unlock_entry(entry);
+
+ if (res != 0)
+ return -1;
+
+ return 0;
+}
+
+/* Record the ingress interface handle in the packet header (set on each
+ * packet by odp_pktio_recv()). */
+void odp_pktio_set_input(odp_packet_t pkt, odp_pktio_t pktio)
+{
+ odp_packet_hdr(pkt)->input = pktio;
+}
+
+/* Return the interface the packet was received on. */
+odp_pktio_t odp_pktio_get_input(odp_packet_t pkt)
+{
+ return odp_packet_hdr(pkt)->input;
+}
+
+/* Receive up to 'len' packets from the interface into pkt_table[].
+ * The entry lock is held for the whole burst, serialising concurrent
+ * receivers on the same interface. Each delivered packet is stamped with
+ * its input interface. Returns the number of packets received (possibly
+ * 0), or -1 on invalid handle / unsupported type. */
+int odp_pktio_recv(odp_pktio_t id, odp_packet_t pkt_table[], unsigned len)
+{
+ pktio_entry_t *pktio_entry = get_entry(id);
+ int pkts;
+ int i;
+
+ if (pktio_entry == NULL)
+ return -1;
+
+ lock_entry(pktio_entry);
+ switch (pktio_entry->s.params.type) {
+ case ODP_PKTIO_TYPE_SOCKET_BASIC:
+ pkts = recv_pkt_sock_basic(&pktio_entry->s.pkt_sock,
+ pkt_table, len);
+ break;
+ case ODP_PKTIO_TYPE_SOCKET_MMSG:
+ pkts = recv_pkt_sock_mmsg(&pktio_entry->s.pkt_sock,
+ pkt_table, len);
+ break;
+ case ODP_PKTIO_TYPE_SOCKET_MMAP:
+ pkts = recv_pkt_sock_mmap(&pktio_entry->s.pkt_sock_mmap,
+ pkt_table, len);
+ break;
+#ifdef ODP_HAVE_NETMAP
+ case ODP_PKTIO_TYPE_NETMAP:
+ pkts = recv_pkt_netmap(&pktio_entry->s.pkt_nm, pkt_table, len);
+ break;
+#endif
+ default:
+ pkts = -1;
+ break;
+ }
+
+ unlock_entry(pktio_entry);
+ if (pkts < 0)
+ return pkts;
+
+ /* Stamp the ingress interface on every received packet */
+ for (i = 0; i < pkts; ++i)
+ odp_pktio_set_input(pkt_table[i], id);
+
+ return pkts;
+}
+
+int odp_pktio_send(odp_pktio_t id, odp_packet_t pkt_table[], unsigned len)
+{
+ pktio_entry_t *pktio_entry = get_entry(id);
+ int pkts;
+
+ if (pktio_entry == NULL)
+ return -1;
+
+ lock_entry(pktio_entry);
+ switch (pktio_entry->s.params.type) {
+ case ODP_PKTIO_TYPE_SOCKET_BASIC:
+ pkts = send_pkt_sock_basic(&pktio_entry->s.pkt_sock,
+ pkt_table, len);
+ break;
+ case ODP_PKTIO_TYPE_SOCKET_MMSG:
+ pkts = send_pkt_sock_mmsg(&pktio_entry->s.pkt_sock,
+ pkt_table, len);
+ break;
+ case ODP_PKTIO_TYPE_SOCKET_MMAP:
+ pkts = send_pkt_sock_mmap(&pktio_entry->s.pkt_sock_mmap,
+ pkt_table, len);
+ break;
+#ifdef ODP_HAVE_NETMAP
+ case ODP_PKTIO_TYPE_NETMAP:
+ pkts = send_pkt_netmap(&pktio_entry->s.pkt_nm,
+ pkt_table, len);
+ break;
+#endif
+ default:
+ pkts = -1;
+ }
+ unlock_entry(pktio_entry);
+
+ return pkts;
+}
+
+/* Attach 'queue' as the default input queue of the pktio and hand it to
+ * the scheduler. The queue must be of type PKTIN. Returns 0 on success,
+ * -1 on invalid pktio/queue or wrong queue type. */
+int odp_pktio_inq_setdef(odp_pktio_t id, odp_queue_t queue)
+{
+ pktio_entry_t *pktio_entry = get_entry(id);
+ queue_entry_t *qentry = queue_to_qentry(queue);
+
+ if (pktio_entry == NULL || qentry == NULL)
+ return -1;
+
+ if (qentry->s.type != ODP_QUEUE_TYPE_PKTIN)
+ return -1;
+
+ lock_entry(pktio_entry);
+ pktio_entry->s.inq_default = queue;
+ unlock_entry(pktio_entry);
+
+ /* back-link the queue to this pktio so pktin_dequeue() can poll it */
+ queue_lock(qentry);
+ qentry->s.pktin = id;
+ qentry->s.status = QUEUE_STATUS_SCHED;
+ queue_unlock(qentry);
+
+ odp_schedule_queue(queue, qentry->s.param.sched.prio);
+
+ return 0;
+}
+
+/* Detach the default input queue by setting it to ODP_QUEUE_INVALID.
+ * NOTE(review): setdef() looks up queue_to_qentry(ODP_QUEUE_INVALID) and
+ * checks its type, so this call presumably relies on that lookup failing
+ * (returning -1) or tolerating the invalid handle — verify against the
+ * queue implementation. */
+int odp_pktio_inq_remdef(odp_pktio_t id)
+{
+ return odp_pktio_inq_setdef(id, ODP_QUEUE_INVALID);
+}
+
+odp_queue_t odp_pktio_inq_getdef(odp_pktio_t id)
+{
+ pktio_entry_t *pktio_entry = get_entry(id);
+
+ if (pktio_entry == NULL)
+ return ODP_QUEUE_INVALID;
+
+ return pktio_entry->s.inq_default;
+}
+
+odp_queue_t odp_pktio_outq_getdef(odp_pktio_t id)
+{
+ pktio_entry_t *pktio_entry = get_entry(id);
+
+ if (pktio_entry == NULL)
+ return ODP_QUEUE_INVALID;
+
+ return pktio_entry->s.outq_default;
+}
+
+/* Queue-operation hooks for PKTOUT queues: enqueue transmits directly on
+ * the queue's back-linked pktio; dequeue is a no-op (packets leave via
+ * the wire, not via the queue). */
+
+/* Transmit a single buffer; 0 if it was sent, -1 otherwise. */
+int pktout_enqueue(queue_entry_t *qentry, odp_buffer_hdr_t *buf_hdr)
+{
+ odp_packet_t pkt = odp_packet_from_buffer(buf_hdr->handle.handle);
+ int len = 1;
+ int nbr;
+
+ nbr = odp_pktio_send(qentry->s.pktout, &pkt, len);
+ return (nbr == len ? 0 : -1);
+}
+
+/* Nothing can be dequeued from an output queue. */
+odp_buffer_hdr_t *pktout_dequeue(queue_entry_t *qentry)
+{
+ (void)qentry;
+ return NULL;
+}
+
+/* Transmit a burst; 0 only if the whole burst was sent, -1 otherwise. */
+int pktout_enq_multi(queue_entry_t *qentry, odp_buffer_hdr_t *buf_hdr[],
+ int num)
+{
+ odp_packet_t pkt_tbl[QUEUE_MULTI_MAX];
+ int nbr;
+ int i;
+
+ for (i = 0; i < num; ++i)
+ pkt_tbl[i] = odp_packet_from_buffer(buf_hdr[i]->handle.handle);
+
+ nbr = odp_pktio_send(qentry->s.pktout, pkt_tbl, num);
+ return (nbr == num ? 0 : -1);
+}
+
+/* Multi-dequeue from an output queue yields nothing. */
+int pktout_deq_multi(queue_entry_t *qentry, odp_buffer_hdr_t *buf_hdr[],
+ int num)
+{
+ (void)qentry;
+ (void)buf_hdr;
+ (void)num;
+
+ return 0;
+}
+
+/* Queue-operation hooks for PKTIN queues: enqueue uses the default queue
+ * behaviour; dequeue polls the back-linked pktio when the queue is empty. */
+
+int pktin_enqueue(queue_entry_t *qentry, odp_buffer_hdr_t *buf_hdr)
+{
+ /* Use default action */
+ return queue_enq(qentry, buf_hdr);
+}
+
+/* Dequeue one buffer. If the queue is empty, poll the interface for a
+ * burst: the first received packet is returned directly and any
+ * remaining packets are enqueued for subsequent calls. */
+odp_buffer_hdr_t *pktin_dequeue(queue_entry_t *qentry)
+{
+ odp_buffer_hdr_t *buf_hdr;
+
+ buf_hdr = queue_deq(qentry);
+
+ if (buf_hdr == NULL) {
+ odp_packet_t pkt;
+ odp_buffer_t buf;
+ odp_packet_t pkt_tbl[QUEUE_MULTI_MAX];
+ odp_buffer_hdr_t *tmp_hdr_tbl[QUEUE_MULTI_MAX];
+ int pkts, i, j;
+
+ pkts = odp_pktio_recv(qentry->s.pktin, pkt_tbl,
+ QUEUE_MULTI_MAX);
+
+ if (pkts > 0) {
+ /* packet 0 is handed back to the caller */
+ pkt = pkt_tbl[0];
+ buf = odp_buffer_from_packet(pkt);
+ buf_hdr = odp_buf_to_hdr(buf);
+
+ /* packets 1..pkts-1 are stashed on the queue */
+ for (i = 1, j = 0; i < pkts; ++i) {
+ buf = odp_buffer_from_packet(pkt_tbl[i]);
+ tmp_hdr_tbl[j++] = odp_buf_to_hdr(buf);
+ }
+ queue_enq_multi(qentry, tmp_hdr_tbl, j);
+ }
+ }
+
+ return buf_hdr;
+}
+
+int pktin_enq_multi(queue_entry_t *qentry, odp_buffer_hdr_t *buf_hdr[], int num)
+{
+ /* Use default action */
+ return queue_enq_multi(qentry, buf_hdr, num);
+}
+
+/* Multi-dequeue from a PKTIN queue. Returns up to 'num' already-queued
+ * buffers; when fewer are available, a fresh burst is received from the
+ * interface and enqueued for FUTURE calls — the newly received packets
+ * are deliberately NOT added to this call's result (nbr is returned
+ * unchanged), preserving FIFO order with anything queued meanwhile. */
+int pktin_deq_multi(queue_entry_t *qentry, odp_buffer_hdr_t *buf_hdr[], int num)
+{
+ int nbr;
+
+ nbr = queue_deq_multi(qentry, buf_hdr, num);
+
+ if (nbr < num) {
+ odp_packet_t pkt_tbl[QUEUE_MULTI_MAX];
+ odp_buffer_hdr_t *tmp_hdr_tbl[QUEUE_MULTI_MAX];
+ odp_buffer_t buf;
+ int pkts, i;
+
+ pkts = odp_pktio_recv(qentry->s.pktin, pkt_tbl,
+ QUEUE_MULTI_MAX);
+ if (pkts > 0) {
+ for (i = 0; i < pkts; ++i) {
+ buf = odp_buffer_from_packet(pkt_tbl[i]);
+ tmp_hdr_tbl[i] = odp_buf_to_hdr(buf);
+ }
+ queue_enq_multi(qentry, tmp_hdr_tbl, pkts);
+ }
+ }
+
+ return nbr;
+}
new file mode 100644
@@ -0,0 +1,453 @@
+/* Copyright (c) 2013, Linaro Limited
+ * Copyright (c) 2013, Nokia Solutions and Networks
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * NETMAP I/O code inspired by the pkt-gen example application in netmap by:
+ * Copyright (C) 2011-2014 Matteo Landi, Luigi Rizzo. All rights reserved.
+ * Copyright (C) 2013-2014 Universita` di Pisa. All rights reserved.
+ */
+
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <poll.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <string.h>
+#include <stdlib.h>
+
+#include <linux/ethtool.h>
+#include <linux/sockios.h>
+
+#include <odp_packet_internal.h>
+#include <odp_hints.h>
+#include <odp_thread.h>
+
+#include <helper/odp_eth.h>
+#include <helper/odp_ip.h>
+#include <helper/odp_packet_helper.h>
+
+#define NETMAP_WITH_LIBS
+#include <odp_packet_netmap.h>
+
+/** Eth buffer start offset from u32-aligned address to make sure the following
+ * header (e.g. IP) starts at a 32-bit aligned address.
+ */
+#define ETHBUF_OFFSET (ODP_ALIGN_ROUNDUP(ODP_ETHHDR_LEN, sizeof(uint32_t)) \
+ - ODP_ETHHDR_LEN)
+
+/** Round up buffer address to get a properly aliged eth buffer, i.e. aligned
+ * so that the next header always starts at a 32bit aligned address.
+ */
+#define ETHBUF_ALIGN(buf_ptr) ((uint8_t *)ODP_ALIGN_ROUNDUP_PTR((buf_ptr), \
+ sizeof(uint32_t)) + ETHBUF_OFFSET)
+
+#define ETH_PROMISC 1 /* TODO: maybe this should be exported to the user */
+#define WAITLINK_TMO 2
+#define POLL_TMO 50
+
+/* Issue an interface ioctl ('cmd', with ethtool sub-command 'subcmd' for
+ * SIOCETHTOOL) against pkt_nm->ifname via a throwaway control socket.
+ * SIOCSIFFLAGS writes pkt_nm->if_flags to the device; SIOCGIFFLAGS reads
+ * them back into pkt_nm->if_flags. Returns the ioctl result (0 on
+ * success). NOTE(review): strncpy() does not NUL-terminate ifr_name if
+ * ifname fills the buffer — assumed shorter than IFNAMSIZ. */
+static int nm_do_ioctl(pkt_netmap_t * const pkt_nm, unsigned long cmd,
+ int subcmd)
+{
+ struct ethtool_value eval;
+ struct ifreq ifr;
+ int error;
+ int fd;
+
+ fd = socket(AF_INET, SOCK_DGRAM, 0);
+ if (fd < 0) {
+ ODP_ERR("Error: cannot get device control socket\n");
+ return -1;
+ }
+
+ memset(&ifr, 0, sizeof(ifr));
+ strncpy(ifr.ifr_name, pkt_nm->ifname, sizeof(ifr.ifr_name));
+
+ /* prepare the request payload for write-style commands */
+ switch (cmd) {
+ case SIOCSIFFLAGS:
+ ifr.ifr_flags = pkt_nm->if_flags & 0xffff;
+ break;
+ case SIOCETHTOOL:
+ eval.cmd = subcmd;
+ eval.data = 0;
+ ifr.ifr_data = (caddr_t)&eval;
+ break;
+ default:
+ break;
+ }
+ error = ioctl(fd, cmd, &ifr);
+ if (error)
+ goto done;
+
+ /* harvest results of read-style commands */
+ switch (cmd) {
+ case SIOCGIFFLAGS:
+ pkt_nm->if_flags = (ifr.ifr_flags << 16) |
+ (0xffff & ifr.ifr_flags);
+ ODP_DBG("flags are 0x%x\n", pkt_nm->if_flags);
+ break;
+ default:
+ break;
+ }
+done:
+ close(fd);
+ if (error)
+ ODP_ERR("ioctl err %d %lu: %s\n", error, cmd, strerror(errno));
+
+ return error;
+}
+
+/* Open a netmap descriptor on 'netdev' and initialise pkt_nm: derive the
+ * frame layout from a sample buffer of 'pool', select SW or HW rings per
+ * nm_params, optionally disable NIC offloads, and create a token queue
+ * that serialises transmitters. Returns 0 on success, negative on error.
+ * NOTE(review): on the later error paths the already-opened nm descriptor
+ * is not closed here — presumably the caller's close_pkt_netmap() cleans
+ * up; verify. */
+int setup_pkt_netmap(pkt_netmap_t * const pkt_nm, char *netdev,
+ odp_buffer_pool_t pool, netmap_params_t *nm_params)
+{
+ char qname[ODP_QUEUE_NAME_LEN];
+ char ifname[32];
+ odp_packet_t pkt;
+ odp_buffer_t token;
+ uint8_t *pkt_buf;
+ uint16_t ringid;
+ uint8_t *l2_hdr;
+ int ret;
+
+ if (pool == ODP_BUFFER_POOL_INVALID)
+ return -1;
+ pkt_nm->pool = pool;
+
+ /* probe the pool's buffer geometry with a temporary packet */
+ pkt = odp_packet_alloc(pool);
+ if (!odp_packet_is_valid(pkt))
+ return -1;
+
+ pkt_buf = odp_packet_buf_addr(pkt);
+ l2_hdr = ETHBUF_ALIGN(pkt_buf);
+ /* Store eth buffer offset for buffers from this pool */
+ pkt_nm->frame_offset = (uintptr_t)l2_hdr - (uintptr_t)pkt_buf;
+ /* pkt buffer size */
+ pkt_nm->buf_size = odp_packet_buf_size(pkt);
+ /* max frame len taking into account the l2-offset */
+ pkt_nm->max_frame_len = pkt_nm->buf_size - pkt_nm->frame_offset;
+ /* save netmap_mode for later use */
+ pkt_nm->netmap_mode = nm_params->netmap_mode;
+
+ odp_packet_free(pkt);
+
+ /* SW mode attaches to the host stack ring */
+ if (nm_params->netmap_mode == ODP_NETMAP_MODE_SW)
+ ringid = NETMAP_SW_RING;
+ else
+ ringid = nm_params->ringid;
+
+ strncpy(pkt_nm->ifname, netdev, sizeof(pkt_nm->ifname));
+ snprintf(ifname, sizeof(ifname), "netmap:%s", netdev);
+ pkt_nm->nm_desc = nm_open(ifname, NULL, ringid, 0);
+
+ if (pkt_nm->nm_desc == NULL) {
+ ODP_ERR("Error opening nm interface: %s\n", strerror(errno));
+ return -1;
+ }
+
+ ODP_DBG("thread %d mode %s mmap addr %p\n",
+ odp_thread_id(),
+ nm_params->netmap_mode == ODP_NETMAP_MODE_SW ? "SW" : "HW",
+ pkt_nm->nm_desc->mem);
+
+ /* select the [begin, end) ring range this instance services */
+ if (nm_params->netmap_mode == ODP_NETMAP_MODE_SW) {
+ /* the host ring sits one past the last HW RX ring */
+ pkt_nm->begin = pkt_nm->nm_desc->req.nr_rx_rings;
+ pkt_nm->end = pkt_nm->begin + 1;
+ pkt_nm->rxring = NETMAP_RXRING(pkt_nm->nm_desc->nifp,
+ pkt_nm->nm_desc->req.nr_rx_rings);
+ pkt_nm->txring = NETMAP_TXRING(pkt_nm->nm_desc->nifp,
+ pkt_nm->nm_desc->req.nr_tx_rings);
+ } else if (nm_params->ringid & NETMAP_HW_RING) {
+ /* a single, explicitly selected HW ring pair */
+ pkt_nm->begin = nm_params->ringid & NETMAP_RING_MASK;
+ pkt_nm->end = pkt_nm->begin + 1;
+ pkt_nm->rxring = NETMAP_RXRING(pkt_nm->nm_desc->nifp,
+ pkt_nm->begin);
+ pkt_nm->txring = NETMAP_TXRING(pkt_nm->nm_desc->nifp,
+ pkt_nm->begin);
+ } else {
+ /* all HW rings */
+ pkt_nm->begin = 0;
+ pkt_nm->end = pkt_nm->nm_desc->req.nr_rx_rings;
+ pkt_nm->rxring = NETMAP_RXRING(pkt_nm->nm_desc->nifp, 0);
+ pkt_nm->txring = NETMAP_TXRING(pkt_nm->nm_desc->nifp, 0);
+ }
+
+ /* Set TX checksumming if hardware rings */
+ if (nm_params->netmap_mode == ODP_NETMAP_MODE_HW) {
+ ret = nm_do_ioctl(pkt_nm, SIOCGIFFLAGS, 0);
+ if (ret)
+ return ret;
+ if ((pkt_nm->if_flags & IFF_UP) == 0) {
+ ODP_DBG("%s is down, bringing up...\n", pkt_nm->ifname);
+ pkt_nm->if_flags |= IFF_UP;
+ }
+ if (ETH_PROMISC) {
+ pkt_nm->if_flags |= IFF_PROMISC;
+ nm_do_ioctl(pkt_nm, SIOCSIFFLAGS, 0);
+ }
+ /* offloads must be off: netmap bypasses the kernel stack */
+ ret = nm_do_ioctl(pkt_nm, SIOCETHTOOL, ETHTOOL_SGSO);
+ if (ret)
+ ODP_DBG("ETHTOOL_SGSO not supported\n");
+
+ ret = nm_do_ioctl(pkt_nm, SIOCETHTOOL, ETHTOOL_STSO);
+ if (ret)
+ ODP_DBG("ETHTOOL_STSO not supported\n");
+ /* TODO: This seems to cause the app to not receive frames
+ * first time it is launched after netmap driver is inserted.
+ * Should be investigated further.
+ */
+ /*
+ nm_do_ioctl(pkt_nm, SIOCETHTOOL, ETHTOOL_SRXCSUM);
+ */
+ ret = nm_do_ioctl(pkt_nm, SIOCETHTOOL, ETHTOOL_STXCSUM);
+ if (ret)
+ ODP_DBG("ETHTOOL_STXCSUM not supported\n");
+ }
+
+ /* Set up the TX access queue: a single token buffer circulates so
+ only one thread transmits on this interface at a time */
+ snprintf(qname, sizeof(qname), "%s:%s-pktio_tx_access", netdev,
+ nm_params->netmap_mode == ODP_NETMAP_MODE_SW ? "SW" : "HW");
+ pkt_nm->tx_access = odp_queue_create(qname, ODP_QUEUE_TYPE_POLL, NULL);
+ if (pkt_nm->tx_access == ODP_QUEUE_INVALID) {
+ ODP_ERR("Error: pktio queue creation failed\n");
+ return -1;
+ }
+ token = odp_buffer_alloc(pool);
+ if (!odp_buffer_is_valid(token)) {
+ ODP_ERR("Error: token creation failed\n");
+ return -1;
+ }
+
+ odp_queue_enq(pkt_nm->tx_access, token);
+
+ ODP_DBG("Wait for link to come up\n");
+ sleep(WAITLINK_TMO);
+ ODP_DBG("Done\n");
+
+ return 0;
+}
+
+int close_pkt_netmap(pkt_netmap_t * const pkt_nm)
+{
+ if (pkt_nm->nm_desc != NULL) {
+ nm_close(pkt_nm->nm_desc);
+ pkt_nm->nm_desc = NULL;
+ }
+
+ return 0;
+}
+
+/* Receive up to 'len' frames from the netmap rings into pkt_table[].
+ * Frames are copied out of the ring slots into freshly allocated ODP
+ * packets (zero-copy is a noted TODO). Oversized frames are dropped;
+ * undersized frames are only padded to the Ethernet minimum in SW
+ * (host-stack) mode. Returns the number of packets delivered.
+ * NOTE(review): the final free uses odp_buffer_free((odp_buffer_t)pkt) —
+ * presumably packet and buffer handles are interchangeable here; the
+ * socket code uses odp_packet_free() instead. */
+int recv_pkt_netmap(pkt_netmap_t * const pkt_nm, odp_packet_t pkt_table[],
+ unsigned len)
+{
+ struct netmap_ring *rxring = pkt_nm->rxring;
+ int fd;
+ unsigned nb_rx = 0;
+ uint32_t limit, rx;
+ uint32_t ringid = pkt_nm->begin;
+ odp_packet_t pkt = ODP_PACKET_INVALID;
+#ifdef NETMAP_BLOCKING_IO
+ struct pollfd fds[1];
+ int ret;
+#endif
+
+ fd = pkt_nm->nm_desc->fd;
+#ifdef NETMAP_BLOCKING_IO
+ fds[0].fd = fd;
+ fds[0].events = POLLIN;
+#endif
+
+ while (nb_rx < len) {
+#ifdef NETMAP_BLOCKING_IO
+ ret = poll(&fds[0], 1, POLL_TMO);
+ if (ret <= 0 || (fds[0].revents & POLLERR))
+ break;
+#else
+ /* non-blocking mode: ask the kernel to refresh the rings */
+ ioctl(fd, NIOCRXSYNC, NULL);
+#endif
+
+ /* Find first ring not empty */
+ while (nm_ring_empty(rxring)) {
+ ringid++;
+
+ /* Return to scheduler if no more data to meet the
+ requested amount (len) */
+ if (ringid == pkt_nm->end) {
+ ODP_DBG("No more data on the wire\n");
+ break;
+ }
+
+ rxring = NETMAP_RXRING(pkt_nm->nm_desc->nifp, ringid);
+ }
+
+ limit = len - nb_rx;
+ if (nm_ring_space(rxring) < limit)
+ limit = nm_ring_space(rxring);
+
+ ODP_DBG("receiving %d frames out of %u\n", limit, len);
+
+ for (rx = 0; rx < limit; rx++) {
+ struct netmap_slot *rslot;
+ char *p;
+ uint16_t frame_len;
+ uint8_t *pkt_buf;
+ uint8_t *l2_hdr;
+ uint32_t cur;
+
+ /* 'pkt' is carried across iterations so a dropped
+ frame can reuse the already-allocated packet */
+ if (odp_likely(pkt == ODP_PACKET_INVALID)) {
+ pkt = odp_packet_alloc(pkt_nm->pool);
+ if (odp_unlikely(pkt == ODP_PACKET_INVALID))
+ break;
+ }
+
+ cur = rxring->cur;
+ rslot = &rxring->slot[cur];
+ p = NETMAP_BUF(rxring, rslot->buf_idx);
+ frame_len = rslot->len;
+
+ /* consume the slot before copying; head==cur hands
+ it back to the kernel */
+ rxring->head = nm_ring_next(rxring, cur);
+ rxring->cur = rxring->head;
+
+ pkt_buf = odp_packet_buf_addr(pkt);
+ l2_hdr = pkt_buf + pkt_nm->frame_offset;
+
+ if (frame_len > pkt_nm->max_frame_len) {
+ ODP_ERR("RX: frame too big %u %lu!\n",
+ frame_len, pkt_nm->max_frame_len);
+ /* drop the frame, reuse pkt next interation */
+ continue;
+ }
+ if (odp_unlikely(frame_len < ODP_ETH_LEN_MIN)) {
+ if (odp_unlikely(pkt_nm->netmap_mode !=
+ ODP_NETMAP_MODE_SW)) {
+ ODP_ERR("RX: Frame truncated: %u\n",
+ (unsigned)frame_len);
+ continue;
+ }
+ /* host-stack frames may legitimately be
+ short: zero-pad to the Ethernet minimum */
+ memset(l2_hdr + frame_len, 0,
+ ODP_ETH_LEN_MIN - frame_len);
+ frame_len = ODP_ETH_LEN_MIN;
+ }
+
+ /* For now copy the data in the mbuf,
+ worry about zero-copy later */
+ memcpy(l2_hdr, p, frame_len);
+
+ /* Initialize, parse and set packet header data */
+ odp_packet_init(pkt);
+ odp_packet_parse(pkt, frame_len, pkt_nm->frame_offset);
+
+ pkt_table[nb_rx] = pkt;
+ pkt = ODP_PACKET_INVALID;
+ nb_rx++;
+ }
+
+ /* alloc failed above: pool exhausted, stop the burst */
+ if (odp_unlikely(pkt == ODP_PACKET_INVALID))
+ break;
+ }
+
+ /* free the spare packet left over from a dropped last frame */
+ if (odp_unlikely(pkt != ODP_PACKET_INVALID))
+ odp_buffer_free((odp_buffer_t) pkt);
+
+ if (nb_rx)
+ ODP_DBG("<=== rcvd %03u frames from netmap adapter\n", nb_rx);
+
+ return nb_rx;
+}
+
+/* Transmit up to 'len' packets by copying them into netmap TX slots.
+ * Transmitters are serialised by circulating the single token buffer on
+ * pkt_nm->tx_access (dequeued on entry, re-enqueued on exit). ALL input
+ * packets are freed on return, including any that were not sent.
+ * Returns the number of frames placed in TX rings. */
+int send_pkt_netmap(pkt_netmap_t * const pkt_nm, odp_packet_t pkt_table[],
+ unsigned len)
+{
+ struct netmap_ring *txring = pkt_nm->txring;
+ int fd;
+ unsigned nb_tx = 0;
+ uint32_t limit, tx;
+ uint32_t ringid = pkt_nm->begin;
+ odp_packet_t pkt;
+ odp_buffer_t token;
+
+#ifdef NETMAP_BLOCKING_IO
+ struct pollfd fds[2];
+ int ret;
+#endif
+
+ fd = pkt_nm->nm_desc->fd;
+#ifdef NETMAP_BLOCKING_IO
+ fds[0].fd = fd;
+ fds[0].events = POLLOUT;
+#endif
+
+ /* take the TX token: only one thread transmits at a time */
+ token = odp_queue_deq(pkt_nm->tx_access);
+
+ while (nb_tx < len) {
+#ifdef NETMAP_BLOCKING_IO
+ ret = poll(&fds[0], 1, POLL_TMO);
+ if (ret <= 0 || (fds[0].revents & POLLERR))
+ break;
+#else
+ ioctl(fd, NIOCTXSYNC, NULL);
+#endif
+
+ /* Find first ring not empty */
+ while (nm_ring_empty(txring)) {
+ ringid++;
+
+ /* Return to scheduler if no more space to meet the
+ requested amount (len) */
+ if (ringid == pkt_nm->end) {
+ ODP_DBG("No more space in TX rings\n");
+ break;
+ }
+
+ txring = NETMAP_TXRING(pkt_nm->nm_desc->nifp, ringid);
+ }
+
+ limit = len - nb_tx;
+ if (nm_ring_space(txring) < limit)
+ limit = nm_ring_space(txring);
+
+ ODP_DBG("Sending %d packets out of %d to netmap %p %u\n",
+ limit, len, txring, txring->cur);
+
+ for (tx = 0; tx < limit; tx++) {
+ struct netmap_slot *tslot;
+ size_t frame_len;
+ uint32_t cur;
+ uint8_t *frame;
+ void *txbuf;
+
+ cur = txring->cur;
+ tslot = &txring->slot[cur];
+ txbuf = NETMAP_BUF(txring, tslot->buf_idx);
+
+ pkt = pkt_table[nb_tx];
+ frame = odp_packet_start(pkt);
+ frame_len = odp_packet_get_len(pkt);
+
+ /* copy-out transmit; advancing head publishes the
+ slot to the kernel */
+ memcpy(txbuf, frame, frame_len);
+ tslot->len = frame_len;
+ txring->head = nm_ring_next(txring, cur);
+ txring->cur = txring->head;
+ nb_tx++;
+ }
+ }
+
+ /* hand the TX token to the next transmitter */
+ odp_queue_enq(pkt_nm->tx_access, token);
+
+#ifndef NETMAP_BLOCKING_IO
+ /* flush whatever was queued above */
+ ioctl(fd, NIOCTXSYNC, NULL);
+#endif
+
+ if (nb_tx)
+ ODP_DBG("===> sent %03u frames to netmap adapter\n", nb_tx);
+
+ /* packets are consumed whether or not they were sent */
+ for (tx = 0; tx < len; tx++)
+ odp_packet_free(pkt_table[tx]);
+
+ return nb_tx;
+}
new file mode 100644
@@ -0,0 +1,791 @@
+/* Copyright (c) 2013, Linaro Limited
+ * Copyright (c) 2013, Nokia Solutions and Networks
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+
+#define _GNU_SOURCE
+#include <sys/socket.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <linux/if_packet.h>
+#include <linux/filter.h>
+#include <ctype.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <bits/wordsize.h>
+#include <net/ethernet.h>
+#include <netinet/ip.h>
+#include <arpa/inet.h>
+#include <stdint.h>
+#include <string.h>
+#include <assert.h>
+#include <net/if.h>
+#include <inttypes.h>
+#include <poll.h>
+#include <sys/ioctl.h>
+#include <errno.h>
+#include <sys/syscall.h>
+
+#include <odp_packet_socket.h>
+#include <odp_packet_internal.h>
+#include <odp_hints.h>
+
+#include <helper/odp_eth.h>
+#include <helper/odp_ip.h>
+#include <helper/odp_packet_helper.h>
+
+/** Eth buffer start offset from u32-aligned address to make sure the following
+ * header (e.g. IP) starts at a 32-bit aligned address.
+ */
+#define ETHBUF_OFFSET (ODP_ALIGN_ROUNDUP(ODP_ETHHDR_LEN, sizeof(uint32_t)) \
+ - ODP_ETHHDR_LEN)
+
+/** Round up buffer address to get a properly aliged eth buffer, i.e. aligned
+ * so that the next header always starts at a 32bit aligned address.
+ */
+#define ETHBUF_ALIGN(buf_ptr) ((uint8_t *)ODP_ALIGN_ROUNDUP_PTR((buf_ptr), \
+ sizeof(uint32_t)) + ETHBUF_OFFSET)
+
+
/* Copy a 6-byte (ETH_ALEN) MAC address from mac_src into mac_dst. */
static void ethaddr_copy(unsigned char mac_dst[], unsigned char mac_src[])
{
	memcpy(mac_dst, mac_src, ETH_ALEN);
}

/* Return non-zero when the two MAC addresses are byte-wise identical. */
static inline int ethaddrs_equal(unsigned char mac_a[], unsigned char mac_b[])
{
	return memcmp(mac_a, mac_b, ETH_ALEN) == 0;
}
+
+/* Join the socket to PACKET_FANOUT group 'sock_group_idx' with hash-based
+ * load balancing, so multiple sockets can share one interface's traffic.
+ * Returns 0 on success, -1 (with perror) on setsockopt failure. */
+static int set_pkt_sock_fanout_mmap(pkt_sock_mmap_t * const pkt_sock,
+ int sock_group_idx)
+{
+ int sockfd = pkt_sock->sockfd;
+ int val;
+ int err;
+ uint16_t fanout_group;
+
+ /* option value layout: fanout mode in the high 16 bits, group id in
+ the low 16 bits */
+ fanout_group = (uint16_t) (sock_group_idx & 0xffff);
+ val = (PACKET_FANOUT_HASH << 16) | fanout_group;
+
+ err = setsockopt(sockfd, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val));
+ if (err != 0) {
+ perror("set_pkt_sock_fanout() - setsockopt(PACKET_FANOUT)");
+ return -1;
+ }
+ return 0;
+}
+
+/*
+ * ODP_PACKET_SOCKET_BASIC:
+ * ODP_PACKET_SOCKET_MMSG:
+ */
+int setup_pkt_sock(pkt_sock_t * const pkt_sock, char *netdev,
+ odp_buffer_pool_t pool)
+{
+ int sockfd;
+ int err;
+ unsigned int if_idx;
+ struct ifreq ethreq;
+ struct sockaddr_ll sa_ll;
+ odp_packet_t pkt;
+ uint8_t *pkt_buf;
+ uint8_t *l2_hdr;
+
+ if (pool == ODP_BUFFER_POOL_INVALID)
+ return -1;
+ pkt_sock->pool = pool;
+
+ pkt = odp_packet_alloc(pool);
+ if (!odp_packet_is_valid(pkt))
+ return -1;
+
+ pkt_buf = odp_packet_buf_addr(pkt);
+ l2_hdr = ETHBUF_ALIGN(pkt_buf);
+ /* Store eth buffer offset for pkt buffers from this pool */
+ pkt_sock->frame_offset = (uintptr_t)l2_hdr - (uintptr_t)pkt_buf;
+ /* pkt buffer size */
+ pkt_sock->buf_size = odp_packet_buf_size(pkt);
+ /* max frame len taking into account the l2-offset */
+ pkt_sock->max_frame_len = pkt_sock->buf_size - pkt_sock->frame_offset;
+
+ odp_packet_free(pkt);
+
+ sockfd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
+ if (sockfd == -1) {
+ perror("setup_pkt_sock() - socket()");
+ return -1;
+ }
+ pkt_sock->sockfd = sockfd;
+
+ /* get if index */
+ memset(ðreq, 0, sizeof(struct ifreq));
+ strncpy(ethreq.ifr_name, netdev, IFNAMSIZ);
+ err = ioctl(sockfd, SIOCGIFINDEX, ðreq);
+ if (err != 0) {
+ perror("setup_pkt_sock() - ioctl(SIOCGIFINDEX)");
+ return -1;
+ }
+ if_idx = ethreq.ifr_ifindex;
+
+ /* get MAC address */
+ memset(ðreq, 0, sizeof(ethreq));
+ strncpy(ethreq.ifr_name, netdev, IFNAMSIZ);
+ err = ioctl(sockfd, SIOCGIFHWADDR, ðreq);
+ if (err != 0) {
+ perror("setup_pkt_sock() - ioctl(SIOCGIFHWADDR)");
+ return -1;
+ }
+ ethaddr_copy(pkt_sock->if_mac,
+ (unsigned char *)ethreq.ifr_ifru.ifru_hwaddr.sa_data);
+
+ /* bind socket to if */
+ memset(&sa_ll, 0, sizeof(sa_ll));
+ sa_ll.sll_family = AF_PACKET;
+ sa_ll.sll_ifindex = if_idx;
+ sa_ll.sll_protocol = htons(ETH_P_ALL);
+ if (bind(sockfd, (struct sockaddr *)&sa_ll, sizeof(sa_ll)) < 0) {
+ perror("setup_pkt_sock() - bind(to IF)");
+ return -1;
+ }
+
+ return sockfd;
+}
+
+/*
+ * ODP_PACKET_SOCKET_BASIC:
+ * ODP_PACKET_SOCKET_MMSG:
+ */
+int close_pkt_sock(pkt_sock_t * const pkt_sock)
+{
+ if (close(pkt_sock->sockfd) != 0) {
+ perror("close_pkt_sock() - close(sockfd)");
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * ODP_PACKET_SOCKET_BASIC:
+ *
+ * Receive up to 'len' frames one recvfrom() at a time (non-blocking).
+ * Frames not addressed to this host (PACKET_HOST) are skipped and their
+ * packet buffer reused for the next frame. Returns the number of packets
+ * delivered in pkt_table[].
+ */
+int recv_pkt_sock_basic(pkt_sock_t *const pkt_sock,
+ odp_packet_t pkt_table[], unsigned len)
+{
+ ssize_t recv_bytes;
+ unsigned i;
+ struct sockaddr_ll sll;
+ socklen_t addrlen = sizeof(sll);
+ int const sockfd = pkt_sock->sockfd;
+ odp_packet_t pkt = ODP_PACKET_INVALID;
+ uint8_t *pkt_buf;
+ uint8_t *l2_hdr;
+ int nb_rx = 0;
+
+ for (i = 0; i < len; i++) {
+ /* 'pkt' is carried across iterations so a skipped frame can
+ reuse the already-allocated packet */
+ if (odp_likely(pkt == ODP_PACKET_INVALID)) {
+ pkt = odp_packet_alloc(pkt_sock->pool);
+ if (odp_unlikely(pkt == ODP_PACKET_INVALID))
+ break;
+ }
+
+ pkt_buf = odp_packet_buf_addr(pkt);
+ l2_hdr = pkt_buf + pkt_sock->frame_offset;
+
+ recv_bytes = recvfrom(sockfd, l2_hdr,
+ pkt_sock->max_frame_len, MSG_DONTWAIT,
+ (struct sockaddr *)&sll, &addrlen);
+ /* no data or error: free recv buf and break out of loop */
+ if (odp_unlikely(recv_bytes < 1))
+ break;
+ /* frame not explicitly for us, reuse pkt buf for next frame */
+ if (odp_unlikely(sll.sll_pkttype != PACKET_HOST))
+ continue;
+
+ /* Parse and set packet header data */
+ odp_packet_parse(pkt, recv_bytes, pkt_sock->frame_offset);
+
+ pkt_table[nb_rx] = pkt;
+ pkt = ODP_PACKET_INVALID;
+ nb_rx++;
+ } /* end for() */
+
+ /* free the spare packet left over from a skipped last frame */
+ if (odp_unlikely(pkt != ODP_PACKET_INVALID))
+ odp_packet_free(pkt);
+
+ return nb_rx;
+}
+
+/*
+ * ODP_PACKET_SOCKET_BASIC:
+ *
+ * Transmit up to 'len' packets one send() at a time. The first attempt
+ * is non-blocking; after the first EAGAIN all further sends block. ALL
+ * input packets are freed on return, including any that were not sent.
+ * Returns the number of packets actually sent.
+ */
+int send_pkt_sock_basic(pkt_sock_t * const pkt_sock,
+ odp_packet_t pkt_table[], unsigned len)
+{
+ odp_packet_t pkt;
+ uint8_t *frame;
+ size_t frame_len;
+ unsigned i;
+ unsigned flags;
+ int sockfd;
+ int nb_tx;
+ int ret;
+
+ sockfd = pkt_sock->sockfd;
+ flags = MSG_DONTWAIT;
+ i = 0;
+ while (i < len) {
+ pkt = pkt_table[i];
+
+ frame = odp_packet_l2(pkt);
+ frame_len = odp_packet_get_len(pkt);
+
+ ret = send(sockfd, frame, frame_len, flags);
+ if (odp_unlikely(ret == -1)) {
+ if (odp_likely(errno == EAGAIN)) {
+ flags = 0; /* blocking for next rounds */
+ continue; /* resend buffer */
+ } else {
+ /* hard error: give up on the rest of the burst */
+ break;
+ }
+ }
+
+ i++;
+ } /* end while */
+ nb_tx = i;
+
+ /* packets are consumed whether or not they were sent */
+ for (i = 0; i < len; i++)
+ odp_packet_free(pkt_table[i]);
+
+ return nb_tx;
+}
+
+/*
+ * ODP_PACKET_SOCKET_MMSG:
+ *
+ * Receive up to 'len' frames in one non-blocking recvmmsg() batch.
+ * Packet buffers are pre-allocated into pkt_table[] and used as the
+ * receive iovecs; frames sourced from our own MAC are dropped and the
+ * surviving packets are compacted to the front of pkt_table[].
+ * Returns the number of packets delivered, or -1 if 'len' exceeds the
+ * burst limit.
+ */
+int recv_pkt_sock_mmsg(pkt_sock_t * const pkt_sock,
+ odp_packet_t pkt_table[], unsigned len)
+{
+ const int sockfd = pkt_sock->sockfd;
+ int msgvec_len;
+ struct mmsghdr msgvec[ODP_PACKET_SOCKET_MAX_BURST_RX];
+ struct iovec iovecs[ODP_PACKET_SOCKET_MAX_BURST_RX];
+ uint8_t *pkt_buf;
+ uint8_t *l2_hdr;
+ int nb_rx = 0;
+ int recv_msgs;
+ int i;
+
+ if (odp_unlikely(len > ODP_PACKET_SOCKET_MAX_BURST_RX))
+ return -1;
+
+ memset(msgvec, 0, sizeof(msgvec));
+
+ /* one single-iovec message per pre-allocated packet */
+ for (i = 0; i < (int)len; i++) {
+ pkt_table[i] = odp_packet_alloc(pkt_sock->pool);
+ if (odp_unlikely(pkt_table[i] == ODP_PACKET_INVALID))
+ break;
+
+ pkt_buf = odp_packet_buf_addr(pkt_table[i]);
+ l2_hdr = pkt_buf + pkt_sock->frame_offset;
+ iovecs[i].iov_base = l2_hdr;
+ iovecs[i].iov_len = pkt_sock->max_frame_len;
+ msgvec[i].msg_hdr.msg_iov = &iovecs[i];
+ msgvec[i].msg_hdr.msg_iovlen = 1;
+ }
+ msgvec_len = i; /* number of successfully allocated pkt buffers */
+
+ recv_msgs = recvmmsg(sockfd, msgvec, msgvec_len, MSG_DONTWAIT, NULL);
+
+ for (i = 0; i < recv_msgs; i++) {
+ void *base = msgvec[i].msg_hdr.msg_iov->iov_base;
+ struct ethhdr *eth_hdr = base;
+
+ /* Don't receive packets sent by ourselves */
+ if (odp_unlikely(ethaddrs_equal(pkt_sock->if_mac,
+ eth_hdr->h_source))) {
+ odp_packet_free(pkt_table[i]);
+ continue;
+ }
+
+ /* Parse and set packet header data */
+ odp_packet_parse(pkt_table[i], msgvec[i].msg_len,
+ pkt_sock->frame_offset);
+
+ /* compact survivors to the front of pkt_table[] */
+ pkt_table[nb_rx] = pkt_table[i];
+ nb_rx++;
+ }
+
+ /* Free unused pkt buffers */
+ for (; i < msgvec_len; i++)
+ odp_packet_free(pkt_table[i]);
+
+ return nb_rx;
+}
+
+/*
+ * ODP_PACKET_SOCKET_MMSG:
+ * Send a burst of up to ODP_PACKET_SOCKET_MAX_BURST_TX packets with
+ * sendmmsg() and free them afterwards.
+ *
+ * @param pkt_sock   socket context (provides sockfd)
+ * @param pkt_table  packets to transmit; always freed before return
+ * @param len        number of packets in pkt_table
+ *
+ * @return len after the send loop completes (packets are freed
+ *         unconditionally), or -1 if len exceeds the burst limit
+ */
+int send_pkt_sock_mmsg(pkt_sock_t * const pkt_sock,
+		       odp_packet_t pkt_table[], unsigned len)
+{
+	struct mmsghdr msgvec[ODP_PACKET_SOCKET_MAX_BURST_TX];
+	struct iovec iovecs[ODP_PACKET_SOCKET_MAX_BURST_TX];
+	int ret;
+	int sockfd;
+	unsigned i;
+	unsigned sent_msgs = 0;
+	unsigned flags;
+
+	if (odp_unlikely(len > ODP_PACKET_SOCKET_MAX_BURST_TX))
+		return -1;
+
+	sockfd = pkt_sock->sockfd;
+	memset(msgvec, 0, sizeof(msgvec));
+
+	/* One iovec per packet, each carrying the whole L2 frame. */
+	for (i = 0; i < len; i++) {
+		uint8_t *const frame = odp_packet_l2(pkt_table[i]);
+		const size_t frame_len = odp_packet_get_len(pkt_table[i]);
+		iovecs[i].iov_base = frame;
+		iovecs[i].iov_len = frame_len;
+		msgvec[i].msg_hdr.msg_iov = &iovecs[i];
+		msgvec[i].msg_hdr.msg_iovlen = 1;
+	}
+
+	/* First attempt is non-blocking; later rounds block until sent. */
+	flags = MSG_DONTWAIT;
+	for (i = 0; i < len; i += sent_msgs) {
+		ret = sendmmsg(sockfd, &msgvec[i], len - i, flags);
+		/* BUG FIX: bail out on a hard error. sent_msgs would be
+		 * forced to 0, so a persistent failure (e.g. ENETDOWN)
+		 * previously made this loop spin forever. */
+		if (odp_unlikely(ret == -1 && errno != EAGAIN))
+			break;
+		sent_msgs = ret > 0 ? (unsigned)ret : 0;
+		flags = 0; /* blocking for next rounds */
+	}
+
+	/* Packets are always consumed, sent or not. */
+	for (i = 0; i < len; i++)
+		odp_packet_free(pkt_table[i]);
+
+	return len;
+}
+
+/*
+ * ODP_PACKET_SOCKET_MMAP:
+ */
+
+/* Overlay for one TPACKET_V2 ring slot: the kernel places a
+ * tpacket2_hdr followed by a sockaddr_ll at the start of each frame.
+ * 'raw' is the untyped view of the slot used for status handshaking. */
+union frame_map {
+	struct {
+		struct tpacket2_hdr tp_h ODP_ALIGNED(TPACKET_ALIGNMENT);
+		struct sockaddr_ll s_ll
+		ODP_ALIGNED(TPACKET_ALIGN(sizeof(struct tpacket2_hdr)));
+	} *v2;
+
+	void *raw;
+};
+
+/* Open a PF_PACKET raw socket for all ethertypes and switch it to
+ * TPACKET_V2 framing. Returns the socket fd, or -1 on failure. */
+static int mmap_pkt_socket(void)
+{
+	int ver = TPACKET_V2;
+
+	int ret, sock = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
+	if (sock == -1) {
+		perror("pkt_socket() - socket(SOCK_RAW)");
+		return -1;
+	}
+
+	ret = setsockopt(sock, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
+	if (ret == -1) {
+		perror("pkt_socket() - setsockopt(PACKET_VERSION)");
+		close(sock); /* BUG FIX: don't leak the fd on failure */
+		return -1;
+	}
+
+	return sock;
+}
+
+/* True when the kernel has handed this RX frame to userspace
+ * (TP_STATUS_USER set in the slot header). */
+static inline int mmap_rx_kernel_ready(struct tpacket2_hdr *hdr)
+{
+	return (hdr->tp_status & TP_STATUS_USER) != 0;
+}
+
+/* Return an RX slot to the kernel once its payload has been consumed.
+ * NOTE(review): the full barrier is issued after the status store;
+ * confirm this ordering matches the PACKET_MMAP handshake expected by
+ * the kernel before relying on it on weakly-ordered CPUs. */
+static inline void mmap_rx_user_ready(struct tpacket2_hdr *hdr)
+{
+	hdr->tp_status = TP_STATUS_KERNEL;
+	__sync_synchronize();
+}
+
+/* True when this TX slot is free for userspace: neither queued for
+ * transmission nor currently being sent by the kernel. */
+static inline int mmap_tx_kernel_ready(struct tpacket2_hdr *hdr)
+{
+	if (hdr->tp_status & (TP_STATUS_SEND_REQUEST | TP_STATUS_SENDING))
+		return 0;
+	return 1;
+}
+
+/* Mark a filled TX slot as ready for the kernel to transmit.
+ * NOTE(review): as with mmap_rx_user_ready(), the barrier follows the
+ * status store — confirm the intended ordering. */
+static inline void mmap_tx_user_ready(struct tpacket2_hdr *hdr)
+{
+	hdr->tp_status = TP_STATUS_SEND_REQUEST;
+	__sync_synchronize();
+}
+
+/* Receive up to 'len' frames from a TPACKET_V2 RX ring into freshly
+ * allocated packets from 'pool'.
+ *
+ * Frames whose source MAC equals if_mac are dropped (our own
+ * transmissions looped back by the kernel). Each consumed slot is
+ * returned to the kernel immediately after its payload is copied out.
+ *
+ * @return number of packets stored in pkt_table
+ */
+static inline unsigned pkt_mmap_v2_rx(int sock, struct ring *ring,
+				      odp_packet_t pkt_table[], unsigned len,
+				      odp_buffer_pool_t pool,
+				      size_t frame_offset,
+				      unsigned char if_mac[])
+{
+	union frame_map ppd;
+	unsigned frame_num, next_frame_num;
+	uint8_t *pkt_buf;
+	int pkt_len;
+	struct ethhdr *eth_hdr;
+	uint8_t *l2_hdr;
+	unsigned i = 0;
+
+	(void)sock;
+
+	frame_num = ring->frame_num;
+
+	while (i < len) {
+		if (!mmap_rx_kernel_ready(ring->rd[frame_num].iov_base))
+			break; /* no more frames ready in the ring */
+
+		ppd.raw = ring->rd[frame_num].iov_base;
+		next_frame_num = (frame_num + 1) % ring->rd_num;
+
+		pkt_buf = (uint8_t *)ppd.raw + ppd.v2->tp_h.tp_mac;
+		pkt_len = ppd.v2->tp_h.tp_snaplen;
+
+		/* Don't receive packets sent by ourselves */
+		eth_hdr = (struct ethhdr *)pkt_buf;
+		if (odp_unlikely(ethaddrs_equal(if_mac, eth_hdr->h_source))) {
+			mmap_rx_user_ready(ppd.raw); /* drop */
+			/* BUG FIX: advance past the released slot. The old
+			 * code continued without updating frame_num, leaving
+			 * the ring position stuck on a kernel-owned frame so
+			 * reception stalled until the ring wrapped around. */
+			frame_num = next_frame_num;
+			continue;
+		}
+
+		pkt_table[i] = odp_packet_alloc(pool);
+		if (odp_unlikely(pkt_table[i] == ODP_PACKET_INVALID))
+			break; /* pool exhausted; slot stays kernel-pending */
+
+		/* Copy the frame to the pool buffer at its L2 offset. */
+		l2_hdr = odp_packet_buf_addr(pkt_table[i]) + frame_offset;
+		memcpy(l2_hdr, pkt_buf, pkt_len);
+
+		mmap_rx_user_ready(ppd.raw);
+
+		/* Parse and set packet header data */
+		odp_packet_parse(pkt_table[i], pkt_len, frame_offset);
+
+		frame_num = next_frame_num;
+		i++;
+	}
+
+	ring->frame_num = frame_num;
+
+	return i;
+}
+
+/* Queue up to 'len' packets into a TPACKET_V2 TX ring, free them, then
+ * kick the kernel with a zero-length sendto() to start transmission.
+ * Stops early when no free TX slot is available.
+ *
+ * NOTE(review): the error path returns -1 from a function declared
+ * 'unsigned', which a caller sees as UINT_MAX — confirm callers treat
+ * the return as signed.
+ */
+static inline unsigned pkt_mmap_v2_tx(int sock, struct ring *ring,
+ odp_packet_t pkt_table[], unsigned len)
+{
+ union frame_map ppd;
+ uint8_t *pkt_buf;
+ size_t pkt_len;
+ unsigned frame_num, next_frame_num;
+ int ret;
+ unsigned i = 0;
+
+ frame_num = ring->frame_num;
+
+ while (i < len) {
+ if (mmap_tx_kernel_ready(ring->rd[frame_num].iov_base)) {
+ ppd.raw = ring->rd[frame_num].iov_base;
+
+ next_frame_num = (frame_num + 1) % ring->rd_num;
+
+ pkt_buf = odp_packet_l2(pkt_table[i]);
+ pkt_len = odp_packet_get_len(pkt_table[i]);
+
+ ppd.v2->tp_h.tp_snaplen = pkt_len;
+ ppd.v2->tp_h.tp_len = pkt_len;
+
+ /* Payload starts after the tpacket2 header area minus the
+ * sockaddr_ll that is not used on TX. */
+ memcpy((uint8_t *)ppd.raw + TPACKET2_HDRLEN -
+ sizeof(struct sockaddr_ll), pkt_buf, pkt_len);
+
+ mmap_tx_user_ready(ppd.raw);
+
+ /* Packet content now lives in the ring; release the buffer. */
+ odp_packet_free(pkt_table[i]);
+ frame_num = next_frame_num;
+ i++;
+ } else {
+ break;
+ }
+ }
+
+ ring->frame_num = frame_num;
+
+ /* Zero-length send flushes all queued TX ring frames. */
+ ret = sendto(sock, NULL, 0, MSG_DONTWAIT, NULL, 0);
+ if (ret == -1) {
+ if (errno != EAGAIN) {
+ perror("pkt_mmap_v2_tx() - sendto(pkt mmap)");
+ return -1;
+ }
+ }
+
+ return i;
+}
+
+/* Size the TPACKET ring request: 'blocks' blocks of four pages each,
+ * with frames of TPACKET_ALIGNMENT << 7 bytes, then mirror the derived
+ * geometry into the ring bookkeeping fields. */
+static void mmap_fill_ring(struct ring *ring, unsigned blocks)
+{
+	const unsigned block_sz = getpagesize() << 2;
+	const unsigned frame_sz = TPACKET_ALIGNMENT << 7;
+
+	ring->req.tp_block_size = block_sz;
+	ring->req.tp_frame_size = frame_sz;
+	ring->req.tp_block_nr = blocks;
+	ring->req.tp_frame_nr = (block_sz / frame_sz) * blocks;
+
+	ring->mm_len = block_sz * blocks;
+	ring->rd_num = ring->req.tp_frame_nr;
+	ring->flen = frame_sz;
+}
+
+/* Ask the kernel to drop — rather than stall on — TX ring frames that
+ * cannot be transmitted (PACKET_LOSS). Returns 0 on success, -1 on
+ * failure. */
+static int mmap_set_packet_loss_discard(int sock)
+{
+	int discard = 1;
+	int ret = setsockopt(sock, SOL_PACKET, PACKET_LOSS,
+			     (void *)&discard, sizeof(discard));
+
+	if (ret == -1) {
+		perror("set_packet_loss_discard() - setsockopt(PACKET_LOSS)");
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Configure one TPACKET ring (PACKET_RX_RING or PACKET_TX_RING) on
+ * 'sock' and allocate the per-frame descriptor array. TX rings also
+ * enable PACKET_LOSS so failed frames are dropped instead of blocking.
+ * Returns 0 on success, -1 on failure. */
+static int mmap_setup_ring(int sock, struct ring *ring, int type)
+{
+	int ret = 0;
+	unsigned blocks = 256;
+
+	ring->sock = sock;
+	ring->type = type;
+	ring->version = TPACKET_V2;
+
+	if (type == PACKET_TX_RING) {
+		ret = mmap_set_packet_loss_discard(sock);
+		if (ret != 0)
+			return -1;
+	}
+
+	mmap_fill_ring(ring, blocks);
+
+	ret = setsockopt(sock, SOL_PACKET, type, &ring->req, sizeof(ring->req));
+	if (ret == -1) {
+		perror("setup_ring() - setsockopt(pkt mmap)");
+		return -1;
+	}
+
+	ring->rd_len = ring->rd_num * sizeof(*ring->rd);
+	ring->rd = malloc(ring->rd_len);
+	if (ring->rd == NULL) {
+		/* BUG FIX: message previously blamed env_shared_malloc();
+		 * the allocator actually used here is malloc(). */
+		perror("setup_ring() - malloc()");
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Map both kernel rings into userspace with a single mmap() — the RX
+ * ring comes first, immediately followed by the TX ring — and fill the
+ * per-frame iovec descriptor arrays for each ring.
+ * Returns 0 on success, -1 on mmap failure. */
+static int mmap_sock(pkt_sock_mmap_t *pkt_sock)
+{
+ int i;
+ int sock = pkt_sock->sockfd;
+
+ /* map rx + tx buffer to userspace : they are in this order */
+ pkt_sock->mmap_len =
+ pkt_sock->rx_ring.req.tp_block_size *
+ pkt_sock->rx_ring.req.tp_block_nr +
+ pkt_sock->tx_ring.req.tp_block_size *
+ pkt_sock->tx_ring.req.tp_block_nr;
+
+ pkt_sock->mmap_base =
+ mmap(NULL, pkt_sock->mmap_len, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_LOCKED | MAP_POPULATE, sock, 0);
+
+ if (pkt_sock->mmap_base == MAP_FAILED) {
+ perror("mmap_sock() - mmap rx&tx buffer failed");
+ return -1;
+ }
+
+ /* RX descriptors: one iovec per frame at its slot in the mapping. */
+ pkt_sock->rx_ring.mm_space = pkt_sock->mmap_base;
+ memset(pkt_sock->rx_ring.rd, 0, pkt_sock->rx_ring.rd_len);
+ for (i = 0; i < pkt_sock->rx_ring.rd_num; ++i) {
+ pkt_sock->rx_ring.rd[i].iov_base =
+ pkt_sock->rx_ring.mm_space + (i * pkt_sock->rx_ring.flen);
+ pkt_sock->rx_ring.rd[i].iov_len = pkt_sock->rx_ring.flen;
+ }
+
+ /* TX descriptors start right after the RX area. */
+ pkt_sock->tx_ring.mm_space =
+ pkt_sock->mmap_base + pkt_sock->rx_ring.mm_len;
+ memset(pkt_sock->tx_ring.rd, 0, pkt_sock->tx_ring.rd_len);
+ for (i = 0; i < pkt_sock->tx_ring.rd_num; ++i) {
+ pkt_sock->tx_ring.rd[i].iov_base =
+ pkt_sock->tx_ring.mm_space + (i * pkt_sock->tx_ring.flen);
+ pkt_sock->tx_ring.rd[i].iov_len = pkt_sock->tx_ring.flen;
+ }
+
+ return 0;
+}
+
+/* Undo mmap_sock()/mmap_setup_ring(): release both ring descriptor
+ * arrays and unmap the shared rx+tx frame area. The three resources
+ * are independent, so release order does not matter. */
+static void mmap_unmap_sock(pkt_sock_mmap_t *pkt_sock)
+{
+	free(pkt_sock->tx_ring.rd);
+	free(pkt_sock->rx_ring.rd);
+	munmap(pkt_sock->mmap_base, pkt_sock->mmap_len);
+}
+
+/* Bind the packet socket to interface 'netdev' for all ethertypes.
+ * Returns 0 on success, -1 on failure. */
+static int mmap_bind_sock(pkt_sock_mmap_t *pkt_sock, char *netdev)
+{
+	pkt_sock->ll.sll_family = PF_PACKET;
+	pkt_sock->ll.sll_protocol = htons(ETH_P_ALL);
+	pkt_sock->ll.sll_ifindex = if_nametoindex(netdev);
+	pkt_sock->ll.sll_hatype = 0;
+	pkt_sock->ll.sll_pkttype = 0;
+	pkt_sock->ll.sll_halen = 0;
+
+	if (bind(pkt_sock->sockfd, (struct sockaddr *)&pkt_sock->ll,
+		 sizeof(pkt_sock->ll)) == -1) {
+		perror("bind_sock() - bind(to IF)");
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Query the interface MAC via SIOCGIFHWADDR and cache it in
+ * pkt_sock->if_mac (used to filter our own looped-back frames).
+ * Returns 0 on success, -1 on ioctl failure. */
+static int mmap_store_hw_addr(pkt_sock_mmap_t * const pkt_sock, char *netdev)
+{
+	struct ifreq ethreq;
+	int ret;
+
+	/* get MAC address */
+	memset(&ethreq, 0, sizeof(ethreq));
+	/* BUG FIX: copy at most IFNAMSIZ - 1 bytes; strncpy() does not
+	 * NUL-terminate when the source fills the buffer, and the memset
+	 * above guarantees the final byte stays zero. */
+	strncpy(ethreq.ifr_name, netdev, IFNAMSIZ - 1);
+	ret = ioctl(pkt_sock->sockfd, SIOCGIFHWADDR, &ethreq);
+	if (ret != 0) {
+		perror("store_hw_addr() - ioctl(SIOCGIFHWADDR)");
+		return -1;
+	}
+
+	ethaddr_copy(pkt_sock->if_mac,
+		     (unsigned char *)ethreq.ifr_ifru.ifru_hwaddr.sa_data);
+
+	return 0;
+}
+
+/*
+ * ODP_PACKET_SOCKET_MMAP:
+ * Set up an mmap'ed (TPACKET_V2) packet socket on 'netdev': probe the
+ * pool's L2 frame offset, create and bind the socket, configure and
+ * map the TX/RX rings, cache the interface MAC, and optionally join a
+ * fanout group.
+ *
+ * @return the socket fd on success, -1 on failure
+ * NOTE(review): on failures after socket creation the fd and ring
+ * allocations are not released here — confirm callers treat -1 as
+ * fatal for the whole pkt_sock.
+ */
+int setup_pkt_sock_mmap(pkt_sock_mmap_t * const pkt_sock, char *netdev,
+			odp_buffer_pool_t pool, int fanout)
+{
+	odp_packet_t pkt;
+	uint8_t *pkt_buf;
+	uint8_t *l2_hdr;
+	int if_idx;
+	int ret = 0;
+
+	memset(pkt_sock, 0, sizeof(*pkt_sock));
+
+	if (pool == ODP_BUFFER_POOL_INVALID)
+		return -1;
+
+	/* Allocate one packet just to discover the pool's alignment. */
+	pkt = odp_packet_alloc(pool);
+	if (!odp_packet_is_valid(pkt))
+		return -1;
+
+	pkt_buf = odp_packet_buf_addr(pkt);
+	l2_hdr = ETHBUF_ALIGN(pkt_buf);
+	/* Store eth buffer offset for pkt buffers from this pool */
+	pkt_sock->frame_offset = (uintptr_t)l2_hdr - (uintptr_t)pkt_buf;
+
+	odp_packet_free(pkt);
+
+	pkt_sock->pool = pool;
+	pkt_sock->sockfd = mmap_pkt_socket();
+	/* BUG FIX: socket creation can fail; without this check the
+	 * calls below would operate on fd -1. */
+	if (pkt_sock->sockfd == -1)
+		return -1;
+
+	ret = mmap_bind_sock(pkt_sock, netdev);
+	if (ret != 0)
+		return -1;
+
+	ret = mmap_setup_ring(pkt_sock->sockfd, &pkt_sock->tx_ring,
+			      PACKET_TX_RING);
+	if (ret != 0)
+		return -1;
+
+	ret = mmap_setup_ring(pkt_sock->sockfd, &pkt_sock->rx_ring,
+			      PACKET_RX_RING);
+	if (ret != 0)
+		return -1;
+
+	ret = mmap_sock(pkt_sock);
+	if (ret != 0)
+		return -1;
+
+	ret = mmap_store_hw_addr(pkt_sock, netdev);
+	if (ret != 0)
+		return -1;
+
+	if_idx = if_nametoindex(netdev);
+	if (if_idx == 0) {
+		perror("setup_pkt_sock(): if_nametoindex()");
+		return -1;
+	}
+
+	if (fanout) {
+		ret = set_pkt_sock_fanout_mmap(pkt_sock, if_idx);
+		if (ret != 0)
+			return -1;
+	}
+
+	return pkt_sock->sockfd;
+}
+
+/*
+ * ODP_PACKET_SOCKET_MMAP:
+ * Tear down an mmap'ed packet socket: unmap rings, free descriptor
+ * arrays, and close the fd. Returns 0 on success, -1 if close fails.
+ */
+int close_pkt_sock_mmap(pkt_sock_mmap_t * const pkt_sock)
+{
+	int ret;
+
+	mmap_unmap_sock(pkt_sock);
+
+	ret = close(pkt_sock->sockfd);
+	if (ret != 0) {
+		perror("close_pkt_sock() - close(sockfd)");
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * ODP_PACKET_SOCKET_MMAP:
+ * Receive up to 'len' packets from the RX ring; thin wrapper around
+ * pkt_mmap_v2_rx(). Returns the number of packets written to
+ * pkt_table.
+ */
+int recv_pkt_sock_mmap(pkt_sock_mmap_t * const pkt_sock,
+ odp_packet_t pkt_table[], unsigned len)
+{
+ return pkt_mmap_v2_rx(pkt_sock->rx_ring.sock, &pkt_sock->rx_ring,
+ pkt_table, len, pkt_sock->pool,
+ pkt_sock->frame_offset, pkt_sock->if_mac);
+}
+
+/*
+ * ODP_PACKET_SOCKET_MMAP:
+ * Transmit up to 'len' packets through the TX ring; thin wrapper
+ * around pkt_mmap_v2_tx(). Returns the number of packets queued.
+ */
+int send_pkt_sock_mmap(pkt_sock_mmap_t * const pkt_sock,
+ odp_packet_t pkt_table[], unsigned len)
+{
+ return pkt_mmap_v2_tx(pkt_sock->tx_ring.sock, &pkt_sock->tx_ring,
+ pkt_table, len);
+}
new file mode 100644
@@ -0,0 +1,426 @@
+/* Copyright (c) 2013, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_queue.h>
+#include <odp_queue_internal.h>
+#include <odp_std_types.h>
+#include <odp_align.h>
+#include <odp_buffer.h>
+#include <odp_buffer_internal.h>
+#include <odp_buffer_pool_internal.h>
+#include <odp_internal.h>
+#include <odp_shared_memory.h>
+#include <odp_schedule_internal.h>
+#include <odp_config.h>
+#include <odp_packet_io_internal.h>
+#include <odp_packet_io_queue.h>
+#include <odp_debug.h>
+#include <odp_hints.h>
+
+#ifdef USE_TICKETLOCK
+#include <odp_ticketlock.h>
+#define LOCK(a) odp_ticketlock_lock(a)
+#define UNLOCK(a) odp_ticketlock_unlock(a)
+#define LOCK_INIT(a) odp_ticketlock_init(a)
+#else
+#include <odp_spinlock.h>
+#define LOCK(a) odp_spinlock_lock(a)
+#define UNLOCK(a) odp_spinlock_unlock(a)
+#define LOCK_INIT(a) odp_spinlock_init(a)
+#endif
+
+#include <string.h>
+
+
+/* Global table holding every queue entry; reserved from shared memory
+ * in odp_queue_init_global(). */
+typedef struct queue_table_t {
+ queue_entry_t queue[ODP_CONFIG_QUEUES];
+} queue_table_t;
+
+/* Process-wide pointer to the shared queue table. */
+static queue_table_t *queue_tbl;
+
+
+/* Look up a queue entry by its table index (no validation). */
+queue_entry_t *get_qentry(uint32_t queue_id)
+{
+	return queue_tbl->queue + queue_id;
+}
+
+/* Initialize a queue entry: name, type, scheduling parameters (copied
+ * from 'param' or defaulted), and the type-specific enqueue/dequeue
+ * function pointers. Caller must own the entry exclusively. */
+static void queue_init(queue_entry_t *queue, const char *name,
+		       odp_queue_type_t type, odp_queue_param_t *param)
+{
+	strncpy(queue->s.name, name, ODP_QUEUE_NAME_LEN - 1);
+	/* BUG FIX (hardening): strncpy() does not terminate when 'name'
+	 * fills the buffer; terminate explicitly rather than relying on
+	 * the table having been zeroed once at global init. */
+	queue->s.name[ODP_QUEUE_NAME_LEN - 1] = 0;
+	queue->s.type = type;
+
+	if (param) {
+		memcpy(&queue->s.param, param, sizeof(odp_queue_param_t));
+	} else {
+		/* Defaults */
+		memset(&queue->s.param, 0, sizeof(odp_queue_param_t));
+		queue->s.param.sched.prio  = ODP_SCHED_PRIO_DEFAULT;
+		queue->s.param.sched.sync  = ODP_SCHED_SYNC_DEFAULT;
+		queue->s.param.sched.group = ODP_SCHED_GROUP_DEFAULT;
+	}
+
+	/* Packet-I/O queues get pktio-specific handlers; everything else
+	 * uses the generic linked-list implementation below. */
+	switch (type) {
+	case ODP_QUEUE_TYPE_PKTIN:
+		queue->s.enqueue = pktin_enqueue;
+		queue->s.dequeue = pktin_dequeue;
+		queue->s.enqueue_multi = pktin_enq_multi;
+		queue->s.dequeue_multi = pktin_deq_multi;
+		break;
+	case ODP_QUEUE_TYPE_PKTOUT:
+		queue->s.enqueue = pktout_enqueue;
+		queue->s.dequeue = pktout_dequeue;
+		queue->s.enqueue_multi = pktout_enq_multi;
+		queue->s.dequeue_multi = pktout_deq_multi;
+		break;
+	default:
+		queue->s.enqueue = queue_enq;
+		queue->s.dequeue = queue_deq;
+		queue->s.enqueue_multi = queue_enq_multi;
+		queue->s.dequeue_multi = queue_deq_multi;
+		break;
+	}
+
+	queue->s.head = NULL;
+	queue->s.tail = NULL;
+	queue->s.sched_buf = ODP_BUFFER_INVALID;
+}
+
+
+/* Reserve the shared queue table and initialize every entry's lock and
+ * handle. Returns 0 on success, -1 if the reservation fails. */
+int odp_queue_init_global(void)
+{
+	uint32_t idx;
+
+	ODP_DBG("Queue init ... ");
+
+	/* NOTE: alignment argument is sizeof(queue_entry_t). */
+	queue_tbl = odp_shm_reserve("odp_queues",
+				    sizeof(queue_table_t),
+				    sizeof(queue_entry_t));
+	if (queue_tbl == NULL)
+		return -1;
+
+	memset(queue_tbl, 0, sizeof(queue_table_t));
+
+	for (idx = 0; idx < ODP_CONFIG_QUEUES; idx++) {
+		queue_entry_t *qentry = get_qentry(idx);
+
+		LOCK_INIT(&qentry->s.lock);
+		qentry->s.handle = queue_from_id(idx);
+	}
+
+	ODP_DBG("done\n");
+	ODP_DBG("Queue init global\n");
+	ODP_DBG("  struct queue_entry_s size %zu\n",
+		sizeof(struct queue_entry_s));
+	ODP_DBG("  queue_entry_t size        %zu\n",
+		sizeof(queue_entry_t));
+	ODP_DBG("\n");
+
+	return 0;
+}
+
+/* Report the type a queue was created with. */
+odp_queue_type_t odp_queue_type(odp_queue_t handle)
+{
+	return queue_to_qentry(handle)->s.type;
+}
+
+/* Create a queue: claim the first free table entry (double-checked
+ * under the entry lock), initialize it, and for scheduled/pktin queues
+ * allocate the scheduler buffer and set the priority mask.
+ * Returns the queue handle or ODP_QUEUE_INVALID.
+ *
+ * NOTE(review): queue->s.sched_buf is written after the entry lock was
+ * released, and on sched-buf allocation failure the claimed entry is
+ * not returned to QUEUE_STATUS_FREE (entry leaks) — confirm intended.
+ */
+odp_queue_t odp_queue_create(const char *name, odp_queue_type_t type,
+ odp_queue_param_t *param)
+{
+ uint32_t i;
+ queue_entry_t *queue;
+ odp_queue_t handle = ODP_QUEUE_INVALID;
+
+ for (i = 0; i < ODP_CONFIG_QUEUES; i++) {
+ queue = &queue_tbl->queue[i];
+
+ /* Cheap unlocked pre-check; re-checked under the lock. */
+ if (queue->s.status != QUEUE_STATUS_FREE)
+ continue;
+
+ LOCK(&queue->s.lock);
+ if (queue->s.status == QUEUE_STATUS_FREE) {
+ queue_init(queue, name, type, param);
+
+ if (type == ODP_QUEUE_TYPE_SCHED ||
+ type == ODP_QUEUE_TYPE_PKTIN)
+ queue->s.status = QUEUE_STATUS_NOTSCHED;
+ else
+ queue->s.status = QUEUE_STATUS_READY;
+
+ handle = queue->s.handle;
+ UNLOCK(&queue->s.lock);
+ break;
+ }
+ UNLOCK(&queue->s.lock);
+ }
+
+ if (handle != ODP_QUEUE_INVALID &&
+ (type == ODP_QUEUE_TYPE_SCHED || type == ODP_QUEUE_TYPE_PKTIN)) {
+ odp_buffer_t buf;
+
+ buf = odp_schedule_buffer_alloc(handle);
+ if (buf == ODP_BUFFER_INVALID) {
+ ODP_ERR("queue_init: sched buf alloc failed\n");
+ return ODP_QUEUE_INVALID;
+ }
+
+ queue->s.sched_buf = buf;
+ odp_schedule_mask_set(handle, queue->s.param.sched.prio);
+ }
+
+ return handle;
+}
+
+
+/* Return the scheduler buffer associated with a queue handle. */
+odp_buffer_t queue_sched_buf(odp_queue_t handle)
+{
+	return queue_to_qentry(handle)->s.sched_buf;
+}
+
+
+/* True when the queue was created with atomic scheduling sync. */
+int queue_sched_atomic(odp_queue_t handle)
+{
+	queue_entry_t *qentry = queue_to_qentry(handle);
+
+	return qentry->s.param.sched.sync == ODP_SCHED_SYNC_ATOMIC;
+}
+
+
+/* Find a queue by name. Returns its handle, or ODP_QUEUE_INVALID when
+ * no allocated entry matches. */
+odp_queue_t odp_queue_lookup(const char *name)
+{
+	uint32_t idx;
+
+	for (idx = 0; idx < ODP_CONFIG_QUEUES; idx++) {
+		queue_entry_t *qentry = &queue_tbl->queue[idx];
+
+		/* Cheap unlocked skip of free slots. */
+		if (qentry->s.status == QUEUE_STATUS_FREE)
+			continue;
+
+		LOCK(&qentry->s.lock);
+		if (strcmp(name, qentry->s.name) == 0) {
+			/* match */
+			UNLOCK(&qentry->s.lock);
+			return qentry->s.handle;
+		}
+		UNLOCK(&qentry->s.lock);
+	}
+
+	return ODP_QUEUE_INVALID;
+}
+
+
+/* Append one buffer to the queue's linked list; if the queue was in
+ * NOTSCHED state, hand it back to the scheduler. Always returns 0. */
+int queue_enq(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr)
+{
+	int need_sched = 0;
+
+	LOCK(&queue->s.lock);
+
+	buf_hdr->next = NULL;
+	if (queue->s.head == NULL)
+		queue->s.head = buf_hdr;	/* queue was empty */
+	else
+		queue->s.tail->next = buf_hdr;	/* append at the tail */
+	queue->s.tail = buf_hdr;
+
+	if (queue->s.status == QUEUE_STATUS_NOTSCHED) {
+		queue->s.status = QUEUE_STATUS_SCHED;
+		need_sched = 1; /* retval: schedule queue */
+	}
+	UNLOCK(&queue->s.lock);
+
+	/* Hand the queue to the scheduler outside the lock. */
+	if (need_sched)
+		odp_schedule_queue(queue->s.handle, queue->s.param.sched.prio);
+
+	return 0;
+}
+
+
+/* Append 'num' buffers to the queue as one pre-linked chain, taking
+ * the lock only once; reschedule the queue if it was NOTSCHED.
+ * Always returns 0. */
+int queue_enq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num)
+{
+	int sched = 0;
+	int i;
+	odp_buffer_hdr_t *tail;
+
+	/* BUG FIX: guard empty/negative bursts — buf_hdr[num - 1] below
+	 * would otherwise read out of bounds. */
+	if (num < 1)
+		return 0;
+
+	/* Link the burst into a chain outside the lock. */
+	for (i = 0; i < num - 1; i++)
+		buf_hdr[i]->next = buf_hdr[i+1];
+
+	tail = buf_hdr[num-1];
+	buf_hdr[num-1]->next = NULL;
+
+	LOCK(&queue->s.lock);
+	/* Empty queue */
+	if (queue->s.head == NULL)
+		queue->s.head = buf_hdr[0];
+	else
+		queue->s.tail->next = buf_hdr[0];
+
+	queue->s.tail = tail;
+
+	if (queue->s.status == QUEUE_STATUS_NOTSCHED) {
+		queue->s.status = QUEUE_STATUS_SCHED;
+		sched = 1; /* retval: schedule queue */
+	}
+	UNLOCK(&queue->s.lock);
+
+	/* Add queue to scheduling */
+	if (sched == 1)
+		odp_schedule_queue(queue->s.handle, queue->s.param.sched.prio);
+
+	return 0;
+}
+
+
+/* Enqueue up to QUEUE_MULTI_MAX buffers (extra entries are silently
+ * clamped). Converts handles to headers and delegates to the queue's
+ * enqueue_multi handler. */
+int odp_queue_enq_multi(odp_queue_t handle, odp_buffer_t buf[], int num)
+{
+	odp_buffer_hdr_t *buf_hdr[QUEUE_MULTI_MAX];
+	queue_entry_t *queue;
+	int i;
+
+	/* BUG FIX: reject empty/negative bursts instead of passing them
+	 * to the handler (queue_enq_multi indexed buf_hdr[num - 1]). */
+	if (num <= 0)
+		return 0;
+
+	if (num > QUEUE_MULTI_MAX)
+		num = QUEUE_MULTI_MAX;
+
+	queue = queue_to_qentry(handle);
+
+	for (i = 0; i < num; i++)
+		buf_hdr[i] = odp_buf_to_hdr(buf[i]);
+
+	return queue->s.enqueue_multi(queue, buf_hdr, num);
+}
+
+
+/* Enqueue one buffer via the queue's type-specific handler. */
+int odp_queue_enq(odp_queue_t handle, odp_buffer_t buf)
+{
+	queue_entry_t *qentry = queue_to_qentry(handle);
+	odp_buffer_hdr_t *hdr = odp_buf_to_hdr(buf);
+
+	return qentry->s.enqueue(qentry, hdr);
+}
+
+
+/* Pop the head buffer, or NULL when empty. An empty scheduled queue
+ * (except PKTIN) transitions back to NOTSCHED so the scheduler stops
+ * polling it until the next enqueue. */
+odp_buffer_hdr_t *queue_deq(queue_entry_t *queue)
+{
+	odp_buffer_hdr_t *hdr;
+
+	LOCK(&queue->s.lock);
+
+	hdr = queue->s.head;
+	if (hdr == NULL) {
+		/* nothing queued: drop from scheduling unless pktin */
+		if (queue->s.status == QUEUE_STATUS_SCHED &&
+		    queue->s.type != ODP_QUEUE_TYPE_PKTIN)
+			queue->s.status = QUEUE_STATUS_NOTSCHED;
+	} else {
+		queue->s.head = hdr->next;
+		hdr->next = NULL;
+
+		if (queue->s.head == NULL)
+			queue->s.tail = NULL; /* queue became empty */
+	}
+
+	UNLOCK(&queue->s.lock);
+
+	return hdr;
+}
+
+
+/* Dequeue up to 'num' buffers; returns the count actually dequeued.
+ * When empty, a scheduled queue (except PKTIN) drops back to NOTSCHED.
+ * Note: inside the loop, hdr->next must be read before buf_hdr[i]->next
+ * is cleared — both refer to the same node. */
+int queue_deq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num)
+{
+ int i = 0;
+
+ LOCK(&queue->s.lock);
+
+ if (queue->s.head == NULL) {
+ /* Already empty queue */
+ if (queue->s.status == QUEUE_STATUS_SCHED &&
+ queue->s.type != ODP_QUEUE_TYPE_PKTIN)
+ queue->s.status = QUEUE_STATUS_NOTSCHED;
+ } else {
+ odp_buffer_hdr_t *hdr = queue->s.head;
+
+ for (; i < num && hdr; i++) {
+ buf_hdr[i] = hdr;
+ /* odp_prefetch(hdr->addr); */
+ hdr = hdr->next;
+ buf_hdr[i]->next = NULL;
+ }
+
+ queue->s.head = hdr;
+
+ if (hdr == NULL) {
+ /* Queue is now empty */
+ queue->s.tail = NULL;
+ }
+ }
+
+ UNLOCK(&queue->s.lock);
+
+ return i;
+}
+
+
+/* Dequeue up to QUEUE_MULTI_MAX buffers (larger requests are clamped),
+ * converting headers back to buffer handles. Returns the count. */
+int odp_queue_deq_multi(odp_queue_t handle, odp_buffer_t buf[], int num)
+{
+	queue_entry_t *qentry;
+	odp_buffer_hdr_t *hdr_tbl[QUEUE_MULTI_MAX];
+	int nbr, j;
+
+	if (num > QUEUE_MULTI_MAX)
+		num = QUEUE_MULTI_MAX;
+
+	qentry = queue_to_qentry(handle);
+	nbr = qentry->s.dequeue_multi(qentry, hdr_tbl, num);
+
+	for (j = 0; j < nbr; j++)
+		buf[j] = hdr_tbl[j]->handle.handle;
+
+	return nbr;
+}
+
+
+/* Dequeue one buffer, or ODP_BUFFER_INVALID when the queue is empty. */
+odp_buffer_t odp_queue_deq(odp_queue_t handle)
+{
+	queue_entry_t *qentry = queue_to_qentry(handle);
+	odp_buffer_hdr_t *hdr = qentry->s.dequeue(qentry);
+
+	return hdr ? hdr->handle.handle : ODP_BUFFER_INVALID;
+}
+
+
+/* Acquire a queue entry's internal lock (ticket lock or spinlock,
+ * depending on USE_TICKETLOCK). */
+void queue_lock(queue_entry_t *queue)
+{
+	LOCK(&queue->s.lock);
+}
+
+
+/* Release a queue entry's internal lock; pairs with queue_lock(). */
+void queue_unlock(queue_entry_t *queue)
+{
+	UNLOCK(&queue->s.lock);
+}
new file mode 100644
@@ -0,0 +1,619 @@
+/* Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Derived from FreeBSD's bufring.c
+ *
+ **************************************************************************
+ *
+ * Copyright (c) 2007,2008 Kip Macy kmacy@freebsd.org
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. The name of Kip Macy nor the names of other
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ ***************************************************************************/
+
+#include <odp_shared_memory.h>
+#include <odp_internal.h>
+#include <odp_spin_internal.h>
+#include <odp_spinlock.h>
+#include <odp_align.h>
+#include <sys/mman.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <odp_debug.h>
+#include <odp_rwlock.h>
+#include <helper/odp_ring.h>
+
+static TAILQ_HEAD(, odp_ring) odp_ring_list;
+
+/*
+ * Copy n object pointers from obj_table into the ring starting at slot
+ * prod_head (expects r, obj_table, n, i, prod_head and mask as locals
+ * in the expanding function). Fast path copies four entries per
+ * iteration when the burst does not wrap; otherwise the copy is split
+ * at the end of the ring.
+ */
+#define ENQUEUE_PTRS() do { \
+ const uint32_t size = r->prod.size; \
+ uint32_t idx = prod_head & mask; \
+ if (odp_likely(idx + n < size)) { \
+ for (i = 0; i < (n & ((~(unsigned)0x3))); i += 4, idx += 4) { \
+ r->ring[idx] = obj_table[i]; \
+ r->ring[idx+1] = obj_table[i+1]; \
+ r->ring[idx+2] = obj_table[i+2]; \
+ r->ring[idx+3] = obj_table[i+3]; \
+ } \
+ switch (n & 0x3) { \
+ case 3: \
+ r->ring[idx++] = obj_table[i++]; /* fallthrough */ \
+ case 2: \
+ r->ring[idx++] = obj_table[i++]; /* fallthrough */ \
+ case 1: \
+ r->ring[idx++] = obj_table[i++]; \
+ } \
+ } else { \
+ for (i = 0; idx < size; i++, idx++)\
+ r->ring[idx] = obj_table[i]; \
+ for (idx = 0; i < n; i++, idx++) \
+ r->ring[idx] = obj_table[i]; \
+ } \
+} while (0)
+
+/*
+ * Copy n object pointers out of the ring, starting at slot cons_head,
+ * into obj_table (expects r, obj_table, n, i, cons_head and mask as
+ * locals in the expanding function). Mirror image of ENQUEUE_PTRS(),
+ * including the four-at-a-time fast path and the split wrap-around
+ * copy.
+ */
+#define DEQUEUE_PTRS() do { \
+ uint32_t idx = cons_head & mask; \
+ const uint32_t size = r->cons.size; \
+ if (odp_likely(idx + n < size)) { \
+ for (i = 0; i < (n & (~(unsigned)0x3)); i += 4, idx += 4) {\
+ obj_table[i] = r->ring[idx]; \
+ obj_table[i+1] = r->ring[idx+1]; \
+ obj_table[i+2] = r->ring[idx+2]; \
+ obj_table[i+3] = r->ring[idx+3]; \
+ } \
+ switch (n & 0x3) { \
+ case 3: \
+ obj_table[i++] = r->ring[idx++]; /* fallthrough */ \
+ case 2: \
+ obj_table[i++] = r->ring[idx++]; /* fallthrough */ \
+ case 1: \
+ obj_table[i++] = r->ring[idx++]; \
+ } \
+ } else { \
+ for (i = 0; idx < size; i++, idx++) \
+ obj_table[i] = r->ring[idx]; \
+ for (idx = 0; i < n; i++, idx++) \
+ obj_table[i] = r->ring[idx]; \
+ } \
+} while (0)
+
+static odp_rwlock_t qlock; /* rings tailq lock */
+
+/* One-time init of the global ring registry: the rwlock protecting the
+ * list and the tailq of all created rings. */
+void odp_ring_tailq_init(void)
+{
+	odp_rwlock_init(&qlock);
+	TAILQ_INIT(&odp_ring_list);
+}
+
+/* Create a ring of 'count' slots (must be a power of two, at most
+ * ODP_RING_SZ_MASK) backed by shared memory, initialize its producer
+ * and consumer heads, and register it on the global ring list under
+ * the registry write lock. Returns the ring, or NULL on failure. */
+odp_ring_t *
+odp_ring_create(const char *name, unsigned count, unsigned flags)
+{
+ char ring_name[ODP_RING_NAMESIZE];
+ odp_ring_t *r;
+ size_t ring_size;
+
+ /* count must be a power of 2 */
+ if (!ODP_VAL_IS_POWER_2(count) || (count > ODP_RING_SZ_MASK)) {
+ ODP_ERR("Requested size is invalid, must be power of 2, and do not exceed the size limit %u\n",
+ ODP_RING_SZ_MASK);
+ return NULL;
+ }
+
+ snprintf(ring_name, sizeof(ring_name), "%s", name);
+ /* header plus one pointer per slot */
+ ring_size = count*sizeof(void *)+sizeof(odp_ring_t);
+
+ odp_rwlock_write_lock(&qlock);
+ /* reserve a memory zone for this ring.*/
+ r = odp_shm_reserve(ring_name, ring_size, ODP_CACHE_LINE_SIZE);
+
+ if (r != NULL) {
+ /* init the ring structure */
+ snprintf(r->name, sizeof(r->name), "%s", name);
+ r->flags = flags;
+ r->prod.watermark = count;
+ r->prod.sp_enqueue = !!(flags & ODP_RING_F_SP_ENQ);
+ r->cons.sc_dequeue = !!(flags & ODP_RING_F_SC_DEQ);
+ r->prod.size = count;
+ r->cons.size = count;
+ r->prod.mask = count-1;
+ r->cons.mask = count-1;
+ r->prod.head = 0;
+ r->cons.head = 0;
+ r->prod.tail = 0;
+ r->cons.tail = 0;
+
+ TAILQ_INSERT_TAIL(&odp_ring_list, r, next);
+ } else {
+ ODP_ERR("Cannot reserve memory\n");
+ }
+
+ odp_rwlock_write_unlock(&qlock);
+ return r;
+}
+
+/*
+ * Change the high water mark. A 'count' of 0 disables water marking
+ * (implemented by setting the mark to the full ring size, which the
+ * enqueue paths can never exceed). Returns 0, or -EINVAL when count
+ * is not below the ring size.
+ */
+int odp_ring_set_water_mark(odp_ring_t *r, unsigned count)
+{
+	if (count >= r->prod.size)
+		return -EINVAL;
+
+	r->prod.watermark = (count == 0) ? r->prod.size : count;
+	return 0;
+}
+
+/**
+ * Enqueue several objects on the ring (multi-producers safe).
+ *
+ * Lock-free: producers claim a slot range by CAS on prod.head, write
+ * their entries, then publish by advancing prod.tail in claim order.
+ * With ODP_RING_QUEUE_FIXED behavior all n objects are enqueued or
+ * -ENOBUFS is returned; otherwise as many as fit are enqueued and the
+ * count is returned (possibly OR'ed with ODP_RING_QUOT_EXCEED when the
+ * watermark is passed, or -EDQUOT in fixed mode).
+ */
+int __odp_ring_mp_do_enqueue(odp_ring_t *r, void * const *obj_table,
+ unsigned n, enum odp_ring_queue_behavior behavior)
+{
+ uint32_t prod_head, prod_next;
+ uint32_t cons_tail, free_entries;
+ const unsigned max = n;
+ int success;
+ unsigned i;
+ uint32_t mask = r->prod.mask;
+ int ret;
+
+ /* move prod.head atomically */
+ do {
+ /* Reset n to the initial burst count */
+ n = max;
+
+ prod_head = r->prod.head;
+ cons_tail = r->cons.tail;
+ /* The subtraction is done between two unsigned 32bits value
+ * (the result is always modulo 32 bits even if we have
+ * prod_head > cons_tail). So 'free_entries' is always between 0
+ * and size(ring)-1. */
+ free_entries = (mask + cons_tail - prod_head);
+
+ /* check that we have enough room in ring */
+ if (odp_unlikely(n > free_entries)) {
+ if (behavior == ODP_RING_QUEUE_FIXED) {
+ return -ENOBUFS;
+ } else {
+ /* No free entry available */
+ if (odp_unlikely(free_entries == 0))
+ return 0;
+
+ n = free_entries;
+ }
+ }
+
+ prod_next = prod_head + n;
+ success = odp_atomic_cmpset_u32(&r->prod.head, prod_head,
+ prod_next);
+ } while (odp_unlikely(success == 0));
+
+ /* write entries in ring */
+ ENQUEUE_PTRS();
+ odp_mem_barrier();
+
+ /* if we exceed the watermark */
+ if (odp_unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
+ ret = (behavior == ODP_RING_QUEUE_FIXED) ? -EDQUOT :
+ (int)(n | ODP_RING_QUOT_EXCEED);
+ } else {
+ ret = (behavior == ODP_RING_QUEUE_FIXED) ? 0 : n;
+ }
+
+ /*
+ * If there are other enqueues in progress that preceeded us,
+ * we need to wait for them to complete
+ */
+ while (odp_unlikely(r->prod.tail != prod_head))
+ odp_spin();
+
+ r->prod.tail = prod_next;
+ return ret;
+}
+
+/**
+ * Enqueue several objects on a ring (NOT multi-producers safe).
+ *
+ * Single-producer fast path: no CAS loop, prod.head is advanced
+ * directly. Return-value semantics match the MP variant: -ENOBUFS /
+ * -EDQUOT in fixed mode, else the enqueued count possibly OR'ed with
+ * ODP_RING_QUOT_EXCEED when the watermark is passed.
+ * Note: ENQUEUE_PTRS() requires the local names r, obj_table, n, i,
+ * prod_head and mask.
+ */
+int __odp_ring_sp_do_enqueue(odp_ring_t *r, void * const *obj_table,
+ unsigned n, enum odp_ring_queue_behavior behavior)
+{
+ uint32_t prod_head, cons_tail;
+ uint32_t prod_next, free_entries;
+ unsigned i;
+ uint32_t mask = r->prod.mask;
+ int ret;
+
+ prod_head = r->prod.head;
+ cons_tail = r->cons.tail;
+ /* The subtraction is done between two unsigned 32bits value
+ * (the result is always modulo 32 bits even if we have
+ * prod_head > cons_tail). So 'free_entries' is always between 0
+ * and size(ring)-1. */
+ free_entries = mask + cons_tail - prod_head;
+
+ /* check that we have enough room in ring */
+ if (odp_unlikely(n > free_entries)) {
+ if (behavior == ODP_RING_QUEUE_FIXED) {
+ return -ENOBUFS;
+ } else {
+ /* No free entry available */
+ if (odp_unlikely(free_entries == 0))
+ return 0;
+
+ n = free_entries;
+ }
+ }
+
+ prod_next = prod_head + n;
+ r->prod.head = prod_next;
+
+ /* write entries in ring */
+ ENQUEUE_PTRS();
+ odp_mem_barrier();
+
+ /* if we exceed the watermark */
+ if (odp_unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
+ ret = (behavior == ODP_RING_QUEUE_FIXED) ? -EDQUOT :
+ (int)(n | ODP_RING_QUOT_EXCEED);
+ } else {
+ ret = (behavior == ODP_RING_QUEUE_FIXED) ? 0 : n;
+ }
+
+ r->prod.tail = prod_next;
+ return ret;
+}
+
+/**
+ * Dequeue several objects from a ring (multi-consumers safe).
+ *
+ * Lock-free: consumers claim a slot range by CAS on cons.head, copy
+ * the entries out, then publish by advancing cons.tail in claim order.
+ * Fixed behavior returns 0 on success or -ENOENT when fewer than n
+ * entries are available; variable behavior returns the count dequeued.
+ */
+
+int __odp_ring_mc_do_dequeue(odp_ring_t *r, void **obj_table,
+ unsigned n, enum odp_ring_queue_behavior behavior)
+{
+ uint32_t cons_head, prod_tail;
+ uint32_t cons_next, entries;
+ const unsigned max = n;
+ int success;
+ unsigned i;
+ uint32_t mask = r->prod.mask;
+
+ /* move cons.head atomically */
+ do {
+ /* Restore n as it may change every loop */
+ n = max;
+
+ cons_head = r->cons.head;
+ prod_tail = r->prod.tail;
+ /* The subtraction is done between two unsigned 32bits value
+ * (the result is always modulo 32 bits even if we have
+ * cons_head > prod_tail). So 'entries' is always between 0
+ * and size(ring)-1. */
+ entries = (prod_tail - cons_head);
+
+ /* Set the actual entries for dequeue */
+ if (n > entries) {
+ if (behavior == ODP_RING_QUEUE_FIXED) {
+ return -ENOENT;
+ } else {
+ if (odp_unlikely(entries == 0))
+ return 0;
+
+ n = entries;
+ }
+ }
+
+ cons_next = cons_head + n;
+ success = odp_atomic_cmpset_u32(&r->cons.head, cons_head,
+ cons_next);
+ } while (odp_unlikely(success == 0));
+
+ /* copy in table */
+ DEQUEUE_PTRS();
+ odp_mem_barrier();
+
+ /*
+ * If there are other dequeues in progress that preceded us,
+ * we need to wait for them to complete
+ */
+ while (odp_unlikely(r->cons.tail != cons_head))
+ odp_spin();
+
+ r->cons.tail = cons_next;
+
+ return behavior == ODP_RING_QUEUE_FIXED ? 0 : n;
+}
+
+/**
+ * Dequeue several objects from a ring (NOT multi-consumers safe).
+ *
+ * Single-consumer fast path: cons.head is advanced directly, no CAS.
+ * Fixed behavior returns 0 on success or -ENOENT when fewer than n
+ * entries are available; variable behavior returns the count dequeued.
+ * Note: DEQUEUE_PTRS() requires the local names r, obj_table, n, i,
+ * cons_head and mask.
+ */
+int __odp_ring_sc_do_dequeue(odp_ring_t *r, void **obj_table,
+ unsigned n, enum odp_ring_queue_behavior behavior)
+{
+ uint32_t cons_head, prod_tail;
+ uint32_t cons_next, entries;
+ unsigned i;
+ uint32_t mask = r->prod.mask;
+
+ cons_head = r->cons.head;
+ prod_tail = r->prod.tail;
+ /* The subtraction is done between two unsigned 32bits value
+ * (the result is always modulo 32 bits even if we have
+ * cons_head > prod_tail). So 'entries' is always between 0
+ * and size(ring)-1. */
+ entries = prod_tail - cons_head;
+
+ if (n > entries) {
+ if (behavior == ODP_RING_QUEUE_FIXED) {
+ return -ENOENT;
+ } else {
+ if (odp_unlikely(entries == 0))
+ return 0;
+
+ n = entries;
+ }
+ }
+
+ cons_next = cons_head + n;
+ r->cons.head = cons_next;
+
+ /* copy in table */
+ DEQUEUE_PTRS();
+ odp_mem_barrier();
+
+ r->cons.tail = cons_next;
+ return behavior == ODP_RING_QUEUE_FIXED ? 0 : n;
+}
+
+/**
+ * Enqueue several objects on the ring (multi-producers safe).
+ * Fixed-count semantics: all n are enqueued (return 0 / -EDQUOT past
+ * the watermark) or none are (-ENOBUFS).
+ */
+int odp_ring_mp_enqueue_bulk(odp_ring_t *r, void * const *obj_table,
+ unsigned n)
+{
+ return __odp_ring_mp_do_enqueue(r, obj_table, n, ODP_RING_QUEUE_FIXED);
+}
+
+/**
+ * Enqueue several objects on a ring (NOT multi-producers safe).
+ */
+int odp_ring_sp_enqueue_bulk(odp_ring_t *r, void * const *obj_table,
+ unsigned n)
+{
+ return __odp_ring_sp_do_enqueue(r, obj_table, n, ODP_RING_QUEUE_FIXED);
+}
+
+/**
+ * Dequeue several objects from a ring (multi-consumers safe).
+ */
+int odp_ring_mc_dequeue_bulk(odp_ring_t *r, void **obj_table, unsigned n)
+{
+ return __odp_ring_mc_do_dequeue(r, obj_table, n, ODP_RING_QUEUE_FIXED);
+}
+
+/**
+ * Dequeue several objects from a ring (NOT multi-consumers safe).
+ */
+int odp_ring_sc_dequeue_bulk(odp_ring_t *r, void **obj_table, unsigned n)
+{
+ return __odp_ring_sc_do_dequeue(r, obj_table, n, ODP_RING_QUEUE_FIXED);
+}
+
+/**
+ * Test if a ring is full.
+ */
+int odp_ring_full(const odp_ring_t *r)
+{
+ uint32_t prod_tail = r->prod.tail;
+ uint32_t cons_tail = r->cons.tail;
+ return (((cons_tail - prod_tail - 1) & r->prod.mask) == 0);
+}
+
+/**
+ * Test if a ring is empty.
+ */
+int odp_ring_empty(const odp_ring_t *r)
+{
+ uint32_t prod_tail = r->prod.tail;
+ uint32_t cons_tail = r->cons.tail;
+ return !!(cons_tail == prod_tail);
+}
+
+/**
+ * Return the number of entries in a ring.
+ */
+unsigned odp_ring_count(const odp_ring_t *r)
+{
+ uint32_t prod_tail = r->prod.tail;
+ uint32_t cons_tail = r->cons.tail;
+ return (prod_tail - cons_tail) & r->prod.mask;
+}
+
+/**
+ * Return the number of free entries in a ring.
+ */
+unsigned odp_ring_free_count(const odp_ring_t *r)
+{
+ uint32_t prod_tail = r->prod.tail;
+ uint32_t cons_tail = r->cons.tail;
+ return (cons_tail - prod_tail - 1) & r->prod.mask;
+}
+
/* dump the status of the ring on the console */
void odp_ring_dump(const odp_ring_t *r)
{
	ODP_DBG("ring <%s>@%p\n", r->name, r);
	ODP_DBG(" flags=%x\n", r->flags);
	ODP_DBG(" size=%"PRIu32"\n", r->prod.size);
	ODP_DBG(" ct=%"PRIu32"\n", r->cons.tail);
	ODP_DBG(" ch=%"PRIu32"\n", r->cons.head);
	ODP_DBG(" pt=%"PRIu32"\n", r->prod.tail);
	ODP_DBG(" ph=%"PRIu32"\n", r->prod.head);
	ODP_DBG(" used=%u\n", odp_ring_count(r));
	ODP_DBG(" avail=%u\n", odp_ring_free_count(r));
	/* watermark equal to size is printed as 0 - presumably meaning
	 * "watermark not set"; verify against the watermark setter. */
	if (r->prod.watermark == r->prod.size)
		ODP_DBG(" watermark=0\n");
	else
		ODP_DBG(" watermark=%"PRIu32"\n", r->prod.watermark);
}
+
+/* dump the status of all rings on the console */
+void odp_ring_list_dump(void)
+{
+ const odp_ring_t *mp = NULL;
+
+ odp_rwlock_read_lock(&qlock);
+
+ TAILQ_FOREACH(mp, &odp_ring_list, next) {
+ odp_ring_dump(mp);
+ }
+
+ odp_rwlock_read_unlock(&qlock);
+}
+
+/* search a ring from its name */
+odp_ring_t *odp_ring_lookup(const char *name)
+{
+ odp_ring_t *r = odp_shm_lookup(name);
+
+ odp_rwlock_read_lock(&qlock);
+ TAILQ_FOREACH(r, &odp_ring_list, next) {
+ if (strncmp(name, r->name, ODP_RING_NAMESIZE) == 0)
+ break;
+ }
+ odp_rwlock_read_unlock(&qlock);
+
+ return r;
+}
+
+/**
+ * Enqueue several objects on the ring (multi-producers safe).
+ */
+int odp_ring_mp_enqueue_burst(odp_ring_t *r, void * const *obj_table,
+ unsigned n)
+{
+ return __odp_ring_mp_do_enqueue(r, obj_table, n,
+ ODP_RING_QUEUE_VARIABLE);
+}
+
+/**
+ * Enqueue several objects on a ring (NOT multi-producers safe).
+ */
+int odp_ring_sp_enqueue_burst(odp_ring_t *r, void * const *obj_table,
+ unsigned n)
+{
+ return __odp_ring_sp_do_enqueue(r, obj_table, n,
+ ODP_RING_QUEUE_VARIABLE);
+}
+
+/**
+ * Enqueue several objects on a ring.
+ */
+int odp_ring_enqueue_burst(odp_ring_t *r, void * const *obj_table,
+ unsigned n)
+{
+ if (r->prod.sp_enqueue)
+ return odp_ring_sp_enqueue_burst(r, obj_table, n);
+ else
+ return odp_ring_mp_enqueue_burst(r, obj_table, n);
+}
+
+/**
+ * Dequeue several objects from a ring (multi-consumers safe).
+ */
+int odp_ring_mc_dequeue_burst(odp_ring_t *r, void **obj_table, unsigned n)
+{
+ return __odp_ring_mc_do_dequeue(r, obj_table, n,
+ ODP_RING_QUEUE_VARIABLE);
+}
+
+/**
+ * Dequeue several objects from a ring (NOT multi-consumers safe).
+ */
+int odp_ring_sc_dequeue_burst(odp_ring_t *r, void **obj_table, unsigned n)
+{
+ return __odp_ring_sc_do_dequeue(r, obj_table, n,
+ ODP_RING_QUEUE_VARIABLE);
+}
+
+/**
+ * Dequeue multiple objects from a ring up to a maximum number.
+ */
+int odp_ring_dequeue_burst(odp_ring_t *r, void **obj_table, unsigned n)
+{
+ if (r->cons.sc_dequeue)
+ return odp_ring_sc_dequeue_burst(r, obj_table, n);
+ else
+ return odp_ring_mc_dequeue_burst(r, obj_table, n);
+}
new file mode 100644
@@ -0,0 +1,61 @@
+/* Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_atomic.h>
+#include <odp_rwlock.h>
+
+#include "odp_spin_internal.h"
+
/* Initialize to the unlocked state: no readers, no writer (cnt == 0). */
void odp_rwlock_init(odp_rwlock_t *rwlock)
{
	rwlock->cnt = 0;
}
+
/* Acquire a read (shared) lock.
 *
 * cnt > 0 is the number of active readers; cnt == -1 means a writer
 * holds the lock (see odp_rwlock_write_lock). Spin while a writer is
 * active, then CAS-increment the reader count; the CAS fails and the
 * loop retries if cnt changed between the read and the CAS. */
void odp_rwlock_read_lock(odp_rwlock_t *rwlock)
{
	int32_t cnt;
	int is_locked = 0;

	while (is_locked == 0) {
		cnt = rwlock->cnt;
		/* a writer holds the lock (cnt < 0): spin and retry */
		if (cnt < 0) {
			odp_spin();
			continue;
		}
		is_locked = odp_atomic_cmpset_u32(
				(volatile uint32_t *)&rwlock->cnt,
				cnt, cnt + 1);
	}
}
+
/* Release a read lock: atomically drop the reader count by one.
 * The cast reuses the u32 atomic on the signed counter. */
void odp_rwlock_read_unlock(odp_rwlock_t *rwlock)
{
	odp_atomic_dec_u32((odp_atomic_u32_t *)(intptr_t)&rwlock->cnt);
}
+
/* Acquire the write (exclusive) lock.
 *
 * Waits until neither readers nor a writer are active (cnt == 0), then
 * CAS-transitions cnt from 0 to -1 to mark the writer as owner. */
void odp_rwlock_write_lock(odp_rwlock_t *rwlock)
{
	int32_t cnt;
	int is_locked = 0;

	while (is_locked == 0) {
		cnt = rwlock->cnt;
		/* lock acquired by readers or a writer: wait */
		if (cnt != 0) {
			odp_spin();
			continue;
		}
		is_locked = odp_atomic_cmpset_u32(
				(volatile uint32_t *)&rwlock->cnt,
				0, -1);
	}
}
+
/* Release the write lock: increment brings cnt from -1 back to 0. */
void odp_rwlock_write_unlock(odp_rwlock_t *rwlock)
{
	odp_atomic_inc_u32((odp_atomic_u32_t *)(intptr_t)&rwlock->cnt);
}
new file mode 100644
@@ -0,0 +1,396 @@
+/* Copyright (c) 2013, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_schedule.h>
+#include <odp_schedule_internal.h>
+#include <odp_align.h>
+#include <odp_queue.h>
+#include <odp_shared_memory.h>
+#include <odp_buffer.h>
+#include <odp_buffer_pool.h>
+#include <odp_internal.h>
+#include <odp_config.h>
+#include <odp_debug.h>
+#include <odp_thread.h>
+#include <odp_spinlock.h>
+#include <odp_hints.h>
+
+#include <odp_queue_internal.h>
+
+
/* Limits to number of scheduled queues */
#define SCHED_POOL_SIZE (256*1024)

/* Scheduler sub queues */
#define QUEUES_PER_PRIO 4

/* TODO: random or queue based selection */
#define SEL_PRI_QUEUE(x) ((QUEUES_PER_PRIO-1) & (queue_to_id(x)))

/* Maximum number of dequeues */
#define MAX_DEQ 4


/* Mask of queues per priority */
typedef uint8_t pri_mask_t;

/* Compile-time guard: the mask must have one bit per sub queue */
ODP_ASSERT((8*sizeof(pri_mask_t)) >= QUEUES_PER_PRIO, pri_mask_t_is_too_small);


/* Shared scheduler state */
typedef struct {
	/* One poll queue per (priority, sub queue) pair */
	odp_queue_t pri_queue[ODP_CONFIG_SCHED_PRIOS][QUEUES_PER_PRIO];
	/* Bit set when the corresponding sub queue has schedulable work */
	pri_mask_t pri_mask[ODP_CONFIG_SCHED_PRIOS];
	odp_spinlock_t mask_lock;	/* protects pri_mask updates */
	odp_buffer_pool_t pool;		/* pool for queue descriptors */
} sched_t;

/* Payload of a scheduler descriptor buffer: the queue it represents */
typedef struct {
	odp_queue_t queue;

} queue_desc_t;

/* Per-thread scheduler state */
typedef struct {
	odp_queue_t pri_queue;		/* held atomic context, or INVALID */
	odp_buffer_t desc_buf;		/* descriptor of the held queue */

	odp_buffer_t buf[MAX_DEQ];	/* stash of dequeued buffers */
	int num;			/* buffers left in the stash */
	int index;			/* next stash slot to hand out */
	odp_queue_t queue;		/* source queue of the stash */

} sched_local_t;

/* Global scheduler context */
static sched_t *sched;

/* Thread local scheduler context */
static __thread sched_local_t sched_local;
+
+
+static inline odp_queue_t select_pri_queue(odp_queue_t queue, int prio)
+{
+ int id = SEL_PRI_QUEUE(queue);
+ return sched->pri_queue[prio][id];
+}
+
+
+int odp_schedule_init_global(void)
+{
+ odp_buffer_pool_t pool;
+ void *pool_base;
+ int i, j;
+
+ ODP_DBG("Schedule init ... ");
+
+ sched = odp_shm_reserve("odp_scheduler",
+ sizeof(sched_t),
+ ODP_CACHE_LINE_SIZE);
+
+ if (sched == NULL) {
+ ODP_ERR("Schedule init: Shm reserve failed.\n");
+ return -1;
+ }
+
+
+ pool_base = odp_shm_reserve("odp_sched_pool",
+ SCHED_POOL_SIZE, ODP_CACHE_LINE_SIZE);
+
+ pool = odp_buffer_pool_create("odp_sched_pool", pool_base,
+ SCHED_POOL_SIZE, sizeof(queue_desc_t),
+ ODP_CACHE_LINE_SIZE,
+ ODP_BUFFER_TYPE_RAW);
+
+ if (pool == ODP_BUFFER_POOL_INVALID) {
+ ODP_ERR("Schedule init: Pool create failed.\n");
+ return -1;
+ }
+
+ sched->pool = pool;
+ odp_spinlock_init(&sched->mask_lock);
+
+ for (i = 0; i < ODP_CONFIG_SCHED_PRIOS; i++) {
+ odp_queue_t queue;
+ char name[] = "odp_priXX_YY";
+
+ name[7] = '0' + i / 10;
+ name[8] = '0' + i - 10*(i / 10);
+
+ for (j = 0; j < QUEUES_PER_PRIO; j++) {
+ name[10] = '0' + j / 10;
+ name[11] = '0' + j - 10*(j / 10);
+
+ queue = odp_queue_create(name,
+ ODP_QUEUE_TYPE_POLL, NULL);
+
+ if (queue == ODP_QUEUE_INVALID) {
+ ODP_ERR("Sched init: Queue create failed.\n");
+ return -1;
+ }
+
+ sched->pri_queue[i][j] = queue;
+ sched->pri_mask[i] = 0;
+ }
+ }
+
+ ODP_DBG("done\n");
+
+ return 0;
+}
+
+
+int odp_schedule_init_local(void)
+{
+ int i;
+
+ sched_local.pri_queue = ODP_QUEUE_INVALID;
+ sched_local.desc_buf = ODP_BUFFER_INVALID;
+
+ for (i = 0; i < MAX_DEQ; i++)
+ sched_local.buf[i] = ODP_BUFFER_INVALID;
+
+ sched_local.num = 0;
+ sched_local.index = 0;
+ sched_local.queue = ODP_QUEUE_INVALID;
+
+ return 0;
+}
+
+
+void odp_schedule_mask_set(odp_queue_t queue, int prio)
+{
+ int id = SEL_PRI_QUEUE(queue);
+
+ odp_spinlock_lock(&sched->mask_lock);
+ sched->pri_mask[prio] |= 1 << id;
+ odp_spinlock_unlock(&sched->mask_lock);
+}
+
+
+odp_buffer_t odp_schedule_buffer_alloc(odp_queue_t queue)
+{
+ odp_buffer_t buf;
+
+ buf = odp_buffer_alloc(sched->pool);
+
+ if (buf != ODP_BUFFER_INVALID) {
+ queue_desc_t *desc;
+ desc = odp_buffer_addr(buf);
+ desc->queue = queue;
+ }
+
+ return buf;
+}
+
+
+void odp_schedule_queue(odp_queue_t queue, int prio)
+{
+ odp_buffer_t desc_buf;
+ odp_queue_t pri_queue;
+
+ pri_queue = select_pri_queue(queue, prio);
+ desc_buf = queue_sched_buf(queue);
+
+ odp_queue_enq(pri_queue, desc_buf);
+}
+
+
/* Release the atomic queue context held by this thread, if any.
 *
 * Only done when the local buffer stash is empty (num == 0), so all
 * events taken from the atomic queue have been handed out before the
 * queue is made schedulable again. */
void odp_schedule_release_atomic_context(void)
{
	if (sched_local.pri_queue != ODP_QUEUE_INVALID &&
	    sched_local.num == 0) {
		/* Release current atomic queue */
		odp_queue_enq(sched_local.pri_queue, sched_local.desc_buf);
		sched_local.pri_queue = ODP_QUEUE_INVALID;
	}
}
+
+
+static inline int copy_bufs(odp_buffer_t out_buf[], unsigned int max)
+{
+ int i = 0;
+
+ while (sched_local.num && max) {
+ out_buf[i] = sched_local.buf[sched_local.index];
+ sched_local.index++;
+ sched_local.num--;
+ max--;
+ i++;
+ }
+
+ return i;
+}
+
/*
 * Schedule queues
 *
 * Serves leftover stashed buffers first, then searches the per-priority
 * scheduler queues in priority order. The sub queue start index is
 * derived from the thread id so threads spread over the sub queues.
 * Returns the number of buffers written to out_buf (0 when nothing is
 * schedulable).
 *
 * TODO: SYNC_ORDERED not implemented yet
 */
static int schedule(odp_queue_t *out_queue, odp_buffer_t out_buf[],
		    unsigned int max_num)
{
	int i, j;
	int thr;
	int ret;

	/* drain the local stash from the previous dequeue burst first */
	if (sched_local.num) {
		ret = copy_bufs(out_buf, max_num);

		if (out_queue)
			*out_queue = sched_local.queue;

		return ret;
	}

	/* stash is empty: a held atomic queue can be released now */
	odp_schedule_release_atomic_context();

	thr = odp_thread_id();

	for (i = 0; i < ODP_CONFIG_SCHED_PRIOS; i++) {
		int id;

		/* no schedulable queues on this priority */
		if (sched->pri_mask[i] == 0)
			continue;

		/* thread dependent starting sub queue */
		id = thr & (QUEUES_PER_PRIO-1);

		for (j = 0; j < QUEUES_PER_PRIO; j++, id++) {
			odp_queue_t pri_q;
			odp_buffer_t desc_buf;

			/* wrap around the sub queue index */
			if (id >= QUEUES_PER_PRIO)
				id = 0;

			if (odp_unlikely((sched->pri_mask[i] & (1 << id)) == 0))
				continue;

			pri_q = sched->pri_queue[i][id];
			desc_buf = odp_queue_deq(pri_q);

			if (desc_buf != ODP_BUFFER_INVALID) {
				queue_desc_t *desc;
				odp_queue_t queue;
				int num;

				desc = odp_buffer_addr(desc_buf);
				queue = desc->queue;

				/* fill the local stash from the queue */
				num = odp_queue_deq_multi(queue,
							  sched_local.buf,
							  MAX_DEQ);

				if (num == 0) {
					/* Remove empty queue from scheduling,
					 * except packet input queues
					 */
					if (odp_queue_type(queue) ==
					    ODP_QUEUE_TYPE_PKTIN)
						odp_queue_enq(pri_q, desc_buf);

					continue;
				}

				sched_local.num = num;
				sched_local.index = 0;
				ret = copy_bufs(out_buf, max_num);

				sched_local.queue = queue;

				if (queue_sched_atomic(queue)) {
					/* Hold queue during atomic access */
					sched_local.pri_queue = pri_q;
					sched_local.desc_buf = desc_buf;
				} else {
					/* Continue scheduling the queue */
					odp_queue_enq(pri_q, desc_buf);
				}

				/* Output the source queue handle */
				if (out_queue)
					*out_queue = queue;

				return ret;
			}
		}
	}

	/* nothing schedulable on any priority */
	return 0;
}
+
+
+odp_buffer_t odp_schedule_once(odp_queue_t *out_queue)
+{
+ odp_buffer_t buf = ODP_BUFFER_INVALID;
+
+ schedule(out_queue, &buf, 1);
+
+ return buf;
+}
+
+
+odp_buffer_t odp_schedule(odp_queue_t *out_queue)
+{
+ odp_buffer_t buf;
+ int ret;
+
+ while (1) {
+ ret = schedule(out_queue, &buf, 1);
+
+ if (ret)
+ return buf;
+ }
+}
+
+
+odp_buffer_t odp_schedule_n(odp_queue_t *out_queue, unsigned int n)
+{
+ odp_buffer_t buf;
+ int ret;
+
+ while (n--) {
+ ret = schedule(out_queue, &buf, 1);
+
+ if (ret)
+ return buf;
+ }
+
+ return ODP_BUFFER_INVALID;
+}
+
+
+int odp_schedule_multi(odp_queue_t *out_queue, odp_buffer_t out_buf[],
+ unsigned int num)
+{
+ int ret;
+
+ while (1) {
+ ret = schedule(out_queue, out_buf, num);
+
+ if (ret)
+ return ret;
+ }
+}
+
+
+int odp_schedule_multi_n(odp_queue_t *out_queue, odp_buffer_t out_buf[],
+ unsigned int num, unsigned int n)
+{
+ int ret;
+
+ while (n--) {
+ ret = schedule(out_queue, out_buf, num);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+
/* Number of scheduling priority levels supported by this implementation. */
int odp_schedule_num_prio(void)
{
	return ODP_CONFIG_SCHED_PRIOS;
}
new file mode 100644
@@ -0,0 +1,224 @@
+/* Copyright (c) 2013, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_shared_memory.h>
+#include <odp_internal.h>
+#include <odp_spinlock.h>
+#include <odp_align.h>
+#include <odp_system_info.h>
+#include <odp_debug.h>
+
+#include <sys/mman.h>
+#ifdef __powerpc__
+#include <asm/mman.h>
+#endif
+#include <fcntl.h>
+
+#include <stdio.h>
+#include <string.h>
+
+
/* Maximum number of shared memory blocks tracked in the table */
#define ODP_SHM_NUM_BLOCKS 32


/* Bookkeeping for one reserved mmap() region */
typedef struct {
	char name[ODP_SHM_NAME_LEN];
	uint64_t size;		/* requested size in bytes */
	uint64_t align;		/* requested alignment in bytes */
	void *addr_orig;	/* address returned by mmap() */
	void *addr;		/* aligned address handed to the user */
	int huge;		/* 1 when backed by huge pages */

} odp_shm_block_t;


typedef struct {
	odp_shm_block_t block[ODP_SHM_NUM_BLOCKS];
	odp_spinlock_t lock;	/* protects the whole table */

} odp_shm_table_t;


/* Anonymous shared mappings: visible to forked processes */
#define SHM_FLAGS (MAP_SHARED | MAP_ANONYMOUS)


/* Global shared memory table */
static odp_shm_table_t *odp_shm_tbl;
+
+
/* Global SHM init: the block table itself is placed in an anonymous
 * shared mapping so forked processes share it. Returns 0 on success,
 * -1 when the mapping fails. */
int odp_shm_init_global(void)
{
	void *addr;

#ifndef MAP_HUGETLB
	ODP_DBG("NOTE: mmap does not support huge pages\n");
#endif

	addr = mmap(NULL, sizeof(odp_shm_table_t),
		    PROT_READ | PROT_WRITE, SHM_FLAGS, -1, 0);

	if (addr == MAP_FAILED)
		return -1;

	odp_shm_tbl = addr;

	/* empty table: all names "" and addr NULL (free slot marker) */
	memset(odp_shm_tbl, 0, sizeof(odp_shm_table_t));
	odp_spinlock_init(&odp_shm_tbl->lock);

	return 0;
}
+
+
/* Per-thread SHM init: nothing to do in this implementation. */
int odp_shm_init_local(void)
{
	return 0;
}
+
+
+static int find_block(const char *name)
+{
+ int i;
+
+ for (i = 0; i < ODP_SHM_NUM_BLOCKS; i++) {
+ if (strcmp(name, odp_shm_tbl->block[i].name) == 0) {
+ /* found it */
+ return i;
+ }
+ }
+
+ return -1;
+}
+
+
/* Reserve a named, aligned block of shared memory.
 *
 * Over-allocates by 'align' bytes so the start address can be rounded
 * up to the requested alignment. Huge pages are tried first (when the
 * request exceeds a normal page), falling back to normal pages.
 * Returns the aligned address, or NULL on duplicate name, full table
 * or mmap failure. */
void *odp_shm_reserve(const char *name, uint64_t size, uint64_t align)
{
	int i;
	odp_shm_block_t *block;
	void *addr;
#ifdef MAP_HUGETLB
	uint64_t huge_sz, page_sz;

	huge_sz = odp_sys_huge_page_size();
	page_sz = odp_sys_page_size();
#endif

	odp_spinlock_lock(&odp_shm_tbl->lock);

	if (find_block(name) >= 0) {
		/* Found a block with the same name */
		odp_spinlock_unlock(&odp_shm_tbl->lock);
		return NULL;
	}

	/* find a free slot (addr == NULL marks a slot as unused) */
	for (i = 0; i < ODP_SHM_NUM_BLOCKS; i++) {
		if (odp_shm_tbl->block[i].addr == NULL) {
			/* Found free block */
			break;
		}
	}

	if (i > ODP_SHM_NUM_BLOCKS - 1) {
		/* Table full */
		odp_spinlock_unlock(&odp_shm_tbl->lock);
		return NULL;
	}

	block = &odp_shm_tbl->block[i];

	addr = MAP_FAILED;
	block->huge = 0;

#ifdef MAP_HUGETLB
	/* Try first huge pages */
	if (huge_sz && (size + align) > page_sz) {
		addr = mmap(NULL, size + align, PROT_READ | PROT_WRITE,
			    SHM_FLAGS | MAP_HUGETLB, -1, 0);
	}
#endif

	/* Use normal pages for small or failed huge page allocations */
	if (addr == MAP_FAILED) {
		addr = mmap(NULL, size + align, PROT_READ | PROT_WRITE,
			    SHM_FLAGS, -1, 0);

	} else {
		block->huge = 1;
	}

	if (addr == MAP_FAILED) {
		/* Alloc failed */
		odp_spinlock_unlock(&odp_shm_tbl->lock);
		return NULL;
	}

	/* keep the unaligned address for a possible future munmap */
	block->addr_orig = addr;

	/* move to correct alignment */
	addr = ODP_ALIGN_ROUNDUP_PTR(addr, align);

	strncpy(block->name, name, ODP_SHM_NAME_LEN - 1);
	block->name[ODP_SHM_NAME_LEN - 1] = 0;
	block->size = size;
	block->align = align;
	block->addr = addr;

	odp_spinlock_unlock(&odp_shm_tbl->lock);
	return addr;
}
+
+
+void *odp_shm_lookup(const char *name)
+{
+ int i;
+ void *addr;
+
+ odp_spinlock_lock(&odp_shm_tbl->lock);
+
+ i = find_block(name);
+
+ if (i < 0) {
+ odp_spinlock_unlock(&odp_shm_tbl->lock);
+ return NULL;
+ }
+
+ addr = odp_shm_tbl->block[i].addr;
+ odp_spinlock_unlock(&odp_shm_tbl->lock);
+
+ return addr;
+}
+
+
/* Print page sizes and a table of all reserved blocks to stdout. */
void odp_shm_print_all(void)
{
	int i;

	printf("\nShared memory\n");
	printf("--------------\n");
	printf(" page size: %"PRIu64" kB\n", odp_sys_page_size() / 1024);
	printf(" huge page size: %"PRIu64" kB\n",
	       odp_sys_huge_page_size() / 1024);
	printf("\n");

	printf(" id name kB align huge addr\n");

	for (i = 0; i < ODP_SHM_NUM_BLOCKS; i++) {
		odp_shm_block_t *block;

		block = &odp_shm_tbl->block[i];

		/* addr == NULL marks an unused slot */
		if (block->addr) {
			printf(" %2i %-24s %4"PRIu64" %4"PRIu64" %2c %p\n",
			       i,
			       block->name,
			       block->size/1024,
			       block->align,
			       (block->huge ? '*' : ' '),
			       block->addr);
		}
	}

	printf("\n");
}
new file mode 100644
@@ -0,0 +1,40 @@
+/* Copyright (c) 2013, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_spinlock.h>
+#include <odp_spin_internal.h>
+
+
/* Initialize to unlocked. __sync_lock_release() stores 0 with release
 * memory ordering, so the cleared state is visible to other threads. */
void odp_spinlock_init(odp_spinlock_t *spinlock)
{
	__sync_lock_release(&spinlock->lock);
}
+
+
/* Acquire the lock.
 * Outer loop: atomic test-and-set (acquire semantics). Inner loop:
 * spin on plain reads until the lock looks free, avoiding atomic
 * traffic on a contended cache line (test-and-test-and-set). */
void odp_spinlock_lock(odp_spinlock_t *spinlock)
{
	while (__sync_lock_test_and_set(&spinlock->lock, 1))
		while (spinlock->lock)
			odp_spin();
}
+
+
+int odp_spinlock_trylock(odp_spinlock_t *spinlock)
+{
+ return (__sync_lock_test_and_set(&spinlock->lock, 1) == 0);
+}
+
+
/* Release: __sync_lock_release() stores 0 with release ordering. */
void odp_spinlock_unlock(odp_spinlock_t *spinlock)
{
	__sync_lock_release(&spinlock->lock);
}
+
+
+int odp_spinlock_is_locked(odp_spinlock_t *spinlock)
+{
+ return spinlock->lock != 0;
+}
new file mode 100644
@@ -0,0 +1,409 @@
+/* Copyright (c) 2013, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_system_info.h>
+#include <odp_internal.h>
+#include <odp_debug.h>
+#include <odp_align.h>
+#include <string.h>
+#include <stdio.h>
+
+/* sysconf */
+#include <unistd.h>
+#include <sys/sysinfo.h>
+
+/* opendir, readdir */
+#include <sys/types.h>
+#include <dirent.h>
+
/* Cached system information, filled in once by odp_system_info_init() */
typedef struct {
	uint64_t cpu_hz;		/* CPU frequency in Hz */
	uint64_t huge_page_size;	/* bytes; 0 when unavailable */
	uint64_t page_size;		/* normal page size in bytes */
	int cache_line_size;
	int core_count;
	char model_str[128];		/* CPU model name string */

} odp_system_info_t;

/* Compile-time selected /proc/cpuinfo parser for the target arch */
typedef struct {
	const char *cpu_arch_str;
	int (*cpuinfo_parser)(FILE *file, odp_system_info_t *sysinfo);

} odp_compiler_info_t;

static odp_system_info_t odp_system_info;


/* sysfs file exposing the L1 coherency line size of cpu0 */
#define CACHE_LNSZ_FILE \
	"/sys/devices/system/cpu/cpu0/cache/index0/coherency_line_size"

#define HUGE_PAGE_DIR "/sys/kernel/mm/hugepages"
+
+
+/*
+ * Sysconf
+ */
/*
 * Sysconf
 */
/* Number of configured processors; 0 on sysconf failure. */
static int sysconf_core_count(void)
{
	long n = sysconf(_SC_NPROCESSORS_CONF);

	return (n < 0) ? 0 : (int)n;
}
+
+
+#if defined __x86_64__ || defined __i386__ || defined __OCTEON__ || \
+defined __powerpc__
+/*
+ * Analysis of /sys/devices/system/cpu/ files
+ */
+static int systemcpu_cache_line_size(void)
+{
+ FILE *file;
+ char str[128];
+ int size = 0;
+
+ file = fopen(CACHE_LNSZ_FILE, "rt");
+ if (file == NULL) {
+ /* File not found */
+ return 0;
+ }
+
+ if (fgets(str, sizeof(str), file) != NULL) {
+ /* Read cache line size */
+ sscanf(str, "%i", &size);
+ }
+
+ fclose(file);
+
+ return size;
+}
+
+
+static int huge_page_size(void)
+{
+ DIR *dir;
+ struct dirent *dirent;
+ int size = 0;
+
+ dir = opendir(HUGE_PAGE_DIR);
+ if (dir == NULL) {
+ ODP_ERR("%s not found\n", HUGE_PAGE_DIR);
+ return 0;
+ }
+
+ while ((dirent = readdir(dir)) != NULL) {
+ int temp = 0;
+ sscanf(dirent->d_name, "hugepages-%i", &temp);
+
+ if (temp > size)
+ size = temp;
+ }
+
+ if (closedir(dir)) {
+ ODP_ERR("closedir failed\n");
+ return 0;
+ }
+
+ return size*1024;
+}
+
+#endif
+
+
+/*
+ * HW specific /proc/cpuinfo file parsing
+ */
+#if defined __x86_64__ || defined __i386__
+
+static int cpuinfo_x86(FILE *file, odp_system_info_t *sysinfo)
+{
+ char str[1024];
+ char *pos;
+ double mhz = 0.0;
+ int model = 0;
+ int count = 2;
+
+ while (fgets(str, sizeof(str), file) != NULL && count > 0) {
+ if (!mhz) {
+ pos = strstr(str, "cpu MHz");
+ if (pos) {
+ sscanf(pos, "cpu MHz : %lf", &mhz);
+ count--;
+ }
+ }
+
+ if (!model) {
+ pos = strstr(str, "model name");
+ if (pos) {
+ int len;
+ pos = strchr(str, ':');
+ strncpy(sysinfo->model_str, pos+2,
+ sizeof(sysinfo->model_str));
+ len = strlen(sysinfo->model_str);
+ sysinfo->model_str[len - 1] = 0;
+ model = 1;
+ count--;
+ }
+ }
+ }
+
+ sysinfo->cpu_hz = (uint64_t) (mhz * 1000000.0);
+
+ return 0;
+}
+
+#elif defined __arm__
+
/* ARM: no cpuinfo fields are parsed; cpu_hz/model_str stay at their
 * zeroed defaults. Always returns 0. */
static int cpuinfo_arm(FILE *file ODP_UNUSED,
odp_system_info_t *sysinfo ODP_UNUSED)
{
	return 0;
}
+
+#elif defined __OCTEON__
+
+static int cpuinfo_octeon(FILE *file, odp_system_info_t *sysinfo)
+{
+ char str[1024];
+ char *pos;
+ double mhz = 0.0;
+ int model = 0;
+ int count = 2;
+
+ while (fgets(str, sizeof(str), file) != NULL && count > 0) {
+ if (!mhz) {
+ pos = strstr(str, "BogoMIPS");
+
+ if (pos) {
+ sscanf(pos, "BogoMIPS : %lf", &mhz);
+ count--;
+ }
+ }
+
+ if (!model) {
+ pos = strstr(str, "cpu model");
+
+ if (pos) {
+ int len;
+ pos = strchr(str, ':');
+ strncpy(sysinfo->model_str, pos+2,
+ sizeof(sysinfo->model_str));
+ len = strlen(sysinfo->model_str);
+ sysinfo->model_str[len - 1] = 0;
+ model = 1;
+ count--;
+ }
+ }
+ }
+
+ /* bogomips seems to be 2x freq */
+ sysinfo->cpu_hz = (uint64_t) (mhz * 1000000.0 / 2.0);
+
+ return 0;
+}
+#elif defined __powerpc__
+static int cpuinfo_powerpc(FILE *file, odp_system_info_t *sysinfo)
+{
+ char str[1024];
+ char *pos;
+ double mhz = 0.0;
+ int model = 0;
+ int count = 2;
+
+ while (fgets(str, sizeof(str), file) != NULL && count > 0) {
+ if (!mhz) {
+ pos = strstr(str, "clock");
+
+ if (pos) {
+ sscanf(pos, "clock : %lf", &mhz);
+ count--;
+ }
+ }
+
+ if (!model) {
+ pos = strstr(str, "cpu");
+
+ if (pos) {
+ int len;
+ pos = strchr(str, ':');
+ strncpy(sysinfo->model_str, pos+2,
+ sizeof(sysinfo->model_str));
+ len = strlen(sysinfo->model_str);
+ sysinfo->model_str[len - 1] = 0;
+ model = 1;
+ count--;
+ }
+ }
+
+ sysinfo->cpu_hz = (uint64_t) (mhz * 1000000.0);
+ }
+
+
+ return 0;
+}
+
+#else
+ #error GCC target not found
+#endif
+
/* Architecture string and cpuinfo parser, selected at compile time to
 * match the parser functions defined above. */
static odp_compiler_info_t compiler_info = {
	#if defined __x86_64__ || defined __i386__
	.cpu_arch_str = "x86",
	.cpuinfo_parser = cpuinfo_x86

	#elif defined __arm__
	.cpu_arch_str = "arm",
	.cpuinfo_parser = cpuinfo_arm

	#elif defined __OCTEON__
	.cpu_arch_str = "octeon",
	.cpuinfo_parser = cpuinfo_octeon

	#elif defined __powerpc__
	.cpu_arch_str = "powerpc",
	.cpuinfo_parser = cpuinfo_powerpc

	#else
	#error GCC target not found
	#endif
};
+
+
+#if defined __x86_64__ || defined __i386__ || defined __OCTEON__ || \
+defined __powerpc__
+
+/*
+ * Analysis of /sys/devices/system/cpu/ files
+ */
+static int systemcpu(odp_system_info_t *sysinfo)
+{
+ int ret;
+
+ ret = sysconf_core_count();
+ if (ret == 0) {
+ ODP_ERR("sysconf_core_count failed.\n");
+ return -1;
+ }
+
+ sysinfo->core_count = ret;
+
+
+ ret = systemcpu_cache_line_size();
+ if (ret == 0) {
+ ODP_ERR("systemcpu_cache_line_size failed.\n");
+ return -1;
+ }
+
+ sysinfo->cache_line_size = ret;
+
+ if (ret != ODP_CACHE_LINE_SIZE) {
+ ODP_ERR("Cache line sizes definitions don't match.\n");
+ return -1;
+ }
+
+ odp_system_info.huge_page_size = huge_page_size();
+
+ return 0;
+}
+
+#else
+
+/*
+ * Use sysconf and dummy values in generic case
+ */
+
+
+static int systemcpu(odp_system_info_t *sysinfo)
+{
+ int ret;
+
+ ret = sysconf_core_count();
+ if (ret == 0) {
+ ODP_ERR("sysconf_core_count failed.\n");
+ return -1;
+ }
+
+ sysinfo->core_count = ret;
+
+ /* Dummy values */
+ sysinfo->cpu_hz = 1400000000;
+ sysinfo->cache_line_size = 64;
+
+ strncpy(sysinfo->model_str, "UNKNOWN", sizeof(sysinfo->model_str));
+
+ return 0;
+}
+
+#endif
+
+/*
+ * System info initialisation
+ */
+int odp_system_info_init(void)
+{
+ FILE *file;
+
+ memset(&odp_system_info, 0, sizeof(odp_system_info_t));
+
+ odp_system_info.page_size = ODP_PAGE_SIZE;
+
+ file = fopen("/proc/cpuinfo", "rt");
+ if (file == NULL) {
+ ODP_ERR("Failed to open /proc/cpuinfo\n");
+ return -1;
+ }
+
+ compiler_info.cpuinfo_parser(file, &odp_system_info);
+
+ fclose(file);
+
+ if (systemcpu(&odp_system_info)) {
+ ODP_ERR("systemcpu failed\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ *************************
+ * Public access functions
+ *************************
+ */
/*
 *************************
 * Public access functions
 *************************
 */
/* CPU frequency in Hz, as parsed from /proc/cpuinfo at init time. */
uint64_t odp_sys_cpu_hz(void)
{
	return odp_system_info.cpu_hz;
}
+
/* Huge page size in bytes; 0 when huge pages are unavailable. */
uint64_t odp_sys_huge_page_size(void)
{
	return odp_system_info.huge_page_size;
}
+
/* Normal page size in bytes (ODP_PAGE_SIZE, set at init). */
uint64_t odp_sys_page_size(void)
{
	return odp_system_info.page_size;
}
+
/* CPU model name string; valid after odp_system_info_init(). */
const char *odp_sys_cpu_model_str(void)
{
	return odp_system_info.model_str;
}
+
/* Cache line size in bytes, verified against ODP_CACHE_LINE_SIZE. */
int odp_sys_cache_line_size(void)
{
	return odp_system_info.cache_line_size;
}
+
/* Number of configured processor cores (from sysconf). */
int odp_sys_core_count(void)
{
	return odp_system_info.core_count;
}
new file mode 100644
@@ -0,0 +1,68 @@
+/* Copyright (c) 2013, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_thread.h>
+#include <odp_internal.h>
+#include <odp_atomic.h>
+#include <odp_config.h>
+
+#include <string.h>
+#include <stdio.h>
+
+
/* Per-thread entry: ODP thread id and the physical core it runs on */
typedef struct {
	int thr_id;
	int phys_core;

} odp_thread_tbl_t;


/* Globals */
static odp_thread_tbl_t odp_thread_tbl[ODP_CONFIG_MAX_THREADS];
static odp_atomic_int_t num_threads;	/* count of created threads */

/* Thread local */
static __thread odp_thread_tbl_t *odp_this_thread;
+
+
+void odp_thread_init_global(void)
+{
+ memset(odp_thread_tbl, 0, sizeof(odp_thread_tbl));
+ num_threads = 0;
+}
+
+
/* Bind the calling thread to its table entry.
 * NOTE(review): thr_id is not range checked against
 * ODP_CONFIG_MAX_THREADS - caller must pass a valid id. */
void odp_thread_init_local(int thr_id)
{
	odp_this_thread = &odp_thread_tbl[thr_id];
}
+
+
/* Allocate the next thread id and record its physical core.
 *
 * NOTE(review): when the table is full (id >= ODP_CONFIG_MAX_THREADS)
 * the id is still returned without a table entry being written -
 * callers should check the limit. */
int odp_thread_create(int phys_core)
{
	int id;

	/* atomically reserve the next slot */
	id = odp_atomic_fetch_add_int(&num_threads, 1);

	if (id < ODP_CONFIG_MAX_THREADS) {
		odp_thread_tbl[id].thr_id = id;
		odp_thread_tbl[id].phys_core = phys_core;
	}

	return id;
}
+
+
/* ODP thread id of the calling thread (set by odp_thread_init_local). */
int odp_thread_id(void)
{
	return odp_this_thread->thr_id;
}
+
+
/* Physical core recorded for the calling thread at creation time. */
int odp_thread_core(void)
{
	return odp_this_thread->phys_core;
}
new file mode 100644
@@ -0,0 +1,51 @@
+/* Copyright (c) 2013, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_ticketlock.h>
+#include <odp_atomic.h>
+#include <odp_sync.h>
+#include <odp_spin_internal.h>
+
+
/* Initialize to unlocked: no tickets issued, none being served.
 * odp_sync_stores() makes the cleared state visible before the lock
 * can be used by other threads. */
void odp_ticketlock_init(odp_ticketlock_t *ticketlock)
{
	ticketlock->next_ticket = 0;
	ticketlock->cur_ticket = 0;
	odp_sync_stores();
}
+
+
/* Acquire: take a ticket atomically, then spin until it is served.
 * Tickets grant the lock in FIFO order. */
void odp_ticketlock_lock(odp_ticketlock_t *ticketlock)
{
	uint32_t ticket;

	/* atomically draw the next ticket number */
	ticket = odp_atomic_fetch_inc_u32(&ticketlock->next_ticket);

	/* wait until our ticket is being served */
	while (ticket != ticketlock->cur_ticket)
		odp_spin();

	/* keep the critical section from being reordered above this point */
	odp_mem_barrier();
}
+
+
/* Release: flush critical-section stores, then serve the next ticket. */
void odp_ticketlock_unlock(odp_ticketlock_t *ticketlock)
{
	odp_sync_stores();

	ticketlock->cur_ticket++;

	/* make the new cur_ticket visible to spinning waiters */
#if defined __OCTEON__
	odp_sync_stores();
#else
	odp_mem_barrier();
#endif
}
+
+
+int odp_ticketlock_is_locked(odp_ticketlock_t *ticketlock)
+{
+ return ticketlock->cur_ticket != ticketlock->next_ticket;
+}
new file mode 100644
@@ -0,0 +1,92 @@
+/* Copyright (c) 2013, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_time.h>
+#include <odp_hints.h>
+#include <odp_system_info.h>
+#include <odp_debug.h>
+
+#if defined __x86_64__ || defined __i386__
+
/* Read the CPU time stamp counter with RDTSC.
 * EAX holds the low 32 bits, EDX the high 32 bits; the union merges
 * them into one 64-bit value. The "memory" clobber keeps the compiler
 * from reordering memory accesses around the read. */
uint64_t odp_time_get_cycles(void)
{
	union {
		uint64_t tsc_64;
		struct {
			uint32_t lo_32;
			uint32_t hi_32;
		};
	} tsc;

	asm volatile("rdtsc" :
		     "=a" (tsc.lo_32),
		     "=d" (tsc.hi_32) : : "memory");

	return tsc.tsc_64;
}
+
+
+#elif defined __OCTEON__
+
/* Read the Octeon cycle counter via rdhwr on hardware register $31. */
uint64_t odp_time_get_cycles(void)
{
	#define CVMX_TMP_STR(x) CVMX_TMP_STR2(x)
	#define CVMX_TMP_STR2(x) #x
	uint64_t cycle;

	asm __volatile__ ("rdhwr %[rt],$" CVMX_TMP_STR(31) :
			  [rt] "=d" (cycle) : : "memory");

	return cycle;
}
+
+#else
+
+#include <time.h>
+#include <stdlib.h>
+
+uint64_t odp_time_get_cycles(void)
+{
+ struct timespec time;
+ uint64_t sec, ns, hz, cycles;
+ int ret;
+
+ ret = clock_gettime(CLOCK_MONOTONIC_RAW, &time);
+
+ if (ret != 0) {
+ ODP_ERR("clock_gettime failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ hz = odp_sys_cpu_hz();
+ sec = (uint64_t) time.tv_sec;
+ ns = (uint64_t) time.tv_nsec;
+
+ cycles = sec * hz;
+ cycles += (ns * hz) / 1000000000;
+
+ return cycles;
+}
+
+#endif
+
/**
 * Cycle count difference t2 - t1, correct across counter wrap-around.
 *
 * Unsigned subtraction is exact modulo 2^64, so it yields the right
 * distance in every case. (Fix: the old wrap branch returned
 * t2 + (UINT64_MAX - t1), which is one cycle short, and returned
 * UINT64_MAX instead of 0 when t1 == t2.)
 */
uint64_t odp_time_diff_cycles(uint64_t t1, uint64_t t2)
{
	return t2 - t1;
}
+
/* Convert CPU cycles to nanoseconds.
 * For large cycle counts divide first so the multiplication by 1e9
 * cannot overflow 64 bits, at the cost of rounding down to whole-hz
 * multiples. */
uint64_t odp_time_cycles_to_ns(uint64_t cycles)
{
	uint64_t hz = odp_sys_cpu_hz();

	if (cycles > (UINT64_MAX / 1000000000))
		return 1000000000*(cycles/hz);

	return (1000000000*cycles)/hz;
}
new file mode 100644
@@ -0,0 +1,332 @@
+/* Copyright (c) 2013, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_timer.h>
+#include <odp_internal.h>
+#include <odp_atomic.h>
+#include <odp_spinlock.h>
+#include <odp_sync.h>
+#include <odp_debug.h>
+
+#include <signal.h>
+#include <time.h>
+
+#include <string.h>
+
+#define NUM_TIMERS 1
+#define MAX_TICKS 1024
+#define RESOLUTION_NS 1000000
+
struct timeout_t;

/* One pending timeout. The record itself lives in a buffer allocated
 * from the owning timer's pool and is linked into the timer-wheel slot
 * it expires on. */
typedef struct timeout_t {
	struct timeout_t *next; /* next timeout on the same tick list */
	int timer_id;           /* owning timer index (handle - 1) */
	int tick;               /* wheel slot index this entry sits on */
	uint64_t tmo_tick;      /* absolute expiration tick */
	odp_queue_t queue;      /* queue that receives buf on expiry */
	odp_buffer_t buf;       /* buffer delivered on expiry (may equal
				 * tmo_buf when the user gave none) */
	odp_buffer_t tmo_buf;   /* buffer holding this timeout_t */
} timeout_t;

/* One timer-wheel slot: lock-protected singly linked list of the
 * timeouts expiring on this tick. */
typedef struct {
	odp_spinlock_t lock;    /* protects list */
	timeout_t *list;        /* head of pending-timeout list */
} tick_t;

/* Per-timer state: a MAX_TICKS-slot timer wheel driven by a POSIX
 * interval timer. */
typedef struct {
	volatile int active;         /* set once odp_timer_create() done */
	volatile uint64_t cur_tick;  /* advanced by notify_function() */
	timer_t timerid;             /* underlying POSIX timer */
	odp_buffer_pool_t pool;      /* pool for timeout buffers */
	uint64_t resolution_ns;      /* tick length in ns (RESOLUTION_NS) */
	uint64_t max_ticks;          /* wheel size (MAX_TICKS) */
	tick_t tick[MAX_TICKS];      /* the wheel slots */

} timer_ring_t;

/* All timers plus the allocation counter for odp_timer_create(). */
typedef struct {
	timer_ring_t timer[NUM_TIMERS];
	odp_atomic_int_t num_timers; /* number of timers created so far */
} timer_global_t;

/* Global */
timer_global_t odp_timer;
+
/* Push tmo onto the head of a wheel slot's timeout list, under the
 * slot's lock. */
static void add_tmo(tick_t *tick, timeout_t *tmo)
{
	odp_spinlock_lock(&tick->lock);

	tmo->next = tick->list;
	tick->list = tmo;

	odp_spinlock_unlock(&tick->lock);
}
+
+static timeout_t *rem_tmo(tick_t *tick)
+{
+ timeout_t *tmo;
+
+ odp_spinlock_lock(&tick->lock);
+
+ tmo = tick->list;
+
+ if (tmo)
+ tick->list = tmo->next;
+
+ odp_spinlock_unlock(&tick->lock);
+
+ if (tmo)
+ tmo->next = NULL;
+
+ return tmo;
+}
+
+/**
+ * Search and delete tmo entry from timeout list
+ * return -1 : on error.. handle not in list
+ * 0 : success
+ */
+static int find_and_del_tmo(timeout_t **tmo, odp_timer_tmo_t handle)
+{
+ timeout_t *cur, *prev;
+ prev = NULL;
+
+ for (cur = *tmo; cur != NULL; prev = cur, cur = cur->next) {
+ if (cur->tmo_buf == handle) {
+ if (prev == NULL)
+ *tmo = cur->next;
+ else
+ prev->next = cur->next;
+
+ break;
+ }
+ }
+
+ if (!cur)
+ /* couldn't find tmo in list */
+ return -1;
+
+ /* application to free tmo_buf provided by absolute_tmo call */
+ return 0;
+}
+
/*
 * Cancel a pending timeout by unlinking it from its wheel slot.
 * Returns 0 on success, -1 if the timeout is not on the list (e.g. it
 * already expired or was cancelled). The tmo buffer is not freed here;
 * the application owns it.
 */
int odp_timer_cancel_tmo(odp_timer_t timer, odp_timer_tmo_t tmo)
{
	int id;
	uint64_t tick_idx;
	timeout_t *cancel_tmo;
	tick_t *tick;

	/* get id: handles are 1-based */
	id = timer - 1;

	/* get tmo_buf to cancel */
	cancel_tmo = (timeout_t *)odp_buffer_addr(tmo);
	/* NOTE(review): cancel_tmo->tick is read before taking the slot
	 * lock; if the timeout expires concurrently this may race with
	 * notify_function() — confirm. */
	tick_idx = cancel_tmo->tick;
	tick = &odp_timer.timer[id].tick[tick_idx];

	odp_spinlock_lock(&tick->lock);
	/* search and delete tmo from tick list */
	if (find_and_del_tmo(&tick->list, tmo) != 0) {
		odp_spinlock_unlock(&tick->lock);
		/* NOTE(review): "%d" assumes the handle is int-sized —
		 * verify against odp_timer_tmo_t's definition. */
		ODP_DBG("Couldn't find the tmo (%d) in tick list\n", tmo);
		return -1;
	}
	odp_spinlock_unlock(&tick->lock);

	return 0;
}
+
/*
 * SIGEV_THREAD callback fired by the POSIX interval timer every
 * RESOLUTION_NS. Advances timer[0]'s tick counter and delivers every
 * timeout queued on the wheel slot that just expired.
 * NOTE(review): only timer[0] is ever serviced here, although
 * NUM_TIMERS rings exist — confirm this is intended while
 * NUM_TIMERS == 1.
 */
static void notify_function(union sigval sigval)
{
	(void) sigval;
	uint64_t cur_tick;
	timeout_t *tmo;
	tick_t *tick;

	/* Nothing to do until odp_timer_create() activates the timer. */
	if (odp_timer.timer[0].active == 0)
		return;

	/* ODP_DBG("Tick\n"); */

	/* Post-increment: process the slot of the tick being retired. */
	cur_tick = odp_timer.timer[0].cur_tick++;

	tick = &odp_timer.timer[0].tick[cur_tick % MAX_TICKS];

	/* Drain the slot and enqueue each expired timeout. */
	while ((tmo = rem_tmo(tick)) != NULL) {
		odp_queue_t queue;
		odp_buffer_t buf;

		queue = tmo->queue;
		buf = tmo->buf;

		/* If the user supplied a payload buffer, the internal
		 * timeout buffer is no longer needed. */
		if (buf != tmo->tmo_buf)
			odp_buffer_free(tmo->tmo_buf);

		odp_queue_enq(queue, buf);
	}
}
+
/*
 * Create and arm the POSIX interval timer backing timer[0]: a
 * SIGEV_THREAD notification (notify_function) every RESOLUTION_NS on
 * CLOCK_MONOTONIC.
 * NOTE(review): failures are only logged and the void return hides
 * them, so odp_timer_init_global() still reports success — confirm
 * this is acceptable.
 */
static void timer_init(void)
{
	struct sigevent sigev;
	struct itimerspec ispec;

	ODP_DBG("Timer thread starts\n");

	memset(&sigev, 0, sizeof(sigev));
	memset(&ispec, 0, sizeof(ispec));

	sigev.sigev_notify = SIGEV_THREAD;
	sigev.sigev_notify_function = notify_function;

	if (timer_create(CLOCK_MONOTONIC, &sigev,
			 &odp_timer.timer[0].timerid)) {
		ODP_DBG("Timer create failed\n");
		return;
	}

	/* First expiry after one resolution period, then periodic. */
	ispec.it_interval.tv_sec = 0;
	ispec.it_interval.tv_nsec = RESOLUTION_NS;
	ispec.it_value.tv_sec = 0;
	ispec.it_value.tv_nsec = RESOLUTION_NS;

	if (timer_settime(odp_timer.timer[0].timerid, 0, &ispec, NULL)) {
		ODP_DBG("Timer set failed\n");
		return;
	}

	return;
}
+
+int odp_timer_init_global(void)
+{
+ int i;
+
+ memset(&odp_timer, 0, sizeof(timer_global_t));
+
+ for (i = 0; i < MAX_TICKS; i++)
+ odp_spinlock_init(&odp_timer.timer[0].tick[i].lock);
+
+ timer_init();
+
+ return 0;
+}
+
/*
 * Allocate one of the NUM_TIMERS timer slots and mark it active.
 * name/resolution/min_tmo/max_tmo are currently ignored: every slot
 * uses RESOLUTION_NS and MAX_TICKS. Returns a 1-based handle, or
 * ODP_TIMER_INVALID when no slot is free.
 */
odp_timer_t odp_timer_create(const char *name, odp_buffer_pool_t pool,
			     uint64_t resolution, uint64_t min_tmo,
			     uint64_t max_tmo)
{
	uint32_t id;
	(void) name; (void) resolution; (void) min_tmo; (void) max_tmo;

	/* Cheap pre-check before touching the atomic counter. */
	if (odp_timer.num_timers >= NUM_TIMERS)
		return ODP_TIMER_INVALID;

	/* NOTE(review): when this race is lost the counter is not
	 * decremented, leaving it permanently inflated — confirm. */
	id = odp_atomic_fetch_inc_int(&odp_timer.num_timers);
	if (id >= NUM_TIMERS)
		return ODP_TIMER_INVALID;

	odp_timer.timer[id].pool = pool;
	odp_timer.timer[id].resolution_ns = RESOLUTION_NS;
	odp_timer.timer[id].max_ticks = MAX_TICKS;

	/* Publish the fields above before flagging the slot active. */
	odp_sync_stores();

	odp_timer.timer[id].active = 1;

	/* Handles are 1-based; 0 is reserved for ODP_TIMER_INVALID. */
	return id + 1;
}
+
/*
 * Arm a timeout expiring at absolute tick tmo_tick. On expiry, buf
 * (or the internal timeout buffer when buf is ODP_BUFFER_INVALID) is
 * enqueued to queue. Returns the timeout handle — the buffer holding
 * the timeout record — or ODP_TIMER_TMO_INVALID on error.
 */
odp_timer_tmo_t odp_timer_absolute_tmo(odp_timer_t timer, uint64_t tmo_tick,
				       odp_queue_t queue, odp_buffer_t buf)
{
	int id;
	uint64_t tick;
	uint64_t cur_tick;
	timeout_t *new_tmo;
	odp_buffer_t tmo_buf;

	/* Handles are 1-based. */
	id = timer - 1;

	/* NOTE(review): unsynchronized read; the tick callback may
	 * advance cur_tick concurrently — confirm tolerable. */
	cur_tick = odp_timer.timer[id].cur_tick;
	if (tmo_tick <= cur_tick) {
		ODP_DBG("timeout too close\n");
		return ODP_TIMER_TMO_INVALID;
	}

	tick = tmo_tick - cur_tick;
	/* NOTE(review): a delta of exactly MAX_TICKS maps onto the slot
	 * currently being served — should this check be ">="? Confirm. */
	if (tick > MAX_TICKS) {
		ODP_DBG("timeout too far\n");
		return ODP_TIMER_TMO_INVALID;
	}

	/* Wheel slot for the absolute expiry tick. */
	tick = (cur_tick + tick) % MAX_TICKS;

	tmo_buf = odp_buffer_alloc(odp_timer.timer[id].pool);
	if (tmo_buf == ODP_BUFFER_INVALID) {
		ODP_DBG("alloc failed\n");
		return ODP_TIMER_TMO_INVALID;
	}

	new_tmo = (timeout_t *)odp_buffer_addr(tmo_buf);

	new_tmo->timer_id = id;
	new_tmo->tick = (int)tick;
	new_tmo->tmo_tick = tmo_tick;
	new_tmo->queue = queue;
	new_tmo->tmo_buf = tmo_buf;

	/* Without a user buffer, deliver the timeout buffer itself. */
	if (buf != ODP_BUFFER_INVALID)
		new_tmo->buf = buf;
	else
		new_tmo->buf = tmo_buf;

	add_tmo(&odp_timer.timer[id].tick[tick], new_tmo);

	return tmo_buf;
}
+
+uint64_t odp_timer_tick_to_ns(odp_timer_t timer, uint64_t ticks)
+{
+ uint32_t id;
+
+ id = timer - 1;
+ return ticks * odp_timer.timer[id].resolution_ns;
+}
+
+uint64_t odp_timer_ns_to_tick(odp_timer_t timer, uint64_t ns)
+{
+ uint32_t id;
+
+ id = timer - 1;
+ return ns / odp_timer.timer[id].resolution_ns;
+}
+
+uint64_t odp_timer_resolution(odp_timer_t timer)
+{
+ uint32_t id;
+
+ id = timer - 1;
+ return odp_timer.timer[id].resolution_ns;
+}
+
+uint64_t odp_timer_maximum_tmo(odp_timer_t timer)
+{
+ uint32_t id;
+
+ id = timer - 1;
+ return odp_timer.timer[id].max_ticks;
+}
+
+uint64_t odp_timer_current_tick(odp_timer_t timer)
+{
+ uint32_t id;
+
+ id = timer - 1;
+ return odp_timer.timer[id].cur_tick;
+}
Copy linux-generic into linux-keystone2. linux-generic based on commit 6d7fadf "Fix netmap pkt I/O to poll all hardware rings" Signed-off-by: Taras Kondratiuk <taras.kondratiuk@linaro.org> --- platform/linux-keystone2/Doxyfile.in | 32 + platform/linux-keystone2/Makefile | 141 ++++ platform/linux-keystone2/include/api/odp_buffer.h | 107 +++ .../linux-keystone2/include/api/odp_pktio_netmap.h | 22 + .../linux-keystone2/include/api/odp_pktio_socket.h | 25 + .../linux-keystone2/include/api/odp_pktio_types.h | 43 ++ .../linux-keystone2/include/odp_buffer_internal.h | 124 +++ .../include/odp_buffer_pool_internal.h | 115 +++ platform/linux-keystone2/include/odp_internal.h | 53 ++ .../linux-keystone2/include/odp_packet_internal.h | 145 ++++ .../include/odp_packet_io_internal.h | 50 ++ .../linux-keystone2/include/odp_packet_io_queue.h | 50 ++ .../linux-keystone2/include/odp_packet_netmap.h | 67 ++ .../linux-keystone2/include/odp_packet_socket.h | 114 +++ .../linux-keystone2/include/odp_queue_internal.h | 120 +++ .../include/odp_schedule_internal.h | 31 + .../linux-keystone2/include/odp_spin_internal.h | 67 ++ platform/linux-keystone2/source/odp_barrier.c | 48 ++ platform/linux-keystone2/source/odp_buffer.c | 119 +++ platform/linux-keystone2/source/odp_buffer_pool.c | 511 +++++++++++++ platform/linux-keystone2/source/odp_coremask.c | 109 +++ platform/linux-keystone2/source/odp_init.c | 67 ++ platform/linux-keystone2/source/odp_linux.c | 90 +++ platform/linux-keystone2/source/odp_packet.c | 368 +++++++++ platform/linux-keystone2/source/odp_packet_flags.c | 115 +++ platform/linux-keystone2/source/odp_packet_io.c | 537 +++++++++++++ .../linux-keystone2/source/odp_packet_netmap.c | 453 +++++++++++ .../linux-keystone2/source/odp_packet_socket.c | 791 ++++++++++++++++++++ platform/linux-keystone2/source/odp_queue.c | 426 +++++++++++ platform/linux-keystone2/source/odp_ring.c | 619 +++++++++++++++ platform/linux-keystone2/source/odp_rwlock.c | 61 ++ 
platform/linux-keystone2/source/odp_schedule.c | 396 ++++++++++ .../linux-keystone2/source/odp_shared_memory.c | 224 ++++++ platform/linux-keystone2/source/odp_spinlock.c | 40 + platform/linux-keystone2/source/odp_system_info.c | 409 ++++++++++ platform/linux-keystone2/source/odp_thread.c | 68 ++ platform/linux-keystone2/source/odp_ticketlock.c | 51 ++ platform/linux-keystone2/source/odp_time.c | 92 +++ platform/linux-keystone2/source/odp_timer.c | 332 ++++++++ 39 files changed, 7232 insertions(+) create mode 100644 platform/linux-keystone2/Doxyfile.in create mode 100644 platform/linux-keystone2/Makefile create mode 100644 platform/linux-keystone2/include/api/odp_buffer.h create mode 100644 platform/linux-keystone2/include/api/odp_pktio_netmap.h create mode 100644 platform/linux-keystone2/include/api/odp_pktio_socket.h create mode 100644 platform/linux-keystone2/include/api/odp_pktio_types.h create mode 100644 platform/linux-keystone2/include/odp_buffer_internal.h create mode 100644 platform/linux-keystone2/include/odp_buffer_pool_internal.h create mode 100644 platform/linux-keystone2/include/odp_internal.h create mode 100644 platform/linux-keystone2/include/odp_packet_internal.h create mode 100644 platform/linux-keystone2/include/odp_packet_io_internal.h create mode 100644 platform/linux-keystone2/include/odp_packet_io_queue.h create mode 100644 platform/linux-keystone2/include/odp_packet_netmap.h create mode 100644 platform/linux-keystone2/include/odp_packet_socket.h create mode 100644 platform/linux-keystone2/include/odp_queue_internal.h create mode 100644 platform/linux-keystone2/include/odp_schedule_internal.h create mode 100644 platform/linux-keystone2/include/odp_spin_internal.h create mode 100644 platform/linux-keystone2/source/odp_barrier.c create mode 100644 platform/linux-keystone2/source/odp_buffer.c create mode 100644 platform/linux-keystone2/source/odp_buffer_pool.c create mode 100644 platform/linux-keystone2/source/odp_coremask.c create mode 100644 
platform/linux-keystone2/source/odp_init.c create mode 100644 platform/linux-keystone2/source/odp_linux.c create mode 100644 platform/linux-keystone2/source/odp_packet.c create mode 100644 platform/linux-keystone2/source/odp_packet_flags.c create mode 100644 platform/linux-keystone2/source/odp_packet_io.c create mode 100644 platform/linux-keystone2/source/odp_packet_netmap.c create mode 100644 platform/linux-keystone2/source/odp_packet_socket.c create mode 100644 platform/linux-keystone2/source/odp_queue.c create mode 100644 platform/linux-keystone2/source/odp_ring.c create mode 100644 platform/linux-keystone2/source/odp_rwlock.c create mode 100644 platform/linux-keystone2/source/odp_schedule.c create mode 100644 platform/linux-keystone2/source/odp_shared_memory.c create mode 100644 platform/linux-keystone2/source/odp_spinlock.c create mode 100644 platform/linux-keystone2/source/odp_system_info.c create mode 100644 platform/linux-keystone2/source/odp_thread.c create mode 100644 platform/linux-keystone2/source/odp_ticketlock.c create mode 100644 platform/linux-keystone2/source/odp_time.c create mode 100644 platform/linux-keystone2/source/odp_timer.c