
[RFC,API-NEXT,10/12] linux-generic: tm: implementation of egress traffic manager

Message ID 1437249827-578-11-git-send-email-bill.fischofer@linaro.org
State New

Commit Message

Bill Fischofer July 18, 2015, 8:03 p.m. UTC
Signed-off-by: Barry Spinney <spinney@ezchip.com>
Signed-off-by: Mike Holmes <mike.holmes@linaro.org>
Signed-off-by: Bill Fischofer <bill.fischofer@linaro.org>
---
 platform/linux-generic/Makefile.am        |    1 +
 platform/linux-generic/odp_traffic_mngr.c | 2792 +++++++++++++++++++++++++++++
 2 files changed, 2793 insertions(+)
 create mode 100644 platform/linux-generic/odp_traffic_mngr.c

Patch

diff --git a/platform/linux-generic/Makefile.am b/platform/linux-generic/Makefile.am
index 393c066..0570dfb 100644
--- a/platform/linux-generic/Makefile.am
+++ b/platform/linux-generic/Makefile.am
@@ -169,6 +169,7 @@  __LIB__libodp_la_SOURCES = \
 			   odp_ticketlock.c \
 			   odp_time.c \
 			   odp_timer.c \
+			   odp_traffic_mngr.c \
 			   odp_version.c \
 			   odp_weak.c \
 			   arch/@ARCH@/odp_time.c
diff --git a/platform/linux-generic/odp_traffic_mngr.c b/platform/linux-generic/odp_traffic_mngr.c
new file mode 100644
index 0000000..33f0c4c
--- /dev/null
+++ b/platform/linux-generic/odp_traffic_mngr.c
@@ -0,0 +1,2792 @@ 
+/* Copyright 2015 EZchip Semiconductor Ltd. All Rights Reserved.
+ *
+ * Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdint.h>
+#include <string.h>
+#include <malloc.h>
+#include <stdio.h>
+#include <pthread.h>
+#include <odp/std_types.h>
+#include <odp/traffic_mngr.h>
+#include <odp/packet_io.h>
+#include <odp_name_table_internal.h>
+#include <odp_timer_wheel_internal.h>
+#include <odp_pkt_queue_internal.h>
+#include <odp_sorted_list_internal.h>
+#include <odp_internal.h>
+#include <odp_debug_internal.h>
+
+#define MAX(a, b) (((a) > (b)) ? (a) : (b))
+#define MIN(a, b) (((a) < (b)) ? (a) : (b))
+
+#define INPUT_WORK_RING_SIZE  (16 * 1024)
+
+/* Macros to convert handles to internal pointers and vice versa.*/
+
+#define MAKE_ODP_TM_HANDLE(tm_system)  ((odp_tm_t)tm_system)
+#define GET_TM_SYSTEM(odp_tm)          ((tm_system_t *)odp_tm)
+
+#define MAKE_PROFILE_HANDLE(profile_kind, tbl_idx) \
+	(((profile_kind & 0xF) << 28) | ((tbl_idx + 1) & 0xFFFFFFF))
+
+#define GET_PROFILE_KIND(profile_handle)  \
+	((profile_kind_t)((profile_handle >> 28) & 0xF))
+
+#define GET_TBL_IDX(profile_handle)  ((profile_handle & 0xFFFFFFF) - 1)
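+/* Example: a TM_WRED_PROFILE (kind 3) entry at dynamic table index 6 gets the
+ * handle (3 << 28) | (6 + 1) = 0x30000007.  The +1 bias keeps the low 28 bits
+ * of a valid handle non-zero, and GET_TBL_IDX() subtracts it back out.
+ */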
+
+#define MAKE_ODP_TM_NODE(tm_node_obj)  ((odp_tm_node_t)tm_node_obj)
+#define GET_TM_NODE_OBJ(odp_tm_node)   ((tm_node_obj_t *)odp_tm_node)
+
+#define MAKE_ODP_TM_QUEUE(tm_queue_obj)  ((odp_tm_queue_t)tm_queue_obj)
+#define GET_TM_QUEUE_OBJ(odp_tm_queue)   ((tm_queue_obj_t *)odp_tm_queue)
+
+typedef uint64_t tm_handle_t;
+
+#define PF_RM_CURRENT_BEST  0x01
+#define PF_NEW_PKT_IN       0x02
+#define PF_PRIORITY_BOOST   0x08
+#define PF_SHAPER_DELAYED   0x10
+#define PF_CHANGED_OUT_PKT  0x20
+#define PF_REACHED_EGRESS   0x40
+#define PF_ERROR            0x80
+
+typedef struct {
+	uint32_t num_allocd;
+	uint32_t num_used;
+	void **array_ptrs; /* Ptr to an array of num_allocd void * ptrs. */
+} dynamic_tbl_t;
+
+#define ODP_TM_NUM_PROFILES  4
+
+typedef enum {
+	TM_SHAPER_PROFILE,
+	TM_SCHED_PROFILE,
+	TM_THRESHOLD_PROFILE,
+	TM_WRED_PROFILE
+} profile_kind_t;
+
+typedef struct tm_node_obj_s tm_node_obj_t;
+
+typedef struct {
+	/* A zero value for max_bytes or max_pkts indicates that this quantity
+	 * is not limited and has no RED threshold.
+	 */
+	uint64_t max_pkts;
+	uint64_t max_bytes;
+	odp_int_name_t name_tbl_id;
+} tm_queue_thresholds_t;
+
+typedef struct {
+	odp_atomic_u64_t pkt_cnt;
+	odp_atomic_u64_t byte_cnt;
+} tm_queue_cnts_t;
+
+typedef struct tm_wred_node_s tm_wred_node_t;
+
+struct tm_wred_node_s {
+	tm_wred_node_t *next_tm_wred_node;
+	odp_tm_wred_params_t *wred_params[ODP_NUM_PKT_COLORS];
+	tm_queue_thresholds_t *threshold_params;
+	tm_queue_cnts_t queue_cnts;
+	odp_ticketlock_t tm_wred_node_lock;
+};
+
+typedef struct { /* 64 bits long. */
+	uint32_t queue_num;
+	uint16_t pkt_len;
+	int8_t shaper_len_adjust;
+	uint8_t drop_eligible :1;
+	uint8_t pkt_color :2;
+	uint8_t unused :5;
+} pkt_desc_t;
+
+typedef struct {
+	odp_tm_sched_mode_t sched_modes[ODP_TM_MAX_PRIORITIES];
+	uint16_t inverted_weights[ODP_TM_MAX_PRIORITIES];
+	odp_int_name_t name_tbl_id;
+} tm_sched_params_t;
+
+typedef enum {
+	DELAY_PKT, DECR_NOTHING, DECR_COMMIT, DECR_PEAK, DECR_BOTH
+} tm_shaper_action_t;
+
+typedef struct {
+	uint8_t output_priority;
+	tm_shaper_action_t action;
+} tm_prop_t;
+
+typedef struct {
+	uint64_t commit_rate;
+	uint64_t peak_rate;
+	int64_t max_commit; /* Byte cnt as a fp integer with 26 bits. */
+	int64_t max_peak;
+	uint64_t max_commit_time_delta;
+	uint64_t max_peak_time_delta;
+	uint32_t min_time_delta;
+	odp_int_name_t name_tbl_id;
+	int8_t len_adjust;
+	odp_bool_t dual_rate;
+	odp_bool_t enabled;
+} tm_shaper_params_t;
+
+typedef enum {
+	NO_CALLBACK, UNDELAY_PKT, BOOST_PRIORITY
+} tm_shaper_callback_reason_t;
+
+typedef struct {
+	tm_node_obj_t *next_tm_node; /* NULL if connected to egress.*/
+	tm_shaper_params_t *shaper_params;
+	tm_sched_params_t *sched_params;
+
+	uint64_t last_update_time; /* In clock cycles. */
+	uint64_t callback_time;
+
+	/* The shaper token bucket counters are represented as a number of
+	 * bytes in a 64-bit fixed point format where the decimal point is at
+	 * bit 26 (aka int64_26).  In other words, the number of bytes that
+	 * commit_cnt represents is "commit_cnt / 2**26".  Hence the
+	 * commit_rate and peak_rate are in units of bytes per cycle =
+	 * "(bits per sec / 8) / cycles per sec".
+	 */
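+	/* For example, commit_cnt == ((int64_t)1500 << 26) represents 1500
+	 * bytes of available credit.
+	 */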
+	int64_t commit_cnt; /* Note token counters can go slightly negative */
+	int64_t peak_cnt; /* Note token counters can go slightly negative */
+
+	uint64_t virtual_finish_time;
+	pkt_desc_t *in_pkt_desc;
+	pkt_desc_t *out_pkt_desc;
+	uint8_t callback_reason;
+	tm_prop_t propagation_result;
+	uint8_t input_priority;
+	uint8_t out_priority;
+	uint8_t valid_finish_time;
+	uint8_t timer_outstanding;
+	uint8_t initialized;
+} tm_shaper_obj_t;
+
+typedef struct {
+	uint32_t pkts_rcvd_cnt;
+	uint32_t pkts_enqueued_cnt;
+	uint32_t pkts_dequeued_cnt;
+	uint32_t pkts_consumed_cnt;
+	odp_int_pkt_queue_t odp_int_pkt_queue;
+	tm_wred_node_t *tm_wred_node;
+	odp_packet_t pkt;
+	pkt_desc_t in_pkt_desc;
+	tm_shaper_obj_t shaper_obj;
+	uint32_t queue_num;
+	uint8_t priority;
+	uint8_t tm_idx;
+	uint8_t connected_to_egress;
+	uint8_t tm_nodes_to_egress;
+} tm_queue_obj_t;
+
+typedef struct {
+	/* Note that the priority is implicit. */
+	pkt_desc_t *smallest_pkt_desc;
+	uint64_t base_virtual_time;
+	uint64_t smallest_finish_time;
+	odp_int_sorted_list_t sorted_list;
+	uint32_t sorted_list_cnt; /* Debugging use only.*/
+} tm_sched_state_t;
+
+typedef struct {
+	uint32_t priority_bit_mask; /* bit set if priority has pkt. */
+	pkt_desc_t *out_pkt_desc; /* highest priority pkt desc. */
+	uint8_t num_priorities;
+	uint8_t highest_priority;
+	tm_sched_state_t sched_states[0];
+} tm_schedulers_obj_t;
+
+struct tm_node_obj_s {
+	tm_wred_node_t *tm_wred_node;
+	tm_shaper_obj_t shaper_obj;
+	tm_schedulers_obj_t *schedulers_obj;
+	odp_int_name_t name_tbl_id;
+	uint32_t max_fanin;
+	uint8_t level; /* Primarily for debugging */
+	uint8_t tm_idx;
+	uint8_t connected_to_egress;
+};
+
+typedef struct {
+	tm_queue_obj_t *tm_queue_obj;
+	odp_packet_t pkt;
+} input_work_item_t;
+
+typedef struct {
+	uint64_t total_enqueues;
+	uint64_t enqueue_fail_cnt;
+	uint64_t total_dequeues;
+	odp_atomic_u32_t queue_cnt;
+	uint32_t peak_cnt;
+	uint32_t head_idx;
+	uint32_t tail_idx;
+	odp_ticketlock_t lock;
+	input_work_item_t work_ring[INPUT_WORK_RING_SIZE];
+} input_work_queue_t;
+
+typedef struct {
+	uint32_t next_random_byte;
+	uint8_t buf[256];
+} tm_random_data_t;
+
+typedef struct {
+	tm_queue_thresholds_t *threshold_params;
+	tm_queue_cnts_t queue_cnts;
+} tm_queue_info_t;
+
+typedef struct {
+	odp_ticketlock_t tm_system_lock;
+	odp_barrier_t tm_system_barrier;
+	odp_barrier_t tm_system_destroy_barrier;
+	odp_atomic_u32_t destroying;
+	odp_int_name_t name_tbl_id;
+
+	uint32_t next_queue_num;
+	tm_queue_obj_t **queue_num_tbl;
+	input_work_queue_t *input_work_queue;
+	tm_queue_cnts_t priority_queue_cnts;
+	tm_queue_cnts_t total_queue_cnts;
+	pkt_desc_t *egress_pkt_desc;
+
+	odp_int_queue_pool_t odp_int_queue_pool;
+	odp_timer_wheel_t odp_int_timer_wheel;
+	odp_int_sorted_pool_t odp_int_sorted_pool;
+
+	odp_tm_egress_t egress;
+	odp_tm_capability_t capability;
+
+	tm_queue_info_t total_info;
+	tm_queue_info_t priority_info[ODP_TM_MAX_PRIORITIES];
+
+	tm_random_data_t tm_random_data;
+
+	uint64_t current_cycles;
+	uint8_t tm_idx;
+	uint8_t first_enq;
+
+	uint64_t shaper_green_cnt;
+	uint64_t shaper_yellow_cnt;
+	uint64_t shaper_red_cnt;
+} tm_system_t;
+
+static const odp_int_name_kind_t PROFILE_TO_HANDLE_KIND[ODP_TM_NUM_PROFILES] = {
+		[TM_SHAPER_PROFILE] = ODP_TM_SHAPER_PROFILE_HANDLE,
+		[TM_SCHED_PROFILE] = ODP_TM_SCHED_PROFILE_HANDLE,
+		[TM_THRESHOLD_PROFILE] = ODP_TM_THRESHOLD_PROFILE_HANDLE,
+		[TM_WRED_PROFILE] = ODP_TM_WRED_PROFILE_HANDLE };
+
+static const pkt_desc_t EMPTY_PKT_DESC = { .queue_num = 0, .pkt_len = 0,
+		.shaper_len_adjust = 0, .drop_eligible = 0, .pkt_color = 0,
+		.unused = 0 };
+
+#define MAX_PRIORITIES ODP_TM_MAX_PRIORITIES
+#define NUM_SHAPER_COLORS ODP_NUM_SHAPER_COLORS
+
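+/* Propagation tables: indexed by [input priority][shaper color], each entry
+ * gives the output priority and which token counters to decrement.  In the
+ * single rate table red pkts are delayed at every priority.  In the dual
+ * rate table priorities 0-2 decrement both buckets when green, yellow pkts
+ * at priority 2 are demoted to priority 3, and priorities 3-7 use only the
+ * peak bucket.
+ */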
+static tm_prop_t basic_prop_tbl[MAX_PRIORITIES][NUM_SHAPER_COLORS] = {
+	[0] = {
+		[SHAPER_GREEN] = { 0, DECR_BOTH },
+		[SHAPER_YELLOW] = { 0, DECR_BOTH },
+		[SHAPER_RED] = { 0, DELAY_PKT } },
+	[1] = {
+		[SHAPER_GREEN] = { 1, DECR_BOTH },
+		[SHAPER_YELLOW] = { 1, DECR_BOTH },
+		[SHAPER_RED] = { 1, DELAY_PKT } },
+	[2] = {
+		[SHAPER_GREEN] = { 2, DECR_BOTH },
+		[SHAPER_YELLOW] = { 2, DECR_BOTH },
+		[SHAPER_RED] = { 2, DELAY_PKT } },
+	[3] = {
+		[SHAPER_GREEN] = { 3, DECR_BOTH },
+		[SHAPER_YELLOW] = { 3, DECR_BOTH },
+		[SHAPER_RED] = { 3, DELAY_PKT } },
+	[4] = {
+		[SHAPER_GREEN] = { 4, DECR_BOTH },
+		[SHAPER_YELLOW] = { 4, DECR_BOTH },
+		[SHAPER_RED] = { 4, DELAY_PKT } },
+	[5] = {
+		[SHAPER_GREEN] = { 5, DECR_BOTH },
+		[SHAPER_YELLOW] = { 5, DECR_BOTH },
+		[SHAPER_RED] = { 5, DELAY_PKT } },
+	[6] = {
+		[SHAPER_GREEN] = { 6, DECR_BOTH },
+		[SHAPER_YELLOW] = { 6, DECR_BOTH },
+		[SHAPER_RED] = { 6, DELAY_PKT } },
+	[7] = {
+		[SHAPER_GREEN] = { 7, DECR_BOTH },
+		[SHAPER_YELLOW] = { 7, DECR_BOTH },
+		[SHAPER_RED] = { 7, DELAY_PKT } } };
+
+static tm_prop_t dual_rate_prop_tbl[MAX_PRIORITIES][NUM_SHAPER_COLORS] = {
+	[0] = {
+		[SHAPER_GREEN] = { 0, DECR_BOTH },
+		[SHAPER_YELLOW] = { 0, DECR_BOTH },
+		[SHAPER_RED] = { 0, DECR_BOTH } },
+	[1] = {
+		[SHAPER_GREEN] = { 1, DECR_BOTH },
+		[SHAPER_YELLOW] = { 1, DECR_BOTH },
+		[SHAPER_RED] = { 1, DELAY_PKT } },
+	[2] = {
+		[SHAPER_GREEN] = { 2, DECR_BOTH },
+		[SHAPER_YELLOW] = { 3, DECR_PEAK },
+		[SHAPER_RED] = { 3, DELAY_PKT } },
+	[3] = {
+		[SHAPER_GREEN] = { 3, DECR_PEAK },
+		[SHAPER_YELLOW] = { 3, DECR_PEAK },
+		[SHAPER_RED] = { 3, DELAY_PKT } },
+	[4] = {
+		[SHAPER_GREEN] = { 4, DECR_PEAK },
+		[SHAPER_YELLOW] = { 4, DECR_PEAK },
+		[SHAPER_RED] = { 4, DELAY_PKT } },
+	[5] = {
+		[SHAPER_GREEN] = { 5, DECR_PEAK },
+		[SHAPER_YELLOW] = { 5, DECR_PEAK },
+		[SHAPER_RED] = { 5, DELAY_PKT } },
+	[6] = {
+		[SHAPER_GREEN] = { 6, DECR_PEAK },
+		[SHAPER_YELLOW] = { 6, DECR_PEAK },
+		[SHAPER_RED] = { 6, DELAY_PKT } },
+	[7] = {
+		[SHAPER_GREEN] = { 7, DECR_PEAK },
+		[SHAPER_YELLOW] = { 7, DECR_PEAK },
+		[SHAPER_RED] = { 7, DELAY_PKT } } };
+
+/* Profile tables. */
+static dynamic_tbl_t odp_tm_profile_tbls[ODP_TM_NUM_PROFILES];
+
+/* TM systems table. */
+static tm_system_t *odp_tm_systems[ODP_TM_MAX_NUM_SYSTEMS];
+
+static uint64_t odp_tm_cycles_per_sec = ODP_CYCLES_PER_SEC;
+
+static odp_ticketlock_t tm_create_lock;
+static odp_ticketlock_t tm_profile_lock;
+static odp_barrier_t tm_first_enq;
+
+/* Forward function declarations. */
+static void tm_queue_cnts_decrement(tm_system_t *tm_system,
+				    tm_wred_node_t *tm_wred_node,
+				    uint32_t priority,
+				    uint32_t frame_len);
+
+static void tm_init_random_data(tm_random_data_t *tm_random_data)
+{
+	uint32_t byte_cnt;
+
+	/* Keep calling odp_random_data until we have filled the entire random
+	 * data buffer.  Hopefully we don't need to call odp_random_data too
+	 * many times to get 256 bytes worth of random data.
+	 */
+	byte_cnt = 0;
+	while (byte_cnt < 256)
+		byte_cnt += odp_random_data(&tm_random_data->buf[byte_cnt],
+					    256 - byte_cnt, 1);
+
+	tm_random_data->next_random_byte = 0;
+}
+
+static uint16_t tm_random16(tm_random_data_t *tm_random_data)
+{
+	uint32_t buf_idx;
+	uint16_t rand8a, rand8b;
+
+	if (256 <= tm_random_data->next_random_byte)
+		tm_init_random_data(tm_random_data);
+
+	/* Collect 2 random bytes and return them.  Endianness does NOT matter
+	 * here.  This code does assume that next_random_byte is a multiple of
+	 * 2.
+	 */
+	buf_idx = tm_random_data->next_random_byte;
+	rand8a = tm_random_data->buf[buf_idx++];
+	rand8b = tm_random_data->buf[buf_idx++];
+
+	tm_random_data->next_random_byte = buf_idx;
+	return (rand8b << 8) | rand8a;
+}
+
+static odp_bool_t tm_random_drop(tm_random_data_t *tm_random_data,
+				 odp_tm_percent_t drop_prob)
+{
+	odp_bool_t drop;
+	uint32_t random_int, scaled_random_int;
+
+	/* Pick a random integer between 0 and 10000.*/
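+	/* Percentages (odp_tm_percent_t) are in hundredths of a percent
+	 * throughout this file, so 10000 corresponds to 100%.
+	 */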
+	random_int = tm_random16(tm_random_data);
+	scaled_random_int = (random_int * 10000) >> 16;
+	drop = scaled_random_int < (uint32_t)drop_prob;
+	return drop;
+}
+
+static void *alloc_entry_in_dynamic_tbl(dynamic_tbl_t *dynamic_tbl,
+					uint32_t record_size,
+					uint32_t *dynamic_idx_ptr)
+{
+	uint32_t num_allocd, new_num_allocd, idx;
+	void **new_array_ptrs, *new_record;
+
+	num_allocd = dynamic_tbl->num_allocd;
+	if (num_allocd <= dynamic_tbl->num_used) {
+		/* Need to alloc or realloc the array of ptrs. */
+		if (num_allocd <= 32)
+			new_num_allocd = 64;
+		else
+			new_num_allocd = 4 * num_allocd;
+
+		new_array_ptrs = malloc(new_num_allocd * sizeof(void *));
+		memset(new_array_ptrs, 0, new_num_allocd * sizeof(void *));
+
+		if (dynamic_tbl->num_used != 0)
+			memcpy(new_array_ptrs, dynamic_tbl->array_ptrs,
+			       dynamic_tbl->num_used * sizeof(void *));
+
+		if (dynamic_tbl->array_ptrs)
+			free(dynamic_tbl->array_ptrs);
+
+		dynamic_tbl->num_allocd = new_num_allocd;
+		dynamic_tbl->array_ptrs = new_array_ptrs;
+	}
+
+	idx = dynamic_tbl->num_used;
+	new_record = malloc(record_size);
+	memset(new_record, 0, record_size);
+
+	dynamic_tbl->array_ptrs[idx] = new_record;
+	dynamic_tbl->num_used++;
+	if (dynamic_idx_ptr)
+		*dynamic_idx_ptr = idx;
+
+	return new_record;
+}
+
+static void free_dynamic_tbl_entry(dynamic_tbl_t *dynamic_tbl ODP_UNUSED,
+				   uint32_t record_size ODP_UNUSED,
+				   uint32_t dynamic_idx ODP_UNUSED)
+{
+	/* @todo Currently we don't bother with freeing. */
+}
+
+static input_work_queue_t *input_work_queue_create(void)
+{
+	input_work_queue_t *input_work_queue;
+
+	input_work_queue = malloc(sizeof(input_work_queue_t));
+	memset(input_work_queue, 0, sizeof(input_work_queue_t));
+	odp_atomic_init_u32(&input_work_queue->queue_cnt, 0);
+	odp_ticketlock_init(&input_work_queue->lock);
+	return input_work_queue;
+}
+
+static void input_work_queue_destroy(input_work_queue_t *input_work_queue)
+{
+	/* We first need to "flush/drain" this input_work_queue before
+	 * freeing it.  Of course, elsewhere it is essential to have first
+	 * stopped new tm_enq() (et al) calls from succeeding.
+	 */
+	odp_ticketlock_lock(&input_work_queue->lock);
+	free(input_work_queue);
+}
+
+static int input_work_queue_append(tm_system_t *tm_system,
+				   input_work_item_t *work_item)
+{
+	input_work_queue_t *input_work_queue;
+	input_work_item_t *entry_ptr;
+	uint32_t queue_cnt, tail_idx;
+
+	input_work_queue = tm_system->input_work_queue;
+	queue_cnt = odp_atomic_load_u32(&input_work_queue->queue_cnt);
+	if (INPUT_WORK_RING_SIZE <= queue_cnt) {
+		input_work_queue->enqueue_fail_cnt++;
+		return -1;
+	}
+
+	odp_ticketlock_lock(&input_work_queue->lock);
+	tail_idx = input_work_queue->tail_idx;
+	entry_ptr = &input_work_queue->work_ring[tail_idx];
+
+	*entry_ptr = *work_item;
+	tail_idx++;
+	if (INPUT_WORK_RING_SIZE <= tail_idx)
+		tail_idx = 0;
+
+	input_work_queue->total_enqueues++;
+	input_work_queue->tail_idx = tail_idx;
+	odp_ticketlock_unlock(&input_work_queue->lock);
+	odp_atomic_inc_u32(&input_work_queue->queue_cnt);
+	if (input_work_queue->peak_cnt <= queue_cnt)
+		input_work_queue->peak_cnt = queue_cnt + 1;
+	return 0;
+}
+
+static int input_work_queue_remove(input_work_queue_t *input_work_queue,
+				   input_work_item_t *work_item)
+{
+	input_work_item_t *entry_ptr;
+	uint32_t queue_cnt, head_idx;
+
+	queue_cnt = odp_atomic_load_u32(&input_work_queue->queue_cnt);
+	if (queue_cnt == 0)
+		return -1;
+
+	odp_ticketlock_lock(&input_work_queue->lock);
+	head_idx = input_work_queue->head_idx;
+	entry_ptr = &input_work_queue->work_ring[head_idx];
+
+	*work_item = *entry_ptr;
+	head_idx++;
+	if (INPUT_WORK_RING_SIZE <= head_idx)
+		head_idx = 0;
+
+	input_work_queue->total_dequeues++;
+	input_work_queue->head_idx = head_idx;
+	odp_ticketlock_unlock(&input_work_queue->lock);
+	odp_atomic_dec_u32(&input_work_queue->queue_cnt);
+	return 0;
+}
+
+static tm_system_t *tm_system_alloc(void)
+{
+	tm_system_t *tm_system;
+	uint32_t tm_idx;
+
+	/* Find an open slot in the odp_tm_systems array. */
+	for (tm_idx = 0; tm_idx < ODP_TM_MAX_NUM_SYSTEMS; tm_idx++) {
+		if (!odp_tm_systems[tm_idx]) {
+			tm_system = malloc(sizeof(tm_system_t));
+			memset(tm_system, 0, sizeof(tm_system_t));
+			odp_tm_systems[tm_idx] = tm_system;
+			tm_system->tm_idx = tm_idx;
+			return tm_system;
+		}
+	}
+
+	return NULL;
+}
+
+static void tm_system_free(tm_system_t *tm_system)
+{
+	/* @todo Free any internally malloc'd blocks. */
+	odp_tm_systems[tm_system->tm_idx] = NULL;
+	free(tm_system);
+}
+
+static void *tm_common_profile_create(char *name,
+				      profile_kind_t profile_kind,
+				      uint32_t object_size,
+				      tm_handle_t *profile_handle_ptr,
+				      odp_int_name_t *name_tbl_id_ptr)
+{
+	odp_int_name_kind_t handle_kind;
+	odp_int_name_t name_tbl_id;
+	dynamic_tbl_t *dynamic_tbl;
+	tm_handle_t profile_handle;
+	uint32_t dynamic_tbl_idx;
+	void *object_ptr;
+
+	dynamic_tbl = &odp_tm_profile_tbls[profile_kind];
+	object_ptr = alloc_entry_in_dynamic_tbl(dynamic_tbl, object_size,
+						&dynamic_tbl_idx);
+	if (!object_ptr)
+		return NULL;
+
+	handle_kind = PROFILE_TO_HANDLE_KIND[profile_kind];
+	profile_handle = MAKE_PROFILE_HANDLE(profile_kind, dynamic_tbl_idx);
+	name_tbl_id = ODP_INVALID_NAME;
+
+	if (name && (name[0] != '\0')) {
+		name_tbl_id = odp_int_name_tbl_add(name, handle_kind,
+						   profile_handle);
+		if (name_tbl_id == ODP_INVALID_NAME) {
+			free_dynamic_tbl_entry(dynamic_tbl, object_size,
+					       dynamic_tbl_idx);
+			return NULL;
+		}
+	}
+
+	*profile_handle_ptr = profile_handle;
+	if (name_tbl_id_ptr)
+		*name_tbl_id_ptr = name_tbl_id;
+	return object_ptr;
+}
+
+static void *tm_get_profile_params(tm_handle_t profile_handle,
+				   profile_kind_t expected_profile_kind)
+{
+	profile_kind_t profile_kind;
+	dynamic_tbl_t *dynamic_tbl;
+	uint32_t dynamic_tbl_idx;
+
+	profile_kind = GET_PROFILE_KIND(profile_handle);
+	if (profile_kind != expected_profile_kind)
+		return NULL;
+	/* @todo Call some odp error function */
+
+	dynamic_tbl = &odp_tm_profile_tbls[profile_kind];
+	dynamic_tbl_idx = GET_TBL_IDX(profile_handle);
+	return dynamic_tbl->array_ptrs[dynamic_tbl_idx];
+}
+
+static uint64_t tm_bps_to_rate(uint64_t bps)
+{
+	/* This code assumes that bps is in the range 1 kbps .. 1 tbps. */
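+	/* For bps below 2**32 the full 29-bit shift fits in 64 bits; above
+	 * that, shift by 21 first and restore the remaining 8 bits of scale
+	 * after the divide.
+	 */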
+	if ((bps >> 32) == 0)
+		return (bps << 29) / odp_tm_cycles_per_sec;
+	else
+		return ((bps << 21) / odp_tm_cycles_per_sec) << 8;
+}
+
+static uint64_t tm_rate_to_bps(uint64_t rate)
+{
+	/* Inverse of tm_bps_to_rate; same 1 kbps .. 1 tbps range assumed. */
+	return (rate * odp_tm_cycles_per_sec) >> 29;
+}
+
+static uint64_t tm_max_time_delta(uint64_t rate)
+{
+	if (rate == 0)
+		return 0;
+	else
+		return (1ULL << (26 + 30)) / rate;
+}
+
+static void tm_shaper_params_cvt_to(odp_tm_shaper_params_t *odp_shaper_params,
+				    tm_shaper_params_t *tm_shaper_params)
+{
+	uint64_t commit_rate, peak_rate, max_commit_time_delta, highest_rate;
+	uint64_t max_peak_time_delta;
+	uint32_t min_time_delta;
+	int64_t commit_burst, peak_burst;
+
+	commit_rate = tm_bps_to_rate(odp_shaper_params->commit_bps);
+	peak_rate = tm_bps_to_rate(odp_shaper_params->peak_bps);
+	max_commit_time_delta = tm_max_time_delta(commit_rate);
+	max_peak_time_delta = tm_max_time_delta(peak_rate);
+	highest_rate = MAX(commit_rate, peak_rate);
+	min_time_delta = (uint32_t)((1 << 26) / highest_rate);
+	commit_burst = (int64_t)odp_shaper_params->commit_burst;
+	peak_burst = (int64_t)odp_shaper_params->peak_burst;
+
+	tm_shaper_params->max_commit_time_delta = max_commit_time_delta;
+	tm_shaper_params->max_peak_time_delta = max_peak_time_delta;
+
+	tm_shaper_params->commit_rate = commit_rate;
+	tm_shaper_params->peak_rate = peak_rate;
+	tm_shaper_params->max_commit = commit_burst << (26 - 3);
+	tm_shaper_params->max_peak = peak_burst << (26 - 3);
+	tm_shaper_params->min_time_delta = min_time_delta;
+	tm_shaper_params->len_adjust = odp_shaper_params->shaper_len_adjust;
+	tm_shaper_params->dual_rate = odp_shaper_params->dual_rate;
+	tm_shaper_params->enabled = commit_rate != 0;
+}
+
+static void
+tm_shaper_params_cvt_from(tm_shaper_params_t *tm_shaper_params,
+			  odp_tm_shaper_params_t *odp_shaper_params)
+{
+	uint64_t commit_bps, peak_bps, commit_burst, peak_burst;
+
+	commit_bps = tm_rate_to_bps(tm_shaper_params->commit_rate);
+	peak_bps = tm_rate_to_bps(tm_shaper_params->peak_rate);
+	commit_burst = tm_shaper_params->max_commit >> (26 - 3);
+	peak_burst = tm_shaper_params->max_peak >> (26 - 3);
+
+	odp_shaper_params->commit_bps = commit_bps;
+	odp_shaper_params->peak_bps = peak_bps;
+	odp_shaper_params->commit_burst = (uint32_t)commit_burst;
+	odp_shaper_params->peak_burst = (uint32_t)peak_burst;
+	odp_shaper_params->shaper_len_adjust = tm_shaper_params->len_adjust;
+	odp_shaper_params->dual_rate = tm_shaper_params->dual_rate;
+}
+
+static void tm_shaper_obj_init(tm_system_t *tm_system,
+			       tm_shaper_params_t *shaper_params,
+			       tm_shaper_obj_t *shaper_obj)
+{
+	shaper_obj->shaper_params = shaper_params;
+	shaper_obj->last_update_time = tm_system->current_cycles;
+	shaper_obj->callback_time = 0;
+	shaper_obj->commit_cnt = shaper_params->max_commit;
+	shaper_obj->peak_cnt = shaper_params->max_peak;
+	shaper_obj->callback_reason = NO_CALLBACK;
+	shaper_obj->initialized = 1;
+}
+
+/* Any locking required must be done by the caller! */
+static void tm_shaper_config_set(tm_system_t *tm_system,
+				 odp_tm_shaper_t shaper_profile,
+				 tm_shaper_obj_t *shaper_obj)
+{
+	tm_shaper_params_t *shaper_params;
+
+	if (shaper_profile == ODP_TM_INVALID)
+		return;
+
+	shaper_params = tm_get_profile_params(shaper_profile,
+					      TM_SHAPER_PROFILE);
+	if (shaper_obj->initialized == 0)
+		tm_shaper_obj_init(tm_system, shaper_params, shaper_obj);
+	else
+		shaper_obj->shaper_params = shaper_params;
+}
+
+static void update_shaper_elapsed_time(tm_system_t *tm_system,
+				       tm_shaper_params_t *shaper_params,
+				       tm_shaper_obj_t *shaper_obj)
+{
+	uint64_t time_delta;
+	int64_t commit, peak, commit_inc, peak_inc, max_commit, max_peak;
+
+	/* If the time_delta is "too small" then we just exit without making
+	 * any changes.  Too small is defined such that
+	 * time_delta/cycles_per_sec * MAX(commit_rate, peak_rate) is less
+	 * than a byte.
+	 */
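+	/* min_time_delta was set in tm_shaper_params_cvt_to() to
+	 * (1 << 26) / MAX(commit_rate, peak_rate), i.e. the time needed to
+	 * accumulate one byte of credit at the faster of the two rates.
+	 */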
+	time_delta = tm_system->current_cycles - shaper_obj->last_update_time;
+	if (time_delta < (uint64_t)shaper_params->min_time_delta)
+		return;
+
+	commit = shaper_obj->commit_cnt;
+	max_commit = shaper_params->max_commit;
+
+	/* If the time_delta is "too large" then we need to prevent overflow
+	 * of the multiplications of time_delta and either commit_rate or
+	 * peak_rate.
+	 */
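+	/* tm_max_time_delta() returns (1 << 56) / rate, the delta at which
+	 * 2**30 bytes of credit (far more than any burst) would have accrued,
+	 * so clamping the increment to max_commit is safe.
+	 */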
+	if (shaper_params->max_commit_time_delta <= time_delta)
+		commit_inc = max_commit;
+	else
+		commit_inc = time_delta * shaper_params->commit_rate;
+
+	shaper_obj->commit_cnt = (int64_t)MIN(max_commit, commit + commit_inc);
+
+	if (shaper_params->peak_rate != 0) {
+		peak = shaper_obj->peak_cnt;
+		max_peak = shaper_params->max_peak;
+		if (shaper_params->max_peak_time_delta <= time_delta)
+			peak_inc = max_peak;
+		else
+			peak_inc = time_delta * shaper_params->peak_rate;
+
+		shaper_obj->peak_cnt = (int64_t)MIN(max_peak, peak + peak_inc);
+	}
+
+	shaper_obj->last_update_time = tm_system->current_cycles;
+}
+
+static uint64_t cycles_till_not_red(tm_shaper_params_t *shaper_params,
+				    tm_shaper_obj_t *shaper_obj)
+{
+	uint64_t min_time_delay, commit_delay, peak_delay;
+
+	/* The normal case requires that peak_cnt be <= commit_cnt and that
+	 * peak_rate be >= commit_rate, but just in case, this code also
+	 * handles other combinations that might not strictly be valid.
+	 */
+	commit_delay = 0;
+	if (shaper_obj->commit_cnt < 0)
+		commit_delay = (-shaper_obj->commit_cnt)
+				/ shaper_params->commit_rate;
+
+	min_time_delay = MAX(shaper_obj->shaper_params->min_time_delta, 256);
+	commit_delay = MAX(commit_delay, min_time_delay);
+	if (shaper_params->peak_rate == 0)
+		return commit_delay;
+
+	peak_delay = 0;
+	if (shaper_obj->peak_cnt < 0)
+		peak_delay = (-shaper_obj->peak_cnt) / shaper_params->peak_rate;
+
+	peak_delay = MAX(peak_delay, min_time_delay);
+	if (0 < shaper_obj->commit_cnt)
+		return peak_delay;
+	else if (0 < shaper_obj->peak_cnt)
+		return commit_delay;
+	else
+		return MIN(commit_delay, peak_delay);
+}
+
+static uint64_t cycles_till_green(tm_shaper_params_t *shaper_params,
+				  tm_shaper_obj_t *shaper_obj)
+{
+	if (0 <= shaper_obj->commit_cnt)
+		return 0; /* Shouldn't happen. */
+
+	return (-shaper_obj->commit_cnt) / shaper_params->commit_rate;
+}
+
+/* Returns < 0 if no pkt_desc output (i.e. action == DELAY_PKT), 0 if there
+ * is a pkt_desc output but it is unchanged, or > 0 if there is a pkt_desc
+ * output and it is different than what was just the output.
+ */
+static int tm_run_shaper(tm_system_t *tm_system, tm_shaper_obj_t *shaper_obj,
+			 pkt_desc_t *pkt_desc, uint8_t priority)
+{
+	odp_tm_shaper_color_t shaper_color;
+	tm_shaper_params_t *shaper_params;
+	odp_bool_t output_change;
+	tm_prop_t propagation;
+	uint64_t delay_cycles, wakeup_time;
+
+	if (!pkt_desc) {
+		output_change = !shaper_obj->out_pkt_desc;
+		shaper_obj->in_pkt_desc = pkt_desc;
+		shaper_obj->out_pkt_desc = pkt_desc;
+		shaper_obj->out_priority = 0;
+		return output_change;
+	}
+
+	shaper_params = shaper_obj->shaper_params;
+	shaper_obj->in_pkt_desc = pkt_desc;
+	shaper_obj->input_priority = priority;
+
+	if ((!shaper_params) || (shaper_params->enabled == 0)) {
+		output_change = (shaper_obj->out_pkt_desc != pkt_desc) ||
+			(shaper_obj->out_priority != priority);
+
+		shaper_obj->out_pkt_desc = pkt_desc;
+		shaper_obj->out_priority = priority;
+		if (output_change)
+			shaper_obj->valid_finish_time = 0;
+
+		return output_change;
+	}
+
+	update_shaper_elapsed_time(tm_system, shaper_params, shaper_obj);
+	if (0 < shaper_obj->commit_cnt)
+		shaper_color = SHAPER_GREEN;
+	else if (shaper_params->peak_rate == 0)
+		shaper_color = SHAPER_RED;
+	else if (shaper_obj->peak_cnt <= 0)
+		shaper_color = SHAPER_RED;
+	else
+		shaper_color = SHAPER_YELLOW;
+
+	if (shaper_color == SHAPER_GREEN)
+		tm_system->shaper_green_cnt++;
+	else if (shaper_color == SHAPER_YELLOW)
+		tm_system->shaper_yellow_cnt++;
+	else
+		tm_system->shaper_red_cnt++;
+
+	/* Run thru propagation tbl to get shaper_action and out_priority */
+	if (shaper_params->dual_rate)
+		propagation = dual_rate_prop_tbl[priority][shaper_color];
+	else
+		propagation = basic_prop_tbl[priority][shaper_color];
+
+	shaper_obj->propagation_result = propagation;
+	if (propagation.action == DELAY_PKT) {
+		/* First see if we already have an outstanding timer for this
+		 * shaper_obj, and if not insert one into the timer_wheel.
+		 */
+		if (shaper_obj->timer_outstanding == 0) {
+			/* Calculate elapsed time before this pkt will be
+			 * green or yellow.
+			 */
+			delay_cycles = cycles_till_not_red(shaper_params,
+							   shaper_obj);
+			wakeup_time = tm_system->current_cycles + delay_cycles;
+
+			/* Insert into timer wheel. */
+			odp_timer_wheel_insert(
+					tm_system->odp_int_timer_wheel,
+					wakeup_time, (void *)shaper_obj);
+			shaper_obj->callback_time = wakeup_time;
+			shaper_obj->timer_outstanding = 1;
+		}
+
+		shaper_obj->callback_reason = UNDELAY_PKT;
+		shaper_obj->out_pkt_desc = NULL;
+		shaper_obj->valid_finish_time = 0;
+		return -1;
+	}
+
+	if (priority < propagation.output_priority) {
+		if (shaper_obj->timer_outstanding == 0) {
+			/*Calculate elapsed time before this pkt will be able
+			 * to be full priority
+			 */
+			delay_cycles = cycles_till_green(shaper_params,
+							 shaper_obj);
+			wakeup_time = tm_system->current_cycles + delay_cycles;
+
+			/*Insert into timer wheel*/
+			odp_timer_wheel_insert(
+					tm_system->odp_int_timer_wheel,
+					wakeup_time, (void *)shaper_obj);
+			shaper_obj->callback_time = wakeup_time;
+			shaper_obj->timer_outstanding = 1;
+		}
+
+		shaper_obj->callback_reason = BOOST_PRIORITY;
+	} else {
+		shaper_obj->callback_time = 0;
+		shaper_obj->callback_reason = NO_CALLBACK;
+	}
+
+	/*Propagate pkt_desc to the next level*/
+	priority = propagation.output_priority;
+	output_change = (shaper_obj->out_pkt_desc != pkt_desc) ||
+		(shaper_obj->out_priority != priority);
+
+	shaper_obj->out_pkt_desc = pkt_desc;
+	shaper_obj->out_priority = priority;
+	if (output_change)
+		shaper_obj->valid_finish_time = 0;
+
+	return output_change;
+}
+
+static void update_shaper_for_pkt_sent(tm_system_t *tm_system ODP_UNUSED,
+				       tm_shaper_obj_t *shaper_obj,
+				       pkt_desc_t *sent_pkt_desc)
+{
+	tm_shaper_params_t *shaper_params;
+	tm_shaper_action_t shaper_action;
+	uint32_t frame_len;
+	int64_t tkn_count;
+
+	shaper_action = shaper_obj->propagation_result.action;
+	shaper_obj->callback_time = 0;
+	shaper_obj->callback_reason = NO_CALLBACK;
+	shaper_obj->propagation_result.action = DECR_NOTHING;
+
+	if (shaper_action == DECR_NOTHING)
+		return;
+	else if (shaper_action == DELAY_PKT)
+		return; /* ERROR - should never happen. */
+
+	shaper_params = shaper_obj->shaper_params;
+	frame_len = sent_pkt_desc->pkt_len + sent_pkt_desc->shaper_len_adjust
+			+ shaper_params->len_adjust;
+
+	tkn_count = ((int64_t)frame_len) << 26;
+	if ((shaper_action == DECR_BOTH) || (shaper_action == DECR_COMMIT))
+		shaper_obj->commit_cnt -= tkn_count;
+
+	if (shaper_params->peak_rate != 0)
+		if ((shaper_action == DECR_BOTH) ||
+		    (shaper_action == DECR_PEAK))
+			shaper_obj->peak_cnt -= tkn_count;
+}
+
+static int tm_set_finish_time(tm_schedulers_obj_t *schedulers_obj,
+			      tm_shaper_obj_t *prod_shaper_obj,
+			      pkt_desc_t *new_pkt_desc,
+			      uint8_t new_priority)
+{
+	odp_tm_sched_mode_t mode;
+	tm_sched_params_t *sched_params;
+	tm_sched_state_t *sched_state;
+	uint64_t base_virtual_time, virtual_finish_time;
+	uint32_t inverted_weight, frame_len, frame_weight;
+
+	/*Check to see if this pkt_desc is "new" or not */
+	if (prod_shaper_obj->valid_finish_time) {
+		/* @todo Should return 0 or 1 depending on whether this
+		 * pkt_desc is the currently chosen one.
+		 */
+		return 0;
+	}
+
+	/*First determine the virtual finish_time of this new pkt.*/
+	sched_params = prod_shaper_obj->sched_params;
+	if (!sched_params) {
+		mode = ODP_TM_BYTE_BASED_WEIGHTS;
+		inverted_weight = 4096;
+		/*Same as a weight of 16.*/
+	} else {
+		mode = sched_params->sched_modes[new_priority];
+		inverted_weight = sched_params->inverted_weights[new_priority];
+	}
+
+	frame_len = new_pkt_desc->pkt_len;
+	if (mode != ODP_TM_BYTE_BASED_WEIGHTS)
+		frame_len = 256;
+
+	frame_weight = ((inverted_weight * frame_len) + (1 << 15)) >> 16;
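+	/* E.g. with the default inverted_weight of 4096 (a weight of 16) and
+	 * frame_len 256, frame_weight = (4096 * 256 + 2**15) >> 16 = 16.
+	 */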
+
+	sched_state = &schedulers_obj->sched_states[new_priority];
+	base_virtual_time = MAX(prod_shaper_obj->virtual_finish_time,
+				sched_state->base_virtual_time);
+
+	virtual_finish_time = base_virtual_time + frame_weight;
+	prod_shaper_obj->virtual_finish_time = virtual_finish_time;
+	prod_shaper_obj->valid_finish_time = 1;
+	return 1;
+}
+
+static uint32_t tm_run_scheduler(tm_system_t *tm_system,
+				 tm_shaper_obj_t *prod_shaper_obj,
+				 tm_schedulers_obj_t *schedulers_obj,
+				 pkt_desc_t *new_pkt_desc,
+				 uint8_t new_priority,
+				 pkt_desc_t *sent_pkt_desc,
+				 uint32_t propagate_flags)
+{
+	odp_int_sorted_pool_t sorted_pool;
+	tm_sched_state_t *best_sched_state, *new_sched_state;
+	pkt_desc_t *best_pkt_desc, *pkt_desc;
+	uint64_t virtual_finish_time, new_virtual_finish_time;
+	uint32_t best_priority;
+	int rc;
+
+	if (propagate_flags & PF_PRIORITY_BOOST) {
+		/* Check to see if the new_pkt_desc (the one whose priority is
+		 * being boosted) is the current best or will be.
+		 */
+		ODP_DBG("%s BOOST_PRIORITY Not Yet Implemented\n", __func__);
+		return propagate_flags;
+		/* @todo */
+	}
+
+	if (propagate_flags & PF_RM_CURRENT_BEST) {
+		best_priority = schedulers_obj->highest_priority;
+		best_sched_state = &schedulers_obj->sched_states[best_priority];
+		if (best_sched_state->smallest_pkt_desc != sent_pkt_desc) {
+			/* ODP_DBG("%s nonmatching sent_pkt_desc\n",
+			 * __func__); @todo */
+			return propagate_flags;
+		}
+
+		best_sched_state->smallest_finish_time = -1;
+		best_sched_state->smallest_pkt_desc = NULL;
+		schedulers_obj->out_pkt_desc = NULL;
+		schedulers_obj->priority_bit_mask &= ~(1ULL << best_priority);
+		propagate_flags |= PF_CHANGED_OUT_PKT;
+
+		/* Get the pkt_desc with the smallest virtual finish time from
+		 * the sorted list at this priority level.  This could be
+		 * NULL.  If non-NULL this needs to be recorded as the current
+		 * best choice.  (if non-NULL).
+		 */
+		if (best_sched_state->sorted_list_cnt != 0) {
+			sorted_pool = tm_system->odp_int_sorted_pool;
+			pkt_desc =
+				odp_sorted_list_remove
+				(sorted_pool,
+				 best_sched_state->sorted_list,
+				 &virtual_finish_time);
+			if (!pkt_desc)
+				return propagate_flags;
+			/* @todo */
+
+			best_sched_state->smallest_finish_time =
+				virtual_finish_time;
+			best_sched_state->smallest_pkt_desc = pkt_desc;
+			schedulers_obj->out_pkt_desc = pkt_desc;
+			schedulers_obj->priority_bit_mask |=
+				1ULL << best_priority;
+			best_sched_state->sorted_list_cnt--;
+		} else if (schedulers_obj->priority_bit_mask != 0) {
+			/* Set the highest_priority and out_pkt_desc of the
+			 * schedulers_obj.  Such a pkt_desc must exist because
+			 * otherwise priority_bit_mask would be 0.
+			 */
+			best_priority = __builtin_ctz(
+				schedulers_obj->priority_bit_mask);
+			best_sched_state =
+				&schedulers_obj->sched_states[best_priority];
+			best_pkt_desc = best_sched_state->smallest_pkt_desc;
+
+			schedulers_obj->highest_priority = best_priority;
+			schedulers_obj->out_pkt_desc = best_pkt_desc;
+		}
+	}
+
+	if (!new_pkt_desc)
+		return propagate_flags;
+
+	/* Determine the virtual finish_time of this new pkt. */
+	tm_set_finish_time(schedulers_obj, prod_shaper_obj, new_pkt_desc,
+			   new_priority);
+	new_virtual_finish_time = prod_shaper_obj->virtual_finish_time;
+	new_sched_state = &schedulers_obj->sched_states[new_priority];
+
+	if (new_sched_state->smallest_pkt_desc) {
+		/* See if this new pkt has the smallest virtual finish time
+		 * for all fanin at the given input priority level, as
+		 * represented by the new_sched_state.
+		 */
+		if (new_sched_state->smallest_finish_time
+		    <= new_virtual_finish_time) {
+			/* Since this new pkt doesn't have the smallest
+			 * virtual finish time, just insert it into this
+			 * sched_state's list sorted by virtual finish times.
+			 */
+			rc = odp_sorted_list_insert(
+				tm_system->odp_int_sorted_pool,
+				new_sched_state->sorted_list,
+				new_virtual_finish_time, new_pkt_desc);
+			if (rc < 0)
+				return propagate_flags;
+			/* @todo */
+
+			new_sched_state->sorted_list_cnt++;
+			return propagate_flags;
+		}
+
+		/* Since this new pkt does have the smallest virtual finish
+		 * time, insert the previous best into this sched_state's list
+		 * sorted by virtual finish times (though it should always be
+		 * inserted at the front), and continue processing.
+		 */
+		rc = odp_sorted_list_insert
+			(tm_system->odp_int_sorted_pool,
+			 new_sched_state->sorted_list,
+			 new_sched_state->smallest_finish_time,
+			 new_sched_state->smallest_pkt_desc);
+		if (rc < 0)
+			return propagate_flags;
+		/* @todo */
+
+		new_sched_state->sorted_list_cnt++;
+	}
+
+	/* Record the fact that this new pkt is now the best choice of this
+	 * sched_state.
+	 */
+	new_sched_state->smallest_finish_time = new_virtual_finish_time;
+	new_sched_state->smallest_pkt_desc = new_pkt_desc;
+	schedulers_obj->priority_bit_mask |= 1ULL << new_priority;
+
+	/* Next see if this new pkt is the highest priority pkt of all of the
+	 * schedulers within this tm_node (recalling that the highest priority
+	 * has the lowest priority value).
+	 */
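+	/* The mask (1 << new_priority) - 1 covers all numerically lower (i.e.
+	 * higher) priorities; if any of those bits is set, a better pkt is
+	 * already queued and nothing more needs to be done here.
+	 */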
+	if ((schedulers_obj->priority_bit_mask & ((1ULL << new_priority) - 1))
+	    != 0)
+		return propagate_flags;
+
+	schedulers_obj->highest_priority = new_priority;
+	schedulers_obj->out_pkt_desc = new_pkt_desc;
+	return propagate_flags | PF_CHANGED_OUT_PKT;
+}
+
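+/* When a pkt is sent, advance the winning priority's base_virtual_time to
+ * that pkt's finish time, so later arrivals at this level cannot be given
+ * an earlier virtual finish time.
+ */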
+static void update_scheduler_for_sent_pkt(tm_schedulers_obj_t *schedulers_obj)
+{
+	tm_sched_state_t *sched_state;
+	uint32_t priority;
+
+	priority = schedulers_obj->highest_priority;
+	sched_state = &schedulers_obj->sched_states[priority];
+	sched_state->base_virtual_time = sched_state->smallest_finish_time;
+}
+
+static uint32_t tm_propagate_pkt_desc(tm_system_t *tm_system,
+				      tm_shaper_obj_t *shaper_obj,
+				      pkt_desc_t *new_pkt_desc,
+				      uint8_t new_priority,
+				      pkt_desc_t *sent_pkt_desc,
+				      uint32_t propagate_flags)
+{
+	tm_schedulers_obj_t *schedulers_obj;
+	tm_node_obj_t *tm_node_obj;
+	uint32_t flags;
+
+	/*Run shaper. */
+	tm_run_shaper(tm_system, shaper_obj, new_pkt_desc, new_priority);
+
+	new_pkt_desc = shaper_obj->out_pkt_desc;
+	new_priority = shaper_obj->out_priority;
+	if (!new_pkt_desc)
+		propagate_flags &= ~PF_NEW_PKT_IN;
+	else
+		propagate_flags |= PF_NEW_PKT_IN;
+
+	tm_node_obj = shaper_obj->next_tm_node;
+	while (tm_node_obj) /*not at egress */ {
+		/* Run scheduler, including priority multiplexor. */
+		schedulers_obj = tm_node_obj->schedulers_obj;
+		flags = tm_run_scheduler(tm_system, shaper_obj, schedulers_obj,
+					 new_pkt_desc, new_priority,
+					 sent_pkt_desc,
+					 propagate_flags);
+		if (flags & PF_ERROR)
+			return -1;
+
+		/* Run shaper. */
+		new_pkt_desc = schedulers_obj->out_pkt_desc;
+		new_priority = schedulers_obj->highest_priority;
+		shaper_obj = &tm_node_obj->shaper_obj;
+		tm_run_shaper(tm_system, shaper_obj, new_pkt_desc,
+			      new_priority);
+
+		new_pkt_desc = shaper_obj->out_pkt_desc;
+		new_priority = shaper_obj->out_priority;
+		if (!shaper_obj->out_pkt_desc)
+			propagate_flags &= ~PF_NEW_PKT_IN;
+		else
+			propagate_flags |= PF_NEW_PKT_IN;
+
+		tm_node_obj = shaper_obj->next_tm_node;
+	}
+
+	if (!new_pkt_desc)
+		return propagate_flags;
+
+	/* Reached egress. */
+	tm_system->egress_pkt_desc = new_pkt_desc;
+	return propagate_flags | PF_REACHED_EGRESS;
+}
+
+static void tm_pkt_desc_init(pkt_desc_t *pkt_desc, odp_packet_t pkt,
+			     uint32_t queue_num)
+{
+	pkt_desc->queue_num = queue_num;
+	pkt_desc->pkt_len = odp_packet_len(pkt);
+	pkt_desc->shaper_len_adjust = odp_packet_shaper_len_adjust(pkt);
+	pkt_desc->drop_eligible = odp_packet_drop_eligible(pkt);
+	pkt_desc->pkt_color = odp_packet_color(pkt);
+}
+
+static void tm_pkt_sent_updates(tm_system_t *tm_system,
+				pkt_desc_t *sent_pkt_desc)
+{
+	tm_shaper_obj_t *shaper_obj;
+	tm_queue_obj_t *tm_queue_obj;
+	tm_node_obj_t *tm_node_obj;
+	uint32_t queue_num, pkt_len;
+
+	queue_num = sent_pkt_desc->queue_num;
+	tm_queue_obj = tm_system->queue_num_tbl[queue_num];
+	pkt_len = sent_pkt_desc->pkt_len;
+	tm_queue_obj->pkts_consumed_cnt++;
+	tm_queue_cnts_decrement(tm_system, tm_queue_obj->tm_wred_node,
+				tm_queue_obj->priority, pkt_len);
+
+	shaper_obj = &tm_queue_obj->shaper_obj;
+	update_shaper_for_pkt_sent(tm_system, shaper_obj, sent_pkt_desc);
+
+	tm_node_obj = shaper_obj->next_tm_node;
+	while (tm_node_obj) /*not at egress */ {
+		update_scheduler_for_sent_pkt(tm_node_obj->schedulers_obj);
+
+		shaper_obj = &tm_node_obj->shaper_obj;
+		update_shaper_for_pkt_sent(tm_system, shaper_obj,
+					   sent_pkt_desc);
+
+		tm_node_obj = shaper_obj->next_tm_node;
+	}
+}
+
+static int tm_consume_pkt_desc(tm_system_t *tm_system,
+			       pkt_desc_t *sent_pkt_desc)
+{
+	odp_int_pkt_queue_t odp_int_pkt_queue;
+	tm_queue_obj_t *tm_queue_obj;
+	odp_packet_t pkt;
+	pkt_desc_t *new_pkt_desc;
+	uint32_t queue_num, propagate_flags, flags;
+	int rc;
+
+	queue_num = sent_pkt_desc->queue_num;
+	tm_queue_obj = tm_system->queue_num_tbl[queue_num];
+	tm_pkt_sent_updates(tm_system, sent_pkt_desc);
+
+	/* Get the next pkt in the tm_queue, if there is one. */
+	odp_int_pkt_queue = tm_queue_obj->odp_int_pkt_queue;
+	rc = odp_pkt_queue_remove(tm_system->odp_int_queue_pool,
+				  odp_int_pkt_queue, &pkt);
+	if (rc < 0)
+		return rc;
+
+	propagate_flags = PF_RM_CURRENT_BEST;
+	new_pkt_desc = NULL;
+	if (0 < rc) {
+		propagate_flags |= PF_NEW_PKT_IN;
+		tm_queue_obj->pkt = pkt;
+		tm_pkt_desc_init(&tm_queue_obj->in_pkt_desc, pkt, queue_num);
+		new_pkt_desc = &tm_queue_obj->in_pkt_desc;
+		tm_queue_obj->pkts_dequeued_cnt++;
+	}
+
+	flags = tm_propagate_pkt_desc(tm_system, &tm_queue_obj->shaper_obj,
+				      new_pkt_desc, tm_queue_obj->priority,
+				      sent_pkt_desc,
+				      propagate_flags);
+	return (flags & PF_REACHED_EGRESS) != 0;
+}
+
+static odp_tm_percent_t tm_queue_fullness(odp_tm_wred_params_t *wred_params,
+					  tm_queue_thresholds_t *thresholds,
+					  tm_queue_cnts_t *queue_cnts)
+{
+	uint64_t current_cnt, max_cnt, fullness;
+
+	if (wred_params->use_byte_fullness) {
+		current_cnt = odp_atomic_load_u64(&queue_cnts->byte_cnt);
+		max_cnt = thresholds->max_bytes;
+	} else {
+		current_cnt = odp_atomic_load_u64(&queue_cnts->pkt_cnt);
+		max_cnt = thresholds->max_pkts;
+	}
+
+	if (max_cnt == 0)
+		return 0;
+
+	fullness = (10000 * current_cnt) / max_cnt;
+	return (odp_tm_percent_t)MIN(fullness, 50000);
+}
+
+static odp_bool_t tm_local_random_drop(tm_system_t *tm_system,
+				       odp_tm_wred_params_t *wred_params,
+				       odp_tm_percent_t queue_fullness)
+{
+	odp_tm_percent_t min_threshold, med_threshold, first_threshold,
+		drop_prob;
+	odp_tm_percent_t med_drop_prob, max_drop_prob;
+	uint32_t denom, numer;
+
+	if (wred_params->enable_wred == 0)
+		return 0;
+
+	min_threshold = wred_params->min_threshold;
+	med_threshold = wred_params->med_threshold;
+	first_threshold = (min_threshold != 0) ? min_threshold : med_threshold;
+	if (10000 <= queue_fullness)
+		return 1;
+	else if (queue_fullness <= first_threshold)
+		return 0;
+
+	/* Determine if we have two active thresholds, min_threshold and
+	 * med_threshold or just med_threshold.
+	 */
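+	/* For example (hypothetical profile): min_threshold = 4000 (40%),
+	 * med_threshold = 7000, med_drop_prob = 2000 and max_drop_prob = 5000
+	 * with queue_fullness = 8500 lands in the final branch below:
+	 * drop_prob = 5000 - ((3000 * (10000 - 8500)) / 3000) = 3500 (35%).
+	 */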
+	med_drop_prob = wred_params->med_drop_prob;
+	max_drop_prob = wred_params->max_drop_prob;
+	if (min_threshold == 0) {
+		denom = (uint32_t)(10000 - med_threshold);
+		numer = (uint32_t)max_drop_prob;
+		drop_prob =
+			(numer * (uint32_t)(queue_fullness - med_threshold)) /
+			denom;
+	} else if ((min_threshold < queue_fullness) &&
+		   (queue_fullness < med_threshold)) {
+		denom = (uint32_t)(med_threshold - min_threshold);
+		numer = (uint32_t)med_drop_prob;
+		drop_prob =
+			(numer * (uint32_t)(queue_fullness - min_threshold)) /
+			denom;
+	} else /* med_threshold <= queue_fullness. */ {
+		denom = (uint32_t)(10000 - med_threshold);
+		numer = (uint32_t)(max_drop_prob - med_drop_prob);
+		drop_prob = max_drop_prob -
+			((numer * (10000 - queue_fullness)) / denom);
+	}
+
+	if (drop_prob == 0)
+		return 0;
+	else if (10000 <= drop_prob)
+		return 1;
+	else
+		return tm_random_drop(&tm_system->tm_random_data, drop_prob);
+}
+
+static odp_bool_t tm_queue_is_full(tm_queue_thresholds_t *thresholds,
+				   tm_queue_cnts_t *queue_cnts)
+{
+	odp_bool_t queue_is_full;
+	uint64_t max_bytes;
+	uint32_t max_pkts;
+
+	max_bytes = thresholds->max_bytes;
+	max_pkts = thresholds->max_pkts;
+	queue_is_full = 0;
+	if (max_pkts != 0)
+		queue_is_full =
+			max_pkts <= odp_atomic_load_u64(&queue_cnts->pkt_cnt);
+
+	if (max_bytes != 0)
+		queue_is_full |=
+			max_bytes <= odp_atomic_load_u64(&queue_cnts->byte_cnt);
+
+	return queue_is_full;
+}
+
+static odp_bool_t tm_random_early_discard(tm_system_t *tm_system,
+					  tm_queue_obj_t *tm_queue_obj
+					  ODP_UNUSED,
+					  tm_wred_node_t *tm_wred_node,
+					  odp_pkt_color_t pkt_color)
+{
+	tm_queue_thresholds_t *thresholds;
+	odp_tm_wred_params_t *wred_params;
+	odp_tm_percent_t fullness;
+	tm_queue_cnts_t *queue_cnts;
+
+	thresholds = tm_wred_node->threshold_params;
+	if (thresholds) {
+		wred_params = tm_wred_node->wred_params[pkt_color];
+		queue_cnts = &tm_wred_node->queue_cnts;
+		if ((!wred_params) || (wred_params->enable_wred == 0)) {
+			if (tm_queue_is_full(thresholds, queue_cnts))
+				return 1;
+		} else {
+			fullness = tm_queue_fullness(wred_params, thresholds,
+						     queue_cnts);
+			if (tm_local_random_drop(tm_system, wred_params,
+						 fullness))
+				return 1;
+		}
+	}
+
+	/* For each tm_wred_node between the initial and a TM egress, do a
+	 * Random Early Discard check, if enabled.
+	 */
+	tm_wred_node = tm_wred_node->next_tm_wred_node;
+	while (tm_wred_node) {
+		thresholds = tm_wred_node->threshold_params;
+		if (thresholds) {
+			wred_params = tm_wred_node->wred_params[pkt_color];
+			queue_cnts = &tm_wred_node->queue_cnts;
+			if ((wred_params) && wred_params->enable_wred) {
+				fullness = tm_queue_fullness(wred_params,
+							     thresholds,
+							     queue_cnts);
+				if (tm_local_random_drop(tm_system, wred_params,
+							 fullness))
+					return 1;
+			}
+		}
+
+		tm_wred_node = tm_wred_node->next_tm_wred_node;
+	}
+
+	return 0;
+}
+
+/* Returns the current queue pkt_cnt. */
+static uint32_t tm_queue_cnts_increment(tm_system_t *tm_system,
+					tm_wred_node_t *tm_wred_node,
+					uint32_t priority,
+					uint32_t frame_len)
+{
+	tm_queue_cnts_t *queue_cnts;
+	uint32_t tm_queue_pkt_cnt;
+
+	odp_ticketlock_lock(&tm_wred_node->tm_wred_node_lock);
+	queue_cnts = &tm_wred_node->queue_cnts;
+	odp_atomic_inc_u64(&queue_cnts->pkt_cnt);
+	odp_atomic_add_u64(&queue_cnts->byte_cnt, frame_len);
+
+	tm_queue_pkt_cnt = (uint32_t)odp_atomic_load_u64(&queue_cnts->pkt_cnt);
+	odp_ticketlock_unlock(&tm_wred_node->tm_wred_node_lock);
+
+	/* For each tm_wred_node between the initial one and a TM egress,
+	 * increment its queue_cnts, if enabled.
+	 */
+	tm_wred_node = tm_wred_node->next_tm_wred_node;
+	while (tm_wred_node) {
+		odp_ticketlock_lock(&tm_wred_node->tm_wred_node_lock);
+		queue_cnts = &tm_wred_node->queue_cnts;
+		odp_atomic_inc_u64(&queue_cnts->pkt_cnt);
+		odp_atomic_add_u64(&queue_cnts->byte_cnt, frame_len);
+		odp_ticketlock_unlock(&tm_wred_node->tm_wred_node_lock);
+
+		tm_wred_node = tm_wred_node->next_tm_wred_node;
+	}
+
+	queue_cnts = &tm_system->total_info.queue_cnts;
+	odp_atomic_inc_u64(&queue_cnts->pkt_cnt);
+	odp_atomic_add_u64(&queue_cnts->byte_cnt, frame_len);
+
+	queue_cnts = &tm_system->priority_info[priority].queue_cnts;
+	odp_atomic_inc_u64(&queue_cnts->pkt_cnt);
+	odp_atomic_add_u64(&queue_cnts->byte_cnt, frame_len);
+
+	return tm_queue_pkt_cnt;
+}
+
+static void tm_queue_cnts_decrement(tm_system_t *tm_system,
+				    tm_wred_node_t *tm_wred_node,
+				    uint32_t priority,
+				    uint32_t frame_len)
+{
+	tm_queue_cnts_t *queue_cnts;
+
+	odp_ticketlock_lock(&tm_wred_node->tm_wred_node_lock);
+	queue_cnts = &tm_wred_node->queue_cnts;
+	odp_atomic_dec_u64(&queue_cnts->pkt_cnt);
+	odp_atomic_sub_u64(&queue_cnts->byte_cnt, frame_len);
+	odp_ticketlock_unlock(&tm_wred_node->tm_wred_node_lock);
+
+	/* For each tm_wred_node between the initial one and a TM egress,
+	 * decrement its queue_cnts, if enabled.
+	 */
+	tm_wred_node = tm_wred_node->next_tm_wred_node;
+	while (tm_wred_node) {
+		odp_ticketlock_lock(&tm_wred_node->tm_wred_node_lock);
+		queue_cnts = &tm_wred_node->queue_cnts;
+		odp_atomic_dec_u64(&queue_cnts->pkt_cnt);
+		odp_atomic_sub_u64(&queue_cnts->byte_cnt, frame_len);
+		odp_ticketlock_unlock(&tm_wred_node->tm_wred_node_lock);
+		tm_wred_node = tm_wred_node->next_tm_wred_node;
+	}
+
+	queue_cnts = &tm_system->total_info.queue_cnts;
+	odp_atomic_dec_u64(&queue_cnts->pkt_cnt);
+	odp_atomic_sub_u64(&queue_cnts->byte_cnt, frame_len);
+
+	queue_cnts = &tm_system->priority_info[priority].queue_cnts;
+	odp_atomic_dec_u64(&queue_cnts->pkt_cnt);
+	odp_atomic_sub_u64(&queue_cnts->byte_cnt, frame_len);
+}
+
+static int tm_enqueue(tm_system_t *tm_system,
+		      tm_queue_obj_t *tm_queue_obj,
+		      odp_packet_t pkt)
+{
+	input_work_item_t work_item;
+	odp_pkt_color_t pkt_color;
+	tm_wred_node_t *initial_tm_wred_node;
+	odp_bool_t drop_eligible, drop;
+	uint32_t frame_len, pkt_depth;
+	int rc;
+
+	if (tm_system->first_enq == 0) {
+		odp_barrier_wait(&tm_system->tm_system_barrier);
+		tm_system->first_enq = 1;
+	}
+
+	pkt_color = odp_packet_color(pkt);
+	drop_eligible = odp_packet_drop_eligible(pkt);
+
+	initial_tm_wred_node = tm_queue_obj->tm_wred_node;
+	if (drop_eligible) {
+		drop = tm_random_early_discard(tm_system, tm_queue_obj,
+					       initial_tm_wred_node, pkt_color);
+		if (drop)
+			return -1;
+	}
+
+	work_item.tm_queue_obj = tm_queue_obj;
+	work_item.pkt = pkt;
+	rc = input_work_queue_append(tm_system, &work_item);
+	if (rc < 0) {
+		ODP_DBG("%s work queue full\n", __func__);
+		return rc;
+	}
+
+	frame_len = odp_packet_len(pkt);
+	pkt_depth = tm_queue_cnts_increment(tm_system, initial_tm_wred_node,
+					    tm_queue_obj->priority, frame_len);
+	return pkt_depth;
+}
+
+static void tm_send_pkt(tm_system_t *tm_system,
+			uint32_t max_consume_sends ODP_UNUSED)
+{
+	tm_queue_obj_t *tm_queue_obj;
+	pkt_desc_t *pkt_desc;
+	uint32_t cnt;
+	int rc;
+
+	/* for (cnt = 1; cnt < max_consume_sends; cnt++) @todo */
+	for (cnt = 1; cnt < 1000; cnt++) {
+		pkt_desc = tm_system->egress_pkt_desc;
+		if (!pkt_desc)
+			return;
+
+		tm_queue_obj = tm_system->queue_num_tbl[pkt_desc->queue_num];
+		odp_pktio_send(tm_system->egress.pktio,
+			       &tm_queue_obj->pkt, 1);
+
+		tm_queue_obj->pkt = ODP_PACKET_INVALID;
+		tm_system->egress_pkt_desc = NULL;
+		rc = tm_consume_pkt_desc(tm_system,
+					 &tm_queue_obj->in_pkt_desc);
+		if (rc <= 0)
+			return;
+	}
+}
+}
+
+static int tm_process_input_work_queue(tm_system_t *tm_system,
+				       input_work_queue_t *input_work_queue,
+				       uint32_t pkts_to_process)
+{
+	input_work_item_t work_item;
+	tm_queue_obj_t *tm_queue_obj;
+	tm_shaper_obj_t *shaper_obj;
+	odp_packet_t pkt;
+	pkt_desc_t *pkt_desc;
+	uint32_t cnt, queue_num, flags;
+	int rc;
+
+	for (cnt = 1; cnt <= pkts_to_process; cnt++) {
+		rc = input_work_queue_remove(input_work_queue, &work_item);
+		if (rc < 0)
+			return rc;
+
+		tm_queue_obj = work_item.tm_queue_obj;
+		pkt = work_item.pkt;
+		tm_queue_obj->pkts_rcvd_cnt++;
+		if (tm_queue_obj->pkt != ODP_PACKET_INVALID) {
+			/* If the tm_queue_obj already has a pkt to work with,
+			 * then just add this new pkt to the associated
+			 * odp_int_pkt_queue.
+			 */
+			rc = odp_pkt_queue_append(
+				tm_system->odp_int_queue_pool,
+				tm_queue_obj->odp_int_pkt_queue, pkt);
+			tm_queue_obj->pkts_enqueued_cnt++;
+		} else {
+			/* If the tm_queue_obj doesn't have a pkt to work
+			 * with, then make this one the head pkt.
+			 */
+			tm_queue_obj->pkt = pkt;
+			queue_num = tm_queue_obj->queue_num;
+			tm_pkt_desc_init(&tm_queue_obj->in_pkt_desc, pkt,
+					 queue_num);
+
+			pkt_desc = &tm_queue_obj->in_pkt_desc;
+			shaper_obj = &tm_queue_obj->shaper_obj;
+			flags = tm_propagate_pkt_desc(tm_system,
+						      shaper_obj,
+						      pkt_desc,
+						      tm_queue_obj->priority,
+						      NULL,
+						      PF_NEW_PKT_IN);
+			if (flags & PF_REACHED_EGRESS)
+				return 1;
+			/*Send thru spigot */
+		}
+	}
+
+	return 0;
+}
+
+static int tm_process_expired_timers(tm_system_t *tm_system,
+				     odp_timer_wheel_t odp_int_timer_wheel,
+				     uint64_t current_cycles)
+{
+	tm_shaper_obj_t *shaper_obj;
+	pkt_desc_t *pkt_desc;
+	uint32_t cnt, propagate_flags, flags;
+	uint8_t priority;
+
+	for (cnt = 1; cnt <= 4; cnt++) {
+		shaper_obj =
+			odp_timer_wheel_next_expired(odp_int_timer_wheel);
+		if (!shaper_obj)
+			return 0;
+
+		shaper_obj->timer_outstanding = 0;
+		if ((shaper_obj->callback_reason != NO_CALLBACK) &&
+		    (shaper_obj->callback_time <= current_cycles)) {
+			pkt_desc = shaper_obj->in_pkt_desc;
+			priority = shaper_obj->input_priority;
+			if (shaper_obj->callback_reason == BOOST_PRIORITY)
+				propagate_flags = PF_PRIORITY_BOOST;
+			else
+				propagate_flags = PF_NEW_PKT_IN;
+
+			flags = tm_propagate_pkt_desc(tm_system, shaper_obj,
+						      pkt_desc, priority, NULL,
+						      propagate_flags);
+			if (flags & PF_REACHED_EGRESS)
+				tm_send_pkt(tm_system, 4);
+		}
+	}
+
+	return 0;
+}
+
+static void *tm_system_thread(void *arg)
+{
+	odp_timer_wheel_t odp_int_timer_wheel;
+	input_work_queue_t *input_work_queue;
+	tm_system_t *tm_system;
+	uint64_t current_cycles;
+	uint32_t work_queue_cnt;
+	uint8_t destroying = 0;
+	int rc;
+
+	tm_system = arg;
+	odp_int_timer_wheel = tm_system->odp_int_timer_wheel;
+	input_work_queue = tm_system->input_work_queue;
+
+	/* Wait here until we have seen the first enqueue operation. */
+	odp_barrier_wait(&tm_system->tm_system_barrier);
+
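+	/* current_cycles is a synthetic clock: the loop below advances it by
+	 * rough cost estimates for each unit of work instead of reading real
+	 * time.
+	 */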
+	current_cycles = 100;
+	while (destroying == 0) {
+		tm_system->current_cycles = current_cycles;
+		rc = odp_timer_wheel_curr_time_update(odp_int_timer_wheel,
+						      current_cycles);
+		if (0 < rc) {
+			/* Process a batch of expired timers - each of which
+			 * could cause a pkt to egress the tm system.
+			 */
+			rc = tm_process_expired_timers(tm_system,
+						       odp_int_timer_wheel,
+						       current_cycles);
+			current_cycles += 16;
+		}
+
+		work_queue_cnt = odp_atomic_load_u32(
+			&input_work_queue->queue_cnt);
+		if (work_queue_cnt != 0) {
+			rc = tm_process_input_work_queue(tm_system,
+							 input_work_queue, 1);
+			current_cycles += 8;
+			if (0 < rc) {
+				tm_send_pkt(tm_system, 4);
+				current_cycles += 8;
+			}
+		}
+
+		current_cycles += 2;
+		destroying = odp_atomic_load_u32(&tm_system->destroying);
+	}
+
+	odp_barrier_wait(&tm_system->tm_system_destroy_barrier);
+	return NULL;
+}
+
+void odp_tm_capability_init(odp_tm_capability_t *capability)
+{
+	memset(capability, 0, sizeof(odp_tm_capability_t));
+}
+
+void odp_tm_egress_init(odp_tm_egress_t *egress)
+{
+	memset(egress, 0, sizeof(odp_tm_egress_t));
+}
+
+void odp_tm_params_init(odp_tm_params_t *params)
+{
+	memset(params, 0, sizeof(odp_tm_params_t));
+}
+
+odp_tm_t odp_tm_create(char *name, odp_tm_params_t *params)
+{
+	odp_int_name_t name_tbl_id;
+	tm_system_t *tm_system;
+	odp_bool_t create_fail;
+	pthread_t pthread;
+	odp_tm_t odp_tm;
+	uint64_t current_cycles;
+	uint32_t malloc_len, max_num_queues, max_queued_pkts, max_timers;
+	uint32_t max_sorted_lists;
+	int rc;
+
+	/* Allocate tm_system_t record. */
+	odp_ticketlock_lock(&tm_create_lock);
+	tm_system = tm_system_alloc();
+	if (!tm_system) {
+		odp_ticketlock_unlock(&tm_create_lock);
+		return ODP_TM_INVALID;
+	}
+
+	odp_tm = MAKE_ODP_TM_HANDLE(tm_system);
+	name_tbl_id = odp_int_name_tbl_add(name, ODP_TM_HANDLE, odp_tm);
+	if (name_tbl_id == ODP_INVALID_NAME) {
+		tm_system_free(tm_system);
+		odp_ticketlock_unlock(&tm_create_lock);
+		return ODP_TM_INVALID;
+	}
+
+	tm_system->name_tbl_id = name_tbl_id;
+	memcpy(&tm_system->egress, &params->egress, sizeof(odp_tm_egress_t));
+	memcpy(&tm_system->capability, &params->capability,
+	       sizeof(odp_tm_capability_t));
+
+	malloc_len = params->capability.max_tm_queues
+		* sizeof(tm_queue_obj_t *);
+	tm_system->queue_num_tbl = malloc(malloc_len);
+	memset(tm_system->queue_num_tbl, 0, malloc_len);
+	tm_system->next_queue_num = 1;
+
+	tm_init_random_data(&tm_system->tm_random_data);
+
+	max_sorted_lists = 2 * params->capability.max_tm_queues;
+	max_num_queues = params->capability.max_tm_queues;
+	max_queued_pkts = 16 * params->capability.max_tm_queues;
+	max_timers = 2 * params->capability.max_tm_queues;
+	current_cycles = 10;
+	create_fail = 0;
+
+	tm_system->odp_int_sorted_pool = ODP_INT_SORTED_POOL_INVALID;
+	tm_system->odp_int_queue_pool = ODP_INT_QUEUE_POOL_INVALID;
+	tm_system->odp_int_timer_wheel = ODP_INT_TIMER_WHEEL_INVALID;
+
+	odp_ticketlock_init(&tm_system->tm_system_lock);
+	odp_barrier_init(&tm_system->tm_system_barrier, 2);
+	odp_atomic_init_u32(&tm_system->destroying, 0);
+
+	tm_system->odp_int_sorted_pool = odp_sorted_pool_create(
+		max_sorted_lists);
+	create_fail |= tm_system->odp_int_sorted_pool
+		== ODP_INT_SORTED_POOL_INVALID;
+
+	if (create_fail == 0) {
+		tm_system->odp_int_queue_pool = odp_queue_pool_create(
+			max_num_queues, max_queued_pkts);
+		create_fail |= tm_system->odp_int_queue_pool
+			== ODP_INT_QUEUE_POOL_INVALID;
+	}
+
+	if (create_fail == 0) {
+		tm_system->odp_int_timer_wheel = odp_timer_wheel_create(
+			max_timers, current_cycles);
+		create_fail |= tm_system->odp_int_timer_wheel
+			== ODP_INT_TIMER_WHEEL_INVALID;
+	}
+
+	if (create_fail == 0) {
+		tm_system->input_work_queue = input_work_queue_create();
+		create_fail |= !tm_system->input_work_queue;
+	}
+
+	if (create_fail == 0) {
+		rc = pthread_create(&pthread, NULL, tm_system_thread,
+				    tm_system);
+		create_fail |= rc != 0;
+	}
+
+	if (create_fail) {
+		odp_int_name_tbl_delete(name_tbl_id);
+		if (tm_system->input_work_queue)
+			input_work_queue_destroy(tm_system->input_work_queue);
+
+		if (tm_system->odp_int_sorted_pool
+		    != ODP_INT_SORTED_POOL_INVALID)
+			odp_sorted_pool_destroy(
+				tm_system->odp_int_sorted_pool);
+
+		if (tm_system->odp_int_queue_pool != ODP_INT_QUEUE_POOL_INVALID)
+			odp_queue_pool_destroy(
+				tm_system->odp_int_queue_pool);
+
+		if (tm_system->odp_int_timer_wheel
+		    != ODP_INT_TIMER_WHEEL_INVALID)
+			odp_timer_wheel_destroy(
+				tm_system->odp_int_timer_wheel);
+
+		tm_system_free(tm_system);
+		odp_ticketlock_unlock(&tm_create_lock);
+		return ODP_TM_INVALID;
+	}
+
+	odp_ticketlock_unlock(&tm_create_lock);
+	return odp_tm;
+}
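+
+/* A minimal usage sketch (error handling abbreviated; the capability values
+ * shown are only an example):
+ *
+ *	odp_tm_params_t params;
+ *	odp_tm_t odp_tm;
+ *
+ *	odp_tm_params_init(&params);
+ *	odp_tm_egress_init(&params.egress);
+ *	odp_tm_capability_init(&params.capability);
+ *	params.capability.max_tm_queues = 64;
+ *	odp_tm = odp_tm_create("tm0", &params);
+ *	if (odp_tm == ODP_TM_INVALID)
+ *		return -1;
+ */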
+
+odp_tm_t odp_tm_find(char *name ODP_UNUSED,
+		     odp_tm_capability_t *capability ODP_UNUSED)
+{
+	return ODP_TM_INVALID; /* @todo Not yet implemented. */
+}
+
+int odp_tm_capability(odp_tm_t odp_tm, odp_tm_capability_t *capability)
+{
+	tm_system_t *tm_system;
+
+	tm_system = GET_TM_SYSTEM(odp_tm);
+	memcpy(capability, &tm_system->capability, sizeof(odp_tm_capability_t));
+	return 0;
+}
+
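+/* odp_tm_destroy first raises the destroying flag so that no new packets
+ * are accepted, then waits on the destroy barrier for the service thread to
+ * finish before tearing down the input work queue, the internal pools and
+ * the timer wheel.
+ */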
+int odp_tm_destroy(odp_tm_t odp_tm)
+{
+	tm_system_t *tm_system;
+
+	tm_system = GET_TM_SYSTEM(odp_tm);
+
+	/* First mark the tm_system as being in the destroying state so that
+	 * all new pkts are prevented from coming in.
+	 */
+	odp_barrier_init(&tm_system->tm_system_destroy_barrier, 2);
+	odp_atomic_inc_u32(&tm_system->destroying);
+	odp_barrier_wait(&tm_system->tm_system_destroy_barrier);
+
+	input_work_queue_destroy(tm_system->input_work_queue);
+	odp_sorted_pool_destroy(tm_system->odp_int_sorted_pool);
+	odp_queue_pool_destroy(tm_system->odp_int_queue_pool);
+	odp_timer_wheel_destroy(tm_system->odp_int_timer_wheel);
+
+	tm_system_free(tm_system);
+	return 0;
+}
+
+void odp_tm_shaper_params_init(odp_tm_shaper_params_t *params)
+{
+	memset(params, 0, sizeof(odp_tm_shaper_params_t));
+}
+
+odp_tm_shaper_t odp_tm_shaper_create(char *name,
+				     odp_tm_shaper_params_t *params)
+{
+	tm_shaper_params_t *profile_obj;
+	odp_tm_shaper_t shaper_handle;
+	odp_int_name_t name_tbl_id;
+
+	profile_obj = tm_common_profile_create(name, TM_SHAPER_PROFILE,
+					       sizeof(tm_shaper_params_t),
+					       &shaper_handle,
+					       &name_tbl_id);
+	if (!profile_obj)
+		return ODP_TM_INVALID;
+
+	tm_shaper_params_cvt_to(params, profile_obj);
+	profile_obj->name_tbl_id = name_tbl_id;
+	return shaper_handle;
+}
+
+int odp_tm_shaper(odp_tm_shaper_t shaper_profile,
+		  odp_tm_shaper_params_t *params)
+{
+	tm_shaper_params_t *profile_obj;
+
+	profile_obj = tm_get_profile_params(shaper_profile, TM_SHAPER_PROFILE);
+	if (!profile_obj)
+		return -1;
+
+	tm_shaper_params_cvt_from(profile_obj, params);
+	return 0;
+}
+
+int odp_tm_shaper_set(odp_tm_shaper_t shaper_profile,
+		      odp_tm_shaper_params_t *params)
+{
+	tm_shaper_params_t *profile_obj;
+
+	profile_obj = tm_get_profile_params(shaper_profile, TM_SHAPER_PROFILE);
+	if (!profile_obj)
+		return -1;
+
+	tm_shaper_params_cvt_to(params, profile_obj);
+	return 0;
+}
+
+odp_tm_shaper_t odp_tm_shaper_lookup(char *name)
+{
+	return odp_int_name_tbl_lookup(name, ODP_TM_SHAPER_PROFILE_HANDLE);
+}
+
+void odp_tm_sched_params_init(odp_tm_sched_params_t *params)
+{
+	memset(params, 0, sizeof(odp_tm_sched_params_t));
+}
+
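+/* odp_tm_sched_create stores, per priority, the scheduling mode and an
+ * inverted weight (0x10000 / weight) rather than the raw weight itself.
+ */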
+odp_tm_sched_t odp_tm_sched_create(char *name,
+				   odp_tm_sched_params_t *params)
+{
+	odp_tm_sched_mode_t sched_mode;
+	tm_sched_params_t *profile_obj;
+	odp_int_name_t name_tbl_id;
+	odp_tm_sched_t sched_handle;
+	uint32_t priority, weight;
+
+	profile_obj = tm_common_profile_create(name, TM_SCHED_PROFILE,
+					       sizeof(tm_sched_params_t),
+					       &sched_handle, &name_tbl_id);
+	if (!profile_obj)
+		return ODP_TM_INVALID;
+
+	profile_obj->name_tbl_id = name_tbl_id;
+
+	for (priority = 0; priority < ODP_TM_MAX_PRIORITIES; priority++) {
+		sched_mode = params->sched_modes[priority];
+		weight = params->sched_weights[priority];
+
+		/* Guard against a zero weight causing a divide by zero. */
+		if (weight == 0)
+			weight = 1;
+
+		profile_obj->sched_modes[priority] = sched_mode;
+		profile_obj->inverted_weights[priority] = 0x10000 / weight;
+	}
+
+	return sched_handle;
+}
+
+int odp_tm_sched(odp_tm_sched_t sched_profile, odp_tm_sched_params_t *params)
+{
+	odp_tm_sched_params_t *sched_params;
+
+	sched_params = tm_get_profile_params(sched_profile, TM_SCHED_PROFILE);
+	if (!sched_params)
+		return -1;
+
+	*params = *sched_params;
+	return 0;
+}
+
+int odp_tm_sched_set(odp_tm_sched_t sched_profile,
+		     odp_tm_sched_params_t *params)
+{
+	odp_tm_sched_params_t *sched_params;
+
+	sched_params = tm_get_profile_params(sched_profile, TM_SCHED_PROFILE);
+	if (!sched_params)
+		return -1;
+
+	*sched_params = *params;
+	return 0;
+}
+
+odp_tm_sched_t odp_tm_sched_lookup(char *name)
+{
+	return odp_int_name_tbl_lookup(name, ODP_TM_SCHED_PROFILE_HANDLE);
+}
+
+void odp_tm_threshold_params_init(odp_tm_threshold_params_t *params)
+{
+	memset(params, 0, sizeof(odp_tm_threshold_params_t));
+}
+
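+/* A max_pkts or max_bytes value of zero stored in the threshold profile
+ * means that the corresponding limit is disabled; this is how the
+ * enable_max_pkts/enable_max_bytes flags are encoded here and recovered in
+ * odp_tm_thresholds().
+ */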
+odp_tm_threshold_t odp_tm_threshold_create(char *name,
+					   odp_tm_threshold_params_t *params)
+{
+	tm_queue_thresholds_t *profile_obj;
+	odp_tm_threshold_t threshold_handle;
+	odp_int_name_t name_tbl_id;
+
+	profile_obj =
+		tm_common_profile_create(name, TM_THRESHOLD_PROFILE,
+					 sizeof(odp_tm_threshold_params_t),
+					 &threshold_handle,
+					 &name_tbl_id);
+	if (!profile_obj)
+		return ODP_TM_INVALID;
+
+	profile_obj->max_pkts = params->enable_max_pkts ? params->max_pkts : 0;
+	profile_obj->max_bytes =
+		params->enable_max_bytes ? params->max_bytes : 0;
+	profile_obj->name_tbl_id = name_tbl_id;
+	return threshold_handle;
+}
+
+int odp_tm_thresholds(odp_tm_threshold_t threshold_profile,
+		      odp_tm_threshold_params_t *params)
+{
+	tm_queue_thresholds_t *threshold_params;
+
+	threshold_params = tm_get_profile_params(threshold_profile,
+						 TM_THRESHOLD_PROFILE);
+	if (!threshold_params)
+		return -1;
+
+	params->max_pkts = threshold_params->max_pkts;
+	params->max_bytes = threshold_params->max_bytes;
+	params->enable_max_pkts = threshold_params->max_pkts != 0;
+	params->enable_max_bytes = threshold_params->max_bytes != 0;
+	return 0;
+}
+
+int odp_tm_thresholds_set(odp_tm_threshold_t threshold_profile,
+			  odp_tm_threshold_params_t *params)
+{
+	tm_queue_thresholds_t *profile_obj;
+
+	profile_obj = tm_get_profile_params(threshold_profile,
+					    TM_THRESHOLD_PROFILE);
+	if (!profile_obj)
+		return -1;
+
+	profile_obj->max_pkts = params->enable_max_pkts ? params->max_pkts : 0;
+	profile_obj->max_bytes =
+		params->enable_max_bytes ? params->max_bytes : 0;
+	return 0;
+}
+
+odp_tm_threshold_t odp_tm_thresholds_lookup(char *name)
+{
+	return odp_int_name_tbl_lookup(name, ODP_TM_THRESHOLD_PROFILE_HANDLE);
+}
+
+void odp_tm_wred_params_init(odp_tm_wred_params_t *params)
+{
+	memset(params, 0, sizeof(odp_tm_wred_params_t));
+}
+
+odp_tm_wred_t odp_tm_wred_create(char *name, odp_tm_wred_params_t *params)
+{
+	odp_tm_wred_params_t *profile_obj;
+	odp_tm_wred_t wred_handle;
+	odp_int_name_t name_tbl_id;
+
+	profile_obj = tm_common_profile_create(name, TM_WRED_PROFILE,
+					       sizeof(odp_tm_wred_params_t),
+					       &wred_handle,
+					       &name_tbl_id);
+	if (!profile_obj)
+		return ODP_TM_INVALID;
+
+	*profile_obj = *params;
+	return wred_handle;
+}
+
+int odp_tm_wred(odp_tm_wred_t wred_profile, odp_tm_wred_params_t *params)
+{
+	odp_tm_wred_params_t *wred_params;
+
+	wred_params = tm_get_profile_params(wred_profile, TM_WRED_PROFILE);
+	if (!wred_params)
+		return -1;
+
+	*params = *wred_params;
+	return 0;
+}
+
+int odp_tm_wred_set(odp_tm_wred_t wred_profile, odp_tm_wred_params_t *params)
+{
+	odp_tm_wred_params_t *wred_params;
+
+	wred_params = tm_get_profile_params(wred_profile, TM_WRED_PROFILE);
+	if (!wred_params)
+		return -1;
+
+	*wred_params = *params;
+	return 0;
+}
+
+odp_tm_wred_t odp_tm_wred_lookup(char *name)
+{
+	return odp_int_name_tbl_lookup(name, ODP_TM_WRED_PROFILE_HANDLE);
+}
+
+void odp_tm_node_params_init(odp_tm_node_params_t *params)
+{
+	memset(params, 0, sizeof(odp_tm_node_params_t));
+}
+
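+/* odp_tm_node_create allocates the node, its WRED node and a schedulers
+ * object holding one scheduler state (each with its own sorted list, sized
+ * by max_fanin) per priority level, then attaches the optional shaper,
+ * threshold and per-color WRED profiles under the tm_system lock.
+ */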
+odp_tm_node_t odp_tm_node_create(odp_tm_t odp_tm, char *name,
+				 odp_tm_node_params_t *params)
+{
+	odp_int_sorted_list_t sorted_list;
+	tm_schedulers_obj_t *schedulers_obj;
+	odp_int_name_t name_tbl_id;
+	tm_wred_node_t *tm_wred_node;
+	tm_node_obj_t *tm_node_obj;
+	odp_tm_node_t odp_tm_node;
+	odp_tm_wred_t wred_profile;
+	tm_system_t *tm_system;
+	uint32_t num_priorities, priority, schedulers_obj_len, color;
+
+	/* Allocate a tm_node_obj_t record. */
+	tm_system = GET_TM_SYSTEM(odp_tm);
+	tm_node_obj = malloc(sizeof(tm_node_obj_t));
+	if (!tm_node_obj)
+		return ODP_TM_INVALID;
+
+	tm_wred_node = malloc(sizeof(tm_wred_node_t));
+	if (!tm_wred_node) {
+		free(tm_node_obj);
+		return ODP_TM_INVALID;
+	}
+
+	num_priorities = tm_system->capability.max_priority + 1;
+	schedulers_obj_len = sizeof(tm_schedulers_obj_t)
+		+ (sizeof(tm_sched_state_t) * num_priorities);
+	schedulers_obj = malloc(schedulers_obj_len);
+	if (!schedulers_obj) {
+		free(tm_wred_node);
+		free(tm_node_obj);
+		return ODP_TM_INVALID;
+	}
+
+	odp_tm_node = MAKE_ODP_TM_NODE(tm_node_obj);
+	name_tbl_id = ODP_INVALID_NAME;
+	if ((name) && (name[0] != '\0')) {
+		name_tbl_id = odp_int_name_tbl_add(name, ODP_TM_NODE_HANDLE,
+						   odp_tm_node);
+		if (name_tbl_id == ODP_INVALID_NAME) {
+			free(schedulers_obj);
+			free(tm_wred_node);
+			free(tm_node_obj);
+			return ODP_TM_INVALID;
+		}
+	}
+
+	memset(tm_node_obj, 0, sizeof(tm_node_obj_t));
+	memset(tm_wred_node, 0, sizeof(tm_wred_node_t));
+	memset(schedulers_obj, 0, schedulers_obj_len);
+	tm_node_obj->name_tbl_id = name_tbl_id;
+	tm_node_obj->max_fanin = params->max_fanin;
+	tm_node_obj->level = params->level;
+	tm_node_obj->tm_idx = tm_system->tm_idx;
+	tm_node_obj->tm_wred_node = tm_wred_node;
+	tm_node_obj->schedulers_obj = schedulers_obj;
+	odp_ticketlock_init(&tm_wred_node->tm_wred_node_lock);
+
+	schedulers_obj->num_priorities = num_priorities;
+	for (priority = 0; priority < num_priorities; priority++) {
+		sorted_list = odp_sorted_list_create(
+			tm_system->odp_int_sorted_pool,
+			params->max_fanin);
+		schedulers_obj->sched_states[priority].sorted_list =
+			sorted_list;
+	}
+
+	odp_ticketlock_lock(&tm_system->tm_system_lock);
+	if (params->shaper_profile != ODP_TM_INVALID)
+		tm_shaper_config_set(tm_system, params->shaper_profile,
+				     &tm_node_obj->shaper_obj);
+
+	if (params->threshold_profile != ODP_TM_INVALID)
+		tm_wred_node->threshold_params = tm_get_profile_params(
+			params->threshold_profile,
+				TM_THRESHOLD_PROFILE);
+
+	for (color = 0; color < ODP_NUM_PKT_COLORS; color++) {
+		wred_profile = params->wred_profile[color];
+		if (wred_profile != ODP_TM_INVALID)
+			tm_wred_node->wred_params[color] =
+				tm_get_profile_params(wred_profile,
+						      TM_WRED_PROFILE);
+	}
+
+	odp_ticketlock_unlock(&tm_system->tm_system_lock);
+	return odp_tm_node;
+}
+
+int odp_tm_node_shaper_config(odp_tm_node_t tm_node,
+			      odp_tm_shaper_t shaper_profile)
+{
+	tm_node_obj_t *tm_node_obj;
+	tm_system_t *tm_system;
+
+	tm_node_obj = GET_TM_NODE_OBJ(tm_node);
+	if (!tm_node_obj)
+		return -1;
+
+	tm_system = odp_tm_systems[tm_node_obj->tm_idx];
+	if (!tm_system)
+		return -2;
+
+	odp_ticketlock_lock(&tm_profile_lock);
+	tm_shaper_config_set(tm_system, shaper_profile,
+			     &tm_node_obj->shaper_obj);
+	odp_ticketlock_unlock(&tm_profile_lock);
+	return 0;
+}
+
+int odp_tm_node_sched_config(odp_tm_node_t tm_node,
+			     odp_tm_node_t tm_fan_in_node,
+			     odp_tm_sched_t sched_profile)
+{
+	tm_shaper_obj_t *child_shaper_obj;
+	tm_node_obj_t *tm_node_obj, *child_tm_node_obj;
+
+	tm_node_obj = GET_TM_NODE_OBJ(tm_node);
+	if (!tm_node_obj)
+		return -1;
+
+	child_tm_node_obj = GET_TM_NODE_OBJ(tm_fan_in_node);
+	if (!child_tm_node_obj)
+		return -1;
+
+	odp_ticketlock_lock(&tm_profile_lock);
+	child_shaper_obj = &child_tm_node_obj->shaper_obj;
+	child_shaper_obj->sched_params =
+		tm_get_profile_params(sched_profile,
+				      TM_SCHED_PROFILE);
+	odp_ticketlock_unlock(&tm_profile_lock);
+	return 0;
+}
+
+int odp_tm_node_threshold_config(odp_tm_node_t tm_node,
+				 odp_tm_threshold_t thresholds_profile)
+{
+	tm_node_obj_t *tm_node_obj;
+
+	tm_node_obj = GET_TM_NODE_OBJ(tm_node);
+	if (!tm_node_obj)
+		return -1;
+
+	odp_ticketlock_lock(&tm_profile_lock);
+	tm_node_obj->tm_wred_node->threshold_params = tm_get_profile_params(
+		thresholds_profile, TM_THRESHOLD_PROFILE);
+	odp_ticketlock_unlock(&tm_profile_lock);
+	return 0;
+}
+
+int odp_tm_node_wred_config(odp_tm_node_t tm_node, odp_pkt_color_t pkt_color,
+			    odp_tm_wred_t wred_profile)
+{
+	tm_node_obj_t *tm_node_obj;
+	uint32_t color;
+	int rc;
+
+	tm_node_obj = GET_TM_NODE_OBJ(tm_node);
+	if (!tm_node_obj)
+		return -1;
+
+	odp_ticketlock_lock(&tm_profile_lock);
+	rc = 0;
+	if (pkt_color == PKT_ALL_COLORS) {
+		for (color = 0; color < ODP_NUM_PKT_COLORS; color++)
+			tm_node_obj->tm_wred_node->wred_params[color] =
+				tm_get_profile_params(wred_profile,
+						      TM_WRED_PROFILE);
+	} else if (pkt_color < ODP_NUM_PKT_COLORS) {
+		tm_node_obj->tm_wred_node->wred_params[pkt_color] =
+			tm_get_profile_params(wred_profile,
+					      TM_WRED_PROFILE);
+	} else {
+		rc = -1;
+	}
+
+	odp_ticketlock_unlock(&tm_profile_lock);
+	return rc;
+}
+
+odp_tm_node_t odp_tm_node_lookup(odp_tm_t odp_tm ODP_UNUSED, char *name)
+{
+	return odp_int_name_tbl_lookup(name, ODP_TM_NODE_HANDLE);
+}
+
+void odp_tm_queue_params_init(odp_tm_queue_params_t *params)
+{
+	memset(params, 0, sizeof(odp_tm_queue_params_t));
+}
+
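+/* odp_tm_queue_create allocates the queue object and its WRED node, creates
+ * the backing internal pkt queue, assigns the next queue number and records
+ * the object in queue_num_tbl, then attaches the optional shaper, threshold
+ * and per-color WRED profiles under the tm_system lock.
+ */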
+odp_tm_queue_t odp_tm_queue_create(odp_tm_t odp_tm,
+				   odp_tm_queue_params_t *params)
+{
+	odp_int_pkt_queue_t odp_int_pkt_queue;
+	tm_queue_obj_t *tm_queue_obj;
+	tm_wred_node_t *tm_wred_node;
+	odp_tm_queue_t odp_tm_queue;
+	odp_tm_wred_t wred_profile;
+	tm_system_t *tm_system;
+	uint32_t color;
+
+	/* Allocate a tm_queue_obj_t record. */
+	tm_system = GET_TM_SYSTEM(odp_tm);
+	tm_queue_obj = malloc(sizeof(tm_queue_obj_t));
+	if (!tm_queue_obj)
+		return ODP_TM_INVALID;
+
+	tm_wred_node = malloc(sizeof(tm_wred_node_t));
+	if (!tm_wred_node) {
+		free(tm_queue_obj);
+		return ODP_TM_INVALID;
+	}
+
+	odp_int_pkt_queue = odp_pkt_queue_create(
+		tm_system->odp_int_queue_pool);
+	if (odp_int_pkt_queue == ODP_INT_PKT_QUEUE_INVALID) {
+		free(tm_wred_node);
+		free(tm_queue_obj);
+		return ODP_TM_INVALID;
+	}
+
+	odp_tm_queue = MAKE_ODP_TM_QUEUE(tm_queue_obj);
+	memset(tm_queue_obj, 0, sizeof(tm_queue_obj_t));
+	memset(tm_wred_node, 0, sizeof(tm_wred_node_t));
+	tm_queue_obj->priority = params->priority;
+	tm_queue_obj->tm_idx = tm_system->tm_idx;
+	tm_queue_obj->queue_num = tm_system->next_queue_num++;
+	tm_queue_obj->tm_wred_node = tm_wred_node;
+	tm_queue_obj->odp_int_pkt_queue = odp_int_pkt_queue;
+	tm_queue_obj->pkt = ODP_PACKET_INVALID;
+	odp_ticketlock_init(&tm_wred_node->tm_wred_node_lock);
+
+	tm_system->queue_num_tbl[tm_queue_obj->queue_num] = tm_queue_obj;
+	odp_ticketlock_lock(&tm_system->tm_system_lock);
+	if (params->shaper_profile != ODP_TM_INVALID)
+		tm_shaper_config_set(tm_system, params->shaper_profile,
+				     &tm_queue_obj->shaper_obj);
+
+	if (params->threshold_profile != ODP_TM_INVALID)
+		tm_wred_node->threshold_params = tm_get_profile_params(
+			params->threshold_profile,
+			TM_THRESHOLD_PROFILE);
+
+	for (color = 0; color < ODP_NUM_PKT_COLORS; color++) {
+		wred_profile = params->wred_profile[color];
+		if (wred_profile != ODP_TM_INVALID)
+			tm_wred_node->wred_params[color] =
+				tm_get_profile_params(wred_profile,
+						      TM_WRED_PROFILE);
+	}
+
+	odp_ticketlock_unlock(&tm_system->tm_system_lock);
+	return odp_tm_queue;
+}
+
+int odp_tm_queue_shaper_config(odp_tm_queue_t tm_queue,
+			       odp_tm_shaper_t shaper_profile)
+{
+	tm_queue_obj_t *tm_queue_obj;
+	tm_system_t *tm_system;
+
+	tm_queue_obj = GET_TM_QUEUE_OBJ(tm_queue);
+	if (!tm_queue_obj)
+		return -1;
+
+	tm_system = odp_tm_systems[tm_queue_obj->tm_idx];
+	if (!tm_system)
+		return -2;
+
+	odp_ticketlock_lock(&tm_profile_lock);
+	tm_shaper_config_set(tm_system, shaper_profile,
+			     &tm_queue_obj->shaper_obj);
+	odp_ticketlock_unlock(&tm_profile_lock);
+	return 0;
+}
+
+int odp_tm_queue_sched_config(odp_tm_node_t tm_node,
+			      odp_tm_queue_t tm_fan_in_queue,
+			      odp_tm_sched_t sched_profile)
+{
+	tm_shaper_obj_t *child_shaper_obj;
+	tm_queue_obj_t *child_tm_queue_obj;
+	tm_node_obj_t *tm_node_obj;
+
+	tm_node_obj = GET_TM_NODE_OBJ(tm_node);
+	if (!tm_node_obj)
+		return -1;
+
+	child_tm_queue_obj = GET_TM_QUEUE_OBJ(tm_fan_in_queue);
+	if (!child_tm_queue_obj)
+		return -1;
+
+	odp_ticketlock_lock(&tm_profile_lock);
+	child_shaper_obj = &child_tm_queue_obj->shaper_obj;
+	child_shaper_obj->sched_params =
+		tm_get_profile_params(sched_profile,
+				      TM_SCHED_PROFILE);
+	odp_ticketlock_unlock(&tm_profile_lock);
+	return 0;
+}
+
+int odp_tm_queue_threshold_config(odp_tm_queue_t tm_queue,
+				  odp_tm_threshold_t thresholds_profile)
+{
+	tm_queue_obj_t *tm_queue_obj;
+
+	tm_queue_obj = GET_TM_QUEUE_OBJ(tm_queue);
+	if (!tm_queue_obj)
+		return -1;
+
+	odp_ticketlock_lock(&tm_profile_lock);
+	tm_queue_obj->tm_wred_node->threshold_params = tm_get_profile_params(
+		thresholds_profile, TM_THRESHOLD_PROFILE);
+	odp_ticketlock_unlock(&tm_profile_lock);
+	return 0;
+}
+
+int odp_tm_queue_wred_config(odp_tm_queue_t tm_queue,
+			     odp_pkt_color_t pkt_color,
+			     odp_tm_wred_t wred_profile)
+{
+	tm_queue_obj_t *tm_queue_obj;
+	uint32_t color;
+	int rc;
+
+	tm_queue_obj = GET_TM_QUEUE_OBJ(tm_queue);
+	if (!tm_queue_obj)
+		return -1;
+
+	odp_ticketlock_lock(&tm_profile_lock);
+	rc = 0;
+	if (pkt_color == PKT_ALL_COLORS) {
+		for (color = 0; color < ODP_NUM_PKT_COLORS; color++)
+			tm_queue_obj->tm_wred_node->wred_params[color] =
+				tm_get_profile_params(wred_profile,
+						      TM_WRED_PROFILE);
+	} else if (pkt_color < ODP_NUM_PKT_COLORS) {
+		tm_queue_obj->tm_wred_node->wred_params[pkt_color] =
+			tm_get_profile_params(wred_profile,
+					      TM_WRED_PROFILE);
+	} else {
+		rc = -1;
+	}
+
+	odp_ticketlock_unlock(&tm_profile_lock);
+	return rc;
+}
+
+/* Recurse up the tree towards the egress, setting connected_to_egress along
+ * the way.
+ */
+static odp_bool_t odp_tm_tree_traverse(tm_node_obj_t *tm_node_obj)
+{
+	tm_shaper_obj_t *shaper_obj;
+	tm_node_obj_t *next_tm_node_obj;
+	odp_bool_t connected_to_egress;
+
+	shaper_obj = &tm_node_obj->shaper_obj;
+	next_tm_node_obj = shaper_obj->next_tm_node;
+	if (!next_tm_node_obj)
+		return tm_node_obj->connected_to_egress;
+
+	connected_to_egress = odp_tm_tree_traverse(next_tm_node_obj);
+	tm_node_obj->connected_to_egress = connected_to_egress;
+	return connected_to_egress;
+}
+
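+/* Connecting a node to an invalid/NULL destination marks it as feeding the
+ * egress spigot directly; otherwise the WRED nodes are chained and the
+ * connected_to_egress state of the destination subtree (refreshed by
+ * odp_tm_tree_traverse) is propagated to the source.
+ */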
+int odp_tm_node_connect(odp_tm_node_t src_tm_node, odp_tm_node_t dst_tm_node)
+{
+	tm_wred_node_t *src_tm_wred_node, *dst_tm_wred_node;
+	tm_node_obj_t *src_tm_node_obj, *dst_tm_node_obj;
+
+	src_tm_node_obj = GET_TM_NODE_OBJ(src_tm_node);
+	dst_tm_node_obj = GET_TM_NODE_OBJ(dst_tm_node);
+	if (!src_tm_node_obj)
+		return -1;
+
+	if (!dst_tm_node_obj) {
+		/* This node connects directly to the egress spigot. */
+		src_tm_node_obj->connected_to_egress = 1;
+	} else {
+		odp_tm_tree_traverse(dst_tm_node_obj);
+		src_tm_wred_node = src_tm_node_obj->tm_wred_node;
+		dst_tm_wred_node = dst_tm_node_obj->tm_wred_node;
+		src_tm_wred_node->next_tm_wred_node = dst_tm_wred_node;
+		if (dst_tm_node_obj->connected_to_egress)
+			src_tm_node_obj->connected_to_egress = 1;
+	}
+
+	src_tm_node_obj->shaper_obj.next_tm_node = dst_tm_node_obj;
+	return 0;
+}
+
+int odp_tm_queue_connect(odp_tm_queue_t tm_queue, odp_tm_node_t dst_tm_node)
+{
+	tm_wred_node_t *src_tm_wred_node, *dst_tm_wred_node;
+	tm_queue_obj_t *src_tm_queue_obj;
+	tm_node_obj_t *dst_tm_node_obj;
+
+	src_tm_queue_obj = GET_TM_QUEUE_OBJ(tm_queue);
+	dst_tm_node_obj = GET_TM_NODE_OBJ(dst_tm_node);
+	if ((!src_tm_queue_obj) || (!dst_tm_node_obj))
+		return -1;
+
+	odp_tm_tree_traverse(dst_tm_node_obj);
+	if (dst_tm_node_obj->connected_to_egress)
+		src_tm_queue_obj->connected_to_egress = 1;
+
+	src_tm_wred_node = src_tm_queue_obj->tm_wred_node;
+	dst_tm_wred_node = dst_tm_node_obj->tm_wred_node;
+	src_tm_wred_node->next_tm_wred_node = dst_tm_wred_node;
+
+	src_tm_queue_obj->shaper_obj.next_tm_node = dst_tm_node_obj;
+	return 0;
+}
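+
+/* A sketch of wiring a single node and queue and sending one packet through
+ * them; node_params, queue_params and pkt are assumed to have been set up
+ * by the caller, and ODP_TM_INVALID is assumed to be the NULL destination
+ * handle that selects the egress (the !dst_tm_node_obj case above):
+ *
+ *	odp_tm_node_t node;
+ *	odp_tm_queue_t queue;
+ *
+ *	node = odp_tm_node_create(odp_tm, "node0", &node_params);
+ *	queue = odp_tm_queue_create(odp_tm, &queue_params);
+ *	odp_tm_node_connect(node, ODP_TM_INVALID);
+ *	odp_tm_queue_connect(queue, node);
+ *	odp_tm_enq(queue, pkt);
+ */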
+
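+/* odp_tm_enq hands the packet to tm_enqueue() for processing by the TM
+ * service thread; enqueues are refused once the system is being destroyed.
+ */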
+int odp_tm_enq(odp_tm_queue_t tm_queue, odp_packet_t pkt)
+{
+	tm_queue_obj_t *tm_queue_obj;
+	tm_system_t *tm_system;
+
+	tm_queue_obj = GET_TM_QUEUE_OBJ(tm_queue);
+	if (!tm_queue_obj)
+		return -1; /* @todo fix magic number */
+
+	tm_system = odp_tm_systems[tm_queue_obj->tm_idx];
+	if (!tm_system)
+		return -2; /* @todo fix magic number */
+
+	if (odp_atomic_load_u32(&tm_system->destroying))
+		return -6; /* @todo fix magic number */
+
+	return tm_enqueue(tm_system, tm_queue_obj, pkt);
+}
+
+int odp_tm_enq_with_cnt(odp_tm_queue_t tm_queue, odp_packet_t pkt)
+{
+	tm_queue_obj_t *tm_queue_obj;
+	tm_system_t *tm_system;
+	uint32_t pkt_cnt;
+	int rc;
+
+	tm_queue_obj = GET_TM_QUEUE_OBJ(tm_queue);
+	if (!tm_queue_obj)
+		return -1;
+
+	tm_system = odp_tm_systems[tm_queue_obj->tm_idx];
+	if (!tm_system)
+		return -2;
+
+	if (odp_atomic_load_u32(&tm_system->destroying))
+		return -6;
+
+	rc = tm_enqueue(tm_system, tm_queue_obj, pkt);
+	if (rc < 0)
+		return rc;
+
+	pkt_cnt = rc;
+	return pkt_cnt;
+}
+
+#ifdef NOT_USED /* @todo use or delete */
+static uint32_t odp_tm_input_work_queue_fullness(odp_tm_t odp_tm ODP_UNUSED)
+{
+	input_work_queue_t *input_work_queue;
+	tm_system_t *tm_system;
+	uint32_t queue_cnt, fullness;
+
+	tm_system = GET_TM_SYSTEM(odp_tm);
+	input_work_queue = tm_system->input_work_queue;
+	queue_cnt = odp_atomic_load_u32(&input_work_queue->queue_cnt);
+	fullness = (100 * queue_cnt) / INPUT_WORK_RING_SIZE;
+	return fullness;
+}
+#endif
+
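+/* tm_queue_info_copy converts the internal counters (and, when
+ * ODP_TM_QUERY_THRESHOLDS is requested, the threshold limits) into the
+ * public odp_tm_queue_info_t layout used by the query calls below.
+ */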
+static int tm_queue_info_copy(tm_queue_info_t *queue_info, uint32_t query_flags,
+			      odp_tm_queue_info_t *info)
+{
+	tm_queue_thresholds_t *threshold_params;
+
+	memset(info, 0, sizeof(odp_tm_queue_info_t));
+	info->total_pkt_cnt =
+		odp_atomic_load_u64(&queue_info->queue_cnts.pkt_cnt);
+	info->total_byte_cnt =
+		odp_atomic_load_u64(&queue_info->queue_cnts.byte_cnt);
+	info->total_pkt_cnt_valid = 1;
+	info->total_byte_cnt_valid = 1;
+	info->approx_byte_cnt = 0;
+
+	if (query_flags & ODP_TM_QUERY_THRESHOLDS) {
+		threshold_params = queue_info->threshold_params;
+		if (!threshold_params)
+			return -1;
+
+		info->max_pkt_cnt = threshold_params->max_pkts;
+		info->max_byte_cnt = threshold_params->max_bytes;
+		info->max_pkt_cnt_valid = info->max_pkt_cnt != 0;
+		info->max_byte_cnt_valid = info->max_byte_cnt != 0;
+	}
+
+	return 0;
+}
+
+int odp_tm_queue_query(odp_tm_queue_t tm_queue, uint32_t query_flags,
+		       odp_tm_queue_info_t *info)
+{
+	tm_queue_info_t queue_info;
+	tm_queue_obj_t *tm_queue_obj;
+	tm_wred_node_t *tm_wred_node;
+
+	tm_queue_obj = GET_TM_QUEUE_OBJ(tm_queue);
+	if (!tm_queue_obj)
+		return -1;
+
+	tm_wred_node = tm_queue_obj->tm_wred_node;
+	if (!tm_wred_node)
+		return -2;
+
+	queue_info.threshold_params = tm_wred_node->threshold_params;
+	queue_info.queue_cnts = tm_wred_node->queue_cnts;
+	return tm_queue_info_copy(&queue_info, query_flags, info);
+}
+
+int odp_tm_priority_query(odp_tm_t odp_tm, uint8_t priority,
+			  uint32_t query_flags, odp_tm_queue_info_t *info)
+{
+	tm_queue_info_t queue_info;
+	tm_system_t *tm_system;
+
+	tm_system = GET_TM_SYSTEM(odp_tm);
+	queue_info = tm_system->priority_info[priority];
+	return tm_queue_info_copy(&queue_info, query_flags, info);
+}
+
+int odp_tm_total_query(odp_tm_t odp_tm, uint32_t query_flags,
+		       odp_tm_queue_info_t *info)
+{
+	tm_queue_info_t queue_info;
+	tm_system_t *tm_system;
+
+	tm_system = GET_TM_SYSTEM(odp_tm);
+	queue_info = tm_system->total_info;
+	return tm_queue_info_copy(&queue_info, query_flags, info);
+}
+
+int odp_tm_priority_threshold_config(odp_tm_t odp_tm, uint8_t priority,
+				     odp_tm_threshold_t thresholds_profile)
+{
+	tm_system_t *tm_system;
+
+	tm_system = GET_TM_SYSTEM(odp_tm);
+
+	odp_ticketlock_lock(&tm_profile_lock);
+	tm_system->priority_info[priority].threshold_params =
+		tm_get_profile_params(thresholds_profile,
+				      TM_THRESHOLD_PROFILE);
+	odp_ticketlock_unlock(&tm_profile_lock);
+	return 0;
+}
+
+int odp_tm_total_threshold_config(odp_tm_t odp_tm,
+				  odp_tm_threshold_t thresholds_profile)
+{
+	tm_system_t *tm_system;
+
+	tm_system = GET_TM_SYSTEM(odp_tm);
+
+	odp_ticketlock_lock(&tm_profile_lock);
+	tm_system->total_info.threshold_params = tm_get_profile_params(
+		thresholds_profile, TM_THRESHOLD_PROFILE);
+	odp_ticketlock_unlock(&tm_profile_lock);
+	return 0;
+}
+
+#ifdef NOT_USED /* @todo use or delete this */
+static void odp_tm_stats_print(odp_tm_t odp_tm)
+{
+	input_work_queue_t *input_work_queue;
+	tm_queue_obj_t *tm_queue_obj;
+	tm_system_t *tm_system;
+	uint32_t queue_num;
+
+	tm_system = GET_TM_SYSTEM(odp_tm);
+	input_work_queue = tm_system->input_work_queue;
+
+	ODP_DBG("odp_tm_stats_print - tm_system=0x%lX tm_idx=%u\n", odp_tm,
+		tm_system->tm_idx);
+	ODP_DBG("  input_work_queue size=%u current cnt=%u peak cnt=%u\n",
+		INPUT_WORK_RING_SIZE, input_work_queue->queue_cnt,
+		input_work_queue->peak_cnt);
+	ODP_DBG("  input_work_queue enqueues=%lu dequeues=%lu fail_cnt=%lu\n",
+		input_work_queue->total_enqueues,
+		input_work_queue->total_dequeues,
+		input_work_queue->enqueue_fail_cnt);
+	ODP_DBG("  green_cnt=%lu yellow_cnt=%lu red_cnt=%lu\n",
+		tm_system->shaper_green_cnt,
+		tm_system->shaper_yellow_cnt,
+		tm_system->shaper_red_cnt);
+
+	odp_pkt_queue_stats_print(tm_system->odp_int_queue_pool);
+	odp_timer_wheel_stats_print(tm_system->odp_int_timer_wheel);
+	odp_sorted_list_stats_print(tm_system->odp_int_sorted_pool);
+
+	for (queue_num = 1; queue_num < tm_system->next_queue_num;
+	     queue_num++) {
+		tm_queue_obj = tm_system->queue_num_tbl[queue_num];
+		ODP_DBG("queue_num=%u priority=%u rcvd=%u enqueued=%u "
+			"dequeued=%u consumed=%u\n",
+			queue_num,
+			tm_queue_obj->priority,
+			tm_queue_obj->pkts_rcvd_cnt,
+			tm_queue_obj->pkts_enqueued_cnt,
+			tm_queue_obj->pkts_dequeued_cnt,
+			tm_queue_obj->pkts_consumed_cnt);
+	}
+}
+#endif
+
+void odp_tm_periodic_update(void)
+{
+	return; /* Nothing to be done here for this implementation. */
+}
+
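+/* Called once at ODP global init time to initialize the TM create/profile
+ * locks and the first-enqueue barrier shared by all TM systems.
+ */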
+int odp_tm_init_global(void)
+{
+	odp_ticketlock_init(&tm_create_lock);
+	odp_ticketlock_init(&tm_profile_lock);
+	odp_barrier_init(&tm_first_enq, 2);
+
+	return 0;
+}