
[PATCHv2,2/2] Initial review stage for ODP v1.0 buffer packet APIs

Message ID 1415593483-7910-1-git-send-email-bill.fischofer@linaro.org
State New

Commit Message

Bill Fischofer Nov. 10, 2014, 4:24 a.m. UTC
Signed-off-by: Bill Fischofer <bill.fischofer@linaro.org>
---

Added missing odp_buffer_inlines.h file that was mistakenly omitted from v1.  
Otherwise identical.

 example/generator/odp_generator.c                  |   69 +-
 example/ipsec/odp_ipsec.c                          |  120 +-
 example/ipsec/odp_ipsec_stream.c                   |   25 +-
 example/l2fwd/odp_l2fwd.c                          |   22 +-
 example/odp_example/odp_example.c                  |   18 +-
 example/packet/odp_pktio.c                         |   28 +-
 example/timer/odp_timer_test.c                     |   20 +-
 helper/include/odph_ip.h                           |   47 +-
 helper/include/odph_packet.h                       |   97 -
 helper/include/odph_udp.h                          |   11 +-
 platform/linux-generic/Makefile.am                 |    4 +-
 platform/linux-generic/include/api/odp.h           |    1 -
 platform/linux-generic/include/api/odp_buffer.h    |  503 ++++-
 .../linux-generic/include/api/odp_buffer_pool.h    |  341 ++-
 platform/linux-generic/include/api/odp_config.h    |    6 +
 platform/linux-generic/include/api/odp_packet.h    | 2294 ++++++++++++++++++--
 .../linux-generic/include/api/odp_packet_flags.h   |  334 ---
 .../linux-generic/include/odp_buffer_inlines.h     |  164 ++
 .../linux-generic/include/odp_buffer_internal.h    |  112 +-
 .../include/odp_buffer_pool_internal.h             |  159 +-
 .../linux-generic/include/odp_packet_internal.h    |  129 +-
 .../linux-generic/include/odp_timer_internal.h     |   15 +-
 platform/linux-generic/odp_buffer.c                |   59 +-
 platform/linux-generic/odp_buffer_pool.c           |  598 ++---
 platform/linux-generic/odp_crypto.c                |    3 +-
 platform/linux-generic/odp_packet.c                | 1016 +++++++--
 platform/linux-generic/odp_packet_flags.c          |  202 --
 platform/linux-generic/odp_packet_io.c             |    7 +-
 platform/linux-generic/odp_packet_socket.c         |   90 +-
 platform/linux-generic/odp_queue.c                 |    7 +-
 platform/linux-generic/odp_schedule.c              |   20 +-
 test/api_test/odp_timer_ping.c                     |   19 +-
 32 files changed, 4602 insertions(+), 1938 deletions(-)
 delete mode 100644 helper/include/odph_packet.h
 delete mode 100644 platform/linux-generic/include/api/odp_packet_flags.h
 create mode 100644 platform/linux-generic/include/odp_buffer_inlines.h
 delete mode 100644 platform/linux-generic/odp_packet_flags.c

Comments

Balasubramanian Manoharan Nov. 10, 2014, 4:39 a.m. UTC | #1
Hi,

Looks like this patch is missing the odph_tcp.h file:

odp_packet.c:14:22: fatal error: odph_tcp.h: No such file or directory

Regards,
Bala

On 10 November 2014 09:54, Bill Fischofer <bill.fischofer@linaro.org> wrote:

> Signed-off-by: Bill Fischofer <bill.fischofer@linaro.org>
> ---
>
> Added missing odp_buffer_inlines.h file that was mistakenly omitted from
> v1.
> Otherwise identical.
>
>  example/generator/odp_generator.c                  |   69 +-
>  example/ipsec/odp_ipsec.c                          |  120 +-
>  example/ipsec/odp_ipsec_stream.c                   |   25 +-
>  example/l2fwd/odp_l2fwd.c                          |   22 +-
>  example/odp_example/odp_example.c                  |   18 +-
>  example/packet/odp_pktio.c                         |   28 +-
>  example/timer/odp_timer_test.c                     |   20 +-
>  helper/include/odph_ip.h                           |   47 +-
>  helper/include/odph_packet.h                       |   97 -
>  helper/include/odph_udp.h                          |   11 +-
>  platform/linux-generic/Makefile.am                 |    4 +-
>  platform/linux-generic/include/api/odp.h           |    1 -
>  platform/linux-generic/include/api/odp_buffer.h    |  503 ++++-
>  .../linux-generic/include/api/odp_buffer_pool.h    |  341 ++-
>  platform/linux-generic/include/api/odp_config.h    |    6 +
>  platform/linux-generic/include/api/odp_packet.h    | 2294 ++++++++++++++++++--
>  .../linux-generic/include/api/odp_packet_flags.h   |  334 ---
>  .../linux-generic/include/odp_buffer_inlines.h     |  164 ++
>  .../linux-generic/include/odp_buffer_internal.h    |  112 +-
>  .../include/odp_buffer_pool_internal.h             |  159 +-
>  .../linux-generic/include/odp_packet_internal.h    |  129 +-
>  .../linux-generic/include/odp_timer_internal.h     |   15 +-
>  platform/linux-generic/odp_buffer.c                |   59 +-
>  platform/linux-generic/odp_buffer_pool.c           |  598 ++---
>  platform/linux-generic/odp_crypto.c                |    3 +-
>  platform/linux-generic/odp_packet.c                | 1016 +++++++--
>  platform/linux-generic/odp_packet_flags.c          |  202 --
>  platform/linux-generic/odp_packet_io.c             |    7 +-
>  platform/linux-generic/odp_packet_socket.c         |   90 +-
>  platform/linux-generic/odp_queue.c                 |    7 +-
>  platform/linux-generic/odp_schedule.c              |   20 +-
>  test/api_test/odp_timer_ping.c                     |   19 +-
>  32 files changed, 4602 insertions(+), 1938 deletions(-)
>  delete mode 100644 helper/include/odph_packet.h
>  delete mode 100644 platform/linux-generic/include/api/odp_packet_flags.h
>  create mode 100644 platform/linux-generic/include/odp_buffer_inlines.h
>  delete mode 100644 platform/linux-generic/odp_packet_flags.c
>
> diff --git a/example/generator/odp_generator.c b/example/generator/odp_generator.c
> index ffa5e62..efa418f 100644
> --- a/example/generator/odp_generator.c
> +++ b/example/generator/odp_generator.c
> @@ -19,7 +19,6 @@
>  #include <odp.h>
>
>  #include <odph_linux.h>
> -#include <odph_packet.h>
>  #include <odph_eth.h>
>  #include <odph_ip.h>
>  #include <odph_udp.h>
> @@ -168,24 +167,24 @@ static int scan_mac(char *in, odph_ethaddr_t *des)
>   *
>   * @param obuf packet buffer
>  */
> -static void pack_udp_pkt(odp_buffer_t obuf)
> +static void pack_udp_pkt(odp_packet_t pkt)
>  {
>         char *buf;
> -       int max;
> -       odp_packet_t pkt;
> +
>         odph_ethhdr_t *eth;
>         odph_ipv4hdr_t *ip;
>         odph_udphdr_t *udp;
>         unsigned short seq;
> +       size_t seglen;
> +
> +       buf = odp_packet_push_tail_and_map(pkt, args->appl.payload +
> +                                          ODPH_UDPHDR_LEN +
> +                                          ODPH_IPV4HDR_LEN +
> +                                          ODPH_ETHHDR_LEN, &seglen);
>
> -       buf = odp_buffer_addr(obuf);
>         if (buf == NULL)
>                 return;
> -       max = odp_buffer_size(obuf);
> -       if (max <= 0)
> -               return;
>
> -       pkt = odp_packet_from_buffer(obuf);
>         /* ether */
>         odp_packet_set_l2_offset(pkt, 0);
>         eth = (odph_ethhdr_t *)buf;
> @@ -213,8 +212,7 @@ static void pack_udp_pkt(odp_buffer_t obuf)
>         udp->length = odp_cpu_to_be_16(args->appl.payload + ODPH_UDPHDR_LEN);
>         udp->chksum = 0;
>         udp->chksum = odp_cpu_to_be_16(odph_ipv4_udp_chksum(pkt));
> -       odp_packet_set_len(pkt, args->appl.payload + ODPH_UDPHDR_LEN +
> -                          ODPH_IPV4HDR_LEN + ODPH_ETHHDR_LEN);
> +
>  }
>
>  /**
> @@ -222,27 +220,27 @@ static void pack_udp_pkt(odp_buffer_t obuf)
>   *
>   * @param obuf packet buffer
>  */
> -static void pack_icmp_pkt(odp_buffer_t obuf)
> +static void pack_icmp_pkt(odp_packet_t pkt)
>  {
>         char *buf;
> -       int max;
> -       odp_packet_t pkt;
> +
>         odph_ethhdr_t *eth;
>         odph_ipv4hdr_t *ip;
>         odph_icmphdr_t *icmp;
>         struct timeval tval;
>         uint8_t *tval_d;
>         unsigned short seq;
> +       size_t seglen;
>
> -       buf = odp_buffer_addr(obuf);
> +       buf = odp_packet_push_tail_and_map(pkt, args->appl.payload +
> +                                          ODPH_ICMPHDR_LEN +
> +                                          ODPH_IPV4HDR_LEN +
> +                                          ODPH_ETHHDR_LEN, &seglen);
>         if (buf == NULL)
>                 return;
> -       max = odp_buffer_size(obuf);
> -       if (max <= 0)
> -               return;
>
>         args->appl.payload = 56;
> -       pkt = odp_packet_from_buffer(obuf);
> +
>         /* ether */
>         odp_packet_set_l2_offset(pkt, 0);
>         eth = (odph_ethhdr_t *)buf;
> @@ -277,9 +275,6 @@ static void pack_icmp_pkt(odp_buffer_t obuf)
>         icmp->chksum = 0;
>         icmp->chksum = odp_chksum(icmp, args->appl.payload +
>                                   ODPH_ICMPHDR_LEN);
> -
> -       odp_packet_set_len(pkt, args->appl.payload + ODPH_ICMPHDR_LEN +
> -                          ODPH_IPV4HDR_LEN + ODPH_ETHHDR_LEN);
>  }
>
>  /**
> @@ -295,7 +290,7 @@ static void *gen_send_thread(void *arg)
>         thread_args_t *thr_args;
>         odp_queue_t outq_def;
>
> -       odp_buffer_t buf;
> +       odp_packet_t buf;
>
>         thr = odp_thread_id();
>         thr_args = arg;
> @@ -316,8 +311,8 @@ static void *gen_send_thread(void *arg)
>         printf("  [%02i] created mode: SEND\n", thr);
>         for (;;) {
>                 int err;
> -               buf = odp_buffer_alloc(thr_args->pool);
> -               if (!odp_buffer_is_valid(buf)) {
> +               buf = odp_packet_alloc(thr_args->pool);
> +               if (!odp_packet_is_valid(buf)) {
>                         ODP_ERR("  [%2i] alloc_single failed\n", thr);
>                         return NULL;
>                 }
> @@ -493,13 +488,13 @@ static void *gen_recv_thread(void *arg)
>                 pkt = odp_packet_from_buffer(buf);
>                 /* Drop packets with errors */
>                 if (odp_unlikely(odp_packet_error(pkt))) {
> -                       odph_packet_free(pkt);
> +                       odp_packet_free(pkt);
>                         continue;
>                 }
>
>                 print_pkts(thr, &pkt, 1);
>
> -               odph_packet_free(pkt);
> +               odp_packet_free(pkt);
>         }
>
>         return arg;
> @@ -512,11 +507,11 @@ int main(int argc, char *argv[])
>         odph_linux_pthread_t thread_tbl[MAX_WORKERS];
>         odp_buffer_pool_t pool;
>         int num_workers;
> -       void *pool_base;
>         int i;
>         int first_core;
>         int core_count;
>         odp_shm_t shm;
> +       odp_buffer_pool_param_t params;
>
>         /* Init ODP before calling anything else */
>         if (odp_init_global(NULL, NULL)) {
> @@ -579,20 +574,14 @@ int main(int argc, char *argv[])
>         printf("First core:         %i\n\n", first_core);
>
>         /* Create packet pool */
> -       shm = odp_shm_reserve("shm_packet_pool",
> -                             SHM_PKT_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0);
> -       pool_base = odp_shm_addr(shm);
>
> -       if (pool_base == NULL) {
> -               ODP_ERR("Error: packet pool mem alloc failed.\n");
> -               exit(EXIT_FAILURE);
> -       }
> +       params.buf_size = SHM_PKT_POOL_BUF_SIZE;
> +       params.buf_num  = SHM_PKT_POOL_SIZE/SHM_PKT_POOL_BUF_SIZE;
> +       params.buf_type = ODP_BUFFER_TYPE_PACKET;
> +       params.buf_opts = ODP_BUFFER_OPTS_UNSEGMENTED;
> +
> +       pool = odp_buffer_pool_create("packet_pool", &params, NULL);
>
> -       pool = odp_buffer_pool_create("packet_pool", pool_base,
> -                                     SHM_PKT_POOL_SIZE,
> -                                     SHM_PKT_POOL_BUF_SIZE,
> -                                     ODP_CACHE_LINE_SIZE,
> -                                     ODP_BUFFER_TYPE_PACKET);
>         if (pool == ODP_BUFFER_POOL_INVALID) {
>                 ODP_ERR("Error: packet pool create failed.\n");
>                 exit(EXIT_FAILURE);
> diff --git a/example/ipsec/odp_ipsec.c b/example/ipsec/odp_ipsec.c
> index da6c48e..3b39be2 100644
> --- a/example/ipsec/odp_ipsec.c
> +++ b/example/ipsec/odp_ipsec.c
> @@ -18,7 +18,6 @@
>  #include <odp.h>
>
>  #include <odph_linux.h>
> -#include <odph_packet.h>
>  #include <odph_eth.h>
>  #include <odph_ip.h>
>  #include <odph_icmp.h>
> @@ -154,8 +153,6 @@ typedef struct {
>  #define SHM_CTX_POOL_BUF_COUNT (SHM_PKT_POOL_BUF_COUNT + SHM_OUT_POOL_BUF_COUNT)
>  #define SHM_CTX_POOL_SIZE      (SHM_CTX_POOL_BUF_COUNT * SHM_CTX_POOL_BUF_SIZE)
>
> -static odp_buffer_pool_t ctx_pool = ODP_BUFFER_POOL_INVALID;
> -
>  /**
>   * Get per packet processing context from packet buffer
>   *
> @@ -166,33 +163,7 @@ static odp_buffer_pool_t ctx_pool = ODP_BUFFER_POOL_INVALID;
>  static
>  pkt_ctx_t *get_pkt_ctx_from_pkt(odp_packet_t pkt)
>  {
> -       return (pkt_ctx_t *)odp_packet_get_ctx(pkt);
> -}
> -
> -/**
> - * Allocate per packet processing context and associate it with
> - * packet buffer
> - *
> - * @param pkt  Packet
> - *
> - * @return pointer to context area
> - */
> -static
> -pkt_ctx_t *alloc_pkt_ctx(odp_packet_t pkt)
> -{
> -       odp_buffer_t ctx_buf = odp_buffer_alloc(ctx_pool);
> -       pkt_ctx_t *ctx;
> -
> -       /* There should always be enough contexts */
> -       if (odp_unlikely(ODP_BUFFER_INVALID == ctx_buf))
> -               abort();
> -
> -       ctx = odp_buffer_addr(ctx_buf);
> -       memset(ctx, 0, sizeof(*ctx));
> -       ctx->buffer = ctx_buf;
> -       odp_packet_set_ctx(pkt, ctx);
> -
> -       return ctx;
> +       return (pkt_ctx_t *)odp_packet_udata_addr(pkt);
>  }
>
>  /**
> @@ -365,8 +336,7 @@ static
>  void ipsec_init_pre(void)
>  {
>         odp_queue_param_t qparam;
> -       void *pool_base;
> -       odp_shm_t shm;
> +       odp_buffer_pool_param_t params;
>
>         /*
>          * Create queues
> @@ -399,16 +369,12 @@ void ipsec_init_pre(void)
>         }
>
>         /* Create output buffer pool */
> -       shm = odp_shm_reserve("shm_out_pool",
> -                             SHM_OUT_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0);
> +       params.buf_num  = SHM_OUT_POOL_BUF_COUNT;
> +       params.buf_size = SHM_OUT_POOL_BUF_SIZE;
> +       params.buf_type = ODP_BUFFER_TYPE_PACKET;
> +       params.buf_opts = ODP_BUFFER_OPTS_UNSEGMENTED;
>
> -       pool_base = odp_shm_addr(shm);
> -
> -       out_pool = odp_buffer_pool_create("out_pool", pool_base,
> -                                         SHM_OUT_POOL_SIZE,
> -                                         SHM_OUT_POOL_BUF_SIZE,
> -                                         ODP_CACHE_LINE_SIZE,
> -                                         ODP_BUFFER_TYPE_PACKET);
> +       out_pool = odp_buffer_pool_create("out_pool", &params, NULL);
>
>         if (ODP_BUFFER_POOL_INVALID == out_pool) {
>                 ODP_ERR("Error: message pool create failed.\n");
> @@ -637,13 +603,15 @@ pkt_disposition_e do_input_verify(odp_packet_t pkt, pkt_ctx_t *ctx ODP_UNUSED)
>  static
>  pkt_disposition_e do_route_fwd_db(odp_packet_t pkt, pkt_ctx_t *ctx)
>  {
> -       odph_ipv4hdr_t *ip = (odph_ipv4hdr_t *)odp_packet_l3(pkt);
> +       size_t seglen;
> +       odph_ipv4hdr_t *ip = (odph_ipv4hdr_t *)odp_packet_l3_map(pkt, &seglen);
>         fwd_db_entry_t *entry;
>
>         entry = find_fwd_db_entry(odp_be_to_cpu_32(ip->dst_addr));
>
>         if (entry) {
> -               odph_ethhdr_t *eth = (odph_ethhdr_t *)odp_packet_l2(pkt);
> +               odph_ethhdr_t *eth =
> +                       (odph_ethhdr_t *)odp_packet_l2_map(pkt, &seglen);
>
>                 memcpy(&eth->dst, entry->dst_mac, ODPH_ETHADDR_LEN);
>                 memcpy(&eth->src, entry->src_mac, ODPH_ETHADDR_LEN);
> @@ -673,8 +641,9 @@ pkt_disposition_e do_ipsec_in_classify(odp_packet_t pkt,
>                                        pkt_ctx_t *ctx,
>                                        bool *skip)
>  {
> +       size_t seglen;
>         uint8_t *buf = odp_packet_addr(pkt);
> -       odph_ipv4hdr_t *ip = (odph_ipv4hdr_t *)odp_packet_l3(pkt);
> +       odph_ipv4hdr_t *ip = (odph_ipv4hdr_t *)odp_packet_l3_map(pkt, &seglen);
>         int hdr_len;
>         odph_ahhdr_t *ah = NULL;
>         odph_esphdr_t *esp = NULL;
> @@ -759,6 +728,7 @@ pkt_disposition_e do_ipsec_in_finish(odp_packet_t pkt,
>         odp_crypto_compl_status_t cipher_rc;
>         odp_crypto_compl_status_t auth_rc;
>         odph_ipv4hdr_t *ip;
> +       size_t seglen;
>         int hdr_len = ctx->ipsec.hdr_len;
>         int trl_len = 0;
>
> @@ -769,7 +739,7 @@ pkt_disposition_e do_ipsec_in_finish(odp_packet_t pkt,
>                 return PKT_DROP;
>         if (!is_crypto_compl_status_ok(&auth_rc))
>                 return PKT_DROP;
> -       ip = (odph_ipv4hdr_t *)odp_packet_l3(pkt);
> +       ip = (odph_ipv4hdr_t *)odp_packet_l3_map(pkt, &seglen);
>
>         /*
>          * Finish auth
> @@ -803,11 +773,11 @@ pkt_disposition_e do_ipsec_in_finish(odp_packet_t pkt,
>         ip->chksum = 0;
>         odph_ipv4_csum_update(pkt);
>
> -       /* Correct the packet length and move payload into position */
> -       odp_packet_set_len(pkt, odp_packet_get_len(pkt) - (hdr_len + trl_len));
> +       /* Move payload into position and correct the packet length */
>         memmove(ipv4_data_p(ip),
>                 ipv4_data_p(ip) + hdr_len,
>                 odp_be_to_cpu_16(ip->tot_len));
> +       odp_packet_pull_tail(pkt, hdr_len + trl_len);
>
>         /* Fall through to next state */
>         return PKT_CONTINUE;
> @@ -833,8 +803,9 @@ pkt_disposition_e do_ipsec_out_classify(odp_packet_t pkt,
>                                         pkt_ctx_t *ctx,
>                                         bool *skip)
>  {
> +       size_t seglen;
>         uint8_t *buf = odp_packet_addr(pkt);
> -       odph_ipv4hdr_t *ip = (odph_ipv4hdr_t *)odp_packet_l3(pkt);
> +       odph_ipv4hdr_t *ip = (odph_ipv4hdr_t *)odp_packet_l3_map(pkt, &seglen);
>         uint16_t ip_data_len = ipv4_data_len(ip);
>         uint8_t *ip_data = ipv4_data_p(ip);
>         ipsec_cache_entry_t *entry;
> @@ -921,7 +892,7 @@ pkt_disposition_e do_ipsec_out_classify(odp_packet_t pkt,
>
>         /* Set IPv4 length before authentication */
>         ipv4_adjust_len(ip, hdr_len + trl_len);
> -       odp_packet_set_len(pkt, odp_packet_get_len(pkt) + (hdr_len + trl_len));
> +       odp_packet_push_tail(pkt, hdr_len + trl_len);
>
>         /* Save remaining context */
>         ctx->ipsec.hdr_len = hdr_len;
> @@ -995,6 +966,7 @@ pkt_disposition_e do_ipsec_out_finish(odp_packet_t pkt,
>         odp_crypto_compl_status_t cipher_rc;
>         odp_crypto_compl_status_t auth_rc;
>         odph_ipv4hdr_t *ip;
> +       size_t seglen;
>
>         /* Check crypto result */
>         event = odp_packet_to_buffer(pkt);
> @@ -1003,7 +975,7 @@ pkt_disposition_e do_ipsec_out_finish(odp_packet_t pkt,
>                 return PKT_DROP;
>         if (!is_crypto_compl_status_ok(&auth_rc))
>                 return PKT_DROP;
> -       ip = (odph_ipv4hdr_t *)odp_packet_l3(pkt);
> +       ip = (odph_ipv4hdr_t *)odp_packet_l3_map(pkt, &seglen);
>
>         /* Finalize the IPv4 header */
>         ip->ttl = ctx->ipsec.ip_ttl;
> @@ -1057,7 +1029,7 @@ void *pktio_thread(void *arg ODP_UNUSED)
>
>                 /* Determine new work versus completion or sequence number */
>                 if ((completionq != dispatchq) && (seqnumq != dispatchq)) {
> -                       ctx = alloc_pkt_ctx(pkt);
> +                       ctx = get_pkt_ctx_from_pkt(pkt);
>                         ctx->state = PKT_STATE_INPUT_VERIFY;
>                 } else {
>                         ctx = get_pkt_ctx_from_pkt(pkt);
> @@ -1144,7 +1116,7 @@ void *pktio_thread(void *arg ODP_UNUSED)
>
>                 /* Check for drop */
>                 if (PKT_DROP == rc)
> -                       odph_packet_free(pkt);
> +                       odp_packet_free(pkt);
>
>                 /* Print packet counts every once in a while */
>                 if (PKT_DONE == rc) {
> @@ -1167,12 +1139,13 @@ main(int argc, char *argv[])
>  {
>         odph_linux_pthread_t thread_tbl[MAX_WORKERS];
>         int num_workers;
> -       void *pool_base;
>         int i;
>         int first_core;
>         int core_count;
>         int stream_count;
>         odp_shm_t shm;
> +       odp_buffer_pool_param_t params;
> +       odp_buffer_pool_init_t  init_params;
>
>         /* Init ODP before calling anything else */
>         if (odp_init_global(NULL, NULL)) {
> @@ -1232,47 +1205,22 @@ main(int argc, char *argv[])
>         printf("First core:         %i\n\n", first_core);
>
>         /* Create packet buffer pool */
> -       shm = odp_shm_reserve("shm_packet_pool",
> -                             SHM_PKT_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0);
> +       params.buf_num  = SHM_PKT_POOL_BUF_COUNT;
> +       params.buf_size = SHM_PKT_POOL_BUF_SIZE;
> +       params.buf_type = ODP_BUFFER_TYPE_PACKET;
> +       params.buf_opts = ODP_BUFFER_OPTS_UNSEGMENTED;
>
> -       pool_base = odp_shm_addr(shm);
> +       init_params.udata_size = sizeof(pkt_ctx_t);
> +       init_params.buf_init   = NULL;
> +       init_params.buf_init_arg = NULL;
>
> -       if (NULL == pool_base) {
> -               ODP_ERR("Error: packet pool mem alloc failed.\n");
> -               exit(EXIT_FAILURE);
> -       }
> +       pkt_pool = odp_buffer_pool_create("packet_pool", &params, &init_params);
>
> -       pkt_pool = odp_buffer_pool_create("packet_pool", pool_base,
> -                                         SHM_PKT_POOL_SIZE,
> -                                         SHM_PKT_POOL_BUF_SIZE,
> -                                         ODP_CACHE_LINE_SIZE,
> -                                         ODP_BUFFER_TYPE_PACKET);
>         if (ODP_BUFFER_POOL_INVALID == pkt_pool) {
>                 ODP_ERR("Error: packet pool create failed.\n");
>                 exit(EXIT_FAILURE);
>         }
>
> -       /* Create context buffer pool */
> -       shm = odp_shm_reserve("shm_ctx_pool",
> -                             SHM_CTX_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0);
> -
> -       pool_base = odp_shm_addr(shm);
> -
> -       if (NULL == pool_base) {
> -               ODP_ERR("Error: context pool mem alloc failed.\n");
> -               exit(EXIT_FAILURE);
> -       }
> -
> -       ctx_pool = odp_buffer_pool_create("ctx_pool", pool_base,
> -                                         SHM_CTX_POOL_SIZE,
> -                                         SHM_CTX_POOL_BUF_SIZE,
> -                                         ODP_CACHE_LINE_SIZE,
> -                                         ODP_BUFFER_TYPE_RAW);
> -       if (ODP_BUFFER_POOL_INVALID == ctx_pool) {
> -               ODP_ERR("Error: context pool create failed.\n");
> -               exit(EXIT_FAILURE);
> -       }
> -
>         /* Populate our IPsec cache */
>         printf("Using %s mode for crypto API\n\n",
>                (CRYPTO_API_SYNC == args->appl.mode) ? "SYNC" :
> diff --git a/example/ipsec/odp_ipsec_stream.c b/example/ipsec/odp_ipsec_stream.c
> index fa9aba8..309cf70 100644
> --- a/example/ipsec/odp_ipsec_stream.c
> +++ b/example/ipsec/odp_ipsec_stream.c
> @@ -14,7 +14,6 @@
>
>  #include <odp.h>
>
> -#include <odph_packet.h>
>  #include <odph_eth.h>
>  #include <odph_ip.h>
>  #include <odph_icmp.h>
> @@ -173,7 +172,6 @@ odp_packet_t create_ipv4_packet(stream_db_entry_t *stream,
>                                 odp_buffer_pool_t pkt_pool)
>  {
>         ipsec_cache_entry_t *entry = stream->input.entry;
> -       odp_buffer_t bfr;
>         odp_packet_t pkt;
>         uint8_t *base;
>         uint8_t *data;
> @@ -184,18 +182,19 @@ odp_packet_t create_ipv4_packet(stream_db_entry_t *stream,
>         odph_icmphdr_t *icmp;
>         stream_pkt_hdr_t *test;
>         uint i;
> +       size_t seglen;
>
> -       /* Get buffer */
> -       bfr = odp_buffer_alloc(pkt_pool);
> -       if (ODP_BUFFER_INVALID == bfr)
> +       /* Get packet */
> +       pkt = odp_packet_alloc(pkt_pool);
> +       if (ODP_PACKET_INVALID == pkt)
>                 return ODP_PACKET_INVALID;
> -       pkt = odp_packet_from_buffer(bfr);
> -       odp_packet_init(pkt);
> -       base = odp_packet_data(pkt);
> -       data = odp_packet_data(pkt);
> +
> +       base = odp_packet_map(pkt, &seglen);
> +       data = base;
>
>         /* Ethernet */
>         odp_packet_set_inflag_eth(pkt, 1);
> +       odp_packet_set_inflag_l2(pkt, 1);
>         odp_packet_set_l2_offset(pkt, data - base);
>         eth = (odph_ethhdr_t *)data;
>         data += sizeof(*eth);
> @@ -251,6 +250,7 @@ odp_packet_t create_ipv4_packet(stream_db_entry_t *stream,
>         /* ICMP header so we can see it on wireshark */
>         icmp = (odph_icmphdr_t *)data;
>         data += sizeof(*icmp);
> +
>         icmp->type = ICMP_ECHO;
>         icmp->code = 0;
>         icmp->un.echo.id = odp_cpu_to_be_16(0x1234);
> @@ -303,7 +303,7 @@ odp_packet_t create_ipv4_packet(stream_db_entry_t *stream,
>
>         /* Since ESP can pad we can now fix IP length */
>         ip->tot_len = odp_cpu_to_be_16(data - (uint8_t *)ip);
> -       odp_packet_set_len(pkt, data - base);
> +       odp_packet_push_tail(pkt, data - base);
>
>         /* Close AH if specified */
>         if (ah) {
> @@ -344,9 +344,10 @@ bool verify_ipv4_packet(stream_db_entry_t *stream,
>         int hdr_len;
>         odph_icmphdr_t *icmp;
>         stream_pkt_hdr_t *test;
> +       size_t seglen;
>
>         /* Basic IPv4 verify (add checksum verification) */
> -       data = odp_packet_l3(pkt);
> +       data = odp_packet_l3_map(pkt, &seglen);
>         ip = (odph_ipv4hdr_t *)data;
>         data += sizeof(*ip);
>         if (0x45 != ip->ver_ihl)
> @@ -546,7 +547,7 @@ bool verify_stream_db_outputs(void)
>                                 good = verify_ipv4_packet(stream, pkt);
>                                 if (good)
>                                         stream->verified++;
> -                               odph_packet_free(pkt);
> +                               odp_packet_free(pkt);
>                         }
>                 }
>
> diff --git a/example/l2fwd/odp_l2fwd.c b/example/l2fwd/odp_l2fwd.c
> index 57037cd..c43ef86 100644
> --- a/example/l2fwd/odp_l2fwd.c
> +++ b/example/l2fwd/odp_l2fwd.c
> @@ -17,7 +17,6 @@
>
>  #include <odp.h>
>  #include <odph_linux.h>
> -#include <odph_packet.h>
>  #include <odph_eth.h>
>  #include <odph_ip.h>
>
> @@ -311,12 +310,12 @@ int main(int argc, char *argv[])
>  {
>         odph_linux_pthread_t thread_tbl[MAX_WORKERS];
>         odp_buffer_pool_t pool;
> -       void *pool_base;
>         int i;
>         int first_core;
>         int core_count;
>         odp_pktio_t pktio;
>         odp_shm_t shm;
> +       odp_buffer_pool_param_t params;
>
>         /* Init ODP before calling anything else */
>         if (odp_init_global(NULL, NULL)) {
> @@ -380,20 +379,13 @@ int main(int argc, char *argv[])
>         printf("First core:         %i\n\n", first_core);
>
>         /* Create packet pool */
> -       shm = odp_shm_reserve("shm_packet_pool",
> -                             SHM_PKT_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0);
> -       pool_base = odp_shm_addr(shm);
> +       params.buf_num  = SHM_PKT_POOL_SIZE/SHM_PKT_POOL_BUF_SIZE;
> +       params.buf_size = SHM_PKT_POOL_BUF_SIZE;
> +       params.buf_type = ODP_BUFFER_TYPE_PACKET;
> +       params.buf_opts = ODP_BUFFER_OPTS_UNSEGMENTED;
>
> -       if (pool_base == NULL) {
> -               ODP_ERR("Error: packet pool mem alloc failed.\n");
> -               exit(EXIT_FAILURE);
> -       }
> +       pool = odp_buffer_pool_create("packet_pool", &params, NULL);
>
> -       pool = odp_buffer_pool_create("packet_pool", pool_base,
> -                                     SHM_PKT_POOL_SIZE,
> -                                     SHM_PKT_POOL_BUF_SIZE,
> -                                     ODP_CACHE_LINE_SIZE,
> -                                     ODP_BUFFER_TYPE_PACKET);
>         if (pool == ODP_BUFFER_POOL_INVALID) {
>                 ODP_ERR("Error: packet pool create failed.\n");
>                 exit(EXIT_FAILURE);
> @@ -480,7 +472,7 @@ static int drop_err_pkts(odp_packet_t pkt_tbl[], unsigned len)
>                 pkt = pkt_tbl[i];
>
>                 if (odp_unlikely(odp_packet_error(pkt))) {
> -                       odph_packet_free(pkt); /* Drop */
> +                       odp_packet_free(pkt); /* Drop */
>                         pkt_cnt--;
>                 } else if (odp_unlikely(i != j++)) {
>                         pkt_tbl[j-1] = pkt;
> diff --git a/example/odp_example/odp_example.c b/example/odp_example/odp_example.c
> index 1ed4a0b..cdb78b6 100644
> --- a/example/odp_example/odp_example.c
> +++ b/example/odp_example/odp_example.c
> @@ -944,13 +944,13 @@ int main(int argc, char *argv[])
>         test_args_t args;
>         int num_workers;
>         odp_buffer_pool_t pool;
> -       void *pool_base;
>         odp_queue_t queue;
>         int i, j;
>         int prios;
>         int first_core;
>         odp_shm_t shm;
>         test_globals_t *globals;
> +       odp_buffer_pool_param_t params;
>
>         printf("\nODP example starts\n\n");
>
> @@ -1032,19 +1032,13 @@ int main(int argc, char *argv[])
>         /*
>          * Create message pool
>          */
> -       shm = odp_shm_reserve("msg_pool",
> -                             MSG_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0);
>
> -       pool_base = odp_shm_addr(shm);
> +       params.buf_num  = MSG_POOL_SIZE/sizeof(test_message_t);
> +       params.buf_size = sizeof(test_message_t);
> +       params.buf_type = ODP_BUFFER_TYPE_RAW;
> +       params.buf_opts = ODP_BUFFER_OPTS_NONE;
>
> -       if (pool_base == NULL) {
> -               ODP_ERR("Shared memory reserve failed.\n");
> -               return -1;
> -       }
> -
> -       pool = odp_buffer_pool_create("msg_pool", pool_base, MSG_POOL_SIZE,
> -                                     sizeof(test_message_t),
> -                                     ODP_CACHE_LINE_SIZE, ODP_BUFFER_TYPE_RAW);
> +       pool = odp_buffer_pool_create("msg_pool", &params, NULL);
>
>         if (pool == ODP_BUFFER_POOL_INVALID) {
>                 ODP_ERR("Pool create failed.\n");
> diff --git a/example/packet/odp_pktio.c b/example/packet/odp_pktio.c
> index 2cf3f0d..64161f2 100644
> --- a/example/packet/odp_pktio.c
> +++ b/example/packet/odp_pktio.c
> @@ -17,7 +17,6 @@
>
>  #include <odp.h>
>  #include <odph_linux.h>
> -#include <odph_packet.h>
>  #include <odph_eth.h>
>  #include <odph_ip.h>
>
> @@ -292,11 +291,11 @@ int main(int argc, char *argv[])
>         odph_linux_pthread_t thread_tbl[MAX_WORKERS];
>         odp_buffer_pool_t pool;
>         int num_workers;
> -       void *pool_base;
>         int i;
>         int first_core;
>         int core_count;
>         odp_shm_t shm;
> +       odp_buffer_pool_param_t params;
>
>         /* Init ODP before calling anything else */
>         if (odp_init_global(NULL, NULL)) {
> @@ -350,20 +349,13 @@ int main(int argc, char *argv[])
>         printf("First core:         %i\n\n", first_core);
>
>         /* Create packet pool */
> -       shm = odp_shm_reserve("shm_packet_pool",
> -                             SHM_PKT_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0);
> -       pool_base = odp_shm_addr(shm);
> +       params.buf_num  = SHM_PKT_POOL_SIZE/SHM_PKT_POOL_BUF_SIZE;
> +       params.buf_size = SHM_PKT_POOL_BUF_SIZE;
> +       params.buf_type = ODP_BUFFER_TYPE_PACKET;
> +       params.buf_opts = ODP_BUFFER_OPTS_UNSEGMENTED;
>
> -       if (pool_base == NULL) {
> -               ODP_ERR("Error: packet pool mem alloc failed.\n");
> -               exit(EXIT_FAILURE);
> -       }
> +       pool = odp_buffer_pool_create("packet_pool", &params, NULL);
>
> -       pool = odp_buffer_pool_create("packet_pool", pool_base,
> -                                     SHM_PKT_POOL_SIZE,
> -                                     SHM_PKT_POOL_BUF_SIZE,
> -                                     ODP_CACHE_LINE_SIZE,
> -                                     ODP_BUFFER_TYPE_PACKET);
>         if (pool == ODP_BUFFER_POOL_INVALID) {
>                 ODP_ERR("Error: packet pool create failed.\n");
>                 exit(EXIT_FAILURE);
> @@ -427,7 +419,7 @@ static int drop_err_pkts(odp_packet_t pkt_tbl[], unsigned len)
>                 pkt = pkt_tbl[i];
>
>                 if (odp_unlikely(odp_packet_error(pkt))) {
> -                       odph_packet_free(pkt); /* Drop */
> +                       odp_packet_free(pkt); /* Drop */
>                         pkt_cnt--;
>                 } else if (odp_unlikely(i != j++)) {
>                         pkt_tbl[j-1] = pkt;
> @@ -452,11 +444,12 @@ static void swap_pkt_addrs(odp_packet_t pkt_tbl[], unsigned len)
>         odph_ipv4hdr_t *ip;
>         uint32be_t ip_tmp_addr; /* tmp ip addr */
>         unsigned i;
> +       size_t seglen;
>
>         for (i = 0; i < len; ++i) {
>                 pkt = pkt_tbl[i];
>                 if (odp_packet_inflag_eth(pkt)) {
> -                       eth = (odph_ethhdr_t *)odp_packet_l2(pkt);
> +                       eth = (odph_ethhdr_t *)odp_packet_l2_map(pkt, &seglen);
>
>                         tmp_addr = eth->dst;
>                         eth->dst = eth->src;
> @@ -464,7 +457,8 @@ static void swap_pkt_addrs(odp_packet_t pkt_tbl[], unsigned len)
>
>                         if (odp_packet_inflag_ipv4(pkt)) {
>                                 /* IPv4 */
> -                               ip = (odph_ipv4hdr_t *)odp_packet_l3(pkt);
> +                               ip = (odph_ipv4hdr_t *)
> +                                       odp_packet_l3_map(pkt, &seglen);
>
>                                 ip_tmp_addr  = ip->src_addr;
>                                 ip->src_addr = ip->dst_addr;
> diff --git a/example/timer/odp_timer_test.c b/example/timer/odp_timer_test.c
> index 78b2ae2..c0fcf49 100644
> --- a/example/timer/odp_timer_test.c
> +++ b/example/timer/odp_timer_test.c
> @@ -242,12 +242,11 @@ int main(int argc, char *argv[])
>         test_args_t args;
>         int num_workers;
>         odp_buffer_pool_t pool;
> -       void *pool_base;
>         odp_queue_t queue;
>         int first_core;
>         uint64_t cycles, ns;
>         odp_queue_param_t param;
> -       odp_shm_t shm;
> +       odp_buffer_pool_param_t params;
>
>         printf("\nODP timer example starts\n");
>
> @@ -306,17 +305,12 @@ int main(int argc, char *argv[])
>         printf("period:             %i usec\n", args.period_us);
>         printf("timeouts:           %i\n", args.tmo_count);
>
> -       /*
> -        * Create message pool
> -        */
> -       shm = odp_shm_reserve("msg_pool",
> -                             MSG_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0);
> -       pool_base = odp_shm_addr(shm);
> -
> -       pool = odp_buffer_pool_create("msg_pool", pool_base, MSG_POOL_SIZE,
> -                                     0,
> -                                     ODP_CACHE_LINE_SIZE,
> -                                     ODP_BUFFER_TYPE_TIMEOUT);
> +       params.buf_num  = 1024;
> +       params.buf_size = 0;
> +       params.buf_type = ODP_BUFFER_TYPE_TIMEOUT;
> +       params.buf_opts = ODP_BUFFER_OPTS_NONE;
> +
> +       pool = odp_buffer_pool_create("msg_pool", &params, NULL);
>
>         if (pool == ODP_BUFFER_POOL_INVALID) {
>                 ODP_ERR("Pool create failed.\n");
> diff --git a/helper/include/odph_ip.h b/helper/include/odph_ip.h
> index 2c83c0f..2dab164 100644
> --- a/helper/include/odph_ip.h
> +++ b/helper/include/odph_ip.h
> @@ -79,10 +79,12 @@ static inline int odph_ipv4_csum_valid(odp_packet_t pkt)
>         odph_ipv4hdr_t ip;
>         uint16be_t chksum;
>
> -       if (!odp_packet_l3_offset(pkt))
> +       if (!odp_packet_inflag_ipv4(pkt))
>                 return 0;
>
> -       memcpy(&ip, odp_packet_l3(pkt), sizeof(odph_ipv4hdr_t));
> +       odp_packet_copy_to_memory(&ip, pkt, odp_packet_l3_offset(pkt),
> +                                 sizeof(odph_ipv4hdr_t));
> +
>         w = (uint16_t *)(void *)&ip;
>         chksum = ip.chksum;
>         ip.chksum = 0x0;
> @@ -105,12 +107,13 @@ static inline uint16sum_t odph_ipv4_csum_update(odp_packet_t pkt)
>  {
>         uint16_t *w;
>         odph_ipv4hdr_t *ip;
> +       size_t seglen;
>         int nleft = sizeof(odph_ipv4hdr_t);
>
> -       if (!odp_packet_l3_offset(pkt))
> +       if (!odp_packet_inflag_ipv4(pkt))
>                 return 0;
>
> -       ip = (odph_ipv4hdr_t *)odp_packet_l3(pkt);
> +       ip = (odph_ipv4hdr_t *)odp_packet_l3_map(pkt, &seglen);
>         w = (uint16_t *)(void *)ip;
>         ip->chksum = odp_chksum(w, nleft);
>         return ip->chksum;
> @@ -126,7 +129,14 @@ static inline uint16sum_t odph_ipv4_csum_update(odp_packet_t pkt)
>   * IPv6 header
>   */
>  typedef struct ODP_PACKED {
> -       uint32be_t ver_tc_flow;  /**< Version / Traffic class / Flow label */
> +       union {
> +               uint32be_t ver_tc_flow;  /**< Version / TC / Flow label */
> +               struct {
> +                       uint32be_t ver:4;    /**< Version (must be 6) */
> +                       uint32be_t tc:8;     /**< Traffic class */
> +                       uint32be_t flow:20;  /**< Flow label */
> +               };
> +       };
>         uint16be_t payload_len;  /**< Payload length */
>         uint8_t    next_hdr;     /**< Next header */
>         uint8_t    hop_limit;    /**< Hop limit */
> @@ -137,16 +147,29 @@ typedef struct ODP_PACKED {
>  /** @internal Compile time assert */
>  ODP_STATIC_ASSERT(sizeof(odph_ipv6hdr_t) == ODPH_IPV6HDR_LEN, "ODPH_IPV6HDR_T__SIZE_ERROR");
>
> +/**
> + * IPv6 Header extensions
> + */
> +typedef struct ODP_PACKED {
> +       uint8_t    next_hdr;     /**< Protocol of next header */
> +       uint8_t    ext_len;      /**< Length of this extension in 8 byte units,
> +                                   not counting first 8 bytes, so 0 = 8 bytes
> +                                   1 = 16 bytes, etc. */
> +       uint8_t    filler[6];    /**< Fill out first 8 byte segment */
> +} odph_ipv6hdr_ext_t;
> +
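
The ext_len encoding above is easy to get wrong, so here is a minimal sketch
(not part of this patch) of walking IPv6 extension headers with the
odph_ipv6hdr_ext_t layout: each extension header occupies (ext_len + 1) * 8
bytes. The is_ipv6_ext()/skip_ipv6_exts() helpers are illustrative only.

    static int is_ipv6_ext(uint8_t next_hdr)
    {
            return next_hdr == ODPH_IPPROTO_HOPOPTS ||
                   next_hdr == ODPH_IPPROTO_ROUTE ||
                   next_hdr == ODPH_IPPROTO_FRAG;
    }

    /* Returns the upper-layer protocol and sets *parse to the first byte
     * past the extension headers. */
    static uint8_t skip_ipv6_exts(const odph_ipv6hdr_t *ip6, const uint8_t **parse)
    {
            uint8_t next = ip6->next_hdr;
            const uint8_t *p = (const uint8_t *)(ip6 + 1);

            while (is_ipv6_ext(next)) {
                    const odph_ipv6hdr_ext_t *ext = (const odph_ipv6hdr_ext_t *)p;

                    next = ext->next_hdr;
                    p   += (ext->ext_len + 1) * 8; /* 0 = 8 bytes, 1 = 16, ... */
            }

            *parse = p;
            return next; /* e.g. ODPH_IPPROTO_UDP */
    }
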
>  /** @name
>   * IP protocol values (IPv4:'proto' or IPv6:'next_hdr')
>   * @{*/
> -#define ODPH_IPPROTO_ICMP 0x01 /**< Internet Control Message Protocol (1) */
> -#define ODPH_IPPROTO_TCP  0x06 /**< Transmission Control Protocol (6) */
> -#define ODPH_IPPROTO_UDP  0x11 /**< User Datagram Protocol (17) */
> -#define ODPH_IPPROTO_SCTP 0x84 /**< Stream Control Transmission Protocol (132) */
> -#define ODPH_IPPROTO_FRAG 0x2C /**< Fragment (44) */
> -#define ODPH_IPPROTO_AH   0x33 /**< Authentication Header (51) */
> -#define ODPH_IPPROTO_ESP  0x32 /**< Encapsulating Security Payload (50) */
> +#define ODPH_IPPROTO_HOPOPTS 0x00 /**< IPv6 hop-by-hop options */
> +#define ODPH_IPPROTO_ICMP    0x01 /**< Internet Control Message Protocol (1) */
> +#define ODPH_IPPROTO_TCP     0x06 /**< Transmission Control Protocol (6) */
> +#define ODPH_IPPROTO_UDP     0x11 /**< User Datagram Protocol (17) */
> +#define ODPH_IPPROTO_ROUTE   0x2B /**< IPv6 Routing header (43) */
> +#define ODPH_IPPROTO_FRAG    0x2C /**< IPv6 Fragment (44) */
> +#define ODPH_IPPROTO_AH      0x33 /**< Authentication Header (51) */
> +#define ODPH_IPPROTO_ESP     0x32 /**< Encapsulating Security Payload (50) */
> +#define ODPH_IPPROTO_INVALID 0xFF /**< Reserved invalid by IANA */
>  /**@}*/
>
>  #ifdef __cplusplus
> diff --git a/helper/include/odph_packet.h b/helper/include/odph_packet.h
> deleted file mode 100644
> index 3d53593..0000000
> --- a/helper/include/odph_packet.h
> +++ /dev/null
> @@ -1,97 +0,0 @@
> -/* Copyright (c) 2014, Linaro Limited
> - * All rights reserved.
> - *
> - * SPDX-License-Identifier:     BSD-3-Clause
> - */
> -
> -
> -/**
> - * @file
> - *
> - * Optional ODP packet helper functions
> - */
> -
> -#ifndef ODPH_PACKET_HELPER_H_
> -#define ODPH_PACKET_HELPER_H_
> -
> -#ifdef __cplusplus
> -extern "C" {
> -#endif
> -
> -#include <odp.h>
> -
> -/**
> - * Helper: Tests if packet is valid
> - *
> - * Allows for more thorough checking than "if (pkt == ODP_PACKET_INVALID)"
> - *
> - * @param pkt  Packet handle
> - *
> - * @return 1 if valid, otherwise 0
> - */
> -static inline int odph_packet_is_valid(odp_packet_t pkt)
> -{
> -       odp_buffer_t buf = odp_packet_to_buffer(pkt);
> -
> -       return odp_buffer_is_valid(buf);
> -}
> -
> -/**
> - * Helper: Allocate and initialize a packet buffer from a packet pool
> - *
> - * @param pool_id  Pool handle
> - *
> - * @note  The pool must have been created with 'buf_type=ODP_BUFFER_TYPE_PACKET'
> - *
> - * @return Packet handle or ODP_PACKET_INVALID
> - */
> -static inline odp_packet_t odph_packet_alloc(odp_buffer_pool_t pool_id)
> -{
> -       odp_packet_t pkt;
> -       odp_buffer_t buf;
> -
> -       buf = odp_buffer_alloc(pool_id);
> -       if (odp_unlikely(!odp_buffer_is_valid(buf)))
> -               return ODP_PACKET_INVALID;
> -
> -       pkt = odp_packet_from_buffer(buf);
> -       odp_packet_init(pkt);
> -
> -       return pkt;
> -}
> -
> -/**
> - * Helper: Free a packet buffer back into the packet pool
> - *
> - * @param pkt  Packet handle
> - */
> -static inline void odph_packet_free(odp_packet_t pkt)
> -{
> -       odp_buffer_t buf = odp_packet_to_buffer(pkt);
> -
> -       odp_buffer_free(buf);
> -}
> -
> -/**
> - * Helper: Packet buffer maximum data size
> - *
> - * @note odp_packet_buf_size(pkt) != odp_packet_get_len(pkt), the former returns
> - *       the max length of the buffer, the latter the size of a received packet.
> - *
> - * @param pkt  Packet handle
> - *
> - * @return Packet buffer maximum data size
> - */
> -static inline size_t odph_packet_buf_size(odp_packet_t pkt)
> -{
> -       odp_buffer_t buf = odp_packet_to_buffer(pkt);
> -
> -       return odp_buffer_size(buf);
> -}
> -
> -
> -#ifdef __cplusplus
> -}
> -#endif
> -
> -#endif
> diff --git a/helper/include/odph_udp.h b/helper/include/odph_udp.h
> index b2eaf03..bd0fb68 100644
> --- a/helper/include/odph_udp.h
> +++ b/helper/include/odph_udp.h
> @@ -57,15 +57,14 @@ static inline uint16_t odph_ipv4_udp_chksum(odp_packet_t pkt)
>         odph_udphdr_t *udph;
>         odph_ipv4hdr_t *iph;
>         uint16_t udplen;
> +       size_t l3_seglen, l4_seglen;
>
> -       if (!odp_packet_l3_offset(pkt))
> +       if (odp_packet_l3_protocol(pkt) != 0x800 ||
> +           odp_packet_l4_protocol(pkt) != ODPH_IPPROTO_UDP)
>                 return 0;
>
> -       if (!odp_packet_l4_offset(pkt))
> -               return 0;
> -
> -       iph = (odph_ipv4hdr_t *)odp_packet_l3(pkt);
> -       udph = (odph_udphdr_t *)odp_packet_l4(pkt);
> +       iph = (odph_ipv4hdr_t *)odp_packet_l3_map(pkt, &l3_seglen);
> +       udph = (odph_udphdr_t *)odp_packet_l4_map(pkt, &l4_seglen);
>         udplen = odp_be_to_cpu_16(udph->length);
>
>         /* the source ip */
> diff --git a/platform/linux-generic/Makefile.am b/platform/linux-generic/Makefile.am
> index 0153a22..08c147c 100644
> --- a/platform/linux-generic/Makefile.am
> +++ b/platform/linux-generic/Makefile.am
> @@ -21,7 +21,6 @@ include_HEADERS = \
>                         $(top_srcdir)/platform/linux-generic/include/api/odp_debug.h \
>                         $(top_srcdir)/platform/linux-generic/include/api/odp_hints.h \
>                         $(top_srcdir)/platform/linux-generic/include/api/odp_init.h \
> -                       $(top_srcdir)/platform/linux-generic/include/api/odp_packet_flags.h \
>                         $(top_srcdir)/platform/linux-generic/include/api/odp_packet.h \
>                         $(top_srcdir)/platform/linux-generic/include/api/odp_packet_io.h \
>                         $(top_srcdir)/platform/linux-generic/include/api/odp_queue.h \
> @@ -46,8 +45,8 @@ subdirheaders_HEADERS = \
>                         $(top_srcdir)/helper/include/odph_ip.h \
>                         $(top_srcdir)/helper/include/odph_ipsec.h \
>                         $(top_srcdir)/helper/include/odph_linux.h \
> -                       $(top_srcdir)/helper/include/odph_packet.h \
>                         $(top_srcdir)/helper/include/odph_ring.h \
> +                       $(top_srcdir)/helper/include/odph_tcp.h \
>                         $(top_srcdir)/helper/include/odph_udp.h
>
>  __LIB__libodp_la_SOURCES = \
> @@ -60,7 +59,6 @@ __LIB__libodp_la_SOURCES = \
>                            odp_init.c \
>                            odp_linux.c \
>                            odp_packet.c \
> -                          odp_packet_flags.c \
>                            odp_packet_io.c \
>                            odp_packet_socket.c \
>                            odp_queue.c \
> diff --git a/platform/linux-generic/include/api/odp.h b/platform/linux-generic/include/api/odp.h
> index 6e4f69e..ffdf1f3 100644
> --- a/platform/linux-generic/include/api/odp.h
> +++ b/platform/linux-generic/include/api/odp.h
> @@ -44,7 +44,6 @@ extern "C" {
>  #include <odp_schedule.h>
>  #include <odp_sync.h>
>  #include <odp_packet.h>
> -#include <odp_packet_flags.h>
>  #include <odp_packet_io.h>
>  #include <odp_crypto.h>
>  #include <odp_rwlock.h>
> diff --git a/platform/linux-generic/include/api/odp_buffer.h b/platform/linux-generic/include/api/odp_buffer.h
> index 289e0eb..3dc4cde 100644
> --- a/platform/linux-generic/include/api/odp_buffer.h
> +++ b/platform/linux-generic/include/api/odp_buffer.h
> @@ -1,4 +1,4 @@
> -/* Copyright (c) 2013, Linaro Limited
> +/* Copyright (c) 2013-2014, Linaro Limited
>   * All rights reserved.
>   *
>   * SPDX-License-Identifier:     BSD-3-Clause
> @@ -8,7 +8,88 @@
>  /**
>   * @file
>   *
> - * ODP buffer descriptor
> + * @par Buffer
> + * A buffer is an element of a buffer pool used for storing
> + * information. Buffers are referenced by an abstract handle of type
> + * odp_buffer_t. Buffers have associated buffer types that describe
> + * their intended use and the type of metadata that is associated
> + * with them. Buffers of a specific type may be referenced for
> + * processing by cores or by offload engines. Buffers are also
> + * transmitted via queues from one processing element to another.
> + *
> + * @par Buffer Types
> + * An ODP buffer type is identified by the
> + * odp_buffer_type_e enum. It defines the semantics that are to be
> + * attached to the buffer and defines the type of metadata that is
> + * associated with it. ODP implementations MUST support the following
> + * buffer types:
> + *
> + * - ODP_BUFFER_TYPE_RAW
> + * This is the “basic” buffer type
> + * which simply consists of a single fixed-sized block of contiguous
> + * memory. Buffers of this type do not support user metadata and the
> + * only built-in metadata supported for this type of buffer are those
> + * that are statically computable, such as pool and size. This type of
> + * buffer is entirely under application control and most of the buffer
> + * APIs defined in this document are not available. APIs for this
> + * type of buffer are described in this document.
> + *
> + * - ODP_BUFFER_TYPE_PACKET
> + * This buffer type is suitable for receiving,
> + * processing, and transmitting network packet data. Included in this
> + * type is a rich set of primitives for manipulating buffer aggregates
> + * and for storing system and user metadata. APIs for this type of
> + * buffer are described here and in the ODP Packet Management Design
> + * document.
> + *
> + * - ODP_BUFFER_TYPE_TIMEOUT
> + * This buffer type is suitable for
> + * representing timer timeout events. Does not support buffer
> + * aggregation but does support user metadata. APIs for this type of
> + * buffer are described here and in the ODP Timer Management Design
> + * document.
> + *
> + * - ODP_BUFFER_TYPE_ANY
> + * A “universal” buffer type capable of
> + * storing information needed for any other buffer type. It is not
> + * intended to be used directly, but exists for possible
> + * implementation convenience.
> + *
> + * @par Metadata
> + * Metadata is additional information relating to a
> + * buffer that is distinct from the application data normally held in
> + * the buffer. Implementations MAY choose to implement metadata as
> + * contiguous with a buffer (e.g., in an implementation-managed prefix
> + * area of the buffer) or in a physically separate metadata area
> + * efficiently accessible by the implementation using the same
> + * identifier as the buffer itself. ODP applications MUST NOT make
> + * assumptions about the addressability relationship between a buffer
> + * and its associated metadata, or between metadata items.
> + * Application use of metadata MUST only be via accessor functions.
> + *
> + * @par Note on OPTIONAL APIs
> + * Every conforming ODP implementation MUST
> + * provide implementations for each API described here. If an API is
> + * designated as OPTIONAL, this means that it is acceptable for an
> + * implementation to do nothing except return
> + * ODP_FUNCTION_NOT_AVAILABLE in response to this call. Note that this
> + * may limit the range of ODP applications supported by a given
> + * implementation since applications needing the functionality of the
> + * optional API will likely choose to deploy on other ODP platforms.
> + *
> + * @par
> + * APIs are designated as OPTIONAL under two conditions:
> + *
> + * -# The API is expected to be difficult to provide efficiently on all
> + *  platforms.
> + *
> + * -# A significant number of ODP applications are expected to exist
> + *  that will not need or use this API.
> + *
> + * @par
> + * Under these circumstances, an API is designated as OPTIONAL to
> + * permit ODP implementations to be conformant while still expecting
> + * to be able to run a significant number of ODP applications.
>   */
>
>  #ifndef ODP_BUFFER_H_
> @@ -21,10 +102,9 @@ extern "C" {
>
>  #include <odp_std_types.h>
>
> -
>  /** @defgroup odp_buffer ODP BUFFER
> - *  Operations on a buffer.
> - *  @{
> + *
> + * @{
>   */
>
>  /**
> @@ -32,62 +112,445 @@ extern "C" {
>   */
>  typedef uint32_t odp_buffer_t;
>
> -#define ODP_BUFFER_INVALID (0xffffffff) /**< Invalid buffer */
> +/**
> + * ODP buffer segment
> + */
> +typedef uint32_t odp_buffer_segment_t;
> +
> +/**
> + * ODP Buffer pool
> + */
> +typedef uint32_t odp_buffer_pool_t;
> +
> +/**
> + * ODP buffer type
> + */
> +typedef enum odp_buffer_type {
> +       ODP_BUFFER_TYPE_INVALID = -1, /**< Buffer type invalid */
> +       ODP_BUFFER_TYPE_ANY     = 0,  /**< Buffer type can hold any other
> +                                        buffer type */
> +       ODP_BUFFER_TYPE_RAW     = 1,  /**< Raw buffer,
> +                                        no additional metadata */
> +       ODP_BUFFER_TYPE_PACKET  = 2,  /**< Packet buffer */
> +       ODP_BUFFER_TYPE_TIMEOUT = 3,  /**< Timeout buffer */
> +} odp_buffer_type_e;
> +
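
A typical use of the buffer type, sketched here on the assumption that events
arrive on an odp_queue_t (odp_queue_deq() and odp_buffer_free() are existing
ODP calls; handle_packet()/handle_timeout() are hypothetical handlers), is to
dispatch on odp_buffer_type() and convert packet-type buffers with
odp_packet_from_buffer(), as the examples in this patch do:

    static void dispatch_event(odp_queue_t inq)
    {
            odp_buffer_t buf = odp_queue_deq(inq);

            if (buf == ODP_BUFFER_INVALID)
                    return;

            switch (odp_buffer_type(buf)) {
            case ODP_BUFFER_TYPE_PACKET:
                    handle_packet(odp_packet_from_buffer(buf)); /* hypothetical */
                    break;
            case ODP_BUFFER_TYPE_TIMEOUT:
                    handle_timeout(buf);                        /* hypothetical */
                    break;
            default:
                    odp_buffer_free(buf); /* nothing useful to do with it */
                    break;
            }
    }
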
> +/**
> + * ODP buffer options
> + *
> + * @note These options are additive so an application can simply
> + * specify a buf_opts by ORing together the options needed. Note that
> + * buffer pool options are themselves OPTIONAL and a given
> + * implementation MAY fail the buffer pool creation request with an
> + * appropriate errno if the requested option is not supported by the
> + * underlying ODP implementation, with the exception that UNSEGMENTED
> + * pools MUST be supported for non-packet types and for packet types
> + * as long as the requested size is less than the
> + * implementation-defined native packet segment size.
> + *
> + * Use ODP_BUFFER_OPTS_NONE to specify default buffer pool options
> + * with no additions. The ODP_BUFFER_OPTS_UNSEGMENTED option
> + * specifies that the buffer pool should be unsegmented.
> + *
> + * @par Segmented vs. Unsegmented Buffer Pools
> + * By default, the buffers
> + * in ODP buffer pools are logical buffers that support transparent
> + * segmentation managed by ODP on behalf of the application and have a
> + * rich set of associated semantics as described here.
> + * ODP_BUFFER_OPTS_UNSEGMENTED indicates that the buf_size specified
> + * for the pool should be regarded as a fixed buffer size for all pool
> + * elements and that segmentation support is not needed for the pool.
> + * This MAY result in greater efficiency on some implementations. For
> + * packet processing, a typical use of unsegmented pools would be in
> + * conjunction with classification rules that sort packets into
> + * different pools based on their lengths, thus ensuring that each
> + * packet occupies a single segment within an appropriately-sized
> + * buffer.
> + */
> +typedef enum odp_buffer_opts {
> +       ODP_BUFFER_OPTS_NONE,        /**< Default, no buffer options */
> +       ODP_BUFFER_OPTS_UNSEGMENTED, /**< No segments, please */
> +} odp_buffer_opts_e;
>
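
A minimal sketch of how these options are used, mirroring the example code
changes earlier in this patch (SHM_PKT_POOL_BUF_SIZE and SHM_PKT_POOL_SIZE are
the constants those examples already define):

    static odp_buffer_pool_t create_packet_pool(void)
    {
            odp_buffer_pool_param_t params;

            params.buf_size = SHM_PKT_POOL_BUF_SIZE;
            params.buf_num  = SHM_PKT_POOL_SIZE / SHM_PKT_POOL_BUF_SIZE;
            params.buf_type = ODP_BUFFER_TYPE_PACKET;
            params.buf_opts = ODP_BUFFER_OPTS_UNSEGMENTED; /* options are additive */

            /* Returns ODP_BUFFER_POOL_INVALID on failure */
            return odp_buffer_pool_create("packet_pool", &params, NULL);
    }
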
> +/**
> + * Error returns
> + */
> +#define ODP_BUFFER_INVALID (odp_buffer_t)(-1)
>
>  /**
>   * Buffer start address
>   *
> - * @param buf      Buffer handle
> + * @param[in] buf  Buffer handle
>   *
>   * @return Buffer start address
>   */
>  void *odp_buffer_addr(odp_buffer_t buf);
>
>  /**
> - * Buffer maximum data size
> + * Buffer application data size
> + *
> + * @param[in] buf  Buffer handle
>   *
> - * @param buf      Buffer handle
> + * @return Buffer application data size
>   *
> - * @return Buffer maximum data size
> + * @note The size returned by this routine is the size of the
> + * application data contained within the buffer and does not include
> + * any implementation-defined overhead to support metadata, etc. ODP
> + * does not define APIs for determining the amount of storage that is
> + * physically allocated by an implementation to support ODP buffers.
>   */
>  size_t odp_buffer_size(odp_buffer_t buf);
>
>  /**
>   * Buffer type
>   *
> - * @param buf      Buffer handle
> + * @param[in] buf    Buffer handle
>   *
>   * @return Buffer type
>   */
> -int odp_buffer_type(odp_buffer_t buf);
> +odp_buffer_type_e odp_buffer_type(odp_buffer_t buf);
>
> -#define ODP_BUFFER_TYPE_INVALID (-1) /**< Buffer type invalid */
> -#define ODP_BUFFER_TYPE_ANY       0  /**< Buffer that can hold any other
> -                                         buffer type */
> -#define ODP_BUFFER_TYPE_RAW       1  /**< Raw buffer, no additional metadata */
> -#define ODP_BUFFER_TYPE_PACKET    2  /**< Packet buffer */
> -#define ODP_BUFFER_TYPE_TIMEOUT   3  /**< Timeout buffer */
> +/**
> + * Get address and size of user metadata for buffer
> + *
> + * @param[in]  buf        Buffer handle
> + * @param[out] udata_size Number of bytes of user metadata available
> + *                        at the returned address
> + * @return                Address of the user metadata for this buffer
> + *                        or NULL if the buffer has no user metadata.
> + */
> +void *odp_buffer_udata(odp_buffer_t buf, size_t *udata_size);
>
> +/**
> + * Get address of user metadata for buffer
> + *
> + * @param[in] buf         Buffer handle
> + *
> + * @return                Address of the user metadata for this buffer
> + *                        or NULL if the buffer has no user metadata.
> + *
> + * @note This is a "fastpath" version of odp_buffer_udata() since it
> + * omits returning the size of the user metadata area. Callers are
> + * expected to know and honor this limit nonetheless.
> + */
> +void *odp_buffer_udata_addr(odp_buffer_t buf);
>
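
The user metadata accessors are what let the ipsec example in this patch drop
its separate context pool. A rough sketch, assuming params is filled in as in
the previous sketch and pkt is a received packet from a pool created with
udata support:

    odp_buffer_pool_init_t init_params;
    odp_buffer_pool_t pool;

    init_params.udata_size   = sizeof(pkt_ctx_t); /* pkt_ctx_t as in odp_ipsec.c */
    init_params.buf_init     = NULL;
    init_params.buf_init_arg = NULL;

    pool = odp_buffer_pool_create("packet_pool", &params, &init_params);

    /* later, per packet, instead of allocating a separate context buffer: */
    pkt_ctx_t *ctx = (pkt_ctx_t *)odp_packet_udata_addr(pkt);
    ctx->state = PKT_STATE_INPUT_VERIFY;
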
>  /**
>   * Tests if buffer is valid
>   *
> - * @param buf      Buffer handle
> + * @param[in] buf    Buffer handle
>   *
>   * @return 1 if valid, otherwise 0
> + *
> + * @note Since buffer operations typically occur in fastpath sections
> + * of applications, by default most ODP APIs assume that valid buffer
> + * handles are passed to them and results are undefined if this
> + * assumption is not met. This routine exists to enable an
> + * application to request explicit validation of a buffer handle. It
> + * is understood that the performance of this operation MAY vary
> + * considerably on a per-implementation basis.
>   */
>  int odp_buffer_is_valid(odp_buffer_t buf);
>
>  /**
> + * Tests if buffer is segmented
> + *
> + * @param[in] buf    Buffer handle
> + *
> + * @return 1 if buffer has more than one segment, otherwise 0
> + *
> + * @note This routine behaves identically to the test
> + * odp_buffer_segment_count() > 1, but is potentially more efficient
> + * and represents the preferred method of determining a buffer's
> + * segmentation status.
> + */
> +int odp_buffer_is_segmented(odp_buffer_t buf);
> +
> +/**
>   * Print buffer metadata to STDOUT
>   *
> - * @param buf      Buffer handle
> + * @param[in] buf    Buffer handle
>   *
> + * @note This routine is intended for diagnostic use and prints
> + * implementation-defined information concerning the buffer to the ODP
> + * LOG. Its provision is OPTIONAL.
>   */
>  void odp_buffer_print(odp_buffer_t buf);
>
>  /**
> + * Get count of number of segments in a buffer
> + *
> + * @param[in] buf    Buffer handle
> + *
> + * @return           Count of the number of segments in buf
> + */
> +size_t odp_buffer_segment_count(odp_buffer_t buf);
> +
> +/**
> + * Get the segment identifier for a buffer segment by index
> + *
> + * @param[in] buf    Buffer handle
> + * @param[in] ndx    Segment index of segment of interest
> + *
> + * @return           Segment handle or ODP_SEGMENT_INVALID if the
> + *                   supplied ndx is out of range.
> + */
> +odp_buffer_segment_t odp_buffer_segment_by_index(odp_buffer_t buf, size_t
> ndx);
> +
> +/**
> + * Get the next segment handle for a buffer segment
> + *
> + * @param[in] buf    Buffer handle
> + * @param[in] seg    Segment identifier of the previous segment
> + *
> + * @return           Segment identifier of next segment or
> ODP_SEGMENT_INVALID
> + *
> + * @note This routine returns the identifier (odp_buffer_segment_t) of
> + * the next buffer segment in a buffer aggregate. The input
> + * specifies the buffer and the previous segment identifier. There are
> + * three use cases for this routine:
> + * @note
> + * -# If the input seg is ODP_SEGMENT_START then the segment identifier
> returned
> + * is that of the first segment in the buffer. ODP_SEGMENT_NULL MAY be
> used
> + * as a synonym for ODP_SEGMENT_START for symmetry if desired.
> + *
> + * -# If the input seg is not the last segment in the buffer then the
> + * segment handle of the next segment following seg is returned.
> + *
> + * -# If the input seg is the segment identifier of the last segment in
> + * the buffer then ODP_SEGMENT_NULL is returned.
> + *
> + */
> +odp_buffer_segment_t odp_buffer_segment_next(odp_buffer_t buf,
> +                                            odp_buffer_segment_t seg);
> +
> +/**
> + * Get addressability for a specified buffer segment
> + *
> + * @param[in] buf     Buffer handle
> + * @param[in] seg     Segment handle of the segment to be mapped
> + * @param[out] seglen Returned number of bytes in this buffer segment
> + *                    available at the returned address
> + *
> + * @return            Segment start address or NULL
> + *
> + * @note This routine is used to obtain addressability to a segment within
> + * a buffer aggregate at a specified segment identifier. The returned
> seglen
> + * indicates the number of bytes addressable at the returned address.
> + */
> +void *odp_buffer_segment_map(odp_buffer_t buf, odp_buffer_segment_t seg,
> +                            size_t *seglen);
> +
> +/**
> + * Unmap a buffer segment
> + *
> + * @param[in] seg     Buffer segment handle
> + *
> + * @note This routine is used to unmap a buffer segment previously
> + * mapped by odp_buffer_segment_map(). Following this call,
> + * applications MUST NOT attempt to reference the segment via any
> + * pointer returned from a previous odp_buffer_segment_map() call
> + * referring to it. It is intended to allow certain NUMA
> + * architectures to better manage the coherency of mapped segments.
> + * For non-NUMA architectures this routine will be a no-op. Note that
> + * implementations SHOULD implicitly unmap all buffer segments
> + * whenever a buffer is added to a queue as this indicates that the
> + * caller is relinquishing control of the buffer.
> + */
> +void odp_buffer_segment_unmap(odp_buffer_segment_t seg);
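
For illustration, a sketch of walking a segmented buffer with the segment APIs above (here simply summing every byte). The buffer handle is assumed to come from elsewhere and sum_buffer_bytes() is a hypothetical helper:

    #include <odp.h>

    static uint32_t sum_buffer_bytes(odp_buffer_t buf)
    {
        uint32_t sum = 0;
        odp_buffer_segment_t seg = odp_buffer_segment_next(buf, ODP_SEGMENT_START);

        while (seg != ODP_SEGMENT_NULL) {
            size_t seglen, i;
            uint8_t *p = odp_buffer_segment_map(buf, seg, &seglen);

            if (p == NULL)
                break;

            for (i = 0; i < seglen; i++)
                sum += p[i];

            odp_buffer_segment_unmap(seg);           /* done with this mapping */
            seg = odp_buffer_segment_next(buf, seg); /* ODP_SEGMENT_NULL after last */
        }

        return sum;
    }
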
> +
> +/**
> + * Get start address for a specified buffer offset
> + *
> + * @param[in]  buf     Buffer handle
> + * @param[in]  offset  Byte offset within the buffer to be addressed
> + * @param[out] seglen  Returned number of bytes in this buffer
> + *                     segment available at returned address
> + *
> + * @return             Offset start address or NULL
> + *
> + * @note This routine is used to obtain addressability to a segment
> + * within a buffer at a specified byte offset. Note that because the
> + * offset is independent of any implementation-defined physical
> + * segmentation the returned seglen may be “short” and will range from
> + * 1 to whatever physical segment size is used by the underlying
> + * implementation.
> + */
> +void *odp_buffer_offset_map(odp_buffer_t buf, size_t offset,
> +                           size_t *seglen);
> +
> +/**
> + * Unmap a buffer segment by offset
> + *
> + * @param[in] buf    Buffer handle
> + * @param[in] offset Buffer offset
> + *
> + * @note This routine is used to unmap a buffer segment previously
> + * mapped by odp_buffer_offset_map(). Following this call
> + * the application MUST NOT attempt to reference the segment via any
> + * pointer returned by a prior odp_buffer_offset_map() call relating
> + * to this offset. It is intended to allow certain NUMA architectures
> + * to better manage the coherency of mapped segments. For non-NUMA
> + * architectures this routine will be a no-op. Note that
> + * implementations SHOULD implicitly unmap all buffer segments
> + * whenever a buffer is added to a queue as this indicates that the
> + * caller is relinquishing control of the buffer.
> + */
> +void odp_buffer_offset_unmap(odp_buffer_t buf, size_t offset);
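
For comparison, the offset-based equivalent below needs no knowledge of physical segmentation; each map call may return a "short" seglen, so the copy advances by whatever was actually addressable. copy_out() is a hypothetical helper and len is assumed not to exceed the buffer size:

    #include <odp.h>
    #include <string.h>

    static void copy_out(odp_buffer_t buf, uint8_t *dst, size_t len)
    {
        size_t offset = 0;

        while (offset < len) {
            size_t seglen;
            void *src = odp_buffer_offset_map(buf, offset, &seglen);

            if (src == NULL)
                break;

            if (seglen > len - offset)
                seglen = len - offset;      /* do not run past the request */

            memcpy(dst + offset, src, seglen);
            odp_buffer_offset_unmap(buf, offset);
            offset += seglen;
        }
    }
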
> +
> +/**
> + * Split a buffer into two buffers at a specified split point
> + *
> + * @param[in] buf    Handle of buffer to split
> + * @param[in] offset Byte offset within buf to split buffer
> + *
> + * @return           Buffer handle of the created split buffer
> + *
> + * @note This routine splits a buffer into two buffers at the
> + * specified byte offset. The odp_buffer_t returned by the function
> + * is the handle of the new buffer created at the split point. If the
> + * original buffer was allocated from a buffer pool then the split is
> + * allocated from the same pool. If the original buffer was size
> + * bytes in length then upon return the original buffer is of size
> + * offset while the split buffer is of size (size-offset).
> + *
> + * @note Upon return from this function, the system metadata for both
> + * buffers has been updated appropriately by the call since system
> + * metadata maintenance is the responsibility of the ODP
> + * implementation. Any required updates to the user metadata is the
> + * responsibility of the caller.
> + */
> +odp_buffer_t odp_buffer_split(odp_buffer_t buf, size_t offset);
> +
> +/**
> + * Join two buffers into a single buffer
> + *
> + * @param[in] buf1  Buffer handle of first buffer to join
> + * @param[in] buf2  Buffer handle of second buffer to join
> + *
> + * @return          Buffer handle of the joined buffer
> + *
> + * @note This routine joins two buffers into a single buffer. Both
> + * buf1 and buf2 MUST be from the same buffer pool and the resulting
> + * joined buffer will be an element of that same pool. The
> + * application MUST NOT assume that either buf1 or buf2 survive the
> + * join or that the returned joined buffer is contiguous with or
> + * otherwise related to the input buffers. An implementation SHOULD
> + * free either or both input buffers if they are not reused as part of
> + * the construction of the returned joined buffer. If the join cannot
> + * be performed (e.g., if the two input buffers are not from the same
> + * buffer pool, insufficient space in the target buffer pool, etc.)
> + * then ODP_BUFFER_INVALID SHOULD be returned to indicate that the
> + * operation could not be performed, and an appropriate errno set. In
> + * such case the input buffers MUST NOT be freed as part of the failed
> + * join attempt and should be unchanged from their input values and
> + * content.
> + *
> + * @note The result of odp_buffer_join() is the logical concatenation
> + * of the two buffers using an implementation-defined buffer
> + * aggregation mechanism. The application data contents of the
> + * returned buffer is identical to that of the two joined input
> + * buffers however certain associated metadata (e.g., information
> + * about the buffer size) will likely differ.
> + *
> + * @note If user metadata is present in the buffer pool containing the
> + * input buffers, then the user metadata associated with the returned
> + * buffer MUST be copied by this routine from the source buf1.
> + */
> +odp_buffer_t odp_buffer_join(odp_buffer_t buf1, odp_buffer_t buf2);
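
A sketch of how split and join are intended to compose. It assumes odp_buffer_split() also returns ODP_BUFFER_INVALID when the split cannot be performed, and it uses odp_buffer_free() from the base buffer API; the helper name and hdr_len parameter are illustrative:

    #include <odp.h>

    static odp_buffer_t split_and_rejoin(odp_buffer_t buf, size_t hdr_len)
    {
        odp_buffer_t tail, joined;

        tail = odp_buffer_split(buf, hdr_len);
        if (tail == ODP_BUFFER_INVALID)
            return buf;                 /* split not performed, buf unchanged */

        /* ... process 'buf' (now hdr_len bytes) and 'tail' separately ... */

        joined = odp_buffer_join(buf, tail);
        if (joined == ODP_BUFFER_INVALID) {
            /* per the note above, both inputs remain valid on failure */
            odp_buffer_free(tail);
            return buf;
        }

        return joined;                  /* callers must adopt the returned handle */
    }
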
> +
> +/**
> + * Trim a buffer at a specified trim point
> + *
> + * @param[in] buf    buffer handle of buffer to trim
> + * @param[in] offset byte offset within buf to trim
> + *
> + * @return           Handle of the trimmed buffer or ODP_BUFFER_INVALID
> + *                   if the operation was not performed
> + *
> + * @note This routine discards bytes from the end of a buffer. It is
> + * logically equivalent to a split followed by a free of the split
> + * portion of the input buffer. The input offset must be less than or
> + * equal to the odp_buffer_size() of the input buffer. Upon
> + * successful return the odp_buffer_size() routine would now return
> + * offset as the size of the trimmed buffer. Note that the returned
> + * odp_buffer_t may not necessarily be the same as the input
> + * odp_buffer_t. The caller should use the returned value when
> + * referencing the trimmed buffer instead of the original in case they
> + * are different.
> + *
> + * @note If the input buf contains user metadata, then this data MUST
> + * be copied to the returned buffer if needed by the API
> + * implementation.
> + */
> +odp_buffer_t odp_buffer_trim(odp_buffer_t buf, size_t offset);
> +
> +/**
> + * Extend a buffer for a specified number of bytes
> + *
> + * @param[in] buf  buffer handle of buffer to expand
> + * @param[in] ext  size, in bytes, of the extent to add to the
> + *                 existing buffer.
> + *
> + * @return         Handle of the extended buffer or ODP_BUFFER_INVALID
> + *                 if the operation was not performed
> + *
> + * @note This routine extends a buffer by increasing its size by ext
> + * bytes. It is logically equivalent to an odp_buffer_join() of a
> + * buffer of size ext to the original buffer. Upon successful return
> + * the odp_buffer_size() routine would now return size+ext as the size
> + * of the extended buffer.
> + *
> + * @note Note that the returned odp_buffer_t may not necessarily be the
> + * same as the input odp_buffer_t. The caller should use the returned
> + * value when referencing the extended buffer instead of the original
> + * in case they are different. If the input buf contains user meta
> + * data, then this data MUST be copied to the returned buffer if
> + * needed by the API implementation.
> + */
> +odp_buffer_t odp_buffer_extend(odp_buffer_t buf, size_t ext);
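
A brief sketch of trim and extend. Since either call may hand back a different handle, the result always replaces the caller's handle; resize_buffer() and its parameters are illustrative only, and final_len is assumed not to exceed the extended size:

    #include <odp.h>

    static odp_buffer_t resize_buffer(odp_buffer_t buf, size_t final_len, size_t pad)
    {
        odp_buffer_t tmp;

        tmp = odp_buffer_extend(buf, pad);
        if (tmp == ODP_BUFFER_INVALID)
            return buf;                 /* extend not performed */
        buf = tmp;

        tmp = odp_buffer_trim(buf, final_len);
        if (tmp == ODP_BUFFER_INVALID)
            return buf;                 /* trim not performed */

        return tmp;                     /* odp_buffer_size() now returns final_len */
    }
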
> +
> +/**
> + * Clone a buffer, returning an exact copy of it
> + *
> + * @param[in] buf  buffer handle of buffer to duplicate
> + *
> + * @return         Handle of the duplicated buffer or ODP_BUFFER_INVALID
> + *                 if the operation was not performed
> + *
> + * @note This routine allows an ODP buffer to be cloned in an
> + * implementation-defined manner. The application data contents of
> + * the returned odp_buffer_t is an exact copy of the application data
> + * of the input buffer. The implementation MAY perform this operation
> + * via reference counts, resegmentation, or any other technique it
> + * wishes to employ. The cloned buffer is an element of the same
> + * buffer pool as the input buf. If the input buf contains user meta
> + * data, then this data MUST be copied to the returned buffer by the
> + * ODP implementation.
> + */
> +odp_buffer_t odp_buffer_clone(odp_buffer_t buf);
> +
> +/**
> + * Copy a buffer, returning an exact copy of it
> + *
> + * @param[in] buf  buffer handle of buffer to copy
> + * @param[in] pool buffer pool to contain the copied buffer
> + *
> + * @return         Handle of the copied buffer or ODP_BUFFER_INVALID
> + *                 if the operation was not performed
> + *
> + * @note This routine allows an ODP buffer to be copied in an
> + * implementation-defined manner to a specified buffer pool. The
> + * specified pool may or may not be different from the source buffer’s
> + * pool. The application data contents of the returned odp_buffer_t
> + * is an exact separate copy of the application data of the input
> + * buffer. If the input buf contains user metadata, then this data
> + * MUST be copied to the returned buffer by the ODP implementation.
> + */
> +odp_buffer_t odp_buffer_copy(odp_buffer_t buf, odp_buffer_pool_t pool);
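
A short sketch contrasting clone (duplicate within the same pool) with copy (explicit destination pool); the archive pool and helper name are assumptions of the example:

    #include <odp.h>

    static odp_buffer_t archive_and_clone(odp_buffer_t buf,
                                          odp_buffer_pool_t archive_pool)
    {
        odp_buffer_t copy = odp_buffer_copy(buf, archive_pool);

        if (copy == ODP_BUFFER_INVALID)
            return ODP_BUFFER_INVALID;  /* e.g. archive pool exhausted */

        /* ... hand 'copy' to a logging/archival stage ... */

        return odp_buffer_clone(buf);   /* same-pool duplicate for further processing */
    }
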
> +
> +
> +/**
>   * @}
>   */
>
> diff --git a/platform/linux-generic/include/api/odp_buffer_pool.h
> b/platform/linux-generic/include/api/odp_buffer_pool.h
> index d04abf0..b71c727 100644
> --- a/platform/linux-generic/include/api/odp_buffer_pool.h
> +++ b/platform/linux-generic/include/api/odp_buffer_pool.h
> @@ -1,4 +1,4 @@
> -/* Copyright (c) 2013, Linaro Limited
> +/* Copyright (c) 2013-2014, Linaro Limited
>   * All rights reserved.
>   *
>   * SPDX-License-Identifier:     BSD-3-Clause
> @@ -8,7 +8,43 @@
>  /**
>   * @file
>   *
> - * ODP buffer pool
> + * @par Buffer Pools
> + * Buffers are elements of buffer pools that represent an equivalence
> + * class of buffer objects that are managed by a buffer pool manager.
> + * ODP implementations MAY support buffer pool managers implemented in
> + * hardware, software, or a combination of the two. An ODP
> + * implementation MUST support at least one buffer pool and MAY
> + * support as many as it wishes. The implementation MAY support one
> + * or more predefined buffer pools that are not explicitly allocated
> + * by an ODP application. It SHOULD also support application creation
> + * of buffer pools via the odp_buffer_pool_create() API, however it
> + * MAY restrict the types of buffers that can be so created.
> + *
> + * @par
> + * Buffer pools are represented by the abstract type odp_buffer_pool_t
> + * that is returned by buffer pool creation and lookup/enumeration
> + * routines. Applications refer to buffer pools via a name of
> + * implementation-defined maximum length that MUST be a minimum of
> + * eight characters in length and MAY be longer. It is RECOMMENDED
> + * that 32 character buffer pool names be supported to provide
> + * application naming flexibility. The supported maximum length of
> + * buffer pool names is exposed via the ODP_BUFFER_POOL_NAME_LEN
> + * predefined implementation limit.
> + *
> + * @par Segmented vs. Unsegmented Buffer Pools
> + * By default, the buffers in
> + * ODP buffer pools are logical buffers that support transparent
> + * segmentation managed by ODP on behalf of the application and have a
> + * rich set of associated semantics as described here.
> + * ODP_BUFFER_OPTS_UNSEGMENTED indicates that the buf_size specified
> + * for the pool should be regarded as a fixed buffer size for all pool
> + * elements and that segmentation support is not needed for the pool.
> + * This MAY result in greater efficiency on some implementations. For
> + * packet processing, a typical use of unsegmented pools would be in
> + * conjunction with classification rules that sort packets into
> + * different pools based on their lengths, thus ensuring that each
> + * packet occupies a single segment within an appropriately-sized
> + * buffer.
>   */
>
>  #ifndef ODP_BUFFER_POOL_H_
> @@ -34,43 +70,316 @@ extern "C" {
>  /** Invalid buffer pool */
>  #define ODP_BUFFER_POOL_INVALID   0
>
> -/** ODP buffer pool */
> -typedef uint32_t odp_buffer_pool_t;
> +/**
> + * Buffer initialization routine prototype
> + *
> + * @note Routines of this type MAY be passed as part of the
> + * odp_buffer_pool_init_t structure to be called whenever a
> + * buffer is allocated to initialize the user metadata
> + * associated with that buffer.
> + */
> +typedef void (odp_buf_init_t)(odp_buffer_t buf, void *buf_init_arg);
> +
> +/**
> + * Buffer pool parameters
> + *
> + * @param[in] buf_num    Number of buffers that pool should contain
> + * @param[in] buf_size   Size of application data in each buffer
> + * @param[in] buf_type   Buffer type
> + * @param[in] buf_opts   Buffer options
> + */
> +typedef struct odp_buffer_pool_param_t {
> +       size_t buf_num;             /**< Number of buffers in this pool */
> +       size_t buf_size;            /**< Application data size of each
> buffer */
> +       odp_buffer_type_e buf_type; /**< Buffer type */
> +       odp_buffer_opts_e buf_opts; /**< Buffer options */
> +} odp_buffer_pool_param_t;          /**< Type of buffer pool parameter
> struct */
>
> +/**
> + * Buffer pool initialization parameters
> + *
> + * @param[in] udata_size     Size of the user metadata for each buffer
> + * @param[in] buf_init       Function pointer to be called to initialize
> the
> + *                           user metadata for each buffer in the pool.
> + * @param[in] buf_init_arg   Argument to be passed to buf_init().
> + *
> + */
> +typedef struct odp_buffer_pool_init_t {
> +       size_t udata_size;         /**< Size of user metadata for each
> buffer */
> +       odp_buf_init_t *buf_init;  /**< Buffer initialization routine to
> use */
> +       void *buf_init_arg;        /**< Argument to be passed to
> buf_init() */
> +} odp_buffer_pool_init_t;          /**< Type of buffer initialization
> struct */
>
>  /**
>   * Create a buffer pool
>   *
> - * @param name      Name of the pool (max ODP_BUFFER_POOL_NAME_LEN - 1
> chars)
> - * @param base_addr Pool base address
> - * @param size      Pool size in bytes
> - * @param buf_size  Buffer size in bytes
> - * @param buf_align Minimum buffer alignment
> - * @param buf_type  Buffer type
> + * @param[in] name           Name of the pool
> + *                           (max ODP_BUFFER_POOL_NAME_LEN - 1 chars)
> + *
> + * @param[in] params         Parameters controlling the creation of this
> + *                           buffer pool
>   *
> - * @return Buffer pool handle
> + * @param[in] init_params    Parameters controlling the initialization of
> + *                           this buffer pool
> + *
> + * @return Buffer pool handle or ODP_BUFFER_POOL_NULL with errno set
> + *
> + * @note This routine is used to create a buffer pool. It takes three
> + * arguments: the name of the pool to be created, a parameter
> + * structure that controls the pool creation, and an optional
> + * parameter that controls pool initialization. In the creation
> + * parameter structure, the application specifies the number of
> + * buffers that the pool should contain as well as the application
> + * data size for each buffer in the pool, the type of buffers it
> + * should contain, and their associated options. In the
> + * initialization parameters, the application specifies the size of
> + * the user metadata that should be associated with each buffer in
> + * the pool. If no user metadata is required, the init_params SHOULD
> + * be specified as NULL. If user metadata is requested, then
> + * udata_size SHOULD be set to the requested size of the per-buffer
> + * user metadata. Also specified is the address of an
> + * application-provided buffer initialization routine to be called for
> + * each buffer in the pool at the time the pool is initialized, or
> + * when the buffer is allocated. If no application buffer
> + * initialization is needed, then buf_init and buf_init_arg SHOULD be
> + * set to NULL.
>   */
>  odp_buffer_pool_t odp_buffer_pool_create(const char *name,
> -                                        void *base_addr, uint64_t size,
> -                                        size_t buf_size, size_t buf_align,
> -                                        int buf_type);
> +                                        odp_buffer_pool_param_t *params,
> +                                        odp_buffer_pool_init_t
> *init_params);
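
A sketch of pool creation with these parameter structures, including a user-metadata initializer. The pool dimensions, the my_udata_t type, and ODP_BUFFER_OPTS_NONE as the "no options" value are assumptions of the example:

    #include <odp.h>

    typedef struct {                    /* hypothetical per-buffer user metadata */
        uint32_t flow_id;
    } my_udata_t;

    /* Called per buffer so its user metadata starts in a known state. */
    static void my_buf_init(odp_buffer_t buf, void *arg)
    {
        my_udata_t *ud = odp_buffer_udata_addr(buf);

        (void)arg;
        if (ud != NULL)
            ud->flow_id = 0;
    }

    static odp_buffer_pool_t create_pkt_pool(void)
    {
        odp_buffer_pool_param_t params;
        odp_buffer_pool_init_t  init;

        params.buf_num  = 1024;
        params.buf_size = 1856;         /* expected application data per buffer */
        params.buf_type = ODP_BUFFER_TYPE_PACKET;
        params.buf_opts = ODP_BUFFER_OPTS_NONE;

        init.udata_size   = sizeof(my_udata_t);
        init.buf_init     = my_buf_init;
        init.buf_init_arg = NULL;

        return odp_buffer_pool_create("pkt_pool", &params, &init);
    }
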
>
> +/**
> + * Destroy a buffer pool previously created by odp_buffer_pool_create()
> + *
> + * @param[in] pool    Handle of the buffer pool to be destroyed
> + *
> + * @return            0 on Success, -1 on Failure.
> + *
> + * @note This routine destroys a previously created buffer pool.
> + * Attempts to destroy a predefined buffer pool will be rejected
> + * since the application did not create it.  Results are undefined if
> + * an attempt is made to destroy a buffer pool that contains allocated
> + * or otherwise active buffers.
> + */
> +int odp_buffer_pool_destroy(odp_buffer_pool_t pool);
>
>  /**
>   * Find a buffer pool by name
>   *
> - * @param name      Name of the pool
> + * @param[in] name  Name of the pool
>   *
>   * @return Buffer pool handle, or ODP_BUFFER_POOL_INVALID if not found.
>   */
>  odp_buffer_pool_t odp_buffer_pool_lookup(const char *name);
>
> +/**
> + * Get the next buffer pool from its predecessor
> + *
> + * @param[in]  pool       Buffer pool handle
> + * @param[out] name       Name of the pool
> + *                        (max ODP_BUFFER_POOL_NAME_LEN - 1 chars)
> + * @param[out] udata_size Size of user metadata used by this pool.
> + * @param[out] params     Output structure for pool parameters
> + * @param[out] predef     Predefined (1) or Created (0).
> + *
> + * @return                Buffer pool handle
> + *
> + * @note This routine returns the abstract identifier
> + * (odp_buffer_pool_t) of a buffer pool and is used to obtain the list
> + * of all buffer pools. In this manner an application can discover
> + * both application created and implementation predefined buffer pools
> + * and their characteristics. The input specifies the previous buffer
> + * pool identifier. There are three use cases for this
> + * routine:
> + *
> + * -# If the input pool is ODP_BUFFER_POOL_START then the buffer pool
> handle
> + * returned is that of the first buffer pool in the list.
> + * ODP_BUFFER_POOL_NULL MAY be used as a synonym for ODP_BUFFER_POOL_START
> + * if desired.
> + *
> + * -# If the input pool is not the last element in the buffer pool list
> + * then the buffer pool handle of the next buffer pool following  pool is
> + * returned.
> + *
> + * -# If the input pool is the buffer pool handle of the last buffer pool
> + * in the list then ODP_BUFFER_POOL_NULL is returned.
> + *
> + * @note Returned with the buffer pool handle is the name of the pool as
> + * well as its dimensions, type of buffers it contains, and a flag
> + * that says whether the pool is predefined or was created by the
> + * application. Note that the buf_size reported for a buffer pool is
> + * simply the declared expected size of the buffers in the pool and
> + * serves only to estimate the total amount of application data that
> + * can be stored in the pool. Actual sizes of individual buffers
> + * within the pool are dynamic and variable since physical buffer
> + * segments MAY be aggregated to create buffers of arbitrary size (up
> + * to the pool memory limits). Note that for predefined buffer pools,
> + * some implementations MAY return the physical segment counts and
> + * sizes used to construct the pool as output of this routine.
> + */
> +odp_buffer_pool_t odp_buffer_pool_next(odp_buffer_pool_t pool,
> +                                      char *name, size_t *udata_size,
> +                                      odp_buffer_pool_param_t *params,
> +                                      int *predef);
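
A sketch of enumerating pools with this routine, using the ODP_BUFFER_POOL_START/ODP_BUFFER_POOL_NULL sentinels described above; list_pools() is a hypothetical diagnostic helper:

    #include <odp.h>
    #include <stdio.h>

    static void list_pools(void)
    {
        char name[ODP_BUFFER_POOL_NAME_LEN];
        size_t udata_size;
        odp_buffer_pool_param_t params;
        int predef;
        odp_buffer_pool_t pool;

        pool = odp_buffer_pool_next(ODP_BUFFER_POOL_START, name,
                                    &udata_size, &params, &predef);

        while (pool != ODP_BUFFER_POOL_NULL) {
            printf("pool %s: %zu bufs of %zu bytes (%s)\n",
                   name, params.buf_num, params.buf_size,
                   predef ? "predefined" : "created");

            pool = odp_buffer_pool_next(pool, name, &udata_size,
                                        &params, &predef);
        }
    }
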
> +
> +/**
> + * Get the high/low watermarks for a buffer pool
> + *
> + * @param[in]  pool     Handle of the buffer pool
> + * @param[out] high_wm  The high water mark of the designated buffer pool
> + * @param[out] low_wm   The low water mark of the designated buffer pool
> + *
> + * @return Success or ODP_BUFFER_POOL_INVALID if pool is unknown
> + *                 or ODP_BUFFER_POOL_NO_WATERMARKS if no watermarks
> + *                 are associated with this buffer pool.
> + *
> + * @note This routine gets the high/low watermarks associated with a
> + * given buffer pool. If the buffer pool does not have or support
> + * watermarks then an error will be returned and both high_wm and
> + * low_wm will be unchanged.
> + *
> + * @note It is RECOMMENDED that buffer pools of all types support the
> + * setting and getting of watermarks for use in flow control
> + * processing.  Watermarks are designed to trigger flow control
> + * actions based on utilization levels of a buffer pool. When the
> + * number of free buffers in the buffer pool hits the configured low
> + * watermark for the pool, the pool asserts a low watermark condition
> + * and an implementation-defined action in response to this condition
> + * is triggered. Once in a low watermark state, the condition is
> + * maintained until the number of free buffers reaches the configured
> + * high watermark. At this point the low watermark condition is
> + * deasserted and normal pool processing resumes. Having separate high
> + * and low watermarks permits configurable hysteresis to avoid jitter
> + * in handling transient buffer shortages in the pool.
> + *
> + * @note In general, two types of actions are common. The first is to
> + * control Random Early Detection (RED) or Weighted RED (WRED)
> + * processing for the pool, while the second is to control IEEE
> + * 802.1Qbb priority-based flow control (PFC) processing for so-called
> + * “lossless Ethernet” support. The use of watermarks for flow control
> + * processing is most often used for pools containing packets and this
> + * is discussed in further detail in the Class of Service (CoS) ODP
> + * Classification APIs.
> + */
> +int odp_buffer_pool_watermarks(odp_buffer_pool_t pool,
> +                              size_t *high_wm, size_t *low_wm);
> +
> +/**
> + * Set the high/low watermarks for a buffer pool
> + *
> + * @param[in] pool      Handle of the buffer pool
> + * @param[in] high_wm   The high water mark of the designated buffer pool
> + * @param[in] low_wm    The low water mark of the designated buffer pool
> + *
> + * @return Success or ODP_BUFFER_POOL_INVALID if pool is unknown
> + *                 or ODP_BUFFER_POOL_NO_WATERMARKS if no watermarks
> + *                 are associated with this buffer pool.
> + *
> + * @note This routine sets the high/low watermarks associated with a
> + * specified buffer pool. If the buffer pool does not support
> + * watermarks then errno ODP_BUFFER_POOL_NO_WATERMARKS is set and no
> + * function is performed.
> + */
> +int odp_buffer_pool_set_watermarks(odp_buffer_pool_t pool,
> +                                  size_t high_wm, size_t low_wm);
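
A minimal sketch of probing and then setting watermarks, assuming 0 means success and with the thresholds (25% high, 10% low) chosen purely to illustrate the hysteresis described above:

    #include <odp.h>

    static void configure_flow_control(odp_buffer_pool_t pool, size_t buf_num)
    {
        size_t high_wm, low_wm;

        /* A nonzero return (e.g. ODP_BUFFER_POOL_NO_WATERMARKS) means the
         * pool has no watermark support, so there is nothing to configure. */
        if (odp_buffer_pool_watermarks(pool, &high_wm, &low_wm) != 0)
            return;

        /* Assert the low-watermark condition when ~10% of buffers remain
         * free; clear it once 25% are free again. */
        odp_buffer_pool_set_watermarks(pool, buf_num / 4, buf_num / 10);
    }
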
> +
> +/**
> + * Get the headroom for a packet buffer pool
> + *
> + * @param[in] pool      Handle of the buffer pool
> + *
> + * @return              The headroom for the pool.  If the pool is
> invalid,
> + *                      returns -1 and errno set to
> ODP_BUFFER_POOL_INVALID.
> + *
> + * @note This routine returns the headroom associated with the buffer
> + * pool.  This is the headroom that will be set for packets allocated
> + * from this packet buffer pool.
> + */
> +size_t odp_buffer_pool_headroom(odp_buffer_pool_t pool);
> +
> +/**
> + * Set the headroom for a packet buffer pool
> + *
> + * @param[in] pool      Handle of the buffer pool
> + * @param[in] hr        The headroom for the pool
> + *
> + * @return              0 on Success or -1 on error.  For errors, errno
> set to
> + *                      ODP_BUFFER_POOL_INVALID if pool is unknown
> + *                      or ODP_INVALID_RANGE if hr exceeds
> + *                      ODP_PACKET_MAX_HEADROOM
> + *
> + * @note This routine sets the default headroom associated with
> + * buffers allocated from this packet pool.  Note that headroom is a
> + * per-packet attribute.  The headroom associated with the buffer pool
> + * is the default headroom to assign to a packet allocated from this
> + * buffer pool by the odp_packet_alloc() routine. By contrast, the
> + * odp_cos_set_headroom() classification API sets the default headroom
> + * to assign to a packet by the classifier for packets matching a
> + * particular Class of Service (CoS).  The allowable range of
> + * supported headroom sizes is subject to the ODP_PACKET_MAX_HEADROOM
> + * limit defined by the implementation.  The valid range for hr is
> + * 0..ODP_PACKET_MAX_HEADROOM.
> + *
> + * @note Headroom serves two purposes.  The first is to reserve a prefix
> area
> + * of buffers that will hold packets for header expansion.  Applications
> + * can add headers to packets via the odp_packet_push_headroom() to make
> + * headroom space available for new headers.
> + *
> + * @note The second use of headroom is to control packet alignment
> + * within buffers.  The buffers in a buffer pool MUST be "naturally
> + * aligned" for addressing purposes by the implementation.  It is
> + * RECOMMENDED that this be cache aligned.  Because a standard
> + * Ethernet header is 14 octets in length, it is usually convenient to
> + * offset packets by 2 octets so that the following Layer 3 header
> + * (typically IPv4 or IPv6) is naturally aligned on a word boundary.
> + * So applications SHOULD specify an offset that reflects the packet
> + * alignment they wish to see.  For example, a call like
> + * odp_buffer_pool_set_headroom(pool, hr+2); would force packets to be
> + * offset by two bytes to achieve the desired Layer 3 alignment while
> + * also reserving hr bytes of headroom for application use.
> + */
> +int odp_buffer_pool_set_headroom(odp_buffer_pool_t pool, size_t hr);
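
A small sketch of the alignment idiom mentioned above: reserve hr bytes of application headroom plus two extra bytes so the Layer 3 header behind a 14-byte Ethernet header lands on a word boundary. The helper name is illustrative:

    #include <odp.h>

    static int set_pool_headroom(odp_buffer_pool_t pool, size_t hr)
    {
        if (odp_buffer_pool_set_headroom(pool, hr + 2) != 0)
            return -1;          /* e.g. exceeds ODP_PACKET_MAX_HEADROOM */

        /* New default headroom for packets subsequently allocated from pool */
        return (int)odp_buffer_pool_headroom(pool);
    }
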
> +
> +/**
> + * Get the tailroom for a packet buffer pool
> + *
> + * @param[in] pool      Handle of the buffer pool
> + *
> + * @return              The tailroom for the pool.  If the pool is
> invalid,
> + *                      returns -1 and errno set to
> ODP_BUFFER_POOL_INVALID.
> + *
> + * @note This routine returns the tailroom associated with buffers
> + * allocated from a packet buffer pool.
> + */
> +size_t odp_buffer_pool_tailroom(odp_buffer_pool_t pool);
> +
> +/**
> + * Set the tailroom for a packet buffer pool
> + *
> + * @param[in] pool      Handle of the buffer pool
> + * @param[in] tr        The tailroom for the pool
> + *
> + * @return              0 on Success or -1 on error.  For errors, errno
> set to
> + *                      ODP_BUFFER_POOL_INVALID if pool is unknown
> + *                      or ODP_INVALID_RANGE if tr exceeds
> + *                      ODP_PACKET_MAX_TAILROOM
> + *
> + * @note This routine sets the tailroom associated with buffers
> + * allocated from a packet pool.  The allowable range of supported
> + * tailroom sizes is subject to the ODP_PACKET_MAX_TAILROOM limit
> + * defined by the implementation. The valid range for tr is
> + * 0..ODP_PACKET_MAX_TAILROOM.
> + */
> +int odp_buffer_pool_set_tailroom(odp_buffer_pool_t pool, size_t tr);
>
>  /**
>   * Print buffer pool info
>   *
> - * @param pool      Pool handle
> + * @param[in] pool   Pool handle
>   *
> + * @note This is a diagnostic routine that prints statistics regarding
> + * the specified buffer pool to the ODP LOG. This routine is OPTIONAL
> + * and if present its output is implementation-defined.
>   */
>  void odp_buffer_pool_print(odp_buffer_pool_t pool);
>
> diff --git a/platform/linux-generic/include/api/odp_config.h
> b/platform/linux-generic/include/api/odp_config.h
> index 906897c..65cc5b5 100644
> --- a/platform/linux-generic/include/api/odp_config.h
> +++ b/platform/linux-generic/include/api/odp_config.h
> @@ -49,6 +49,12 @@ extern "C" {
>  #define ODP_CONFIG_PKTIO_ENTRIES 64
>
>  /**
> + * Packet processing limits
> + */
> +#define ODP_CONFIG_BUF_SEG_SIZE (512*3)
> +#define ODP_CONFIG_BUF_MAX_SIZE (ODP_CONFIG_BUF_SEG_SIZE*7)
> +
> +/**
>   * @}
>   */
>
> diff --git a/platform/linux-generic/include/api/odp_packet.h
> b/platform/linux-generic/include/api/odp_packet.h
> index 688e047..6d36b02 100644
> --- a/platform/linux-generic/include/api/odp_packet.h
> +++ b/platform/linux-generic/include/api/odp_packet.h
> @@ -1,4 +1,4 @@
> -/* Copyright (c) 2013, Linaro Limited
> +/* Copyright (c) 2013-2014, Linaro Limited
>   * All rights reserved.
>   *
>   * SPDX-License-Identifier:     BSD-3-Clause
> @@ -8,7 +8,262 @@
>  /**
>   * @file
>   *
> - * ODP packet descriptor
> + * @par ODP Packet Management APIs
> + * Described here are the fundamental
> + * concepts and supporting APIs of the ODP Packet Management routines.
> + * All conforming ODP implementations MUST provide these data types
> + * and APIs. Latitude in how routines MAY be implemented is noted
> + * when applicable.
> + *
> + * @par Inherited and New Concepts
> + * As a type of buffer, a packet is
> + * allocated from its containing buffer pool, created via
> + * odp_buffer_pool_create() with a buffer type of
> + * ODP_BUFFER_TYPE_PACKET. Packets are referenced by an abstract
> + * odp_packet_t handle defined by each implementation.
> + *
> + * @par
> + * Packet objects are normally created at ingress when they arrive
> + * at a source odp_pktio_t and are received by an application either
> + * directly or (more typically) via a scheduled receive queue. They
> + * MAY be implicitly freed when they are transmitted to an output
> + * odp_pktio_t via an associated transmit queue, or freed directly via
> + * the odp_packet_free() API.
> + *
> + * @par
> + * Packets contain additional system meta data, beyond that found
> + * in buffers, that is populated by the parse function of the ODP
> + * classifier. See below for a discussion of this meta data and the
> + * accessor functions provided for application reference to it.
> + *
> + * @par
> + * Occasionally an application may originate a packet itself,
> + * either de novo or by deriving it from an existing packet, and APIs
> + * are provided to assist in these cases as well. Application-created
> + * packets can be recycled back through a loopback interface to reparse
> + * and reclassify them, or the application can explicitly re-invoke the
> + * parser or do its own parsing as desired. This can also occur as a
> + * result of packet decryption or decapsulation when dealing with
> + * ingress tunnels. See the ODP classification design document for
> + * further details. Additionally, the meta data set as a result of
> + * parsing MAY be directly set by the application as needed.
> + *
> + * @par Packet Structure and Concepts
> + * A packet consists of a sequence
> + * of octets conforming to an architected format, such as Ethernet,
> + * that can be received and transmitted via the ODP pktio abstraction.
> + * Packets have a length, which is the number of bytes in the packet.
> + * Packet data in ODP is referenced via offsets since these reflect
> + * the logical contents and structure of a packet independent of how
> + * particular ODP implementations store that data.
> + *
> + * @par
> + * These concepts are shown in the following diagram:
> + *
> + * @image html packet.png "ODP Packet Structure" width=\textwidth
> + * @image latex packet.eps "ODP Packet Structure" width=\textwidth
> + *
> + * @par
> + * Packet data consists of zero or more headers, followed by zero or
> + * more bytes of payload, followed by zero or more trailers.
> + *
> + * @par Packet Segments and Addressing
> + * Network SoCs use various methods and techniques to store and
> + * process packets efficiently.
> + * These vary considerably from platform to platform, so to ensure
> + * portability across them ODP adopts certain conventions for
> + * referencing packets.
> + *
> + * @par
> + * ODP APIs use a handle of type odp_packet_t to refer to packet
> + * objects. Associated with packets are various bits of system meta
> + * data that describe the packet. By referring to the meta data, ODP
> + * applications accelerate packet processing by minimizing the need to
> + * examine packet data. This is because the packet meta data is
> + * populated by parsing and classification functions that are coupled
> + * to ingress processing that occurs prior to a packet being presented
> + * to the application via the ODP scheduler.
> + *
> + * @par
> + * When an ODP application needs to examine the contents of a
> + * packet, it requests addressability to it via a mapping API that
> + * makes the packet (or a contiguously addressable segment of it)
> + * available for coherent access by the application. While ODP
> + * applications MAY request that packets be stored in unsegmented
> + * buffer pools, not all platforms supporting ODP are able to provide
> + * contiguity guarantees for packets and as a result such requests may
> + * either fail or else result in degraded performance compared to
> + * native operation.
> + *
> + * @par
> + * Instead, ODP applications SHOULD assume that the underlying
> + * implementation stores packets in segments of implementation-defined
> + * and managed size. These represent the contiguously addressable
> + * portions of a packet that the application may refer to via normal
> + * memory accesses. ODP provides APIs that allow applications to
> + * operate on packet segments in an efficient and portable manner as
> + * needed. By combining these with the meta data provided for
> + * packets, ODP applications can operate in a fully
> + * platform-independent manner while still achieving optimal
> + * performance across the range of platforms that support ODP.
> + *
> + * @par
> + * The use of segments for packet addressing and their
> + * relationship to meta data is shown in this diagram:
> + *
> + * @image html segments.png "ODP Packet Segmentation Structure"
> width=\textwidth
> + * @image latex segments.eps "ODP Packet Segmentation Structure"
> width=\textwidth
> + *
> + * @par
> + * The packet meta data is set during parsing and identifies the
> + * starting offsets of the various headers contained in the packet.
> + * The packet itself is physically stored as a sequence of segments
> + * that are managed by the ODP implementation. Segment 0 is the first
> + * segment of the packet and is where the packet’s headroom and
> + * headers typically reside. Depending on the length of the packet,
> + * additional segments may be part of the packet and contain the
> + * remaining packet payload and tailroom. The application need not
> + * concern itself with segments except that when the application
> + * requires addressability to a packet it understands that
> + * addressability is provided on a per-segment basis. So, for
> + * example, if the application makes a call like
> + * odp_packet_payload_map() to obtain addressability to the packet
> + * payload, the returned seglen from that call is the number of bytes
> + * from the start of the payload that are contiguously addressable to
> + * the application from the returned payload address. This is because
> + * the following byte occupies a different segment that may be stored
> + * elsewhere. To obtain access to those bytes, the application simply
> + * requests addressability to that offset and it will be able to
> + * address the payload bytes that occupy segment 1, etc. Note that
> + * the returned seglen for any mapping call is always the lesser of
> + * the remaining packet length and the size of its containing segment.
> + * So a mapping request for segment 2, for example, would return a
> + * seglen that extends only to the end of the packet since the
> + * remaining bytes are part of the tailroom reserved for the packet
> + * and are not usable by the application until made available to it by
> + * an appropriate API call.
> + *
> + * @par Headroom and Tailroom
> + * Because data plane applications will
> + * often manipulate packets by adding or removing headers and/or
> + * trailers, ODP implementations MUST support the concepts of headroom
> + * and tailroom for packets. How implementations choose to support
> + * these concepts is unspecified by ODP.
> + *
> + * @par
> + * Headroom is an area that logically precedes the start of a
> + * packet and is reserved for the insertion of additional header
> + * information to the front of a packet. Typical use of headroom
> + * might be packet encapsulation as part of tunnel operations.
> + * Tailroom is a similar area that logically follows a packet reserved
> + * for the insertion of trailer information at the end of a packet.
> + * Typical use of tailroom might be in payload manipulation or in
> + * additional checksum insertion. The idea behind headroom and
> + * tailroom is to support efficient manipulation of packet headers
> + * and/or trailers by preallocating buffer space and/or meta data to
> + * support the insertion of packet headers and/or trailers while
> + * avoiding the overhead of more general split/join buffer operations.
> + *
> + * @par
> + * Note that not every application or communication protocol will
> + * need these and ODP implementations MAY impose restrictions or
> + * modifications on when and how these capabilities are used. For
> + * example, headroom MAY indicate the byte offset into a packet buffer
> + * at which packet data is received from an associated odp_pktio_t.
> + * An implementation MAY add to the requested headroom or tailroom for
> + * implementation-defined alignment or other reasons. Note also that
> + * implementations MUST NOT assume that headroom and/or tailroom is
> + * necessarily contiguous with any other segment of the packet unless
> + * the underlying buffer pool the packet has been allocated from has
> + * been explicitly defined as unsegmented. See the ODP Buffer API
> + * design for discussion of segmented vs. unsegmented buffers and
> + * their implementation models. This convention is observed
> + * automatically because every mapping call returns a corresponding
> + * seglen that tells the application the number of bytes it may
> + * reference from the address returned by that call. Applications
> + * MUST observe these limits to avoid programming errors and
> + * portability issues.
> + *
> + * @par Packet Parsing and Inflags
> + * ODP packets are intended to be
> + * processed by the ODP Classifier upon receipt. As part of its
> + * processing, the classifier parses information from the packet
> + * headers and makes this information available as system meta data so
> + * that applications using ODP do not have to reference packets or
> + * their headers directly for most processing. The set of headers
> + * supported by the ODP parse functions MUST include at minimum the
> + * following:
> + *
> + * - Layer 2: ARP, SNAP (recognition), VLAN (C-Tag and S-Tag)
> + * - Layer 3: IPv4, IPv6
> + * - Layer 4: TCP, UDP, ICMP, ICMPv6, IPsec (ESP and AH)
> + *
> + * @par
> + * Other protocols MAY be supported; however, ODP v1.0 does not
> + * define APIs for referencing them.
> + *
> + * @par
> + * Parsing results are stored as meta data associated with the
> + * packet. These include various precomputed offsets used for direct
> + * access to parsed headers as well as indicators of packet contents
> + * that are collectively referred to as inflags. Inflags are packet
> + * meta data that may be inspected or set via accessor functions as
> + * described below. Setters are provided to enable applications that
> + * create or modify packet headers to update these attributes
> + * efficiently. Applications that use them take responsibility for
> + * ensuring that the results are consistent. ODP itself does not
> + * validate an inflag setter to ensure that it reflects actual packet
> + * contents. Applications that wish this additional assurance should
> + * request an explicit packet reparse.
> + *
> + * @par Packet Outflags
> + * Packet transmission options are controlled by
> + * packet meta data collectively referred to as outflags. An
> + * application sets these to request various services related to
> + * packet transmission.
> + *
> + * @par
> + * Note: The outflags controlling checksum offload processing are
> + * overrides. That is, they have no effect unless they are set
> + * explicitly by the application. By default, checksum offloads are
> + * controlled by the corresponding settings of the odp_pktio_t through
> + * which a packet is transmitted. The purpose of these bits is to
> + * permit this offload processing to be overridden on a per-packet
> + * basis. Note that not every implementation may support such
> + * override capabilities, which is why the setters here return a
> + * success/failure indicator.
> + *
> + * @par Packet Headroom and Tailroom Routines
> + * Data plane applications frequently manipulate the headers and trailers
> + * associated with packets. These operations involve either stripping
> + * headers or trailers from packets or inserting new headers or
> + * trailers onto them. To enable this manipulation, ODP provides the
> + * notion of headroom and tailroom, as well as a set of APIs that
> + * enable their efficient manipulation.
> + *
> + * @par
> + * Headroom is a set of bytes that logically precede the start of
> + * a packet, enabling additional headers to be created that become
> + * part of the packet. Similarly, tailroom is a set of bytes that
> + * logically follow the end of a packet, enabling additional payload
> + * and/or trailers to be created that become part of the packet. Both
> + * headroom and tailroom are meta data associated with packets, and
> + * are assigned at packet creation.
> + *
> + * @par
> + * Packet headroom and tailroom is manipulated by the following
> + * routines that MUST be provided by conforming ODP implementations.
> + * These operations define push and pull operations. The convention
> + * is that push operations move away from packet data while pull
> + * operations move towards packet data. Alternately, push operations
> + * add to packet data, while pull operations remove packet data.
> + *
> + * @par
> + * These concepts are shown as operations on the packet diagram
> + * we saw previously:
> + *
> + * @image html hrtr.png "Headroom and Tailroom Manipulation"
> width=\textwidth
> + * @image latex hrtr.eps "Headroom and Tailroom Manipulation"
> width=\textwidth
>   */
>
>  #ifndef ODP_PACKET_H_
> @@ -21,7 +276,7 @@ extern "C" {
>  #include <odp_buffer.h>
>
>  /** @defgroup odp_packet ODP PACKET
> - *  Operations on a packet.
> + *
>   *  @{
>   */
>
> @@ -31,7 +286,7 @@ extern "C" {
>  typedef odp_buffer_t odp_packet_t;
>
>  /** Invalid packet */
> -#define ODP_PACKET_INVALID ODP_BUFFER_INVALID
> +#define ODP_PACKET_INVALID (odp_packet_t)(-1)
>
>  /** Invalid offset */
>  #define ODP_PACKET_OFFSET_INVALID ((uint32_t)-1)
> @@ -40,411 +295,2038 @@ typedef odp_buffer_t odp_packet_t;
>  /**
>   * ODP packet segment handle
>   */
> -typedef int odp_packet_seg_t;
> +typedef uint32_t odp_packet_segment_t;
>
>  /** Invalid packet segment */
> -#define ODP_PACKET_SEG_INVALID -1
> +#define ODP_PACKET_SEGMENT_INVALID (odp_packet_segment_t)(-1)
> +
> +/**
> + * Convert a buffer handle to a packet handle
> + *
> + * @param[in] buf  Buffer handle
> + *
> + * @return Packet handle
> + *
> + * @note This routine converts a buffer handle to a packet handle.
> + * Only meaningful if buffer is of type ODP_BUFFER_TYPE_PACKET.
> + * Results are undefined otherwise.
> + */
> +odp_packet_t odp_packet_from_buffer(odp_buffer_t buf);
> +
> +/**
> + * Convert a packet handle to a buffer handle
> + *
> + * @param[in] pkt  Packet handle
> + *
> + * @return Buffer handle
> + *
> + * @note This routine converts a packet handle to a buffer handle.
> + * This routine always succeeds (assuming pkt is a valid packet
> + * handle) since all packets are buffers.
> + */
> +odp_buffer_t odp_packet_to_buffer(odp_packet_t pkt);
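
As a usage sketch, the conversion pair is typically guarded by the base odp_buffer_type() accessor when a generic buffer arrives (e.g. from a scheduled queue); the helper name is illustrative:

    #include <odp.h>

    static odp_packet_t buffer_to_packet_checked(odp_buffer_t buf)
    {
        if (odp_buffer_type(buf) != ODP_BUFFER_TYPE_PACKET)
            return ODP_PACKET_INVALID;  /* not a packet buffer */

        return odp_packet_from_buffer(buf);
    }
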
> +
> +/**
> + * Get the headroom for a packet buffer pool
> + *
> + * @param[in] pool      Handle of the buffer pool
> + *
> + * @return              The headroom for the pool. If the pool is invalid,
> + *                      returns -1 and errno set to
> ODP_BUFFER_POOL_INVALID.
> + */
> +size_t odp_buffer_pool_headroom(odp_buffer_pool_t pool);
>
>  /**
> - * ODP packet segment info
> + * Set the headroom for a packet buffer pool
> + *
> + * @param[in] pool      Handle of the buffer pool
> + * @param[in] hr        The headroom for the pool
> + *
> + * @return              0 on Success or -1 on error. For errors, errno
> set to
> + *                      ODP_BUFFER_POOL_INVALID if pool is unknown
> + *                      or ODP_INVALID_RANGE if hr exceeds
> + *                      ODP_PACKET_MAX_HEADROOM
> + *
> + * @note This routine sets the default headroom associated with
> + * buffers allocated from this packet pool. Note that headroom is a
> + * per-packet attribute. The headroom associated with the buffer pool
> + * is the default headroom to assign to a packet allocated from this
> + * buffer pool by the odp_packet_alloc() routine. By contrast, the
> + * odp_cos_set_headroom() classification API sets the default headroom
> + * to assign to a packet by the classifier for packets matching a
> + * particular Class of Service (CoS). The allowable range of
> + * supported headroom sizes is subject to the ODP_PACKET_MAX_HEADROOM
> + * limit defined by the implementation. The valid range for hr is
> + * 0..ODP_PACKET_MAX_HEADROOM.
> + *
> + * @note Note also that if the buffer is unsegmented, the specified
> + * headroom will subtract from the preallocated segments that comprise
> + * the pool. Applications need to take this into account when sizing
> + * unsegmented buffer pools.
> + *
> + * @note Specifying a new headroom for an existing buffer pool does not
> + * affect the headroom associated with existing buffers. The buffer
> + * pool headroom setting only affects new buffers allocated from the
> + * pool.
>   */
> -typedef struct odp_packet_seg_info_t {
> -       void   *addr;      /**< Segment start address */
> -       size_t  size;      /**< Segment maximum data size */
> -       void   *data;      /**< Segment data address */
> -       size_t  data_len;  /**< Segment data length */
> -} odp_packet_seg_info_t;
> +int odp_buffer_pool_set_headroom(odp_buffer_pool_t pool, size_t hr);
>
> +/**
> + * Get the tailroom for a packet buffer pool
> + *
> + * @param[in] pool      Handle of the buffer pool
> + *
> + * @return              The tailroom for the pool. If the pool is invalid,
> + *                      returns -1 and errno set to
> ODP_BUFFER_POOL_INVALID.
> + */
> +size_t odp_buffer_pool_tailroom(odp_buffer_pool_t pool);
> +
> +/**
> + * Set the tailroom for a packet buffer pool
> + *
> + * @param[in] pool      Handle of the buffer pool
> + * @param[in] tr        The tailroom for the pool
> + *
> + * @return              0 on Success or -1 on error. For errors, errno
> set to
> + *                      ODP_BUFFER_POOL_INVALID if pool is unknown
> + *                      or ODP_INVALID_RANGE if tr exceeds
> + *                      ODP_PACKET_MAX_TAILROOM
> + *
> + * @note This routine sets the tailroom associated with buffers
> + * allocated from a packet pool. The allowable range of supported
> + * tailroom sizes is subject to the ODP_PACKET_MAX_TAILROOM limit
> + * defined by the implementation. The valid range for tr is
> + * 0..ODP_PACKET_MAX_TAILROOM.
> + *
> + * @note Note also that if the buffer is unsegmented, the specified
> + * tailroom will subtract from the preallocated segments that comprise
> + * the pool. Applications need to take this into account when sizing
> + * unsegmented buffer pools.
> + *
> + * @par
> + * Specifying a new tailroom for an existing buffer pool does not
> + * affect the tailroom associated with existing buffers. The buffer
> + * pool tailroom setting only affects new buffers allocated from the
> + * pool.
> + */
> +int odp_buffer_pool_set_tailroom(odp_buffer_pool_t pool, size_t tr);
>
>  /**
> - * Initialize the packet
> + * Packet alloc
>   *
> - * Needs to be called if the user allocates a packet buffer, i.e. the
> packet
> - * has not been received from I/O through ODP.
> + * @param[in] pool    Pool handle for a pool of type
> ODP_BUFFER_TYPE_PACKET
>   *
> - * @param pkt  Packet handle
> + * @return Packet handle or ODP_PACKET_INVALID
> + *
> + * @note This routine is used to allocate a packet from a buffer pool
> + * of type ODP_BUFFER_TYPE_PACKET. The returned odp_packet_t is an
> + * opaque handle for the packet that can be used in further calls to
> + * manipulate the allocated packet. The value ODP_PACKET_INVALID is
> + * returned if the request cannot be satisfied. The length of the
> + * allocated packet is set to 0.
> + *
> + * @note If non-persistent user meta data is associated with the
> + * underlying buffer that contains the packet, the buf_init() routine
> + * specified as part of the containing buffer pool will be called as
> + * part of buffer allocation to enable the application to initialize
> + * the user meta data associated with it.
> + */
> +odp_packet_t odp_packet_alloc(odp_buffer_pool_t pool);
> +
> +/**
> + * Allocate a packet from a buffer pool of a specified length
> + *
> + * @param[in] pool  Pool handle
> + * @param[in] len   Length of packet requested
> + *
> + * @return          Packet handle or ODP_PACKET_INVALID
> + *
> + * @note This routine is used to allocate a packet of a given length
> + * from a packet buffer pool. The returned odp_packet_t is an opaque
> + * handle for the packet that can be used in further calls to
> + * manipulate the allocated packet. The returned buffer is
> + * initialized as an ODP packet and with the length set to the
> + * requested len. The caller will then initialize the packet with
> + * headers and payload as needed. This call itself does not
> + * initialize packet contents or the meta data that would be present
> + * following a packet parse.
> + */
> +odp_packet_t odp_packet_alloc_len(odp_buffer_pool_t pool, size_t len);
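
A sketch of allocating a fixed-length packet and releasing it; the pool is assumed to be of type ODP_BUFFER_TYPE_PACKET and send_probe() is a hypothetical helper (real code would transmit rather than free):

    #include <odp.h>

    static int send_probe(odp_buffer_pool_t pkt_pool, size_t probe_len)
    {
        odp_packet_t pkt = odp_packet_alloc_len(pkt_pool, probe_len);

        if (pkt == ODP_PACKET_INVALID)
            return -1;                  /* pool exhausted or bad length */

        /* ... fill in headers and payload, then hand to a pktio TX queue ... */

        odp_packet_free(pkt);           /* placeholder for actual transmit */
        return 0;
    }
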
> +
> +/**
> + * Packet free
> + *
> + * @param[in] pkt     Handle of the packet to be freed
> + *
> + * @note This routine is used to return a packet back to its
> + * containing buffer pool. Results are undefined if an application
> + * attempts to reference a packet after it is freed.
> + */
> +void odp_packet_free(odp_packet_t pkt);
> +
> +/**
> + * Initialize a packet
> + *
> + * @param[in] pkt     Handle of the packet to be initialized
> + *
> + * @note This routine is called following packet allocation to
> + * initialize the packet meta data and internal structure to support
> + * packet operations. Note that this function is performed whenever a
> + * packet is allocated so it would only be used if an application
> + * wished to re-initialize a packet to permit it to discard whatever
> + * previous contents existed and start a fresh packet without having
> + * to free and re-allocate the packet. Re-initializing a packet
> + * resets its headroom and tailroom to their default values (from the
> + * containing packet pool) and sets the packet length to 0.
>   */
>  void odp_packet_init(odp_packet_t pkt);
>
>  /**
> - * Convert a buffer handle to a packet handle
> + * Obtain buffer pool handle of a packet
>   *
> - * @param buf  Buffer handle
> + * @param[in] pkt   Packet handle
>   *
> - * @return Packet handle
> + * @return Buffer pool the packet was allocated from
> + *
> + * @note This routine is an accessor function that returns the handle
> + * of the buffer pool containing the referenced packet.
>   */
> -odp_packet_t odp_packet_from_buffer(odp_buffer_t buf);
> +odp_buffer_pool_t odp_packet_pool(odp_packet_t pkt);
>
>  /**
> - * Convert a packet handle to a buffer handle
> + * Get the headroom available for a packet
>   *
> - * @param pkt  Packet handle
> + * @param[in] pkt   Packet handle
>   *
> - * @return Buffer handle
> + * @return Headroom available for this packet, in bytes.
> + *
> + * @note This routine returns the current headroom available for a
> + * packet. The initial value for this is taken either from the
> + * containing buffer pool (for explicit packet allocation) or from the
> + * Class of Service (CoS) on packet reception. It is adjusted
> + * dynamically by the odp_packet_push_head() and
> + * odp_packet_pull_head() routines.
>   */
> -odp_buffer_t odp_packet_to_buffer(odp_packet_t pkt);
> +size_t odp_packet_headroom(odp_packet_t pkt);
>
>  /**
> - * Set the packet length
> + * Get the tailroom available for a packet
> + *
> + * @param[in] pkt   Packet handle
>   *
> - * @param pkt  Packet handle
> - * @param len  Length of packet in bytes
> + * @return Tailroom available for this packet, in bytes.
> + *
> + * @note This routine returns the current tailroom available for a
> + * packet. The initial value for this is taken from the
> + * containing buffer pool. It is adjusted dynamically by the
> + * odp_packet_push_tail() and odp_packet_pull_tail() routines.
>   */
> -void odp_packet_set_len(odp_packet_t pkt, size_t len);
> +size_t odp_packet_tailroom(odp_packet_t pkt);
>
>  /**
> - * Get the packet length
> + * Get packet length
>   *
> - * @param pkt  Packet handle
> + * @param[in] pkt  Packet handle
>   *
>   * @return   Packet length in bytes
> + *
> + * @note This routine is an accessor function that returns the length
> + * (in bytes) of a packet. This is the total number of octets that
> + * would transmit for the packet, not including the Ethernet Frame
> + * Check Sequence (FCS), and includes all packet headers as well as
> + * payload. Results are undefined if the supplied pkt does not
> + * specify a valid packet. Note that packet length will change in
> + * response to headroom/tailroom and/or split/join operations. As a
> + * result, this attribute does not have a setter accessor function.
>   */
> -size_t odp_packet_get_len(odp_packet_t pkt);
> +size_t odp_packet_len(odp_packet_t pkt);
>
>  /**
> - * Set packet user context
> + * Get address and size of user meta data associated with a packet
> + *
> + * @param[in]  pkt        Packet handle
> + * @param[out] udata_size Number of bytes of user meta data available
> + *                        at the returned address
>   *
> - * @param buf      Packet handle
> - * @param ctx      User context
> + * @return                Address of the user meta data for this packet
> + *                        or NULL if the packet has no user meta data.
>   *
> + * @note This routine returns the address of the user meta data
> + * associated with an ODP pac...
>
> [Message clipped]
>
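For readers skimming the quoted documentation above, here is a minimal sketch (not part of the patch) of the allocate/inspect/free flow those accessors describe. The function name packet_accessor_demo and the pkt_pool parameter are illustrative; the pool is assumed to have been created elsewhere with buf_type ODP_BUFFER_TYPE_PACKET.

#include <odp.h>

static void packet_accessor_demo(odp_buffer_pool_t pkt_pool)
{
	odp_packet_t pkt = odp_packet_alloc(pkt_pool);

	if (!odp_packet_is_valid(pkt))
		return;

	/* A freshly allocated packet starts with the pool-default headroom
	 * and tailroom and a packet length of 0. */
	size_t headroom        = odp_packet_headroom(pkt);
	size_t tailroom        = odp_packet_tailroom(pkt);
	size_t len             = odp_packet_len(pkt);
	odp_buffer_pool_t pool = odp_packet_pool(pkt);

	(void)headroom; (void)tailroom; (void)len; (void)pool;

	/* Return the packet to its containing pool; referencing pkt after
	 * this point is undefined. */
	odp_packet_free(pkt);
}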
diff mbox

Patch

diff --git a/example/generator/odp_generator.c b/example/generator/odp_generator.c
index ffa5e62..efa418f 100644
--- a/example/generator/odp_generator.c
+++ b/example/generator/odp_generator.c
@@ -19,7 +19,6 @@ 
 #include <odp.h>
 
 #include <odph_linux.h>
-#include <odph_packet.h>
 #include <odph_eth.h>
 #include <odph_ip.h>
 #include <odph_udp.h>
@@ -168,24 +167,24 @@  static int scan_mac(char *in, odph_ethaddr_t *des)
  *
  * @param obuf packet buffer
 */
-static void pack_udp_pkt(odp_buffer_t obuf)
+static void pack_udp_pkt(odp_packet_t pkt)
 {
 	char *buf;
-	int max;
-	odp_packet_t pkt;
+
 	odph_ethhdr_t *eth;
 	odph_ipv4hdr_t *ip;
 	odph_udphdr_t *udp;
 	unsigned short seq;
+	size_t seglen;
+
+	buf = odp_packet_push_tail_and_map(pkt, args->appl.payload +
+					   ODPH_UDPHDR_LEN +
+					   ODPH_IPV4HDR_LEN +
+					   ODPH_ETHHDR_LEN, &seglen);
 
-	buf = odp_buffer_addr(obuf);
 	if (buf == NULL)
 		return;
-	max = odp_buffer_size(obuf);
-	if (max <= 0)
-		return;
 
-	pkt = odp_packet_from_buffer(obuf);
 	/* ether */
 	odp_packet_set_l2_offset(pkt, 0);
 	eth = (odph_ethhdr_t *)buf;
@@ -213,8 +212,7 @@  static void pack_udp_pkt(odp_buffer_t obuf)
 	udp->length = odp_cpu_to_be_16(args->appl.payload + ODPH_UDPHDR_LEN);
 	udp->chksum = 0;
 	udp->chksum = odp_cpu_to_be_16(odph_ipv4_udp_chksum(pkt));
-	odp_packet_set_len(pkt, args->appl.payload + ODPH_UDPHDR_LEN +
-			   ODPH_IPV4HDR_LEN + ODPH_ETHHDR_LEN);
+
 }
 
 /**
@@ -222,27 +220,27 @@  static void pack_udp_pkt(odp_buffer_t obuf)
  *
  * @param obuf packet buffer
 */
-static void pack_icmp_pkt(odp_buffer_t obuf)
+static void pack_icmp_pkt(odp_packet_t pkt)
 {
 	char *buf;
-	int max;
-	odp_packet_t pkt;
+
 	odph_ethhdr_t *eth;
 	odph_ipv4hdr_t *ip;
 	odph_icmphdr_t *icmp;
 	struct timeval tval;
 	uint8_t *tval_d;
 	unsigned short seq;
+	size_t seglen;
 
-	buf = odp_buffer_addr(obuf);
+	buf = odp_packet_push_tail_and_map(pkt, args->appl.payload +
+					   ODPH_ICMPHDR_LEN +
+					   ODPH_IPV4HDR_LEN +
+					   ODPH_ETHHDR_LEN, &seglen);
 	if (buf == NULL)
 		return;
-	max = odp_buffer_size(obuf);
-	if (max <= 0)
-		return;
 
 	args->appl.payload = 56;
-	pkt = odp_packet_from_buffer(obuf);
+
 	/* ether */
 	odp_packet_set_l2_offset(pkt, 0);
 	eth = (odph_ethhdr_t *)buf;
@@ -277,9 +275,6 @@  static void pack_icmp_pkt(odp_buffer_t obuf)
 	icmp->chksum = 0;
 	icmp->chksum = odp_chksum(icmp, args->appl.payload +
 				  ODPH_ICMPHDR_LEN);
-
-	odp_packet_set_len(pkt, args->appl.payload + ODPH_ICMPHDR_LEN +
-			   ODPH_IPV4HDR_LEN + ODPH_ETHHDR_LEN);
 }
 
 /**
@@ -295,7 +290,7 @@  static void *gen_send_thread(void *arg)
 	thread_args_t *thr_args;
 	odp_queue_t outq_def;
 
-	odp_buffer_t buf;
+	odp_packet_t buf;
 
 	thr = odp_thread_id();
 	thr_args = arg;
@@ -316,8 +311,8 @@  static void *gen_send_thread(void *arg)
 	printf("  [%02i] created mode: SEND\n", thr);
 	for (;;) {
 		int err;
-		buf = odp_buffer_alloc(thr_args->pool);
-		if (!odp_buffer_is_valid(buf)) {
+		buf = odp_packet_alloc(thr_args->pool);
+		if (!odp_packet_is_valid(buf)) {
 			ODP_ERR("  [%2i] alloc_single failed\n", thr);
 			return NULL;
 		}
@@ -493,13 +488,13 @@  static void *gen_recv_thread(void *arg)
 		pkt = odp_packet_from_buffer(buf);
 		/* Drop packets with errors */
 		if (odp_unlikely(odp_packet_error(pkt))) {
-			odph_packet_free(pkt);
+			odp_packet_free(pkt);
 			continue;
 		}
 
 		print_pkts(thr, &pkt, 1);
 
-		odph_packet_free(pkt);
+		odp_packet_free(pkt);
 	}
 
 	return arg;
@@ -512,11 +507,11 @@  int main(int argc, char *argv[])
 	odph_linux_pthread_t thread_tbl[MAX_WORKERS];
 	odp_buffer_pool_t pool;
 	int num_workers;
-	void *pool_base;
 	int i;
 	int first_core;
 	int core_count;
 	odp_shm_t shm;
+	odp_buffer_pool_param_t params;
 
 	/* Init ODP before calling anything else */
 	if (odp_init_global(NULL, NULL)) {
@@ -579,20 +574,14 @@  int main(int argc, char *argv[])
 	printf("First core:         %i\n\n", first_core);
 
 	/* Create packet pool */
-	shm = odp_shm_reserve("shm_packet_pool",
-			      SHM_PKT_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0);
-	pool_base = odp_shm_addr(shm);
 
-	if (pool_base == NULL) {
-		ODP_ERR("Error: packet pool mem alloc failed.\n");
-		exit(EXIT_FAILURE);
-	}
+	params.buf_size = SHM_PKT_POOL_BUF_SIZE;
+	params.buf_num  = SHM_PKT_POOL_SIZE/SHM_PKT_POOL_BUF_SIZE;
+	params.buf_type = ODP_BUFFER_TYPE_PACKET;
+	params.buf_opts = ODP_BUFFER_OPTS_UNSEGMENTED;
+
+	pool = odp_buffer_pool_create("packet_pool", &params, NULL);
 
-	pool = odp_buffer_pool_create("packet_pool", pool_base,
-				      SHM_PKT_POOL_SIZE,
-				      SHM_PKT_POOL_BUF_SIZE,
-				      ODP_CACHE_LINE_SIZE,
-				      ODP_BUFFER_TYPE_PACKET);
 	if (pool == ODP_BUFFER_POOL_INVALID) {
 		ODP_ERR("Error: packet pool create failed.\n");
 		exit(EXIT_FAILURE);
diff --git a/example/ipsec/odp_ipsec.c b/example/ipsec/odp_ipsec.c
index da6c48e..3b39be2 100644
--- a/example/ipsec/odp_ipsec.c
+++ b/example/ipsec/odp_ipsec.c
@@ -18,7 +18,6 @@ 
 #include <odp.h>
 
 #include <odph_linux.h>
-#include <odph_packet.h>
 #include <odph_eth.h>
 #include <odph_ip.h>
 #include <odph_icmp.h>
@@ -154,8 +153,6 @@  typedef struct {
 #define SHM_CTX_POOL_BUF_COUNT (SHM_PKT_POOL_BUF_COUNT + SHM_OUT_POOL_BUF_COUNT)
 #define SHM_CTX_POOL_SIZE      (SHM_CTX_POOL_BUF_COUNT * SHM_CTX_POOL_BUF_SIZE)
 
-static odp_buffer_pool_t ctx_pool = ODP_BUFFER_POOL_INVALID;
-
 /**
  * Get per packet processing context from packet buffer
  *
@@ -166,33 +163,7 @@  static odp_buffer_pool_t ctx_pool = ODP_BUFFER_POOL_INVALID;
 static
 pkt_ctx_t *get_pkt_ctx_from_pkt(odp_packet_t pkt)
 {
-	return (pkt_ctx_t *)odp_packet_get_ctx(pkt);
-}
-
-/**
- * Allocate per packet processing context and associate it with
- * packet buffer
- *
- * @param pkt  Packet
- *
- * @return pointer to context area
- */
-static
-pkt_ctx_t *alloc_pkt_ctx(odp_packet_t pkt)
-{
-	odp_buffer_t ctx_buf = odp_buffer_alloc(ctx_pool);
-	pkt_ctx_t *ctx;
-
-	/* There should always be enough contexts */
-	if (odp_unlikely(ODP_BUFFER_INVALID == ctx_buf))
-		abort();
-
-	ctx = odp_buffer_addr(ctx_buf);
-	memset(ctx, 0, sizeof(*ctx));
-	ctx->buffer = ctx_buf;
-	odp_packet_set_ctx(pkt, ctx);
-
-	return ctx;
+	return (pkt_ctx_t *)odp_packet_udata_addr(pkt);
 }
 
 /**
@@ -365,8 +336,7 @@  static
 void ipsec_init_pre(void)
 {
 	odp_queue_param_t qparam;
-	void *pool_base;
-	odp_shm_t shm;
+	odp_buffer_pool_param_t params;
 
 	/*
 	 * Create queues
@@ -399,16 +369,12 @@  void ipsec_init_pre(void)
 	}
 
 	/* Create output buffer pool */
-	shm = odp_shm_reserve("shm_out_pool",
-			      SHM_OUT_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0);
+	params.buf_num  = SHM_OUT_POOL_BUF_COUNT;
+	params.buf_size = SHM_OUT_POOL_BUF_SIZE;
+	params.buf_type = ODP_BUFFER_TYPE_PACKET;
+	params.buf_opts = ODP_BUFFER_OPTS_UNSEGMENTED;
 
-	pool_base = odp_shm_addr(shm);
-
-	out_pool = odp_buffer_pool_create("out_pool", pool_base,
-					  SHM_OUT_POOL_SIZE,
-					  SHM_OUT_POOL_BUF_SIZE,
-					  ODP_CACHE_LINE_SIZE,
-					  ODP_BUFFER_TYPE_PACKET);
+	out_pool = odp_buffer_pool_create("out_pool", &params, NULL);
 
 	if (ODP_BUFFER_POOL_INVALID == out_pool) {
 		ODP_ERR("Error: message pool create failed.\n");
@@ -637,13 +603,15 @@  pkt_disposition_e do_input_verify(odp_packet_t pkt, pkt_ctx_t *ctx ODP_UNUSED)
 static
 pkt_disposition_e do_route_fwd_db(odp_packet_t pkt, pkt_ctx_t *ctx)
 {
-	odph_ipv4hdr_t *ip = (odph_ipv4hdr_t *)odp_packet_l3(pkt);
+	size_t seglen;
+	odph_ipv4hdr_t *ip = (odph_ipv4hdr_t *)odp_packet_l3_map(pkt, &seglen);
 	fwd_db_entry_t *entry;
 
 	entry = find_fwd_db_entry(odp_be_to_cpu_32(ip->dst_addr));
 
 	if (entry) {
-		odph_ethhdr_t *eth = (odph_ethhdr_t *)odp_packet_l2(pkt);
+		odph_ethhdr_t *eth =
+			(odph_ethhdr_t *)odp_packet_l2_map(pkt, &seglen);
 
 		memcpy(&eth->dst, entry->dst_mac, ODPH_ETHADDR_LEN);
 		memcpy(&eth->src, entry->src_mac, ODPH_ETHADDR_LEN);
@@ -673,8 +641,9 @@  pkt_disposition_e do_ipsec_in_classify(odp_packet_t pkt,
 				       pkt_ctx_t *ctx,
 				       bool *skip)
 {
+	size_t seglen;
 	uint8_t *buf = odp_packet_addr(pkt);
-	odph_ipv4hdr_t *ip = (odph_ipv4hdr_t *)odp_packet_l3(pkt);
+	odph_ipv4hdr_t *ip = (odph_ipv4hdr_t *)odp_packet_l3_map(pkt, &seglen);
 	int hdr_len;
 	odph_ahhdr_t *ah = NULL;
 	odph_esphdr_t *esp = NULL;
@@ -759,6 +728,7 @@  pkt_disposition_e do_ipsec_in_finish(odp_packet_t pkt,
 	odp_crypto_compl_status_t cipher_rc;
 	odp_crypto_compl_status_t auth_rc;
 	odph_ipv4hdr_t *ip;
+	size_t seglen;
 	int hdr_len = ctx->ipsec.hdr_len;
 	int trl_len = 0;
 
@@ -769,7 +739,7 @@  pkt_disposition_e do_ipsec_in_finish(odp_packet_t pkt,
 		return PKT_DROP;
 	if (!is_crypto_compl_status_ok(&auth_rc))
 		return PKT_DROP;
-	ip = (odph_ipv4hdr_t *)odp_packet_l3(pkt);
+	ip = (odph_ipv4hdr_t *)odp_packet_l3_map(pkt, &seglen);
 
 	/*
 	 * Finish auth
@@ -803,11 +773,11 @@  pkt_disposition_e do_ipsec_in_finish(odp_packet_t pkt,
 	ip->chksum = 0;
 	odph_ipv4_csum_update(pkt);
 
-	/* Correct the packet length and move payload into position */
-	odp_packet_set_len(pkt, odp_packet_get_len(pkt) - (hdr_len + trl_len));
+	/* Move payload into position and correct the packet length */
 	memmove(ipv4_data_p(ip),
 		ipv4_data_p(ip) + hdr_len,
 		odp_be_to_cpu_16(ip->tot_len));
+	odp_packet_pull_tail(pkt, hdr_len + trl_len);
 
 	/* Fall through to next state */
 	return PKT_CONTINUE;
@@ -833,8 +803,9 @@  pkt_disposition_e do_ipsec_out_classify(odp_packet_t pkt,
 					pkt_ctx_t *ctx,
 					bool *skip)
 {
+	size_t seglen;
 	uint8_t *buf = odp_packet_addr(pkt);
-	odph_ipv4hdr_t *ip = (odph_ipv4hdr_t *)odp_packet_l3(pkt);
+	odph_ipv4hdr_t *ip = (odph_ipv4hdr_t *)odp_packet_l3_map(pkt, &seglen);
 	uint16_t ip_data_len = ipv4_data_len(ip);
 	uint8_t *ip_data = ipv4_data_p(ip);
 	ipsec_cache_entry_t *entry;
@@ -921,7 +892,7 @@  pkt_disposition_e do_ipsec_out_classify(odp_packet_t pkt,
 
 	/* Set IPv4 length before authentication */
 	ipv4_adjust_len(ip, hdr_len + trl_len);
-	odp_packet_set_len(pkt, odp_packet_get_len(pkt) + (hdr_len + trl_len));
+	odp_packet_push_tail(pkt, hdr_len + trl_len);
 
 	/* Save remaining context */
 	ctx->ipsec.hdr_len = hdr_len;
@@ -995,6 +966,7 @@  pkt_disposition_e do_ipsec_out_finish(odp_packet_t pkt,
 	odp_crypto_compl_status_t cipher_rc;
 	odp_crypto_compl_status_t auth_rc;
 	odph_ipv4hdr_t *ip;
+	size_t seglen;
 
 	/* Check crypto result */
 	event = odp_packet_to_buffer(pkt);
@@ -1003,7 +975,7 @@  pkt_disposition_e do_ipsec_out_finish(odp_packet_t pkt,
 		return PKT_DROP;
 	if (!is_crypto_compl_status_ok(&auth_rc))
 		return PKT_DROP;
-	ip = (odph_ipv4hdr_t *)odp_packet_l3(pkt);
+	ip = (odph_ipv4hdr_t *)odp_packet_l3_map(pkt, &seglen);
 
 	/* Finalize the IPv4 header */
 	ip->ttl = ctx->ipsec.ip_ttl;
@@ -1057,7 +1029,7 @@  void *pktio_thread(void *arg ODP_UNUSED)
 
 		/* Determine new work versus completion or sequence number */
 		if ((completionq != dispatchq) && (seqnumq != dispatchq)) {
-			ctx = alloc_pkt_ctx(pkt);
+			ctx = get_pkt_ctx_from_pkt(pkt);
 			ctx->state = PKT_STATE_INPUT_VERIFY;
 		} else {
 			ctx = get_pkt_ctx_from_pkt(pkt);
@@ -1144,7 +1116,7 @@  void *pktio_thread(void *arg ODP_UNUSED)
 
 		/* Check for drop */
 		if (PKT_DROP == rc)
-			odph_packet_free(pkt);
+			odp_packet_free(pkt);
 
 		/* Print packet counts every once in a while */
 		if (PKT_DONE == rc) {
@@ -1167,12 +1139,13 @@  main(int argc, char *argv[])
 {
 	odph_linux_pthread_t thread_tbl[MAX_WORKERS];
 	int num_workers;
-	void *pool_base;
 	int i;
 	int first_core;
 	int core_count;
 	int stream_count;
 	odp_shm_t shm;
+	odp_buffer_pool_param_t params;
+	odp_buffer_pool_init_t  init_params;
 
 	/* Init ODP before calling anything else */
 	if (odp_init_global(NULL, NULL)) {
@@ -1232,47 +1205,22 @@  main(int argc, char *argv[])
 	printf("First core:         %i\n\n", first_core);
 
 	/* Create packet buffer pool */
-	shm = odp_shm_reserve("shm_packet_pool",
-			      SHM_PKT_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0);
+	params.buf_num  = SHM_PKT_POOL_BUF_COUNT;
+	params.buf_size = SHM_PKT_POOL_BUF_SIZE;
+	params.buf_type = ODP_BUFFER_TYPE_PACKET;
+	params.buf_opts = ODP_BUFFER_OPTS_UNSEGMENTED;
 
-	pool_base = odp_shm_addr(shm);
+	init_params.udata_size = sizeof(pkt_ctx_t);
+	init_params.buf_init   = NULL;
+	init_params.buf_init_arg = NULL;
 
-	if (NULL == pool_base) {
-		ODP_ERR("Error: packet pool mem alloc failed.\n");
-		exit(EXIT_FAILURE);
-	}
+	pkt_pool = odp_buffer_pool_create("packet_pool", &params, &init_params);
 
-	pkt_pool = odp_buffer_pool_create("packet_pool", pool_base,
-					  SHM_PKT_POOL_SIZE,
-					  SHM_PKT_POOL_BUF_SIZE,
-					  ODP_CACHE_LINE_SIZE,
-					  ODP_BUFFER_TYPE_PACKET);
 	if (ODP_BUFFER_POOL_INVALID == pkt_pool) {
 		ODP_ERR("Error: packet pool create failed.\n");
 		exit(EXIT_FAILURE);
 	}
 
-	/* Create context buffer pool */
-	shm = odp_shm_reserve("shm_ctx_pool",
-			      SHM_CTX_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0);
-
-	pool_base = odp_shm_addr(shm);
-
-	if (NULL == pool_base) {
-		ODP_ERR("Error: context pool mem alloc failed.\n");
-		exit(EXIT_FAILURE);
-	}
-
-	ctx_pool = odp_buffer_pool_create("ctx_pool", pool_base,
-					  SHM_CTX_POOL_SIZE,
-					  SHM_CTX_POOL_BUF_SIZE,
-					  ODP_CACHE_LINE_SIZE,
-					  ODP_BUFFER_TYPE_RAW);
-	if (ODP_BUFFER_POOL_INVALID == ctx_pool) {
-		ODP_ERR("Error: context pool create failed.\n");
-		exit(EXIT_FAILURE);
-	}
-
 	/* Populate our IPsec cache */
 	printf("Using %s mode for crypto API\n\n",
 	       (CRYPTO_API_SYNC == args->appl.mode) ? "SYNC" :
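The odp_ipsec.c changes above replace the separate context buffer pool with user metadata carried by each packet. The sketch below (not part of the patch) condenses that pattern; the my_ctx_t type, the pool name, and the pool sizes are hypothetical placeholders.

#include <odp.h>

typedef struct { int state; } my_ctx_t;   /* stand-in for pkt_ctx_t */

static odp_buffer_pool_t make_pkt_pool(void)
{
	odp_buffer_pool_param_t params;
	odp_buffer_pool_init_t  init_params;

	params.buf_num  = 1024;                    /* hypothetical */
	params.buf_size = 2048;                    /* hypothetical */
	params.buf_type = ODP_BUFFER_TYPE_PACKET;
	params.buf_opts = ODP_BUFFER_OPTS_UNSEGMENTED;

	/* Reserve per-packet user metadata at pool creation time */
	init_params.udata_size   = sizeof(my_ctx_t);
	init_params.buf_init     = NULL;
	init_params.buf_init_arg = NULL;

	return odp_buffer_pool_create("demo_pool", &params, &init_params);
}

static my_ctx_t *pkt_ctx(odp_packet_t pkt)
{
	/* User metadata travels with the packet; no separate alloc/free. */
	return (my_ctx_t *)odp_packet_udata_addr(pkt);
}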
diff --git a/example/ipsec/odp_ipsec_stream.c b/example/ipsec/odp_ipsec_stream.c
index fa9aba8..309cf70 100644
--- a/example/ipsec/odp_ipsec_stream.c
+++ b/example/ipsec/odp_ipsec_stream.c
@@ -14,7 +14,6 @@ 
 
 #include <odp.h>
 
-#include <odph_packet.h>
 #include <odph_eth.h>
 #include <odph_ip.h>
 #include <odph_icmp.h>
@@ -173,7 +172,6 @@  odp_packet_t create_ipv4_packet(stream_db_entry_t *stream,
 				odp_buffer_pool_t pkt_pool)
 {
 	ipsec_cache_entry_t *entry = stream->input.entry;
-	odp_buffer_t bfr;
 	odp_packet_t pkt;
 	uint8_t *base;
 	uint8_t *data;
@@ -184,18 +182,19 @@  odp_packet_t create_ipv4_packet(stream_db_entry_t *stream,
 	odph_icmphdr_t *icmp;
 	stream_pkt_hdr_t *test;
 	uint i;
+	size_t seglen;
 
-	/* Get buffer */
-	bfr = odp_buffer_alloc(pkt_pool);
-	if (ODP_BUFFER_INVALID == bfr)
+	/* Get packet */
+	pkt = odp_packet_alloc(pkt_pool);
+	if (ODP_PACKET_INVALID == pkt)
 		return ODP_PACKET_INVALID;
-	pkt = odp_packet_from_buffer(bfr);
-	odp_packet_init(pkt);
-	base = odp_packet_data(pkt);
-	data = odp_packet_data(pkt);
+
+	base = odp_packet_map(pkt, &seglen);
+	data = base;
 
 	/* Ethernet */
 	odp_packet_set_inflag_eth(pkt, 1);
+	odp_packet_set_inflag_l2(pkt, 1);
 	odp_packet_set_l2_offset(pkt, data - base);
 	eth = (odph_ethhdr_t *)data;
 	data += sizeof(*eth);
@@ -251,6 +250,7 @@  odp_packet_t create_ipv4_packet(stream_db_entry_t *stream,
 	/* ICMP header so we can see it on wireshark */
 	icmp = (odph_icmphdr_t *)data;
 	data += sizeof(*icmp);
+
 	icmp->type = ICMP_ECHO;
 	icmp->code = 0;
 	icmp->un.echo.id = odp_cpu_to_be_16(0x1234);
@@ -303,7 +303,7 @@  odp_packet_t create_ipv4_packet(stream_db_entry_t *stream,
 
 	/* Since ESP can pad we can now fix IP length */
 	ip->tot_len = odp_cpu_to_be_16(data - (uint8_t *)ip);
-	odp_packet_set_len(pkt, data - base);
+	odp_packet_push_tail(pkt, data - base);
 
 	/* Close AH if specified */
 	if (ah) {
@@ -344,9 +344,10 @@  bool verify_ipv4_packet(stream_db_entry_t *stream,
 	int hdr_len;
 	odph_icmphdr_t *icmp;
 	stream_pkt_hdr_t *test;
+	size_t seglen;
 
 	/* Basic IPv4 verify (add checksum verification) */
-	data = odp_packet_l3(pkt);
+	data = odp_packet_l3_map(pkt, &seglen);
 	ip = (odph_ipv4hdr_t *)data;
 	data += sizeof(*ip);
 	if (0x45 != ip->ver_ihl)
@@ -546,7 +547,7 @@  bool verify_stream_db_outputs(void)
 				good = verify_ipv4_packet(stream, pkt);
 				if (good)
 					stream->verified++;
-				odph_packet_free(pkt);
+				odp_packet_free(pkt);
 			}
 		}
 
diff --git a/example/l2fwd/odp_l2fwd.c b/example/l2fwd/odp_l2fwd.c
index 57037cd..c43ef86 100644
--- a/example/l2fwd/odp_l2fwd.c
+++ b/example/l2fwd/odp_l2fwd.c
@@ -17,7 +17,6 @@ 
 
 #include <odp.h>
 #include <odph_linux.h>
-#include <odph_packet.h>
 #include <odph_eth.h>
 #include <odph_ip.h>
 
@@ -311,12 +310,12 @@  int main(int argc, char *argv[])
 {
 	odph_linux_pthread_t thread_tbl[MAX_WORKERS];
 	odp_buffer_pool_t pool;
-	void *pool_base;
 	int i;
 	int first_core;
 	int core_count;
 	odp_pktio_t pktio;
 	odp_shm_t shm;
+	odp_buffer_pool_param_t params;
 
 	/* Init ODP before calling anything else */
 	if (odp_init_global(NULL, NULL)) {
@@ -380,20 +379,13 @@  int main(int argc, char *argv[])
 	printf("First core:         %i\n\n", first_core);
 
 	/* Create packet pool */
-	shm = odp_shm_reserve("shm_packet_pool",
-			      SHM_PKT_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0);
-	pool_base = odp_shm_addr(shm);
+	params.buf_num  = SHM_PKT_POOL_SIZE/SHM_PKT_POOL_BUF_SIZE;
+	params.buf_size = SHM_PKT_POOL_BUF_SIZE;
+	params.buf_type = ODP_BUFFER_TYPE_PACKET;
+	params.buf_opts = ODP_BUFFER_OPTS_UNSEGMENTED;
 
-	if (pool_base == NULL) {
-		ODP_ERR("Error: packet pool mem alloc failed.\n");
-		exit(EXIT_FAILURE);
-	}
+	pool = odp_buffer_pool_create("packet_pool", &params, NULL);
 
-	pool = odp_buffer_pool_create("packet_pool", pool_base,
-				      SHM_PKT_POOL_SIZE,
-				      SHM_PKT_POOL_BUF_SIZE,
-				      ODP_CACHE_LINE_SIZE,
-				      ODP_BUFFER_TYPE_PACKET);
 	if (pool == ODP_BUFFER_POOL_INVALID) {
 		ODP_ERR("Error: packet pool create failed.\n");
 		exit(EXIT_FAILURE);
@@ -480,7 +472,7 @@  static int drop_err_pkts(odp_packet_t pkt_tbl[], unsigned len)
 		pkt = pkt_tbl[i];
 
 		if (odp_unlikely(odp_packet_error(pkt))) {
-			odph_packet_free(pkt); /* Drop */
+			odp_packet_free(pkt); /* Drop */
 			pkt_cnt--;
 		} else if (odp_unlikely(i != j++)) {
 			pkt_tbl[j-1] = pkt;
diff --git a/example/odp_example/odp_example.c b/example/odp_example/odp_example.c
index 1ed4a0b..cdb78b6 100644
--- a/example/odp_example/odp_example.c
+++ b/example/odp_example/odp_example.c
@@ -944,13 +944,13 @@  int main(int argc, char *argv[])
 	test_args_t args;
 	int num_workers;
 	odp_buffer_pool_t pool;
-	void *pool_base;
 	odp_queue_t queue;
 	int i, j;
 	int prios;
 	int first_core;
 	odp_shm_t shm;
 	test_globals_t *globals;
+	odp_buffer_pool_param_t params;
 
 	printf("\nODP example starts\n\n");
 
@@ -1032,19 +1032,13 @@  int main(int argc, char *argv[])
 	/*
 	 * Create message pool
 	 */
-	shm = odp_shm_reserve("msg_pool",
-			      MSG_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0);
 
-	pool_base = odp_shm_addr(shm);
+	params.buf_num  = MSG_POOL_SIZE/sizeof(test_message_t);
+	params.buf_size = sizeof(test_message_t);
+	params.buf_type = ODP_BUFFER_TYPE_RAW;
+	params.buf_opts = ODP_BUFFER_OPTS_NONE;
 
-	if (pool_base == NULL) {
-		ODP_ERR("Shared memory reserve failed.\n");
-		return -1;
-	}
-
-	pool = odp_buffer_pool_create("msg_pool", pool_base, MSG_POOL_SIZE,
-				      sizeof(test_message_t),
-				      ODP_CACHE_LINE_SIZE, ODP_BUFFER_TYPE_RAW);
+	pool = odp_buffer_pool_create("msg_pool", &params, NULL);
 
 	if (pool == ODP_BUFFER_POOL_INVALID) {
 		ODP_ERR("Pool create failed.\n");
diff --git a/example/packet/odp_pktio.c b/example/packet/odp_pktio.c
index 2cf3f0d..64161f2 100644
--- a/example/packet/odp_pktio.c
+++ b/example/packet/odp_pktio.c
@@ -17,7 +17,6 @@ 
 
 #include <odp.h>
 #include <odph_linux.h>
-#include <odph_packet.h>
 #include <odph_eth.h>
 #include <odph_ip.h>
 
@@ -292,11 +291,11 @@  int main(int argc, char *argv[])
 	odph_linux_pthread_t thread_tbl[MAX_WORKERS];
 	odp_buffer_pool_t pool;
 	int num_workers;
-	void *pool_base;
 	int i;
 	int first_core;
 	int core_count;
 	odp_shm_t shm;
+	odp_buffer_pool_param_t params;
 
 	/* Init ODP before calling anything else */
 	if (odp_init_global(NULL, NULL)) {
@@ -350,20 +349,13 @@  int main(int argc, char *argv[])
 	printf("First core:         %i\n\n", first_core);
 
 	/* Create packet pool */
-	shm = odp_shm_reserve("shm_packet_pool",
-			      SHM_PKT_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0);
-	pool_base = odp_shm_addr(shm);
+	params.buf_num  = SHM_PKT_POOL_SIZE/SHM_PKT_POOL_BUF_SIZE;
+	params.buf_size = SHM_PKT_POOL_BUF_SIZE;
+	params.buf_type = ODP_BUFFER_TYPE_PACKET;
+	params.buf_opts = ODP_BUFFER_OPTS_UNSEGMENTED;
 
-	if (pool_base == NULL) {
-		ODP_ERR("Error: packet pool mem alloc failed.\n");
-		exit(EXIT_FAILURE);
-	}
+	pool = odp_buffer_pool_create("packet_pool", &params, NULL);
 
-	pool = odp_buffer_pool_create("packet_pool", pool_base,
-				      SHM_PKT_POOL_SIZE,
-				      SHM_PKT_POOL_BUF_SIZE,
-				      ODP_CACHE_LINE_SIZE,
-				      ODP_BUFFER_TYPE_PACKET);
 	if (pool == ODP_BUFFER_POOL_INVALID) {
 		ODP_ERR("Error: packet pool create failed.\n");
 		exit(EXIT_FAILURE);
@@ -427,7 +419,7 @@  static int drop_err_pkts(odp_packet_t pkt_tbl[], unsigned len)
 		pkt = pkt_tbl[i];
 
 		if (odp_unlikely(odp_packet_error(pkt))) {
-			odph_packet_free(pkt); /* Drop */
+			odp_packet_free(pkt); /* Drop */
 			pkt_cnt--;
 		} else if (odp_unlikely(i != j++)) {
 			pkt_tbl[j-1] = pkt;
@@ -452,11 +444,12 @@  static void swap_pkt_addrs(odp_packet_t pkt_tbl[], unsigned len)
 	odph_ipv4hdr_t *ip;
 	uint32be_t ip_tmp_addr; /* tmp ip addr */
 	unsigned i;
+	size_t seglen;
 
 	for (i = 0; i < len; ++i) {
 		pkt = pkt_tbl[i];
 		if (odp_packet_inflag_eth(pkt)) {
-			eth = (odph_ethhdr_t *)odp_packet_l2(pkt);
+			eth = (odph_ethhdr_t *)odp_packet_l2_map(pkt, &seglen);
 
 			tmp_addr = eth->dst;
 			eth->dst = eth->src;
@@ -464,7 +457,8 @@  static void swap_pkt_addrs(odp_packet_t pkt_tbl[], unsigned len)
 
 			if (odp_packet_inflag_ipv4(pkt)) {
 				/* IPv4 */
-				ip = (odph_ipv4hdr_t *)odp_packet_l3(pkt);
+				ip = (odph_ipv4hdr_t *)
+					odp_packet_l3_map(pkt, &seglen);
 
 				ip_tmp_addr  = ip->src_addr;
 				ip->src_addr = ip->dst_addr;
diff --git a/example/timer/odp_timer_test.c b/example/timer/odp_timer_test.c
index 78b2ae2..c0fcf49 100644
--- a/example/timer/odp_timer_test.c
+++ b/example/timer/odp_timer_test.c
@@ -242,12 +242,11 @@  int main(int argc, char *argv[])
 	test_args_t args;
 	int num_workers;
 	odp_buffer_pool_t pool;
-	void *pool_base;
 	odp_queue_t queue;
 	int first_core;
 	uint64_t cycles, ns;
 	odp_queue_param_t param;
-	odp_shm_t shm;
+	odp_buffer_pool_param_t params;
 
 	printf("\nODP timer example starts\n");
 
@@ -306,17 +305,12 @@  int main(int argc, char *argv[])
 	printf("period:             %i usec\n", args.period_us);
 	printf("timeouts:           %i\n", args.tmo_count);
 
-	/*
-	 * Create message pool
-	 */
-	shm = odp_shm_reserve("msg_pool",
-			      MSG_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0);
-	pool_base = odp_shm_addr(shm);
-
-	pool = odp_buffer_pool_create("msg_pool", pool_base, MSG_POOL_SIZE,
-				      0,
-				      ODP_CACHE_LINE_SIZE,
-				      ODP_BUFFER_TYPE_TIMEOUT);
+	params.buf_num  = 1024;
+	params.buf_size = 0;
+	params.buf_type = ODP_BUFFER_TYPE_TIMEOUT;
+	params.buf_opts = ODP_BUFFER_OPTS_NONE;
+
+	pool = odp_buffer_pool_create("msg_pool", &params, NULL);
 
 	if (pool == ODP_BUFFER_POOL_INVALID) {
 		ODP_ERR("Pool create failed.\n");
diff --git a/helper/include/odph_ip.h b/helper/include/odph_ip.h
index 2c83c0f..2dab164 100644
--- a/helper/include/odph_ip.h
+++ b/helper/include/odph_ip.h
@@ -79,10 +79,12 @@  static inline int odph_ipv4_csum_valid(odp_packet_t pkt)
 	odph_ipv4hdr_t ip;
 	uint16be_t chksum;
 
-	if (!odp_packet_l3_offset(pkt))
+	if (!odp_packet_inflag_ipv4(pkt))
 		return 0;
 
-	memcpy(&ip, odp_packet_l3(pkt), sizeof(odph_ipv4hdr_t));
+	odp_packet_copy_to_memory(&ip, pkt, odp_packet_l3_offset(pkt),
+				  sizeof(odph_ipv4hdr_t));
+
 	w = (uint16_t *)(void *)&ip;
 	chksum = ip.chksum;
 	ip.chksum = 0x0;
@@ -105,12 +107,13 @@  static inline uint16sum_t odph_ipv4_csum_update(odp_packet_t pkt)
 {
 	uint16_t *w;
 	odph_ipv4hdr_t *ip;
+	size_t seglen;
 	int nleft = sizeof(odph_ipv4hdr_t);
 
-	if (!odp_packet_l3_offset(pkt))
+	if (!odp_packet_inflag_ipv4(pkt))
 		return 0;
 
-	ip = (odph_ipv4hdr_t *)odp_packet_l3(pkt);
+	ip = (odph_ipv4hdr_t *)odp_packet_l3_map(pkt, &seglen);
 	w = (uint16_t *)(void *)ip;
 	ip->chksum = odp_chksum(w, nleft);
 	return ip->chksum;
@@ -126,7 +129,14 @@  static inline uint16sum_t odph_ipv4_csum_update(odp_packet_t pkt)
  * IPv6 header
  */
 typedef struct ODP_PACKED {
-	uint32be_t ver_tc_flow;  /**< Version / Traffic class / Flow label */
+	union {
+		uint32be_t ver_tc_flow;  /**< Version / TC / Flow label */
+		struct {
+			uint32be_t ver:4;    /**< Version (must be 6) */
+			uint32be_t tc:8;     /**< Traffic class */
+			uint32be_t flow:20;  /**< Flow label */
+		};
+	};
 	uint16be_t payload_len;  /**< Payload length */
 	uint8_t    next_hdr;     /**< Next header */
 	uint8_t    hop_limit;    /**< Hop limit */
@@ -137,16 +147,29 @@  typedef struct ODP_PACKED {
 /** @internal Compile time assert */
 ODP_STATIC_ASSERT(sizeof(odph_ipv6hdr_t) == ODPH_IPV6HDR_LEN, "ODPH_IPV6HDR_T__SIZE_ERROR");
 
+/**
+ * IPv6 Header extensions
+ */
+typedef struct ODP_PACKED {
+	uint8_t    next_hdr;     /**< Protocol of next header */
+	uint8_t    ext_len;      /**< Length of this extension in 8 byte units,
+				    not counting first 8 bytes, so 0 = 8 bytes,
+				    1 = 16 bytes, etc. */
+	uint8_t    filler[6];    /**< Fill out first 8 byte segment */
+} odph_ipv6hdr_ext_t;
+
 /** @name
  * IP protocol values (IPv4:'proto' or IPv6:'next_hdr')
  * @{*/
-#define ODPH_IPPROTO_ICMP 0x01 /**< Internet Control Message Protocol (1) */
-#define ODPH_IPPROTO_TCP  0x06 /**< Transmission Control Protocol (6) */
-#define ODPH_IPPROTO_UDP  0x11 /**< User Datagram Protocol (17) */
-#define ODPH_IPPROTO_SCTP 0x84 /**< Stream Control Transmission Protocol (132) */
-#define ODPH_IPPROTO_FRAG 0x2C /**< Fragment (44) */
-#define ODPH_IPPROTO_AH   0x33 /**< Authentication Header (51) */
-#define ODPH_IPPROTO_ESP  0x32 /**< Encapsulating Security Payload (50) */
+#define ODPH_IPPROTO_HOPOPTS 0x00 /**< IPv6 hop-by-hop options */
+#define ODPH_IPPROTO_ICMP    0x01 /**< Internet Control Message Protocol (1) */
+#define ODPH_IPPROTO_TCP     0x06 /**< Transmission Control Protocol (6) */
+#define ODPH_IPPROTO_UDP     0x11 /**< User Datagram Protocol (17) */
+#define ODPH_IPPROTO_ROUTE   0x2B /**< IPv6 Routing header (43) */
+#define ODPH_IPPROTO_FRAG    0x2C /**< IPv6 Fragment (44) */
+#define ODPH_IPPROTO_AH      0x33 /**< Authentication Header (51) */
+#define ODPH_IPPROTO_ESP     0x32 /**< Encapsulating Security Payload (50) */
+#define ODPH_IPPROTO_INVALID 0xFF /**< Reserved invalid by IANA */
 /**@}*/
 
 #ifdef __cplusplus
diff --git a/helper/include/odph_packet.h b/helper/include/odph_packet.h
deleted file mode 100644
index 3d53593..0000000
--- a/helper/include/odph_packet.h
+++ /dev/null
@@ -1,97 +0,0 @@ 
-/* Copyright (c) 2014, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier:     BSD-3-Clause
- */
-
-
-/**
- * @file
- *
- * Optional ODP packet helper functions
- */
-
-#ifndef ODPH_PACKET_HELPER_H_
-#define ODPH_PACKET_HELPER_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp.h>
-
-/**
- * Helper: Tests if packet is valid
- *
- * Allows for more thorough checking than "if (pkt == ODP_PACKET_INVALID)"
- *
- * @param pkt  Packet handle
- *
- * @return 1 if valid, otherwise 0
- */
-static inline int odph_packet_is_valid(odp_packet_t pkt)
-{
-	odp_buffer_t buf = odp_packet_to_buffer(pkt);
-
-	return odp_buffer_is_valid(buf);
-}
-
-/**
- * Helper: Allocate and initialize a packet buffer from a packet pool
- *
- * @param pool_id  Pool handle
- *
- * @note  The pool must have been created with 'buf_type=ODP_BUFFER_TYPE_PACKET'
- *
- * @return Packet handle or ODP_PACKET_INVALID
- */
-static inline odp_packet_t odph_packet_alloc(odp_buffer_pool_t pool_id)
-{
-	odp_packet_t pkt;
-	odp_buffer_t buf;
-
-	buf = odp_buffer_alloc(pool_id);
-	if (odp_unlikely(!odp_buffer_is_valid(buf)))
-		return ODP_PACKET_INVALID;
-
-	pkt = odp_packet_from_buffer(buf);
-	odp_packet_init(pkt);
-
-	return pkt;
-}
-
-/**
- * Helper: Free a packet buffer back into the packet pool
- *
- * @param pkt  Packet handle
- */
-static inline void odph_packet_free(odp_packet_t pkt)
-{
-	odp_buffer_t buf = odp_packet_to_buffer(pkt);
-
-	odp_buffer_free(buf);
-}
-
-/**
- * Helper: Packet buffer maximum data size
- *
- * @note odp_packet_buf_size(pkt) != odp_packet_get_len(pkt), the former returns
- *       the max length of the buffer, the latter the size of a received packet.
- *
- * @param pkt  Packet handle
- *
- * @return Packet buffer maximum data size
- */
-static inline size_t odph_packet_buf_size(odp_packet_t pkt)
-{
-	odp_buffer_t buf = odp_packet_to_buffer(pkt);
-
-	return odp_buffer_size(buf);
-}
-
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/helper/include/odph_udp.h b/helper/include/odph_udp.h
index b2eaf03..bd0fb68 100644
--- a/helper/include/odph_udp.h
+++ b/helper/include/odph_udp.h
@@ -57,15 +57,14 @@  static inline uint16_t odph_ipv4_udp_chksum(odp_packet_t pkt)
 	odph_udphdr_t *udph;
 	odph_ipv4hdr_t *iph;
 	uint16_t udplen;
+	size_t l3_seglen, l4_seglen;
 
-	if (!odp_packet_l3_offset(pkt))
+	if (odp_packet_l3_protocol(pkt) != 0x800 ||
+	    odp_packet_l4_protocol(pkt) != ODPH_IPPROTO_UDP)
 		return 0;
 
-	if (!odp_packet_l4_offset(pkt))
-		return 0;
-
-	iph = (odph_ipv4hdr_t *)odp_packet_l3(pkt);
-	udph = (odph_udphdr_t *)odp_packet_l4(pkt);
+	iph = (odph_ipv4hdr_t *)odp_packet_l3_map(pkt, &l3_seglen);
+	udph = (odph_udphdr_t *)odp_packet_l4_map(pkt, &l4_seglen);
 	udplen = odp_be_to_cpu_16(udph->length);
 
 	/* the source ip */
diff --git a/platform/linux-generic/Makefile.am b/platform/linux-generic/Makefile.am
index 0153a22..08c147c 100644
--- a/platform/linux-generic/Makefile.am
+++ b/platform/linux-generic/Makefile.am
@@ -21,7 +21,6 @@  include_HEADERS = \
 		  $(top_srcdir)/platform/linux-generic/include/api/odp_debug.h \
 		  $(top_srcdir)/platform/linux-generic/include/api/odp_hints.h \
 		  $(top_srcdir)/platform/linux-generic/include/api/odp_init.h \
-		  $(top_srcdir)/platform/linux-generic/include/api/odp_packet_flags.h \
 		  $(top_srcdir)/platform/linux-generic/include/api/odp_packet.h \
 		  $(top_srcdir)/platform/linux-generic/include/api/odp_packet_io.h \
 		  $(top_srcdir)/platform/linux-generic/include/api/odp_queue.h \
@@ -46,8 +45,8 @@  subdirheaders_HEADERS = \
 			$(top_srcdir)/helper/include/odph_ip.h \
 			$(top_srcdir)/helper/include/odph_ipsec.h \
 			$(top_srcdir)/helper/include/odph_linux.h \
-			$(top_srcdir)/helper/include/odph_packet.h \
 			$(top_srcdir)/helper/include/odph_ring.h \
+			$(top_srcdir)/helper/include/odph_tcp.h \
 			$(top_srcdir)/helper/include/odph_udp.h
 
 __LIB__libodp_la_SOURCES = \
@@ -60,7 +59,6 @@  __LIB__libodp_la_SOURCES = \
 			   odp_init.c \
 			   odp_linux.c \
 			   odp_packet.c \
-			   odp_packet_flags.c \
 			   odp_packet_io.c \
 			   odp_packet_socket.c \
 			   odp_queue.c \
diff --git a/platform/linux-generic/include/api/odp.h b/platform/linux-generic/include/api/odp.h
index 6e4f69e..ffdf1f3 100644
--- a/platform/linux-generic/include/api/odp.h
+++ b/platform/linux-generic/include/api/odp.h
@@ -44,7 +44,6 @@  extern "C" {
 #include <odp_schedule.h>
 #include <odp_sync.h>
 #include <odp_packet.h>
-#include <odp_packet_flags.h>
 #include <odp_packet_io.h>
 #include <odp_crypto.h>
 #include <odp_rwlock.h>
diff --git a/platform/linux-generic/include/api/odp_buffer.h b/platform/linux-generic/include/api/odp_buffer.h
index 289e0eb..3dc4cde 100644
--- a/platform/linux-generic/include/api/odp_buffer.h
+++ b/platform/linux-generic/include/api/odp_buffer.h
@@ -1,4 +1,4 @@ 
-/* Copyright (c) 2013, Linaro Limited
+/* Copyright (c) 2013-2014, Linaro Limited
  * All rights reserved.
  *
  * SPDX-License-Identifier:     BSD-3-Clause
@@ -8,7 +8,88 @@ 
 /**
  * @file
  *
- * ODP buffer descriptor
+ * @par Buffer
+ * A buffer is an element of a buffer pool used for storing
+ * information. Buffers are referenced by an abstract handle of type
+ * odp_buffer_t. Buffers have associated buffer types that describe
+ * their intended use and the type of metadata that is associated
+ * with them. Buffers of a specific type may be referenced for
+ * processing by cores or by offload engines. Buffers are also
+ * transmitted via queues from one processing element to another.
+ *
+ * @par Buffer Types
+ * An ODP buffer type is identified by the
+ * odp_buffer_type_e enum. It defines the semantics that are to be
+ * attached to the buffer and defines the type of metadata that is
+ * associated with it. ODP implementations MUST support the following
+ * buffer types:
+ *
+ * - ODP_BUFFER_TYPE_RAW
+ * This is the “basic” buffer type
+ * which simply consists of a single fixed-sized block of contiguous
+ * memory. Buffers of this type do not support user metadata and the
+ * only built-in metadata supported for this type of buffer are those
+ * that are statically computable, such as pool and size. This type of
+ * buffer is entirely under application control and most of the buffer
+ * APIs defined in this document are not available. APIs for this
+ * type of buffer are described in this document.
+ *
+ * - ODP_BUFFER_TYPE_PACKET
+ * This buffer type is suitable for receiving,
+ * processing, and transmitting network packet data. Included in this
+ * type is a rich set of primitives for manipulating buffer aggregates
+ * and for storing system and user metadata. APIs for this type of
+ * buffer are described here and in the ODP Packet Management Design
+ * document.
+ *
+ * - ODP_BUFFER_TYPE_TIMEOUT
+ * This buffer type is suitable for
+ * representing timer timeout events. Does not support buffer
+ * aggregation but does support user metadata. APIs for this type of
+ * buffer are described here and in the ODP Timer Management Design
+ * document.
+ *
+ * - ODP_BUFFER_TYPE_ANY
+ * A “universal” buffer type capable of
+ * storing information needed for any other buffer type. It is not
+ * intended to be used directly, but exists for possible
+ * implementation convenience.
+ *
+ * @par Metadata
+ * Metadata is additional information relating to a
+ * buffer that is distinct from the application data normally held in
+ * the buffer. Implementations MAY choose to implement metadata as
+ * contiguous with a buffer (e.g., in an implementation-managed prefix
+ * area of the buffer) or in a physically separate metadata area
+ * efficiently accessible by the implementation using the same
+ * identifier as the buffer itself. ODP applications MUST NOT make
+ * assumptions about the addressability relationship between a buffer
+ * and its associated metadata, or between metadata items.
+ * Application use of metadata MUST only be via accessor functions.
+ *
+ * @par Note on OPTIONAL APIs
+ * Every conforming ODP implementation MUST
+ * provide implementations for each API described here. If an API is
+ * designated as OPTIONAL, this means that it is acceptable for an
+ * implementation to do nothing except return
+ * ODP_FUNCTION_NOT_AVAILABLE in response to this call. Note that this
+ * may limit the range of ODP applications supported by a given
+ * implementation since applications needing the functionality of the
+ * optional API will likely choose to deploy on other ODP platforms.
+ *
+ * @par
+ * APIs are designated as OPTIONAL under two conditions:
+ *
+ * -# The API is expected to be difficult to provide efficiently on all
+ *  platforms.
+ *
+ * -# A significant number of ODP applications are expected to exist
+ *  that will not need or use this API.
+ *
+ * @par
+ * Under these circumstances, an API is designated as OPTIONAL to
+ * permit ODP implementations to be conformant while still expecting
+ * to be able to run a significant number of ODP applications.
  */
 
 #ifndef ODP_BUFFER_H_
@@ -21,10 +102,9 @@  extern "C" {
 
 #include <odp_std_types.h>
 
-
 /** @defgroup odp_buffer ODP BUFFER
- *  Operations on a buffer.
- *  @{
+ *
+ * @{
  */
 
 /**
@@ -32,62 +112,445 @@  extern "C" {
  */
 typedef uint32_t odp_buffer_t;
 
-#define ODP_BUFFER_INVALID (0xffffffff) /**< Invalid buffer */
+/**
+ * ODP buffer segment
+ */
+typedef uint32_t odp_buffer_segment_t;
+
+/**
+ * ODP Buffer pool
+ */
+typedef uint32_t odp_buffer_pool_t;
+
+/**
+ * ODP buffer type
+ */
+typedef enum odp_buffer_type {
+	ODP_BUFFER_TYPE_INVALID = -1, /**< Buffer type invalid */
+	ODP_BUFFER_TYPE_ANY     = 0,  /**< Buffer type can hold any other
+					 buffer type */
+	ODP_BUFFER_TYPE_RAW     = 1,  /**< Raw buffer,
+					 no additional metadata */
+	ODP_BUFFER_TYPE_PACKET  = 2,  /**< Packet buffer */
+	ODP_BUFFER_TYPE_TIMEOUT = 3,  /**< Timeout buffer */
+} odp_buffer_type_e;
+
+/**
+ * ODP buffer options
+ *
+ * @note These options are additive so an application can simply
+ * specify a buf_opts by ORing together the options needed. Note that
+ * buffer pool options are themselves OPTIONAL and a given
+ * implementation MAY fail the buffer pool creation request with an
+ * appropriate errno if the requested option is not supported by the
+ * underlying ODP implementation, with the exception that UNSEGMENTED
+ * pools MUST be supported for non-packet types and for packet types
+ * as long as the requested size is less than the
+ * implementation-defined native packet segment size.
+ *
+ * Use ODP_BUFFER_OPTS_NONE to specify default buffer pool options
+ * with no additions. The ODP_BUFFER_OPTS_UNSEGMENTED option
+ * specifies that the buffer pool should be unsegmented.
+ *
+ * @par Segmented vs. Unsegmented Buffer Pools
+ * By default, the buffers
+ * in ODP buffer pools are logical buffers that support transparent
+ * segmentation managed by ODP on behalf of the application and have a
+ * rich set of associated semantics as described here.
+ * ODP_BUFFER_OPTS_UNSEGMENTED indicates that the buf_size specified
+ * for the pool should be regarded as a fixed buffer size for all pool
+ * elements and that segmentation support is not needed for the pool.
+ * This MAY result in greater efficiency on some implementations. For
+ * packet processing, a typical use of unsegmented pools would be in
+ * conjunction with classification rules that sort packets into
+ * different pools based on their lengths, thus ensuring that each
+ * packet occupies a single segment within an appropriately-sized
+ * buffer.
+ */
+typedef enum odp_buffer_opts {
+	ODP_BUFFER_OPTS_NONE,        /**< Default, no buffer options */
+	ODP_BUFFER_OPTS_UNSEGMENTED, /**< No segments, please */
+} odp_buffer_opts_e;
 
+/**
+ * Error returns
+ */
+#define ODP_BUFFER_INVALID (odp_buffer_t)(-1)
 
 /**
  * Buffer start address
  *
- * @param buf      Buffer handle
+ * @param[in] buf  Buffer handle
  *
  * @return Buffer start address
  */
 void *odp_buffer_addr(odp_buffer_t buf);
 
 /**
- * Buffer maximum data size
+ * Buffer application data size
+ *
+ * @param[in] buf  Buffer handle
  *
- * @param buf      Buffer handle
+ * @return Buffer application data size
  *
- * @return Buffer maximum data size
+ * @note The size returned by this routine is the size of the
+ * application data contained within the buffer and does not include
+ * any implementation-defined overhead to support metadata, etc. ODP
+ * does not define APIs for determining the amount of storage that is
+ * physically allocated by an implementation to support ODP buffers.
  */
 size_t odp_buffer_size(odp_buffer_t buf);
 
 /**
  * Buffer type
  *
- * @param buf      Buffer handle
+ * @param[in] buf    Buffer handle
  *
  * @return Buffer type
  */
-int odp_buffer_type(odp_buffer_t buf);
+odp_buffer_type_e odp_buffer_type(odp_buffer_t buf);
 
-#define ODP_BUFFER_TYPE_INVALID (-1) /**< Buffer type invalid */
-#define ODP_BUFFER_TYPE_ANY       0  /**< Buffer that can hold any other
-					  buffer type */
-#define ODP_BUFFER_TYPE_RAW       1  /**< Raw buffer, no additional metadata */
-#define ODP_BUFFER_TYPE_PACKET    2  /**< Packet buffer */
-#define ODP_BUFFER_TYPE_TIMEOUT   3  /**< Timeout buffer */
+/**
+ * Get address and size of user metadata for buffer
+ *
+ * @param[in]  buf        Buffer handle
+ * @param[out] udata_size Number of bytes of user metadata available
+ *                        at the returned address
+ * @return                Address of the user metadata for this buffer
+ *                        or NULL if the buffer has no user metadata.
+ */
+void *odp_buffer_udata(odp_buffer_t buf, size_t *udata_size);
 
+/**
+ * Get address of user metadata for buffer
+ *
+ * @param[in] buf         Buffer handle
+ *
+ * @return                Address of the user metadata for this buffer
+ *                        or NULL if the buffer has no user metadata.
+ *
+ * @note This is a "fastpath" version of odp_buffer_udata() since it
+ * omits returning the size of the user metadata area. Callers are
+ * expected to know and honor this limit nonetheless.
+ */
+void *odp_buffer_udata_addr(odp_buffer_t buf);
 
 /**
  * Tests if buffer is valid
  *
- * @param buf      Buffer handle
+ * @param[in] buf    Buffer handle
  *
  * @return 1 if valid, otherwise 0
+ *
+ * @note Since buffer operations typically occur in fastpath sections
+ * of applications, by default most ODP APIs assume that valid buffer
+ * handles are passed to them and results are undefined if this
+ * assumption is not met. This routine exists to enable an
+ * application to request explicit validation of a buffer handle. It
+ * is understood that the performance of this operation MAY vary
+ * considerably on a per-implementation basis.
  */
 int odp_buffer_is_valid(odp_buffer_t buf);
 
 /**
+ * Tests if buffer is segmented
+ *
+ * @param[in] buf    Buffer handle
+ *
+ * @return 1 if buffer has more than one segment, otherwise 0
+ *
+ * @note This routine behaves identically to the test
+ * odp_buffer_segment_count() > 1, but is potentially more efficient
+ * and represents the preferred method of determining a buffer's
+ * segmentation status.
+ */
+int odp_buffer_is_segmented(odp_buffer_t buf);
+
+/**
  * Print buffer metadata to STDOUT
  *
- * @param buf      Buffer handle
+ * @param[in] buf    Buffer handle
  *
+ * @note This routine is intended for diagnostic use and prints
+ * implementation-defined information concerning the buffer to the ODP
+ * LOG. Its provision is OPTIONAL.
  */
 void odp_buffer_print(odp_buffer_t buf);
 
 /**
+ * Get count of number of segments in a buffer
+ *
+ * @param[in] buf    Buffer handle
+ *
+ * @return           Count of the number of segments in buf
+ */
+size_t odp_buffer_segment_count(odp_buffer_t buf);
+
+/**
+ * Get the segment identifier for a buffer segment by index
+ *
+ * @param[in] buf    Buffer handle
+ * @param[in] ndx    Segment index of segment of interest
+ *
+ * @return           Segment handle or ODP_SEGMENT_INVALID if the
+ *                   supplied ndx is out of range.
+ */
+odp_buffer_segment_t odp_buffer_segment_by_index(odp_buffer_t buf, size_t ndx);
+
+/**
+ * Get the next segment handle for a buffer segment
+ *
+ * @param[in] buf    Buffer handle
+ * @param[in] seg    Segment identifier of the previous segment
+ *
+ * @return           Segment identifier of next segment or ODP_SEGMENT_INVALID
+ *
+ * @note This routine returns the identifier (odp_buffer_segment_t) of
+ * the next buffer segment in a buffer aggregate. The input
+ * specifies the buffer and the previous segment identifier. There are
+ * three use cases for this routine:
+ * @note
+ * -# If the input seg is ODP_SEGMENT_START then the segment identifier returned
+ * is that of the first segment in the buffer. ODP_SEGMENT_NULL MAY be used
+ * as a synonym for ODP_SEGMENT_START for symmetry if desired.
+ *
+ * -# If the input seg is not the last segment in the buffer then the
+ * segment handle of the next segment following seg is returned.
+ *
+ * -# If the input seg is the segment identifier of the last segment in
+ * the buffer then ODP_SEGMENT_NULL is returned.
+ *
+ */
+odp_buffer_segment_t odp_buffer_segment_next(odp_buffer_t buf,
+					     odp_buffer_segment_t seg);
+
+/**
+ * Get addressability for a specified buffer segment
+ *
+ * @param[in] buf     Buffer handle
+ * @param[in] seg     Segment handle of the segment to be mapped
+ * @param[out] seglen Returned number of bytes in this buffer segment
+ *                    available at the returned address
+ *
+ * @return            Segment start address or NULL
+ *
+ * @note This routine is used to obtain addressability to a segment within
+ * a buffer aggregate at a specified segment identifier. The returned seglen
+ * indicates the number of bytes addressable at the returned address.
+ */
+void *odp_buffer_segment_map(odp_buffer_t buf, odp_buffer_segment_t seg,
+			     size_t *seglen);
+
+/**
+ * Unmap a buffer segment
+ *
+ * @param[in] seg     Buffer segment handle
+ *
+ * @note This routine is used to unmap a buffer segment previously
+ * mapped by odp_buffer_segment_map(). Following this call,
+ * applications MUST NOT attempt to reference the segment via any
+ * pointer returned from a previous odp_buffer_segment_map() call
+ * referring to it. It is intended to allow certain NUMA
+ * architectures to better manage the coherency of mapped segments.
+ * For non-NUMA architectures this routine will be a no-op. Note that
+ * implementations SHOULD implicitly unmap all buffer segments
+ * whenever a buffer is added to a queue as this indicates that the
+ * caller is relinquishing control of the buffer.
+ */
+void odp_buffer_segment_unmap(odp_buffer_segment_t seg);
+
+/**
+ * Get start address for a specified buffer offset
+ *
+ * @param[in]  buf     Buffer handle
+ * @param[in]  offset  Byte offset within the buffer to be addressed
+ * @param[out] seglen  Returned number of bytes in this buffer
+ *                     segment available at returned address
+ *
+ * @return             Offset start address or NULL
+ *
+ * @note This routine is used to obtain addressability to a segment
+ * within a buffer at a specified byte offset. Note that because the
+ * offset is independent of any implementation-defined physical
+ * segmentation the returned seglen may be “short” and will range from
+ * 1 to whatever physical segment size is used by the underlying
+ * implementation.
+ */
+void *odp_buffer_offset_map(odp_buffer_t buf, size_t offset,
+			    size_t *seglen);
+
+/**
+ * Unmap a buffer segment by offset
+ *
+ * @param[in] buf    Buffer handle
+ * @param[in] offset Buffer offset
+ *
+ * @note This routine is used to unmap a buffer segment previously
+ * mapped by odp_buffer_offset_map(). Following this call
+ * the application MUST NOT attempt to reference the segment via any
+ * pointer returned by a prior odp_buffer_offset_map() call relating
+ * to this offset. It is intended to allow certain NUMA architectures
+ * to better manage the coherency of mapped segments. For non-NUMA
+ * architectures this routine will be a no-op. Note that
+ * implementations SHOULD implicitly unmap all buffer segments
+ * whenever a buffer is added to a queue as this indicates that the
+ * caller is relinquishing control of the buffer.
+ */
+void odp_buffer_offset_unmap(odp_buffer_t buf, size_t offset);
+
+/**
+ * Split a buffer into two buffers at a specified split point
+ *
+ * @param[in] buf    Handle of buffer to split
+ * @param[in] offset Byte offset within buf to split buffer
+ *
+ * @return           Buffer handle of the created split buffer
+ *
+ * @note This routine splits a buffer into two buffers at the
+ * specified byte offset. The odp_buffer_t returned by the function
+ * is the handle of the new buffer created at the split point. If the
+ * original buffer was allocated from a buffer pool then the split is
+ * allocated from the same pool. If the original buffer was size
+ * bytes in length then upon return the original buffer is of size
+ * offset while the split buffer is of size (size-offset).
+ *
+ * @note Upon return from this function, the system metadata for both
+ * buffers has been updated appropriately by the call since system
+ * metadata maintenance is the responsibility of the ODP
+ * implementation. Any required updates to the user metadata is the
+ * responsibility of the caller.
+ */
+odp_buffer_t odp_buffer_split(odp_buffer_t buf, size_t offset);
+
+/**
+ * Join two buffers into a single buffer
+ *
+ * @param[in] buf1  Buffer handle of first buffer to join
+ * @param[in] buf2  Buffer handle of second buffer to join
+ *
+ * @return          Buffer handle of the joined buffer
+ *
+ * @note This routine joins two buffers into a single buffer. Both
+ * buf1 and buf2 MUST be from the same buffer pool and the resulting
+ * joined buffer will be an element of that same pool. The
+ * application MUST NOT assume that either buf1 or buf2 survive the
+ * join or that the returned joined buffer is contiguous with or
+ * otherwise related to the input buffers. An implementation SHOULD
+ * free either or both input buffers if they are not reused as part of
+ * the construction of the returned joined buffer. If the join cannot
+ * be performed (e.g., if the two input buffers are not from the same
+ * buffer pool, insufficient space in the target buffer pool, etc.)
+ * then ODP_BUFFER_INVALID SHOULD be returned to indicate that the
+ * operation could not be performed, and an appropriate errno set. In
+ * such case the input buffers MUST NOT be freed as part of the failed
+ * join attempt and should be unchanged from their input values and
+ * content.
+ *
+ * @note The result of odp_buffer_join() is the logical concatenation
+ * of the two buffers using an implementation-defined buffer
+ * aggregation mechanism. The application data contents of the
+ * returned buffer is identical to that of the two joined input
+ * buffers however certain associated metadata (e.g., information
+ * about the buffer size) will likely differ.
+ *
+ * @note If user metadata is present in the buffer pool containing the
+ * input buffers, then the user metadata associated with the returned
+ * buffer MUST be copied by this routine from the source buf1.
+ */
+odp_buffer_t odp_buffer_join(odp_buffer_t buf1, odp_buffer_t buf2);
+
+/**
+ * Trim a buffer at a specified trim point
+ *
+ * @param[in] buf    buffer handle of buffer to trim
+ * @param[in] offset byte offset within buf to trim
+ *
+ * @return           Handle of the trimmed buffer or ODP_BUFFER_INVALID
+ *                   if the operation was not performed
+ *
+ * @note This routine discards bytes from the end of a buffer. It is
+ * logically equivalent to a split followed by a free of the split
+ * portion of the input buffer. The input offset must be less than or
+ * equal to the odp_buffer_size() of the input buffer. Upon
+ * successful return the odp_buffer_size() routine would now return
+ * offset as the size of the trimmed buffer. Note that the returned
+ * odp_buffer_t may not necessarily be the same as the input
+ * odp_buffer_t. The caller should use the returned value when
+ * referencing the trimmed buffer instead of the original in case they
+ * are different.
+ *
+ * @note If the input buf contains user metadata, then this data MUST
+ * be copied to the returned buffer if needed by the API
+ * implementation.
+ */
+odp_buffer_t odp_buffer_trim(odp_buffer_t buf, size_t offset);
+
+/**
+ * Extend a buffer for a specified number of bytes
+ *
+ * @param[in] buf  buffer handle of buffer to expand
+ * @param[in] ext  size, in bytes, of the extent to add to the
+ *                 existing buffer.
+ *
+ * @return         Handle of the extended buffer or ODP_BUFFER_INVALID
+ *                 if the operation was not performed
+ *
+ * @note This routine extends a buffer by increasing its size by ext
+ * bytes. It is logically equivalent to an odp_buffer_join() of a
+ * buffer of size ext to the original buffer. Upon successful return
+ * the odp_buffer_size() routine would now return size+ext as the size
+ * of the extended buffer.
+ *
+ * @note Note that the returned odp_buffer_t may not necessarily be the
+ * same as the input odp_buffer_t. The caller should use the returned
+ * value when referencing the extended buffer instead of the original
+ * in case they are different. If the input buf contains user meta
+ * data, then this data MUST be copied to the returned buffer if
+ * needed by the API implementation.
+ */
+odp_buffer_t odp_buffer_extend(odp_buffer_t buf, size_t ext);
+
+/**
+ * Clone a buffer, returning an exact copy of it
+ *
+ * @param[in] buf  buffer handle of buffer to duplicate
+ *
+ * @return         Handle of the duplicated buffer or ODP_BUFFER_INVALID
+ *                 if the operation was not performed
+ *
+ * @note This routine allows an ODP buffer to be cloned in an
+ * implementation-defined manner. The application data contents of
+ * the returned odp_buffer_t is an exact copy of the application data
+ * of the input buffer. The implementation MAY perform this operation
+ * via reference counts, resegmentation, or any other technique it
+ * wishes to employ. The cloned buffer is an element of the same
+ * buffer pool as the input buf. If the input buf contains user meta
+ * data, then this data MUST be copied to the returned buffer by the
+ * ODP implementation.
+ */
+odp_buffer_t odp_buffer_clone(odp_buffer_t buf);
+
+/**
+ * Copy a buffer, returning an exact copy of it
+ *
+ * @param[in] buf  buffer handle of buffer to copy
+ * @param[in] pool buffer pool to contain the copied buffer
+ *
+ * @return         Handle of the copied buffer or ODP_BUFFER_INVALID
+ *                 if the operation was not performed
+ *
+ * @note This routine allows an ODP buffer to be copied in an
+ * implementation-defined manner to a specified buffer pool. The
+ * specified pool may or may not be different from the source buffer’s
+ * pool. The application data contents of the returned odp_buffer_t
+ * is an exact separate copy of the application data of the input
+ * buffer. If the input buf contains user metadata, then this data
+ * MUST be copied to the returned buffer by the ODP implementation.
+ */
+odp_buffer_t odp_buffer_copy(odp_buffer_t buf, odp_buffer_pool_t pool);
+
+
+/**
  * @}
  */
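Since the segment and offset mapping calls are the main additions to odp_buffer.h above, here is an illustrative sketch (not part of the patch) of walking a possibly segmented buffer with odp_buffer_offset_map()/odp_buffer_offset_unmap(). The walk_buffer function and the process_bytes callback are hypothetical names introduced for this example.

#include <odp.h>

static void walk_buffer(odp_buffer_t buf,
			void (*process_bytes)(const void *data, size_t len))
{
	size_t size   = odp_buffer_size(buf);
	size_t offset = 0;

	while (offset < size) {
		size_t seglen;
		void *addr = odp_buffer_offset_map(buf, offset, &seglen);

		if (addr == NULL)
			break;

		/* seglen may be "short" on segmented buffers; process only
		 * the addressable span and continue from the next offset. */
		process_bytes(addr, seglen);
		odp_buffer_offset_unmap(buf, offset);
		offset += seglen;
	}
}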
 
diff --git a/platform/linux-generic/include/api/odp_buffer_pool.h b/platform/linux-generic/include/api/odp_buffer_pool.h
index d04abf0..b71c727 100644
--- a/platform/linux-generic/include/api/odp_buffer_pool.h
+++ b/platform/linux-generic/include/api/odp_buffer_pool.h
@@ -1,4 +1,4 @@ 
-/* Copyright (c) 2013, Linaro Limited
+/* Copyright (c) 2013-2014, Linaro Limited
  * All rights reserved.
  *
  * SPDX-License-Identifier:     BSD-3-Clause
@@ -8,7 +8,43 @@ 
 /**
  * @file
  *
- * ODP buffer pool
+ * @par Buffer Pools
+ * Buffers are elements of buffer pools that represent an equivalence
+ * class of buffer objects that are managed by a buffer pool manager.
+ * ODP implementations MAY support buffer pool managers implemented in
+ * hardware, software, or a combination of the two. An ODP
+ * implementation MUST support at least one buffer pool and MAY
+ * support as many as it wishes. The implementation MAY support one
+ * or more predefined buffer pools that are not explicitly allocated
+ * by an ODP application. It SHOULD also support application creation
+ * of buffer pools via the odp_buffer_pool_create() API, however it
+ * MAY restrict the types of buffers that can be so created.
+ *
+ * @par
+ * Buffer pools are represented by the abstract type odp_buffer_pool_t
+ * that is returned by buffer pool creation and lookup/enumeration
+ * routines. Applications refer to buffer pools via a name of
+ * implementation-defined maximum length that MUST be a minimum of
+ * eight characters in length and MAY be longer. It is RECOMMENDED
+ * that 32 character buffer pool names be supported to provide
+ * application naming flexibility. The supported maximum length of
+ * buffer pool names is exposed via the ODP_BUFFER_POOL_NAME_LEN
+ * predefined implementation limit.
+ *
+ * @par Segmented vs. Unsegmented Buffer Pools
+ * By default, the buffers in
+ * ODP buffer pools are logical buffers that support transparent
+ * segmentation managed by ODP on behalf of the application and have a
+ * rich set of associated semantics as described here.
+ * ODP_BUFFER_OPTS_UNSEGMENTED indicates that the buf_size specified
+ * for the pool should be regarded as a fixed buffer size for all pool
+ * elements and that segmentation support is not needed for the pool.
+ * This MAY result in greater efficiency on some implementations. For
+ * packet processing, a typical use of unsegmented pools would be in
+ * conjunction with classification rules that sort packets into
+ * different pools based on their lengths, thus ensuring that each
+ * packet occupies a single segment within an appropriately-sized
+ * buffer.
  */
 
 #ifndef ODP_BUFFER_POOL_H_
@@ -34,43 +70,316 @@  extern "C" {
 /** Invalid buffer pool */
 #define ODP_BUFFER_POOL_INVALID   0
 
-/** ODP buffer pool */
-typedef uint32_t odp_buffer_pool_t;
+/**
+ * Buffer initialization routine prototype
+ *
+ * @note Routines of this type MAY be passed as part of the
+ * odp_buffer_pool_init_t structure to be called whenever a
+ * buffer is allocated to initialize the user metadata
+ * associated with that buffer.
+ */
+typedef void (odp_buf_init_t)(odp_buffer_t buf, void *buf_init_arg);
+
+/**
+ * Buffer pool parameters
+ *
+ * @param[in] buf_num    Number of buffers that pool should contain
+ * @param[in] buf_size   Size of application data in each buffer
+ * @param[in] buf_type   Buffer type
+ * @param[in] buf_opts   Buffer options
+ */
+typedef struct odp_buffer_pool_param_t {
+	size_t buf_num;             /**< Number of buffers in this pool */
+	size_t buf_size;            /**< Application data size of each buffer */
+	odp_buffer_type_e buf_type; /**< Buffer type */
+	odp_buffer_opts_e buf_opts; /**< Buffer options */
+} odp_buffer_pool_param_t;          /**< Type of buffer pool parameter struct */
 
+/**
+ * Buffer pool initialization parameters
+ *
+ * @param[in] udata_size     Size of the user metadata for each buffer
+ * @param[in] buf_init       Function pointer to be called to initialize the
+ *                           user metadata for each buffer in the pool.
+ * @param[in] buf_init_arg   Argument to be passed to buf_init().
+ *
+ */
+typedef struct odp_buffer_pool_init_t {
+	size_t udata_size;         /**< Size of user metadata for each buffer */
+	odp_buf_init_t *buf_init;  /**< Buffer initialization routine to use */
+	void *buf_init_arg;        /**< Argument to be passed to buf_init() */
+} odp_buffer_pool_init_t;          /**< Type of buffer initialization struct */
 
 /**
  * Create a buffer pool
  *
- * @param name      Name of the pool (max ODP_BUFFER_POOL_NAME_LEN - 1 chars)
- * @param base_addr Pool base address
- * @param size      Pool size in bytes
- * @param buf_size  Buffer size in bytes
- * @param buf_align Minimum buffer alignment
- * @param buf_type  Buffer type
+ * @param[in] name           Name of the pool
+ *                           (max ODP_BUFFER_POOL_NAME_LEN - 1 chars)
+ *
+ * @param[in] params         Parameters controlling the creation of this
+ *                           buffer pool
  *
- * @return Buffer pool handle
+ * @param[in] init_params    Parameters controlling the initialization of
+ *                           this buffer pool
+ *
+ * @return Buffer pool handle or ODP_BUFFER_POOL_NULL with errno set
+ *
+ * @note This routine is used to create a buffer pool. It takes three
+ * arguments: the name of the pool to be created, a parameter
+ * structure that controls the pool creation, and an optional
+ * parameter that controls pool initialization. In the creation
+ * parameter structure, the application specifies the number of
+ * buffers that the pool should contain as well as the application
+ * data size for each buffer in the pool, the type of buffers it
+ * should contain, and their associated options. In the
+ * initialization parameters, the application specifies the size of
+ * the user metadata that should be associated with each buffer in
+ * the pool. If no user metadata is required, the init_params SHOULD
+ * be specified as NULL. If user metadata is requested, then
+ * udata_size SHOULD be set to the requested size of the per-buffer
+ * user metadata. Also specified is the address of an
+ * application-provided buffer initialization routine to be called for
+ * each buffer in the pool at the time the pool is initialized, or
+ * when the buffer is allocated. If no application buffer
+ * initialization is needed, then buf_init and buf_init_arg SHOULD be
+ * set to NULL.
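+ *
+ * @note Usage sketch (illustrative only; my_udata_t and my_buf_init()
+ * are hypothetical application names):
+ * @code
+ *     odp_buffer_pool_param_t params;
+ *     odp_buffer_pool_init_t  init;
+ *     odp_buffer_pool_t       pool;
+ *
+ *     params.buf_num  = 1024;                  // buffers in the pool
+ *     params.buf_size = 1856;                  // app data per buffer
+ *     params.buf_type = ODP_BUFFER_TYPE_PACKET;
+ *     params.buf_opts = ODP_BUFFER_OPTS_UNSEGMENTED;
+ *
+ *     init.udata_size   = sizeof(my_udata_t);  // per-buffer user meta data
+ *     init.buf_init     = my_buf_init;
+ *     init.buf_init_arg = NULL;
+ *
+ *     pool = odp_buffer_pool_create("pkt_pool", &params, &init);
+ *     if (pool == ODP_BUFFER_POOL_INVALID) {
+ *             // creation failed; errno gives the reason
+ *     }
+ * @endcode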
  */
 odp_buffer_pool_t odp_buffer_pool_create(const char *name,
-					 void *base_addr, uint64_t size,
-					 size_t buf_size, size_t buf_align,
-					 int buf_type);
+					 odp_buffer_pool_param_t *params,
+					 odp_buffer_pool_init_t *init_params);
 
+/**
+ * Destroy a buffer pool previously created by odp_buffer_pool_create()
+ *
+ * @param[in] pool    Handle of the buffer pool to be destroyed
+ *
+ * @return            0 on Success, -1 on Failure.
+ *
+ * @note This routine destroys a previously created buffer pool.
+ * Attempts to destroy a predefined buffer pool will be rejected
+ * since the application did not create it.  Results are undefined if
+ * an attempt is made to destroy a buffer pool that contains allocated
+ * or otherwise active buffers.
+ */
+int odp_buffer_pool_destroy(odp_buffer_pool_t pool);
 
 /**
  * Find a buffer pool by name
  *
- * @param name      Name of the pool
+ * @param[in] name  Name of the pool
  *
  * @return Buffer pool handle, or ODP_BUFFER_POOL_INVALID if not found.
  */
 odp_buffer_pool_t odp_buffer_pool_lookup(const char *name);
 
+/**
+ * Get the next buffer pool from its predecessor
+ *
+ * @param[in]  pool       Buffer pool handle
+ * @param[out] name       Name of the pool
+ *                        (max ODP_BUFFER_POOL_NAME_LEN - 1 chars)
+ * @param[out] udata_size Size of user metadata used by this pool.
+ * @param[out] params     Output structure for pool parameters
+ * @param[out] predef     Predefined (1) or Created (0).
+ *
+ * @return                Buffer pool handle
+ *
+ * @note This routine returns the abstract identifier
+ * (odp_buffer_pool_t) of a buffer pool and is used to obtain the list
+ * of all buffer pools. In this manner an application can discover
+ * both application created and implementation predefined buffer pools
+ * and their characteristics. The input specifies the previous buffer
+ * pool identifier. There are three use cases for this
+ * routine:
+ *
+ * -# If the input pool is ODP_BUFFER_POOL_START then the buffer pool handle
+ * returned is that of the first buffer pool in the list.
+ * ODP_BUFFER_POOL_NULL MAY be used as a synonym for ODP_BUFFER_POOL_START
+ * if desired.
+ *
+ * -# If the input pool is not the last element in the buffer pool list
+ * then the buffer pool handle of the next buffer pool following pool is
+ * returned.
+ *
+ * -# If the input pool is the buffer pool handle of the last buffer pool
+ * in the list then ODP_BUFFER_POOL_NULL is returned.
+ *
+ * @note Returned with the buffer pool handle is the name of the pool as
+ * well as its dimensions, type of buffers it contains, and a flag
+ * that says whether the pool is predefined or was created by the
+ * application. Note that the buf_size reported for a buffer pool is
+ * simply the declared expected size of the buffers in the pool and
+ * serves only to estimate the total amount of application data that
+ * can be stored in the pool. Actual sizes of individual buffers
+ * within the pool are dynamic and variable since physical buffer
+ * segments MAY be aggregated to create buffers of arbitrary size (up
+ * to the pool memory limits). Note that for predefined buffer pools,
+ * some implementations MAY return the physical segment counts and
+ * sizes used to construct the pool as output of this routine.
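+ *
+ * @note Usage sketch (illustrative only) that walks all buffer pools:
+ * @code
+ *     char name[ODP_BUFFER_POOL_NAME_LEN];
+ *     size_t udata_size;
+ *     odp_buffer_pool_param_t params;
+ *     int predef;
+ *     odp_buffer_pool_t pool = ODP_BUFFER_POOL_START;
+ *
+ *     while ((pool = odp_buffer_pool_next(pool, name, &udata_size,
+ *                                         &params, &predef)) !=
+ *            ODP_BUFFER_POOL_NULL) {
+ *             // inspect name, params, udata_size and predef here
+ *     }
+ * @endcode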
+ */
+odp_buffer_pool_t odp_buffer_pool_next(odp_buffer_pool_t pool,
+				       char *name, size_t *udata_size,
+				       odp_buffer_pool_param_t *params,
+				       int *predef);
+
+/**
+ * Get the high/low watermarks for a buffer pool
+ *
+ * @param[in]  pool     Handle of the buffer pool
+ * @param[out] high_wm  The high water mark of the designated buffer pool
+ * @param[out] low_wm   The low water mark of the designated buffer pool
+ *
+ * @return 0 on Success or -1 on error. For errors, errno is set to
+ *                 ODP_BUFFER_POOL_INVALID if pool is unknown
+ *                 or ODP_BUFFER_POOL_NO_WATERMARKS if no watermarks
+ *                 are associated with this buffer pool.
+ *
+ * @note This routine gets the high/low watermarks associated with a
+ * given buffer pool. If the buffer pool does not have or support
+ * watermarks then an error will be returned and both high_wm and
+ * low_wm will be unchanged.
+ *
+ * @note It is RECOMMENDED that buffer pools of all types support the
+ * setting and getting of watermarks for use in flow control
+ * processing.  Watermarks are designed to trigger flow control
+ * actions based on utilization levels of a buffer pool. When the
+ * number of free buffers in the buffer pool hits the configured low
+ * watermark for the pool, the pool asserts a low watermark condition
+ * and an implementation-defined action in response to this condition
+ * is triggered. Once in a low watermark state, the condition is
+ * maintained until the number of free buffers reaches the configured
+ * high watermark. At this point the low watermark condition is
+ * deasserted and normal pool processing resumes. Having separate high
+ * and low watermarks permits configurable hysteresis to avoid jitter
+ * in handling transient buffer shortages in the pool.
+ *
+ * @note In general, two types of actions are common. The first is to
+ * control Random Early Detection (RED) or Weighted RED (WRED)
+ * processing for the pool, while the second is to control IEEE
+ * 802.1Qbb priority-based flow control (PFC) processing for so-called
+ * “lossless Ethernet” support. The use of watermarks for flow control
+ * processing is most often used for pools containing packets and this
+ * is discussed in further detail in the Class of Service (CoS) ODP
+ * Classification APIs.
+ */
+int odp_buffer_pool_watermarks(odp_buffer_pool_t pool,
+			       size_t *high_wm, size_t *low_wm);
+
+/**
+ * Set the high/low watermarks for a buffer pool
+ *
+ * @param[in] pool      Handle of the buffer pool
+ * @param[in] high_wm   The high water mark of the designated buffer pool
+ * @param[in] low_wm    The low water mark of the designated buffer pool
+ *
+ * @return 0 on Success or -1 on error. For errors, errno is set to
+ *                 ODP_BUFFER_POOL_INVALID if pool is unknown
+ *                 or ODP_BUFFER_POOL_NO_WATERMARKS if no watermarks
+ *                 are associated with this buffer pool.
+ *
+ * @note This routine sets the high/low watermarks associated with a
+ * specified buffer pool. If the buffer pool does not support
+ * watermarks then errno is set to ODP_BUFFER_POOL_NO_WATERMARKS and
+ * the operation is not performed.
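+ *
+ * @note Usage sketch (illustrative only; pool is assumed to be a valid
+ * buffer pool handle) that configures hysteresis:
+ * @code
+ *     size_t high_wm, low_wm;
+ *
+ *     if (odp_buffer_pool_watermarks(pool, &high_wm, &low_wm) == 0) {
+ *             // pool supports watermarks; tighten them
+ *             if (odp_buffer_pool_set_watermarks(pool, 256, 64) != 0) {
+ *                     // errno explains the failure
+ *             }
+ *     }
+ * @endcode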
+ */
+int odp_buffer_pool_set_watermarks(odp_buffer_pool_t pool,
+				   size_t high_wm, size_t low_wm);
+
+/**
+ * Get the headroom for a packet buffer pool
+ *
+ * @param[in] pool      Handle of the buffer pool
+ *
+ * @return              The headroom for the pool.  If the pool is invalid,
+ *                      returns -1 and errno set to ODP_BUFFER_POOL_INVALID.
+ *
+ * @note This routine returns the headroom associated with the buffer
+ * pool.  This is the headroom that will be set for packets allocated
+ * from this packet buffer pool.
+ */
+size_t odp_buffer_pool_headroom(odp_buffer_pool_t pool);
+
+/**
+ * Set the headroom for a packet buffer pool
+ *
+ * @param[in] pool      Handle of the buffer pool
+ * @param[in] hr        The headroom for the pool
+ *
+ * @return              0 on Success or -1 on error.  For errors, errno set to
+ *                      ODP_BUFFER_POOL_INVALID if pool is unknown
+ *                      or ODP_INVALID_RANGE if hr exceeds
+ *                      ODP_PACKET_MAX_HEADROOM
+ *
+ * @note This routine sets the default headroom associated with
+ * buffers allocated from this packet pool.  Note that headroom is a
+ * per-packet attribute.  The headroom associated with the buffer pool
+ * is the default headroom to assign to a packet allocated from this
+ * buffer pool by the odp_packet_alloc() routine. By contrast, the
+ * odp_cos_set_headroom() classification API sets the default headroom
+ * to assign to a packet by the classifier for packets matching a
+ * particular Class of Service (CoS).  The allowable range of
+ * supported headroom sizes is subject to the ODP_PACKET_MAX_HEADROOM
+ * limit defined by the implementation.  The valid range for hr is
+ * 0..ODP_PACKET_MAX_HEADROOM.
+ *
+ * @note Headroom serves two purposes.  The first is to reserve a prefix area
+ * of buffers that will hold packets for header expansion.  Applications
+ * can add headers to packets via odp_packet_push_headroom() to make
+ * headroom space available for new headers.
+ *
+ * @note The second use of headroom is to control packet alignment
+ * within buffers.  The buffers in a buffer pool MUST be "naturally
+ * aligned" for addressing purposes by the implementation.  It is
+ * RECOMMENDED that this be cache aligned.  Because a standard
+ * Ethernet header is 14 octets in length, it is usually convenient to
+ * offset packets by 2 octets so that the following Layer 3 header
+ * (typically IPv4 or IPv6) is naturally aligned on a word boundary.
+ * So applications SHOULD specify an offset that reflects the packet
+ * alignment they wish to see.  For example, a call like
+ * odp_buffer_pool_set_headroom(pool, hr+2); would force packets to be
+ * offset by two bytes to achieve the desired Layer 3 alignment while
+ * also reserving hr bytes of headroom for application use.
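+ *
+ * @note Usage sketch (illustrative only; pool is assumed to be a valid
+ * packet buffer pool handle): reserve 64 bytes of working headroom and
+ * add 2 bytes so that the Layer 3 header of received Ethernet frames
+ * lands on a word boundary:
+ * @code
+ *     size_t hr = 64;
+ *
+ *     if (odp_buffer_pool_set_headroom(pool, hr + 2) != 0) {
+ *             // errno is ODP_BUFFER_POOL_INVALID or ODP_INVALID_RANGE
+ *     }
+ * @endcode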
+ */
+int odp_buffer_pool_set_headroom(odp_buffer_pool_t pool, size_t hr);
+
+/**
+ * Get the tailroom for a packet buffer pool
+ *
+ * @param[in] pool      Handle of the buffer pool
+ *
+ * @return              The tailroom for the pool.  If the pool is invalid,
+ *                      returns -1 and errno set to ODP_BUFFER_POOL_INVALID.
+ *
+ * @note This routine returns the tailroom associated with buffers
+ * allocated from a packet buffer pool.
+ */
+size_t odp_buffer_pool_tailroom(odp_buffer_pool_t pool);
+
+/**
+ * Set the tailroom for a packet buffer pool
+ *
+ * @param[in] pool      Handle of the buffer pool
+ * @param[in] tr        The tailroom for the pool
+ *
+ * @return              0 on Success or -1 on error.  For errors, errno set to
+ *                      ODP_BUFFER_POOL_INVALID if pool is unknown
+ *                      or ODP_INVALID_RANGE if tr exceeds
+ *                      ODP_PACKET_MAX_TAILROOM
+ *
+ * @note This routine sets the tailroom associated with buffers
+ * allocated from a packet pool.  The allowable range of supported
+ * tailroom sizes is subject to the ODP_PACKET_MAX_TAILROOM limit
+ * defined by the implementation. The valid range for tr is
+ * 0..ODP_PACKET_MAX_TAILROOM.
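+ *
+ * @note Usage sketch (illustrative only; pool is assumed to be a valid
+ * packet buffer pool handle): reserve 8 bytes of tailroom, e.g. for an
+ * application-appended trailer:
+ * @code
+ *     if (odp_buffer_pool_set_tailroom(pool, 8) != 0) {
+ *             // errno is ODP_BUFFER_POOL_INVALID or ODP_INVALID_RANGE
+ *     }
+ * @endcode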
+ */
+int odp_buffer_pool_set_tailroom(odp_buffer_pool_t pool, size_t tr);
 
 /**
  * Print buffer pool info
  *
- * @param pool      Pool handle
+ * @param[in] pool   Pool handle
  *
+ * @note This is a diagnostic routine that prints statistics regarding
+ * the specified buffer pool to the ODP LOG. This routine is OPTIONAL
+ * and if present its output is implementation-defined.
  */
 void odp_buffer_pool_print(odp_buffer_pool_t pool);
 
diff --git a/platform/linux-generic/include/api/odp_config.h b/platform/linux-generic/include/api/odp_config.h
index 906897c..65cc5b5 100644
--- a/platform/linux-generic/include/api/odp_config.h
+++ b/platform/linux-generic/include/api/odp_config.h
@@ -49,6 +49,12 @@  extern "C" {
 #define ODP_CONFIG_PKTIO_ENTRIES 64
 
 /**
+ * Packet processing limits
+ */
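+
+/** Size in bytes of each physical buffer segment */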
+#define ODP_CONFIG_BUF_SEG_SIZE (512*3)
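+
+/** Maximum supported buffer size in bytes (a whole number of segments) */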
+#define ODP_CONFIG_BUF_MAX_SIZE (ODP_CONFIG_BUF_SEG_SIZE*7)
+
+/**
  * @}
  */
 
diff --git a/platform/linux-generic/include/api/odp_packet.h b/platform/linux-generic/include/api/odp_packet.h
index 688e047..6d36b02 100644
--- a/platform/linux-generic/include/api/odp_packet.h
+++ b/platform/linux-generic/include/api/odp_packet.h
@@ -1,4 +1,4 @@ 
-/* Copyright (c) 2013, Linaro Limited
+/* Copyright (c) 2013-2014, Linaro Limited
  * All rights reserved.
  *
  * SPDX-License-Identifier:     BSD-3-Clause
@@ -8,7 +8,262 @@ 
 /**
  * @file
  *
- * ODP packet descriptor
+ * @par ODP Packet Management APIs
+ * Described here are the fundamental
+ * concepts and supporting APIs of the ODP Packet Management routines.
+ * All conforming ODP implementations MUST provide these data types
+ * and APIs. Latitude in how routines MAY be implemented is noted
+ * when applicable.
+ *
+ * @par Inherited and New Concepts
+ * As a type of buffer, packets are
+ * allocated from their containing buffer pools created via
+ * odp_buffer_pool_create() with a buffer type of
+ * ODP_BUFFER_TYPE_PACKET. Packets are referenced by an abstract
+ * odp_packet_t handle defined by each implementation.
+ *
+ * @par
+ * Packet objects are normally created at ingress when they arrive
+ * at a source odp_pktio_t and are received by an application either
+ * directly or (more typically) via a scheduled receive queue. They
+ * MAY be implicitly freed when they are transmitted to an output
+ * odp_pktio_t via an associated transmit queue, or freed directly via
+ * the odp_packet_free() API.
+ *
+ * @par
+ * Packets contain additional system meta data, beyond that found
+ * in buffers, that is populated by the parse function of the ODP
+ * classifier. See below for a discussion of this meta data and the
+ * accessor functions provided for application access to it.
+ *
+ * @par
+ * Occasionally an application may originate a packet itself,
+ * either de novo or by deriving it from an existing packet, and APIs
+ * are provided to assist in these cases as well. Application-created
+ * packets can be recycled back through a loopback interface to reparse
+ * and reclassify them, or the application can explicitly re-invoke the
+ * parser or do its own parsing as desired. This can also occur as a
+ * result of packet decryption or decapsulation when dealing with
+ * ingress tunnels. See the ODP classification design document for
+ * further details. Additionally, the meta data set as a result of
+ * parsing MAY be directly set by the application as needed.
+ *
+ * @par Packet Structure and Concepts
+ * A packet consists of a sequence
+ * of octets conforming to an architected format, such as Ethernet,
+ * that can be received and transmitted via the ODP pktio abstraction.
+ * Packets have a length, which is the number of bytes in the packet.
+ * Packet data in ODP is referenced via offsets since these reflect
+ * the logical contents and structure of a packet independent of how
+ * particular ODP implementations store that data.
+ *
+ * @par
+ * These concepts are shown in the following diagram:
+ *
+ * @image html packet.png "ODP Packet Structure" width=\textwidth
+ * @image latex packet.eps "ODP Packet Structure" width=\textwidth
+ *
+ * @par
+ * Packet data consists of zero or more headers, followed by zero or
+ * more bytes of payload, followed by zero or more trailers.
+ *
+ * @par Packet Segments and Addressing
+ * Network SoCs use various methods and techniques to store and
+ * process packets efficiently.
+ * These vary considerably from platform to platform, so to ensure
+ * portability across them ODP adopts certain conventions for
+ * referencing packets.
+ *
+ * @par
+ * ODP APIs use a handle of type odp_packet_t to refer to packet
+ * objects. Associated with packets are various bits of system meta
+ * data that describe the packet. By referring to the meta data, ODP
+ * applications accelerate packet processing by minimizing the need to
+ * examine packet data. This is because the packet meta data is
+ * populated by parsing and classification functions that are coupled
+ * to ingress processing that occurs prior to a packet being presented
+ * to the application via the ODP scheduler.
+ *
+ * @par
+ * When an ODP application needs to examine the contents of a
+ * packet, it requests addressability to it via a mapping API that
+ * makes the packet (or a contiguously addressable segment of it)
+ * available for coherent access by the application. While ODP
+ * applications MAY request that packets be stored in unsegmented
+ * buffer pools, not all platforms supporting ODP are able to provide
+ * contiguity guarantees for packets and as a result such requests may
+ * either fail or else result in degraded performance compared to
+ * native operation.
+ *
+ * @par
+ * Instead, ODP applications SHOULD assume that the underlying
+ * implementation stores packets in segments of implementation-defined
+ * and managed size. These represent the contiguously addressable
+ * portions of a packet that the application may refer to via normal
+ * memory accesses. ODP provides APIs that allow applications to
+ * operate on packet segments in an efficient and portable manner as
+ * needed. By combining these with the meta data provided for
+ * packets, ODP applications can operate in a fully
+ * platform-independent manner while still achieving optimal
+ * performance across the range of platforms that support ODP.
+ *
+ * @par
+ * The use of segments for packet addressing and their
+ * relationship to meta data is shown in this diagram:
+ *
+ * @image html segments.png "ODP Packet Segmentation Structure" width=\textwidth
+ * @image latex segments.eps "ODP Packet Segmentation Structure" width=\textwidth
+ *
+ * @par
+ * The packet meta data is set during parsing and identifies the
+ * starting offsets of the various headers contained in the packet.
+ * The packet itself is physically stored as a sequence of segments
+ * that are managed by the ODP implementation. Segment 0 is the first
+ * segment of the packet and is where the packet’s headroom and
+ * headers typically reside. Depending on the length of the packet,
+ * additional segments may be part of the packet and contain the
+ * remaining packet payload and tailroom. The application need not
+ * concern itself with segments except that when the application
+ * requires addressability to a packet it understands that
+ * addressability is provided on a per-segment basis. So, for
+ * example, if the application makes a call like
+ * odp_packet_payload_map() to obtain addressability to the packet
+ * payload, the returned seglen from that call is the number of bytes
+ * from the start of the payload that are contiguously addressable to
+ * the application from the returned payload address. This is because
+ * the following byte occupies a different segment that may be stored
+ * elsewhere. To obtain access to those bytes, the application simply
+ * requests addressability to that offset and it will be able to
+ * address the payload bytes that occupy segment 1, etc. Note that
+ * the returned seglen for any mapping call is always the lesser of
+ * the remaining packet length and the size of its containing segment.
+ * So a mapping request for segment 2, for example, would return a
+ * seglen that extends only to the end of the packet since the
+ * remaining bytes are part of the tailroom reserved for the packet
+ * and are not usable by the application until made available to it by
+ * an appropriate API call.
+ *
+ * @par Headroom and Tailroom
+ * Because data plane applications will
+ * often manipulate packets by adding or removing headers and/or
+ * trailers, ODP implementations MUST support the concepts of headroom
+ * and tailroom for packets. How implementations choose to support
+ * these concepts is unspecified by ODP.
+ *
+ * @par
+ * Headroom is an area that logically prepends the start of a
+ * packet and is reserved for the insertion of additional header
+ * information to the front of a packet. Typical use of headroom
+ * might be packet encapsulation as part of tunnel operations.
+ * Tailroom is a similar area that logically follows a packet reserved
+ * for the insertion of trailer information at the end of a packet.
+ * Typical use of tailroom might be in payload manipulation or in
+ * additional checksum insertion. The idea behind headroom and
+ * tailroom is to support efficient manipulation of packet headers
+ * and/or trailers by preallocating buffer space and/or meta data to
+ * support the insertion of packet headers and/or trailers while
+ * avoiding the overhead of more general split/join buffer operations.
+ *
+ * @par
+ * Note that not every application or communication protocol will
+ * need these and ODP implementations MAY impose restrictions or
+ * modifications on when and how these capabilities are used. For
+ * example, headroom MAY indicate the byte offset into a packet buffer
+ * at which packet data is received from an associated odp_pktio_t.
+ * An implementation MAY add to the requested headroom or tailroom for
+ * implementation-defined alignment or other reasons. Note also that
+ * applications MUST NOT assume that headroom and/or tailroom is
+ * necessarily contiguous with any other segment of the packet unless
+ * the underlying buffer pool the packet has been allocated from has
+ * been explicitly defined as unsegmented. See the ODP Buffer API
+ * design for discussion of segmented vs. unsegmented buffers and
+ * their implementation models. This convention is observed
+ * automatically because every mapping call returns a corresponding
+ * seglen that tells the application the number of bytes it may
+ * reference from the address returned by that call. Applications
+ * MUST observe these limits to avoid programming errors and
+ * portability issues.
+ *
+ * @par Packet Parsing and Inflags
+ * ODP packets are intended to be
+ * processed by the ODP Classifier upon receipt. As part of its
+ * processing, the classifier parses information from the packet
+ * headers and makes this information available as system meta data so
+ * that applications using ODP do not have to reference packets or
+ * their headers directly for most processing. The set of headers
+ * supported by the ODP parse functions MUST include at minimum the
+ * following:
+ *
+ * - Layer 2: ARP, SNAP (recognition), VLAN (C-Tag and S-Tag)
+ * - Layer 3: IPv4, IPv6
+ * - Layer 4: TCP, UDP, ICMP, ICMPv6, IPsec (ESP and AH)
+ *
+ * @par
+ * Other protocols MAY be supported, however ODP v1.0 does not
+ * define APIs for referencing them.
+ *
+ * @par
+ * Parsing results are stored as meta data associated with the
+ * packet. These include various precomputed offsets used for direct
+ * access to parsed headers as well as indicators of packet contents
+ * that are collectively referred to as inflags. Inflags are packet
+ * meta data that may be inspected or set via accessor functions as
+ * described below. Setters are provided to enable applications that
+ * create or modify packet headers to update these attributes
+ * efficiently. Applications that use them take responsibility for
+ * ensuring that the results are consistent. ODP itself does not
+ * validate an inflag setter to ensure that it reflects actual packet
+ * contents. Applications that wish this additional assurance should
+ * request an explicit packet reparse.
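+ *
+ * @par
+ * For example (illustrative sketch), an application that inserts an
+ * IPv4 header itself may either set the relevant inflags directly or
+ * request a reparse:
+ * @code
+ *     odp_packet_set_inflag_l3(pkt, 1);
+ *     odp_packet_set_inflag_ipv4(pkt, 1);
+ *     // ...or, to have all meta data set consistently:
+ *     odp_packet_parse(pkt);
+ * @endcode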
+ *
+ * @par Packet Outflags
+ * Packet transmission options are controlled by
+ * packet meta data collectively referred to as outflags. An
+ * application sets these to request various services related to
+ * packet transmission.
+ *
+ * @par
+ * Note: The outflags controlling checksum offload processing are
+ * overrides. That is, they have no effect unless they are set
+ * explicitly by the application. By default, checksum offloads are
+ * controlled by the corresponding settings of the odp_pktio_t through
+ * which a packet is transmitted. The purpose of these bits is to
+ * permit this offload processing to be overridden on a per-packet
+ * basis. Note that not every implementation may support such
+ * override capabilities, which is why the setters here return a
+ * success/failure indicator.
+ *
+ * @par Packet Headroom and Tailroom Routines
+ * Data plane applications frequently manipulate the headers and trailers
+ * associated with packets. These operations involve either stripping
+ * headers or trailers from packets or inserting new headers or
+ * trailers onto them. To enable this manipulation, ODP provides the
+ * notion of headroom and tailroom, as well as a set of APIs that
+ * enable their efficient manipulation.
+ *
+ * @par
+ * Headroom is a set of bytes that logically precede the start of
+ * a packet, enabling additional headers to be created that become
+ * part of the packet. Similarly, tailroom is a set of bytes that
+ * logically follow the end of a packet, enabling additional payload
+ * and/or trailers to be created that become part of the packet. Both
+ * headroom and tailroom are meta data associated with packets, and
+ * are assigned at packet creation.
+ *
+ * @par
+ * Packet headroom and tailroom is manipulated by the following
+ * routines that MUST be provided by conforming ODP implementations.
+ * These operations define push and pull operations. The convention
+ * is that push operations move away from packet data while pull
+ * operations move towards packet data. Alternately, push operations
+ * add to packet data, while pull operations remove packet data.
+ *
+ * @par
+ * These concepts are shown as operations on the packet diagram
+ * we saw previously:
+ *
+ * @image html hrtr.png "Headroom and Tailroom Manipulation" width=\textwidth
+ * @image latex hrtr.eps "Headroom and Tailroom Manipulation" width=\textwidth
  */
 
 #ifndef ODP_PACKET_H_
@@ -21,7 +276,7 @@  extern "C" {
 #include <odp_buffer.h>
 
 /** @defgroup odp_packet ODP PACKET
- *  Operations on a packet.
+ *
  *  @{
  */
 
@@ -31,7 +286,7 @@  extern "C" {
 typedef odp_buffer_t odp_packet_t;
 
 /** Invalid packet */
-#define ODP_PACKET_INVALID ODP_BUFFER_INVALID
+#define ODP_PACKET_INVALID (odp_packet_t)(-1)
 
 /** Invalid offset */
 #define ODP_PACKET_OFFSET_INVALID ((uint32_t)-1)
@@ -40,411 +295,2038 @@  typedef odp_buffer_t odp_packet_t;
 /**
  * ODP packet segment handle
  */
-typedef int odp_packet_seg_t;
+typedef uint32_t odp_packet_segment_t;
 
 /** Invalid packet segment */
-#define ODP_PACKET_SEG_INVALID -1
+#define ODP_PACKET_SEGMENT_INVALID (odp_packet_segment_t)(-1)
+
+/**
+ * Convert a buffer handle to a packet handle
+ *
+ * @param[in] buf  Buffer handle
+ *
+ * @return Packet handle
+ *
+ * @note This routine converts a buffer handle to a packet handle.
+ * Only meaningful if buffer is of type ODP_BUFFER_TYPE_PACKET.
+ * Results are undefined otherwise.
+ */
+odp_packet_t odp_packet_from_buffer(odp_buffer_t buf);
+
+/**
+ * Convert a packet handle to a buffer handle
+ *
+ * @param[in] pkt  Packet handle
+ *
+ * @return Buffer handle
+ *
+ * @note This routine converts a packet handle to a buffer handle.
+ * This routine always succeeds (assuming pkt is a valid packet
+ * handle) since all packets are buffers.
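+ *
+ * @note Usage sketch (illustrative only; buf is assumed to be a buffer
+ * of type ODP_BUFFER_TYPE_PACKET, e.g. one dequeued from a scheduled
+ * queue):
+ * @code
+ *     odp_packet_t pkt = odp_packet_from_buffer(buf);
+ *     // ... packet processing ...
+ *     odp_buffer_t back = odp_packet_to_buffer(pkt);
+ * @endcode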
+ */
+odp_buffer_t odp_packet_to_buffer(odp_packet_t pkt);
+
+/**
+ * Get the headroom for a packet buffer pool
+ *
+ * @param[in] pool      Handle of the buffer pool
+ *
+ * @return              The headroom for the pool. If the pool is invalid,
+ *                      returns -1 and errno set to ODP_BUFFER_POOL_INVALID.
+ */
+size_t odp_buffer_pool_headroom(odp_buffer_pool_t pool);
 
 /**
- * ODP packet segment info
+ * Set the headroom for a packet buffer pool
+ *
+ * @param[in] pool      Handle of the buffer pool
+ * @param[in] hr        The headroom for the pool
+ *
+ * @return              0 on Success or -1 on error. For errors, errno set to
+ *                      ODP_BUFFER_POOL_INVALID if pool is unknown
+ *                      or ODP_INVALID_RANGE if hr exceeds
+ *                      ODP_PACKET_MAX_HEADROOM
+ *
+ * @note This routine sets the default headroom associated with
+ * buffers allocated from this packet pool. Note that headroom is a
+ * per-packet attribute. The headroom associated with the buffer pool
+ * is the default headroom to assign to a packet allocated from this
+ * buffer pool by the odp_packet_alloc() routine. By contrast, the
+ * odp_cos_set_headroom() classification API sets the default headroom
+ * to assign to a packet by the classifier for packets matching a
+ * particular Class of Service (CoS). The allowable range of
+ * supported headroom sizes is subject to the ODP_PACKET_MAX_HEADROOM
+ * limit defined by the implementation. The valid range for hr is
+ * 0..ODP_PACKET_MAX_HEADROOM.
+ *
+ * @note Note also that if the buffer is unsegmented, the specified
+ * headroom will subtract from the preallocated segments that comprise
+ * the pool. Applications need to take this into account when sizing
+ * unsegmented buffer pools.
+ *
+ * @note Specifying a new headroom for an existing buffer pool does not
+ * affect the headroom associated with existing buffers. The buffer
+ * pool headroom setting only affects new buffers allocated from the
+ * pool.
  */
-typedef struct odp_packet_seg_info_t {
-	void   *addr;      /**< Segment start address */
-	size_t  size;      /**< Segment maximum data size */
-	void   *data;      /**< Segment data address */
-	size_t  data_len;  /**< Segment data length */
-} odp_packet_seg_info_t;
+int odp_buffer_pool_set_headroom(odp_buffer_pool_t pool, size_t hr);
 
+/**
+ * Get the tailroom for a packet buffer pool
+ *
+ * @param[in] pool      Handle of the buffer pool
+ *
+ * @return              The tailroom for the pool. If the pool is invalid,
+ *                      returns -1 and errno set to ODP_BUFFER_POOL_INVALID.
+ */
+size_t odp_buffer_pool_tailroom(odp_buffer_pool_t pool);
+
+/**
+ * Set the tailroom for a packet buffer pool
+ *
+ * @param[in] pool      Handle of the buffer pool
+ * @param[in] tr        The tailroom for the pool
+ *
+ * @return              0 on Success or -1 on error. For errors, errno set to
+ *                      ODP_BUFFER_POOL_INVALID if pool is unknown
+ *                      or ODP_INVALID_RANGE if tr exceeds
+ *                      ODP_PACKET_MAX_TAILROOM
+ *
+ * @note This routine sets the tailroom associated with buffers
+ * allocated from a packet pool. The allowable range of supported
+ * tailroom sizes is subject to the ODP_PACKET_MAX_TAILROOM limit
+ * defined by the implementation. The valid range for tr is
+ * 0..ODP_PACKET_MAX_TAILROOM.
+ *
+ * @note Note also that if the buffer is unsegmented, the specified
+ * tailroom will subtract from the preallocated segments that comprise
+ * the pool. Applications need to take this into account when sizing
+ * unsegmented buffer pools.
+ *
+ * @par
+ * Specifying a new tailroom for an existing buffer pool does not
+ * affect the tailroom associated with existing buffers. The buffer
+ * pool tailroom setting only affects new buffers allocated from the
+ * pool.
+ */
+int odp_buffer_pool_set_tailroom(odp_buffer_pool_t pool, size_t tr);
 
 /**
- * Initialize the packet
+ * Packet alloc
  *
- * Needs to be called if the user allocates a packet buffer, i.e. the packet
- * has not been received from I/O through ODP.
+ * @param[in] pool    Pool handle for a pool of type ODP_BUFFER_TYPE_PACKET
  *
- * @param pkt  Packet handle
+ * @return Packet handle or ODP_PACKET_INVALID
+ *
+ * @note This routine is used to allocate a packet from a buffer pool
+ * of type ODP_BUFFER_TYPE_PACKET. The returned odp_packet_t is an
+ * opaque handle for the packet that can be used in further calls to
+ * manipulate the allocated packet. The value ODP_PACKET_INVALID is
+ * returned if the request cannot be satisfied. The length of the
+ * allocated packet is set to 0.
+ *
+ * @note If non-persistent user meta data is associated with the
+ * underlying buffer that contains the packet, the buf_init() routine
+ * specified as part of the containing buffer pool will be called as
+ * part of buffer allocation to enable the application to initialize
+ * the user meta data associated with it.
+ */
+odp_packet_t odp_packet_alloc(odp_buffer_pool_t pool);
+
+/**
+ * Allocate a packet from a buffer pool of a specified length
+ *
+ * @param[in] pool  Pool handle
+ * @param[in] len   Length of packet requested
+ *
+ * @return          Packet handle or ODP_PACKET_INVALID
+ *
+ * @note This routine is used to allocate a packet of a given length
+ * from a packet buffer pool. The returned odp_packet_t is an opaque
+ * handle for the packet that can be used in further calls to
+ * manipulate the allocated packet. The returned buffer is
+ * initialized as an ODP packet with its length set to the
+ * requested len. The caller will then initialize the packet with
+ * headers and payload as needed. This call itself does not
+ * initialize packet contents or the meta data that would be present
+ * following a packet parse.
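+ *
+ * @note Usage sketch (illustrative only; pkt_pool is an assumed packet
+ * buffer pool handle):
+ * @code
+ *     odp_packet_t pkt = odp_packet_alloc_len(pkt_pool, 60);
+ *
+ *     if (pkt != ODP_PACKET_INVALID) {
+ *             // fill in headers and payload, then optionally:
+ *             odp_packet_parse(pkt);
+ *     }
+ * @endcode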
+ */
+odp_packet_t odp_packet_alloc_len(odp_buffer_pool_t pool, size_t len);
+
+/**
+ * Packet free
+ *
+ * @param[in] pkt     Handle of the packet to be freed
+ *
+ * @note This routine is used to return a packet back to its
+ * containing buffer pool. Results are undefined if an application
+ * attempts to reference a packet after it is freed.
+ */
+void odp_packet_free(odp_packet_t pkt);
+
+/**
+ * Initialize a packet
+ *
+ * @param[in] pkt     Handle of the packet to be initialized
+ *
+ * @note This routine is called following packet allocation to
+ * initialize the packet meta data and internal structure to support
+ * packet operations. Note that this function is performed whenever a
+ * packet is allocated so it would only be used if an application
+ * wished to re-initialize a packet to permit it to discard whatever
+ * previous contents existed and start a fresh packet without having
+ * to free and re-allocate the packet. Re-initializing a packet
+ * resets its headroom and tailroom to their default values (from the
+ * containing packet pool) and sets the packet length to 0.
  */
 void odp_packet_init(odp_packet_t pkt);
 
 /**
- * Convert a buffer handle to a packet handle
+ * Obtain buffer pool handle of a packet
  *
- * @param buf  Buffer handle
+ * @param[in] pkt   Packet handle
  *
- * @return Packet handle
+ * @return Buffer pool the packet was allocated from
+ *
+ * @note This routine is an accessor function that returns the handle
+ * of the buffer pool containing the referenced packet.
  */
-odp_packet_t odp_packet_from_buffer(odp_buffer_t buf);
+odp_buffer_pool_t odp_packet_pool(odp_packet_t pkt);
 
 /**
- * Convert a packet handle to a buffer handle
+ * Get the headroom available for a packet
  *
- * @param pkt  Packet handle
+ * @param[in] pkt   Packet handle
  *
- * @return Buffer handle
+ * @return Headroom available for this packet, in bytes.
+ *
+ * @note This routine returns the current headroom available for a
+ * packet. The initial value for this is taken either from the
+ * containing buffer pool (for explicit packet allocation) or from the
+ * Class of Service (CoS) on packet reception. It is adjusted
+ * dynamically by the odp_packet_push_head() and
+ * odp_packet_pull_head() routines.
  */
-odp_buffer_t odp_packet_to_buffer(odp_packet_t pkt);
+size_t odp_packet_headroom(odp_packet_t pkt);
 
 /**
- * Set the packet length
+ * Get the tailroom available for a packet
+ *
+ * @param[in] pkt   Packet handle
  *
- * @param pkt  Packet handle
- * @param len  Length of packet in bytes
+ * @return Tailroom available for this packet, in bytes.
+ *
+ * @note This routine returns the current tailroom available for a
+ * packet. The initial value for this is taken from the
+ * containing buffer pool. It is adjusted dynamically by the
+ * odp_packet_push_tail() and odp_packet_pull_tail() routines.
  */
-void odp_packet_set_len(odp_packet_t pkt, size_t len);
+size_t odp_packet_tailroom(odp_packet_t pkt);
 
 /**
- * Get the packet length
+ * Get packet length
  *
- * @param pkt  Packet handle
+ * @param[in] pkt  Packet handle
  *
  * @return   Packet length in bytes
+ *
+ * @note This routine is an accessor function that returns the length
+ * (in bytes) of a packet. This is the total number of octets that
+ * would transmit for the packet, not including the Ethernet Frame
+ * Check Sequence (FCS), and includes all packet headers as well as
+ * payload. Results are undefined if the supplied pkt does not
+ * specify a valid packet. Note that packet length will change in
+ * response to headroom/tailroom and/or split/join operations. As a
+ * result, this attribute does not have a setter accessor function.
  */
-size_t odp_packet_get_len(odp_packet_t pkt);
+size_t odp_packet_len(odp_packet_t pkt);
 
 /**
- * Set packet user context
+ * Get address and size of user meta data associated with a packet
+ *
+ * @param[in]  pkt        Packet handle
+ * @param[out] udata_size Number of bytes of user meta data available
+ *                        at the returned address
  *
- * @param buf      Packet handle
- * @param ctx      User context
+ * @return                Address of the user meta data for this packet
+ *                        or NULL if the buffer has no user meta data.
  *
+ * @note This routine returns the address of the user meta data
+ * associated with an ODP packet. This enables the caller to read or
+ * write the user meta data associated with the packet. The caller
+ * MUST honor the returned udata_size in referencing this storage.
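+ *
+ * @note Usage sketch (illustrative only; my_state_t is a hypothetical
+ * application type whose size was given as udata_size at pool
+ * creation):
+ * @code
+ *     size_t udata_size;
+ *     my_state_t *state = odp_packet_udata(pkt, &udata_size);
+ *
+ *     if (state != NULL && udata_size >= sizeof(my_state_t)) {
+ *             // read or write the per-packet user meta data
+ *     }
+ * @endcode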
  */
-void odp_packet_set_ctx(odp_packet_t buf, const void *ctx);
+void *odp_packet_udata(odp_packet_t pkt, size_t *udata_size);
 
 /**
- * Get packet user context
+ * Get address of user meta data associated with a packet
  *
- * @param buf      Packet handle
+ * @param[in] pkt         Packet handle
  *
- * @return User context
+ * @return                Address of the user meta data for this packet
+ *                        or NULL if the buffer has no user meta data.
+ *
+ * @note This routine returns the address of the user meta data
+ * associated with an ODP packet. This enables the caller to read or
+ * write the user meta data associated with the packet. This routine
+ * is intended as a fast-path version of odp_packet_udata() for
+ * callers that only require the address of the user meta data area
+ * associated with the packet. This routine assumes that the caller
+ * already knows and will honor the size limits of this area.
  */
-void *odp_packet_get_ctx(odp_packet_t buf);
+void *odp_packet_udata_addr(odp_packet_t pkt);
 
 /**
- * Packet buffer start address
+ * Tests if packet is valid
+ *
+ * @param[in] pkt  Packet handle
  *
- * Returns a pointer to the start of the packet buffer. The address is not
- * necessarily the same as packet data address. E.g. on a received Ethernet
- * frame, the protocol header may start 2 or 6 bytes within the buffer to
- * ensure 32 or 64-bit alignment of the IP header.
+ * @return         1 if valid, otherwise 0
  *
- * Use odp_packet_l2(pkt) to get the start address of a received valid frame
- * or odp_packet_data(pkt) to get the current packet data address.
+ * @note This routine tests whether a packet is valid.  A packet is
+ * valid if the packet identified by the supplied odp_packet_t exists
+ * and has been allocated.
+ */
+int odp_packet_is_valid(odp_packet_t pkt);
+
+/**
+ * Tests if packet is segmented
  *
- * @param pkt  Packet handle
+ * @param[in] pkt  Packet handle
  *
- * @return  Pointer to the start of the packet buffer
+ * @return         1 if packet has more than one segment, otherwise 0
  *
- * @see odp_packet_l2(), odp_packet_data()
+ * @note This routine tests whether a packet is segmented. Logically
+ * equivalent to testing whether odp_packet_segment_count(pkt) > 1,
+ * but may be more efficient in some implementations.
  */
-uint8_t *odp_packet_addr(odp_packet_t pkt);
+int odp_packet_is_segmented(odp_packet_t pkt);
 
 /**
- * Packet data address
+ * Print packet metadata to ODP Log
  *
- * Returns the current packet data address. When a packet is received from
- * packet input, the data address points to the first byte of the packet.
+ * @param[in] pkt  Packet handle
  *
- * @param pkt  Packet handle
+ * @note This routine is used for debug purposes to print the metadata
+ * associated with a packet to the ODP log. This routine is OPTIONAL
+ * and MAY be treated as a no-op if the function is not available or
+ * if the supplied odp_packet_t is not valid.
+ */
+void odp_packet_print(odp_packet_t pkt);
+
+/**
+ * Parse a packet and set its meta data.
+ *
+ * @param[in] pkt Packet handle of packet to be parsed
+ *
+ * @return 1 if packet has any parse errors, 0 otherwise
+ *
+ * @note This routine requests that the specified packet be parsed and
+ * the meta data associated with it be set. The return value
+ * indicates whether the parse was successful or if any parse errors
+ * were encountered. The intent of this routine is to allow
+ * applications that construct or modify packets to force an
+ * implementation-provided re-parse to set the relevant packet meta
+ * data. As an alternative, the application is free to set these
+ * individually as it desires with appropriate setter functions,
+ * however in this case it is the application’s responsibility to
+ * ensure that they are set consistently as no error checking is
+ * performed by the setters. Calling odp_packet_parse(), by contrast,
+ * guarantees that they will be set properly to reflect the actual
+ * contents of the packet.
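+ *
+ * @note Usage sketch (illustrative only), after constructing packet
+ * headers by hand:
+ * @code
+ *     if (odp_packet_parse(pkt) != 0) {
+ *             // parse errors encountered; inspect or drop the packet
+ *             odp_packet_free(pkt);
+ *     }
+ * @endcode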
+ */
+int odp_packet_parse(odp_packet_t pkt);
+
+/**
+ * Check for packet errors
+ *
+ * Checks all error flags at once.
+ *
+ * @param[in] pkt   Packet handle
  *
- * @return  Pointer to the packet data
+ * @return 1 if packet has errors, 0 otherwise
  *
- * @see odp_packet_l2(), odp_packet_addr()
+ * @note This routine is a summary routine that says whether the
+ * referenced packet contains any errors. If odp_packet_error() is 0
+ * then the packet is well-formed.
  */
-uint8_t *odp_packet_data(odp_packet_t pkt);
+int odp_packet_error(odp_packet_t pkt);
 
 /**
- * Get pointer to the start of the L2 frame
+ * Control indication of packet error.
  *
- * The L2 frame header address is not necessarily the same as the address of the
- * packet buffer, see odp_packet_addr()
+ * @param[in] pkt Packet handle
+ * @param[in] val Value to set for this bit (0 or 1).
  *
- * @param pkt  Packet handle
+ * @note This routine is used to set the error flag for a packet.
+ * Note that while error is a summary bit, at present ODP does not
+ * define any error detail bits.
+ */
+void odp_packet_set_error(odp_packet_t pkt, int val);
+
+/**
+ * Examine packet reference count
+ *
+ * @param[in] pkt Packet handle
+ *
+ * @return reference count of the packet
+ *
+ * @note This routine examines the reference count associated with a
+ * packet. The reference count is used to control when a packet is
+ * freed. When initially allocated, the refcount for a packet is set
+ * to 1. When a packet is transmitted its refcount is decremented and
+ * if the refcount is 0 then the packet is freed by the transmit
+ * function of the odp_pktio_t that transmits it. If the refcount is
+ * greater than zero then the packet is not freed and instead is
+ * returned to the application for further processing. Note that a
+ * packet refcount is an unsigned integer and can never be less than
+ * 0.
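+ *
+ * @note Usage sketch (illustrative only): retain a packet across
+ * transmission by raising its refcount before handing it to an output
+ * odp_pktio_t:
+ * @code
+ *     odp_packet_incr_refcount(pkt, 1);
+ *     // ... submit pkt for transmission ...
+ *     // the transmit path decrements the refcount; since it stays
+ *     // above zero, pkt is returned to the application rather than
+ *     // freed and remains valid here
+ *     odp_packet_decr_refcount(pkt, 1);
+ * @endcode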
+ */
+unsigned int odp_packet_refcount(odp_packet_t pkt);
+
+/**
+ * Increment a packet’s refcount.
  *
- * @return  Pointer to L2 header or NULL if not found
+ * @param[in] pkt Packet handle
+ * @param[in] val Value to increment refcount by
  *
- * @see odp_packet_addr(), odp_packet_data()
+ * @return The packet refcount following increment
+ *
+ * @note This routine is used to increment the refcount for a packet
+ * by a specified amount.
  */
-uint8_t *odp_packet_l2(odp_packet_t pkt);
+unsigned int odp_packet_incr_refcount(odp_packet_t pkt, unsigned int val);
 
 /**
- * Return the byte offset from the packet buffer to the L2 frame
+ * Decrement a packet’s refcount.
+ *
+ * @param[in] pkt Packet handle
+ * @param[in] val Value to decrement refcount by
  *
- * @param pkt  Packet handle
+ * @return The packet refcount following decrement
  *
- * @return  L2 byte offset or ODP_PACKET_OFFSET_INVALID if not found
+ * @note This routine is used to decrement the refcount for a packet
+ * by a specified amount. The refcount will never be decremented
+ * below 0 regardless of the specified val.
  */
-size_t odp_packet_l2_offset(odp_packet_t pkt);
+unsigned int odp_packet_decr_refcount(odp_packet_t pkt, unsigned int val);
 
 /**
- * Set the byte offset to the L2 frame
+ * Check for L2 header, e.g., Ethernet
  *
- * @param pkt     Packet handle
- * @param offset  L2 byte offset
+ * @param[in] pkt Packet handle
+ *
+ * @return 1 if packet contains a valid & known L2 header, 0 otherwise
+ *
+ * @note This routine indicates whether the referenced packet contains
+ * a valid Layer 2 header.
  */
-void odp_packet_set_l2_offset(odp_packet_t pkt, size_t offset);
+int odp_packet_inflag_l2(odp_packet_t pkt);
 
+/**
+ * Control indication of Layer 2 presence.
+ *
+ * @param[in] pkt Packet handle
+ *
+ * @param[in] val 1 if packet contains a valid & known L2 header, 0 otherwise
+ *
+ * @note This routine sets whether the referenced packet contains a
+ * valid Layer 2 header.
+ */
+void odp_packet_set_inflag_l2(odp_packet_t pkt, int val);
 
 /**
- * Get pointer to the start of the L3 packet
+ * Check for L3 header, e.g. IPv4, IPv6
  *
- * @param pkt  Packet handle
+ * @param[in] pkt Packet handle
  *
- * @return  Pointer to L3 packet or NULL if not found
+ * @return 1 if packet contains a valid & known L3 header, 0 otherwise
  *
+ * @note This routine indicates whether the referenced packet contains
+ * a valid Layer 3 header.
  */
-uint8_t *odp_packet_l3(odp_packet_t pkt);
+int odp_packet_inflag_l3(odp_packet_t pkt);
 
 /**
- * Return the byte offset from the packet buffer to the L3 packet
+ * Control indication of L3 header, e.g. IPv4, IPv6
+ *
+ * @param[in] pkt Packet handle
  *
- * @param pkt  Packet handle
+ * @param[in] val 1 if packet contains a valid & known L3 header, 0 otherwise
  *
- * @return  L3 byte offset or ODP_PACKET_OFFSET_INVALID if not found
+ * @note This routine sets whether the referenced packet contains a
+ * valid Layer 3 header.
  */
-size_t odp_packet_l3_offset(odp_packet_t pkt);
+void odp_packet_set_inflag_l3(odp_packet_t pkt, int val);
 
 /**
- * Set the byte offset to the L3 packet
+ * Check for L4 header, e.g. UDP, TCP, (also ICMP)
  *
- * @param pkt     Packet handle
- * @param offset  L3 byte offset
+ * @param[in] pkt Packet handle
+ *
+ * @return 1 if packet contains a valid & known L4 header, 0 otherwise
+ *
+ * @note This routine indicates whether the referenced packet contains
+ * a valid Layer 4 header.
  */
-void odp_packet_set_l3_offset(odp_packet_t pkt, size_t offset);
+int odp_packet_inflag_l4(odp_packet_t pkt);
 
+/**
+ * Control indication of L4 header, e.g. UDP, TCP, (also ICMP)
+ *
+ * @param[in] pkt Packet handle
+ * @param[in] val 1 if packet contains a valid & known L4 header, 0 otherwise
+ *
+ * @note This routine sets whether the referenced packet contains a
+ * valid Layer 4 header.
+ */
+void odp_packet_set_inflag_l4(odp_packet_t pkt, int val);
 
 /**
- * Get pointer to the start of the L4 packet
+ * Check for Ethernet header
  *
- * @param pkt  Packet handle
+ * @param[in] pkt Packet handle
  *
- * @return  Pointer to L4 packet or NULL if not found
+ * @return 1 if packet contains a valid eth header, 0 otherwise
  *
+ * @note This routine indicates whether the referenced packet contains
+ * a valid Ethernet header.
  */
-uint8_t *odp_packet_l4(odp_packet_t pkt);
+int odp_packet_inflag_eth(odp_packet_t pkt);
 
 /**
- * Return the byte offset from the packet buffer to the L4 packet
+ * Control indication of Ethernet header
  *
- * @param pkt  Packet handle
+ * @param[in] pkt Packet handle
  *
- * @return  L4 byte offset or ODP_PACKET_OFFSET_INVALID if not found
+ * @param[in] val 1 if packet contains a valid eth header, 0 otherwise
+ *
+ * @note This routine sets whether the referenced packet contains a
+ * valid Ethernet header.
  */
-size_t odp_packet_l4_offset(odp_packet_t pkt);
+void odp_packet_set_inflag_eth(odp_packet_t pkt, int val);
 
 /**
- * Set the byte offset to the L4 packet
+ * Check for Ethernet SNAP vs. DIX format
+ *
+ * @param[in] pkt Packet handle
  *
- * @param pkt     Packet handle
- * @param offset  L4 byte offset
+ * @return 1 if packet is SNAP, 0 if it is DIX
+ *
+ * @note This routine indicates whether the referenced packet's
+ * Ethernet framing is SNAP. If odp_packet_inflag_eth() is 1 and
+ * odp_packet_inflag_snap() is 0 then the packet is in DIX format.
  */
-void odp_packet_set_l4_offset(odp_packet_t pkt, size_t offset);
+int odp_packet_inflag_snap(odp_packet_t pkt);
 
 /**
- * Print (debug) information about the packet
+ * Control indication of Ethernet SNAP vs. DIX format
+ *
+ * @param[in] pkt Packet handle
+ * @param[in] val 1 if packet is SNAP, 0 if it is DIX
  *
- * @param pkt  Packet handle
+ * @note This routine sets whether the referenced packet's Ethernet
+ * framing is SNAP.
  */
-void odp_packet_print(odp_packet_t pkt);
+void odp_packet_set_inflag_snap(odp_packet_t pkt, int val);
 
 /**
- * Copy contents and metadata from pkt_src to pkt_dst
- * Useful when creating copies of packets
+ * Check for jumbo frame
+ *
+ * @param[in] pkt Packet handle
  *
- * @param pkt_dst Destination packet
- * @param pkt_src Source packet
+ * @return 1 if packet contains jumbo frame, 0 otherwise
  *
- * @return 0 if successful
+ * @note This routine indicates whether the referenced packet contains
+ * a jumbo frame. A jumbo frame has a length greater than 1500 bytes.
  */
-int odp_packet_copy(odp_packet_t pkt_dst, odp_packet_t pkt_src);
+int odp_packet_inflag_jumbo(odp_packet_t pkt);
 
 /**
- * Tests if packet is segmented (a scatter/gather list)
+ * Control indication of jumbo frame
  *
- * @param pkt  Packet handle
+ * @param[in] pkt Packet handle
+ * @param[in] val 1 if packet contains jumbo frame, 0 otherwise
  *
- * @return Non-zero if packet is segmented, otherwise 0
+ * @note This routine sets whether the referenced packet contains a
+ * jumbo frame. A jumbo frame has a length greater than 1500 bytes.
  */
-int odp_packet_is_segmented(odp_packet_t pkt);
+void odp_packet_set_inflag_jumbo(odp_packet_t pkt, int val);
 
 /**
- * Segment count
+ * Check for VLAN
  *
- * Returns number of segments in the packet. A packet has always at least one
- * segment (the packet buffer itself).
+ * @param[in] pkt Packet handle
  *
- * @param pkt  Packet handle
+ * @return 1 if packet contains a VLAN header, 0 otherwise
  *
- * @return Segment count
+ * @note This routine indicates whether the referenced packet contains
+ * one or more VLAN headers.
  */
-int odp_packet_seg_count(odp_packet_t pkt);
+int odp_packet_inflag_vlan(odp_packet_t pkt);
 
 /**
- * Get segment by index
+ * Control indication of VLAN
+ *
+ * @param[in] pkt Packet handle
  *
- * @param pkt   Packet handle
- * @param index Segment index (0 ... seg_count-1)
+ * @param[in] val 1 if packet contains a VLAN header, 0 otherwise
  *
- * @return Segment handle, or ODP_PACKET_SEG_INVALID on an error
+ * @note This routine sets whether the referenced packet contains one
+ * or more VLAN headers.
  */
-odp_packet_seg_t odp_packet_seg(odp_packet_t pkt, int index);
+void odp_packet_set_inflag_vlan(odp_packet_t pkt, int val);
 
 /**
- * Get next segment
+ * Check for VLAN QinQ (stacked VLAN)
+ *
+ * @param[in] pkt Packet handle
  *
- * @param pkt   Packet handle
- * @param seg   Current segment handle
+ * @return 1 if packet contains a VLAN QinQ header, 0 otherwise
  *
- * @return Handle to next segment, or ODP_PACKET_SEG_INVALID on an error
+ * @note This routine indicates whether the referenced packet contains
+ * a double VLAN header (Q-in-Q) matching the IEEE 802.1ad
+ * specification.
  */
-odp_packet_seg_t odp_packet_seg_next(odp_packet_t pkt, odp_packet_seg_t seg);
+int odp_packet_inflag_vlan_qinq(odp_packet_t pkt);
 
 /**
- * Segment info
+ * Controls indication of VLAN QinQ (stacked VLAN)
  *
- * Copies segment parameters into the info structure.
+ * @param[in] pkt Packet handle
+ * @param[in] val 1 if packet contains a VLAN QinQ header, 0 otherwise
  *
- * @param pkt  Packet handle
- * @param seg  Segment handle
- * @param info Pointer to segment info structure
+ * @note This routine sets whether the referenced packet contains a
+ * double VLAN header (Q-in-Q) matching the IEEE 802.1ad
+ * specification.
+ */
+void odp_packet_set_inflag_vlan_qinq(odp_packet_t pkt, int val);
+
+/**
+ * Check for ARP
  *
- * @return 0 if successful, otherwise non-zero
+ * @param[in] pkt Packet handle
+ *
+ * @return 1 if packet contains an ARP header, 0 otherwise
+ *
+ * @note This routine indicates whether the referenced packet contains
+ * an ARP header.
  */
-int odp_packet_seg_info(odp_packet_t pkt, odp_packet_seg_t seg,
-			odp_packet_seg_info_t *info);
+int odp_packet_inflag_arp(odp_packet_t pkt);
 
 /**
- * Segment start address
+ * Control indication of ARP
  *
- * @param pkt  Packet handle
- * @param seg  Segment handle
+ * @param[in] pkt Packet handle
+ * @param[in] val 1 if packet contains an ARP header, 0 otherwise
  *
- * @return Segment start address, or NULL on an error
+ * @note This routine sets whether the referenced packet contains an
+ * ARP header.
  */
-void *odp_packet_seg_addr(odp_packet_t pkt, odp_packet_seg_t seg);
+void odp_packet_set_inflag_arp(odp_packet_t pkt, int val);
 
 /**
- * Segment maximum data size
+ * Check for IPv4
+ *
+ * @param[in] pkt Packet handle
  *
- * @param pkt  Packet handle
- * @param seg  Segment handle
+ * @return 1 if packet contains an IPv4 header, 0 otherwise
  *
- * @return Segment maximum data size
+ * @note This routine indicates whether the referenced packet contains
+ * an IPv4 header.
  */
-size_t odp_packet_seg_size(odp_packet_t pkt, odp_packet_seg_t seg);
+int odp_packet_inflag_ipv4(odp_packet_t pkt);
 
 /**
- * Segment data address
+ * Control indication of IPv4
  *
- * @param pkt  Packet handle
- * @param seg  Segment handle
+ * @param[in] pkt Packet handle
+ * @param[in] val 1 if packet contains an IPv4 header, 0 otherwise
  *
- * @return Segment data address
+ * @note This routine sets whether the referenced packet contains an
+ * IPv4 header.
  */
-void *odp_packet_seg_data(odp_packet_t pkt, odp_packet_seg_t seg);
+void odp_packet_set_inflag_ipv4(odp_packet_t pkt, int val);
 
 /**
- * Segment data length
+ * Check for IPv6
  *
- * @param pkt  Packet handle
- * @param seg  Segment handle
+ * @param[in] pkt Packet handle
  *
- * @return Segment data length
+ * @return 1 if packet contains an IPv6 header, 0 otherwise
+ *
+ * @note This routine indicates whether the referenced packet contains
+ * an IPv6 header.
+ */
+int odp_packet_inflag_ipv6(odp_packet_t pkt);
+
+/**
+ * Control indication of IPv6
+ *
+ * @param[in] pkt Packet handle
+ * @param[in] val 1 if packet contains an IPv6 header, 0 otherwise
+ *
+ * @note This routine sets whether the referenced packet contains an
+ * IPv6 header.
  */
-size_t odp_packet_seg_data_len(odp_packet_t pkt, odp_packet_seg_t seg);
+void odp_packet_set_inflag_ipv6(odp_packet_t pkt, int val);
 
 /**
- * Segment headroom
+ * Check for IP fragment
  *
- * seg_headroom = seg_data - seg_addr
+ * @param[in] pkt Packet handle
  *
- * @param pkt  Packet handle
- * @param seg  Segment handle
+ * @return 1 if packet is an IP fragment, 0 otherwise
  *
- * @return Number of octets from seg_addr to seg_data
+ * @note This routine indicates whether the referenced packet contains
+ * an IP fragment.
  */
-size_t odp_packet_seg_headroom(odp_packet_t pkt, odp_packet_seg_t seg);
+int odp_packet_inflag_ipfrag(odp_packet_t pkt);
 
 /**
- * Segment tailroom
+ * Control indication of IP fragment
  *
- * seg_tailroom = seg_size - seg_headroom - seg_data_len
+ * @param[in] pkt Packet handle
+ * @param[in] val 1 if packet is an IP fragment, 0 otherwise
  *
- * @param pkt  Packet handle
- * @param seg  Segment handle
+ * @note This routine sets whether the referenced packet contains an
+ * IP fragment.
+ */
+void odp_packet_set_inflag_ipfrag(odp_packet_t pkt, int val);
+
+/**
+ * Check for IP options
  *
- * @return Number of octets from end-of-data to end-of-segment
+ * @param[in] pkt Packet handle
+ *
+ * @return 1 if packet contains IP options, 0 otherwise
+ *
+ * @note This routine indicates whether the referenced packet contains
+ * IP options.
  */
-size_t odp_packet_seg_tailroom(odp_packet_t pkt, odp_packet_seg_t seg);
+int odp_packet_inflag_ipopt(odp_packet_t pkt);
 
 /**
- * Push out segment head
+ * Control indication of IP options
  *
- * Push out segment data address (away from data) and increase data length.
- * Does not modify packet in case of an error.
+ * @param[in] pkt Packet handle
+ * @param[in] val 1 if packet contains IP options, 0 otherwise
  *
- * seg_data     -= len
- * seg_data_len += len
+ * @note This routine sets whether the referenced packet contains IP
+ * options.
+ */
+void odp_packet_set_inflag_ipopt(odp_packet_t pkt, int val);
+
+/**
+ * Check for IPSec
  *
- * @param pkt  Packet handle
- * @param seg  Segment handle
- * @param len  Number of octets to push head (0 ... seg_headroom)
+ * @param[in] pkt Packet handle
  *
- * @return New segment data address, or NULL on an error
+ * @return 1 if packet requires IPSec processing, 0 otherwise
+ *
+ * @note This routine indicates whether the referenced packet contains
+ * an IPSec header (ESP or AH).
  */
-void *odp_packet_seg_push_head(odp_packet_t pkt, odp_packet_seg_t seg,
-			       size_t len);
+int odp_packet_inflag_ipsec(odp_packet_t pkt);
 
 /**
- * Pull in segment head
+ * Control indication of IPSec
  *
- * Pull in segment data address (towards data) and decrease data length.
- * Does not modify packet in case of an error.
+ * @param[in] pkt Packet handle
+ * @param[in] val 1 if packet requires IPSec processing, 0 otherwise
+ *
+ * @note This routine sets whether the referenced packet contains an
+ * IPSec header (ESP or AH).
+ */
+void odp_packet_set_inflag_ipsec(odp_packet_t pkt, int val);
+
+/**
+ * Check for UDP
+ *
+ * @param[in] pkt Packet handle
+ *
+ * @return 1 if packet contains a UDP header, 0 otherwise
+ *
+ * @note This routine indicates whether the referenced packet contains
+ * a UDP header.
+ */
+int odp_packet_inflag_udp(odp_packet_t pkt);
+
+/**
+ * Control indication of UDP
+ *
+ * @param[in] pkt Packet handle
+ * @param[in] val 1 if packet contains a UDP header, 0 otherwise
+ *
+ * @note This routine sets whether the referenced packet contains a
+ * UDP header.
+ */
+void odp_packet_set_inflag_udp(odp_packet_t pkt, int val);
+
+/**
+ * Check for TCP
+ *
+ * @param[in] pkt Packet handle
+ *
+ * @return 1 if packet contains a TCP header, 0 otherwise
+ *
+ * @note This routine indicates whether the referenced packet contains
+ * a TCP header.
+ */
+int odp_packet_inflag_tcp(odp_packet_t pkt);
+
+/**
+ * Control indication of TCP
+ *
+ * @param[in] pkt Packet handle
+ * @param[in] val 1 if packet contains a TCP header, 0 otherwise
+ *
+ * @note This routine sets whether the referenced packet contains a
+ * TCP header.
+ */
+void odp_packet_set_inflag_tcp(odp_packet_t pkt, int val);
+
+/**
+ * Check for TCP options
+ *
+ * @param[in] pkt Packet handle
+ *
+ * @return 1 if packet contains TCP options, 0 otherwise
+ *
+ * @note This routine indicates whether the referenced packet contains
+ * TCP options.
+ */
+int odp_packet_inflag_tcpopt(odp_packet_t pkt);
+
+/**
+ * Control indication of TCP options
+ *
+ * @param[in] pkt Packet handle
+ * @param[in] val 1 if packet contains TCP options, 0 otherwise
+ *
+ * @note This routine sets whether the referenced packet contains TCP
+ * options.
+ */
+void odp_packet_set_inflag_tcpopt(odp_packet_t pkt, int val);
+
+/**
+ * Check for ICMP
+ *
+ * @param[in] pkt Packet handle
+ *
+ * @return 1 if packet contains an ICMP header, 0 otherwise
+ *
+ * @note This routine indicates whether the referenced packet contains
+ * an ICMP header.
+ */
+int odp_packet_inflag_icmp(odp_packet_t pkt);
+
+/**
+ * Control indication of ICMP
+ *
+ * @param[in] pkt Packet handle
+ * @param[in] val 1 if packet contains an ICMP header, 0 otherwise
+ *
+ * @note This routine sets whether the referenced packet contains an
+ * ICMP header.
+ */
+void odp_packet_set_inflag_icmp(odp_packet_t pkt, int val);
+
+/**
+ * Query Layer 3 checksum offload override setting
+ *
+ * @param[in] pkt Packet handle
+ *
+ * @return 0 if no Layer 3 checksum to be performed, 1 if yes, -1 if not set
+ *
+ * @note This routine indicates whether Layer 3 checksum offload
+ * processing is to be performed for the referenced packet. Since
+ * this is an override bit, if the application has not set this
+ * attribute an error (-1) is returned indicating that this bit has
+ * not been specified.
+ */
+int odp_packet_outflag_l3_chksum(odp_packet_t pkt);
+
+/**
+ * Override Layer 3 checksum calculation
+ *
+ * @param[in] pkt Packet handle
+ * @param[in] val 0 if no Layer 3 checksum to be performed, 1 if yes
+ *
+ * @return 0 if override successful, -1 if not
+ *
+ * @note This routine sets whether Layer 3 checksum offload processing
+ * is to be performed for the referenced packet. An error return (-1)
+ * indicates that the implementation is unable to provide per-packet
+ * overrides of this function.
+ */
+int odp_packet_set_outflag_l3_chksum(odp_packet_t pkt, int val);
+
+/**
+ * Query Layer 4 checksum offload override setting
+ *
+ * @param[in] pkt Packet handle
+ *
+ * @return 0 if no Layer 4 checksum to be performed, 1 if yes, -1 if not set
+ *
+ * @note This routine indicates whether Layer 4 checksum offload
+ * processing is to be performed for the referenced packet. Since
+ * this is an override bit, if the application has not set this
+ * attribute an error (-1) is returned indicating that this bit has
+ * not been specified.
+ */
+int odp_packet_outflag_l4_chksum(odp_packet_t pkt);
+
+/**
+ * Override Layer 4 checksum calculation
+ *
+ * @param[in] pkt Packet handle
+ * @param[in] val 0 if no Layer 4 checksum to be performed, 1 if yes
+ *
+ * @return 0 if override successful, -1 if not
  *
- * seg_data     += len
- * seg_data_len -= len
+ * @note This routine sets whether Layer 4 checksum offload
+ * processing is to be performed for the referenced packet. An error
+ * return (-1) indicates that the implementation is unable to provide
+ * per-packet overrides of this function.
+ */
+int odp_packet_set_outflag_l4_chksum(odp_packet_t pkt, int val);
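
A sketch of how the per-packet checksum overrides above might be used on
the transmit path (illustrative only; assumes pkt is a packet about to be
sent):

@code
/* Request L3 and L4 checksum offload; fall back to software
 * checksumming if the implementation cannot honor per-packet
 * overrides (indicated by a -1 return). */
if (odp_packet_set_outflag_l3_chksum(pkt, 1) == -1 ||
    odp_packet_set_outflag_l4_chksum(pkt, 1) == -1) {
        /* compute and insert checksums in software here */
}
@endcode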
+
+/**
+ * Get offset of start of Layer 2 headers
+ *
+ * @param[in] pkt  Packet handle
+ *
+ * @return         Byte offset into packet of start of Layer 2 headers
+ *                 or ODP_PACKET_OFFSET_INVALID if not found.
+ *
+ * @note This routine is an accessor function that returns the byte
+ * offset of the start of the Layer 2 headers of a packet. Results
+ * are undefined if the supplied pkt does not specify a valid packet.
+ * Note that if the packet contains unusual Layer 2 tags the caller
+ * can use this function to parse the Layer 2 headers directly if
+ * desired.
+ *
+ */
+size_t odp_packet_l2_offset(odp_packet_t pkt);
+
+/**
+ * Specify start of Layer 2 headers
+ *
+ * @param[in] pkt    Packet handle
+ * @param[in] offset Byte offset into packet of start of Layer 2 headers.
+ *
+ * @return  0 on Success, -1 on errors
+ *
+ * @note This routine is an accessor function that sets the byte
+ * offset of the start of the Layer 2 headers of a packet. Results
+ * are undefined if the supplied pkt does not specify a valid packet.
+ * An error return results if the specified offset is out of range.
+ * Note that this routine does not verify that the specified offset
+ * correlates with packet contents. The application assumes that
+ * responsibility when using this routine.
+ */
+int odp_packet_set_l2_offset(odp_packet_t pkt, size_t offset);
+
+/**
+ * Returns the VLAN S-Tag and C-Tag associated with packet
+ *
+ * @param[in]  pkt   Packet handle
+ * @param[out] stag  S-Tag associated with packet or 0x00000000
+ * @param[out] ctag  C-Tag associated with packet or 0x00000000
+ *
+ * @note This routine returns the S-Tag (Ethertype 0x88A8) and C-Tag
+ * (Ethertype 0x8100) associated with the referenced packet. Note
+ * that the full tag (including the Ethertype) is returned so that the
+ * caller can easily distinguish between the two as well as handle
+ * older sources that use 0x8100 for both tags (QinQ). If the packet
+ * contains only one VLAN tag, it will be returned as the “S-Tag”. If
+ * the packet does not contain VLAN tags then both arguments will be
+ * returned as zeros.
+ *
+ * @par
+ * Note that the meta data values returned by this routine are in
+ * host-endian format. VLAN tags themselves are always received and
+ * transmitted in network byte order.
+ *
+ */
+void odp_packet_vlans(odp_packet_t pkt, uint32_t *stag, uint32_t *ctag);
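
A minimal sketch of reading the VLAN tag meta data (assumes pkt is a
valid packet handle):

@code
uint32_t stag, ctag;

odp_packet_vlans(pkt, &stag, &ctag);
if (stag != 0 && ctag == 0) {
        /* single-tagged packet: the full tag, including its
         * Ethertype, is in stag in host-endian format */
}
@endcode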
+
+/**
+ * Specifies the VLAN S-Tag and C-Tag associated with packet
+ *
+ * @param[in] pkt    Packet handle
+ * @param[in] stag   S-Tag associated with packet or 0xFFFFFFFF
+ * @param[in] ctag   C-Tag associated with packet or 0xFFFFFFFF
+ *
+ * @note This routine sets the S-Tag (Ethertype 0x88A8) and C-Tag
+ * (Ethertype 0x8100) associated with the referenced packet. A value
+ * of 0xFFFFFFFF is specified to indicate that no corresponding S-Tag
+ * or C-Tag is present. Note: This routine simply sets the VLAN meta
+ * data for the packet. It does not affect packet contents. It is
+ * the caller’s responsibility to ensure that the packet contents
+ * matches the specified values.
+ */
+void odp_packet_set_vlans(odp_packet_t pkt, uint32_t stag, uint32_t ctag);
+
+/**
+ * Get offset of start of Layer 3 headers
+ *
+ * @param[in] pkt  Packet handle
  *
- * @param pkt  Packet handle
- * @param seg  Segment handle
- * @param len  Number of octets to pull head (0 ... seg_data_len)
+ * @return         Byte offset into packet of start of Layer 3 headers
+ *                 or ODP_PACKET_OFFSET_INVALID if not found.
  *
- * @return New segment data address, or NULL on an error
+ * @note This routine is an accessor function that returns the byte
+ * offset of the start of the Layer 3 headers of a packet. Results
+ * are undefined if the supplied pkt does not specify a valid packet.
+ * In conjunction with the odp_packet_l3_protocol() routine, this
+ * routine allows the caller to process the Layer 3 header(s) of the
+ * packet directly, if desired.
+ */
+size_t odp_packet_l3_offset(odp_packet_t pkt);
+
+/**
+ * Set offset of start of Layer 3 headers
+ *
+ * @param[in] pkt    Packet handle
+ * @param[in] offset Byte offset into packet of start of Layer 3 headers
+ *
+ * @return  0 on Success, -1 on errors
+ *
+ * @note This routine is an accessor function that sets the byte
+ * offset of the start of the Layer 3 headers of a packet. Results
+ * are undefined if the supplied pkt does not specify a valid packet.
+ * An error return results if the specified offset is out of range.
+ * In conjunction with the odp_packet_set_l3_protocol() routine, this
+ * routine allows the caller to specify the Layer 3 header meta data
+ * of the packet directly, if desired. Note that this routine does not
+ * verify that the specified offset correlates with packet contents.
+ * The application assumes that responsibility when using this
+ * routine.
+ */
+int odp_packet_set_l3_offset(odp_packet_t pkt, size_t offset);
+
+/**
+ * Get the Layer 3 protocol of this packet
+ *
+ * @param[in] pkt  Packet handle
+ *
+ * @return         Ethertype of the Layer 3 protocol used or
+ *                 ODP_NO_L3_PROTOCOL if no Layer 3 protocol exists.
+ *
+ * @note This routine returns the IANA-assigned Ethertype of the Layer
+ * 3 protocol used in the packet. This is the last Layer 2 Ethertype
+ * that defines the Layer 3 protocol. This is widened from a uint16_t
+ * to an int to allow for error return codes. Note: This value is
+ * returned in host-endian format.
+ */
+uint32_t odp_packet_l3_protocol(odp_packet_t pkt);
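
A sketch combining the Layer 3 meta data accessors with
odp_packet_offset_map() (declared later in this file) to reach the IPv4
header; 0x0800 is the standard IPv4 Ethertype and pkt is assumed valid:

@code
size_t l3_off = odp_packet_l3_offset(pkt);

if (l3_off != ODP_PACKET_OFFSET_INVALID &&
    odp_packet_l3_protocol(pkt) == 0x0800) {
        size_t seglen;
        void *ipv4 = odp_packet_offset_map(pkt, l3_off, &seglen);
        /* parse the IPv4 header at ipv4, staying within seglen bytes */
}
@endcode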
+
+/**
+ * Set the Layer 3 protocol of this packet
+ *
+ * @param[in] pkt  Packet handle
+ * @param[in] pcl  Layer 3 protocol value
+ *
+ * @note This routine sets the IANA-assigned Ethertype of the Layer 3
+ * protocol used in the packet. This is the last Layer 2 Ethertype
+ * that defines the Layer 3 protocol. Note: This routine simply sets
+ * the Layer 3 protocol meta data for the packet. It does not affect
+ * packet contents. It is the caller’s responsibility to ensure that
+ * the packet contents matches the specified value.
+ */
+void odp_packet_set_l3_protocol(odp_packet_t pkt, uint16_t pcl);
+
+/**
+ * Get offset of start of Layer 4 headers
+ *
+ * @param[in] pkt  Packet handle
+ *
+ * @return         Byte offset into packet of start of Layer 4 headers
+ *                 or ODP_PACKET_OFFSET_INVALID if not found.
+ *
+ * @note This routine is an accessor function that returns the byte
+ * offset of the start of the Layer 4 headers of a packet. Results
+ * are undefined if the supplied pkt does not specify a valid packet.
+ * In conjunction with the odp_packet_l4_protocol() routine, this
+ * routine allows the caller to process the Layer 4 header associated
+ * with the packet directly if desired.
+ */
+size_t odp_packet_l4_offset(odp_packet_t pkt);
+
+/**
+ * Set offset of start of Layer 4 headers
+ *
+ * @param[in] pkt    Packet handle
+ * @param[in] offset Byte offset into packet of start of Layer 4 headers
+ *
+ * @return 0 on Success, -1 on error.
+ *
+ * @note This routine is an accessor function that sets the byte
+ * offset of the start of the Layer 4 headers of a packet. Results
+ * are undefined if the supplied pkt does not specify a valid
+ * packet. An error return results if the specified offset is out of
+ * range. In conjunction with the odp_packet_set_l4_protocol()
+ * routine, this routine allows the caller to specify the Layer 4
+ * header meta data with the packet directly if desired. Note that
+ * this routine does not verify that the specified offset correlates
+ * with packet contents. The application assumes that responsibility
+ * when using this routine.
  */
-void *odp_packet_seg_pull_head(odp_packet_t pkt, odp_packet_seg_t seg,
-			       size_t len);
+int odp_packet_set_l4_offset(odp_packet_t pkt, size_t offset);
 
 /**
- * Push out segment tail
+ * Get the Layer 4 protocol of this packet
  *
- * Increase segment data length.
+ * @param[in] pkt  Packet handle
+ *
+ * @return         Protocol number of the Layer 4 protocol used or
+ *                 ODP_NO_L4_PROTOCOL if none exists.
+ *
+ * @note This routine returns the IANA-assigned Protocol number of the
+ * Layer 4 protocol used in the packet. This is widened from a uint8_t
+ * to a uint32_t to allow for error return codes.
+ */
+uint32_t odp_packet_l4_protocol(odp_packet_t pkt);
+
+/**
+ * Set the Layer 4 protocol of this packet
+ *
+ * @param[in] pkt  Packet handle
+ * @param[in] pcl  Layer 4 protocol value
+ *
+ * @note This routine sets the IANA-assigned Protocol number of the
+ * Layer 4 protocol used in the packet. Note: This routine simply
+ * sets the Layer 4 protocol meta data for the packet. It does not
+ * affect packet contents. It is the caller’s responsibility to
+ * ensure that the packet contents matches the specified value.
+ */
+void odp_packet_set_l4_protocol(odp_packet_t pkt, uint8_t pcl);
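
A sketch of recording protocol meta data after building headers by hand;
the offsets shown assume an untagged Ethernet frame carrying a minimal
IPv4 header and UDP (0x0800 and 17 are the standard IPv4 Ethertype and
UDP protocol number):

@code
odp_packet_set_l2_offset(pkt, 0);
odp_packet_set_l3_offset(pkt, 14);       /* after 14-byte Ethernet header */
odp_packet_set_l3_protocol(pkt, 0x0800);
odp_packet_set_l4_offset(pkt, 14 + 20);  /* after minimal IPv4 header */
odp_packet_set_l4_protocol(pkt, 17);
@endcode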
+
+/**
+ * Get offset of start of packet payload
+ *
+ * @param[in] pkt  Packet handle
+ *
+ * @return         Byte offset into packet of start of packet payload
+ *                 or ODP_PACKET_OFFSET_INVALID if not found.
+ *
+ * @note This routine is an accessor function that returns the byte
+ * offset of the start of the packet payload. Results are undefined
+ * if the supplied pkt does not specify a valid packet. For ODP, the
+ * packet payload is defined as the first byte beyond the last packet
+ * header recognized by the ODP packet parser. For certain protocols
+ * this may in fact be the start of a Layer 5 header, or an
+ * unrecognized Layer 3 or Layer 4 header, however ODP does not make
+ * this distinction.
+ */
+size_t odp_packet_payload_offset(odp_packet_t pkt);
+
+/**
+ * Set offset of start of packet payload
+ *
+ * @param[in] pkt    Packet handle
+ * @param[in] offset Byte offset into packet of start of packet payload
+ *
+ * @return 0 on Success, -1 on error
+ *
+ * @note This routine is an accessor function that sets the byte
+ * offset of the start of the packet payload. Results are undefined
+ * if the supplied pkt does not specify a valid packet. An error
+ * return results if the specified offset is out of range. For ODP,
+ * the packet payload is defined as the first byte beyond the last
+ * packet header recognized by the ODP packet parser. For certain
+ * protocols this may in fact be the start of a Layer 5 header, or an
+ * unrecognized Layer 3 or Layer 4 header, however ODP does not make
+ * this distinction. Note that this routine does not verify that the
+ * specified offset correlates with packet contents. The application
+ * assumes that responsibility when using this routine.
+ */
+int odp_packet_set_payload_offset(odp_packet_t pkt, size_t offset);
+
+/**
+ * Get count of number of segments in a packet
+ *
+ * @param[in] pkt  Packet handle
+ *
+ * @return         Count of the number of segments in pkt
+ *
+ * @note This routine returns the number of physical segments in the
+ * referenced packet. A packet that is not in an aggregated buffer
+ * will return 1 since it is comprised of a single segment. The
+ * packet segments of the aggregate buffer are in the range
+ * [0..odp_packet_segment_count-1]. Results are undefined if the
+ * supplied pkt is invalid. Use odp_packet_is_valid() to verify
+ * packet validity if needed.
+ */
+int odp_packet_segment_count(odp_packet_t pkt);
+
+/**
+ * Get the segment identifier for a packet segment by index
+ *
+ * @param[in] pkt  Packet handle
+ * @param[in] ndx  Segment index of segment of interest
+ *
+ * @return         Segment identifier or ODP_SEGMENT_INVALID if the
+ *                 supplied ndx is out of range.
+ *
+ * @note This routine returns the abstract identifier
+ * (odp_packet_segment_t) of a particular segment by its index value.
+ * Valid ndx values are in the range
+ * [0..odp_packet_segment_count(pkt)-1]. Results are undefined if the
+ * supplied pkt is invalid. Use odp_packet_is_valid() to verify
+ * packet validity if needed.
+ */
+odp_packet_segment_t odp_packet_segment_by_index(odp_packet_t pkt, size_t ndx);
+
+/**
+ * Get the next segment identifier for a packet segment
+ *
+ * @param[in] pkt  Packet handle
+ * @param[in] seg  Segment identifier of the previous segment
+ *
+ * @return         Segment identifier of next segment or ODP_SEGMENT_INVALID
+ *
+ * @note This routine returns the abstract identifier
+ * (odp_packet_segment_t) of the next packet segment in a buffer
+ * aggregate. The input specifies the packet and the previous segment
+ * identifier. There are three use cases for this routine:
+ *
+ * -# If the input seg is ODP_SEGMENT_START then the segment
+ * identifier returned is that of the first segment in the packet.
+ * ODP_SEGMENT_NULL MAY be used as a synonym for ODP_SEGMENT_START
+ * for symmetry if desired.
+ *
+ * -# If the input seg is not the last segment in the packet then the
+ * segment identifier of the next segment following seg is returned.
+ *
+ * -# If the input seg is the segment identifier of the last segment
+ * in the packet then ODP_SEGMENT_NULL is returned.
+ *
+ */
+odp_packet_segment_t odp_packet_segment_next(odp_packet_t pkt,
+					     odp_packet_segment_t seg);
+
+/**
+ * Get start address for a specified packet segment
+ *
+ * @param[in]  pkt     Packet handle
+ * @param[in]  seg     Segment identifier of the packet to be addressed
+ * @param[out] seglen  Returned number of bytes in this packet
+ *                     segment available at returned address
+ *
+ * @return             Start address of packet within segment or NULL
+ *
+ * @note This routine is used to obtain addressability to a segment
+ * within a packet aggregate at a specified segment identifier. The
+ * returned seglen indicates the number of bytes addressable at the
+ * returned address. Note that the returned address is always within
+ * the packet and the address returned is the first packet byte within
+ * the specified segment. So if the packet itself begins at a
+ * non-zero byte offset into the physical segment then the address
+ * returned by this call will not be the same as the starting address
+ * of the physical segment containing the packet.
+ */
+void *odp_packet_segment_map(odp_packet_t pkt, odp_packet_segment_t seg,
+			     size_t *seglen);
+
+/**
+ * Unmap a packet segment
+ *
+ * @param[in] seg  Packet segment handle
+ *
+ * @note This routine is used to unmap a packet segment previously
+ * mapped by odp_packet_segment_map(). Following this call,
+ * applications MUST NOT attempt to reference the segment via any
+ * pointer returned from a previous odp_packet_segment_map() call
+ * referring to it. It is intended to allow certain NUMA
+ * architectures to better manage the coherency of mapped segments.
+ * For non-NUMA architectures this routine will be a no-op. Note that
+ * implementations SHOULD implicitly unmap all packet segments
+ * whenever a packet is freed or added to a queue as this indicates
+ * that the caller is relinquishing control of the packet.
+ */
+void odp_packet_segment_unmap(odp_packet_segment_t seg);
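
A sketch of walking every physical segment of a packet with the segment
routines above (assumes pkt is valid; error handling kept minimal):

@code
int i, n = odp_packet_segment_count(pkt);
size_t seglen;

for (i = 0; i < n; i++) {
        odp_packet_segment_t seg = odp_packet_segment_by_index(pkt, i);
        void *data = odp_packet_segment_map(pkt, seg, &seglen);

        if (data == NULL)
                break;
        /* process the seglen packet bytes available at data */
        odp_packet_segment_unmap(seg);
}
@endcode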
+
+/**
+ * Get start address for a specified packet offset
+ *
+ * @param[in]  pkt     Packet handle
+ * @param[in]  offset  Byte offset within the packet to be addressed
+ * @param[out] seglen  Returned number of bytes in this packet
+ *                     segment available at returned address
+ *
+ * @return             Offset start address or NULL
+ *
+ * @note This routine returns the address of the packet starting at
+ * the specified byte offset. The returned seglen indicates the
+ * number of addressable bytes available at the returned address.
+ * This limit MUST be honored by the caller.
+ *
+ * @par
+ * Note that this is a general routine for accessing arbitrary
+ * byte offsets within a packet and is the basis for the “shortcut”
+ * APIs described below that access specific parser-identified offsets
+ * of interest.
+ *
+ * @par
+ * Note also that the returned seglen is always the minimum of
+ * the physical buffer segment size available at the starting offset
+ * and odp_packet_len() - offset. This rule applies to the “shortcut”
+ * routines that follow as well.
+ *
+ * @par
+ * For example, suppose the underlying implementation uses 256
+ * byte physical segment sizes and odp_packet_len() is 900. In this
+ * case a call to odp_packet_offset_map() for offset 200 would return a
+ * seglen of 56, a call for offset 256 would return a seglen of 256, and
+ * a call for offset 768 would return a seglen of 132 since the packet
+ * ends there.
+ */
+void *odp_packet_offset_map(odp_packet_t pkt, size_t offset,
+			    size_t *seglen);
+
+/**
+ * Unmap a packet segment by offset
+ *
+ * @param[in] pkt    Packet handle
+ * @param[in] offset Packet offset
+ *
+ * @note This routine is used to unmap a buffer segment previously
+ * implicitly mapped by odp_packet_offset_map(). Following this call
+ * the application MUST NOT attempt to reference the segment via any
+ * pointer returned by a prior odp_packet_offset_map() call relating
+ * to this offset. It is intended to allow certain NUMA architectures
+ * to better manage the coherency of mapped segments. For non-NUMA
+ * architectures this routine will be a no-op. Note that
+ * implementations SHOULD implicitly unmap all packet segments
+ * whenever a packet is added to a queue as this indicates that the
+ * caller is relinquishing control of the packet.
+ */
+void odp_packet_offset_unmap(odp_packet_t pkt, size_t offset);
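
A sketch of reading an arbitrary byte range out of a possibly segmented
packet with the offset mapping routines (buf, offset and len are
caller-supplied, buf holds at least len bytes, and memcpy from
<string.h> is assumed):

@code
size_t copied = 0;

while (copied < len) {
        size_t seglen;
        void *src = odp_packet_offset_map(pkt, offset + copied, &seglen);

        if (src == NULL)
                break;
        if (seglen > len - copied)
                seglen = len - copied;
        memcpy((uint8_t *)buf + copied, src, seglen);
        odp_packet_offset_unmap(pkt, offset + copied);
        copied += seglen;
}
@endcode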
+
+/**
+ * Map packet to provide addressability to it
+ *
+ * @param[in]  pkt    Packet handle
+ * @param[out] seglen Number of contiguous bytes available at returned address
+ *
+ * @return         Packet start address or NULL
+ *
+ * @note This routine is an accessor function that returns the
+ * starting address of the packet. This is the first byte that would
+ * be placed on the wire if the packet were transmitted at the time of
+ * the call. This is normally the same as the first byte of the
+ * Ethernet frame that was received, and would normally be the start
+ * of the L2 header. Behavior of this routine is equivalent to the
+ * call:
+ *
+ * @code
+ * odp_packet_offset_map(pkt,0,&seglen);
+ * @endcode
+ *
+ * @par
+ * It is thus a shortcut for rapid access to the raw packet
+ * headers. Note that the returned seglen is the minimum of the
+ * packet length and the number of contiguous bytes available in the
+ * packet segment containing the returned starting address. It is a
+ * programming error to attempt to address beyond this returned
+ * length.
+ *
+ * @par
+ * For packets created by odp_packet_alloc() or
+ * odp_packet_alloc_len() this is the first byte of the allocated
+ * packet’s contents. Note that in the case of odp_packet_alloc() the
+ * packet length defaults to 0 and in the case of
+ * odp_packet_alloc_len() the contents of the packet is indeterminate
+ * until the application creates that content. Results are undefined
+ * if the supplied pkt does not represent a valid packet.
+ *
+ * @par
+ * Note that applications would normally not use this routine
+ * unless they need to do their own parsing of header fields or are
+ * otherwise directly adding or manipulating their own packet headers.
+ * Applications SHOULD normally use accessor functions to obtain the
+ * parsed header information they need directly.
+ *
+ */
+void *odp_packet_map(odp_packet_t pkt, size_t *seglen);
+
+/**
+ * Get addressability to first packet segment
+ *
+ * @param[in]  pkt    Packet handle
+ *
+ * @return         Packet start address or NULL
+ *
+ * @warning Deprecated API!
+ * @warning
+ * This API provides a fast path for addressability to the first
+ * segment of a packet.  Calls to this routine SHOULD be replaced
+ * with corresponding calls to odp_packet_map() since this routine
+ * gives no indication of addressing limits of the returned pointer.
+ */
+void *odp_packet_addr(odp_packet_t pkt);
+
+/**
+ * Get address for the preparsed Layer 2 header
+ *
+ * @param[in]  pkt     Packet handle
+ * @param[out] seglen  Returned number of bytes in this packet
+ *                     segment available at returned address
+ *
+ * @return             Layer 2 start address or NULL
+ *
+ * @note This routine provides the caller with addressability to the
+ * first Layer 2 header of the packet, as identified by the ODP
+ * parser. Note that this may not necessarily represent the first
+ * byte of the packet as the caller may have pushed additional
+ * (unparsed) headers onto the packet. Also, if the packet does not
+ * have a recognized Layer 2 header then this routine will return NULL
+ * while odp_packet_map() will always return the address of the first
+ * byte of the packet (even if the packet is of null length).
+ *
+ * @par
+ *  Note that the behavior of this routine is identical to the
+ * call odp_packet_offset_map(pkt,odp_packet_l2_offset(pkt),&seglen).
+ *
+ */
+void *odp_packet_l2_map(odp_packet_t pkt, size_t *seglen);
+
+/**
+ * Get address for the preparsed Layer 3 header
+ *
+ * @param[in]  pkt     Packet handle
+ * @param[out] seglen  Returned number of bytes in this packet
+ *                     segment available at returned address
+ *
+ * @return             Layer 3 start address or NULL
+ *
+ * @note This routine provides the caller with addressability to the
+ * first Layer 3 header of the packet, as identified by the ODP
+ * parser. If the packet does not have a recognized Layer 3 header
+ * then this routine will return NULL.
+ *
+ * @par
+ * Note that the behavior of this routine is identical to the
+ * call odp_packet_offset_map(pkt,odp_packet_l3_offset(pkt),&seglen).
+ *
+ */
+void *odp_packet_l3_map(odp_packet_t pkt, size_t *seglen);
+
+/**
+ * Get address for the preparsed Layer 4 header
+ *
+ * @param[in]  pkt     Packet handle
+ * @param[out] seglen  Returned number of bytes in this packet
+ *                     segment available at returned address
+ *
+ * @return             Layer 4 start address or NULL
+ *
+ * @note This routine provides the caller with addressability to the
+ * first Layer 4 header of the packet, as identified by the ODP
+ * parser. If the packet does not have a recognized Layer 4 header
+ * then this routine will return NULL.
+ *
+ * @par
+ * Note that the behavior of this routine is identical to the
+ * call odp_packet_offset_map(pkt,odp_packet_l4_offset(pkt),&seglen).
+ *
+ */
+void *odp_packet_l4_map(odp_packet_t pkt, size_t *seglen);
+
+/**
+ * Get address for the packet payload
+ *
+ * @param[in]  pkt      Packet handle
+ * @param[out] seglen  Returned number of bytes in this packet
+ *                     segment available at returned address
+ *
+ * @return             Payload start address or NULL
+ *
+ * @note This routine provides the caller with addressability to the
+ * payload of the packet, as identified by the ODP parser. If the
+ * packet does not have a recognized payload (e.g., a TCP ACK packet)
+ * then this routine will return NULL. As noted above, ODP defines
+ * the packet payload to be the first byte after the last recognized
+ * header. This may in fact represent a Layer 5 header, or an
+ * unrecognized Layer 3 or Layer 4 header. It is an application
+ * responsibility to know how to deal with these bytes based on its
+ * protocol knowledge.
+ *
+ * @par
+ * Note that the behavior of this routine is identical to the call
+ * odp_packet_offset_map(pkt,odp_packet_payload_offset(pkt),&seglen).
+ *
+ */
+void *odp_packet_payload_map(odp_packet_t pkt, size_t *seglen);
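
A minimal sketch of addressing the parsed payload while tolerating
packets that have none (assumes pkt is valid):

@code
size_t seglen;
void *payload = odp_packet_payload_map(pkt, &seglen);

if (payload != NULL) {
        /* the first seglen payload bytes are addressable here */
} else {
        /* no recognized payload, e.g. a bare TCP ACK */
}
@endcode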
+
+/**
+ * Clone a packet, returning an exact copy of it
+ *
+ * @param[in] pkt  Packet handle of packet to duplicate
+ *
+ * @return         Handle of the duplicated packet or ODP_PACKET_INVALID
+ *                 if the operation was not performed
+ *
+ * @note This routine allows an ODP packet to be cloned in an
+ * implementation-defined manner. The contents of the returned
+ * odp_packet_t is an exact copy of the input packet. The
+ * implementation MAY perform this operation via reference counts,
+ * resegmentation, or any other technique it wishes to employ. The
+ * cloned packet is an element of the same buffer pool as the input
+ * pkt and shares the same system meta data such as headroom and
+ * tailroom. If the input pkt contains user meta data, then this data
+ * MUST be copied to the returned packet by the ODP implementation.
+ *
+ * @par
+ * This routine is OPTIONAL. An implementation that does not
+ * support this function MUST provide a matching routine that simply
+ * returns ODP_PACKET_INVALID with an errno of
+ * ODP_FUNCTION_NOT_AVAILABLE.
+ */
+odp_packet_t odp_packet_clone(odp_packet_t pkt);
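
A sketch of using this OPTIONAL routine defensively (pool is a
hypothetical odp_buffer_pool_t owned by the application, used for the
fallback copy):

@code
odp_packet_t dup = odp_packet_clone(pkt);

if (dup == ODP_PACKET_INVALID) {
        /* clone not available or failed; fall back to a full copy */
        dup = odp_packet_copy(pkt, pool);
}
@endcode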
+
+/**
+ * Copy a packet, returning an exact copy of it
+ *
+ * @param[in] pkt  Packet handle of packet to copy
+ * @param[in] pool Buffer pool to contain copied packet
+ *
+ * @return         Handle of the copied packet or ODP_PACKET_INVALID
+ *                 if the operation was not performed
+ *
+ * @note This routine allows an ODP packet to be copied in an
+ * implementation-defined manner. The specified pool may or may not
+ * be different from that of the source packet, but if different MUST
+ * be of type ODP_BUFFER_TYPE_PACKET. The contents of the returned
+ * odp_packet_t is an exact separate copy of the input packet, and as
+ * such inherits its initial headroom and tailroom settings from the
+ * buffer pool from which it is allocated. If the input pkt contains
+ * user meta data, then this data MUST be copied to the returned
+ * packet if needed by the ODP implementation.
+ *
+ * @par
+ * This routine is OPTIONAL. An implementation that does not
+ * support this function MUST provide a matching routine that simply
+ * returns ODP_PACKET_INVALID with an errno of
+ * ODP_FUNCTION_NOT_AVAILABLE.
+ */
+odp_packet_t odp_packet_copy(odp_packet_t pkt, odp_buffer_pool_t pool);
+
+/**
+ * Copy selected bytes from one packet to another
+ *
+ * @param[in] dstpkt    Handle of destination packet
+ * @param[in] dstoffset Byte offset in destination packet to receive bytes
+ * @param[in] srcpkt    Handle of source packet
+ * @param[in] srcoffset Byte offset in source packet from which to copy
+ * @param[in] len       Number of bytes to be copied
+ *
+ * @return 0 on Success, -1 on errors.
+ *
+ * @note This routine copies a slice of an ODP packet to another
+ * packet in an implementation-defined manner. The call copies len
+ * bytes starting at srcoffset from srcpkt to offset dstoffset in
+ * dstpkt. Any existing bytes in the target range of the destination
+ * packet are overwritten by the operation. The operation will fail
+ * if sufficient bytes are not available in the source packet or
+ * sufficient space is not available in the destination packet. This
+ * routine does not change the length of the destination packet. If
+ * the caller wishes to extend the destination packet it must first
+ * push the tailroom of the destination packet to make space available
+ * to receive the copied bytes.
+ *
+ * @par
+ * This routine is OPTIONAL. An implementation that does not
+ * support this function MUST provide a matching routine that simply
+ * returns -1 with an errno of ODP_FUNCTION_NOT_AVAILABLE.
+ */
+int odp_packet_copy_to_packet(odp_packet_t dstpkt, size_t dstoffset,
+			      odp_packet_t srcpkt, size_t srcoffset,
+			      size_t len);
+
+/**
+ * Copy selected bytes from a packet to a memory area
+ *
+ * @param[out] mem       Address to receive copied bytes
+ * @param[in]  srcpkt    Handle of source packet
+ * @param[in]  srcoffset Byte offset in source packet from which to copy
+ * @param[in]  len       Number of bytes to be copied
+ *
+ * @return 0 on Success, -1 on errors.
+ *
+ * @note This routine copies a slice of an ODP packet to an
+ * application-supplied memory area in an implementation-defined
+ * manner. The call copies len bytes starting at srcoffset from
+ * srcpkt to the address specified by mem. Any existing bytes in the
+ * target memory are overwritten by the operation. The operation will
+ * fail if sufficient bytes are not available in the source packet.
+ * It is the caller’s responsibility to ensure that the specified
+ * memory area is large enough to receive the packet bytes being
+ * copied.
+ *
+ * @par
+ * This routine is OPTIONAL. An implementation that does not
+ * support this function MUST provide a matching routine that simply
+ * returns -1 with an errno of ODP_FUNCTION_NOT_AVAILABLE.
+ *
+ */
+int odp_packet_copy_to_memory(void *mem,
+			      odp_packet_t srcpkt, size_t srcoffset,
+			      size_t len);
+
+/**
+ * Copy bytes from a memory area to a specified offset in a packet
+ *
+ * @param[in] dstpkt    Handle of destination packet
+ * @param[in] dstoffset Byte offset in destination packet to receive bytes
+ * @param[in] mem       Address of bytes to be copied
+ * @param[in] len       Number of bytes to be copied
+ *
+ * @return 0 on Success, -1 on errors.
+ *
+ * @note This routine copies len bytes from the application memory
+ * area mem to a specified offset of an ODP packet in an
+ * implementation-defined manner. Any existing bytes in the target
+ * range of the destination packet are overwritten by the operation.
+ * The operation will fail if sufficient space is not available in the
+ * destination packet. This routine does not change the length of the
+ * destination packet. If the caller wishes to extend the destination
+ * packet it must first push the tailroom of the destination packet to
+ * make space available to receive the copied bytes.
+ *
+ * @par
+ * This routine is OPTIONAL. An implementation that does not
+ * support this function MUST provide a matching routine that simply
+ * returns -1 with an errno of ODP_FUNCTION_NOT_AVAILABLE.
+ *
+ */
+int odp_packet_copy_from_memory(odp_packet_t dstpkt, size_t dstoffset,
+				void *mem, size_t len);
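
A minimal sketch of overwriting the start of a packet from an
application buffer (hdr and hdr_len are hypothetical caller-supplied
values; the target range must already lie within the packet length):

@code
if (odp_packet_copy_from_memory(pkt, 0, hdr, hdr_len) != 0) {
        /* insufficient space; extend the packet first */
}
@endcode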
+
+/**
+ * Split a packet into two packets at a specified split point
+ *
+ * @param[in] pkt    Handle of packet to split
+ * @param[in] offset Byte offset within pkt to split packet
+ * @param[in] hr     Headroom of split packet
+ * @param[in] tr     Tailroom of source packet
+ *
+ * @return           Packet handle of the created split packet
+ *
+ * @note This routine splits a packet into two packets at the
+ * specified byte offset. The odp_packet_t returned by the function
+ * is the handle of the new packet created at the split point. The new
+ * (split) packet is allocated from the same buffer pool as the
+ * original packet. If the original packet was len bytes in length
+ * then upon return the original packet is of length offset while the
+ * split packet is of length (len-offset).
+ *
+ * @par
+ * The original packet’s headroom is unchanged by this function.
+ * The split packet inherits its tailroom from the original packet.
+ * The hr and tr parameters are used to assign new headroom and
+ * tailroom values to the split and original packets, respectively.
+ * This operation is illustrated by the following diagrams. Prior to
+ * the split, the original packet looks like this:
+ *
+ * @image html splitbefore.png "Packet before split" width=\textwidth
+ * @image latex splitbefore.eps "Packet before split" width=\textwidth
+ *
+ * @par
+ * After splitting at the specified split offset the result is this:
+ *
+ * @image html splitafter.png "Packet after split" width=\textwidth
+ * @image latex splitafter.eps "Packet after split" width=\textwidth
+ *
+ * @par
+ * The data from the original packet from the specified split
+ * offset to the end of the original packet becomes the split packet.
+ * The packet data at the split point becomes offset 0 of the new
+ * packet created by the split. The split packet inherits the
+ * original packet’s tailroom and is assigned its own headroom from
+ * hr, while the original packet retains its headroom and is assigned
+ * a new tailroom from tr.
+ *
+ * @par
+ * Upon return from this function, the system meta data for both
+ * packets has been updated appropriately by the call since system
+ * meta data maintenance is the responsibility of the ODP
+ * implementation. Any required updates to the user meta data is the
+ * responsibility of the caller.
+ *
+ * @par
+ * This routine is OPTIONAL. An implementation that does not
+ * support this function MUST provide a matching routine that simply
+ * returns ODP_PACKET_INVALID with an errno of
+ * ODP_FUNCTION_NOT_AVAILABLE.
+ */
+odp_packet_t odp_packet_split(odp_packet_t pkt, size_t offset,
+			      size_t hr, size_t tr);
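
A sketch of splitting a packet at its Layer 4 header, giving the new
packet 64 bytes of headroom and assigning the original packet a new
tailroom of 0 (values are illustrative; assumes the packet has a valid
L4 offset):

@code
size_t l4_off = odp_packet_l4_offset(pkt);
odp_packet_t upper = odp_packet_split(pkt, l4_off, 64, 0);

if (upper == ODP_PACKET_INVALID) {
        /* OPTIONAL routine not available, or the split failed */
}
@endcode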
+
+/**
+ * Join two packets into a single packet
+ *
+ * @param[in] pkt1  Packet handle of first packet to join
+ * @param[in] pkt2  Packet handle of second packet to join
+ *
+ * @return          Packet handle of the joined packet
+ *
+ * @note This routine joins two packets into a single packet. Both
+ * pkt1 and pkt2 MUST be from the same buffer pool and the resulting
+ * joined packet will be an element of that same pool. The
+ * application MUST NOT assume that either pkt1 or pkt2 survive the
+ * join or that the returned joined packet is contiguous with or
+ * otherwise related to the input packets. An implementation SHOULD
+ * free either or both input packets if they are not reused as part of
+ * the construction of the returned joined packet. If the join cannot
+ * be performed (e.g., if the two input packets are not from the same
+ * buffer pool, insufficient space in the target buffer pool, etc.)
+ * then ODP_PACKET_INVALID SHOULD be returned to indicate that the
+ * operation could not be performed, and an appropriate errno set. In
+ * such case the input packets MUST NOT be freed as part of the failed
+ * join attempt and MUST be unchanged from their input values and
+ * content.
+ *
+ * @par
+ * The result of odp_packet_join() is the logical concatenation
+ * of the two packets using an implementation-defined aggregation
+ * mechanism. The application data contents of the returned packet is
+ * identical to that of the two joined input packets however certain
+ * associated meta data (e.g., information about the packet length)
+ * will likely differ. The headroom associated with the joined packet
+ * is the headroom of pkt1 while the tailroom of the joined packet is
+ * the tailroom of pkt2. Any tailroom from pkt1 or headroom from pkt2
+ * from before the join is handled in an implementation-defined manner
+ * and is no longer visible to the application.
+ *
+ * @par
+ * If user meta data is present in the input packets, then the
+ * user meta data associated with the returned packet MUST be copied
+ * by this routine from the source pkt1.
+ *
+ * @par
+ * This routine is OPTIONAL. An implementation that does not
+ * support this function MUST provide a matching routine that simply
+ * returns ODP_PACKET_INVALID with an errno of
+ * ODP_FUNCTION_NOT_AVAILABLE.
+ */
+odp_packet_t odp_packet_join(odp_packet_t pkt1, odp_packet_t pkt2);
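
A minimal sketch of rejoining two packets from the same pool; on failure
both inputs remain valid and unchanged:

@code
odp_packet_t joined = odp_packet_join(pkt1, pkt2);

if (joined == ODP_PACKET_INVALID) {
        /* join not performed; keep using pkt1 and pkt2 separately */
}
@endcode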
+
+/**
+ * Push out packet head
+ *
+ * Push out packet address (away from data) and increase data length.
  * Does not modify packet in case of an error.
  *
- * seg_data_len  += len
+ * @code
+ * odp_packet_headroom       -= len
+ * odp_packet_len            += len
+ * odp_packet_l2_offset      += len
+ * odp_packet_l3_offset      += len
+ * odp_packet_l4_offset      += len
+ * odp_packet_payload_offset += len
+ * @endcode
+ *
+ * @param[in] pkt     Packet handle
+ * @param[in] len     Number of octets to push head [0...odp_packet_headroom]
+ *
+ * @return 0 on Success, -1 on error
+ *
+ * @note This routine pushes the packet start away from the current
+ * start point and into the packet headroom. This would normally be
+ * used by the application to prepend additional header information to
+ * the start of the packet. Note that pushing the header does not
+ * affect the parse results. Upon completion odp_packet_map() now
+ * points to the new start of the packet data area and
+ * odp_packet_len() is increased by the specified len.
+ *
+ * @par
+ * Note that it is the caller’s responsibility to initialize the
+ * new header area with meaningful data. This routine simply
+ * manipulates packet meta data and does not affect packet contents.
+ * The specified len is added to the following:
+ *
+ * - odp_packet_l2_offset
+ * - odp_packet_l3_offset
+ * - odp_packet_l4_offset
+ * - odp_packet_payload_offset
+ * - odp_packet_len
+ *
+ * @par
+ * In addition odp_packet_headroom is decremented by the specified len.
+ *
+ * @par
+ * Note that this routine simply adjusts the headroom and other
+ * meta data. If the caller also wishes to immediately address the
+ * newly added header area it can use the
+ * odp_packet_push_head_and_map() routine instead.
+ */
+int odp_packet_push_head(odp_packet_t pkt, size_t len);
+
+/**
+ * Push out packet head and map resulting packet
  *
- * @param pkt  Packet handle
- * @param seg  Segment handle
- * @param len  Number of octets to push tail (0 ... seg_tailroom)
+ * Push out packet address (away from data) and increase data length.
+ * Does not modify packet in case of an error.
  *
- * @return New segment data length, or -1 on an error
+ * @code
+ * odp_packet_headroom       -= len
+ * odp_packet_len            += len
+ * odp_packet_l2_offset      += len
+ * odp_packet_l3_offset      += len
+ * odp_packet_l4_offset      += len
+ * odp_packet_payload_offset += len
+ * @endcode
+ *
+ * @param[in]  pkt     Packet handle
+ * @param[in]  len     Number of octets to push head [0...odp_packet_headroom]
+ * @param[out] seglen  Number of addressable bytes at returned start address
+ *
+ * @return New packet data start address, or NULL on an error
+ *
+ * @note This routine pushes the packet start away from the current
+ * start point and into the packet headroom. This would normally be
+ * used by the application to prepend additional header information to
+ * the start of the packet. Note that pushing the header does not
+ * affect the parse results. Upon completion odp_packet_map() now
+ * points to the new start of the packet data area and
+ * odp_packet_len() is increased by the specified len.
+ *
+ * @par
+ * The returned seglen specifies the number of contiguously
+ * addressable bytes available at the returned start address. The
+ * caller MUST NOT attempt to address beyond this range. To access
+ * additional parts of the packet following odp_packet_push_head() the
+ * odp_packet_offset_map() routine SHOULD be used.
+ *
+ * @par
+ * Note that it is the caller’s responsibility to initialize the
+ * new header area with meaningful data. This routine simply
+ * manipulates packet meta data and does not affect packet contents.
+ * The specified len is added to the following:
+ *
+ * - odp_packet_l2_offset
+ * - odp_packet_l3_offset
+ * - odp_packet_l4_offset
+ * - odp_packet_payload_offset
+ * - odp_packet_len
+ *
+ * @par
+ * In addition odp_packet_headroom is decremented by the specified len.
+ *
+ * @par
+ * This routine is equivalent to the following code:
+ *
+ * @code
+ * odp_packet_push_head(pkt,len);
+ * void *result = odp_packet_map(pkt,&seglen);
+ * @endcode
+ *
+ * @par
+ * It exists for application convenience and MAY offer
+ * implementation efficiency.
  */
-int odp_packet_seg_push_tail(odp_packet_t pkt, odp_packet_seg_t seg,
-			     size_t len);
+void *odp_packet_push_head_and_map(odp_packet_t pkt, size_t len,
+				   size_t *seglen);
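
A sketch of prepending an 8-byte encapsulation header (hdr8 is a
hypothetical caller-supplied buffer and memcpy from <string.h> is
assumed):

@code
size_t seglen;
uint8_t *p = odp_packet_push_head_and_map(pkt, 8, &seglen);

if (p != NULL && seglen >= 8)
        memcpy(p, hdr8, 8);
@endcode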
 
 /**
- * Pull in segment tail
+ * Pull in packet head
  *
- * Decrease segment data length.
+ * Pull in packet address (consuming data) and decrease data length.
  * Does not modify packet in case of an error.
  *
- * seg_data_len  -= len
+ * @code
+ * odp_packet_headroom       += len
+ * odp_packet_len            -= len
+ * odp_packet_l2_offset      -= len
+ * odp_packet_l3_offset      -= len
+ * odp_packet_l4_offset      -= len
+ * odp_packet_payload_offset -= len
+ * @endcode
+ *
+ * @param[in] pkt     Packet handle
+ * @param[in] len     Number of octets to pull head [0...odp_packet_len]
+ *
+ * @return 0 on Success, -1 on error
+ *
+ * @note This routine pulls (consumes) bytes from the start of a
+ * packet, adding to the packet headroom. Typical use of this is to
+ * remove (pop) headers from a packet, possibly prior to pushing new
+ * headers. odp_packet_len() is decreased to reflect the shortened
+ * packet data resulting from the pull. This routine does not affect
+ * the contents of the packet, only meta data that describes it. The
+ * affected parsed offsets are decremented by the specified len,
+ * however no offset is decremented below 0.
+ *
+ * @par
+ * Note: Since odp_packet_push_head() and odp_packet_pull_head()
+ * simply manipulate meta data, it is likely that the meaning of the
+ * pre-parsed header offsets may be lost if headers are stripped and
+ * new headers are inserted. If the application is doing significant
+ * header manipulation, it MAY wish to call odp_packet_parse() when it
+ * is finished to cause the packet to be reparsed and the meaning of
+ * the various parsed meta data to be restored to reflect the new
+ * packet contents.
+ */
+int odp_packet_pull_head(odp_packet_t pkt, size_t len);
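
A sketch of stripping the Layer 2 headers so that the packet begins at
its Layer 3 header (assumes the packet was parsed and has a valid L3
offset):

@code
size_t l3_off = odp_packet_l3_offset(pkt);

if (l3_off != ODP_PACKET_OFFSET_INVALID)
        odp_packet_pull_head(pkt, l3_off);
@endcode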
+
+/**
+ * Pull in packet head and make results addressable to caller
+ *
+ * Pull in packet address (consuming data) and decrease data length.
+ * Does not modify packet in case of an error.
+ *
+ * @code
+ * odp_packet_headroom       += len
+ * odp_packet_len            -= len
+ * odp_packet_l2_offset      -= len
+ * odp_packet_l3_offset      -= len
+ * odp_packet_l4_offset      -= len
+ * odp_packet_payload_offset -= len
+ * @endcode
+ *
+ * @param[in]  pkt     Packet handle
+ * @param[in]  len     Number of octets to pull head [0...odp_packet_len]
+ * @param[out] seglen Number of addressable bytes at returned start address
+ *
+ * @return New packet data start address, or NULL on an error
+ *
+ * @note This routine pulls (consumes) bytes from the start of a
+ * packet, adding to the packet headroom. Typical use of this is to
+ * remove (pop) headers from a packet, possibly prior to pushing new
+ * headers. The return value of this routine is the new
+ * odp_packet_map() for the packet and odp_packet_len() is decreased
+ * to reflect the shortened packet data resulting from the pull. This
+ * routine does not affect the contents of the packet, only meta data
+ * that describes it. The affected parsed offsets are decremented by
+ * the specified len, however no offset is decremented below 0.
+ *
+ * @par
+ * Note: Since odp_packet_push_head() and odp_packet_pull_head()
+ * simply manipulate meta data, it is likely that the meaning of the
+ * pre-parsed header offsets may be lost if headers are stripped and
+ * new headers are inserted. If the application is doing significant
+ * header manipulation, it MAY wish to call odp_packet_parse() when it
+ * is finished to cause the packet to be reparsed and the meaning of
+ * the various parsed meta data to be restored to reflect the new
+ * packet contents.
+ *
+ * @par
+ * Note that this routine is equivalent to the calls:
+ *
+ * @code
+ * odp_packet_pull_head(pkt,len);
+ * void *result = odp_packet_map(pkt,&seglen);
+ * @endcode
+ *
+ * @par
+ * It exists for application convenience and MAY offer
+ * implementation efficiency.
+ */
+void *odp_packet_pull_head_and_map(odp_packet_t pkt, size_t len,
+				   size_t *seglen);
+
+/**
+ * Push out packet tail
+ *
+ * Push out the end of the packet, consuming tailroom and increasing
+ * its length. Does not modify packet in case of an error.
+ *
+ * @code
+ * odp_packet_len      += len
+ * odp_packet_tailroom -= len
+ * @endcode
+ *
+ * @param[in] pkt     Packet handle
+ * @param[in] len     Number of octets to push tail [0...odp_packet_tailroom]
+ *
+ * @return 0 on Success, -1 on Failure
+ *
+ * @note This routine adds additional bytes to the end of a packet,
+ * increasing its length. Note that it does not change the contents
+ * of the packet but simply manipulates the packet meta data. It is
+ * the caller’s responsibility to initialize the new area with
+ * meaningful packet data.
+ *
+ * @par The intended use of this routine is to allow the application
+ * to insert additional payload or trailers onto the packet.
+ */
+int odp_packet_push_tail(odp_packet_t pkt, size_t len);
+
+/**
+ * Push out packet tail and map results
+ *
+ * Push out the end of the packet, consuming tailroom and increasing
+ * its length. Does not modify packet in case of an error.
+ *
+ * @code
+ * odp_packet_len      += len
+ * odp_packet_tailroom -= len
+ * @endcode
+ *
+ * @param[in]  pkt     Packet handle
+ * @param[in]  len     Number of octets to push tail [0...odp_packet_tailroom]
+ * @param[out] seglen  Number of addressable bytes at returned data address
+ *
+ * @return Address of start of additional packet data, or NULL on an error
+ *
+ * @note This routine adds additional bytes to the end of a packet,
+ * increasing its length. Note that it does not change the contents
+ * of the packet but simply manipulates the packet meta data. It is
+ * the caller’s responsibility to initialize the new area with
+ * meaningful packet data.
+ *
+ * @par
+ * This routine is equivalent to the code:
+ *
+ * @code
+ * void *dataptr;
+ * size_t seglen;
+ * odp_packet_push_tail(pkt, len);
+ * dataptr = odp_packet_offset_map(pkt, odp_packet_len(pkt) - len, &seglen);
+ * @endcode
+ *
+ * @par
+ * The returned pointer is the mapped start of the new data area
+ * (beginning at the former odp_packet_len() offset) and the returned
+ * seglen is the number of contiguously addressable bytes available at
+ * that address. The caller should initialize the additional data
+ * bytes to meaningful values. If seglen is less than the requested
+ * len then odp_packet_offset_map() should be used to address the
+ * remaining bytes.
+ *
+ * @par
+ * The intended use of this routine is to allow the application
+ * to insert additional payload or trailers onto the packet.
+ */
+void *odp_packet_push_tail_and_map(odp_packet_t pkt, size_t len,
+				   size_t *seglen);
+
+/**
+ * Pull in packet tail
+ *
+ * Reduce packet length, trimming data from the end of the packet,
+ * and adding to its tailroom. Does not modify packet in case of an error.
+ *
+ * @code
+ * odp_packet_len      -= len
+ * odp_packet_tailroom += len
+ * @endcode
+ *
+ * @param[in] pkt     Packet handle
+ * @param[in] len     Number of octets to pull tail [0...odp_packet_len]
  *
- * @param pkt  Packet handle
- * @param seg  Segment handle
- * @param len  Number of octets to pull tail (0 ... seg_data_len)
+ * @return 0 on Success, -1 on failure.
  *
- * @return New segment data length, or -1 on an error
+ * @note This routine pulls in the packet tail, adding those bytes to
+ * the packet tailroom. Upon successful return the packet has been
+ * trimmed by len bytes. The intended use of this routine is to allow
+ * the application to remove trailers from the packet.
  */
-int odp_packet_seg_pull_tail(odp_packet_t pkt, odp_packet_seg_t seg,
-			     size_t len);
+int odp_packet_pull_tail(odp_packet_t pkt, size_t len);
 
 /**
  * @}
diff --git a/platform/linux-generic/include/api/odp_packet_flags.h b/platform/linux-generic/include/api/odp_packet_flags.h
deleted file mode 100644
index ccaa04f..0000000
--- a/platform/linux-generic/include/api/odp_packet_flags.h
+++ /dev/null
@@ -1,334 +0,0 @@ 
-/* Copyright (c) 2014, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier:     BSD-3-Clause
- */
-
-
-/**
- * @file
- *
- * ODP packet flags
- */
-
-#ifndef ODP_PACKET_FLAGS_H_
-#define ODP_PACKET_FLAGS_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp_std_types.h>
-#include <odp_packet.h>
-
-/** @addtogroup odp_packet
- *  Boolean operations on a packet.
- *  @{
- */
-
-/**
- * Check for packet errors
- *
- * Checks all error flags at once.
- *
- * @param pkt Packet handle
- * @return 1 if packet has errors, 0 otherwise
- */
-int odp_packet_error(odp_packet_t pkt);
-
-/**
- * Check if error was 'frame length' error
- *
- * @param pkt Packet handle
- * @return 1 if frame length error detected, 0 otherwise
- */
-int odp_packet_errflag_frame_len(odp_packet_t pkt);
-
-/**
- * Check for L2 header, e.g. ethernet
- *
- * @param pkt Packet handle
- * @return 1 if packet contains a valid & known L2 header, 0 otherwise
- */
-int odp_packet_inflag_l2(odp_packet_t pkt);
-
-/**
- * Check for L3 header, e.g. IPv4, IPv6
- *
- * @param pkt Packet handle
- * @return 1 if packet contains a valid & known L3 header, 0 otherwise
- */
-int odp_packet_inflag_l3(odp_packet_t pkt);
-
-/**
- * Check for L4 header, e.g. UDP, TCP, SCTP (also ICMP)
- *
- * @param pkt Packet handle
- * @return 1 if packet contains a valid & known L4 header, 0 otherwise
- */
-int odp_packet_inflag_l4(odp_packet_t pkt);
-
-/**
- * Check for Ethernet header
- *
- * @param pkt Packet handle
- * @return 1 if packet contains a valid eth header, 0 otherwise
- */
-int odp_packet_inflag_eth(odp_packet_t pkt);
-
-/**
- * Check for jumbo frame
- *
- * @param pkt Packet handle
- * @return 1 if packet contains jumbo frame, 0 otherwise
- */
-int odp_packet_inflag_jumbo(odp_packet_t pkt);
-
-/**
- * Check for VLAN
- *
- * @param pkt Packet handle
- * @return 1 if packet contains a VLAN header, 0 otherwise
- */
-int odp_packet_inflag_vlan(odp_packet_t pkt);
-
-/**
- * Check for VLAN QinQ (stacked VLAN)
- *
- * @param pkt Packet handle
- * @return 1 if packet contains a VLAN QinQ header, 0 otherwise
- */
-int odp_packet_inflag_vlan_qinq(odp_packet_t pkt);
-
-/**
- * Check for ARP
- *
- * @param pkt Packet handle
- * @return 1 if packet contains an ARP header, 0 otherwise
- */
-int odp_packet_inflag_arp(odp_packet_t pkt);
-
-/**
- * Check for IPv4
- *
- * @param pkt Packet handle
- * @return 1 if packet contains an IPv4 header, 0 otherwise
- */
-int odp_packet_inflag_ipv4(odp_packet_t pkt);
-
-/**
- * Check for IPv6
- *
- * @param pkt Packet handle
- * @return 1 if packet contains an IPv6 header, 0 otherwise
- */
-int odp_packet_inflag_ipv6(odp_packet_t pkt);
-
-/**
- * Check for IP fragment
- *
- * @param pkt Packet handle
- * @return 1 if packet is an IP fragment, 0 otherwise
- */
-int odp_packet_inflag_ipfrag(odp_packet_t pkt);
-
-/**
- * Check for IP options
- *
- * @param pkt Packet handle
- * @return 1 if packet contains IP options, 0 otherwise
- */
-int odp_packet_inflag_ipopt(odp_packet_t pkt);
-
-/**
- * Check for IPSec
- *
- * @param pkt Packet handle
- * @return 1 if packet requires IPSec processing, 0 otherwise
- */
-int odp_packet_inflag_ipsec(odp_packet_t pkt);
-
-/**
- * Check for UDP
- *
- * @param pkt Packet handle
- * @return 1 if packet contains a UDP header, 0 otherwise
- */
-int odp_packet_inflag_udp(odp_packet_t pkt);
-
-/**
- * Check for TCP
- *
- * @param pkt Packet handle
- * @return 1 if packet contains a TCP header, 0 otherwise
- */
-int odp_packet_inflag_tcp(odp_packet_t pkt);
-
-/**
- * Check for SCTP
- *
- * @param pkt Packet handle
- * @return 1 if packet contains an SCTP header, 0 otherwise
- */
-int odp_packet_inflag_sctp(odp_packet_t pkt);
-
-/**
- * Check for ICMP
- *
- * @param pkt Packet handle
- * @return 1 if packet contains an ICMP header, 0 otherwise
- */
-int odp_packet_inflag_icmp(odp_packet_t pkt);
-
-/**
- * Request L4 checksum calculation
- *
- * @param pkt Packet handle
- */
-void odp_packet_outflag_l4_chksum(odp_packet_t pkt);
-
-/**
- * Set flag for L2 header, e.g. ethernet
- *
- * @param pkt Packet handle
- * @param val Value
- */
-void odp_packet_set_inflag_l2(odp_packet_t pkt, int val);
-
-/**
- * Set flag for L3 header, e.g. IPv4, IPv6
- *
- * @param pkt Packet handle
- * @param val Value
- */
-void odp_packet_set_inflag_l3(odp_packet_t pkt, int val);
-
-/**
- * Set flag for L4 header, e.g. UDP, TCP, SCTP (also ICMP)
- *
- * @param pkt Packet handle
- * @param val Value
- */
-void odp_packet_set_inflag_l4(odp_packet_t pkt, int val);
-
-/**
- * Set flag for Ethernet header
- *
- * @param pkt Packet handle
- * @param val Value
- */
-void odp_packet_set_inflag_eth(odp_packet_t pkt, int val);
-
-/**
- * Set flag for jumbo frame
- *
- * @param pkt Packet handle
- * @param val Value
- */
-void odp_packet_set_inflag_jumbo(odp_packet_t pkt, int val);
-
-/**
- * Set flag for VLAN
- *
- * @param pkt Packet handle
- * @param val Value
- */
-void odp_packet_set_inflag_vlan(odp_packet_t pkt, int val);
-
-/**
- * Set flag for VLAN QinQ (stacked VLAN)
- *
- * @param pkt Packet handle
- * @param val Value
- */
-void odp_packet_set_inflag_vlan_qinq(odp_packet_t pkt, int val);
-
-/**
- * Set flag for ARP
- *
- * @param pkt Packet handle
- * @param val Value
- */
-void odp_packet_set_inflag_arp(odp_packet_t pkt, int val);
-
-/**
- * Set flag for IPv4
- *
- * @param pkt Packet handle
- * @param val Value
- */
-void odp_packet_set_inflag_ipv4(odp_packet_t pkt, int val);
-
-/**
- * Set flag for IPv6
- *
- * @param pkt Packet handle
- * @param val Value
- */
-void odp_packet_set_inflag_ipv6(odp_packet_t pkt, int val);
-
-/**
- * Set flag for IP fragment
- *
- * @param pkt Packet handle
- * @param val Value
- */
-void odp_packet_set_inflag_ipfrag(odp_packet_t pkt, int val);
-
-/**
- * Set flag for IP options
- *
- * @param pkt Packet handle
- * @param val Value
- */
-void odp_packet_set_inflag_ipopt(odp_packet_t pkt, int val);
-
-/**
- * Set flag for IPSec
- *
- * @param pkt Packet handle
- * @param val Value
- */
-void odp_packet_set_inflag_ipsec(odp_packet_t pkt, int val);
-
-/**
- * Set flag for UDP
- *
- * @param pkt Packet handle
- * @param val Value
- */
-void odp_packet_set_inflag_udp(odp_packet_t pkt, int val);
-
-/**
- * Set flag for TCP
- *
- * @param pkt Packet handle
- * @param val Value
- */
-void odp_packet_set_inflag_tcp(odp_packet_t pkt, int val);
-
-/**
- * Set flag for SCTP
- *
- * @param pkt Packet handle
- * @param val Value
- */
-void odp_packet_set_inflag_sctp(odp_packet_t pkt, int val);
-
-/**
- * Set flag for ICMP
- *
- * @param pkt Packet handle
- * @param val Value
- */
-void odp_packet_set_inflag_icmp(odp_packet_t pkt, int val);
-
-/**
- * @}
- */
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp_buffer_inlines.h b/platform/linux-generic/include/odp_buffer_inlines.h
new file mode 100644
index 0000000..c7eb3f1
--- /dev/null
+++ b/platform/linux-generic/include/odp_buffer_inlines.h
@@ -0,0 +1,164 @@ 
+/* Copyright (c) 2013-2014, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier:     BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * Inline functions for ODP buffer mgmt routines - implementation internal
+ */
+
+#ifndef ODP_BUFFER_INLINES_H_
+#define ODP_BUFFER_INLINES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+static inline odp_buffer_t odp_hdr_to_buf(odp_buffer_hdr_t *hdr)
+{
+	return hdr->buf_hdl.handle;
+}
+
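+/*
+ * Buffer handles pack the pool id, the buffer's index within the pool
+ * (in cache-line units from the pool base) and a segment index into a
+ * single 32-bit word (see odp_buffer_bits_t); segment index 0 denotes
+ * the buffer itself.
+ */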
+static inline odp_buffer_t odp_buffer_encode_handle(odp_buffer_hdr_t *hdr)
+{
+	odp_buffer_bits_t handle;
+	uint32_t pool_id = pool_handle_to_index(hdr->pool_hdl);
+	struct pool_entry_s *pool = get_pool_entry(pool_id);
+
+	handle.pool_id = pool_id;
+	handle.index = ((uint8_t *)hdr - pool->pool_base_addr) /
+		ODP_CACHE_LINE_SIZE;
+	handle.seg = 0;
+
+	return handle.u32;
+}
+
+static inline odp_buffer_segment_t odp_hdr_to_seg(odp_buffer_hdr_t *hdr,
+						  size_t ndx)
+{
+	odp_buffer_bits_t handle;
+	uint32_t pool_id = pool_handle_to_index(hdr->pool_hdl);
+	struct pool_entry_s *pool = get_pool_entry(pool_id);
+
+	handle.pool_id = pool_id;
+	handle.index = ((uint8_t *)hdr - pool->pool_base_addr) /
+		ODP_CACHE_LINE_SIZE;
+	handle.seg = ndx;
+
+	return handle.u32;
+}
+
+static inline odp_buffer_hdr_t *odp_buf_to_hdr(odp_buffer_t buf)
+{
+	odp_buffer_bits_t handle;
+	uint32_t pool_id;
+	uint32_t index;
+	struct pool_entry_s *pool;
+
+	handle.u32 = buf;
+	pool_id    = handle.pool_id;
+	index      = handle.index;
+
+#ifdef POOL_ERROR_CHECK
+	if (odp_unlikely(pool_id > ODP_CONFIG_BUFFER_POOLS)) {
+		ODP_ERR("odp_buf_to_hdr: Bad pool id\n");
+		return NULL;
+	}
+#endif
+
+	pool = get_pool_entry(pool_id);
+
+#ifdef POOL_ERROR_CHECK
+	if (odp_unlikely(index > pool->num_bufs - 1)) {
+		ODP_ERR("odp_buf_to_hdr: Bad buffer index\n");
+		return NULL;
+	}
+#endif
+
+	return (odp_buffer_hdr_t *)(pool->pool_base_addr +
+				    (index * ODP_CACHE_LINE_SIZE));
+}
+
+static inline uint32_t odp_buffer_refcount(odp_buffer_hdr_t *buf)
+{
+	return buf->ref_count;
+}
+
+static inline uint32_t odp_buffer_incr_refcount(odp_buffer_hdr_t *buf,
+						uint32_t val)
+{
+	return odp_atomic_fetch_add_u32(&buf->ref_count, val) + val;
+}
+
+static inline uint32_t odp_buffer_decr_refcount(odp_buffer_hdr_t *buf,
+						uint32_t val)
+{
+	uint32_t tmp;
+
+	tmp = odp_atomic_fetch_sub_u32(&buf->ref_count, val);
+
+	if (tmp < val) {
+		odp_atomic_fetch_add_u32(&buf->ref_count, val - tmp);
+		return 0;
+	} else {
+		return tmp - val;
+	}
+}
+
+static inline odp_buffer_hdr_t *validate_buf(odp_buffer_t buf)
+{
+	odp_buffer_bits_t handle;
+	odp_buffer_hdr_t *buf_hdr;
+	handle.u32 = buf;
+
+	/* For buffer handles, segment index must be 0 */
+	if (handle.seg != 0)
+		return NULL;
+
+	pool_entry_t *pool = odp_pool_to_entry(handle.pool_id);
+
+	/* If pool not created, handle is invalid */
+	if (pool->s.shm == ODP_SHM_INVALID)
+		return NULL;
+
+	/* A valid buffer index must be on stride, and must be in range */
+	if ((handle.index % pool->s.mdata_stride != 0) ||
+	    ((uint32_t)(handle.index / pool->s.mdata_stride) >=
+	     pool->s.num_bufs))
+		return NULL;
+
+	buf_hdr = (odp_buffer_hdr_t *)(pool->s.pool_base_addr +
+				       (handle.index * ODP_CACHE_LINE_SIZE));
+
+	/* Handle is valid, so buffer is valid if it is allocated */
+	if (buf_hdr->segcount == 0)
+		return NULL;
+	else
+		return buf_hdr;
+}
+
+int odp_buffer_snprint(char *str, size_t n, odp_buffer_t buf);
+
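+/*
+ * Map a logical byte offset within a (possibly segmented) buffer to an
+ * address; *seglen is set to the number of contiguously addressable
+ * bytes available at that address, bounded by the caller-supplied limit.
+ */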
+static inline void *buffer_map(odp_buffer_hdr_t *buf,
+			       size_t offset,
+			       size_t *seglen,
+			       size_t limit)
+{
+	int seg_index  = offset / buf->segsize;
+	int seg_offset = offset % buf->segsize;
+	size_t buf_left = limit - offset;
+
+	*seglen = buf_left < buf->segsize ?
+		buf_left : buf->segsize - seg_offset;
+
+	return (void *)(seg_offset + (uint8_t *)buf->addr[seg_index]);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/odp_buffer_internal.h b/platform/linux-generic/include/odp_buffer_internal.h
index 0027bfc..e395dbe 100644
--- a/platform/linux-generic/include/odp_buffer_internal.h
+++ b/platform/linux-generic/include/odp_buffer_internal.h
@@ -1,4 +1,4 @@ 
-/* Copyright (c) 2013, Linaro Limited
+/* Copyright (c) 2013-2014, Linaro Limited
  * All rights reserved.
  *
  * SPDX-License-Identifier:     BSD-3-Clause
@@ -20,26 +20,38 @@  extern "C" {
 
 #include <odp_std_types.h>
 #include <odp_atomic.h>
-#include <odp_buffer_pool.h>
 #include <odp_buffer.h>
 #include <odp_debug.h>
 #include <odp_align.h>
-
-/* TODO: move these to correct files */
-
-typedef uint64_t odp_phys_addr_t;
-
-#define ODP_BUFFER_MAX_INDEX     (ODP_BUFFER_MAX_BUFFERS - 2)
-#define ODP_BUFFER_INVALID_INDEX (ODP_BUFFER_MAX_BUFFERS - 1)
-
-#define ODP_BUFS_PER_CHUNK       16
-#define ODP_BUFS_PER_SCATTER      4
-
-#define ODP_BUFFER_TYPE_CHUNK    0xffff
-
+#include <odp_config.h>
+
+#define ODP_BUFFER_MAX_SEG     (ODP_CONFIG_BUF_MAX_SIZE/ODP_CONFIG_BUF_SEG_SIZE)
+
+ODP_STATIC_ASSERT((ODP_CONFIG_BUF_SEG_SIZE % ODP_CACHE_LINE_SIZE) == 0,
+		  "ODP Segment size must be a multiple of cache line size");
+
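+/* ODP_SEGBITS(x): smallest number of bits that can hold the value x
+ * (i.e. floor(log2(x)) + 1); the division by zero forces a compile-time
+ * error for out-of-range arguments.
+ */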
+#define ODP_SEGBITS(x)				\
+	((x) <    2 ?  1 :			\
+	 ((x) <    4 ?  2 :			\
+	  ((x) <    8 ?  3 :			\
+	   ((x) <   16 ?  4 :			\
+	    ((x) <   32 ?  5 :			\
+	     ((x) <   64 ?  6 :			\
+	      ((x) <  128 ?  7 :		\
+	       ((x) <  256 ?  8 :		\
+		((x) <  512 ?  9 :		\
+		 ((x) < 1024 ? 10 :		\
+		  ((x) < 2048 ? 11 :		\
+		   ((x) < 4096 ? 12 :		\
+		    (0/0)))))))))))))
+
+ODP_STATIC_ASSERT(ODP_SEGBITS(ODP_BUFFER_MAX_SEG) <
+		  ODP_SEGBITS(ODP_CACHE_LINE_SIZE),
+		  "Number of segments must not exceed log of cache line size");
 
 #define ODP_BUFFER_POOL_BITS   4
-#define ODP_BUFFER_INDEX_BITS  (32 - ODP_BUFFER_POOL_BITS)
+#define ODP_BUFFER_SEG_BITS    ODP_SEGBITS(ODP_CACHE_LINE_SIZE)
+#define ODP_BUFFER_INDEX_BITS  (32 - ODP_BUFFER_POOL_BITS - ODP_BUFFER_SEG_BITS)
 #define ODP_BUFFER_MAX_POOLS   (1 << ODP_BUFFER_POOL_BITS)
 #define ODP_BUFFER_MAX_BUFFERS (1 << ODP_BUFFER_INDEX_BITS)
 
@@ -50,73 +62,39 @@  typedef union odp_buffer_bits_t {
 	struct {
 		uint32_t pool_id:ODP_BUFFER_POOL_BITS;
 		uint32_t index:ODP_BUFFER_INDEX_BITS;
+		uint32_t seg:ODP_BUFFER_SEG_BITS;
 	};
 } odp_buffer_bits_t;
 
-
 /* forward declaration */
 struct odp_buffer_hdr_t;
 
-
-/*
- * Scatter/gather list of buffers
- */
-typedef struct odp_buffer_scatter_t {
-	/* buffer pointers */
-	struct odp_buffer_hdr_t *buf[ODP_BUFS_PER_SCATTER];
-	int                      num_bufs;   /* num buffers */
-	int                      pos;        /* position on the list */
-	size_t                   total_len;  /* Total length */
-} odp_buffer_scatter_t;
-
-
-/*
- * Chunk of buffers (in single pool)
- */
-typedef struct odp_buffer_chunk_t {
-	uint32_t num_bufs;                      /* num buffers */
-	uint32_t buf_index[ODP_BUFS_PER_CHUNK]; /* buffers */
-} odp_buffer_chunk_t;
-
-
 /* Common buffer header */
 typedef struct odp_buffer_hdr_t {
 	struct odp_buffer_hdr_t *next;       /* next buf in a list */
-	odp_buffer_bits_t        handle;     /* handle */
-	odp_phys_addr_t          phys_addr;  /* physical data start address */
-	void                    *addr;       /* virtual data start address */
-	uint32_t                 index;	     /* buf index in the pool */
+	odp_buffer_bits_t        buf_hdl;    /* handle */
 	size_t                   size;       /* max data size */
-	size_t                   cur_offset; /* current offset */
 	odp_atomic_u32_t         ref_count;  /* reference count */
-	odp_buffer_scatter_t     scatter;    /* Scatter/gather list */
-	int                      type;       /* type of next header */
+	odp_buffer_type_e        type;       /* type of next header */
 	odp_buffer_pool_t        pool_hdl;   /* buffer pool handle */
-
+	void                    *udata_addr; /* user meta data addr */
+	size_t                   udata_size; /* size of user meta data */
+	uint32_t                 segcount;   /* segment count */
+	uint32_t                 segsize;    /* segment size */
+	void                    *addr[ODP_BUFFER_MAX_SEG]; /* Block addrs */
 } odp_buffer_hdr_t;
 
-/* Ensure next header starts from 8 byte align */
-ODP_STATIC_ASSERT((sizeof(odp_buffer_hdr_t) % 8) == 0, "ODP_BUFFER_HDR_T__SIZE_ERROR");
-
-
-/* Raw buffer header */
-typedef struct {
-	odp_buffer_hdr_t buf_hdr;    /* common buffer header */
-	uint8_t          buf_data[]; /* start of buffer data area */
-} odp_raw_buffer_hdr_t;
-
-
-/* Chunk header */
-typedef struct odp_buffer_chunk_hdr_t {
-	odp_buffer_hdr_t   buf_hdr;
-	odp_buffer_chunk_t chunk;
-} odp_buffer_chunk_hdr_t;
-
-
-int odp_buffer_snprint(char *str, size_t n, odp_buffer_t buf);
+typedef struct odp_buffer_hdr_stride {
+	uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(odp_buffer_hdr_t))];
+} odp_buffer_hdr_stride;
 
-void odp_buffer_copy_scatter(odp_buffer_t buf_dst, odp_buffer_t buf_src);
+typedef struct odp_buf_blk_t {
+	struct odp_buf_blk_t *next;
+	struct odp_buf_blk_t *prev;
+} odp_buf_blk_t;
 
+/* Forward declaration */
+odp_buffer_t buffer_alloc(odp_buffer_pool_t pool, size_t size);
 
 #ifdef __cplusplus
 }
diff --git a/platform/linux-generic/include/odp_buffer_pool_internal.h b/platform/linux-generic/include/odp_buffer_pool_internal.h
index e0210bd..28ef8f2 100644
--- a/platform/linux-generic/include/odp_buffer_pool_internal.h
+++ b/platform/linux-generic/include/odp_buffer_pool_internal.h
@@ -24,6 +24,7 @@  extern "C" {
 #include <odp_align.h>
 #include <odp_hints.h>
 #include <odp_config.h>
+#include <odp_shared_memory.h>
 #include <odp_debug.h>
 
 /* Use ticketlock instead of spinlock */
@@ -47,66 +48,146 @@  struct pool_entry_s {
 	odp_spinlock_t          lock ODP_ALIGNED_CACHE;
 #endif
 
-	odp_buffer_chunk_hdr_t *head;
-	uint64_t                free_bufs;
 	char                    name[ODP_BUFFER_POOL_NAME_LEN];
 
-	odp_buffer_pool_t       pool_hdl ODP_ALIGNED_CACHE;
-	uintptr_t               buf_base;
-	size_t                  buf_size;
-	size_t                  buf_offset;
+	odp_buffer_pool_t       pool_hdl;
+	odp_buffer_pool_param_t params;
+	odp_buffer_pool_init_t  init_params;
+	odp_shm_t               shm;
+	union {
+		uint32_t all;
+		struct {
+			uint32_t unsegmented:1;
+			uint32_t predefined:1;
+		};
+	} flags;
+	uint8_t                *pool_base_addr;
+	size_t                  pool_size;
+	int                     mdata_stride;
+	uint8_t                *udata_base_addr;
+	int                     buf_udata_size;
+	int                     udata_stride;
+	odp_buffer_hdr_t       *buf_freelist;
+	uint8_t                *blk_freelist;
+	odp_atomic_u32_t        bufcount;
 	uint64_t                num_bufs;
-	void                   *pool_base_addr;
-	uint64_t                pool_size;
-	size_t                  user_size;
-	size_t                  user_align;
-	int                     buf_type;
-	size_t                  hdr_size;
+	size_t                  seg_size;
+	size_t                  high_wm;
+	size_t                  low_wm;
+	size_t                  headroom;
+	size_t                  tailroom;
 };
 
+typedef union pool_entry_u {
+	struct pool_entry_s s;
+
+	uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(struct pool_entry_s))];
+
+} pool_entry_t;
 
 extern void *pool_entry_ptr[];
 
+#if UINTPTR_MAX == 0xffffffffffffffff
+#define odp_at odp_atomic_u64_t
+#define odp_cs(p, o, n) odp_atomic_cmpset_u64((odp_at *)(p), \
+					      (uint64_t)(o), (uint64_t)(n))
+#else
+#define odp_at odp_atomic_u32_t
+#define odp_cs(p, o, n) odp_atomic_cmpset_u32((odp_at *)(p), \
+					      (uint32_t)(o), (uint32_t)(n))
+#endif
 
-static inline void *get_pool_entry(uint32_t pool_id)
+/* This macro suggested by Shmulik Ladkani */
+#define odp_ref(p) \
+	((typeof(p))(uintptr_t) *(volatile typeof(p) const *)&(p))
+
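+/*
+ * The block and buffer freelists are lock-free LIFO stacks: push and pop
+ * retry a compare-and-swap of the list head (odp_cs) until it succeeds.
+ */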
+static inline void *get_blk(struct pool_entry_s *pool)
 {
-	return pool_entry_ptr[pool_id];
+	void *oldhead, *newhead;
+
+	do {
+		oldhead = odp_ref(pool->blk_freelist);
+		if (oldhead == NULL)
+			break;
+		newhead = ((odp_buf_blk_t *)oldhead)->next;
+	} while (odp_cs(pool->blk_freelist, oldhead, newhead) == 0);
+
+	return (void *)oldhead;
 }
 
+static inline void ret_blk(struct pool_entry_s *pool, void *block)
+{
+	void *oldhead;
 
-static inline odp_buffer_hdr_t *odp_buf_to_hdr(odp_buffer_t buf)
+	do {
+		oldhead = odp_ref(pool->blk_freelist);
+		((odp_buf_blk_t *)block)->next = oldhead;
+	} while (odp_cs(pool->blk_freelist, oldhead, block) == 0);
+}
+
+static inline odp_buffer_hdr_t *get_buf(struct pool_entry_s *pool)
 {
-	odp_buffer_bits_t handle;
-	uint32_t pool_id;
-	uint32_t index;
-	struct pool_entry_s *pool;
-	odp_buffer_hdr_t *hdr;
-
-	handle.u32 = buf;
-	pool_id    = handle.pool_id;
-	index      = handle.index;
-
-#ifdef POOL_ERROR_CHECK
-	if (odp_unlikely(pool_id > ODP_CONFIG_BUFFER_POOLS)) {
-		ODP_ERR("odp_buf_to_hdr: Bad pool id\n");
-		return NULL;
+	odp_buffer_hdr_t *oldhead, *newhead;
+
+	do {
+		oldhead = odp_ref(pool->buf_freelist);
+		if (oldhead == NULL)
+			break;
+		newhead = oldhead->next;
+	} while (odp_cs(pool->buf_freelist, oldhead, newhead) == 0);
+
+	if (oldhead != NULL) {
+		oldhead->next = oldhead;
+		odp_atomic_inc_u32(&pool->bufcount);
 	}
-#endif
 
-	pool = get_pool_entry(pool_id);
+	return (void *)oldhead;
+}
 
-#ifdef POOL_ERROR_CHECK
-	if (odp_unlikely(index > pool->num_bufs - 1)) {
-		ODP_ERR("odp_buf_to_hdr: Bad buffer index\n");
-		return NULL;
-	}
-#endif
+static inline void ret_buf(struct pool_entry_s *pool, odp_buffer_hdr_t *buf)
+{
+	odp_buffer_hdr_t *oldhead;
 
-	hdr = (odp_buffer_hdr_t *)(pool->buf_base + index * pool->buf_size);
+	while (buf->segcount > 0)
+		ret_blk(pool, buf->addr[--buf->segcount]);
 
-	return hdr;
+	do {
+		oldhead = odp_ref(pool->buf_freelist);
+		buf->next = oldhead;
+	} while (odp_cs(pool->buf_freelist, oldhead, buf) == 0);
+
+	odp_atomic_dec_u32(&pool->bufcount);
 }
 
+static inline odp_buffer_pool_t pool_index_to_handle(uint32_t pool_id)
+{
+	return pool_id + 1;
+}
+
+static inline uint32_t pool_handle_to_index(odp_buffer_pool_t pool_hdl)
+{
+	return pool_hdl - 1;
+}
+
+static inline void *get_pool_entry(uint32_t pool_id)
+{
+	return pool_entry_ptr[pool_id];
+}
+
+static inline pool_entry_t *odp_pool_to_entry(odp_buffer_pool_t pool)
+{
+	return (pool_entry_t *)get_pool_entry(pool_handle_to_index(pool));
+}
+
+static inline pool_entry_t *odp_buf_to_pool(odp_buffer_hdr_t *buf)
+{
+	return odp_pool_to_entry(buf->pool_hdl);
+}
+
+static inline size_t odp_buffer_pool_segment_size(odp_buffer_pool_t pool)
+{
+	return odp_pool_to_entry(pool)->s.seg_size;
+}
 
 #ifdef __cplusplus
 }
diff --git a/platform/linux-generic/include/odp_packet_internal.h b/platform/linux-generic/include/odp_packet_internal.h
index 49c59b2..656c56c 100644
--- a/platform/linux-generic/include/odp_packet_internal.h
+++ b/platform/linux-generic/include/odp_packet_internal.h
@@ -22,6 +22,7 @@  extern "C" {
 #include <odp_debug.h>
 #include <odp_buffer_internal.h>
 #include <odp_buffer_pool_internal.h>
+#include <odp_buffer_inlines.h>
 #include <odp_packet.h>
 #include <odp_packet_io.h>
 
@@ -43,6 +44,7 @@  typedef union {
 		uint32_t vlan:1;      /**< VLAN hdr found */
 		uint32_t vlan_qinq:1; /**< Stacked VLAN found, QinQ */
 
+		uint32_t snap:1;      /**< SNAP */
 		uint32_t arp:1;       /**< ARP */
 
 		uint32_t ipv4:1;      /**< IPv4 */
@@ -53,7 +55,7 @@  typedef union {
 
 		uint32_t udp:1;       /**< UDP */
 		uint32_t tcp:1;       /**< TCP */
-		uint32_t sctp:1;      /**< SCTP */
+		uint32_t tcpopt:1;    /**< TCP Options present */
 		uint32_t icmp:1;      /**< ICMP */
 	};
 } input_flags_t;
@@ -69,7 +71,9 @@  typedef union {
 
 	struct {
 		/* Bitfield flags for each detected error */
+		uint32_t app_error:1; /**< Error bit for application use */
 		uint32_t frame_len:1; /**< Frame length error */
+		uint32_t snap_len:1;  /**< Snap length error */
 		uint32_t l2_chksum:1; /**< L2 checksum error, checks TBD */
 		uint32_t ip_err:1;    /**< IP error,  checks TBD */
 		uint32_t tcp_err:1;   /**< TCP error, checks TBD */
@@ -88,7 +92,10 @@  typedef union {
 
 	struct {
 		/* Bitfield flags for each output option */
-		uint32_t l4_chksum:1; /**< Request L4 checksum calculation */
+		uint32_t l3_chksum_set:1; /**< L3 chksum bit is valid */
+		uint32_t l3_chksum:1;     /**< L3 chksum override */
+		uint32_t l4_chksum_set:1; /**< L4 chksum bit is valid */
+		uint32_t l4_chksum:1;     /**< L4 chksum override */
 	};
 } output_flags_t;
 
@@ -101,29 +108,33 @@  typedef struct {
 	/* common buffer header */
 	odp_buffer_hdr_t buf_hdr;
 
-	input_flags_t  input_flags;
 	error_flags_t  error_flags;
+	input_flags_t  input_flags;
 	output_flags_t output_flags;
 
-	uint32_t frame_offset; /**< offset to start of frame, even on error */
 	uint32_t l2_offset; /**< offset to L2 hdr, e.g. Eth */
 	uint32_t l3_offset; /**< offset to L3 hdr, e.g. IPv4, IPv6 */
 	uint32_t l4_offset; /**< offset to L4 hdr (TCP, UDP, SCTP, also ICMP) */
+	uint32_t payload_offset; /**< offset to payload */
 
-	uint32_t frame_len;
+	uint32_t vlan_s_tag;     /**< Parsed 1st VLAN header (S-TAG) */
+	uint32_t vlan_c_tag;     /**< Parsed 2nd VLAN header (C-TAG) */
+	uint32_t l3_protocol;    /**< Parsed L3 protocol */
+	uint32_t l3_len;         /**< Layer 3 length */
+	uint32_t l4_protocol;    /**< Parsed L4 protocol */
+	uint32_t l4_len;         /**< Layer 4 length */
 
-	uint64_t user_ctx;        /* user context */
+	uint32_t frame_len;
+	uint32_t headroom;
+	uint32_t tailroom;
 
 	odp_pktio_t input;
-
-	uint32_t pad;
-	uint8_t  buf_data[]; /* start of buffer data area */
 } odp_packet_hdr_t;
 
-ODP_STATIC_ASSERT(sizeof(odp_packet_hdr_t) == ODP_OFFSETOF(odp_packet_hdr_t, buf_data),
-	   "ODP_PACKET_HDR_T__SIZE_ERR");
-ODP_STATIC_ASSERT(sizeof(odp_packet_hdr_t) % sizeof(uint64_t) == 0,
-	   "ODP_PACKET_HDR_T__SIZE_ERR2");
+typedef struct odp_packet_hdr_stride {
+	uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(odp_packet_hdr_t))];
+} odp_packet_hdr_stride;
+
 
 /**
  * Return the packet header
@@ -133,10 +144,100 @@  static inline odp_packet_hdr_t *odp_packet_hdr(odp_packet_t pkt)
 	return (odp_packet_hdr_t *)odp_buf_to_hdr((odp_buffer_t)pkt);
 }
 
+static inline void odp_packet_set_len(odp_packet_t pkt, size_t len)
+{
+	odp_packet_hdr(pkt)->frame_len = len;
+}
+
+static inline odp_packet_hdr_t *odp_packet_hdr_from_buf_hdr(odp_buffer_hdr_t
+							    *buf_hdr)
+{
+	return (odp_packet_hdr_t *)buf_hdr;
+}
+
+static inline odp_buffer_hdr_t *odp_packet_hdr_to_buf_hdr(odp_packet_hdr_t *pkt)
+{
+	return &pkt->buf_hdr;
+}
+
+static inline odp_packet_t odp_packet_from_buf_internal(odp_packet_t buf)
+{
+	return (odp_packet_t)buf;
+}
+
+static inline odp_buffer_t odp_packet_to_buf_internal(odp_packet_t pkt)
+{
+	return (odp_buffer_t)pkt;
+}
+
+static inline void packet_init(pool_entry_t *pool,
+			       odp_packet_hdr_t *pkt_hdr,
+			       size_t size)
+{
+	/* Reset parser metadata */
+	pkt_hdr->error_flags.all  = 0;
+	pkt_hdr->input_flags.all  = 0;
+	pkt_hdr->output_flags.all = 0;
+	pkt_hdr->l2_offset        = 0;
+	pkt_hdr->l3_offset        = 0;
+	pkt_hdr->l4_offset        = 0;
+	pkt_hdr->payload_offset   = 0;
+	pkt_hdr->vlan_s_tag       = 0;
+	pkt_hdr->vlan_c_tag       = 0;
+	pkt_hdr->l3_protocol      = 0;
+	pkt_hdr->l4_protocol      = 0;
+
+	/*
+	 * Packet headroom is set from the pool's headroom.
+	 * Packet tailroom is rounded up to fill the last
+	 * segment occupied by the allocated length.
+	 */
+	pkt_hdr->frame_len = size;
+	pkt_hdr->headroom  = pool->s.headroom;
+	pkt_hdr->tailroom  =
+		(pool->s.seg_size * pkt_hdr->buf_hdr.segcount) -
+		(pool->s.headroom + size);
+}
+
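+/* Clamp a parse offset at zero when a pull removes more than the offset */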
+#define pull_offset(x, len) ((x) = (x) < (len) ? 0 : (x) - (len))
+
+static inline void push_head(odp_packet_hdr_t *pkt_hdr, size_t len)
+{
+	pkt_hdr->headroom  -= len;
+	pkt_hdr->frame_len += len;
+	pkt_hdr->l2_offset += len;
+	pkt_hdr->l3_offset += len;
+	pkt_hdr->l4_offset += len;
+	pkt_hdr->payload_offset += len;
+}
+
+static inline void pull_head(odp_packet_hdr_t *pkt_hdr, size_t len)
+{
+	pkt_hdr->headroom  += len;
+	pkt_hdr->frame_len -= len;
+	pull_offset(pkt_hdr->l2_offset, len);
+	pull_offset(pkt_hdr->l3_offset, len);
+	pull_offset(pkt_hdr->l4_offset, len);
+	pull_offset(pkt_hdr->payload_offset, len);
+}
+
+static inline void push_tail(odp_packet_hdr_t *pkt_hdr, size_t len)
+{
+	pkt_hdr->tailroom  -= len;
+	pkt_hdr->frame_len += len;
+}
+
+
+static inline void pull_tail(odp_packet_hdr_t *pkt_hdr, size_t len)
+{
+	pkt_hdr->tailroom  += len;
+	pkt_hdr->frame_len -= len;
+}
+
 /**
  * Parse packet and set internal metadata
  */
-void odp_packet_parse(odp_packet_t pkt, size_t len, size_t l2_offset);
+void odph_packet_parse(odp_packet_t pkt, size_t len, size_t l2_offset);
 
 #ifdef __cplusplus
 }
diff --git a/platform/linux-generic/include/odp_timer_internal.h b/platform/linux-generic/include/odp_timer_internal.h
index ad28f53..d06677a 100644
--- a/platform/linux-generic/include/odp_timer_internal.h
+++ b/platform/linux-generic/include/odp_timer_internal.h
@@ -21,8 +21,9 @@  extern "C" {
 #include <odp_std_types.h>
 #include <odp_queue.h>
 #include <odp_buffer.h>
-#include <odp_buffer_internal.h>
 #include <odp_buffer_pool_internal.h>
+#include <odp_buffer_internal.h>
+#include <odp_buffer_inlines.h>
 #include <odp_timer.h>
 
 struct timeout_t;
@@ -48,17 +49,11 @@  typedef struct odp_timeout_hdr_t {
 
 	timeout_t meta;
 
-	uint8_t buf_data[];
 } odp_timeout_hdr_t;
 
-
-
-ODP_STATIC_ASSERT(sizeof(odp_timeout_hdr_t) ==
-	   ODP_OFFSETOF(odp_timeout_hdr_t, buf_data),
-	   "ODP_TIMEOUT_HDR_T__SIZE_ERR");
-
-ODP_STATIC_ASSERT(sizeof(odp_timeout_hdr_t) % sizeof(uint64_t) == 0,
-	   "ODP_TIMEOUT_HDR_T__SIZE_ERR2");
+typedef struct odp_timeout_hdr_stride {
+	uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(odp_timeout_hdr_t))];
+} odp_timeout_hdr_stride;
 
 
 /**
diff --git a/platform/linux-generic/odp_buffer.c b/platform/linux-generic/odp_buffer.c
index e54e0e7..882cf45 100644
--- a/platform/linux-generic/odp_buffer.c
+++ b/platform/linux-generic/odp_buffer.c
@@ -5,21 +5,30 @@ 
  */
 
 #include <odp_buffer.h>
-#include <odp_buffer_internal.h>
 #include <odp_buffer_pool_internal.h>
+#include <odp_buffer_internal.h>
+#include <odp_buffer_inlines.h>
 
 #include <string.h>
 #include <stdio.h>
 
+void *odp_buffer_offset_map(odp_buffer_t buf, size_t offset, size_t *seglen)
+{
+	odp_buffer_hdr_t *buf_hdr = odp_buf_to_hdr(buf);
+
+	if (offset > buf_hdr->size)
+		return NULL;
+
+	return buffer_map(buf_hdr, offset, seglen, buf_hdr->size);
+}
 
 void *odp_buffer_addr(odp_buffer_t buf)
 {
 	odp_buffer_hdr_t *hdr = odp_buf_to_hdr(buf);
 
-	return hdr->addr;
+	return hdr->addr[0];
 }
 
-
 size_t odp_buffer_size(odp_buffer_t buf)
 {
 	odp_buffer_hdr_t *hdr = odp_buf_to_hdr(buf);
@@ -27,24 +36,32 @@  size_t odp_buffer_size(odp_buffer_t buf)
 	return hdr->size;
 }
 
-
-int odp_buffer_type(odp_buffer_t buf)
+odp_buffer_type_e odp_buffer_type(odp_buffer_t buf)
 {
 	odp_buffer_hdr_t *hdr = odp_buf_to_hdr(buf);
 
 	return hdr->type;
 }
 
-
-int odp_buffer_is_valid(odp_buffer_t buf)
+void *odp_buffer_udata(odp_buffer_t buf, size_t *usize)
 {
-	odp_buffer_bits_t handle;
+	odp_buffer_hdr_t *hdr = odp_buf_to_hdr(buf);
 
-	handle.u32 = buf;
+	*usize = hdr->udata_size;
+	return hdr->udata_addr;
+}
+
+void *odp_buffer_udata_addr(odp_buffer_t buf)
+{
+	odp_buffer_hdr_t *hdr = odp_buf_to_hdr(buf);
 
-	return (handle.index != ODP_BUFFER_INVALID_INDEX);
+	return hdr->udata_addr;
 }
 
+int odp_buffer_is_valid(odp_buffer_t buf)
+{
+	return validate_buf(buf) != NULL;
+}
 
 int odp_buffer_snprint(char *str, size_t n, odp_buffer_t buf)
 {
@@ -63,27 +80,13 @@  int odp_buffer_snprint(char *str, size_t n, odp_buffer_t buf)
 	len += snprintf(&str[len], n-len,
 			"  pool         %i\n",        hdr->pool_hdl);
 	len += snprintf(&str[len], n-len,
-			"  index        %"PRIu32"\n", hdr->index);
-	len += snprintf(&str[len], n-len,
-			"  phy_addr     %"PRIu64"\n", hdr->phys_addr);
-	len += snprintf(&str[len], n-len,
-			"  addr         %p\n",        hdr->addr);
+			"  addr         %p\n",        hdr->addr[0]);
 	len += snprintf(&str[len], n-len,
 			"  size         %zu\n",       hdr->size);
 	len += snprintf(&str[len], n-len,
-			"  cur_offset   %zu\n",       hdr->cur_offset);
-	len += snprintf(&str[len], n-len,
 			"  ref_count    %i\n",        hdr->ref_count);
 	len += snprintf(&str[len], n-len,
 			"  type         %i\n",        hdr->type);
-	len += snprintf(&str[len], n-len,
-			"  Scatter list\n");
-	len += snprintf(&str[len], n-len,
-			"    num_bufs   %i\n",        hdr->scatter.num_bufs);
-	len += snprintf(&str[len], n-len,
-			"    pos        %i\n",        hdr->scatter.pos);
-	len += snprintf(&str[len], n-len,
-			"    total_len  %zu\n",       hdr->scatter.total_len);
 
 	return len;
 }
@@ -100,9 +103,3 @@  void odp_buffer_print(odp_buffer_t buf)
 
 	printf("\n%s\n", str);
 }
-
-void odp_buffer_copy_scatter(odp_buffer_t buf_dst, odp_buffer_t buf_src)
-{
-	(void)buf_dst;
-	(void)buf_src;
-}
diff --git a/platform/linux-generic/odp_buffer_pool.c b/platform/linux-generic/odp_buffer_pool.c
index a48d7d6..b54dd81 100644
--- a/platform/linux-generic/odp_buffer_pool.c
+++ b/platform/linux-generic/odp_buffer_pool.c
@@ -6,8 +6,9 @@ 
 
 #include <odp_std_types.h>
 #include <odp_buffer_pool.h>
-#include <odp_buffer_pool_internal.h>
 #include <odp_buffer_internal.h>
+#include <odp_buffer_pool_internal.h>
+#include <odp_buffer_inlines.h>
 #include <odp_packet_internal.h>
 #include <odp_timer_internal.h>
 #include <odp_shared_memory.h>
@@ -16,6 +17,7 @@ 
 #include <odp_config.h>
 #include <odp_hints.h>
 #include <odp_debug.h>
+#include <odph_eth.h>
 
 #include <string.h>
 #include <stdlib.h>
@@ -33,36 +35,26 @@ 
 #define LOCK_INIT(a) odp_spinlock_init(a)
 #endif
 
-
-#if ODP_CONFIG_BUFFER_POOLS > ODP_BUFFER_MAX_POOLS
-#error ODP_CONFIG_BUFFER_POOLS > ODP_BUFFER_MAX_POOLS
-#endif
-
-#define NULL_INDEX ((uint32_t)-1)
-
-union buffer_type_any_u {
+typedef union buffer_type_any_u {
 	odp_buffer_hdr_t  buf;
 	odp_packet_hdr_t  pkt;
 	odp_timeout_hdr_t tmo;
-};
-
-ODP_STATIC_ASSERT((sizeof(union buffer_type_any_u) % 8) == 0,
-	   "BUFFER_TYPE_ANY_U__SIZE_ERR");
+} odp_anybuf_t;
 
 /* Any buffer type header */
 typedef struct {
 	union buffer_type_any_u any_hdr;    /* any buffer type */
-	uint8_t                 buf_data[]; /* start of buffer data area */
 } odp_any_buffer_hdr_t;
 
+typedef struct odp_any_hdr_stride {
+	uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(odp_any_buffer_hdr_t))];
+} odp_any_hdr_stride;
 
-typedef union pool_entry_u {
-	struct pool_entry_s s;
-
-	uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(struct pool_entry_s))];
-
-} pool_entry_t;
+#if ODP_CONFIG_BUFFER_POOLS > ODP_BUFFER_MAX_POOLS
+#error ODP_CONFIG_BUFFER_POOLS > ODP_BUFFER_MAX_POOLS
+#endif
 
+#define NULL_INDEX ((uint32_t)-1)
 
 typedef struct pool_table_t {
 	pool_entry_t pool[ODP_CONFIG_BUFFER_POOLS];
@@ -76,39 +68,6 @@  static pool_table_t *pool_tbl;
 /* Pool entry pointers (for inlining) */
 void *pool_entry_ptr[ODP_CONFIG_BUFFER_POOLS];
 
-
-static __thread odp_buffer_chunk_hdr_t *local_chunk[ODP_CONFIG_BUFFER_POOLS];
-
-
-static inline odp_buffer_pool_t pool_index_to_handle(uint32_t pool_id)
-{
-	return pool_id + 1;
-}
-
-
-static inline uint32_t pool_handle_to_index(odp_buffer_pool_t pool_hdl)
-{
-	return pool_hdl -1;
-}
-
-
-static inline void set_handle(odp_buffer_hdr_t *hdr,
-			      pool_entry_t *pool, uint32_t index)
-{
-	odp_buffer_pool_t pool_hdl = pool->s.pool_hdl;
-	uint32_t          pool_id  = pool_handle_to_index(pool_hdl);
-
-	if (pool_id >= ODP_CONFIG_BUFFER_POOLS)
-		ODP_ABORT("set_handle: Bad pool handle %u\n", pool_hdl);
-
-	if (index > ODP_BUFFER_MAX_INDEX)
-		ODP_ERR("set_handle: Bad buffer index\n");
-
-	hdr->handle.pool_id = pool_id;
-	hdr->handle.index   = index;
-}
-
-
 int odp_buffer_pool_init_global(void)
 {
 	uint32_t i;
@@ -142,269 +101,143 @@  int odp_buffer_pool_init_global(void)
 	return 0;
 }
 
-
-static odp_buffer_hdr_t *index_to_hdr(pool_entry_t *pool, uint32_t index)
-{
-	odp_buffer_hdr_t *hdr;
-
-	hdr = (odp_buffer_hdr_t *)(pool->s.buf_base + index * pool->s.buf_size);
-	return hdr;
-}
-
-
-static void add_buf_index(odp_buffer_chunk_hdr_t *chunk_hdr, uint32_t index)
-{
-	uint32_t i = chunk_hdr->chunk.num_bufs;
-	chunk_hdr->chunk.buf_index[i] = index;
-	chunk_hdr->chunk.num_bufs++;
-}
-
-
-static uint32_t rem_buf_index(odp_buffer_chunk_hdr_t *chunk_hdr)
+/**
+ * Buffer pool creation
+ */
+odp_buffer_pool_t odp_buffer_pool_create(const char *name,
+					 odp_buffer_pool_param_t *params,
+					 odp_buffer_pool_init_t *init_params)
 {
-	uint32_t index;
+	odp_buffer_pool_t pool_hdl = ODP_BUFFER_POOL_INVALID;
+	pool_entry_t *pool;
 	uint32_t i;
+	if (params->buf_type != ODP_BUFFER_TYPE_PACKET)
+		params->buf_opts |= ODP_BUFFER_OPTS_UNSEGMENTED;
 
-	i = chunk_hdr->chunk.num_bufs - 1;
-	index = chunk_hdr->chunk.buf_index[i];
-	chunk_hdr->chunk.num_bufs--;
-	return index;
-}
-
+	int unsegmented = ((params->buf_opts & ODP_BUFFER_OPTS_UNSEGMENTED) ==
+			   ODP_BUFFER_OPTS_UNSEGMENTED);
 
-static odp_buffer_chunk_hdr_t *next_chunk(pool_entry_t *pool,
-					  odp_buffer_chunk_hdr_t *chunk_hdr)
-{
-	uint32_t index;
-
-	index = chunk_hdr->chunk.buf_index[ODP_BUFS_PER_CHUNK-1];
-	if (index == NULL_INDEX)
-		return NULL;
-	else
-		return (odp_buffer_chunk_hdr_t *)index_to_hdr(pool, index);
-}
+	uint32_t udata_stride =
+		ODP_CACHE_LINE_SIZE_ROUNDUP(init_params->udata_size);
 
+	uint32_t blk_size, buf_stride;
 
-static odp_buffer_chunk_hdr_t *rem_chunk(pool_entry_t *pool)
-{
-	odp_buffer_chunk_hdr_t *chunk_hdr;
-
-	chunk_hdr = pool->s.head;
-	if (chunk_hdr == NULL) {
-		/* Pool is empty */
-		return NULL;
-	}
-
-	pool->s.head = next_chunk(pool, chunk_hdr);
-	pool->s.free_bufs -= ODP_BUFS_PER_CHUNK;
-
-	/* unlink */
-	rem_buf_index(chunk_hdr);
-	return chunk_hdr;
-}
-
-
-static void add_chunk(pool_entry_t *pool, odp_buffer_chunk_hdr_t *chunk_hdr)
-{
-	if (pool->s.head) /* link pool head to the chunk */
-		add_buf_index(chunk_hdr, pool->s.head->buf_hdr.index);
-	else
-		add_buf_index(chunk_hdr, NULL_INDEX);
-
-	pool->s.head = chunk_hdr;
-	pool->s.free_bufs += ODP_BUFS_PER_CHUNK;
-}
-
-
-static void check_align(pool_entry_t *pool, odp_buffer_hdr_t *hdr)
-{
-	if (!ODP_ALIGNED_CHECK_POWER_2(hdr->addr, pool->s.user_align)) {
-		ODP_ABORT("check_align: user data align error %p, align %zu\n",
-			  hdr->addr, pool->s.user_align);
-	}
-
-	if (!ODP_ALIGNED_CHECK_POWER_2(hdr, ODP_CACHE_LINE_SIZE)) {
-		ODP_ABORT("check_align: hdr align error %p, align %i\n",
-			  hdr, ODP_CACHE_LINE_SIZE);
-	}
-}
-
-
-static void fill_hdr(void *ptr, pool_entry_t *pool, uint32_t index,
-		     int buf_type)
-{
-	odp_buffer_hdr_t *hdr = (odp_buffer_hdr_t *)ptr;
-	size_t size = pool->s.hdr_size;
-	uint8_t *buf_data;
-
-	if (buf_type == ODP_BUFFER_TYPE_CHUNK)
-		size = sizeof(odp_buffer_chunk_hdr_t);
-
-	switch (pool->s.buf_type) {
-		odp_raw_buffer_hdr_t *raw_hdr;
-		odp_packet_hdr_t *packet_hdr;
-		odp_timeout_hdr_t *tmo_hdr;
-		odp_any_buffer_hdr_t *any_hdr;
-
+	switch (params->buf_type) {
 	case ODP_BUFFER_TYPE_RAW:
-		raw_hdr  = ptr;
-		buf_data = raw_hdr->buf_data;
+		blk_size = params->buf_size;
+		buf_stride = sizeof(odp_buffer_hdr_stride);
 		break;
+
 	case ODP_BUFFER_TYPE_PACKET:
-		packet_hdr = ptr;
-		buf_data   = packet_hdr->buf_data;
+		if (unsegmented)
+			blk_size =
+				ODP_CACHE_LINE_SIZE_ROUNDUP(params->buf_size);
+		else
+			blk_size = ODP_ALIGN_ROUNDUP(params->buf_size,
+						     ODP_CONFIG_BUF_SEG_SIZE);
+		buf_stride = sizeof(odp_packet_hdr_stride);
 		break;
+
 	case ODP_BUFFER_TYPE_TIMEOUT:
-		tmo_hdr  = ptr;
-		buf_data = tmo_hdr->buf_data;
+		blk_size = 0;  /* Timeouts have no block data, only metadata */
+		buf_stride = sizeof(odp_timeout_hdr_stride);
 		break;
+
 	case ODP_BUFFER_TYPE_ANY:
-		any_hdr  = ptr;
-		buf_data = any_hdr->buf_data;
+		if (unsegmented)
+			blk_size =
+				ODP_CACHE_LINE_SIZE_ROUNDUP(params->buf_size);
+		else
+			blk_size = ODP_ALIGN_ROUNDUP(params->buf_size,
+						     ODP_CONFIG_BUF_SEG_SIZE);
+		buf_stride = sizeof(odp_any_hdr_stride);
 		break;
-	default:
-		ODP_ABORT("Bad buffer type\n");
-	}
-
-	memset(hdr, 0, size);
-
-	set_handle(hdr, pool, index);
-
-	hdr->addr     = &buf_data[pool->s.buf_offset - pool->s.hdr_size];
-	hdr->index    = index;
-	hdr->size     = pool->s.user_size;
-	hdr->pool_hdl = pool->s.pool_hdl;
-	hdr->type     = buf_type;
-
-	check_align(pool, hdr);
-}
-
-
-static void link_bufs(pool_entry_t *pool)
-{
-	odp_buffer_chunk_hdr_t *chunk_hdr;
-	size_t hdr_size;
-	size_t data_size;
-	size_t data_align;
-	size_t tot_size;
-	size_t offset;
-	size_t min_size;
-	uint64_t pool_size;
-	uintptr_t buf_base;
-	uint32_t index;
-	uintptr_t pool_base;
-	int buf_type;
-
-	buf_type   = pool->s.buf_type;
-	data_size  = pool->s.user_size;
-	data_align = pool->s.user_align;
-	pool_size  = pool->s.pool_size;
-	pool_base  = (uintptr_t) pool->s.pool_base_addr;
-
-	if (buf_type == ODP_BUFFER_TYPE_RAW) {
-		hdr_size = sizeof(odp_raw_buffer_hdr_t);
-	} else if (buf_type == ODP_BUFFER_TYPE_PACKET) {
-		hdr_size = sizeof(odp_packet_hdr_t);
-	} else if (buf_type == ODP_BUFFER_TYPE_TIMEOUT) {
-		hdr_size = sizeof(odp_timeout_hdr_t);
-	} else if (buf_type == ODP_BUFFER_TYPE_ANY) {
-		hdr_size = sizeof(odp_any_buffer_hdr_t);
-	} else
-		ODP_ABORT("odp_buffer_pool_create: Bad type %i\n", buf_type);
-
-
-	/* Chunk must fit into buffer data area.*/
-	min_size = sizeof(odp_buffer_chunk_hdr_t) - hdr_size;
-	if (data_size < min_size)
-		data_size = min_size;
-
-	/* Roundup data size to full cachelines */
-	data_size = ODP_CACHE_LINE_SIZE_ROUNDUP(data_size);
-
-	/* Min cacheline alignment for buffer header and data */
-	data_align = ODP_CACHE_LINE_SIZE_ROUNDUP(data_align);
-	offset     = ODP_CACHE_LINE_SIZE_ROUNDUP(hdr_size);
-
-	/* Multiples of cacheline size */
-	if (data_size > data_align)
-		tot_size = data_size + offset;
-	else
-		tot_size = data_align + offset;
-
-	/* First buffer */
-	buf_base = ODP_ALIGN_ROUNDUP(pool_base + offset, data_align) - offset;
-
-	pool->s.hdr_size   = hdr_size;
-	pool->s.buf_base   = buf_base;
-	pool->s.buf_size   = tot_size;
-	pool->s.buf_offset = offset;
-	index = 0;
-
-	chunk_hdr = (odp_buffer_chunk_hdr_t *)index_to_hdr(pool, index);
-	pool->s.head   = NULL;
-	pool_size     -= buf_base - pool_base;
-
-	while (pool_size > ODP_BUFS_PER_CHUNK * tot_size) {
-		int i;
-
-		fill_hdr(chunk_hdr, pool, index, ODP_BUFFER_TYPE_CHUNK);
-
-		index++;
-
-		for (i = 0; i < ODP_BUFS_PER_CHUNK - 1; i++) {
-			odp_buffer_hdr_t *hdr = index_to_hdr(pool, index);
-
-			fill_hdr(hdr, pool, index, buf_type);
-
-			add_buf_index(chunk_hdr, index);
-			index++;
-		}
-
-		add_chunk(pool, chunk_hdr);
 
-		chunk_hdr = (odp_buffer_chunk_hdr_t *)index_to_hdr(pool,
-								   index);
-		pool->s.num_bufs += ODP_BUFS_PER_CHUNK;
-		pool_size -=  ODP_BUFS_PER_CHUNK * tot_size;
+	default:
+		return ODP_BUFFER_POOL_INVALID;
 	}
-}
-
-
-odp_buffer_pool_t odp_buffer_pool_create(const char *name,
-					 void *base_addr, uint64_t size,
-					 size_t buf_size, size_t buf_align,
-					 int buf_type)
-{
-	odp_buffer_pool_t pool_hdl = ODP_BUFFER_POOL_INVALID;
-	pool_entry_t *pool;
-	uint32_t i;
 
 	for (i = 0; i < ODP_CONFIG_BUFFER_POOLS; i++) {
 		pool = get_pool_entry(i);
 
 		LOCK(&pool->s.lock);
+		if (pool->s.shm != ODP_SHM_INVALID) {
+			UNLOCK(&pool->s.lock);
+			continue;
+		}
 
-		if (pool->s.buf_base == 0) {
-			/* found free pool */
+		/* found free pool */
+		size_t block_size, mdata_size, udata_size;
 
-			strncpy(pool->s.name, name,
-				ODP_BUFFER_POOL_NAME_LEN - 1);
-			pool->s.name[ODP_BUFFER_POOL_NAME_LEN - 1] = 0;
-			pool->s.pool_base_addr = base_addr;
-			pool->s.pool_size      = size;
-			pool->s.user_size      = buf_size;
-			pool->s.user_align     = buf_align;
-			pool->s.buf_type       = buf_type;
+		strncpy(pool->s.name, name,
+			ODP_BUFFER_POOL_NAME_LEN - 1);
+		pool->s.name[ODP_BUFFER_POOL_NAME_LEN - 1] = 0;
 
-			link_bufs(pool);
+		pool->s.params = *params;
+		pool->s.init_params = *init_params;
 
-			UNLOCK(&pool->s.lock);
+		mdata_size = params->buf_num * buf_stride;
+		udata_size = params->buf_num * udata_stride;
+		block_size = params->buf_num * blk_size;
 
-			pool_hdl = pool->s.pool_hdl;
-			break;
+		pool->s.pool_size = ODP_PAGE_SIZE_ROUNDUP(mdata_size +
+							  udata_size +
+							  block_size);
+
+		pool->s.shm = odp_shm_reserve(pool->s.name, pool->s.pool_size,
+					      ODP_PAGE_SIZE, 0);
+		if (pool->s.shm == ODP_SHM_INVALID) {
+			UNLOCK(&pool->s.lock);
+			return ODP_BUFFER_POOL_INVALID;
 		}
 
+		pool->s.pool_base_addr = (uint8_t *)odp_shm_addr(pool->s.shm);
+		pool->s.flags.unsegmented = unsegmented;
+		pool->s.seg_size = unsegmented ?
+			blk_size : ODP_CONFIG_BUF_SEG_SIZE;
+		pool->s.udata_base_addr =
+			pool->s.pool_base_addr + mdata_size;
+		uint8_t *block_base_addr =
+			pool->s.udata_base_addr + udata_size;
+
+		pool->s.bufcount = 0;
+		pool->s.buf_freelist = NULL;
+		pool->s.blk_freelist = NULL;
+
+		uint8_t *buf = pool->s.udata_base_addr - buf_stride;
+		uint8_t *udat = (udata_stride == 0) ? NULL :
+			block_base_addr - udata_stride;
+
+		/* Init buffer common header and add to pool buffer freelist */
+		do {
+			odp_buffer_hdr_t *tmp = (odp_buffer_hdr_t *)buf;
+			tmp->pool_hdl = pool->s.pool_hdl;
+			tmp->size = 0;
+			tmp->type = params->buf_type;
+			tmp->udata_addr = (void *)udat;
+			tmp->udata_size = init_params->udata_size;
+			tmp->segcount = 0;
+			tmp->segsize = pool->s.seg_size;
+			tmp->buf_hdl.handle =
+				odp_buffer_encode_handle((odp_buffer_hdr_t *)
+							 buf);
+			ret_buf(&pool->s, tmp);
+			buf  -= buf_stride;
+			udat -= udata_stride;
+		} while (buf >= pool->s.pool_base_addr);
+
+		/* Form block freelist for pool */
+		uint8_t *blk = pool->s.pool_base_addr + pool->s.pool_size -
+			pool->s.seg_size;
+
+		if (blk_size > 0)
+			do {
+				ret_blk(&pool->s, blk);
+				blk -= pool->s.seg_size;
+			} while (blk >= block_base_addr);
+
 		UNLOCK(&pool->s.lock);
+
+		pool_hdl = pool->s.pool_hdl;
+		break;
 	}
 
 	return pool_hdl;
@@ -431,76 +264,120 @@  odp_buffer_pool_t odp_buffer_pool_lookup(const char *name)
 	return ODP_BUFFER_POOL_INVALID;
 }
 
-
-odp_buffer_t odp_buffer_alloc(odp_buffer_pool_t pool_hdl)
+int odp_buffer_pool_destroy(odp_buffer_pool_t pool_hdl)
 {
-	pool_entry_t *pool;
-	odp_buffer_chunk_hdr_t *chunk;
-	odp_buffer_bits_t handle;
-	uint32_t pool_id = pool_handle_to_index(pool_hdl);
+	pool_entry_t *pool = odp_pool_to_entry(pool_hdl);
 
-	pool  = get_pool_entry(pool_id);
-	chunk = local_chunk[pool_id];
+	if (pool == NULL)
+		return -1;
 
-	if (chunk == NULL) {
-		LOCK(&pool->s.lock);
-		chunk = rem_chunk(pool);
+	LOCK(&pool->s.lock);
+
+	if (pool->s.shm == ODP_SHM_INVALID ||
+	    pool->s.bufcount > 0 ||
+	    pool->s.flags.predefined) {
 		UNLOCK(&pool->s.lock);
+		return -1;
+	}
 
-		if (chunk == NULL)
-			return ODP_BUFFER_INVALID;
+	odp_shm_free(pool->s.shm);
 
-		local_chunk[pool_id] = chunk;
-	}
+	pool->s.shm = ODP_SHM_INVALID;
+	UNLOCK(&pool->s.lock);
 
-	if (chunk->chunk.num_bufs == 0) {
-		/* give the chunk buffer */
-		local_chunk[pool_id] = NULL;
-		chunk->buf_hdr.type = pool->s.buf_type;
+	return 0;
+}
 
-		handle = chunk->buf_hdr.handle;
-	} else {
-		odp_buffer_hdr_t *hdr;
-		uint32_t index;
-		index = rem_buf_index(chunk);
-		hdr = index_to_hdr(pool, index);
+size_t odp_buffer_pool_headroom(odp_buffer_pool_t pool_hdl)
+{
+	return odp_pool_to_entry(pool_hdl)->s.headroom;
+}
+
+int odp_buffer_pool_set_headroom(odp_buffer_pool_t pool_hdl, size_t hr)
+{
+	pool_entry_t *pool = odp_pool_to_entry(pool_hdl);
 
-		handle = hdr->handle;
+	if (hr >= pool->s.seg_size/2) {
+		return -1;
+	} else {
+		pool->s.headroom = hr;
+		return 0;
 	}
+}
 
-	return handle.u32;
+size_t odp_buffer_pool_tailroom(odp_buffer_pool_t pool_hdl)
+{
+	return odp_pool_to_entry(pool_hdl)->s.tailroom;
 }
 
+int odp_buffer_pool_set_tailroom(odp_buffer_pool_t pool_hdl, size_t tr)
+{
+	pool_entry_t *pool = odp_pool_to_entry(pool_hdl);
 
-void odp_buffer_free(odp_buffer_t buf)
+	if (tr >= pool->s.seg_size/2) {
+		return -1;
+	} else {
+		pool->s.tailroom = tr;
+		return 0;
+	}
+}
+
+odp_buffer_t buffer_alloc(odp_buffer_pool_t pool_hdl, size_t size)
 {
-	odp_buffer_hdr_t *hdr;
-	uint32_t pool_id;
-	pool_entry_t *pool;
-	odp_buffer_chunk_hdr_t *chunk_hdr;
+	pool_entry_t *pool = odp_pool_to_entry(pool_hdl);
+	size_t totsize = pool->s.headroom + size + pool->s.tailroom;
+	odp_anybuf_t *buf;
+	uint8_t *blk;
 
-	hdr       = odp_buf_to_hdr(buf);
-	pool_id   = pool_handle_to_index(hdr->pool_hdl);
-	pool      = get_pool_entry(pool_id);
-	chunk_hdr = local_chunk[pool_id];
+	if ((pool->s.flags.unsegmented && totsize > pool->s.seg_size) ||
+	    (!pool->s.flags.unsegmented && totsize > ODP_CONFIG_BUF_MAX_SIZE))
+		return ODP_BUFFER_INVALID;
 
-	if (chunk_hdr && chunk_hdr->chunk.num_bufs == ODP_BUFS_PER_CHUNK - 1) {
-		/* Current chunk is full. Push back to the pool */
-		LOCK(&pool->s.lock);
-		add_chunk(pool, chunk_hdr);
-		UNLOCK(&pool->s.lock);
-		chunk_hdr = NULL;
-	}
+	buf = (odp_anybuf_t *)get_buf(&pool->s);
 
-	if (chunk_hdr == NULL) {
-		/* Use this buffer */
-		chunk_hdr = (odp_buffer_chunk_hdr_t *)hdr;
-		local_chunk[pool_id] = chunk_hdr;
-		chunk_hdr->chunk.num_bufs = 0;
-	} else {
-		/* Add to current chunk */
-		add_buf_index(chunk_hdr, hdr->index);
+	if (buf == NULL)
+		return ODP_BUFFER_INVALID;
+
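+	/* Pull segments from the block freelist until headroom + size +
+	 * tailroom is covered; back the buffer out if blocks run out.
+	 */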
+	do {
+		blk = get_blk(&pool->s);
+		if (blk == NULL) {
+			ret_buf(&pool->s, &buf->buf);
+			return ODP_BUFFER_INVALID;
+		}
+		buf->buf.addr[buf->buf.segcount++] = blk;
+		totsize = totsize <  pool->s.seg_size ? 0 :
+			totsize - pool->s.seg_size;
+	} while (totsize > 0);
+
+	switch (buf->buf.type) {
+	case ODP_BUFFER_TYPE_RAW:
+		break;
+
+	case ODP_BUFFER_TYPE_PACKET:
+		packet_init(pool, &buf->pkt, size);
+		break;
+
+	case ODP_BUFFER_TYPE_TIMEOUT:
+		break;
+
+	default:
+		ret_buf(&pool->s, &buf->buf);
+		return ODP_BUFFER_INVALID;
 	}
+
+	return odp_hdr_to_buf(&buf->buf);
+}
+
+odp_buffer_t odp_buffer_alloc(odp_buffer_pool_t pool_hdl)
+{
+	return buffer_alloc(pool_hdl, 0);
+}
+
+void odp_buffer_free(odp_buffer_t buf)
+{
+	odp_buffer_hdr_t *hdr = odp_buf_to_hdr(buf);
+	pool_entry_t *pool = odp_buf_to_pool(hdr);
+	ret_buf(&pool->s, hdr);
 }
 
 
@@ -516,8 +393,6 @@  odp_buffer_pool_t odp_buffer_pool(odp_buffer_t buf)
 void odp_buffer_pool_print(odp_buffer_pool_t pool_hdl)
 {
 	pool_entry_t *pool;
-	odp_buffer_chunk_hdr_t *chunk_hdr;
-	uint32_t i;
 	uint32_t pool_id;
 
 	pool_id = pool_handle_to_index(pool_hdl);
@@ -528,47 +403,4 @@  void odp_buffer_pool_print(odp_buffer_pool_t pool_hdl)
 	printf("  pool          %i\n",           pool->s.pool_hdl);
 	printf("  name          %s\n",           pool->s.name);
 	printf("  pool base     %p\n",           pool->s.pool_base_addr);
-	printf("  buf base      0x%"PRIxPTR"\n", pool->s.buf_base);
-	printf("  pool size     0x%"PRIx64"\n",  pool->s.pool_size);
-	printf("  buf size      %zu\n",          pool->s.user_size);
-	printf("  buf align     %zu\n",          pool->s.user_align);
-	printf("  hdr size      %zu\n",          pool->s.hdr_size);
-	printf("  alloc size    %zu\n",          pool->s.buf_size);
-	printf("  offset to hdr %zu\n",          pool->s.buf_offset);
-	printf("  num bufs      %"PRIu64"\n",    pool->s.num_bufs);
-	printf("  free bufs     %"PRIu64"\n",    pool->s.free_bufs);
-
-	/* first chunk */
-	chunk_hdr = pool->s.head;
-
-	if (chunk_hdr == NULL) {
-		ODP_ERR("  POOL EMPTY\n");
-		return;
-	}
-
-	printf("\n  First chunk\n");
-
-	for (i = 0; i < chunk_hdr->chunk.num_bufs - 1; i++) {
-		uint32_t index;
-		odp_buffer_hdr_t *hdr;
-
-		index = chunk_hdr->chunk.buf_index[i];
-		hdr   = index_to_hdr(pool, index);
-
-		printf("  [%i] addr %p, id %"PRIu32"\n", i, hdr->addr, index);
-	}
-
-	printf("  [%i] addr %p, id %"PRIu32"\n", i, chunk_hdr->buf_hdr.addr,
-	       chunk_hdr->buf_hdr.index);
-
-	/* next chunk */
-	chunk_hdr = next_chunk(pool, chunk_hdr);
-
-	if (chunk_hdr) {
-		printf("  Next chunk\n");
-		printf("  addr %p, id %"PRIu32"\n", chunk_hdr->buf_hdr.addr,
-		       chunk_hdr->buf_hdr.index);
-	}
-
-	printf("\n");
 }
diff --git a/platform/linux-generic/odp_crypto.c b/platform/linux-generic/odp_crypto.c
index 1475437..1e8d448 100644
--- a/platform/linux-generic/odp_crypto.c
+++ b/platform/linux-generic/odp_crypto.c
@@ -14,7 +14,6 @@ 
 #include <odp_shared_memory.h>
 #include <odp_crypto_internal.h>
 #include <odp_hints.h>
-#include <odph_packet.h>
 
 #include <string.h>
 
@@ -370,7 +369,7 @@  odp_crypto_operation(odp_crypto_op_params_t *params,
 		if (completion_event == odp_packet_to_buffer(params->pkt))
 			completion_event =
 				odp_packet_to_buffer(params->out_pkt);
-		odph_packet_free(params->pkt);
+		odp_packet_free(params->pkt);
 		params->pkt = ODP_PACKET_INVALID;
 	}
 
diff --git a/platform/linux-generic/odp_packet.c b/platform/linux-generic/odp_packet.c
index 82ea879..1abef3c 100644
--- a/platform/linux-generic/odp_packet.c
+++ b/platform/linux-generic/odp_packet.c
@@ -11,29 +11,31 @@ 
 
 #include <odph_eth.h>
 #include <odph_ip.h>
+#include <odph_tcp.h>
+#include <odph_udp.h>
 
 #include <string.h>
 #include <stdio.h>
 
-static inline uint8_t parse_ipv4(odp_packet_hdr_t *pkt_hdr,
-				 odph_ipv4hdr_t *ipv4, size_t *offset_out);
-static inline uint8_t parse_ipv6(odp_packet_hdr_t *pkt_hdr,
-				 odph_ipv6hdr_t *ipv6, size_t *offset_out);
-
 void odp_packet_init(odp_packet_t pkt)
 {
 	odp_packet_hdr_t *const pkt_hdr = odp_packet_hdr(pkt);
+	pool_entry_t *pool = odp_buf_to_pool(&pkt_hdr->buf_hdr);
 	const size_t start_offset = ODP_FIELD_SIZEOF(odp_packet_hdr_t, buf_hdr);
 	uint8_t *start;
 	size_t len;
 
 	start = (uint8_t *)pkt_hdr + start_offset;
-	len = ODP_OFFSETOF(odp_packet_hdr_t, buf_data) - start_offset;
+	len = sizeof(odp_packet_hdr_t) - start_offset;
 	memset(start, 0, len);
 
 	pkt_hdr->l2_offset = ODP_PACKET_OFFSET_INVALID;
 	pkt_hdr->l3_offset = ODP_PACKET_OFFSET_INVALID;
 	pkt_hdr->l4_offset = ODP_PACKET_OFFSET_INVALID;
+	pkt_hdr->payload_offset = ODP_PACKET_OFFSET_INVALID;
+
+	pkt_hdr->headroom = pool->s.headroom;
+	pkt_hdr->tailroom = pool->s.tailroom;
 }
 
 odp_packet_t odp_packet_from_buffer(odp_buffer_t buf)
@@ -46,55 +48,74 @@  odp_buffer_t odp_packet_to_buffer(odp_packet_t pkt)
 	return (odp_buffer_t)pkt;
 }
 
-void odp_packet_set_len(odp_packet_t pkt, size_t len)
+size_t odp_packet_len(odp_packet_t pkt)
 {
-	odp_packet_hdr(pkt)->frame_len = len;
+	return odp_packet_hdr(pkt)->frame_len;
 }
 
-size_t odp_packet_get_len(odp_packet_t pkt)
+void *odp_packet_offset_map(odp_packet_t pkt, size_t offset, size_t *seglen)
 {
-	return odp_packet_hdr(pkt)->frame_len;
+	odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
+
+	if (offset >= pkt_hdr->frame_len)
+		return NULL;
+
+	return  buffer_map(&pkt_hdr->buf_hdr,
+			   pkt_hdr->headroom + offset,
+			   seglen, pkt_hdr->frame_len);
 }
 
-uint8_t *odp_packet_addr(odp_packet_t pkt)
+void *odp_packet_map(odp_packet_t pkt, size_t *seglen)
 {
-	return odp_buffer_addr(odp_packet_to_buffer(pkt));
+	odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
+
+	return buffer_map(&pkt_hdr->buf_hdr,
+			  0, seglen, pkt_hdr->frame_len);
 }
 
-uint8_t *odp_packet_data(odp_packet_t pkt)
+void *odp_packet_addr(odp_packet_t pkt)
 {
-	return odp_packet_addr(pkt) + odp_packet_hdr(pkt)->frame_offset;
+	size_t seglen;
+	return odp_packet_map(pkt, &seglen);
 }
 
-
-uint8_t *odp_packet_l2(odp_packet_t pkt)
+void *odp_packet_udata(odp_packet_t pkt, size_t *len)
 {
-	const size_t offset = odp_packet_l2_offset(pkt);
+	odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
 
-	if (odp_unlikely(offset == ODP_PACKET_OFFSET_INVALID))
-		return NULL;
+	*len = pkt_hdr->buf_hdr.udata_size;
+	return pkt_hdr->buf_hdr.udata_addr;
+}
 
-	return odp_packet_addr(pkt) + offset;
+void *odp_packet_udata_addr(odp_packet_t pkt)
+{
+	return odp_packet_hdr(pkt)->buf_hdr.udata_addr;
 }
 
-size_t odp_packet_l2_offset(odp_packet_t pkt)
+void *odp_packet_l2_map(odp_packet_t pkt, size_t *seglen)
 {
-	return odp_packet_hdr(pkt)->l2_offset;
+	return odp_packet_offset_map(pkt, odp_packet_l2_offset(pkt), seglen);
 }
 
-void odp_packet_set_l2_offset(odp_packet_t pkt, size_t offset)
+size_t odp_packet_l2_offset(odp_packet_t pkt)
 {
-	odp_packet_hdr(pkt)->l2_offset = offset;
+	return odp_packet_hdr(pkt)->l2_offset;
 }
 
-uint8_t *odp_packet_l3(odp_packet_t pkt)
+int odp_packet_set_l2_offset(odp_packet_t pkt, size_t offset)
 {
-	const size_t offset = odp_packet_l3_offset(pkt);
+	odp_packet_hdr_t *hdr = odp_packet_hdr(pkt);
 
-	if (odp_unlikely(offset == ODP_PACKET_OFFSET_INVALID))
-		return NULL;
+	if (offset >= hdr->frame_len)
+		return -1;
 
-	return odp_packet_addr(pkt) + offset;
+	hdr->l2_offset = offset;
+	return 0;
+}
+
+void *odp_packet_l3_map(odp_packet_t pkt, size_t *seglen)
+{
+	return odp_packet_offset_map(pkt, odp_packet_l3_offset(pkt), seglen);
 }
 
 size_t odp_packet_l3_offset(odp_packet_t pkt)
@@ -102,19 +123,35 @@  size_t odp_packet_l3_offset(odp_packet_t pkt)
 	return odp_packet_hdr(pkt)->l3_offset;
 }
 
-void odp_packet_set_l3_offset(odp_packet_t pkt, size_t offset)
+int odp_packet_set_l3_offset(odp_packet_t pkt, size_t offset)
 {
-	odp_packet_hdr(pkt)->l3_offset = offset;
+	odp_packet_hdr_t *hdr = odp_packet_hdr(pkt);
+
+	if (offset >= hdr->frame_len)
+		return -1;
+
+	hdr->l3_offset = offset;
+	return 0;
 }
 
-uint8_t *odp_packet_l4(odp_packet_t pkt)
+uint32_t odp_packet_l3_protocol(odp_packet_t pkt)
 {
-	const size_t offset = odp_packet_l4_offset(pkt);
+	odp_packet_hdr_t *hdr = odp_packet_hdr(pkt);
 
-	if (odp_unlikely(offset == ODP_PACKET_OFFSET_INVALID))
-		return NULL;
+	if (hdr->input_flags.l3)
+		return hdr->l3_protocol;
+	else
+		return -1;
+}
 
-	return odp_packet_addr(pkt) + offset;
+void odp_packet_set_l3_protocol(odp_packet_t pkt, uint16_t protocol)
+{
+	odp_packet_hdr(pkt)->l3_protocol = protocol;
+}
+
+void *odp_packet_l4_map(odp_packet_t pkt, size_t *seglen)
+{
+	return odp_packet_offset_map(pkt, odp_packet_l4_offset(pkt), seglen);
 }
 
 size_t odp_packet_l4_offset(odp_packet_t pkt)
@@ -122,154 +159,550 @@  size_t odp_packet_l4_offset(odp_packet_t pkt)
 	return odp_packet_hdr(pkt)->l4_offset;
 }
 
-void odp_packet_set_l4_offset(odp_packet_t pkt, size_t offset)
+int odp_packet_set_l4_offset(odp_packet_t pkt, size_t offset)
 {
-	odp_packet_hdr(pkt)->l4_offset = offset;
+	odp_packet_hdr_t *hdr = odp_packet_hdr(pkt);
+
+	if (offset >= hdr->frame_len)
+		return -1;
+
+	hdr->l4_offset = offset;
+	return 0;
 }
 
+uint32_t odp_packet_l4_protocol(odp_packet_t pkt)
+{
+	odp_packet_hdr_t *hdr = odp_packet_hdr(pkt);
 
-int odp_packet_is_segmented(odp_packet_t pkt)
+	if (hdr->input_flags.l4)
+		return hdr->l4_protocol;
+	else
+		return -1;
+}
+
+void odp_packet_set_l4_protocol(odp_packet_t pkt, uint8_t protocol)
+{
+	odp_packet_hdr(pkt)->l4_protocol = protocol;
+}
+
+void *odp_packet_payload_map(odp_packet_t pkt, size_t *seglen)
+{
+	return odp_packet_offset_map(pkt, odp_packet_payload_offset(pkt),
+				     seglen);
+}
+
+size_t odp_packet_payload_offset(odp_packet_t pkt)
+{
+	return odp_packet_hdr(pkt)->payload_offset;
+}
+
+int odp_packet_set_payload_offset(odp_packet_t pkt, size_t offset)
+{
+	odp_packet_hdr_t *hdr = odp_packet_hdr(pkt);
+
+	if (offset >= hdr->frame_len)
+		return -1;
+
+	hdr->payload_offset = offset;
+	return 0;
+}
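
Unlike the old void setters, the offset setters above return 0 on success and
-1 when the offset lies beyond the frame length.  A brief sketch of the
intended calling convention (record_offsets() is an illustrative name):

#include <odp.h>

static int record_offsets(odp_packet_t pkt, size_t l3, size_t l4)
{
	if (odp_packet_set_l3_offset(pkt, l3) != 0)
		return -1;	/* l3 >= frame_len */

	if (odp_packet_set_l4_offset(pkt, l4) != 0)
		return -1;	/* l4 >= frame_len */

	return 0;
}
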
+
+int odp_packet_error(odp_packet_t pkt)
+{
+	return odp_packet_hdr(pkt)->error_flags.all != 0;
+}
+
+void odp_packet_set_error(odp_packet_t pkt, int val)
+{
+	odp_packet_hdr(pkt)->error_flags.app_error = val;
+}
+
+int odp_packet_inflag_l2(odp_packet_t pkt)
+{
+	return odp_packet_hdr(pkt)->input_flags.l2;
+}
+
+void odp_packet_set_inflag_l2(odp_packet_t pkt, int val)
+{
+	odp_packet_hdr(pkt)->input_flags.l2 = val;
+}
+
+int odp_packet_inflag_l3(odp_packet_t pkt)
+{
+	return odp_packet_hdr(pkt)->input_flags.l3;
+}
+
+void odp_packet_set_inflag_l3(odp_packet_t pkt, int val)
+{
+	odp_packet_hdr(pkt)->input_flags.l3 = val;
+}
+
+int odp_packet_inflag_l4(odp_packet_t pkt)
+{
+	return odp_packet_hdr(pkt)->input_flags.l4;
+}
+
+void odp_packet_set_inflag_l4(odp_packet_t pkt, int val)
+{
+	odp_packet_hdr(pkt)->input_flags.l4 = val;
+}
+
+int odp_packet_inflag_eth(odp_packet_t pkt)
+{
+	return odp_packet_hdr(pkt)->input_flags.eth;
+}
+
+void odp_packet_set_inflag_eth(odp_packet_t pkt, int val)
+{
+	odp_packet_hdr(pkt)->input_flags.eth = val;
+}
+
+int odp_packet_inflag_jumbo(odp_packet_t pkt)
+{
+	return odp_packet_hdr(pkt)->input_flags.jumbo;
+}
+
+void odp_packet_set_inflag_jumbo(odp_packet_t pkt, int val)
+{
+	odp_packet_hdr(pkt)->input_flags.jumbo = val;
+}
+
+int odp_packet_inflag_vlan(odp_packet_t pkt)
+{
+	return odp_packet_hdr(pkt)->input_flags.vlan;
+}
+
+void odp_packet_set_inflag_vlan(odp_packet_t pkt, int val)
+{
+	odp_packet_hdr(pkt)->input_flags.vlan = val;
+}
+
+int odp_packet_inflag_vlan_qinq(odp_packet_t pkt)
+{
+	return odp_packet_hdr(pkt)->input_flags.vlan_qinq;
+}
+
+void odp_packet_set_inflag_vlan_qinq(odp_packet_t pkt, int val)
+{
+	odp_packet_hdr(pkt)->input_flags.vlan_qinq = val;
+}
+
+int odp_packet_inflag_snap(odp_packet_t pkt)
+{
+	return odp_packet_hdr(pkt)->input_flags.snap;
+}
+
+void odp_packet_set_inflag_snap(odp_packet_t pkt, int val)
+{
+	odp_packet_hdr(pkt)->input_flags.snap = val;
+}
+
+int odp_packet_inflag_arp(odp_packet_t pkt)
+{
+	return odp_packet_hdr(pkt)->input_flags.arp;
+}
+
+void odp_packet_set_inflag_arp(odp_packet_t pkt, int val)
+{
+	odp_packet_hdr(pkt)->input_flags.arp = val;
+}
+
+int odp_packet_inflag_ipv4(odp_packet_t pkt)
+{
+	return odp_packet_hdr(pkt)->input_flags.ipv4;
+}
+
+void odp_packet_set_inflag_ipv4(odp_packet_t pkt, int val)
+{
+	odp_packet_hdr(pkt)->input_flags.ipv4 = val;
+}
+
+int odp_packet_inflag_ipv6(odp_packet_t pkt)
+{
+	return odp_packet_hdr(pkt)->input_flags.ipv6;
+}
+
+void odp_packet_set_inflag_ipv6(odp_packet_t pkt, int val)
+{
+	odp_packet_hdr(pkt)->input_flags.ipv6 = val;
+}
+
+int odp_packet_inflag_ipfrag(odp_packet_t pkt)
+{
+	return odp_packet_hdr(pkt)->input_flags.ipfrag;
+}
+
+void odp_packet_set_inflag_ipfrag(odp_packet_t pkt, int val)
+{
+	odp_packet_hdr(pkt)->input_flags.ipfrag = val;
+}
+
+int odp_packet_inflag_ipopt(odp_packet_t pkt)
+{
+	return odp_packet_hdr(pkt)->input_flags.ipopt;
+}
+
+void odp_packet_set_inflag_ipopt(odp_packet_t pkt, int val)
+{
+	odp_packet_hdr(pkt)->input_flags.ipopt = val;
+}
+
+int odp_packet_inflag_ipsec(odp_packet_t pkt)
+{
+	return odp_packet_hdr(pkt)->input_flags.ipsec;
+}
+
+void odp_packet_set_inflag_ipsec(odp_packet_t pkt, int val)
+{
+	odp_packet_hdr(pkt)->input_flags.ipsec = val;
+}
+
+int odp_packet_inflag_udp(odp_packet_t pkt)
+{
+	return odp_packet_hdr(pkt)->input_flags.udp;
+}
+
+void odp_packet_set_inflag_udp(odp_packet_t pkt, int val)
+{
+	odp_packet_hdr(pkt)->input_flags.udp = val;
+}
+
+int odp_packet_inflag_tcp(odp_packet_t pkt)
+{
+	return odp_packet_hdr(pkt)->input_flags.tcp;
+}
+
+void odp_packet_set_inflag_tcp(odp_packet_t pkt, int val)
 {
-	odp_buffer_hdr_t *buf_hdr = odp_buf_to_hdr((odp_buffer_t)pkt);
+	odp_packet_hdr(pkt)->input_flags.tcp = val;
+}
 
-	if (buf_hdr->scatter.num_bufs == 0)
+int odp_packet_inflag_tcpopt(odp_packet_t pkt)
+{
+	return odp_packet_hdr(pkt)->input_flags.tcpopt;
+}
+
+void odp_packet_set_inflag_tcpopt(odp_packet_t pkt, int val)
+{
+	odp_packet_hdr(pkt)->input_flags.tcpopt = val;
+}
+
+int odp_packet_inflag_icmp(odp_packet_t pkt)
+{
+	return odp_packet_hdr(pkt)->input_flags.icmp;
+}
+
+void odp_packet_set_inflag_icmp(odp_packet_t pkt, int val)
+{
+	odp_packet_hdr(pkt)->input_flags.icmp = val;
+}
+
+int odp_packet_is_valid(odp_packet_t pkt)
+{
+	odp_buffer_hdr_t *buf = validate_buf((odp_buffer_t)pkt);
+
+	if (buf == NULL)
 		return 0;
 	else
-		return 1;
+		return buf->type == ODP_BUFFER_TYPE_PACKET;
 }
 
+int odp_packet_is_segmented(odp_packet_t pkt)
+{
+	return (odp_packet_hdr(pkt)->buf_hdr.segcount > 1);
+}
 
-int odp_packet_seg_count(odp_packet_t pkt)
+int odp_packet_segment_count(odp_packet_t pkt)
 {
-	odp_buffer_hdr_t *buf_hdr = odp_buf_to_hdr((odp_buffer_t)pkt);
+	return odp_packet_hdr(pkt)->buf_hdr.segcount;
+}
 
-	return (int)buf_hdr->scatter.num_bufs + 1;
+size_t odp_packet_headroom(odp_packet_t pkt)
+{
+	return odp_packet_hdr(pkt)->headroom;
 }
 
+size_t odp_packet_tailroom(odp_packet_t pkt)
+{
+	return odp_packet_hdr(pkt)->tailroom;
+}
 
-/**
- * Simple packet parser: eth, VLAN, IP, TCP/UDP/ICMP
- *
- * Internal function: caller is resposible for passing only valid packet handles
- * , lengths and offsets (usually done&called in packet input).
- *
- * @param pkt        Packet handle
- * @param len        Packet length in bytes
- * @param frame_offset  Byte offset to L2 header
- */
-void odp_packet_parse(odp_packet_t pkt, size_t len, size_t frame_offset)
+int odp_packet_push_head(odp_packet_t pkt, size_t len)
 {
-	odp_packet_hdr_t *const pkt_hdr = odp_packet_hdr(pkt);
-	odph_ethhdr_t *eth;
-	odph_vlanhdr_t *vlan;
-	odph_ipv4hdr_t *ipv4;
-	odph_ipv6hdr_t *ipv6;
-	uint16_t ethtype;
-	size_t offset = 0;
-	uint8_t ip_proto = 0;
+	odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
 
-	pkt_hdr->input_flags.eth = 1;
-	pkt_hdr->frame_offset = frame_offset;
-	pkt_hdr->frame_len = len;
+	if (len > pkt_hdr->headroom)
+		return -1;
 
-	if (odp_unlikely(len < ODPH_ETH_LEN_MIN)) {
-		pkt_hdr->error_flags.frame_len = 1;
-		return;
-	} else if (len > ODPH_ETH_LEN_MAX) {
-		pkt_hdr->input_flags.jumbo = 1;
-	}
+	push_head(pkt_hdr, len);
+	return 0;
+}
 
-	/* Assume valid L2 header, no CRC/FCS check in SW */
-	pkt_hdr->input_flags.l2 = 1;
-	pkt_hdr->l2_offset = frame_offset;
+void *odp_packet_push_head_and_map(odp_packet_t pkt, size_t len, size_t *seglen)
+{
+	odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
 
-	eth = (odph_ethhdr_t *)odp_packet_data(pkt);
-	ethtype = odp_be_to_cpu_16(eth->type);
-	vlan = (odph_vlanhdr_t *)&eth->type;
+	if (len > pkt_hdr->headroom)
+		return NULL;
 
-	if (ethtype == ODPH_ETHTYPE_VLAN_OUTER) {
-		pkt_hdr->input_flags.vlan_qinq = 1;
-		ethtype = odp_be_to_cpu_16(vlan->tpid);
-		offset += sizeof(odph_vlanhdr_t);
-		vlan = &vlan[1];
+	push_head(pkt_hdr, len);
+
+	return buffer_map(&pkt_hdr->buf_hdr, 0, seglen, pkt_hdr->frame_len);
+}
+
+int odp_packet_pull_head(odp_packet_t pkt, size_t len)
+{
+	odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
+
+	if (len > pkt_hdr->frame_len)
+		return -1;
+
+	pull_head(pkt_hdr, len);
+	return 0;
+}
+
+void *odp_packet_pull_head_and_map(odp_packet_t pkt, size_t len, size_t *seglen)
+{
+	odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
+
+	if (len > pkt_hdr->frame_len)
+		return NULL;
+
+	pull_head(pkt_hdr, len);
+	return buffer_map(&pkt_hdr->buf_hdr, 0, seglen, pkt_hdr->frame_len);
+}
+
+int odp_packet_push_tail(odp_packet_t pkt, size_t len)
+{
+	odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
+
+	if (len > pkt_hdr->tailroom)
+		return -1;
+
+	push_tail(pkt_hdr, len);
+	return 0;
+}
+
+void *odp_packet_push_tail_and_map(odp_packet_t pkt, size_t len, size_t *seglen)
+{
+	odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
+	size_t origin = pkt_hdr->frame_len;
+
+	if (len > pkt_hdr->tailroom)
+		return NULL;
+
+	push_tail(pkt_hdr, len);
+
+	return buffer_map(&pkt_hdr->buf_hdr, origin, seglen,
+			  pkt_hdr->frame_len);
+}
+
+int odp_packet_pull_tail(odp_packet_t pkt, size_t len)
+{
+	odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
+
+	if (len > pkt_hdr->frame_len)
+		return -1;
+
+	pull_tail(pkt_hdr, len);
+	return 0;
+}
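
The push/pull calls above grow or shrink the packet into its headroom and
tailroom without moving data; the *_and_map variants additionally return the
new start address and the contiguous length there.  A sketch of prepending an
encapsulation header and trimming a trailing FCS; encap_and_trim() is an
illustrative name and uses odp_packet_copy_from_memory() (defined later in
this file) so the new header may straddle a segment boundary:

#include <odp.h>

static int encap_and_trim(odp_packet_t pkt, void *hdr, size_t hdrlen)
{
	if (odp_packet_push_head(pkt, hdrlen) != 0)
		return -1;	/* not enough headroom */

	/* copy handles the case where the header spans segments */
	if (odp_packet_copy_from_memory(pkt, 0, hdr, hdrlen) != 0)
		return -1;

	/* drop a trailing 4-byte Ethernet FCS, if the caller knows it is there */
	return odp_packet_pull_tail(pkt, 4);
}
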
+
+void odp_packet_print(odp_packet_t pkt)
+{
+	int max_len = 512;
+	char str[max_len];
+	int len = 0;
+	int n = max_len-1;
+	odp_packet_hdr_t *hdr = odp_packet_hdr(pkt);
+
+	len += snprintf(&str[len], n-len, "Packet ");
+	len += odp_buffer_snprint(&str[len], n-len, (odp_buffer_t) pkt);
+	len += snprintf(&str[len], n-len,
+			"  input_flags  0x%x\n", hdr->input_flags.all);
+	len += snprintf(&str[len], n-len,
+			"  error_flags  0x%x\n", hdr->error_flags.all);
+	len += snprintf(&str[len], n-len,
+			"  output_flags 0x%x\n", hdr->output_flags.all);
+	len += snprintf(&str[len], n-len,
+			"  l2_offset    %u\n", hdr->l2_offset);
+	len += snprintf(&str[len], n-len,
+			"  l3_offset    %u\n", hdr->l3_offset);
+	len += snprintf(&str[len], n-len,
+			"  l3_len       %u\n", hdr->l3_len);
+	len += snprintf(&str[len], n-len,
+			"  l3_protocol  0x%x\n", hdr->l3_protocol);
+	len += snprintf(&str[len], n-len,
+			"  l4_offset    %u\n", hdr->l4_offset);
+	len += snprintf(&str[len], n-len,
+			"  l4_len       %u\n", hdr->l4_len);
+	len += snprintf(&str[len], n-len,
+			"  l4_protocol  %u\n", hdr->l4_protocol);
+	len += snprintf(&str[len], n-len,
+			"  payload_offset    %u\n", hdr->payload_offset);
+	len += snprintf(&str[len], n-len,
+			"  frame_len    %u\n", hdr->frame_len);
+	str[len] = '\0';
+
+	printf("\n%s\n", str);
+}
+
+int odp_packet_copy_to_packet(odp_packet_t dstpkt, size_t dstoffset,
+			      odp_packet_t srcpkt, size_t srcoffset,
+			      size_t len)
+{
+	void *dstmap;
+	void *srcmap;
+	size_t cpylen, minseg, dstseglen, srcseglen;
+
+	while (len > 0) {
+		dstmap = odp_packet_offset_map(dstpkt, dstoffset, &dstseglen);
+		srcmap = odp_packet_offset_map(srcpkt, srcoffset, &srcseglen);
+		if (dstmap == NULL || srcmap == NULL)
+			return -1;
+		minseg = dstseglen > srcseglen ? srcseglen : dstseglen;
+		cpylen = len > minseg ? minseg : len;
+		memcpy(dstmap, srcmap, cpylen);
+		srcoffset += cpylen;
+		dstoffset += cpylen;
+		len       -= cpylen;
 	}
 
-	if (ethtype == ODPH_ETHTYPE_VLAN) {
-		pkt_hdr->input_flags.vlan = 1;
-		ethtype = odp_be_to_cpu_16(vlan->tpid);
-		offset += sizeof(odph_vlanhdr_t);
+	return 0;
+}
+
+int odp_packet_copy_to_memory(void *dstmem, odp_packet_t srcpkt,
+			      size_t srcoffset, size_t dstlen)
+{
+	void *mapaddr;
+	size_t seglen, cpylen;
+	uint8_t *dstaddr = (uint8_t *)dstmem;
+
+	while (dstlen > 0) {
+		mapaddr = odp_packet_offset_map(srcpkt, srcoffset, &seglen);
+		if (mapaddr == NULL)
+			return -1;
+		cpylen = dstlen > seglen ? seglen : dstlen;
+		memcpy(dstaddr, mapaddr, cpylen);
+		srcoffset += cpylen;
+		dstaddr   += cpylen;
+		dstlen    -= cpylen;
 	}
 
-	/* Set l3_offset+flag only for known ethtypes */
-	switch (ethtype) {
-	case ODPH_ETHTYPE_IPV4:
-		pkt_hdr->input_flags.ipv4 = 1;
-		pkt_hdr->input_flags.l3 = 1;
-		pkt_hdr->l3_offset = frame_offset + ODPH_ETHHDR_LEN + offset;
-		ipv4 = (odph_ipv4hdr_t *)odp_packet_l3(pkt);
-		ip_proto = parse_ipv4(pkt_hdr, ipv4, &offset);
-		break;
-	case ODPH_ETHTYPE_IPV6:
-		pkt_hdr->input_flags.ipv6 = 1;
-		pkt_hdr->input_flags.l3 = 1;
-		pkt_hdr->l3_offset = frame_offset + ODPH_ETHHDR_LEN + offset;
-		ipv6 = (odph_ipv6hdr_t *)odp_packet_l3(pkt);
-		ip_proto = parse_ipv6(pkt_hdr, ipv6, &offset);
-		break;
-	case ODPH_ETHTYPE_ARP:
-		pkt_hdr->input_flags.arp = 1;
-		/* fall through */
-	default:
-		ip_proto = 0;
-		break;
+	return 0;
+}
+
+int odp_packet_copy_from_memory(odp_packet_t dstpkt, size_t dstoffset,
+				void *srcmem, size_t srclen)
+{
+	void *mapaddr;
+	size_t seglen, cpylen;
+	uint8_t *srcaddr = (uint8_t *)srcmem;
+
+	while (srclen > 0) {
+		mapaddr = odp_packet_offset_map(dstpkt, dstoffset, &seglen);
+		if (mapaddr == NULL)
+			return -1;
+		cpylen = srclen > seglen ? seglen : srclen;
+		memcpy(mapaddr, srcaddr, cpylen);
+		dstoffset += cpylen;
+		srcaddr   += cpylen;
+		srclen    -= cpylen;
 	}
 
-	switch (ip_proto) {
-	case ODPH_IPPROTO_UDP:
-		pkt_hdr->input_flags.udp = 1;
-		pkt_hdr->input_flags.l4 = 1;
-		pkt_hdr->l4_offset = pkt_hdr->l3_offset + offset;
-		break;
-	case ODPH_IPPROTO_TCP:
-		pkt_hdr->input_flags.tcp = 1;
-		pkt_hdr->input_flags.l4 = 1;
-		pkt_hdr->l4_offset = pkt_hdr->l3_offset + offset;
-		break;
-	case ODPH_IPPROTO_SCTP:
-		pkt_hdr->input_flags.sctp = 1;
-		pkt_hdr->input_flags.l4 = 1;
-		pkt_hdr->l4_offset = pkt_hdr->l3_offset + offset;
-		break;
-	case ODPH_IPPROTO_ICMP:
-		pkt_hdr->input_flags.icmp = 1;
-		pkt_hdr->input_flags.l4 = 1;
-		pkt_hdr->l4_offset = pkt_hdr->l3_offset + offset;
-		break;
-	default:
-		/* 0 or unhandled IP protocols, don't set L4 flag+offset */
-		if (pkt_hdr->input_flags.ipv6) {
-			/* IPv6 next_hdr is not L4, mark as IP-option instead */
-			pkt_hdr->input_flags.ipopt = 1;
+	return 0;
+}
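
The three copy helpers above hide segment boundaries from the caller.  A
sketch that builds a packet from a flat memory buffer using
odp_packet_alloc_len() (defined further down in this file) plus
odp_packet_copy_from_memory(); pkt_from_mem() is an illustrative name:

#include <odp.h>

static odp_packet_t pkt_from_mem(odp_buffer_pool_t pool, void *data, size_t len)
{
	odp_packet_t pkt = odp_packet_alloc_len(pool, len);

	if (pkt == ODP_PACKET_INVALID)
		return ODP_PACKET_INVALID;

	if (odp_packet_copy_from_memory(pkt, 0, data, len) != 0) {
		odp_packet_free(pkt);
		return ODP_PACKET_INVALID;
	}

	return pkt;
}
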
+
+odp_packet_t odp_packet_copy(odp_packet_t pkt, odp_buffer_pool_t pool)
+{
+	size_t pktlen = odp_packet_len(pkt);
+	const size_t meta_offset = ODP_FIELD_SIZEOF(odp_packet_hdr_t, buf_hdr);
+	odp_packet_t newpkt = odp_packet_alloc_len(pool, pktlen);
+	odp_packet_hdr_t *newhdr, *srchdr;
+	uint8_t *newstart, *srcstart;
+
+	if (newpkt != ODP_PACKET_INVALID) {
+		/* Must copy meta data first, followed by packet data */
+		srchdr = odp_packet_hdr(pkt);
+		newhdr = odp_packet_hdr(newpkt);
+		newstart = (uint8_t *)newhdr + meta_offset;
+		srcstart = (uint8_t *)srchdr + meta_offset;
+
+		memcpy(newstart, srcstart,
+		       sizeof(odp_packet_hdr_t) - meta_offset);
+
+		if (odp_packet_copy_to_packet(newpkt, 0, pkt, 0, pktlen) != 0) {
+			odp_packet_free(newpkt);
+			newpkt = ODP_PACKET_INVALID;
 		}
-		break;
 	}
+
+	return newpkt;
 }
 
+odp_packet_t odp_packet_alloc(odp_buffer_pool_t pool)
+{
+	if (odp_pool_to_entry(pool)->s.params.buf_type !=
+	    ODP_BUFFER_TYPE_PACKET)
+		return ODP_PACKET_INVALID;
+
+	return (odp_packet_t)buffer_alloc(pool, 0);
+}
+
+odp_packet_t odp_packet_alloc_len(odp_buffer_pool_t pool, size_t len)
+{
+	if (odp_pool_to_entry(pool)->s.params.buf_type !=
+	    ODP_BUFFER_TYPE_PACKET)
+		return ODP_PACKET_INVALID;
+
+	return (odp_packet_t)buffer_alloc(pool, len);
+}
+
+void odp_packet_free(odp_packet_t pkt)
+{
+	odp_buffer_free((odp_buffer_t)pkt);
+}
+
+uint32_t odp_packet_refcount(odp_packet_t pkt)
+{
+	return odp_buffer_refcount(&odp_packet_hdr(pkt)->buf_hdr);
+}
+
+uint32_t odp_packet_incr_refcount(odp_packet_t pkt, uint32_t val)
+{
+	return odp_buffer_incr_refcount(&odp_packet_hdr(pkt)->buf_hdr, val);
+}
+
+uint32_t odp_packet_decr_refcount(odp_packet_t pkt, uint32_t val)
+{
+	return odp_buffer_decr_refcount(&odp_packet_hdr(pkt)->buf_hdr, val);
+}
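
The reference-count wrappers above enable a decrement-then-free pattern on
transmit completion; the same idiom appears in the socket send paths further
down in this patch.  Minimal sketch (pkt_release() is an illustrative name):

#include <odp.h>

static void pkt_release(odp_packet_t pkt)
{
	if (odp_packet_decr_refcount(pkt, 1) == 0)
		odp_packet_free(pkt);
}
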
+
+/**
+ * Parser helper function for IPv4
+ */
 static inline uint8_t parse_ipv4(odp_packet_hdr_t *pkt_hdr,
-				 odph_ipv4hdr_t *ipv4, size_t *offset_out)
+				 uint8_t **parseptr, size_t *offset)
 {
-	uint8_t ihl;
+	odph_ipv4hdr_t *ipv4 = (odph_ipv4hdr_t *)*parseptr;
+	uint8_t ver = ODPH_IPV4HDR_VER(ipv4->ver_ihl);
+	uint8_t ihl = ODPH_IPV4HDR_IHL(ipv4->ver_ihl);
 	uint16_t frag_offset;
 
-	ihl = ODPH_IPV4HDR_IHL(ipv4->ver_ihl);
-	if (odp_unlikely(ihl < ODPH_IPV4HDR_IHL_MIN)) {
+	pkt_hdr->l3_len = odp_be_to_cpu_16(ipv4->tot_len);
+
+	if (odp_unlikely(ihl < ODPH_IPV4HDR_IHL_MIN) ||
+	    odp_unlikely(ver != 4) ||
+	    (pkt_hdr->l3_len > pkt_hdr->frame_len - *offset)) {
 		pkt_hdr->error_flags.ip_err = 1;
 		return 0;
 	}
 
+	*offset   += ihl * 4;
+	*parseptr += ihl * 4;
+
 	if (odp_unlikely(ihl > ODPH_IPV4HDR_IHL_MIN)) {
 		pkt_hdr->input_flags.ipopt = 1;
-		return 0;
 	}
 
 	/* A packet is a fragment if:
@@ -280,7 +713,6 @@  static inline uint8_t parse_ipv4(odp_packet_hdr_t *pkt_hdr,
 	frag_offset = odp_be_to_cpu_16(ipv4->frag_offset);
 	if (odp_unlikely(ODPH_IPV4HDR_IS_FRAGMENT(frag_offset))) {
 		pkt_hdr->input_flags.ipfrag = 1;
-		return 0;
 	}
 
 	if (ipv4->proto == ODPH_IPPROTO_ESP ||
@@ -291,108 +723,260 @@  static inline uint8_t parse_ipv4(odp_packet_hdr_t *pkt_hdr,
 
 	/* Set pkt_hdr->input_flags.ipopt when checking L4 hdrs after return */
 
-	*offset_out = sizeof(uint32_t) * ihl;
+	*offset = sizeof(uint32_t) * ihl;
 	return ipv4->proto;
 }
 
+/**
+ * Parser helper function for IPv6
+ */
 static inline uint8_t parse_ipv6(odp_packet_hdr_t *pkt_hdr,
-				 odph_ipv6hdr_t *ipv6, size_t *offset_out)
+				 uint8_t **parseptr, size_t *offset)
 {
-	if (ipv6->next_hdr == ODPH_IPPROTO_ESP ||
-	    ipv6->next_hdr == ODPH_IPPROTO_AH) {
-		pkt_hdr->input_flags.ipopt = 1;
-		pkt_hdr->input_flags.ipsec = 1;
+	odph_ipv6hdr_t *ipv6 = (odph_ipv6hdr_t *)*parseptr;
+	odph_ipv6hdr_ext_t *ipv6ext;
+
+	pkt_hdr->l3_len = odp_be_to_cpu_16(ipv6->payload_len);
+
+	/* Basic sanity checks on IPv6 header */
+	if (ipv6->ver != 6 ||
+	    pkt_hdr->l3_len > pkt_hdr->frame_len - *offset) {
+		pkt_hdr->error_flags.ip_err = 1;
 		return 0;
 	}
 
+	/* Skip past IPv6 header */
+	*offset   += sizeof(odph_ipv6hdr_t);
+	*parseptr += sizeof(odph_ipv6hdr_t);
+
+
+	/* Skip past any IPv6 extension headers */
+	if (ipv6->next_hdr == ODPH_IPPROTO_HOPOPTS ||
+	    ipv6->next_hdr == ODPH_IPPROTO_ROUTE) {
+		pkt_hdr->input_flags.ipopt = 1;
+
+		do  {
+			ipv6ext    = (odph_ipv6hdr_ext_t *)*parseptr;
+			uint16_t extlen = 8 + ipv6ext->ext_len * 8;
+
+			*offset   += extlen;
+			*parseptr += extlen;
+		} while ((ipv6ext->next_hdr == ODPH_IPPROTO_HOPOPTS ||
+			  ipv6ext->next_hdr == ODPH_IPPROTO_ROUTE) &&
+			*offset < pkt_hdr->frame_len);
+
+		if (*offset >= pkt_hdr->l3_offset + ipv6->payload_len) {
+			pkt_hdr->error_flags.ip_err = 1;
+			return 0;
+		}
+
+		if (ipv6ext->next_hdr == ODPH_IPPROTO_FRAG)
+			pkt_hdr->input_flags.ipfrag = 1;
+
+		return ipv6ext->next_hdr;
+	}
+
 	if (odp_unlikely(ipv6->next_hdr == ODPH_IPPROTO_FRAG)) {
 		pkt_hdr->input_flags.ipopt = 1;
 		pkt_hdr->input_flags.ipfrag = 1;
-		return 0;
 	}
 
-	/* Don't step through more extensions */
-	*offset_out = ODPH_IPV6HDR_LEN;
 	return ipv6->next_hdr;
 }
 
-void odp_packet_print(odp_packet_t pkt)
+/**
+ * Parser helper function for TCP
+ */
+static inline void parse_tcp(odp_packet_hdr_t *pkt_hdr,
+			     uint8_t **parseptr, size_t *offset)
 {
-	int max_len = 512;
-	char str[max_len];
-	int len = 0;
-	int n = max_len-1;
-	odp_packet_hdr_t *hdr = odp_packet_hdr(pkt);
+	odph_tcphdr_t *tcp = (odph_tcphdr_t *)*parseptr;
 
-	len += snprintf(&str[len], n-len, "Packet ");
-	len += odp_buffer_snprint(&str[len], n-len, (odp_buffer_t) pkt);
-	len += snprintf(&str[len], n-len,
-			"  input_flags  0x%x\n", hdr->input_flags.all);
-	len += snprintf(&str[len], n-len,
-			"  error_flags  0x%x\n", hdr->error_flags.all);
-	len += snprintf(&str[len], n-len,
-			"  output_flags 0x%x\n", hdr->output_flags.all);
-	len += snprintf(&str[len], n-len,
-			"  frame_offset %u\n", hdr->frame_offset);
-	len += snprintf(&str[len], n-len,
-			"  l2_offset    %u\n", hdr->l2_offset);
-	len += snprintf(&str[len], n-len,
-			"  l3_offset    %u\n", hdr->l3_offset);
-	len += snprintf(&str[len], n-len,
-			"  l4_offset    %u\n", hdr->l4_offset);
-	len += snprintf(&str[len], n-len,
-			"  frame_len    %u\n", hdr->frame_len);
-	len += snprintf(&str[len], n-len,
-			"  input        %u\n", hdr->input);
-	str[len] = '\0';
+	if (tcp->hl < sizeof(odph_tcphdr_t)/sizeof(uint32_t))
+		pkt_hdr->error_flags.tcp_err = 1;
+	else if ((uint32_t)tcp->hl * 4 > sizeof(odph_tcphdr_t))
+		pkt_hdr->input_flags.tcpopt = 1;
 
-	printf("\n%s\n", str);
+	pkt_hdr->l4_len = pkt_hdr->l3_len +
+		pkt_hdr->l3_offset - pkt_hdr->l4_offset;
+
+	*offset += sizeof(odph_tcphdr_t);
+	*parseptr += sizeof(odph_tcphdr_t);
 }
 
-int odp_packet_copy(odp_packet_t pkt_dst, odp_packet_t pkt_src)
+/**
+ * Parser helper function for UDP
+ */
+static inline void parse_udp(odp_packet_hdr_t *pkt_hdr,
+			     uint8_t **parseptr, size_t *offset)
 {
-	odp_packet_hdr_t *const pkt_hdr_dst = odp_packet_hdr(pkt_dst);
-	odp_packet_hdr_t *const pkt_hdr_src = odp_packet_hdr(pkt_src);
-	const size_t start_offset = ODP_FIELD_SIZEOF(odp_packet_hdr_t, buf_hdr);
-	uint8_t *start_src;
-	uint8_t *start_dst;
-	size_t len;
+	odph_udphdr_t *udp = (odph_udphdr_t *)*parseptr;
+	uint32_t udplen = odp_be_to_cpu_16(udp->length);
 
-	if (pkt_dst == ODP_PACKET_INVALID || pkt_src == ODP_PACKET_INVALID)
-		return -1;
+	if (udplen < sizeof(odph_udphdr_t) ||
+	    udplen > (pkt_hdr->l3_len +
+		      pkt_hdr->l3_offset - pkt_hdr->l4_offset)) {
+		pkt_hdr->error_flags.udp_err = 1;
+	}
 
-	if (pkt_hdr_dst->buf_hdr.size <
-		pkt_hdr_src->frame_len + pkt_hdr_src->frame_offset)
-		return -1;
+	pkt_hdr->l4_len = udplen;
 
-	/* Copy packet header */
-	start_dst = (uint8_t *)pkt_hdr_dst + start_offset;
-	start_src = (uint8_t *)pkt_hdr_src + start_offset;
-	len = ODP_OFFSETOF(odp_packet_hdr_t, buf_data) - start_offset;
-	memcpy(start_dst, start_src, len);
+	*offset += sizeof(odph_udphdr_t);
+	*parseptr += sizeof(odph_udphdr_t);
+}
 
-	/* Copy frame payload */
-	start_dst = (uint8_t *)odp_packet_data(pkt_dst);
-	start_src = (uint8_t *)odp_packet_data(pkt_src);
-	len = pkt_hdr_src->frame_len;
-	memcpy(start_dst, start_src, len);
+/**
+ * Simple packet parser: eth, VLAN, IP, TCP/UDP/ICMP
+ *
+ * Internal function: caller is responsible for passing only valid packet
+ * handles, lengths and offsets (usually done and called in packet input).
+ *
+ * @param pkt        Packet handle
+ */
+int odp_packet_parse(odp_packet_t pkt)
+{
+	odp_packet_hdr_t *const pkt_hdr = odp_packet_hdr(pkt);
+	odph_ethhdr_t *eth;
+	odph_vlanhdr_t *vlan;
+	uint16_t ethtype;
+	uint8_t *parseptr;
+	size_t offset, seglen;
+	uint8_t ip_proto = 0;
 
-	/* Copy useful things from the buffer header */
-	pkt_hdr_dst->buf_hdr.cur_offset = pkt_hdr_src->buf_hdr.cur_offset;
+	/* Reset parser metadata for new parse */
+	pkt_hdr->error_flags.all  = 0;
+	pkt_hdr->input_flags.all  = 0;
+	pkt_hdr->output_flags.all = 0;
+	pkt_hdr->l2_offset        = 0;
+	pkt_hdr->l3_offset        = 0;
+	pkt_hdr->l4_offset        = 0;
+	pkt_hdr->payload_offset   = 0;
+	pkt_hdr->vlan_s_tag       = 0;
+	pkt_hdr->vlan_c_tag       = 0;
+	pkt_hdr->l3_protocol      = 0;
+	pkt_hdr->l4_protocol      = 0;
+
+	/* We only support Ethernet for now */
+	pkt_hdr->input_flags.eth = 1;
 
-	/* Create a copy of the scatter list */
-	odp_buffer_copy_scatter(odp_packet_to_buffer(pkt_dst),
-				odp_packet_to_buffer(pkt_src));
+	if (odp_unlikely(pkt_hdr->frame_len < ODPH_ETH_LEN_MIN)) {
+		pkt_hdr->error_flags.frame_len = 1;
+		goto parse_exit;
+	} else if (pkt_hdr->frame_len > ODPH_ETH_LEN_MAX) {
+		pkt_hdr->input_flags.jumbo = 1;
+	}
 
-	return 0;
-}
+	/* Assume valid L2 header, no CRC/FCS check in SW */
+	pkt_hdr->input_flags.l2 = 1;
 
-void odp_packet_set_ctx(odp_packet_t pkt, const void *ctx)
-{
-	odp_packet_hdr(pkt)->user_ctx = (intptr_t)ctx;
-}
+	eth = (odph_ethhdr_t *)odp_packet_map(pkt, &seglen);
+	offset = sizeof(odph_ethhdr_t);
+	parseptr = (uint8_t *)&eth->type;
+	ethtype = odp_be_to_cpu_16(*((uint16_t *)parseptr));
 
-void *odp_packet_get_ctx(odp_packet_t pkt)
-{
-	return (void *)(intptr_t)odp_packet_hdr(pkt)->user_ctx;
+	/* Parse the VLAN header(s), if present */
+	if (ethtype == ODPH_ETHTYPE_VLAN_OUTER) {
+		pkt_hdr->input_flags.vlan_qinq = 1;
+		pkt_hdr->input_flags.vlan = 1;
+		vlan = (odph_vlanhdr_t *)parseptr;
+		pkt_hdr->vlan_s_tag = ((ethtype << 16) |
+				       odp_be_to_cpu_16(vlan->tci));
+		offset += sizeof(odph_vlanhdr_t);
+		parseptr += sizeof(odph_vlanhdr_t);
+		ethtype = odp_be_to_cpu_16(*((uint16_t *)parseptr));
+	}
+
+	if (ethtype == ODPH_ETHTYPE_VLAN) {
+		pkt_hdr->input_flags.vlan = 1;
+		vlan = (odph_vlanhdr_t *)parseptr;
+		pkt_hdr->vlan_c_tag = ((ethtype << 16) |
+				       odp_be_to_cpu_16(vlan->tci));
+		offset += sizeof(odph_vlanhdr_t);
+		parseptr += sizeof(odph_vlanhdr_t);
+		ethtype = odp_be_to_cpu_16(*((uint16_t *)parseptr));
+	}
+
+	/* Check for SNAP vs. DIX */
+	if (ethtype < ODPH_ETH_LEN_MAX) {
+		pkt_hdr->input_flags.snap = 1;
+		if (ethtype > pkt_hdr->frame_len - offset) {
+			pkt_hdr->error_flags.snap_len = 1;
+			goto parse_exit;
+		}
+		offset   += 8;
+		parseptr += 8;
+		ethtype = odp_be_to_cpu_16(*((uint16_t *)parseptr));
+	}
+
+	/* Consume Ethertype for Layer 3 parse */
+	parseptr += 2;
+
+	/* Set l3_offset+flag only for known ethtypes */
+	pkt_hdr->input_flags.l3 = 1;
+	pkt_hdr->l3_offset = offset;
+	pkt_hdr->l3_protocol = ethtype;
+
+	/* Parse Layer 3 headers */
+	switch (ethtype) {
+	case ODPH_ETHTYPE_IPV4:
+		pkt_hdr->input_flags.ipv4 = 1;
+		ip_proto = parse_ipv4(pkt_hdr, &parseptr, &offset);
+		break;
+
+	case ODPH_ETHTYPE_IPV6:
+		pkt_hdr->input_flags.ipv6 = 1;
+		ip_proto = parse_ipv6(pkt_hdr, &parseptr, &offset);
+		break;
+
+	case ODPH_ETHTYPE_ARP:
+		pkt_hdr->input_flags.arp = 1;
+		ip_proto = 255;  /* Reserved invalid by IANA */
+		break;
+
+	default:
+		pkt_hdr->input_flags.l3 = 0;
+		ip_proto = 255;  /* Reserved invalid by IANA */
+	}
+
+	/* Set l4_offset+flag only for known ip_proto */
+	pkt_hdr->input_flags.l4 = 1;
+	pkt_hdr->l4_offset = offset;
+	pkt_hdr->l4_protocol = ip_proto;
+
+	/* Parse Layer 4 headers */
+	switch (ip_proto) {
+	case ODPH_IPPROTO_ICMP:
+		pkt_hdr->input_flags.icmp = 1;
+		break;
+
+	case ODPH_IPPROTO_TCP:
+		pkt_hdr->input_flags.tcp = 1;
+		parse_tcp(pkt_hdr, &parseptr, &offset);
+		break;
+
+	case ODPH_IPPROTO_UDP:
+		pkt_hdr->input_flags.udp = 1;
+		parse_udp(pkt_hdr, &parseptr, &offset);
+		break;
+
+	case ODPH_IPPROTO_AH:
+	case ODPH_IPPROTO_ESP:
+		pkt_hdr->input_flags.ipsec = 1;
+		break;
+
+	default:
+		pkt_hdr->input_flags.l4 = 0;
+		break;
+	}
+
+	/*
+	 * Anything beyond what we parse here is considered payload.
+	 * Note: Payload is really only relevant for TCP and UDP.  For
+	 * all other protocols, the payload offset will point to the
+	 * final header (ARP, ICMP, AH, ESP, or IP Fragment).
+	 */
+	pkt_hdr->payload_offset = offset;
+
+parse_exit:
+	return pkt_hdr->error_flags.all != 0;
 }
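
The reworked parser takes no length/offset arguments and reports problems via
its return value and error_flags.  A receive-side sketch, assuming
odp_packet_set_len() remains available (it is still called from the socket
code below); classify_rx() is an illustrative name:

#include <odp.h>

static int classify_rx(odp_packet_t pkt, size_t recv_bytes)
{
	odp_packet_set_len(pkt, recv_bytes);

	if (odp_packet_parse(pkt) != 0)
		return -1;		/* parser set one or more error flags */

	if (odp_packet_inflag_ipv4(pkt) && odp_packet_inflag_udp(pkt)) {
		size_t seglen;
		void *l4 = odp_packet_l4_map(pkt, &seglen);

		/* application-specific UDP handling would start at l4 */
		(void)l4;
	}

	return 0;
}
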
diff --git a/platform/linux-generic/odp_packet_flags.c b/platform/linux-generic/odp_packet_flags.c
deleted file mode 100644
index 06fdeed..0000000
--- a/platform/linux-generic/odp_packet_flags.c
+++ /dev/null
@@ -1,202 +0,0 @@ 
-/* Copyright (c) 2014, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier:     BSD-3-Clause
- */
-
-#include <odp_packet_flags.h>
-#include <odp_packet_internal.h>
-
-
-int odp_packet_error(odp_packet_t pkt)
-{
-	return (odp_packet_hdr(pkt)->error_flags.all != 0);
-}
-
-/* Get Error Flags */
-
-int odp_packet_errflag_frame_len(odp_packet_t pkt)
-{
-	return odp_packet_hdr(pkt)->error_flags.frame_len;
-}
-
-/* Get Input Flags */
-
-int odp_packet_inflag_l2(odp_packet_t pkt)
-{
-	return odp_packet_hdr(pkt)->input_flags.l2;
-}
-
-int odp_packet_inflag_l3(odp_packet_t pkt)
-{
-	return odp_packet_hdr(pkt)->input_flags.l3;
-}
-
-int odp_packet_inflag_l4(odp_packet_t pkt)
-{
-	return odp_packet_hdr(pkt)->input_flags.l4;
-}
-
-int odp_packet_inflag_eth(odp_packet_t pkt)
-{
-	return odp_packet_hdr(pkt)->input_flags.eth;
-}
-
-int odp_packet_inflag_jumbo(odp_packet_t pkt)
-{
-	return odp_packet_hdr(pkt)->input_flags.jumbo;
-}
-
-int odp_packet_inflag_vlan(odp_packet_t pkt)
-{
-	return odp_packet_hdr(pkt)->input_flags.vlan;
-}
-
-int odp_packet_inflag_vlan_qinq(odp_packet_t pkt)
-{
-	return odp_packet_hdr(pkt)->input_flags.vlan_qinq;
-}
-
-int odp_packet_inflag_arp(odp_packet_t pkt)
-{
-	return odp_packet_hdr(pkt)->input_flags.arp;
-}
-
-int odp_packet_inflag_ipv4(odp_packet_t pkt)
-{
-	return odp_packet_hdr(pkt)->input_flags.ipv4;
-}
-
-int odp_packet_inflag_ipv6(odp_packet_t pkt)
-{
-	return odp_packet_hdr(pkt)->input_flags.ipv6;
-}
-
-int odp_packet_inflag_ipfrag(odp_packet_t pkt)
-{
-	return odp_packet_hdr(pkt)->input_flags.ipfrag;
-}
-
-int odp_packet_inflag_ipopt(odp_packet_t pkt)
-{
-	return odp_packet_hdr(pkt)->input_flags.ipopt;
-}
-
-int odp_packet_inflag_ipsec(odp_packet_t pkt)
-{
-	return odp_packet_hdr(pkt)->input_flags.ipsec;
-}
-
-int odp_packet_inflag_udp(odp_packet_t pkt)
-{
-	return odp_packet_hdr(pkt)->input_flags.udp;
-}
-
-int odp_packet_inflag_tcp(odp_packet_t pkt)
-{
-	return odp_packet_hdr(pkt)->input_flags.tcp;
-}
-
-int odp_packet_inflag_sctp(odp_packet_t pkt)
-{
-	return odp_packet_hdr(pkt)->input_flags.sctp;
-}
-
-int odp_packet_inflag_icmp(odp_packet_t pkt)
-{
-	return odp_packet_hdr(pkt)->input_flags.icmp;
-}
-
-/* Set Output Flags */
-
-void odp_packet_outflag_l4_chksum(odp_packet_t pkt)
-{
-	odp_packet_hdr(pkt)->output_flags.l4_chksum = 1;
-}
-
-/* Set Input Flags */
-
-void odp_packet_set_inflag_l2(odp_packet_t pkt, int val)
-{
-	odp_packet_hdr(pkt)->input_flags.l2 = val;
-}
-
-void odp_packet_set_inflag_l3(odp_packet_t pkt, int val)
-{
-	odp_packet_hdr(pkt)->input_flags.l3 = val;
-}
-
-void odp_packet_set_inflag_l4(odp_packet_t pkt, int val)
-{
-	odp_packet_hdr(pkt)->input_flags.l4 = val;
-}
-
-void odp_packet_set_inflag_eth(odp_packet_t pkt, int val)
-{
-	odp_packet_hdr(pkt)->input_flags.eth = val;
-}
-
-void odp_packet_set_inflag_jumbo(odp_packet_t pkt, int val)
-{
-	odp_packet_hdr(pkt)->input_flags.jumbo = val;
-}
-
-void odp_packet_set_inflag_vlan(odp_packet_t pkt, int val)
-{
-	odp_packet_hdr(pkt)->input_flags.vlan = val;
-}
-
-void odp_packet_set_inflag_vlan_qinq(odp_packet_t pkt, int val)
-{
-	odp_packet_hdr(pkt)->input_flags.vlan_qinq = val;
-}
-
-void odp_packet_set_inflag_arp(odp_packet_t pkt, int val)
-{
-	odp_packet_hdr(pkt)->input_flags.arp = val;
-}
-
-void odp_packet_set_inflag_ipv4(odp_packet_t pkt, int val)
-{
-	odp_packet_hdr(pkt)->input_flags.ipv4 = val;
-}
-
-void odp_packet_set_inflag_ipv6(odp_packet_t pkt, int val)
-{
-	odp_packet_hdr(pkt)->input_flags.ipv6 = val;
-}
-
-void odp_packet_set_inflag_ipfrag(odp_packet_t pkt, int val)
-{
-	odp_packet_hdr(pkt)->input_flags.ipfrag = val;
-}
-
-void odp_packet_set_inflag_ipopt(odp_packet_t pkt, int val)
-{
-	odp_packet_hdr(pkt)->input_flags.ipopt = val;
-}
-
-void odp_packet_set_inflag_ipsec(odp_packet_t pkt, int val)
-{
-	odp_packet_hdr(pkt)->input_flags.ipsec = val;
-}
-
-void odp_packet_set_inflag_udp(odp_packet_t pkt, int val)
-{
-	odp_packet_hdr(pkt)->input_flags.udp = val;
-}
-
-void odp_packet_set_inflag_tcp(odp_packet_t pkt, int val)
-{
-	odp_packet_hdr(pkt)->input_flags.tcp = val;
-}
-
-void odp_packet_set_inflag_sctp(odp_packet_t pkt, int val)
-{
-	odp_packet_hdr(pkt)->input_flags.sctp = val;
-}
-
-void odp_packet_set_inflag_icmp(odp_packet_t pkt, int val)
-{
-	odp_packet_hdr(pkt)->input_flags.icmp = val;
-}
diff --git a/platform/linux-generic/odp_packet_io.c b/platform/linux-generic/odp_packet_io.c
index f35193f..5c6f628 100644
--- a/platform/linux-generic/odp_packet_io.c
+++ b/platform/linux-generic/odp_packet_io.c
@@ -369,7 +369,8 @@  odp_queue_t odp_pktio_outq_getdef(odp_pktio_t id)
 
 int pktout_enqueue(queue_entry_t *qentry, odp_buffer_hdr_t *buf_hdr)
 {
-	odp_packet_t pkt = odp_packet_from_buffer(buf_hdr->handle.handle);
+	odp_packet_t pkt =
+		odp_packet_from_buf_internal(odp_hdr_to_buf(buf_hdr));
 	int len = 1;
 	int nbr;
 
@@ -391,7 +392,9 @@  int pktout_enq_multi(queue_entry_t *qentry, odp_buffer_hdr_t *buf_hdr[],
 	int i;
 
 	for (i = 0; i < num; ++i)
-		pkt_tbl[i] = odp_packet_from_buffer(buf_hdr[i]->handle.handle);
+		pkt_tbl[i] =
+			odp_packet_from_buf_internal(
+				odp_hdr_to_buf(buf_hdr[i]));
 
 	nbr = odp_pktio_send(qentry->s.pktout, pkt_tbl, num);
 	return (nbr == num ? 0 : -1);
diff --git a/platform/linux-generic/odp_packet_socket.c b/platform/linux-generic/odp_packet_socket.c
index 0492d1e..da66cf0 100644
--- a/platform/linux-generic/odp_packet_socket.c
+++ b/platform/linux-generic/odp_packet_socket.c
@@ -34,13 +34,13 @@ 
 #include <errno.h>
 #include <sys/syscall.h>
 
+#include <odp_spinlock.h>
 #include <odp_packet_socket.h>
 #include <odp_packet_internal.h>
 #include <odp_hints.h>
 
 #include <odph_eth.h>
 #include <odph_ip.h>
-#include <odph_packet.h>
 
 /** Provide a sendmmsg wrapper for systems with no libc or kernel support.
  *  As it is implemented as a weak symbol, it has zero effect on systems
@@ -178,28 +178,19 @@  int setup_pkt_sock(pkt_sock_t *const pkt_sock, const char *netdev,
 	unsigned int if_idx;
 	struct ifreq ethreq;
 	struct sockaddr_ll sa_ll;
-	odp_packet_t pkt;
-	uint8_t *pkt_buf;
-	uint8_t *l2_hdr;
 
 	if (pool == ODP_BUFFER_POOL_INVALID)
 		return -1;
 	pkt_sock->pool = pool;
 
-	pkt = odph_packet_alloc(pool);
-	if (!odph_packet_is_valid(pkt))
-		return -1;
-
-	pkt_buf = odp_packet_addr(pkt);
-	l2_hdr = ETHBUF_ALIGN(pkt_buf);
 	/* Store eth buffer offset for pkt buffers from this pool */
-	pkt_sock->frame_offset = (uintptr_t)l2_hdr - (uintptr_t)pkt_buf;
+	pkt_sock->frame_offset = 0;
 	/* pkt buffer size */
-	pkt_sock->buf_size = odph_packet_buf_size(pkt);
+	pkt_sock->buf_size = odp_buffer_pool_segment_size(pool);
 	/* max frame len taking into account the l2-offset */
-	pkt_sock->max_frame_len = pkt_sock->buf_size - pkt_sock->frame_offset;
-
-	odph_packet_free(pkt);
+	pkt_sock->max_frame_len = pkt_sock->buf_size -
+		odp_buffer_pool_headroom(pool) -
+		odp_buffer_pool_tailroom(pool);
 
 	odp_spinlock_lock(&raw_sockets_lock);
 
@@ -284,7 +275,6 @@  int recv_pkt_sock_basic(pkt_sock_t *const pkt_sock,
 	int const sockfd = pkt_sock->sockfd;
 	odp_packet_t pkt = ODP_PACKET_INVALID;
 	uint8_t *pkt_buf;
-	uint8_t *l2_hdr;
 	int nb_rx = 0;
 
 	/*  recvfrom:
@@ -297,15 +287,14 @@  int recv_pkt_sock_basic(pkt_sock_t *const pkt_sock,
 
 	for (i = 0; i < len; i++) {
 		if (odp_likely(pkt == ODP_PACKET_INVALID)) {
-			pkt = odph_packet_alloc(pkt_sock->pool);
+			pkt = odp_packet_alloc(pkt_sock->pool);
 			if (odp_unlikely(pkt == ODP_PACKET_INVALID))
 				break;
 		}
 
 		pkt_buf = odp_packet_addr(pkt);
-		l2_hdr = pkt_buf + pkt_sock->frame_offset;
 
-		recv_bytes = recvfrom(sockfd, l2_hdr,
+		recv_bytes = recvfrom(sockfd, pkt_buf,
 				      pkt_sock->max_frame_len, MSG_DONTWAIT,
 				      (struct sockaddr *)&sll, &addrlen);
 		/* no data or error: free recv buf and break out of loop */
@@ -316,7 +305,8 @@  int recv_pkt_sock_basic(pkt_sock_t *const pkt_sock,
 			continue;
 
 		/* Parse and set packet header data */
-		odp_packet_parse(pkt, recv_bytes, pkt_sock->frame_offset);
+		odp_packet_set_len(pkt, recv_bytes);
+		odp_packet_parse(pkt);
 
 		pkt_table[nb_rx] = pkt;
 		pkt = ODP_PACKET_INVALID;
@@ -324,7 +314,7 @@  int recv_pkt_sock_basic(pkt_sock_t *const pkt_sock,
 	} /* end for() */
 
 	if (odp_unlikely(pkt != ODP_PACKET_INVALID))
-		odph_packet_free(pkt);
+		odp_packet_free(pkt);
 
 	return nb_rx;
 }
@@ -350,8 +340,7 @@  int send_pkt_sock_basic(pkt_sock_t *const pkt_sock,
 	while (i < len) {
 		pkt = pkt_table[i];
 
-		frame = odp_packet_l2(pkt);
-		frame_len = odp_packet_get_len(pkt);
+		frame = odp_packet_map(pkt, &frame_len);
 
 		ret = send(sockfd, frame, frame_len, flags);
 		if (odp_unlikely(ret == -1)) {
@@ -367,8 +356,10 @@  int send_pkt_sock_basic(pkt_sock_t *const pkt_sock,
 	}			/* end while */
 	nb_tx = i;
 
-	for (i = 0; i < len; i++)
-		odph_packet_free(pkt_table[i]);
+	for (i = 0; i < len; i++) {
+		if (odp_packet_decr_refcount(pkt_table[i], 1) == 0)
+			odp_packet_free(pkt_table[i]);
+	}
 
 	return nb_tx;
 }
@@ -395,7 +386,7 @@  int recv_pkt_sock_mmsg(pkt_sock_t *const pkt_sock,
 	memset(msgvec, 0, sizeof(msgvec));
 
 	for (i = 0; i < (int)len; i++) {
-		pkt_table[i] = odph_packet_alloc(pkt_sock->pool);
+		pkt_table[i] = odp_packet_alloc(pkt_sock->pool);
 		if (odp_unlikely(pkt_table[i] == ODP_PACKET_INVALID))
 			break;
 
@@ -417,13 +408,12 @@  int recv_pkt_sock_mmsg(pkt_sock_t *const pkt_sock,
 		/* Don't receive packets sent by ourselves */
 		if (odp_unlikely(ethaddrs_equal(pkt_sock->if_mac,
 						eth_hdr->h_source))) {
-			odph_packet_free(pkt_table[i]);
+			odp_packet_free(pkt_table[i]);
 			continue;
 		}
 
 		/* Parse and set packet header data */
-		odp_packet_parse(pkt_table[i], msgvec[i].msg_len,
-				 pkt_sock->frame_offset);
+		odp_packet_parse(pkt_table[i]);
 
 		pkt_table[nb_rx] = pkt_table[i];
 		nb_rx++;
@@ -431,7 +421,7 @@  int recv_pkt_sock_mmsg(pkt_sock_t *const pkt_sock,
 
 	/* Free unused pkt buffers */
 	for (; i < msgvec_len; i++)
-		odph_packet_free(pkt_table[i]);
+		odp_packet_free(pkt_table[i]);
 
 	return nb_rx;
 }
@@ -457,8 +447,8 @@  int send_pkt_sock_mmsg(pkt_sock_t *const pkt_sock,
 	memset(msgvec, 0, sizeof(msgvec));
 
 	for (i = 0; i < len; i++) {
-		uint8_t *const frame = odp_packet_l2(pkt_table[i]);
-		const size_t frame_len = odp_packet_get_len(pkt_table[i]);
+		size_t frame_len;
+		uint8_t *const frame = odp_packet_map(pkt_table[i], &frame_len);
 		iovecs[i].iov_base = frame;
 		iovecs[i].iov_len = frame_len;
 		msgvec[i].msg_hdr.msg_iov = &iovecs[i];
@@ -472,8 +462,10 @@  int send_pkt_sock_mmsg(pkt_sock_t *const pkt_sock,
 		flags = 0;	/* blocking for next rounds */
 	}
 
-	for (i = 0; i < len; i++)
-		odph_packet_free(pkt_table[i]);
+	for (i = 0; i < len; i++) {
+		if (odp_packet_decr_refcount(pkt_table[i], 1) == 0)
+			odp_packet_free(pkt_table[i]);
+	}
 
 	return len;
 }
@@ -537,7 +529,6 @@  static inline void mmap_tx_user_ready(struct tpacket2_hdr *hdr)
 static inline unsigned pkt_mmap_v2_rx(int sock, struct ring *ring,
 				      odp_packet_t pkt_table[], unsigned len,
 				      odp_buffer_pool_t pool,
-				      size_t frame_offset,
 				      unsigned char if_mac[])
 {
 	union frame_map ppd;
@@ -570,18 +561,18 @@  static inline unsigned pkt_mmap_v2_rx(int sock, struct ring *ring,
 				continue;
 			}
 
-			pkt_table[i] = odph_packet_alloc(pool);
+			pkt_table[i] = odp_packet_alloc(pool);
 			if (odp_unlikely(pkt_table[i] == ODP_PACKET_INVALID))
 				break;
 
-			l2_hdr = odp_packet_addr(pkt_table[i])
-				 + frame_offset;
+			l2_hdr = odp_packet_addr(pkt_table[i]);
+			odp_packet_set_len(pkt_table[i], pkt_len);
 			memcpy(l2_hdr, pkt_buf, pkt_len);
 
 			mmap_rx_user_ready(ppd.raw);
 
 			/* Parse and set packet header data */
-			odp_packet_parse(pkt_table[i], pkt_len, frame_offset);
+			odp_packet_parse(pkt_table[i]);
 
 			frame_num = next_frame_num;
 			i++;
@@ -613,8 +604,7 @@  static inline unsigned pkt_mmap_v2_tx(int sock, struct ring *ring,
 
 			next_frame_num = (frame_num + 1) % ring->rd_num;
 
-			pkt_buf = odp_packet_l2(pkt_table[i]);
-			pkt_len = odp_packet_get_len(pkt_table[i]);
+			pkt_buf = odp_packet_map(pkt_table[i], &pkt_len);
 
 			ppd.v2->tp_h.tp_snaplen = pkt_len;
 			ppd.v2->tp_h.tp_len = pkt_len;
@@ -624,7 +614,8 @@  static inline unsigned pkt_mmap_v2_tx(int sock, struct ring *ring,
 
 			mmap_tx_user_ready(ppd.raw);
 
-			odph_packet_free(pkt_table[i]);
+			if (odp_packet_decr_refcount(pkt_table[i], 1) == 0)
+				odp_packet_free(pkt_table[i]);
 			frame_num = next_frame_num;
 			i++;
 		} else {
@@ -805,9 +796,6 @@  static int mmap_store_hw_addr(pkt_sock_mmap_t *const pkt_sock,
 int setup_pkt_sock_mmap(pkt_sock_mmap_t *const pkt_sock, const char *netdev,
 			odp_buffer_pool_t pool, int fanout)
 {
-	odp_packet_t pkt;
-	uint8_t *pkt_buf;
-	uint8_t *l2_hdr;
 	int if_idx;
 	int ret = 0;
 
@@ -816,16 +804,8 @@  int setup_pkt_sock_mmap(pkt_sock_mmap_t *const pkt_sock, const char *netdev,
 	if (pool == ODP_BUFFER_POOL_INVALID)
 		return -1;
 
-	pkt = odph_packet_alloc(pool);
-	if (!odph_packet_is_valid(pkt))
-		return -1;
-
-	pkt_buf = odp_packet_addr(pkt);
-	l2_hdr = ETHBUF_ALIGN(pkt_buf);
 	/* Store eth buffer offset for pkt buffers from this pool */
-	pkt_sock->frame_offset = (uintptr_t)l2_hdr - (uintptr_t)pkt_buf;
-
-	odph_packet_free(pkt);
+	pkt_sock->frame_offset = 0;
 
 	pkt_sock->pool = pool;
 	pkt_sock->sockfd = mmap_pkt_socket();
@@ -892,7 +872,7 @@  int recv_pkt_sock_mmap(pkt_sock_mmap_t *const pkt_sock,
 {
 	return pkt_mmap_v2_rx(pkt_sock->rx_ring.sock, &pkt_sock->rx_ring,
 			      pkt_table, len, pkt_sock->pool,
-			      pkt_sock->frame_offset, pkt_sock->if_mac);
+			      pkt_sock->if_mac);
 }
 
 /*
diff --git a/platform/linux-generic/odp_queue.c b/platform/linux-generic/odp_queue.c
index 1318bcd..af3a330 100644
--- a/platform/linux-generic/odp_queue.c
+++ b/platform/linux-generic/odp_queue.c
@@ -9,8 +9,9 @@ 
 #include <odp_std_types.h>
 #include <odp_align.h>
 #include <odp_buffer.h>
-#include <odp_buffer_internal.h>
 #include <odp_buffer_pool_internal.h>
+#include <odp_buffer_internal.h>
+#include <odp_buffer_inlines.h>
 #include <odp_internal.h>
 #include <odp_shared_memory.h>
 #include <odp_schedule_internal.h>
@@ -422,7 +423,7 @@  int odp_queue_deq_multi(odp_queue_t handle, odp_buffer_t buf[], int num)
 	ret = queue->s.dequeue_multi(queue, buf_hdr, num);
 
 	for (i = 0; i < ret; i++)
-		buf[i] = buf_hdr[i]->handle.handle;
+		buf[i] = odp_hdr_to_buf(buf_hdr[i]);
 
 	return ret;
 }
@@ -437,7 +438,7 @@  odp_buffer_t odp_queue_deq(odp_queue_t handle)
 	buf_hdr = queue->s.dequeue(queue);
 
 	if (buf_hdr)
-		return buf_hdr->handle.handle;
+		return odp_hdr_to_buf(buf_hdr);
 
 	return ODP_BUFFER_INVALID;
 }
diff --git a/platform/linux-generic/odp_schedule.c b/platform/linux-generic/odp_schedule.c
index 1bf819b..f30b877 100644
--- a/platform/linux-generic/odp_schedule.c
+++ b/platform/linux-generic/odp_schedule.c
@@ -83,8 +83,8 @@  int odp_schedule_init_global(void)
 {
 	odp_shm_t shm;
 	odp_buffer_pool_t pool;
-	void *pool_base;
 	int i, j;
+	odp_buffer_pool_param_t params;
 
 	ODP_DBG("Schedule init ... ");
 
@@ -99,20 +99,12 @@  int odp_schedule_init_global(void)
 		return -1;
 	}
 
-	shm = odp_shm_reserve("odp_sched_pool",
-			      SCHED_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0);
+	params.buf_num  = SCHED_POOL_SIZE/sizeof(queue_desc_t);
+	params.buf_size = sizeof(queue_desc_t);
+	params.buf_type = ODP_BUFFER_TYPE_RAW;
+	params.buf_opts = ODP_BUFFER_OPTS_UNSEGMENTED;
 
-	pool_base = odp_shm_addr(shm);
-
-	if (pool_base == NULL) {
-		ODP_ERR("Schedule init: Shm reserve failed.\n");
-		return -1;
-	}
-
-	pool = odp_buffer_pool_create("odp_sched_pool", pool_base,
-				      SCHED_POOL_SIZE, sizeof(queue_desc_t),
-				      ODP_CACHE_LINE_SIZE,
-				      ODP_BUFFER_TYPE_RAW);
+	pool = odp_buffer_pool_create("odp_sched_pool", &params, NULL);
 
 	if (pool == ODP_BUFFER_POOL_INVALID) {
 		ODP_ERR("Schedule init: Pool create failed.\n");
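
This hunk and the odp_timer_ping.c change below migrate to the
parameter-struct form of odp_buffer_pool_create(), where the implementation
owns the pool storage instead of taking a caller-reserved shm region.  A
hypothetical packet-pool setup under the new API (pool name, counts and sizes
are arbitrary):

#include <odp.h>

static odp_buffer_pool_t make_pkt_pool(void)
{
	odp_buffer_pool_param_t params;

	params.buf_num  = 1024;			/* buffers in the pool */
	params.buf_size = 2048;			/* bytes per buffer    */
	params.buf_type = ODP_BUFFER_TYPE_PACKET;
	params.buf_opts = ODP_BUFFER_OPTS_NONE;

	return odp_buffer_pool_create("pkt_pool", &params, NULL);
}
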
diff --git a/test/api_test/odp_timer_ping.c b/test/api_test/odp_timer_ping.c
index 65e3834..90c1ae6 100644
--- a/test/api_test/odp_timer_ping.c
+++ b/test/api_test/odp_timer_ping.c
@@ -318,9 +318,8 @@  int main(int argc ODP_UNUSED, char *argv[] ODP_UNUSED)
 	ping_arg_t pingarg;
 	odp_queue_t queue;
 	odp_buffer_pool_t pool;
-	void *pool_base;
 	int i;
-	odp_shm_t shm;
+	odp_buffer_pool_param_t params;
 
 	if (odp_test_global_init() != 0)
 		return -1;
@@ -333,14 +332,14 @@  int main(int argc ODP_UNUSED, char *argv[] ODP_UNUSED)
 	/*
 	 * Create message pool
 	 */
-	shm = odp_shm_reserve("msg_pool",
-			      MSG_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0);
-	pool_base = odp_shm_addr(shm);
-
-	pool = odp_buffer_pool_create("msg_pool", pool_base, MSG_POOL_SIZE,
-				      BUF_SIZE,
-				      ODP_CACHE_LINE_SIZE,
-				      ODP_BUFFER_TYPE_RAW);
+
+	params.buf_num  = MSG_POOL_SIZE/BUF_SIZE;
+	params.buf_size = BUF_SIZE;
+	params.buf_type = ODP_BUFFER_TYPE_RAW;
+	params.buf_opts = ODP_BUFFER_OPTS_NONE;
+
+	pool = odp_buffer_pool_create("msg_pool", &params, NULL);
+
 	if (pool == ODP_BUFFER_POOL_INVALID) {
 		ODP_ERR("Pool create failed.\n");
 		return -1;