
[PATCHv7] linux-generic: mmap: jumbo frames support

Message ID 1425565290-23836-1-git-send-email-maxim.uvarov@linaro.org
State Superseded
Headers show

Commit Message

Maxim Uvarov March 5, 2015, 2:21 p.m. UTC
Support jumbo frames in linux-generic with unsegmented buffers.
The pktio test is also adjusted to work with 9*1024 = 9216 byte packets.
https://bugs.linaro.org/show_bug.cgi?id=509

Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
---
Ciprian and I tested this v7 version on both his and my environments. It
looks like performance is good with the number of blocks equal to the number
of CPUs and the number of frames equal to the pool's packet count.

On virtual devices for linux-generic I have the following numbers:
l2fwd in burst mode:
TX v3: 20126 kB/s RX v3: 0 kB/s
TX v3: 21058 kB/s RX v3: 0 kB/s
TX v3: 20982 kB/s RX v3: 0 kB/s

l2fwd using scheduler:
TX v3: 175 kB/s RX v3: 0 kB/s
TX v3: 207 kB/s RX v3: 0 kB/s
TX v3: 235 kB/s RX v3: 0 kB/s
TX v3: 219 kB/s RX v3: 0 kB/s

I still think there are a lot of things to improve in linux-generic to get
better numbers, but this patch should be good to go because it takes pool
settings into account and supports jumbo frames. At the very least, we are
sure there is no performance degradation.
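
For reference, here is a small standalone sketch of the sizing math that
mmap_fill_ring() now does, with illustrative values (the 9216-byte jumbo
pool block from the pktio test and 32 buffers; the exact TPACKET_HDRLEN
depends on your kernel headers):

    /* Sketch only, not part of the patch. */
    #include <linux/if_packet.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int pz = getpagesize();   /* assume 4096 below */
            unsigned blk_size = 9216; /* example pool block size */
            unsigned buf_num = 32;    /* example pool buffer count */

            /* Frame size rounded up to a page boundary. */
            unsigned frame_size = (blk_size + TPACKET_HDRLEN +
                                   TPACKET_ALIGNMENT + (pz - 1)) & -pz;
            /* Room for all pool buffers, also page aligned. */
            unsigned block_size = (frame_size * buf_num + (pz - 1)) & -pz;

            printf("frame %u, block %u\n", frame_size, block_size);
            return 0;
    }

With 4096-byte pages and typical kernel headers this prints a 12288-byte
frame (three pages) and a 393216-byte block, i.e. one block holds all 32
frames.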

Best regards,
Maxim.

 v7: - add page alignment for the frame size.
 v6: - rewrite mmap_fill_ring to take into account pool settings.
 v5: - test_4_jumbo_pkts -> test_jumbo
     - do not use the stack for the jumbo packet; simply allocate it.

 v4: - fix work on real interfaces (make check under root)
     - better define jumbo packet payload size



 platform/linux-generic/odp_packet_socket.c | 42 ++++++++++---
 test/validation/odp_pktio.c                | 95 +++++++++++++++++++++++-------
 test/validation/odp_pktio_run              |  4 +-
 3 files changed, 109 insertions(+), 32 deletions(-)

Comments

Bill Fischofer March 5, 2015, 3:38 p.m. UTC | #1
On Thu, Mar 5, 2015 at 8:21 AM, Maxim Uvarov <maxim.uvarov@linaro.org>
wrote:

> [...]
> +       uint32_t pool_id = pool_handle_to_index(pool_hdl);
> +       pool_entry_t *pool = get_pool_entry(pool_id);
> +
> +       if (pool == NULL)
> +               ODP_ABORT("NULL pool entry");
>

This is a limitation of the existing strong type definitions. pool is of
type odp_pool_t, and this should be ODP_POOL_INVALID, not NULL.
Unfortunately, because we're using pointer types as handles, C doesn't flag
NULL as a type mismatch here, since it's considered a "universal pointer".
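
For example, a minimal sketch of why the compiler stays silent (the typedef
mirrors the pointer-based handle scheme described above; the names and the
ODP_POOL_INVALID value here are illustrative, not the real definitions):

    /* Handles are pointers to incomplete struct types. */
    typedef struct odp_pool_s *odp_pool_t;
    #define ODP_POOL_INVALID ((odp_pool_t)NULL) /* hypothetical value */

    void check(odp_pool_t pool_hdl)
    {
            if (pool_hdl == NULL)             /* compiles silently: NULL
                                                 converts to any pointer */
                    return;
            if (pool_hdl == ODP_POOL_INVALID) /* the intended check */
                    return;
    }

If the handle were an integer type instead, the NULL comparison could draw
a diagnostic, depending on how NULL is defined.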


> [...]
>  #define PKT_BUF_NUM            32
>  #define PKT_BUF_SIZE           1856
> +#define PKT_BUF_JUMBO_SIZE     9216
>

Since we're revving this one more time, it would be better to specify this
as (9*1024) rather than 9216, to make it clear that this is 9K and not just
a magic number. But it's not a big deal if you prefer it this way.
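
That is, simply:

    #define PKT_BUF_JUMBO_SIZE     (9*1024)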


> [...]
>         l4_off = odp_packet_l4_offset(pkt);
> -       if (l4_off) {
> -               odp_packet_copydata_out(pkt, l4_off+ODPH_UDPHDR_LEN,
> -                                       sizeof(data), &data);
> +       if (!l4_off)
> +               return TEST_SEQ_INVALID;
>

Incorrect test. This should be:
            if (l4_off == ODP_PACKET_OFFSET_INVALID) ...

We defined that as the value that's set if the packet doesn't contain an L4
header.
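
Applied to the hunk above, the check would read (a sketch keeping the
existing l4_off variable):

    l4_off = odp_packet_l4_offset(pkt);
    if (l4_off == ODP_PACKET_OFFSET_INVALID)
            return TEST_SEQ_INVALID;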


Maxim Uvarov March 6, 2015, 9:36 a.m. UTC | #2
On 03/05/15 18:38, Bill Fischofer wrote:
>
>     +       if (pool == NULL)
>     +               ODP_ABORT("NULL pool entry");
>
>
> This is a limitation with the existing strong type definition. pool is 
> of type odp_pool_t and this should be ODP_POOL_INVALID, not NULL.  
> Unfortunately because we're using pointer types as handles C doesn't 
> flag NULL as a type mismatch here since it's considered a "universal 
> pointer".

In that case pool is the pool entry, not the pool handle (pool_hdl). I will
rename the variable if it's confusing, and check both the entry and the
handle.
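
A sketch of what that could look like (names are illustrative):

    pool_entry_t *pool_entry;

    if (pool_hdl == ODP_POOL_INVALID)
            ODP_ABORT("invalid pool handle");

    pool_entry = get_pool_entry(pool_handle_to_index(pool_hdl));
    if (pool_entry == NULL)
            ODP_ABORT("NULL pool entry");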

Maxim.

Patch

diff --git a/platform/linux-generic/odp_packet_socket.c b/platform/linux-generic/odp_packet_socket.c
index 55c212e..b78353e 100644
--- a/platform/linux-generic/odp_packet_socket.c
+++ b/platform/linux-generic/odp_packet_socket.c
@@ -584,11 +584,35 @@  static inline unsigned pkt_mmap_v2_tx(int sock, struct ring *ring,
 	return i;
 }
 
-static void mmap_fill_ring(struct ring *ring, unsigned blocks)
+static void mmap_fill_ring(struct ring *ring, odp_pool_t pool_hdl, int fanout)
 {
-	ring->req.tp_block_size = getpagesize() << 2;
-	ring->req.tp_frame_size = TPACKET_ALIGNMENT << 7;
-	ring->req.tp_block_nr = blocks;
+	/* @todo add Huge Pages support */
+	int pz = getpagesize();
+	uint32_t pool_id = pool_handle_to_index(pool_hdl);
+	pool_entry_t *pool = get_pool_entry(pool_id);
+
+	if (pool == NULL)
+		ODP_ABORT("NULL pool entry");
+
+	/* The frame has to capture a full packet, which fits in the pool block. */
+	ring->req.tp_frame_size = (pool->s.blk_size +
+				   TPACKET_HDRLEN + TPACKET_ALIGNMENT +
+				   (pz - 1)) & (-pz);
+
+	/* Calculate how many pages we need to hold all pool packets
+	 * and align the size to a page boundary.
+	 */
+	ring->req.tp_block_size = (ring->req.tp_frame_size * pool->s.buf_num
+				   + (pz - 1)) & (-pz);
+
+	if (!fanout) {
+		/* Single socket is in use. Use 1 block with buf_num frames. */
+		ring->req.tp_block_nr = 1;
+	} else {
+		/* Fanout is in use; traffic is most likely split according to
+		 * the number of CPU threads. Use one block per CPU. */
+		ring->req.tp_block_nr = odp_cpu_count();
+	}
 
 	ring->req.tp_frame_nr = ring->req.tp_block_size /
 				ring->req.tp_frame_size * ring->req.tp_block_nr;
@@ -613,10 +637,10 @@  static int mmap_set_packet_loss_discard(int sock)
 	return 0;
 }
 
-static int mmap_setup_ring(int sock, struct ring *ring, int type)
+static int mmap_setup_ring(int sock, struct ring *ring, int type,
+			   odp_pool_t pool_hdl, int fanout)
 {
 	int ret = 0;
-	unsigned blocks = 256;
 
 	ring->sock = sock;
 	ring->type = type;
@@ -628,7 +652,7 @@  static int mmap_setup_ring(int sock, struct ring *ring, int type)
 			return -1;
 	}
 
-	mmap_fill_ring(ring, blocks);
+	mmap_fill_ring(ring, pool_hdl, fanout);
 
 	ret = setsockopt(sock, SOL_PACKET, type, &ring->req, sizeof(ring->req));
 	if (ret == -1) {
@@ -772,12 +796,12 @@  int setup_pkt_sock_mmap(pkt_sock_mmap_t *const pkt_sock, const char *netdev,
 		return -1;
 
 	ret = mmap_setup_ring(pkt_sock->sockfd, &pkt_sock->tx_ring,
-			      PACKET_TX_RING);
+			      PACKET_TX_RING, pool, fanout);
 	if (ret != 0)
 		return -1;
 
 	ret = mmap_setup_ring(pkt_sock->sockfd, &pkt_sock->rx_ring,
-			      PACKET_RX_RING);
+			      PACKET_RX_RING, pool, fanout);
 	if (ret != 0)
 		return -1;
 
diff --git a/test/validation/odp_pktio.c b/test/validation/odp_pktio.c
index 8df367d..f8b9ecc 100644
--- a/test/validation/odp_pktio.c
+++ b/test/validation/odp_pktio.c
@@ -15,6 +15,10 @@ 
 
 #define PKT_BUF_NUM            32
 #define PKT_BUF_SIZE           1856
+#define PKT_BUF_JUMBO_SIZE     9216
+#define PKT_BUF_JUMBO_MAX_PAYLOAD (PKT_BUF_JUMBO_SIZE -\
+				   (ODPH_UDPHDR_LEN +\
+				   ODPH_IPV4HDR_LEN + ODPH_ETHHDR_LEN))
 #define MAX_NUM_IFACES         2
 #define TEST_SEQ_INVALID       ((uint32_t)~0)
 #define TEST_SEQ_MAGIC         0x92749451
@@ -33,12 +37,21 @@  typedef struct {
 	odp_queue_t inq;
 } pktio_info_t;
 
-/** structure of test packet UDP payload */
-typedef struct {
+typedef struct ODP_PACKED {
 	uint32be_t magic;
 	uint32be_t seq;
+} pkt_head_t;
+
+/** structure of test packet UDP payload */
+typedef struct ODP_PACKED {
+	pkt_head_t head;
+	char data[PKT_BUF_JUMBO_MAX_PAYLOAD - sizeof(pkt_head_t) -
+		  sizeof(uint32be_t)];
+	uint32be_t magic2;
 } pkt_test_data_t;
 
+static int test_jumbo;
+
 /** default packet pool */
 odp_pool_t default_pkt_pool = ODP_POOL_INVALID;
 
@@ -59,14 +72,18 @@  static void pktio_pkt_set_macs(odp_packet_t pkt,
 	CU_ASSERT(ret == ODPH_ETHADDR_LEN);
 }
 
+static uint32_t pkt_payload_len(void)
+{
+	return test_jumbo ? sizeof(pkt_test_data_t) : sizeof(pkt_head_t);
+}
+
 static int pktio_pkt_set_seq(odp_packet_t pkt)
 {
 	static uint32_t tstseq;
 	size_t l4_off;
-	pkt_test_data_t data;
+	pkt_test_data_t *data;
+	uint32_t len = pkt_payload_len();
 
-	data.magic = TEST_SEQ_MAGIC;
-	data.seq   = tstseq;
 
 	l4_off = odp_packet_l4_offset(pkt);
 	if (!l4_off) {
@@ -74,9 +91,16 @@  static int pktio_pkt_set_seq(odp_packet_t pkt)
 		return -1;
 	}
 
+	data = calloc(1, len);
+	CU_ASSERT_FATAL(data != NULL);
+
+	data->head.magic = TEST_SEQ_MAGIC;
+	data->magic2 = TEST_SEQ_MAGIC;
+	data->head.seq   = tstseq;
+
 	odp_packet_copydata_in(pkt, l4_off+ODPH_UDPHDR_LEN,
-			       sizeof(data), &data);
-
+			       len, data);
+	free(data);
 	tstseq++;
 
 	return 0;
@@ -85,18 +109,30 @@  static int pktio_pkt_set_seq(odp_packet_t pkt)
 static uint32_t pktio_pkt_seq(odp_packet_t pkt)
 {
 	size_t l4_off;
-	pkt_test_data_t data;
+	uint32_t seq = TEST_SEQ_INVALID;
+	pkt_test_data_t *data;
+	uint32_t len = pkt_payload_len();
 
 	l4_off = odp_packet_l4_offset(pkt);
-	if (l4_off) {
-		odp_packet_copydata_out(pkt, l4_off+ODPH_UDPHDR_LEN,
-					sizeof(data), &data);
+	if (!l4_off)
+		return TEST_SEQ_INVALID;
 
-		if (data.magic == TEST_SEQ_MAGIC)
-			return data.seq;
+	data = calloc(1, len);
+	CU_ASSERT_FATAL(data != NULL);
+
+	odp_packet_copydata_out(pkt, l4_off+ODPH_UDPHDR_LEN,
+				len, data);
+
+	if (data->head.magic == TEST_SEQ_MAGIC) {
+		if (test_jumbo && data->magic2 != TEST_SEQ_MAGIC) {
+			free(data);
+			return TEST_SEQ_INVALID;
+		}
+		seq = data->head.seq;
 	}
 
-	return TEST_SEQ_INVALID;
+	free(data);
+	return seq;
 }
 
 static odp_packet_t pktio_create_packet(void)
@@ -107,7 +143,7 @@  static odp_packet_t pktio_create_packet(void)
 	odph_udphdr_t *udp;
 	char *buf;
 	uint16_t seq;
-	size_t payload_len = sizeof(pkt_test_data_t);
+	size_t payload_len = pkt_payload_len();
 	uint8_t mac[ODPH_ETHADDR_LEN] = {0};
 
 	pkt = odp_packet_alloc(default_pkt_pool, payload_len + ODPH_UDPHDR_LEN +
@@ -187,8 +223,8 @@  static int default_pool_create(void)
 		return -1;
 
 	memset(&params, 0, sizeof(params));
-	params.pkt.seg_len = PKT_BUF_SIZE;
-	params.pkt.len     = PKT_BUF_SIZE;
+	params.pkt.seg_len = PKT_BUF_JUMBO_SIZE;
+	params.pkt.len     = PKT_BUF_JUMBO_SIZE;
 	params.pkt.num     = PKT_BUF_NUM;
 	params.type        = ODP_POOL_PACKET;
 
@@ -208,15 +244,24 @@  static odp_pktio_t create_pktio(const char *iface)
 	odp_pool_param_t params;
 
 	memset(&params, 0, sizeof(params));
-	params.pkt.seg_len = PKT_BUF_SIZE;
-	params.pkt.len     = PKT_BUF_SIZE;
+	if (test_jumbo) {
+		params.pkt.seg_len = PKT_BUF_JUMBO_SIZE;
+		params.pkt.len     = PKT_BUF_JUMBO_SIZE;
+
+	} else {
+		params.pkt.seg_len = PKT_BUF_SIZE;
+		params.pkt.len     = PKT_BUF_SIZE;
+	}
 	params.pkt.num     = PKT_BUF_NUM;
 	params.type        = ODP_POOL_PACKET;
 
 	snprintf(pool_name, sizeof(pool_name), "pkt_pool_%s", iface);
+
 	pool = odp_pool_lookup(pool_name);
-	if (pool == ODP_POOL_INVALID)
-		pool = odp_pool_create(pool_name, ODP_SHM_NULL, &params);
+	if (pool != ODP_POOL_INVALID)
+		odp_pool_destroy(pool);
+
+	pool = odp_pool_create(pool_name, ODP_SHM_NULL, &params);
 	CU_ASSERT(pool != ODP_POOL_INVALID);
 
 	pktio = odp_pktio_open(iface, pool);
@@ -450,6 +495,13 @@  static void test_odp_pktio_sched_multi(void)
 	pktio_test_txrx(ODP_QUEUE_TYPE_SCHED, 4);
 }
 
+static void test_odp_pktio_jumbo(void)
+{
+	test_jumbo = 1;
+	test_odp_pktio_sched_multi();
+	test_jumbo = 0;
+}
+
 static void test_odp_pktio_mtu(void)
 {
 	int ret;
@@ -668,6 +720,7 @@  CU_TestInfo pktio_tests[] = {
 	{"pktio poll multi",	test_odp_pktio_poll_multi},
 	{"pktio sched queues",	test_odp_pktio_sched_queue},
 	{"pktio sched multi",	test_odp_pktio_sched_multi},
+	{"pktio jumbo frames",	test_odp_pktio_jumbo},
 	{"pktio mtu",		test_odp_pktio_mtu},
 	{"pktio promisc mode",	test_odp_pktio_promisc},
 	{"pktio mac",		test_odp_pktio_mac},
diff --git a/test/validation/odp_pktio_run b/test/validation/odp_pktio_run
index 08288e6..b9d7e3c 100755
--- a/test/validation/odp_pktio_run
+++ b/test/validation/odp_pktio_run
@@ -56,8 +56,8 @@  setup_env1()
 		echo "pktio: error: unable to create veth pair"
 		exit $TEST_SKIPPED
 	fi
-	ip link set $IF0 up
-	ip link set $IF1 up
+	ip link set $IF0 mtu 9216 up
+	ip link set $IF1 mtu 9216 up
 
 	# network needs a little time to come up
 	sleep 1