
[PATCHv9] linux-generic: mmap: jumbo frames support

Message ID 1425635486-9936-1-git-send-email-maxim.uvarov@linaro.org
State Superseded

Commit Message

Maxim Uvarov March 6, 2015, 9:51 a.m. UTC
Support for jumbo frames for linux-generic with unsegmented buffers.
The pktio test is also adjusted to work with 9*1024 = 9216 byte packets.
https://bugs.linaro.org/show_bug.cgi?id=509

Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
---
 v9: - rename pool entry from pool to pool_entry
 v8: - fix test for invalid offset
     - define jumbo packet size as (9*1024) in test.
 v7: - add page align for frame size.
 v6: - rewrite mmap_fill_ring to take into account pool settings.
 v5: - test_4_jumbo_pkts -> test_jumbo
     - do not use stack for jumbo packet, simply allocate it.

 v4: - fix work on real interfaces (make check under root)
     - better define jumbo packet payload size

 platform/linux-generic/odp_packet_socket.c | 43 +++++++++++---
 test/validation/odp_pktio.c                | 95 +++++++++++++++++++++++-------
 test/validation/odp_pktio_run              |  4 +-
 3 files changed, 110 insertions(+), 32 deletions(-)

Comments

Bill Fischofer March 6, 2015, 6:16 p.m. UTC | #1
On Fri, Mar 6, 2015 at 3:51 AM, Maxim Uvarov <maxim.uvarov@linaro.org>
wrote:

> Support for jumbo frames for linux-generic with unsegmented buffers.
> The pktio test is also adjusted to work with 9*1024 = 9216 byte packets.
> https://bugs.linaro.org/show_bug.cgi?id=509
>
> Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
> ---
>  v9: - rename pool entry from pool to pool_entry
>  v8: - fix test for invalid offset
>      - define jumbo packet size as (9*1024) in test.
>  v7: - add page align for frame size.
>  v6: - rewrite mmap_fill_ring to take into account pool settings.
>  v5: - test_4_jumbo_pkts -> test_jumbo
>      - do not use stack for jumbo packet, simply allocate it.
>
>  v4: - fix work on real interfaces (make check under root)
>      - better define jumbo packet payload size
>
>  platform/linux-generic/odp_packet_socket.c | 43 +++++++++++---
>  test/validation/odp_pktio.c                | 95 +++++++++++++++++++++++-------
>  test/validation/odp_pktio_run              |  4 +-
>  3 files changed, 110 insertions(+), 32 deletions(-)
>
> diff --git a/platform/linux-generic/odp_packet_socket.c b/platform/linux-generic/odp_packet_socket.c
> index 55c212e..e8626c6 100644
> --- a/platform/linux-generic/odp_packet_socket.c
> +++ b/platform/linux-generic/odp_packet_socket.c
> @@ -109,6 +109,7 @@ static int set_pkt_sock_fanout_mmap(pkt_sock_mmap_t *const pkt_sock,
>
>         fanout_group = (uint16_t) (sock_group_idx & 0xffff);
>         val = (PACKET_FANOUT_HASH << 16) | fanout_group;
> +       printf("\n\n fanout group %d\n\n\n",  val);
>

Residual debug printf?  Not clear why this was added here.
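
If a trace is actually wanted here, something along these lines (a sketch
only, assuming the ODP_DBG macro from odp_debug_internal.h is usable in
this file) would be preferable to a bare printf:

        ODP_DBG("fanout group %d, PACKET_FANOUT val 0x%x\n",
                fanout_group, val);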


>
>         err = setsockopt(sockfd, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val));
>         if (err != 0) {
> @@ -584,11 +585,35 @@ static inline unsigned pkt_mmap_v2_tx(int sock, struct ring *ring,
>         return i;
>  }
>
> -static void mmap_fill_ring(struct ring *ring, unsigned blocks)
> +static void mmap_fill_ring(struct ring *ring, odp_pool_t pool_hdl, int fanout)
>  {
> -       ring->req.tp_block_size = getpagesize() << 2;
> -       ring->req.tp_frame_size = TPACKET_ALIGNMENT << 7;
> -       ring->req.tp_block_nr = blocks;
> +       /*@todo add Huge Pages support*/
> +       int pz = getpagesize();
> +       uint32_t pool_id = pool_handle_to_index(pool_hdl);
> +       pool_entry_t *pool_entry = get_pool_entry(pool_id);
> +
> +       if (pool_entry == NULL || pool_hdl == ODP_POOL_INVALID)
> +               ODP_ABORT("NULL pool entry");
>

Sorry, I misread the previous version of this. However, if you're going to
check pool_hdl for validity, it needs to be checked before get_pool_entry()
is called. It's also the case that you don't really need the pool_id
returned by pool_handle_to_index(), so perhaps:

if (pool_hdl == ODP_POOL_INVALID)
        ODP_ABORT("Invalid pool handle");

pool_entry = odp_pool_to_entry(pool_hdl);
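
For reference, a minimal sketch of how the validated prologue of
mmap_fill_ring() could then look (a sketch only; odp_pool_to_entry() is
assumed to be the internal helper that maps a pool handle directly to its
pool_entry_t):

static void mmap_fill_ring(struct ring *ring, odp_pool_t pool_hdl, int fanout)
{
        int pz = getpagesize();
        pool_entry_t *pool_entry;

        /* Validate the handle before anything derived from it is used */
        if (pool_hdl == ODP_POOL_INVALID)
                ODP_ABORT("Invalid pool handle");

        pool_entry = odp_pool_to_entry(pool_hdl);
        if (pool_entry == NULL)
                ODP_ABORT("NULL pool entry");

        /* ... ring sizing based on pool_entry->s.blk_size follows ... */
}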


> +
> +       /* Frame has to capture a full packet that fits in the pool block. */
> +       ring->req.tp_frame_size = (pool_entry->s.blk_size +
> +                                  TPACKET_HDRLEN + TPACKET_ALIGNMENT +
> +                                  + (pz - 1)) & (-pz);
> +
> +       /* Calculate how many pages we need to hold all pool packets
> +        * and align the size to a page boundary.
> +        */
> +       ring->req.tp_block_size = (ring->req.tp_frame_size *
> +                                  pool_entry->s.buf_num + (pz - 1)) & (-pz);
> +
> +       if (!fanout) {
> +               /* Single socket is in use. Use 1 block with buf_num frames. */
> +               ring->req.tp_block_nr = 1;
> +       } else {
> +               /* Fanout is in use: traffic is most likely split according to
> +                * the number of cpu threads. Use cpu blocks and buf_num frames. */
> +               ring->req.tp_block_nr = odp_cpu_count();
> +       }
>
>         ring->req.tp_frame_nr = ring->req.tp_block_size /
>                                 ring->req.tp_frame_size * ring->req.tp_block_nr;
> @@ -613,10 +638,10 @@ static int mmap_set_packet_loss_discard(int sock)
>         return 0;
>  }
>
> -static int mmap_setup_ring(int sock, struct ring *ring, int type)
> +static int mmap_setup_ring(int sock, struct ring *ring, int type,
> +                          odp_pool_t pool_hdl, int fanout)
>  {
>         int ret = 0;
> -       unsigned blocks = 256;
>
>         ring->sock = sock;
>         ring->type = type;
> @@ -628,7 +653,7 @@ static int mmap_setup_ring(int sock, struct ring *ring, int type)
>                         return -1;
>         }
>
> -       mmap_fill_ring(ring, blocks);
> +       mmap_fill_ring(ring, pool_hdl, fanout);
>
>         ret = setsockopt(sock, SOL_PACKET, type, &ring->req, sizeof(ring->req));
>         if (ret == -1) {
> @@ -772,12 +797,12 @@ int setup_pkt_sock_mmap(pkt_sock_mmap_t *const pkt_sock, const char *netdev,
>                 return -1;
>
>         ret = mmap_setup_ring(pkt_sock->sockfd, &pkt_sock->tx_ring,
> -                             PACKET_TX_RING);
> +                             PACKET_TX_RING, pool, fanout);
>         if (ret != 0)
>                 return -1;
>
>         ret = mmap_setup_ring(pkt_sock->sockfd, &pkt_sock->rx_ring,
> -                             PACKET_RX_RING);
> +                             PACKET_RX_RING, pool, fanout);
>         if (ret != 0)
>                 return -1;
>
> diff --git a/test/validation/odp_pktio.c b/test/validation/odp_pktio.c
> index 8df367d..dcb9fcf 100644
> --- a/test/validation/odp_pktio.c
> +++ b/test/validation/odp_pktio.c
> @@ -15,6 +15,10 @@
>
>  #define PKT_BUF_NUM            32
>  #define PKT_BUF_SIZE           1856
> +#define PKT_BUF_JUMBO_SIZE     (9*1024)
> +#define PKT_BUF_JUMBO_MAX_PAYLOAD (PKT_BUF_JUMBO_SIZE -\
> +                                  (ODPH_UDPHDR_LEN +\
> +                                  ODPH_IPV4HDR_LEN + ODPH_ETHHDR_LEN))
>  #define MAX_NUM_IFACES         2
>  #define TEST_SEQ_INVALID       ((uint32_t)~0)
>  #define TEST_SEQ_MAGIC         0x92749451
> @@ -33,12 +37,21 @@ typedef struct {
>         odp_queue_t inq;
>  } pktio_info_t;
>
> -/** structure of test packet UDP payload */
> -typedef struct {
> +typedef struct ODP_PACKED {
>         uint32be_t magic;
>         uint32be_t seq;
> +} pkt_head_t;
> +
> +/** structure of test packet UDP payload */
> +typedef struct ODP_PACKED {
> +       pkt_head_t head;
> +       char data[PKT_BUF_JUMBO_MAX_PAYLOAD - sizeof(pkt_head_t) -
> +                 sizeof(uint32be_t)];
> +       uint32be_t magic2;
>  } pkt_test_data_t;
>
> +static int test_jumbo;
> +
>  /** default packet pool */
>  odp_pool_t default_pkt_pool = ODP_POOL_INVALID;
>
> @@ -59,14 +72,18 @@ static void pktio_pkt_set_macs(odp_packet_t pkt,
>         CU_ASSERT(ret == ODPH_ETHADDR_LEN);
>  }
>
> +static uint32_t pkt_payload_len(void)
> +{
> +       return test_jumbo ? sizeof(pkt_test_data_t) : sizeof(pkt_head_t);
> +}
> +
>  static int pktio_pkt_set_seq(odp_packet_t pkt)
>  {
>         static uint32_t tstseq;
>         size_t l4_off;
> -       pkt_test_data_t data;
> +       pkt_test_data_t *data;
> +       uint32_t len = pkt_payload_len();
>
> -       data.magic = TEST_SEQ_MAGIC;
> -       data.seq   = tstseq;
>
>         l4_off = odp_packet_l4_offset(pkt);
>         if (!l4_off) {
> @@ -74,9 +91,16 @@ static int pktio_pkt_set_seq(odp_packet_t pkt)
>                 return -1;
>         }
>
> +       data = calloc(1, len);
> +       CU_ASSERT_FATAL(data != NULL);
> +
> +       data->head.magic = TEST_SEQ_MAGIC;
> +       data->magic2 = TEST_SEQ_MAGIC;
> +       data->head.seq   = tstseq;
> +
>         odp_packet_copydata_in(pkt, l4_off+ODPH_UDPHDR_LEN,
> -                              sizeof(data), &data);
> -
> +                              len, data);
> +       free(data);
>         tstseq++;
>
>         return 0;
> @@ -85,18 +109,30 @@ static int pktio_pkt_set_seq(odp_packet_t pkt)
>  static uint32_t pktio_pkt_seq(odp_packet_t pkt)
>  {
>         size_t l4_off;
> -       pkt_test_data_t data;
> +       uint32_t seq = TEST_SEQ_INVALID;
> +       pkt_test_data_t *data;
> +       uint32_t len = pkt_payload_len();
>
>         l4_off = odp_packet_l4_offset(pkt);
> -       if (l4_off) {
> -               odp_packet_copydata_out(pkt, l4_off+ODPH_UDPHDR_LEN,
> -                                       sizeof(data), &data);
> +       if (l4_off ==  ODP_PACKET_OFFSET_INVALID)
> +               return TEST_SEQ_INVALID;
>
> -               if (data.magic == TEST_SEQ_MAGIC)
> -                       return data.seq;
> +       data = calloc(1, len);
> +       CU_ASSERT_FATAL(data != NULL);
> +
> +       odp_packet_copydata_out(pkt, l4_off+ODPH_UDPHDR_LEN,
> +                               len, data);
> +
> +       if (data->head.magic == TEST_SEQ_MAGIC) {
> +               if (test_jumbo && data->magic2 != TEST_SEQ_MAGIC) {
> +                       free(data);
> +                       return TEST_SEQ_INVALID;
> +               }
> +               seq = data->head.seq;
>         }
>
> -       return TEST_SEQ_INVALID;
> +       free(data);
> +       return seq;
>  }
>
>  static odp_packet_t pktio_create_packet(void)
> @@ -107,7 +143,7 @@ static odp_packet_t pktio_create_packet(void)
>         odph_udphdr_t *udp;
>         char *buf;
>         uint16_t seq;
> -       size_t payload_len = sizeof(pkt_test_data_t);
> +       size_t payload_len = pkt_payload_len();
>         uint8_t mac[ODPH_ETHADDR_LEN] = {0};
>
>         pkt = odp_packet_alloc(default_pkt_pool, payload_len + ODPH_UDPHDR_LEN +
> @@ -187,8 +223,8 @@ static int default_pool_create(void)
>                 return -1;
>
>         memset(&params, 0, sizeof(params));
> -       params.pkt.seg_len = PKT_BUF_SIZE;
> -       params.pkt.len     = PKT_BUF_SIZE;
> +       params.pkt.seg_len = PKT_BUF_JUMBO_SIZE;
> +       params.pkt.len     = PKT_BUF_JUMBO_SIZE;
>         params.pkt.num     = PKT_BUF_NUM;
>         params.type        = ODP_POOL_PACKET;
>
> @@ -208,15 +244,24 @@ static odp_pktio_t create_pktio(const char *iface)
>         odp_pool_param_t params;
>
>         memset(&params, 0, sizeof(params));
> -       params.pkt.seg_len = PKT_BUF_SIZE;
> -       params.pkt.len     = PKT_BUF_SIZE;
> +       if (test_jumbo) {
> +               params.pkt.seg_len = PKT_BUF_JUMBO_SIZE;
> +               params.pkt.len     = PKT_BUF_JUMBO_SIZE;
> +
> +       } else {
> +               params.pkt.seg_len = PKT_BUF_SIZE;
> +               params.pkt.len     = PKT_BUF_SIZE;
> +       }
>         params.pkt.num     = PKT_BUF_NUM;
>         params.type        = ODP_POOL_PACKET;
>
>         snprintf(pool_name, sizeof(pool_name), "pkt_pool_%s", iface);
> +
>         pool = odp_pool_lookup(pool_name);
> -       if (pool == ODP_POOL_INVALID)
> -               pool = odp_pool_create(pool_name, ODP_SHM_NULL, &params);
> +       if (pool != ODP_POOL_INVALID)
> +               odp_pool_destroy(pool);
> +
> +       pool = odp_pool_create(pool_name, ODP_SHM_NULL, &params);
>         CU_ASSERT(pool != ODP_POOL_INVALID);
>
>         pktio = odp_pktio_open(iface, pool);
> @@ -450,6 +495,13 @@ static void test_odp_pktio_sched_multi(void)
>         pktio_test_txrx(ODP_QUEUE_TYPE_SCHED, 4);
>  }
>
> +static void test_odp_pktio_jumbo(void)
> +{
> +       test_jumbo = 1;
> +       test_odp_pktio_sched_multi();
> +       test_jumbo = 0;
> +}
> +
>  static void test_odp_pktio_mtu(void)
>  {
>         int ret;
> @@ -668,6 +720,7 @@ CU_TestInfo pktio_tests[] = {
>         {"pktio poll multi",    test_odp_pktio_poll_multi},
>         {"pktio sched queues",  test_odp_pktio_sched_queue},
>         {"pktio sched multi",   test_odp_pktio_sched_multi},
> +       {"pktio jumbo frames",  test_odp_pktio_jumbo},
>         {"pktio mtu",           test_odp_pktio_mtu},
>         {"pktio promisc mode",  test_odp_pktio_promisc},
>         {"pktio mac",           test_odp_pktio_mac},
> diff --git a/test/validation/odp_pktio_run b/test/validation/odp_pktio_run
> index 08288e6..b9d7e3c 100755
> --- a/test/validation/odp_pktio_run
> +++ b/test/validation/odp_pktio_run
> @@ -56,8 +56,8 @@ setup_env1()
>                 echo "pktio: error: unable to create veth pair"
>                 exit $TEST_SKIPPED
>         fi
> -       ip link set $IF0 up
> -       ip link set $IF1 up
> +       ip link set $IF0 mtu 9216 up
> +       ip link set $IF1 mtu 9216 up
>
>         # network needs a little time to come up
>         sleep 1
> --
> 1.9.1
>
>

Patch

diff --git a/platform/linux-generic/odp_packet_socket.c b/platform/linux-generic/odp_packet_socket.c
index 55c212e..e8626c6 100644
--- a/platform/linux-generic/odp_packet_socket.c
+++ b/platform/linux-generic/odp_packet_socket.c
@@ -109,6 +109,7 @@  static int set_pkt_sock_fanout_mmap(pkt_sock_mmap_t *const pkt_sock,
 
 	fanout_group = (uint16_t) (sock_group_idx & 0xffff);
 	val = (PACKET_FANOUT_HASH << 16) | fanout_group;
+	printf("\n\n fanout group %d\n\n\n",  val);
 
 	err = setsockopt(sockfd, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val));
 	if (err != 0) {
@@ -584,11 +585,35 @@  static inline unsigned pkt_mmap_v2_tx(int sock, struct ring *ring,
 	return i;
 }
 
-static void mmap_fill_ring(struct ring *ring, unsigned blocks)
+static void mmap_fill_ring(struct ring *ring, odp_pool_t pool_hdl, int fanout)
 {
-	ring->req.tp_block_size = getpagesize() << 2;
-	ring->req.tp_frame_size = TPACKET_ALIGNMENT << 7;
-	ring->req.tp_block_nr = blocks;
+	/*@todo add Huge Pages support*/
+	int pz = getpagesize();
+	uint32_t pool_id = pool_handle_to_index(pool_hdl);
+	pool_entry_t *pool_entry = get_pool_entry(pool_id);
+
+	if (pool_entry == NULL || pool_hdl == ODP_POOL_INVALID)
+		ODP_ABORT("NULL pool entry");
+
+	/* Frame has to capture a full packet that fits in the pool block. */
+	ring->req.tp_frame_size = (pool_entry->s.blk_size +
+				   TPACKET_HDRLEN + TPACKET_ALIGNMENT +
+				   + (pz - 1)) & (-pz);
+
+	/* Calculate how many pages we need to hold all pool packets
+	 * and align the size to a page boundary.
+	 */
+	ring->req.tp_block_size = (ring->req.tp_frame_size *
+				   pool_entry->s.buf_num + (pz - 1)) & (-pz);
+
+	if (!fanout) {
+		/* Single socket is in use. Use 1 block with buf_num frames. */
+		ring->req.tp_block_nr = 1;
+	} else {
+		/* Fanout is in use: traffic is most likely split according to
+		 * the number of cpu threads. Use cpu blocks and buf_num frames. */
+		ring->req.tp_block_nr = odp_cpu_count();
+	}
 
 	ring->req.tp_frame_nr = ring->req.tp_block_size /
 				ring->req.tp_frame_size * ring->req.tp_block_nr;
@@ -613,10 +638,10 @@  static int mmap_set_packet_loss_discard(int sock)
 	return 0;
 }
 
-static int mmap_setup_ring(int sock, struct ring *ring, int type)
+static int mmap_setup_ring(int sock, struct ring *ring, int type,
+			   odp_pool_t pool_hdl, int fanout)
 {
 	int ret = 0;
-	unsigned blocks = 256;
 
 	ring->sock = sock;
 	ring->type = type;
@@ -628,7 +653,7 @@  static int mmap_setup_ring(int sock, struct ring *ring, int type)
 			return -1;
 	}
 
-	mmap_fill_ring(ring, blocks);
+	mmap_fill_ring(ring, pool_hdl, fanout);
 
 	ret = setsockopt(sock, SOL_PACKET, type, &ring->req, sizeof(ring->req));
 	if (ret == -1) {
@@ -772,12 +797,12 @@  int setup_pkt_sock_mmap(pkt_sock_mmap_t *const pkt_sock, const char *netdev,
 		return -1;
 
 	ret = mmap_setup_ring(pkt_sock->sockfd, &pkt_sock->tx_ring,
-			      PACKET_TX_RING);
+			      PACKET_TX_RING, pool, fanout);
 	if (ret != 0)
 		return -1;
 
 	ret = mmap_setup_ring(pkt_sock->sockfd, &pkt_sock->rx_ring,
-			      PACKET_RX_RING);
+			      PACKET_RX_RING, pool, fanout);
 	if (ret != 0)
 		return -1;
 
diff --git a/test/validation/odp_pktio.c b/test/validation/odp_pktio.c
index 8df367d..dcb9fcf 100644
--- a/test/validation/odp_pktio.c
+++ b/test/validation/odp_pktio.c
@@ -15,6 +15,10 @@ 
 
 #define PKT_BUF_NUM            32
 #define PKT_BUF_SIZE           1856
+#define PKT_BUF_JUMBO_SIZE     (9*1024)
+#define PKT_BUF_JUMBO_MAX_PAYLOAD (PKT_BUF_JUMBO_SIZE -\
+				   (ODPH_UDPHDR_LEN +\
+				   ODPH_IPV4HDR_LEN + ODPH_ETHHDR_LEN))
 #define MAX_NUM_IFACES         2
 #define TEST_SEQ_INVALID       ((uint32_t)~0)
 #define TEST_SEQ_MAGIC         0x92749451
@@ -33,12 +37,21 @@  typedef struct {
 	odp_queue_t inq;
 } pktio_info_t;
 
-/** structure of test packet UDP payload */
-typedef struct {
+typedef struct ODP_PACKED {
 	uint32be_t magic;
 	uint32be_t seq;
+} pkt_head_t;
+
+/** structure of test packet UDP payload */
+typedef struct ODP_PACKED {
+	pkt_head_t head;
+	char data[PKT_BUF_JUMBO_MAX_PAYLOAD - sizeof(pkt_head_t) -
+		  sizeof(uint32be_t)];
+	uint32be_t magic2;
 } pkt_test_data_t;
 
+static int test_jumbo;
+
 /** default packet pool */
 odp_pool_t default_pkt_pool = ODP_POOL_INVALID;
 
@@ -59,14 +72,18 @@  static void pktio_pkt_set_macs(odp_packet_t pkt,
 	CU_ASSERT(ret == ODPH_ETHADDR_LEN);
 }
 
+static uint32_t pkt_payload_len(void)
+{
+	return test_jumbo ? sizeof(pkt_test_data_t) : sizeof(pkt_head_t);
+}
+
 static int pktio_pkt_set_seq(odp_packet_t pkt)
 {
 	static uint32_t tstseq;
 	size_t l4_off;
-	pkt_test_data_t data;
+	pkt_test_data_t *data;
+	uint32_t len = pkt_payload_len();
 
-	data.magic = TEST_SEQ_MAGIC;
-	data.seq   = tstseq;
 
 	l4_off = odp_packet_l4_offset(pkt);
 	if (!l4_off) {
@@ -74,9 +91,16 @@  static int pktio_pkt_set_seq(odp_packet_t pkt)
 		return -1;
 	}
 
+	data = calloc(1, len);
+	CU_ASSERT_FATAL(data != NULL);
+
+	data->head.magic = TEST_SEQ_MAGIC;
+	data->magic2 = TEST_SEQ_MAGIC;
+	data->head.seq   = tstseq;
+
 	odp_packet_copydata_in(pkt, l4_off+ODPH_UDPHDR_LEN,
-			       sizeof(data), &data);
-
+			       len, data);
+	free(data);
 	tstseq++;
 
 	return 0;
@@ -85,18 +109,30 @@  static int pktio_pkt_set_seq(odp_packet_t pkt)
 static uint32_t pktio_pkt_seq(odp_packet_t pkt)
 {
 	size_t l4_off;
-	pkt_test_data_t data;
+	uint32_t seq = TEST_SEQ_INVALID;
+	pkt_test_data_t *data;
+	uint32_t len = pkt_payload_len();
 
 	l4_off = odp_packet_l4_offset(pkt);
-	if (l4_off) {
-		odp_packet_copydata_out(pkt, l4_off+ODPH_UDPHDR_LEN,
-					sizeof(data), &data);
+	if (l4_off ==  ODP_PACKET_OFFSET_INVALID)
+		return TEST_SEQ_INVALID;
 
-		if (data.magic == TEST_SEQ_MAGIC)
-			return data.seq;
+	data = calloc(1, len);
+	CU_ASSERT_FATAL(data != NULL);
+
+	odp_packet_copydata_out(pkt, l4_off+ODPH_UDPHDR_LEN,
+				len, data);
+
+	if (data->head.magic == TEST_SEQ_MAGIC) {
+		if (test_jumbo && data->magic2 != TEST_SEQ_MAGIC) {
+			free(data);
+			return TEST_SEQ_INVALID;
+		}
+		seq = data->head.seq;
 	}
 
-	return TEST_SEQ_INVALID;
+	free(data);
+	return seq;
 }
 
 static odp_packet_t pktio_create_packet(void)
@@ -107,7 +143,7 @@  static odp_packet_t pktio_create_packet(void)
 	odph_udphdr_t *udp;
 	char *buf;
 	uint16_t seq;
-	size_t payload_len = sizeof(pkt_test_data_t);
+	size_t payload_len = pkt_payload_len();
 	uint8_t mac[ODPH_ETHADDR_LEN] = {0};
 
 	pkt = odp_packet_alloc(default_pkt_pool, payload_len + ODPH_UDPHDR_LEN +
@@ -187,8 +223,8 @@  static int default_pool_create(void)
 		return -1;
 
 	memset(&params, 0, sizeof(params));
-	params.pkt.seg_len = PKT_BUF_SIZE;
-	params.pkt.len     = PKT_BUF_SIZE;
+	params.pkt.seg_len = PKT_BUF_JUMBO_SIZE;
+	params.pkt.len     = PKT_BUF_JUMBO_SIZE;
 	params.pkt.num     = PKT_BUF_NUM;
 	params.type        = ODP_POOL_PACKET;
 
@@ -208,15 +244,24 @@  static odp_pktio_t create_pktio(const char *iface)
 	odp_pool_param_t params;
 
 	memset(&params, 0, sizeof(params));
-	params.pkt.seg_len = PKT_BUF_SIZE;
-	params.pkt.len     = PKT_BUF_SIZE;
+	if (test_jumbo) {
+		params.pkt.seg_len = PKT_BUF_JUMBO_SIZE;
+		params.pkt.len     = PKT_BUF_JUMBO_SIZE;
+
+	} else {
+		params.pkt.seg_len = PKT_BUF_SIZE;
+		params.pkt.len     = PKT_BUF_SIZE;
+	}
 	params.pkt.num     = PKT_BUF_NUM;
 	params.type        = ODP_POOL_PACKET;
 
 	snprintf(pool_name, sizeof(pool_name), "pkt_pool_%s", iface);
+
 	pool = odp_pool_lookup(pool_name);
-	if (pool == ODP_POOL_INVALID)
-		pool = odp_pool_create(pool_name, ODP_SHM_NULL, &params);
+	if (pool != ODP_POOL_INVALID)
+		odp_pool_destroy(pool);
+
+	pool = odp_pool_create(pool_name, ODP_SHM_NULL, &params);
 	CU_ASSERT(pool != ODP_POOL_INVALID);
 
 	pktio = odp_pktio_open(iface, pool);
@@ -450,6 +495,13 @@  static void test_odp_pktio_sched_multi(void)
 	pktio_test_txrx(ODP_QUEUE_TYPE_SCHED, 4);
 }
 
+static void test_odp_pktio_jumbo(void)
+{
+	test_jumbo = 1;
+	test_odp_pktio_sched_multi();
+	test_jumbo = 0;
+}
+
 static void test_odp_pktio_mtu(void)
 {
 	int ret;
@@ -668,6 +720,7 @@  CU_TestInfo pktio_tests[] = {
 	{"pktio poll multi",	test_odp_pktio_poll_multi},
 	{"pktio sched queues",	test_odp_pktio_sched_queue},
 	{"pktio sched multi",	test_odp_pktio_sched_multi},
+	{"pktio jumbo frames",	test_odp_pktio_jumbo},
 	{"pktio mtu",		test_odp_pktio_mtu},
 	{"pktio promisc mode",	test_odp_pktio_promisc},
 	{"pktio mac",		test_odp_pktio_mac},
diff --git a/test/validation/odp_pktio_run b/test/validation/odp_pktio_run
index 08288e6..b9d7e3c 100755
--- a/test/validation/odp_pktio_run
+++ b/test/validation/odp_pktio_run
@@ -56,8 +56,8 @@  setup_env1()
 		echo "pktio: error: unable to create veth pair"
 		exit $TEST_SKIPPED
 	fi
-	ip link set $IF0 up
-	ip link set $IF1 up
+	ip link set $IF0 mtu 9216 up
+	ip link set $IF1 mtu 9216 up
 
 	# network needs a little time to come up
 	sleep 1