
[PATCHv5] linux-generic: mmap: jumbo frames support

Message ID 1425468209-22480-1-git-send-email-maxim.uvarov@linaro.org
State New

Commit Message

Maxim Uvarov March 4, 2015, 11:23 a.m. UTC
Support for jumbo frames in linux-generic with unsegmented buffers.
The pktio test is also adjusted to work with 9 * 1024 = 9216 byte packets.
https://bugs.linaro.org/show_bug.cgi?id=509

Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
---
 v5: - test_4_jumbo_pkts -> test_jumbo
    - do not use the stack for the jumbo packet, simply allocate it.

 v4: - fix work on real interfaces (make check under root)
     - better define jumbo packet payload size

 platform/linux-generic/odp_packet_socket.c |  2 +-
 test/validation/odp_pktio.c                | 95 +++++++++++++++++++++++-------
 test/validation/odp_pktio_run              |  4 +-
 3 files changed, 77 insertions(+), 24 deletions(-)

Comments

Maxim Uvarov March 4, 2015, 12:03 p.m. UTC | #1
On 03/04/15 14:23, Maxim Uvarov wrote:
> --- a/platform/linux-generic/odp_packet_socket.c
> +++ b/platform/linux-generic/odp_packet_socket.c
> @@ -587,7 +587,7 @@ static inline unsigned pkt_mmap_v2_tx(int sock, struct ring *ring,
>   static void mmap_fill_ring(struct ring *ring, unsigned blocks)
>   {
>   	ring->req.tp_block_size = getpagesize() << 2;
> -	ring->req.tp_frame_size = TPACKET_ALIGNMENT << 7;
> +	ring->req.tp_frame_size = 9216 + TPACKET_HDRLEN;
>   	ring->req.tp_block_nr = blocks;

After getting this to work, I now think the code above is not correct.
If I understand the documentation correctly:
https://www.kernel.org/doc/Documentation/networking/packet_mmap.txt
/usr/include/linux/if_packet.h

The code has to be like this:

-       ring->req.tp_block_size = getpagesize() << 2;
-       ring->req.tp_frame_size = 9216 + TPACKET_HDRLEN;
+       /* @todo add huge pages support */
+       int pz = getpagesize();
+
+       /* frame len to not truncate packets up to 9212 bytes */
+       ring->req.tp_frame_size = 9216 + TPACKET_HDRLEN + TPACKET_ALIGNMENT;
+       /* calculate how many pages we need to hold
+        * ODP_CONFIG_PKTIO_ENTRIES frames in each block
+        */
+       ring->req.tp_block_size = (ring->req.tp_frame_size + (pz - 1)) & (-pz);
+       ring->req.tp_block_size *= ODP_CONFIG_PKTIO_ENTRIES;
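
For reference, the sizing rules from packet_mmap.txt that the change above
tries to follow can be written out as a small standalone sketch (TPACKET_V2
assumed; the ODP_CONFIG_PKTIO_ENTRIES value below is only illustrative, the
real one comes from ODP's configuration):

    #include <unistd.h>
    #include <linux/if_packet.h>

    #define JUMBO_LEN 9216
    /* illustrative value for this sketch only */
    #define ODP_CONFIG_PKTIO_ENTRIES 64

    static void fill_ring_sketch(struct tpacket_req *req, unsigned blocks)
    {
            int pz = getpagesize();

            /* tp_frame_size must be a multiple of TPACKET_ALIGNMENT and
             * large enough for the kernel's per-frame header plus one
             * jumbo frame */
            req->tp_frame_size = TPACKET_ALIGN(JUMBO_LEN + TPACKET_HDRLEN);

            /* tp_block_size must be a multiple of the page size; round
             * one frame up to whole pages, then size the block to hold
             * ODP_CONFIG_PKTIO_ENTRIES frames */
            req->tp_block_size = ((req->tp_frame_size + pz - 1) & -pz) *
                                 ODP_CONFIG_PKTIO_ENTRIES;
            req->tp_block_nr = blocks;

            /* the kernel rejects the request unless tp_frame_nr equals
             * frames-per-block times the number of blocks */
            req->tp_frame_nr = (req->tp_block_size / req->tp_frame_size) *
                               req->tp_block_nr;
    }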

I also think that linux-generic currently has such low speed due to the
lack of frames in blocks.

Does somebody have good experience with configuring packet mmap?

Thanks,
Maxim.
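
For readers who have not configured packet mmap before, a minimal sketch of
bringing up a TPACKET_V2 RX ring looks roughly like this (illustrative only,
not part of the patch; all error handling omitted):

    #include <sys/mman.h>
    #include <sys/socket.h>
    #include <linux/if_packet.h>

    /* sock is an already created AF_PACKET socket; req holds the ring
     * geometry, e.g. as computed by fill_ring_sketch() above */
    static void *setup_rx_ring(int sock, struct tpacket_req *req)
    {
            int ver = TPACKET_V2;

            /* select the ring header format before creating the ring */
            setsockopt(sock, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));

            /* the kernel allocates tp_block_nr blocks of tp_block_size
             * bytes each for the receive ring */
            setsockopt(sock, SOL_PACKET, PACKET_RX_RING, req, sizeof(*req));

            /* map the whole ring into user space as one contiguous area */
            return mmap(NULL, (size_t)req->tp_block_size * req->tp_block_nr,
                        PROT_READ | PROT_WRITE, MAP_SHARED, sock, 0);
    }

The socket itself would come from socket(AF_PACKET, SOCK_RAW,
htons(ETH_P_ALL)) and be bound to the interface with bind() before packets
start landing in the ring.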

Patch

diff --git a/platform/linux-generic/odp_packet_socket.c b/platform/linux-generic/odp_packet_socket.c
index 55c212e..4dcb111 100644
--- a/platform/linux-generic/odp_packet_socket.c
+++ b/platform/linux-generic/odp_packet_socket.c
@@ -587,7 +587,7 @@  static inline unsigned pkt_mmap_v2_tx(int sock, struct ring *ring,
 static void mmap_fill_ring(struct ring *ring, unsigned blocks)
 {
 	ring->req.tp_block_size = getpagesize() << 2;
-	ring->req.tp_frame_size = TPACKET_ALIGNMENT << 7;
+	ring->req.tp_frame_size = 9216 + TPACKET_HDRLEN;
 	ring->req.tp_block_nr = blocks;
 
 	ring->req.tp_frame_nr = ring->req.tp_block_size /
diff --git a/test/validation/odp_pktio.c b/test/validation/odp_pktio.c
index 8df367d..f8b9ecc 100644
--- a/test/validation/odp_pktio.c
+++ b/test/validation/odp_pktio.c
@@ -15,6 +15,10 @@ 
 
 #define PKT_BUF_NUM            32
 #define PKT_BUF_SIZE           1856
+#define PKT_BUF_JUMBO_SIZE     9216
+#define PKT_BUF_JUMBO_MAX_PAYLOAD (PKT_BUF_JUMBO_SIZE -\
+				   (ODPH_UDPHDR_LEN +\
+				   ODPH_IPV4HDR_LEN + ODPH_ETHHDR_LEN))
 #define MAX_NUM_IFACES         2
 #define TEST_SEQ_INVALID       ((uint32_t)~0)
 #define TEST_SEQ_MAGIC         0x92749451
@@ -33,12 +37,21 @@  typedef struct {
 	odp_queue_t inq;
 } pktio_info_t;
 
-/** structure of test packet UDP payload */
-typedef struct {
+typedef struct ODP_PACKED {
 	uint32be_t magic;
 	uint32be_t seq;
+} pkt_head_t;
+
+/** structure of test packet UDP payload */
+typedef struct ODP_PACKED {
+	pkt_head_t head;
+	char data[PKT_BUF_JUMBO_MAX_PAYLOAD - sizeof(pkt_head_t) -
+		  sizeof(uint32be_t)];
+	uint32be_t magic2;
 } pkt_test_data_t;
 
+static int test_jumbo;
+
 /** default packet pool */
 odp_pool_t default_pkt_pool = ODP_POOL_INVALID;
 
@@ -59,14 +72,18 @@  static void pktio_pkt_set_macs(odp_packet_t pkt,
 	CU_ASSERT(ret == ODPH_ETHADDR_LEN);
 }
 
+static uint32_t pkt_payload_len(void)
+{
+	return test_jumbo ? sizeof(pkt_test_data_t) : sizeof(pkt_head_t);
+}
+
 static int pktio_pkt_set_seq(odp_packet_t pkt)
 {
 	static uint32_t tstseq;
 	size_t l4_off;
-	pkt_test_data_t data;
+	pkt_test_data_t *data;
+	uint32_t len = pkt_payload_len();
 
-	data.magic = TEST_SEQ_MAGIC;
-	data.seq   = tstseq;
 
 	l4_off = odp_packet_l4_offset(pkt);
 	if (!l4_off) {
@@ -74,9 +91,16 @@  static int pktio_pkt_set_seq(odp_packet_t pkt)
 		return -1;
 	}
 
+	data = calloc(1, len);
+	CU_ASSERT_FATAL(data != NULL);
+
+	data->head.magic = TEST_SEQ_MAGIC;
+	data->magic2 = TEST_SEQ_MAGIC;
+	data->head.seq   = tstseq;
+
 	odp_packet_copydata_in(pkt, l4_off+ODPH_UDPHDR_LEN,
-			       sizeof(data), &data);
-
+			       len, data);
+	free(data);
 	tstseq++;
 
 	return 0;
@@ -85,18 +109,30 @@  static int pktio_pkt_set_seq(odp_packet_t pkt)
 static uint32_t pktio_pkt_seq(odp_packet_t pkt)
 {
 	size_t l4_off;
-	pkt_test_data_t data;
+	uint32_t seq = TEST_SEQ_INVALID;
+	pkt_test_data_t *data;
+	uint32_t len = pkt_payload_len();
 
 	l4_off = odp_packet_l4_offset(pkt);
-	if (l4_off) {
-		odp_packet_copydata_out(pkt, l4_off+ODPH_UDPHDR_LEN,
-					sizeof(data), &data);
+	if (!l4_off)
+		return TEST_SEQ_INVALID;
 
-		if (data.magic == TEST_SEQ_MAGIC)
-			return data.seq;
+	data = calloc(1, len);
+	CU_ASSERT_FATAL(data != NULL);
+
+	odp_packet_copydata_out(pkt, l4_off+ODPH_UDPHDR_LEN,
+				len, data);
+
+	if (data->head.magic == TEST_SEQ_MAGIC) {
+		if (test_jumbo && data->magic2 != TEST_SEQ_MAGIC) {
+			free(data);
+			return TEST_SEQ_INVALID;
+		}
+		seq = data->head.seq;
 	}
 
-	return TEST_SEQ_INVALID;
+	free(data);
+	return seq;
 }
 
 static odp_packet_t pktio_create_packet(void)
@@ -107,7 +143,7 @@  static odp_packet_t pktio_create_packet(void)
 	odph_udphdr_t *udp;
 	char *buf;
 	uint16_t seq;
-	size_t payload_len = sizeof(pkt_test_data_t);
+	size_t payload_len = pkt_payload_len();
 	uint8_t mac[ODPH_ETHADDR_LEN] = {0};
 
 	pkt = odp_packet_alloc(default_pkt_pool, payload_len + ODPH_UDPHDR_LEN +
@@ -187,8 +223,8 @@  static int default_pool_create(void)
 		return -1;
 
 	memset(&params, 0, sizeof(params));
-	params.pkt.seg_len = PKT_BUF_SIZE;
-	params.pkt.len     = PKT_BUF_SIZE;
+	params.pkt.seg_len = PKT_BUF_JUMBO_SIZE;
+	params.pkt.len     = PKT_BUF_JUMBO_SIZE;
 	params.pkt.num     = PKT_BUF_NUM;
 	params.type        = ODP_POOL_PACKET;
 
@@ -208,15 +244,24 @@  static odp_pktio_t create_pktio(const char *iface)
 	odp_pool_param_t params;
 
 	memset(&params, 0, sizeof(params));
-	params.pkt.seg_len = PKT_BUF_SIZE;
-	params.pkt.len     = PKT_BUF_SIZE;
+	if (test_jumbo) {
+		params.pkt.seg_len = PKT_BUF_JUMBO_SIZE;
+		params.pkt.len     = PKT_BUF_JUMBO_SIZE;
+
+	} else {
+		params.pkt.seg_len = PKT_BUF_SIZE;
+		params.pkt.len     = PKT_BUF_SIZE;
+	}
 	params.pkt.num     = PKT_BUF_NUM;
 	params.type        = ODP_POOL_PACKET;
 
 	snprintf(pool_name, sizeof(pool_name), "pkt_pool_%s", iface);
+
 	pool = odp_pool_lookup(pool_name);
-	if (pool == ODP_POOL_INVALID)
-		pool = odp_pool_create(pool_name, ODP_SHM_NULL, &params);
+	if (pool != ODP_POOL_INVALID)
+		odp_pool_destroy(pool);
+
+	pool = odp_pool_create(pool_name, ODP_SHM_NULL, &params);
 	CU_ASSERT(pool != ODP_POOL_INVALID);
 
 	pktio = odp_pktio_open(iface, pool);
@@ -450,6 +495,13 @@  static void test_odp_pktio_sched_multi(void)
 	pktio_test_txrx(ODP_QUEUE_TYPE_SCHED, 4);
 }
 
+static void test_odp_pktio_jumbo(void)
+{
+	test_jumbo = 1;
+	test_odp_pktio_sched_multi();
+	test_jumbo = 0;
+}
+
 static void test_odp_pktio_mtu(void)
 {
 	int ret;
@@ -668,6 +720,7 @@  CU_TestInfo pktio_tests[] = {
 	{"pktio poll multi",	test_odp_pktio_poll_multi},
 	{"pktio sched queues",	test_odp_pktio_sched_queue},
 	{"pktio sched multi",	test_odp_pktio_sched_multi},
+	{"pktio jumbo frames",	test_odp_pktio_jumbo},
 	{"pktio mtu",		test_odp_pktio_mtu},
 	{"pktio promisc mode",	test_odp_pktio_promisc},
 	{"pktio mac",		test_odp_pktio_mac},
diff --git a/test/validation/odp_pktio_run b/test/validation/odp_pktio_run
index 08288e6..b9d7e3c 100755
--- a/test/validation/odp_pktio_run
+++ b/test/validation/odp_pktio_run
@@ -56,8 +56,8 @@  setup_env1()
 		echo "pktio: error: unable to create veth pair"
 		exit $TEST_SKIPPED
 	fi
-	ip link set $IF0 up
-	ip link set $IF1 up
+	ip link set $IF0 mtu 9216 up
+	ip link set $IF1 mtu 9216 up
 
 	# network needs a little time to come up
 	sleep 1