[1/5] net/dpaa2: support more than 16 burst size in Rx func

Message ID 1516016286-11942-1-git-send-email-hemant.agrawal@nxp.com
State Accepted
Commit 043b36f621dd27b948188237f3213712a4b194df
Series [1/5] net/dpaa2: support more than 16 burst size in Rx func

Commit Message

Hemant Agrawal Jan. 15, 2018, 11:38 a.m. UTC
This patch enhances the Rx function to support burst sizes larger than 16.

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>

---
 drivers/bus/fslmc/portal/dpaa2_hw_pvt.h |   5 +-
 drivers/net/dpaa2/dpaa2_rxtx.c          | 140 ++++++++++++++++++++++++--------
 2 files changed, 108 insertions(+), 37 deletions(-)

-- 
2.7.4
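
For context on the change: the reworked Rx path splits a burst request larger
than DPAA2_DQRR_RING_SIZE (16) into successive volatile-dequeue pulls,
ping-ponging between the two per-queue dq_storage buffers via
q_storage->toggle so one result set can be consumed while the next pull is
outstanding. A minimal sketch of the chunking arithmetic follows (illustrative
only, not the driver code; DPAA2_DQRR_RING_SIZE is redefined locally):

#include <stdio.h>

#define DPAA2_DQRR_RING_SIZE 16	/* max frames per QBMAN pull */

int main(void)
{
	unsigned int nb_pkts = 40;	/* e.g. a 40-packet burst request */
	unsigned int toggle = 0;

	while (nb_pkts > 0) {
		unsigned int chunk = (nb_pkts > DPAA2_DQRR_RING_SIZE) ?
				     DPAA2_DQRR_RING_SIZE : nb_pkts;

		/* the driver issues a pull for 'chunk' frames into
		 * dq_storage[toggle] and consumes it while the next
		 * pull, if any, is already in flight
		 */
		printf("pull %u frames into dq_storage[%u]\n", chunk, toggle);
		toggle ^= 1;	/* ping-pong, as q_storage->toggle does */
		nb_pkts -= chunk;
	}
	return 0;
}

For a 40-packet request this prints pulls of 16, 16 and 8, matching the
last_num_pkts bookkeeping added in the patch below.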

Comments

Ferruh Yigit Jan. 17, 2018, 7:11 p.m. UTC | #1
On 1/15/2018 11:38 AM, Hemant Agrawal wrote:
> This patch enhances the Rx function to support burst sizes larger than 16.
>
> Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>

Series applied to dpdk-next-net/master, thanks.

Patch

diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
index 2e79399..97e61bb 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
@@ -95,8 +95,9 @@  struct dpaa2_dpbp_dev {
 struct queue_storage_info_t {
 	struct qbman_result *dq_storage[NUM_DQS_PER_QUEUE];
 	struct qbman_result *active_dqs;
-	int active_dpio_id;
-	int toggle;
+	uint8_t active_dpio_id;
+	uint8_t toggle;
+	uint8_t last_num_pkts;
 };
 
 struct dpaa2_queue;
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index 53466c3..efad728 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -495,12 +495,12 @@  dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 {
 	/* Function receive frames for a given device and VQ*/
 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
-	struct qbman_result *dq_storage;
+	struct qbman_result *dq_storage, *dq_storage1 = 0;
 	uint32_t fqid = dpaa2_q->fqid;
-	int ret, num_rx = 0;
-	uint8_t is_last = 0, status;
+	int ret, num_rx = 0, next_pull = 0, num_pulled, num_to_pull;
+	uint8_t pending, is_repeat, status;
 	struct qbman_swp *swp;
-	const struct qbman_fd *fd[DPAA2_DQRR_RING_SIZE], *next_fd;
+	const struct qbman_fd *fd, *next_fd;
 	struct qbman_pull_desc pulldesc;
 	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
 	struct rte_eth_dev *dev = dpaa2_q->dev;
@@ -513,37 +513,51 @@  dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		}
 	}
 	swp = DPAA2_PER_LCORE_PORTAL;
-	if (!q_storage->active_dqs) {
+
+	/* if the original request for this q was from another portal */
+	if (unlikely(DPAA2_PER_LCORE_DPIO->index !=
+		q_storage->active_dpio_id)) {
+		if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
+			while (!qbman_check_command_complete(get_swp_active_dqs
+				(DPAA2_PER_LCORE_DPIO->index)))
+				;
+			clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
+		}
+		q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
+	}
+
+	if (unlikely(!q_storage->active_dqs)) {
 		q_storage->toggle = 0;
 		dq_storage = q_storage->dq_storage[q_storage->toggle];
+		q_storage->last_num_pkts = (nb_pkts > DPAA2_DQRR_RING_SIZE) ?
+					       DPAA2_DQRR_RING_SIZE : nb_pkts;
 		qbman_pull_desc_clear(&pulldesc);
 		qbman_pull_desc_set_numframes(&pulldesc,
-					      (nb_pkts > DPAA2_DQRR_RING_SIZE) ?
-					       DPAA2_DQRR_RING_SIZE : nb_pkts);
+					      q_storage->last_num_pkts);
 		qbman_pull_desc_set_fq(&pulldesc, fqid);
 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
 			(dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
-		if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
-			while (!qbman_check_command_complete(
-			       get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
-				;
-			clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
-		}
 		while (1) {
 			if (qbman_swp_pull(swp, &pulldesc)) {
-				PMD_RX_LOG(WARNING, "VDQ command is not issued."
-					   "QBMAN is busy\n");
+				PMD_RX_LOG(WARNING,
+					"VDQ command not issued. QBMAN busy\n");
 				/* Portal was busy, try again */
 				continue;
 			}
 			break;
 		}
 		q_storage->active_dqs = dq_storage;
-		q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
 		set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage);
 	}
+
+	/* Packets to pull in the current pull request */
+	num_to_pull = q_storage->last_num_pkts;
+
+	/* Number of packets requested exceeds the current pull request */
+	if (nb_pkts > num_to_pull)
+		next_pull = nb_pkts - num_to_pull;
+
 	dq_storage = q_storage->active_dqs;
-	rte_prefetch0((void *)((uint64_t)(dq_storage + 1)));
 	/* Check if the previous issued command is completed.
 	 * Also seems like the SWP is shared between the Ethernet Driver
 	 * and the SEC driver.
@@ -552,7 +566,49 @@  dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		;
 	if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
 		clear_swp_active_dqs(q_storage->active_dpio_id);
-	while (!is_last) {
+
+repeat:
+	is_repeat = 0;
+
+	/* Issue the dequeue command once more to get another set of packets */
+	if (next_pull) {
+		q_storage->toggle ^= 1;
+		dq_storage1 = q_storage->dq_storage[q_storage->toggle];
+		qbman_pull_desc_clear(&pulldesc);
+
+		if (next_pull > DPAA2_DQRR_RING_SIZE) {
+			qbman_pull_desc_set_numframes(&pulldesc,
+					DPAA2_DQRR_RING_SIZE);
+			next_pull = next_pull - DPAA2_DQRR_RING_SIZE;
+			q_storage->last_num_pkts = DPAA2_DQRR_RING_SIZE;
+		} else {
+			qbman_pull_desc_set_numframes(&pulldesc, next_pull);
+			q_storage->last_num_pkts = next_pull;
+			next_pull = 0;
+		}
+		qbman_pull_desc_set_fq(&pulldesc, fqid);
+		qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
+			(dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
+		while (1) {
+			if (qbman_swp_pull(swp, &pulldesc)) {
+				PMD_RX_LOG(WARNING,
+					"VDQ command not issued. QBMAN busy\n");
+				/* Portal was busy, try again */
+				continue;
+			}
+			break;
+		}
+		is_repeat = 1;
+		q_storage->active_dqs = dq_storage1;
+		set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage1);
+	}
+
+	rte_prefetch0((void *)((uint64_t)(dq_storage + 1)));
+
+	num_pulled = 0;
+	pending = 1;
+
+	do {
 		/* Loop until the dq_storage is updated with
 		 * new token by QBMAN
 		 */
@@ -563,23 +619,23 @@  dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		 * setting Condition for Loop termination
 		 */
 		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
-			is_last = 1;
+			pending = 0;
 			/* Check for valid frame. */
-			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
+			status = qbman_result_DQ_flags(dq_storage);
 			if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
 				continue;
 		}
-		fd[num_rx] = qbman_result_DQ_fd(dq_storage);
+		fd = qbman_result_DQ_fd(dq_storage);
 
 		next_fd = qbman_result_DQ_fd(dq_storage + 1);
 		/* Prefetch Annotation address for the parse results */
-		rte_prefetch0((void *)((uint64_t)DPAA2_GET_FD_ADDR(next_fd)
+		rte_prefetch0((void *)(DPAA2_GET_FD_ADDR(next_fd)
 				+ DPAA2_FD_PTA_SIZE + 16));
 
-		if (unlikely(DPAA2_FD_GET_FORMAT(fd[num_rx]) == qbman_fd_sg))
-			bufs[num_rx] = eth_sg_fd_to_mbuf(fd[num_rx]);
+		if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
+			bufs[num_rx] = eth_sg_fd_to_mbuf(fd);
 		else
-			bufs[num_rx] = eth_fd_to_mbuf(fd[num_rx]);
+			bufs[num_rx] = eth_fd_to_mbuf(fd);
 		bufs[num_rx]->port = dev->data->port_id;
 
 		if (dev->data->dev_conf.rxmode.hw_vlan_strip)
@@ -587,22 +643,37 @@  dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 
 		dq_storage++;
 		num_rx++;
-	}
+		num_pulled++;
+	} while (pending);
 
-	if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
-		while (!qbman_check_command_complete(
-		       get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
-			;
-		clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
+	/* Another VDQ request is pending and this request returned full */
+	if (is_repeat) {
+		/* All packets pulled from this pull request */
+		if (num_pulled == num_to_pull) {
+			/* Packets to pull in the current pull request */
+			num_to_pull = q_storage->last_num_pkts;
+
+			dq_storage = dq_storage1;
+
+			while (!qbman_check_command_complete(dq_storage))
+				;
+			goto repeat;
+		} else {
+			/* This request did not return all packets */
+			goto next_time;
+		}
 	}
+
 	q_storage->toggle ^= 1;
 	dq_storage = q_storage->dq_storage[q_storage->toggle];
+	q_storage->last_num_pkts = (nb_pkts > DPAA2_DQRR_RING_SIZE) ?
+				       DPAA2_DQRR_RING_SIZE : nb_pkts;
 	qbman_pull_desc_clear(&pulldesc);
-	qbman_pull_desc_set_numframes(&pulldesc, DPAA2_DQRR_RING_SIZE);
+	qbman_pull_desc_set_numframes(&pulldesc, q_storage->last_num_pkts);
 	qbman_pull_desc_set_fq(&pulldesc, fqid);
 	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
 			(dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
-	/* Issue a volatile dequeue command. */
+	/* Issue a volatile dequeue command for the next pull */
 	while (1) {
 		if (qbman_swp_pull(swp, &pulldesc)) {
 			PMD_RX_LOG(WARNING, "VDQ command is not issued."
@@ -612,12 +683,11 @@  dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		break;
 	}
 	q_storage->active_dqs = dq_storage;
-	q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
 	set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage);
 
+next_time:
 	dpaa2_q->rx_pkts += num_rx;
 
-	/* Return the total number of packets received to DPAA2 app */
 	return num_rx;
 }
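
With this series applied, an application can ask the dpaa2 PMD for more than
16 packets per call through the standard burst API; previously each call
returned at most DPAA2_DQRR_RING_SIZE (16) frames. A hypothetical usage
sketch (port 0, queue 0 assumed configured and started):

#include <rte_ethdev.h>
#include <rte_mbuf.h>

static uint16_t poll_rx_queue(void)
{
	struct rte_mbuf *bufs[64];
	uint16_t i, nb;

	/* nb_pkts > 16 is now honored in a single call */
	nb = rte_eth_rx_burst(0, 0, bufs, 64);
	for (i = 0; i < nb; i++)
		rte_pktmbuf_free(bufs[i]);	/* placeholder; a real app processes the mbufs */
	return nb;
}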