
[v2,04/16] bus/fslmc: rework portal allocation to a per thread basis

Message ID 20200306095742.18080-5-hemant.agrawal@nxp.com
State Superseded
Series NXP DPAAx fixes and enhancements

Commit Message

Hemant Agrawal March 6, 2020, 9:57 a.m. UTC
From: Nipun Gupta <nipun.gupta@nxp.com>


This patch reworks the portal allocation, which was previously done on a
per-lcore basis, to a per-thread basis. Users can now also create their
own threads and use DPAA2 portals for packet I/O.

Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
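
As a rough usage sketch (not part of the patch): an application-created,
non-EAL thread pins itself to a single core and then requests a portal
before doing packet I/O. This assumes dpaa2_affine_qbman_swp() is reachable
through dpaa2_hw_dpio.h and that the fslmc bus driver is linked in; the
actual I/O calls are elided.

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

#include <dpaa2_hw_dpio.h>	/* dpaa2_affine_qbman_swp() */

static void *pkt_io_thread(void *arg)
{
	cpu_set_t cpuset;
	int core_id = *(int *)arg;

	/* The portal code derives the stashing destination from the
	 * thread's CPU affinity, so the thread must be pinned to exactly
	 * one core (see dpaa2_get_core_id() in the patch).
	 */
	CPU_ZERO(&cpuset);
	CPU_SET(core_id, &cpuset);
	pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset);

	if (dpaa2_affine_qbman_swp() != 0) {
		printf("no free DPIO portal for this thread\n");
		return NULL;
	}

	/* ... rte_eth_rx_burst()/rte_eth_tx_burst() on DPAA2 ports ... */
	return NULL;
}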

---
 drivers/bus/fslmc/Makefile               |   1 +
 drivers/bus/fslmc/portal/dpaa2_hw_dpio.c | 210 ++++++++++++-----------
 drivers/bus/fslmc/portal/dpaa2_hw_dpio.h |   3 -
 3 files changed, 114 insertions(+), 100 deletions(-)

-- 
2.17.1

Patch

diff --git a/drivers/bus/fslmc/Makefile b/drivers/bus/fslmc/Makefile
index 6d2286088..b38305fb4 100644
--- a/drivers/bus/fslmc/Makefile
+++ b/drivers/bus/fslmc/Makefile
@@ -18,6 +18,7 @@  CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/mc
 CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include
 CFLAGS += -I$(RTE_SDK)/drivers/common/dpaax
 CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common
+LDLIBS += -lpthread
 LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
 LDLIBS += -lrte_ethdev
 LDLIBS += -lrte_common_dpaax
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
index 739ce434b..e765a382f 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
@@ -62,6 +62,9 @@  uint8_t dpaa2_dqrr_size;
 /* Variable to store DPAA2 EQCR size */
 uint8_t dpaa2_eqcr_size;
 
+/* Variable to hold the portal_key, once created.*/
+static pthread_key_t dpaa2_portal_key;
+
 /*Stashing Macros default for LS208x*/
 static int dpaa2_core_cluster_base = 0x04;
 static int dpaa2_cluster_sz = 2;
@@ -87,6 +90,32 @@  static int dpaa2_cluster_sz = 2;
  * Cluster 4 (ID = x07) : CPU14, CPU15;
  */
 
+static int
+dpaa2_get_core_id(void)
+{
+	rte_cpuset_t cpuset;
+	int i, ret, cpu_id = -1;
+
+	ret = pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t),
+		&cpuset);
+	if (ret) {
+		DPAA2_BUS_ERR("pthread_getaffinity_np() failed");
+		return ret;
+	}
+
+	for (i = 0; i < RTE_MAX_LCORE; i++) {
+		if (CPU_ISSET(i, &cpuset)) {
+			if (cpu_id == -1)
+				cpu_id = i;
+			else
+				/* Multiple cpus are affined */
+				return -1;
+		}
+	}
+
+	return cpu_id;
+}
+
 static int
 dpaa2_core_cluster_sdest(int cpu_id)
 {
@@ -97,7 +126,7 @@  dpaa2_core_cluster_sdest(int cpu_id)
 
 #ifdef RTE_LIBRTE_PMD_DPAA2_EVENTDEV
 static void
-dpaa2_affine_dpio_intr_to_respective_core(int32_t dpio_id, int lcoreid)
+dpaa2_affine_dpio_intr_to_respective_core(int32_t dpio_id, int cpu_id)
 {
 #define STRING_LEN	28
 #define COMMAND_LEN	50
@@ -130,7 +159,7 @@  dpaa2_affine_dpio_intr_to_respective_core(int32_t dpio_id, int lcoreid)
 		return;
 	}
 
-	cpu_mask = cpu_mask << dpaa2_cpu[lcoreid];
+	cpu_mask = cpu_mask << dpaa2_cpu[cpu_id];
 	snprintf(command, COMMAND_LEN, "echo %X > /proc/irq/%s/smp_affinity",
 		 cpu_mask, token);
 	ret = system(command);
@@ -144,7 +173,7 @@  dpaa2_affine_dpio_intr_to_respective_core(int32_t dpio_id, int lcoreid)
 	fclose(file);
 }
 
-static int dpaa2_dpio_intr_init(struct dpaa2_dpio_dev *dpio_dev, int lcoreid)
+static int dpaa2_dpio_intr_init(struct dpaa2_dpio_dev *dpio_dev, int cpu_id)
 {
 	struct epoll_event epoll_ev;
 	int eventfd, dpio_epoll_fd, ret;
@@ -181,36 +210,42 @@  static int dpaa2_dpio_intr_init(struct dpaa2_dpio_dev *dpio_dev, int lcoreid)
 	}
 	dpio_dev->epoll_fd = dpio_epoll_fd;
 
-	dpaa2_affine_dpio_intr_to_respective_core(dpio_dev->hw_id, lcoreid);
+	dpaa2_affine_dpio_intr_to_respective_core(dpio_dev->hw_id, cpu_id);
 
 	return 0;
 }
+
+static void dpaa2_dpio_intr_deinit(struct dpaa2_dpio_dev *dpio_dev)
+{
+	int ret;
+
+	ret = rte_dpaa2_intr_disable(&dpio_dev->intr_handle, 0);
+	if (ret)
+		DPAA2_BUS_ERR("DPIO interrupt disable failed");
+
+	close(dpio_dev->epoll_fd);
+}
 #endif
 
 static int
-dpaa2_configure_stashing(struct dpaa2_dpio_dev *dpio_dev, int lcoreid)
+dpaa2_configure_stashing(struct dpaa2_dpio_dev *dpio_dev)
 {
 	int sdest, ret;
 	int cpu_id;
 
 	/* Set the Stashing Destination */
-	if (lcoreid < 0) {
-		lcoreid = rte_get_master_lcore();
-		if (lcoreid < 0) {
-			DPAA2_BUS_ERR("Getting CPU Index failed");
-			return -1;
-		}
+	cpu_id = dpaa2_get_core_id();
+	if (cpu_id < 0) {
+		DPAA2_BUS_ERR("Thread not affined to a single core");
+		return -1;
 	}
 
-	cpu_id = dpaa2_cpu[lcoreid];
-
 	/* Set the STASH Destination depending on Current CPU ID.
 	 * Valid values of SDEST are 4,5,6,7. Where,
 	 */
-
 	sdest = dpaa2_core_cluster_sdest(cpu_id);
-	DPAA2_BUS_DEBUG("Portal= %d  CPU= %u lcore id =%u SDEST= %d",
-			dpio_dev->index, cpu_id, lcoreid, sdest);
+	DPAA2_BUS_DEBUG("Portal= %d  CPU= %u SDEST= %d",
+			dpio_dev->index, cpu_id, sdest);
 
 	ret = dpio_set_stashing_destination(dpio_dev->dpio, CMD_PRI_LOW,
 					    dpio_dev->token, sdest);
@@ -220,7 +255,7 @@  dpaa2_configure_stashing(struct dpaa2_dpio_dev *dpio_dev, int lcoreid)
 	}
 
 #ifdef RTE_LIBRTE_PMD_DPAA2_EVENTDEV
-	if (dpaa2_dpio_intr_init(dpio_dev, lcoreid)) {
+	if (dpaa2_dpio_intr_init(dpio_dev, cpu_id)) {
 		DPAA2_BUS_ERR("Interrupt registration failed for dpio");
 		return -1;
 	}
@@ -229,7 +264,7 @@  dpaa2_configure_stashing(struct dpaa2_dpio_dev *dpio_dev, int lcoreid)
 	return 0;
 }
 
-static struct dpaa2_dpio_dev *dpaa2_get_qbman_swp(int lcoreid)
+static struct dpaa2_dpio_dev *dpaa2_get_qbman_swp(void)
 {
 	struct dpaa2_dpio_dev *dpio_dev = NULL;
 	int ret;
@@ -245,108 +280,74 @@  static struct dpaa2_dpio_dev *dpaa2_get_qbman_swp(int lcoreid)
 	DPAA2_BUS_DEBUG("New Portal %p (%d) affined thread - %lu",
 			dpio_dev, dpio_dev->index, syscall(SYS_gettid));
 
-	ret = dpaa2_configure_stashing(dpio_dev, lcoreid);
-	if (ret)
+	ret = dpaa2_configure_stashing(dpio_dev);
+	if (ret) {
 		DPAA2_BUS_ERR("dpaa2_configure_stashing failed");
+		return NULL;
+	}
 
 	return dpio_dev;
 }
 
+static void dpaa2_put_qbman_swp(struct dpaa2_dpio_dev *dpio_dev)
+{
+#ifdef RTE_LIBRTE_PMD_DPAA2_EVENTDEV
+	dpaa2_dpio_intr_deinit(dpio_dev);
+#endif
+	if (dpio_dev)
+		rte_atomic16_clear(&dpio_dev->ref_count);
+}
+
 int
 dpaa2_affine_qbman_swp(void)
 {
-	unsigned int lcore_id = rte_lcore_id();
+	struct dpaa2_dpio_dev *dpio_dev;
 	uint64_t tid = syscall(SYS_gettid);
 
-	if (lcore_id == LCORE_ID_ANY)
-		lcore_id = rte_get_master_lcore();
-	/* if the core id is not supported */
-	else if (lcore_id >= RTE_MAX_LCORE)
-		return -1;
-
-	if (dpaa2_io_portal[lcore_id].dpio_dev) {
-		DPAA2_BUS_DP_INFO("DPAA Portal=%p (%d) is being shared"
-			    " between thread %" PRIu64 " and current "
-			    "%" PRIu64 "\n",
-			    dpaa2_io_portal[lcore_id].dpio_dev,
-			    dpaa2_io_portal[lcore_id].dpio_dev->index,
-			    dpaa2_io_portal[lcore_id].net_tid,
-			    tid);
-		RTE_PER_LCORE(_dpaa2_io).dpio_dev
-			= dpaa2_io_portal[lcore_id].dpio_dev;
-		rte_atomic16_inc(&dpaa2_io_portal
-				 [lcore_id].dpio_dev->ref_count);
-		dpaa2_io_portal[lcore_id].net_tid = tid;
-
-		DPAA2_BUS_DP_DEBUG("Old Portal=%p (%d) affined thread - "
-				   "%" PRIu64 "\n",
-			    dpaa2_io_portal[lcore_id].dpio_dev,
-			    dpaa2_io_portal[lcore_id].dpio_dev->index,
-			    tid);
-		return 0;
-	}
-
 	/* Populate the dpaa2_io_portal structure */
-	dpaa2_io_portal[lcore_id].dpio_dev = dpaa2_get_qbman_swp(lcore_id);
-
-	if (dpaa2_io_portal[lcore_id].dpio_dev) {
-		RTE_PER_LCORE(_dpaa2_io).dpio_dev
-			= dpaa2_io_portal[lcore_id].dpio_dev;
-		dpaa2_io_portal[lcore_id].net_tid = tid;
+	if (!RTE_PER_LCORE(_dpaa2_io).dpio_dev) {
+		dpio_dev = dpaa2_get_qbman_swp();
+		if (!dpio_dev) {
+			DPAA2_BUS_ERR("No software portal resource left");
+			return -1;
+		}
+		RTE_PER_LCORE(_dpaa2_io).dpio_dev = dpio_dev;
 
-		return 0;
-	} else {
-		return -1;
+		DPAA2_BUS_INFO(
+			"DPAA Portal=%p (%d) is affined to thread %" PRIu64,
+			dpio_dev, dpio_dev->index, tid);
 	}
+	return 0;
 }
 
 int
 dpaa2_affine_qbman_ethrx_swp(void)
 {
-	unsigned int lcore_id = rte_lcore_id();
+	struct dpaa2_dpio_dev *dpio_dev;
 	uint64_t tid = syscall(SYS_gettid);
 
-	if (lcore_id == LCORE_ID_ANY)
-		lcore_id = rte_get_master_lcore();
-	/* if the core id is not supported */
-	else if (lcore_id >= RTE_MAX_LCORE)
-		return -1;
+	/* Populate the dpaa2_io_portal structure */
+	if (!RTE_PER_LCORE(_dpaa2_io).ethrx_dpio_dev) {
+		dpio_dev = dpaa2_get_qbman_swp();
+		if (!dpio_dev) {
+			DPAA2_BUS_ERR("No software portal resource left");
+			return -1;
+		}
+		RTE_PER_LCORE(_dpaa2_io).ethrx_dpio_dev = dpio_dev;
 
-	if (dpaa2_io_portal[lcore_id].ethrx_dpio_dev) {
-		DPAA2_BUS_DP_INFO(
-			"DPAA Portal=%p (%d) is being shared between thread"
-			" %" PRIu64 " and current %" PRIu64 "\n",
-			dpaa2_io_portal[lcore_id].ethrx_dpio_dev,
-			dpaa2_io_portal[lcore_id].ethrx_dpio_dev->index,
-			dpaa2_io_portal[lcore_id].sec_tid,
-			tid);
-		RTE_PER_LCORE(_dpaa2_io).ethrx_dpio_dev
-			= dpaa2_io_portal[lcore_id].ethrx_dpio_dev;
-		rte_atomic16_inc(&dpaa2_io_portal
-				 [lcore_id].ethrx_dpio_dev->ref_count);
-		dpaa2_io_portal[lcore_id].sec_tid = tid;
-
-		DPAA2_BUS_DP_DEBUG(
-			"Old Portal=%p (%d) affined thread"
-			" - %" PRIu64 "\n",
-			dpaa2_io_portal[lcore_id].ethrx_dpio_dev,
-			dpaa2_io_portal[lcore_id].ethrx_dpio_dev->index,
-			tid);
-		return 0;
+		DPAA2_BUS_INFO(
+			"DPAA Portal=%p (%d) is affined for eth rx to thread %"
+			PRIu64, dpio_dev, dpio_dev->index, tid);
 	}
+	return 0;
+}
 
-	/* Populate the dpaa2_io_portal structure */
-	dpaa2_io_portal[lcore_id].ethrx_dpio_dev =
-		dpaa2_get_qbman_swp(lcore_id);
-
-	if (dpaa2_io_portal[lcore_id].ethrx_dpio_dev) {
-		RTE_PER_LCORE(_dpaa2_io).ethrx_dpio_dev
-			= dpaa2_io_portal[lcore_id].ethrx_dpio_dev;
-		dpaa2_io_portal[lcore_id].sec_tid = tid;
-		return 0;
-	} else {
-		return -1;
-	}
+static void __attribute__((destructor(102))) dpaa2_portal_finish(void *arg)
+{
+	RTE_SET_USED(arg);
+
+	dpaa2_put_qbman_swp(RTE_PER_LCORE(_dpaa2_io).dpio_dev);
+	dpaa2_put_qbman_swp(RTE_PER_LCORE(_dpaa2_io).ethrx_dpio_dev);
 }
 
 /*
@@ -398,6 +399,7 @@  dpaa2_create_dpio_device(int vdev_fd,
 	struct vfio_region_info reg_info = { .argsz = sizeof(reg_info)};
 	struct qbman_swp_desc p_des;
 	struct dpio_attr attr;
+	int ret;
 	static int check_lcore_cpuset;
 
 	if (obj_info->num_regions < NUM_DPIO_REGIONS) {
@@ -547,12 +549,26 @@  dpaa2_create_dpio_device(int vdev_fd,
 
 	TAILQ_INSERT_TAIL(&dpio_dev_list, dpio_dev, next);
 
+	if (!dpaa2_portal_key) {
+		/* create the key, supplying a function that'll be invoked
+		 * when a portal affined thread will be deleted.
+		 */
+		ret = pthread_key_create(&dpaa2_portal_key,
+					 dpaa2_portal_finish);
+		if (ret) {
+			DPAA2_BUS_DEBUG("Unable to create pthread key (%d)",
+					ret);
+			goto err;
+		}
+	}
+
 	return 0;
 
 err:
 	if (dpio_dev->dpio) {
 		dpio_disable(dpio_dev->dpio, CMD_PRI_LOW, dpio_dev->token);
 		dpio_close(dpio_dev->dpio, CMD_PRI_LOW,  dpio_dev->token);
+		rte_free(dpio_dev->eqresp);
 		rte_free(dpio_dev->dpio);
 	}
 
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h
index 9da4af782..8af0474a1 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.h
@@ -14,9 +14,6 @@ 
 struct dpaa2_io_portal_t {
 	struct dpaa2_dpio_dev *dpio_dev;
 	struct dpaa2_dpio_dev *ethrx_dpio_dev;
-	uint64_t net_tid;
-	uint64_t sec_tid;
-	void *eventdev;
 };
 
 /*! Global per thread DPIO portal */
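
For reference, the per-thread cleanup in this patch hangs off a pthread key
destructor: pthread_key_create() registers a function that the pthread
library calls when a thread exits with a non-NULL value set for the key. A
minimal standalone illustration of that mechanism (names here are
illustrative and not taken from the patch):

#include <pthread.h>
#include <stdio.h>

static pthread_key_t demo_key;

/* Invoked automatically for each exiting thread that set a
 * non-NULL value for demo_key.
 */
static void demo_cleanup(void *value)
{
	printf("releasing per-thread resource %p\n", value);
}

static void *thread_fn(void *arg)
{
	/* Associate a per-thread value; demo_cleanup() fires on exit. */
	pthread_setspecific(demo_key, arg);
	return NULL;
}

int main(void)
{
	pthread_t t;
	int resource = 42;

	pthread_key_create(&demo_key, demo_cleanup);
	pthread_create(&t, NULL, thread_fn, &resource);
	pthread_join(t, NULL);
	return 0;
}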