
[v3,04/10] dpaa2: prepare for 32 bit compilation

Message ID 1521014166-3201-5-git-send-email-hemant.agrawal@nxp.com
State New
Series meson build support for dpaaX

Commit Message

Hemant Agrawal March 14, 2018, 7:56 a.m. UTC
This patch prepares the dpaa2 drivers for compilation on 32-bit machines.
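
On a 32-bit target, pointers and uint64_t no longer have the same width, so
direct casts between them (and %lx/%lu format specifiers used for 64-bit or
pointer values) break the build under -Werror. Most of the changes therefore
route conversions through pointer-sized integer types (size_t, intptr_t,
ptrdiff_t) and switch log formats to %p and the PRIx64/PRIu64 macros. A
minimal, illustrative sketch of the cast pattern (not part of this patch;
helper names are made up for the example):

    #include <stdint.h>
    #include <stddef.h>

    /* Illustrative helpers: convert between pointers and 64-bit values
     * without tripping 32-bit cast warnings, by going through the
     * pointer-sized size_t first.
     */
    static inline uint64_t example_ptr_to_u64(const void *p)
    {
            return (uint64_t)(size_t)p;     /* widen via pointer-sized int */
    }

    static inline void *example_u64_to_ptr(uint64_t v)
    {
            return (void *)(size_t)v;       /* narrow via pointer-sized int */
    }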

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>

---
 drivers/bus/fslmc/fslmc_vfio.c              |  10 +--
 drivers/bus/fslmc/mc/fsl_mc_cmd.h           |   2 +-
 drivers/bus/fslmc/portal/dpaa2_hw_dpio.c    |  22 +++---
 drivers/bus/fslmc/portal/dpaa2_hw_pvt.h     |  37 +++++-----
 drivers/bus/fslmc/qbman/qbman_portal.c      |  14 ++--
 drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 101 ++++++++++++++--------------
 drivers/event/dpaa2/dpaa2_eventdev.c        |  10 +--
 drivers/mempool/dpaa2/dpaa2_hw_mempool.c    |   8 +--
 drivers/net/dpaa2/Makefile                  |   1 -
 drivers/net/dpaa2/base/dpaa2_hw_dpni.c      |   2 +-
 drivers/net/dpaa2/dpaa2_ethdev.c            |   6 +-
 drivers/net/dpaa2/dpaa2_rxtx.c              |  63 +++++++++--------
 12 files changed, 137 insertions(+), 139 deletions(-)

-- 
2.7.4

Patch

diff --git a/drivers/bus/fslmc/fslmc_vfio.c b/drivers/bus/fslmc/fslmc_vfio.c
index 1241295..e840ad6 100644
--- a/drivers/bus/fslmc/fslmc_vfio.c
+++ b/drivers/bus/fslmc/fslmc_vfio.c
@@ -76,7 +76,7 @@  fslmc_get_container_group(int *groupid)
 	if (!g_container) {
 		container = getenv("DPRC");
 		if (container == NULL) {
-			RTE_LOG(WARNING, EAL, "DPAA2: DPRC not available\n");
+			RTE_LOG(DEBUG, EAL, "DPAA2: DPRC not available\n");
 			return -EINVAL;
 		}
 
@@ -270,7 +270,7 @@  int rte_fslmc_vfio_dmamap(void)
 
 static int64_t vfio_map_mcp_obj(struct fslmc_vfio_group *group, char *mcp_obj)
 {
-	int64_t v_addr = (int64_t)MAP_FAILED;
+	intptr_t v_addr = (intptr_t)MAP_FAILED;
 	int32_t ret, mc_fd;
 
 	struct vfio_device_info d_info = { .argsz = sizeof(d_info) };
@@ -301,7 +301,7 @@  static int64_t vfio_map_mcp_obj(struct fslmc_vfio_group *group, char *mcp_obj)
 	FSLMC_VFIO_LOG(DEBUG, "region offset = %llx  , region size = %llx",
 		       reg_info.offset, reg_info.size);
 
-	v_addr = (uint64_t)mmap(NULL, reg_info.size,
+	v_addr = (size_t)mmap(NULL, reg_info.size,
 		PROT_WRITE | PROT_READ, MAP_SHARED,
 		mc_fd, reg_info.offset);
 
@@ -469,7 +469,7 @@  fslmc_process_iodevices(struct rte_dpaa2_device *dev)
 static int
 fslmc_process_mcp(struct rte_dpaa2_device *dev)
 {
-	int64_t v_addr;
+	intptr_t v_addr;
 	char *dev_name;
 	struct fsl_mc_io dpmng  = {0};
 	struct mc_version mc_ver_info = {0};
@@ -489,7 +489,7 @@  fslmc_process_mcp(struct rte_dpaa2_device *dev)
 	}
 
 	v_addr = vfio_map_mcp_obj(&vfio_group, dev_name);
-	if (v_addr == (int64_t)MAP_FAILED) {
+	if (v_addr == (intptr_t)MAP_FAILED) {
 		FSLMC_VFIO_LOG(ERR, "Error mapping region  (errno = %d)",
 			       errno);
 		free(rte_mcp_ptr_list);
diff --git a/drivers/bus/fslmc/mc/fsl_mc_cmd.h b/drivers/bus/fslmc/mc/fsl_mc_cmd.h
index a3c3e79..ac91961 100644
--- a/drivers/bus/fslmc/mc/fsl_mc_cmd.h
+++ b/drivers/bus/fslmc/mc/fsl_mc_cmd.h
@@ -27,7 +27,7 @@ 
 #define le32_to_cpu	rte_le_to_cpu_32
 #define le16_to_cpu	rte_le_to_cpu_16
 
-#define BITS_PER_LONG			64
+#define BITS_PER_LONG	(__SIZEOF_LONG__ * 8)
 #define GENMASK(h, l) \
 		(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
 
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
index eefde15..7b671ef 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
@@ -291,7 +291,7 @@  struct dpaa2_dpio_dev *dpaa2_get_qbman_swp(int cpu_id)
 	if (!dpio_dev)
 		return NULL;
 
-	PMD_DRV_LOG(DEBUG, "New Portal=0x%x (%d) affined thread - %lu",
+	PMD_DRV_LOG(DEBUG, "New Portal %p (%d) affined thread - %lu",
 		    dpio_dev, dpio_dev->index, syscall(SYS_gettid));
 
 	ret = dpaa2_configure_stashing(dpio_dev, cpu_id);
@@ -314,8 +314,9 @@  dpaa2_affine_qbman_swp(void)
 		return -1;
 
 	if (dpaa2_io_portal[lcore_id].dpio_dev) {
-		PMD_DRV_LOG(INFO, "DPAA Portal=0x%x (%d) is being shared"
-			    " between thread %lu and current  %lu",
+		PMD_DRV_LOG(INFO, "DPAAPortal=%p (%d) is being shared"
+			    " between thread %" PRIu64 " and current "
+			    "%" PRIu64 "\n",
 			    dpaa2_io_portal[lcore_id].dpio_dev,
 			    dpaa2_io_portal[lcore_id].dpio_dev->index,
 			    dpaa2_io_portal[lcore_id].net_tid,
@@ -326,7 +327,8 @@  dpaa2_affine_qbman_swp(void)
 				 [lcore_id].dpio_dev->ref_count);
 		dpaa2_io_portal[lcore_id].net_tid = tid;
 
-		PMD_DRV_LOG(DEBUG, "Old Portal=0x%x (%d) affined thread - %lu",
+		PMD_DRV_LOG(DEBUG, "Old Portal=%p (%d)"
+			    "affined thread - %" PRIu64 "\n",
 			    dpaa2_io_portal[lcore_id].dpio_dev,
 			    dpaa2_io_portal[lcore_id].dpio_dev->index,
 			    tid);
@@ -360,8 +362,9 @@  dpaa2_affine_qbman_swp_sec(void)
 		return -1;
 
 	if (dpaa2_io_portal[lcore_id].sec_dpio_dev) {
-		PMD_DRV_LOG(INFO, "DPAA Portal=0x%x (%d) is being shared"
-			    " between thread %lu and current  %lu",
+		PMD_DRV_LOG(INFO, "DPAAPortal=%p (%d) is being shared"
+			    " between thread %" PRIu64 " and current "
+			    "%" PRIu64 "\n",
 			    dpaa2_io_portal[lcore_id].sec_dpio_dev,
 			    dpaa2_io_portal[lcore_id].sec_dpio_dev->index,
 			    dpaa2_io_portal[lcore_id].sec_tid,
@@ -372,7 +375,8 @@  dpaa2_affine_qbman_swp_sec(void)
 				 [lcore_id].sec_dpio_dev->ref_count);
 		dpaa2_io_portal[lcore_id].sec_tid = tid;
 
-		PMD_DRV_LOG(DEBUG, "Old Portal=0x%x (%d) affined thread - %lu",
+		PMD_DRV_LOG(DEBUG, "Old Portal=%p (%d) "
+			    "affined thread - %" PRIu64 "\n",
 			    dpaa2_io_portal[lcore_id].sec_dpio_dev,
 			    dpaa2_io_portal[lcore_id].sec_dpio_dev->index,
 			    tid);
@@ -427,7 +431,7 @@  dpaa2_create_dpio_device(int vdev_fd,
 	}
 
 	dpio_dev->ce_size = reg_info.size;
-	dpio_dev->qbman_portal_ce_paddr = (uint64_t)mmap(NULL, reg_info.size,
+	dpio_dev->qbman_portal_ce_paddr = (size_t)mmap(NULL, reg_info.size,
 				PROT_WRITE | PROT_READ, MAP_SHARED,
 				vdev_fd, reg_info.offset);
 
@@ -439,7 +443,7 @@  dpaa2_create_dpio_device(int vdev_fd,
 	}
 
 	dpio_dev->ci_size = reg_info.size;
-	dpio_dev->qbman_portal_ci_paddr = (uint64_t)mmap(NULL, reg_info.size,
+	dpio_dev->qbman_portal_ci_paddr = (size_t)mmap(NULL, reg_info.size,
 				PROT_WRITE | PROT_READ, MAP_SHARED,
 				vdev_fd, reg_info.offset);
 
diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
index d421dbf..4a19d42 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
@@ -174,7 +174,7 @@  enum qbman_fd_format {
 };
 /*Macros to define operations on FD*/
 #define DPAA2_SET_FD_ADDR(fd, addr) do {			\
-	(fd)->simple.addr_lo = lower_32_bits((uint64_t)(addr));	\
+	(fd)->simple.addr_lo = lower_32_bits((size_t)(addr));	\
 	(fd)->simple.addr_hi = upper_32_bits((uint64_t)(addr));	\
 } while (0)
 #define DPAA2_SET_FD_LEN(fd, length)	((fd)->simple.len = length)
@@ -193,33 +193,32 @@  enum qbman_fd_format {
 
 #define	DPAA2_SET_FD_ASAL(fd, asal)	((fd)->simple.ctrl |= (asal << 16))
 #define DPAA2_SET_FD_FLC(fd, addr)	do { \
-	(fd)->simple.flc_lo = lower_32_bits((uint64_t)(addr));	\
+	(fd)->simple.flc_lo = lower_32_bits((size_t)(addr));	\
 	(fd)->simple.flc_hi = upper_32_bits((uint64_t)(addr));	\
 } while (0)
 #define DPAA2_SET_FLE_INTERNAL_JD(fle, len) ((fle)->frc = (0x80000000 | (len)))
 #define DPAA2_GET_FLE_ADDR(fle)					\
 	(uint64_t)((((uint64_t)((fle)->addr_hi)) << 32) + (fle)->addr_lo)
 #define DPAA2_SET_FLE_ADDR(fle, addr) do { \
-	(fle)->addr_lo = lower_32_bits((uint64_t)addr);     \
-	(fle)->addr_hi = upper_32_bits((uint64_t)addr);	  \
+	(fle)->addr_lo = lower_32_bits((size_t)addr);		\
+	(fle)->addr_hi = upper_32_bits((uint64_t)addr);		\
 } while (0)
 #define DPAA2_GET_FLE_CTXT(fle)					\
-	(uint64_t)((((uint64_t)((fle)->reserved[1])) << 32) + \
-			(fle)->reserved[0])
+	((((uint64_t)((fle)->reserved[1])) << 32) + (fle)->reserved[0])
 #define DPAA2_FLE_SAVE_CTXT(fle, addr) do { \
-	(fle)->reserved[0] = lower_32_bits((uint64_t)addr);     \
-	(fle)->reserved[1] = upper_32_bits((uint64_t)addr);	  \
+	(fle)->reserved[0] = lower_32_bits((size_t)addr);	\
+	(fle)->reserved[1] = upper_32_bits((uint64_t)addr);	\
 } while (0)
 #define DPAA2_SET_FLE_OFFSET(fle, offset) \
 	((fle)->fin_bpid_offset |= (uint32_t)(offset) << 16)
-#define DPAA2_SET_FLE_BPID(fle, bpid) ((fle)->fin_bpid_offset |= (uint64_t)bpid)
+#define DPAA2_SET_FLE_BPID(fle, bpid) ((fle)->fin_bpid_offset |= (size_t)bpid)
 #define DPAA2_GET_FLE_BPID(fle) ((fle)->fin_bpid_offset & 0x000000ff)
-#define DPAA2_SET_FLE_FIN(fle)	((fle)->fin_bpid_offset |= (uint64_t)1 << 31)
+#define DPAA2_SET_FLE_FIN(fle)	((fle)->fin_bpid_offset |= 1 << 31)
 #define DPAA2_SET_FLE_IVP(fle)   (((fle)->fin_bpid_offset |= 0x00004000))
 #define DPAA2_SET_FD_COMPOUND_FMT(fd)	\
 	((fd)->simple.bpid_offset |= (uint32_t)1 << 28)
 #define DPAA2_GET_FD_ADDR(fd)	\
-((uint64_t)((((uint64_t)((fd)->simple.addr_hi)) << 32) + (fd)->simple.addr_lo))
+(((((uint64_t)((fd)->simple.addr_hi)) << 32) + (fd)->simple.addr_lo))
 
 #define DPAA2_GET_FD_LEN(fd)	((fd)->simple.len)
 #define DPAA2_GET_FD_BPID(fd)	(((fd)->simple.bpid_offset & 0x00003FFF))
@@ -231,7 +230,7 @@  enum qbman_fd_format {
 	(((fle)->fin_bpid_offset & ((uint64_t)1 << 29)) ? 1 : 0)
 
 #define DPAA2_INLINE_MBUF_FROM_BUF(buf, meta_data_size) \
-	((struct rte_mbuf *)((uint64_t)(buf) - (meta_data_size)))
+	((struct rte_mbuf *)((size_t)(buf) - (meta_data_size)))
 
 #define DPAA2_ASAL_VAL (DPAA2_MBUF_HW_ANNOTATION / 64)
 
@@ -265,14 +264,14 @@  static void *dpaa2_mem_ptov(phys_addr_t paddr)
 	int i;
 
 	if (dpaa2_virt_mode)
-		return (void *)paddr;
+		return (void *)(size_t)paddr;
 
 	memseg = rte_eal_get_physmem_layout();
 
 	for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
 		if (paddr >= memseg[i].iova &&
-		   (char *)paddr < (char *)memseg[i].iova + memseg[i].len)
-			return (void *)(memseg[i].addr_64
+		    paddr < memseg[i].iova + memseg[i].len)
+			return (void *)(size_t)(memseg[i].addr_64
 				+ (paddr - memseg[i].iova));
 	}
 	return NULL;
@@ -295,7 +294,7 @@  static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr)
 			return memseg[i].iova
 				+ (vaddr - memseg[i].addr_64);
 	}
-	return (phys_addr_t)(NULL);
+	return (size_t)(NULL);
 }
 
 /**
@@ -311,18 +310,18 @@  static phys_addr_t dpaa2_mem_vtop(uint64_t vaddr)
 /**
  * macro to convert Virtual address to IOVA
  */
-#define DPAA2_VADDR_TO_IOVA(_vaddr) dpaa2_mem_vtop((uint64_t)(_vaddr))
+#define DPAA2_VADDR_TO_IOVA(_vaddr) dpaa2_mem_vtop((size_t)(_vaddr))
 
 /**
  * macro to convert IOVA to Virtual address
  */
-#define DPAA2_IOVA_TO_VADDR(_iova) dpaa2_mem_ptov((phys_addr_t)(_iova))
+#define DPAA2_IOVA_TO_VADDR(_iova) dpaa2_mem_ptov((size_t)(_iova))
 
 /**
  * macro to convert modify the memory containing IOVA to Virtual address
  */
 #define DPAA2_MODIFY_IOVA_TO_VADDR(_mem, _type) \
-	{_mem = (_type)(dpaa2_mem_ptov((phys_addr_t)(_mem))); }
+	{_mem = (_type)(dpaa2_mem_ptov((size_t)(_mem))); }
 
 #else	/* RTE_LIBRTE_DPAA2_USE_PHYS_IOVA */
 
diff --git a/drivers/bus/fslmc/qbman/qbman_portal.c b/drivers/bus/fslmc/qbman/qbman_portal.c
index e221733..713ec96 100644
--- a/drivers/bus/fslmc/qbman/qbman_portal.c
+++ b/drivers/bus/fslmc/qbman/qbman_portal.c
@@ -553,10 +553,9 @@  int qbman_swp_enqueue_multiple(struct qbman_swp *s,
 
 	/* Flush all the cacheline without load/store in between */
 	eqcr_pi = s->eqcr.pi;
-	addr_cena = (uint64_t)s->sys.addr_cena;
+	addr_cena = (size_t)s->sys.addr_cena;
 	for (i = 0; i < num_enqueued; i++) {
-		dcbf((uint64_t *)(addr_cena +
-				QBMAN_CENA_SWP_EQCR(eqcr_pi & 7)));
+		dcbf((addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & 7)));
 		eqcr_pi++;
 		eqcr_pi &= 0xF;
 	}
@@ -620,10 +619,9 @@  int qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
 
 	/* Flush all the cacheline without load/store in between */
 	eqcr_pi = s->eqcr.pi;
-	addr_cena = (uint64_t)s->sys.addr_cena;
+	addr_cena = (size_t)s->sys.addr_cena;
 	for (i = 0; i < num_enqueued; i++) {
-		dcbf((uint64_t *)(addr_cena +
-				QBMAN_CENA_SWP_EQCR(eqcr_pi & 7)));
+		dcbf((addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & 7)));
 		eqcr_pi++;
 		eqcr_pi &= 0xF;
 	}
@@ -690,7 +688,7 @@  void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
 				 dma_addr_t storage_phys,
 				 int stash)
 {
-	d->pull.rsp_addr_virt = (uint64_t)storage;
+	d->pull.rsp_addr_virt = (size_t)storage;
 
 	if (!storage) {
 		d->pull.verb &= ~(1 << QB_VDQCR_VERB_RLS_SHIFT);
@@ -749,7 +747,7 @@  int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
 	}
 
 	d->pull.tok = s->sys.idx + 1;
-	s->vdq.storage = (void *)d->pull.rsp_addr_virt;
+	s->vdq.storage = (void *)(size_t)d->pull.rsp_addr_virt;
 	p = qbman_cena_write_start_wo_shadow(&s->sys, QBMAN_CENA_SWP_VDQCR);
 	memcpy(&p[1], &cl[1], 12);
 
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index 9a790dd..9a74845 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -77,11 +77,11 @@  build_proto_fd(dpaa2_sec_session *sess,
 	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
 	DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
 	DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
-	DPAA2_SET_FD_FLC(fd, ((uint64_t)flc));
+	DPAA2_SET_FD_FLC(fd, (ptrdiff_t)flc);
 
 	/* save physical address of mbuf */
 	op->sym->aead.digest.phys_addr = mbuf->buf_iova;
-	mbuf->buf_iova = (uint64_t)op;
+	mbuf->buf_iova = (size_t)op;
 
 	return 0;
 }
@@ -118,7 +118,7 @@  build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
 	}
 	memset(fle, 0, FLE_SG_MEM_SIZE);
 	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
-	DPAA2_FLE_SAVE_CTXT(fle, priv);
+	DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv);
 
 	op_fle = fle + 1;
 	ip_fle = fle + 2;
@@ -269,7 +269,7 @@  build_authenc_gcm_fd(dpaa2_sec_session *sess,
 	}
 	memset(fle, 0, FLE_POOL_BUF_SIZE);
 	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
-	DPAA2_FLE_SAVE_CTXT(fle, priv);
+	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
 	fle = fle + 1;
 	sge = fle + 2;
 	if (likely(bpid < MAX_BPID)) {
@@ -414,7 +414,7 @@  build_authenc_sg_fd(dpaa2_sec_session *sess,
 	}
 	memset(fle, 0, FLE_SG_MEM_SIZE);
 	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
-	DPAA2_FLE_SAVE_CTXT(fle, priv);
+	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
 
 	op_fle = fle + 1;
 	ip_fle = fle + 2;
@@ -563,7 +563,7 @@  build_authenc_fd(dpaa2_sec_session *sess,
 	}
 	memset(fle, 0, FLE_POOL_BUF_SIZE);
 	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
-	DPAA2_FLE_SAVE_CTXT(fle, priv);
+	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
 	fle = fle + 1;
 	sge = fle + 2;
 	if (likely(bpid < MAX_BPID)) {
@@ -692,7 +692,7 @@  static inline int build_auth_sg_fd(
 	memset(fle, 0, FLE_SG_MEM_SIZE);
 	/* first FLE entry used to store mbuf and session ctxt */
 	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
-	DPAA2_FLE_SAVE_CTXT(fle, priv);
+	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
 	op_fle = fle + 1;
 	ip_fle = fle + 2;
 	sge = fle + 3;
@@ -773,7 +773,7 @@  build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
 	 * We can have a better approach to use the inline Mbuf
 	 */
 	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
-	DPAA2_FLE_SAVE_CTXT(fle, priv);
+	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
 	fle = fle + 1;
 
 	if (likely(bpid < MAX_BPID)) {
@@ -865,7 +865,7 @@  build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
 	memset(fle, 0, FLE_SG_MEM_SIZE);
 	/* first FLE entry used to store mbuf and session ctxt */
 	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
-	DPAA2_FLE_SAVE_CTXT(fle, priv);
+	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
 
 	op_fle = fle + 1;
 	ip_fle = fle + 2;
@@ -944,13 +944,13 @@  build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
 	DPAA2_SET_FD_COMPOUND_FMT(fd);
 	DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
 
-	PMD_TX_LOG(DEBUG,
-			"CIPHER SG: fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
-		   (void *)DPAA2_GET_FD_ADDR(fd),
-		   DPAA2_GET_FD_BPID(fd),
-		   rte_dpaa2_bpid_info[bpid].meta_data_size,
-		   DPAA2_GET_FD_OFFSET(fd),
-		   DPAA2_GET_FD_LEN(fd));
+	PMD_TX_LOG(DEBUG, "CIPHER SG: fdaddr =%" PRIx64
+		" bpid =%d meta =%d off =%d, len =%d\n",
+		DPAA2_GET_FD_ADDR(fd),
+		DPAA2_GET_FD_BPID(fd),
+		rte_dpaa2_bpid_info[bpid].meta_data_size,
+		DPAA2_GET_FD_OFFSET(fd),
+		DPAA2_GET_FD_LEN(fd));
 	return 0;
 }
 
@@ -987,7 +987,7 @@  build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
 	 * We can have a better approach to use the inline Mbuf
 	 */
 	DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
-	DPAA2_FLE_SAVE_CTXT(fle, priv);
+	DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
 	fle = fle + 1;
 	sge = fle + 2;
 
@@ -1206,7 +1206,7 @@  sec_simple_fd_to_mbuf(const struct qbman_fd *fd, __rte_unused uint8_t id)
 		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
 
-	op = (struct rte_crypto_op *)mbuf->buf_iova;
+	op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
 	mbuf->buf_iova = op->sym->aead.digest.phys_addr;
 	op->sym->aead.digest.phys_addr = 0L;
 
@@ -1267,16 +1267,17 @@  sec_fd_to_mbuf(const struct qbman_fd *fd, uint8_t driver_id)
 	PMD_RX_LOG(DEBUG, "mbuf %p BMAN buf addr %p",
 		   (void *)dst, dst->buf_addr);
 
-	PMD_RX_LOG(DEBUG, "fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
-		   (void *)DPAA2_GET_FD_ADDR(fd),
-		   DPAA2_GET_FD_BPID(fd),
-		   rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
-		   DPAA2_GET_FD_OFFSET(fd),
-		   DPAA2_GET_FD_LEN(fd));
+	PMD_RX_LOG(DEBUG, "fdaddr =%" PRIx64
+		" bpid =%d meta =%d off =%d, len =%d",
+		DPAA2_GET_FD_ADDR(fd),
+		DPAA2_GET_FD_BPID(fd),
+		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
+		DPAA2_GET_FD_OFFSET(fd),
+		DPAA2_GET_FD_LEN(fd));
 
 	/* free the fle memory */
 	if (likely(rte_pktmbuf_is_contiguous(src))) {
-		priv = (struct ctxt_priv *)DPAA2_GET_FLE_CTXT(fle - 1);
+		priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1);
 		rte_mempool_put(priv->fle_pool, (void *)(fle-1));
 	} else
 		rte_free((void *)(fle-1));
@@ -1455,7 +1456,7 @@  dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 	dev->data->queue_pairs[qp_id] = qp;
 
 	cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
-	cfg.user_ctx = (uint64_t)(&qp->rx_vq);
+	cfg.user_ctx = (size_t)(&qp->rx_vq);
 	retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
 				      qp_id, &cfg);
 	return retcode;
@@ -1536,7 +1537,7 @@  dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
 
 	memcpy(session->cipher_key.data, xform->cipher.key.data,
 	       xform->cipher.key.length);
-	cipherdata.key = (uint64_t)session->cipher_key.data;
+	cipherdata.key = (size_t)session->cipher_key.data;
 	cipherdata.keylen = session->cipher_key.length;
 	cipherdata.key_enc_flags = 0;
 	cipherdata.key_type = RTA_DATA_IMM;
@@ -1595,10 +1596,10 @@  dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
 
 	flc->word1_sdl = (uint8_t)bufsize;
 	flc->word2_rflc_31_0 = lower_32_bits(
-			(uint64_t)&(((struct dpaa2_sec_qp *)
+			(size_t)&(((struct dpaa2_sec_qp *)
 			dev->data->queue_pairs[0])->rx_vq));
 	flc->word3_rflc_63_32 = upper_32_bits(
-			(uint64_t)&(((struct dpaa2_sec_qp *)
+			(size_t)&(((struct dpaa2_sec_qp *)
 			dev->data->queue_pairs[0])->rx_vq));
 	session->ctxt = priv;
 
@@ -1651,7 +1652,7 @@  dpaa2_sec_auth_init(struct rte_cryptodev *dev,
 
 	memcpy(session->auth_key.data, xform->auth.key.data,
 	       xform->auth.key.length);
-	authdata.key = (uint64_t)session->auth_key.data;
+	authdata.key = (size_t)session->auth_key.data;
 	authdata.keylen = session->auth_key.length;
 	authdata.key_enc_flags = 0;
 	authdata.key_type = RTA_DATA_IMM;
@@ -1720,10 +1721,10 @@  dpaa2_sec_auth_init(struct rte_cryptodev *dev,
 
 	flc->word1_sdl = (uint8_t)bufsize;
 	flc->word2_rflc_31_0 = lower_32_bits(
-			(uint64_t)&(((struct dpaa2_sec_qp *)
+			(size_t)&(((struct dpaa2_sec_qp *)
 			dev->data->queue_pairs[0])->rx_vq));
 	flc->word3_rflc_63_32 = upper_32_bits(
-			(uint64_t)&(((struct dpaa2_sec_qp *)
+			(size_t)&(((struct dpaa2_sec_qp *)
 			dev->data->queue_pairs[0])->rx_vq));
 	session->ctxt = priv;
 	for (i = 0; i < bufsize; i++)
@@ -1786,7 +1787,7 @@  dpaa2_sec_aead_init(struct rte_cryptodev *dev,
 	session->aead_key.length = aead_xform->key.length;
 	ctxt->auth_only_len = aead_xform->aad_length;
 
-	aeaddata.key = (uint64_t)session->aead_key.data;
+	aeaddata.key = (size_t)session->aead_key.data;
 	aeaddata.keylen = session->aead_key.length;
 	aeaddata.key_enc_flags = 0;
 	aeaddata.key_type = RTA_DATA_IMM;
@@ -1840,10 +1841,10 @@  dpaa2_sec_aead_init(struct rte_cryptodev *dev,
 				session->digest_length);
 	flc->word1_sdl = (uint8_t)bufsize;
 	flc->word2_rflc_31_0 = lower_32_bits(
-			(uint64_t)&(((struct dpaa2_sec_qp *)
+			(size_t)&(((struct dpaa2_sec_qp *)
 			dev->data->queue_pairs[0])->rx_vq));
 	flc->word3_rflc_63_32 = upper_32_bits(
-			(uint64_t)&(((struct dpaa2_sec_qp *)
+			(size_t)&(((struct dpaa2_sec_qp *)
 			dev->data->queue_pairs[0])->rx_vq));
 	session->ctxt = priv;
 	for (i = 0; i < bufsize; i++)
@@ -1928,7 +1929,7 @@  dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
 	memcpy(session->auth_key.data, auth_xform->key.data,
 	       auth_xform->key.length);
 
-	authdata.key = (uint64_t)session->auth_key.data;
+	authdata.key = (size_t)session->auth_key.data;
 	authdata.keylen = session->auth_key.length;
 	authdata.key_enc_flags = 0;
 	authdata.key_type = RTA_DATA_IMM;
@@ -1988,7 +1989,7 @@  dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
 			auth_xform->algo);
 		goto error_out;
 	}
-	cipherdata.key = (uint64_t)session->cipher_key.data;
+	cipherdata.key = (size_t)session->cipher_key.data;
 	cipherdata.keylen = session->cipher_key.length;
 	cipherdata.key_enc_flags = 0;
 	cipherdata.key_type = RTA_DATA_IMM;
@@ -2066,10 +2067,10 @@  dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
 
 	flc->word1_sdl = (uint8_t)bufsize;
 	flc->word2_rflc_31_0 = lower_32_bits(
-			(uint64_t)&(((struct dpaa2_sec_qp *)
+			(size_t)&(((struct dpaa2_sec_qp *)
 			dev->data->queue_pairs[0])->rx_vq));
 	flc->word3_rflc_63_32 = upper_32_bits(
-			(uint64_t)&(((struct dpaa2_sec_qp *)
+			(size_t)&(((struct dpaa2_sec_qp *)
 			dev->data->queue_pairs[0])->rx_vq));
 	session->ctxt = priv;
 	for (i = 0; i < bufsize; i++)
@@ -2202,7 +2203,7 @@  dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
 	memcpy(session->auth_key.data, auth_xform->key.data,
 			auth_xform->key.length);
 
-	authdata.key = (uint64_t)session->auth_key.data;
+	authdata.key = (size_t)session->auth_key.data;
 	authdata.keylen = session->auth_key.length;
 	authdata.key_enc_flags = 0;
 	authdata.key_type = RTA_DATA_IMM;
@@ -2261,7 +2262,7 @@  dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
 			auth_xform->algo);
 		goto out;
 	}
-	cipherdata.key = (uint64_t)session->cipher_key.data;
+	cipherdata.key = (size_t)session->cipher_key.data;
 	cipherdata.keylen = session->cipher_key.length;
 	cipherdata.key_enc_flags = 0;
 	cipherdata.key_type = RTA_DATA_IMM;
@@ -2345,10 +2346,10 @@  dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
 	/* Enable the stashing control bit */
 	DPAA2_SET_FLC_RSC(flc);
 	flc->word2_rflc_31_0 = lower_32_bits(
-			(uint64_t)&(((struct dpaa2_sec_qp *)
+			(size_t)&(((struct dpaa2_sec_qp *)
 			dev->data->queue_pairs[0])->rx_vq) | 0x14);
 	flc->word3_rflc_63_32 = upper_32_bits(
-			(uint64_t)&(((struct dpaa2_sec_qp *)
+			(size_t)&(((struct dpaa2_sec_qp *)
 			dev->data->queue_pairs[0])->rx_vq));
 
 	/* Set EWS bit i.e. enable write-safe */
@@ -2647,13 +2648,13 @@  void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
 		PMD_DRV_LOG(ERR, "dpseci_get_sec_counters failed\n");
 	} else {
 		PMD_DRV_LOG(INFO, "dpseci hw stats:"
-			    "\n\tNumber of Requests Dequeued = %lu"
-			    "\n\tNumber of Outbound Encrypt Requests = %lu"
-			    "\n\tNumber of Inbound Decrypt Requests = %lu"
-			    "\n\tNumber of Outbound Bytes Encrypted = %lu"
-			    "\n\tNumber of Outbound Bytes Protected = %lu"
-			    "\n\tNumber of Inbound Bytes Decrypted = %lu"
-			    "\n\tNumber of Inbound Bytes Validated = %lu",
+			"\n\tNumber of Requests Dequeued = %" PRIu64
+			"\n\tNumber of Outbound Encrypt Requests = %" PRIu64
+			"\n\tNumber of Inbound Decrypt Requests = %" PRIu64
+			"\n\tNumber of Outbound Bytes Encrypted = %" PRIu64
+			"\n\tNumber of Outbound Bytes Protected = %" PRIu64
+			"\n\tNumber of Inbound Bytes Decrypted = %" PRIu64
+			"\n\tNumber of Inbound Bytes Validated = %" PRIu64,
 			    counters.dequeued_requests,
 			    counters.ob_enc_requests,
 			    counters.ib_dec_requests,
diff --git a/drivers/event/dpaa2/dpaa2_eventdev.c b/drivers/event/dpaa2/dpaa2_eventdev.c
index c3e6fbf..8800b47 100644
--- a/drivers/event/dpaa2/dpaa2_eventdev.c
+++ b/drivers/event/dpaa2/dpaa2_eventdev.c
@@ -126,7 +126,7 @@  dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
 				goto send_partial;
 			}
 			rte_memcpy(ev_temp, event, sizeof(struct rte_event));
-			DPAA2_SET_FD_ADDR((&fd_arr[loop]), ev_temp);
+			DPAA2_SET_FD_ADDR((&fd_arr[loop]), (size_t)ev_temp);
 			DPAA2_SET_FD_LEN((&fd_arr[loop]),
 					 sizeof(struct rte_event));
 		}
@@ -182,7 +182,7 @@  static void dpaa2_eventdev_process_parallel(struct qbman_swp *swp,
 					    struct rte_event *ev)
 {
 	struct rte_event *ev_temp =
-		(struct rte_event *)DPAA2_GET_FD_ADDR(fd);
+		(struct rte_event *)(size_t)DPAA2_GET_FD_ADDR(fd);
 
 	RTE_SET_USED(rxq);
 
@@ -199,7 +199,7 @@  static void dpaa2_eventdev_process_atomic(struct qbman_swp *swp,
 					  struct rte_event *ev)
 {
 	struct rte_event *ev_temp =
-		(struct rte_event *)DPAA2_GET_FD_ADDR(fd);
+		(struct rte_event *)(size_t)DPAA2_GET_FD_ADDR(fd);
 	uint8_t dqrr_index = qbman_get_dqrr_idx(dq);
 
 	RTE_SET_USED(swp);
@@ -258,7 +258,7 @@  dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],
 		qbman_swp_prefetch_dqrr_next(swp);
 
 		fd = qbman_result_DQ_fd(dq);
-		rxq = (struct dpaa2_queue *)qbman_result_DQ_fqd_ctx(dq);
+		rxq = (struct dpaa2_queue *)(size_t)qbman_result_DQ_fqd_ctx(dq);
 		if (rxq) {
 			rxq->cb(swp, fd, dq, rxq, &ev[num_pkts]);
 		} else {
@@ -736,7 +736,7 @@  dpaa2_eventdev_setup_dpci(struct dpaa2_dpci_dev *dpci_dev,
 		dpaa2_eventdev_process_atomic;
 
 	for (i = 0 ; i < DPAA2_EVENT_DPCI_MAX_QUEUES; i++) {
-		rx_queue_cfg.user_ctx = (uint64_t)(&dpci_dev->queue[i]);
+		rx_queue_cfg.user_ctx = (size_t)(&dpci_dev->queue[i]);
 		ret = dpci_set_rx_queue(&dpci_dev->dpci,
 					CMD_PRI_LOW,
 					dpci_dev->token, i,
diff --git a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
index 2bd62e8..1a618ae 100644
--- a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
+++ b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
@@ -242,7 +242,7 @@  rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
 #endif
 	struct qbman_swp *swp;
 	uint16_t bpid;
-	uint64_t bufs[DPAA2_MBUF_MAX_ACQ_REL];
+	size_t bufs[DPAA2_MBUF_MAX_ACQ_REL];
 	int i, ret;
 	unsigned int n = 0;
 	struct dpaa2_bp_info *bp_info;
@@ -270,10 +270,10 @@  rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
 		 * then the remainder.
 		 */
 		if ((count - n) > DPAA2_MBUF_MAX_ACQ_REL) {
-			ret = qbman_swp_acquire(swp, bpid, bufs,
+			ret = qbman_swp_acquire(swp, bpid, (void *)bufs,
 						DPAA2_MBUF_MAX_ACQ_REL);
 		} else {
-			ret = qbman_swp_acquire(swp, bpid, bufs,
+			ret = qbman_swp_acquire(swp, bpid, (void *)bufs,
 						count - n);
 		}
 		/* In case of less than requested number of buffers available
@@ -290,7 +290,7 @@  rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
 		}
 		/* assigning mbuf from the acquired objects */
 		for (i = 0; (i < ret) && bufs[i]; i++) {
-			DPAA2_MODIFY_IOVA_TO_VADDR(bufs[i], uint64_t);
+			DPAA2_MODIFY_IOVA_TO_VADDR(bufs[i], size_t);
 			obj_table[n] = (struct rte_mbuf *)
 				       (bufs[i] - bp_info->meta_data_size);
 			PMD_TX_LOG(DEBUG, "Acquired %p address %p from BMAN",
diff --git a/drivers/net/dpaa2/Makefile b/drivers/net/dpaa2/Makefile
index 5a93a0b..068e9d3 100644
--- a/drivers/net/dpaa2/Makefile
+++ b/drivers/net/dpaa2/Makefile
@@ -25,7 +25,6 @@  CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include
 CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/mc
 CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/portal
 CFLAGS += -I$(RTE_SDK)/drivers/mempool/dpaa2
-CFLAGS += -I$(RTE_SDK)/drivers/event/dpaa2
 CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
 
 # versioning export map
diff --git a/drivers/net/dpaa2/base/dpaa2_hw_dpni.c b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
index b93376d..4b60f56 100644
--- a/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
+++ b/drivers/net/dpaa2/base/dpaa2_hw_dpni.c
@@ -50,7 +50,7 @@  dpaa2_setup_flow_dist(struct rte_eth_dev *eth_dev,
 
 	ret = dpaa2_distset_to_dpkg_profile_cfg(req_dist_set, &kg_cfg);
 	if (ret) {
-		PMD_INIT_LOG(ERR, "given rss_hf (%lx) not supported",
+		PMD_INIT_LOG(ERR, "given rss_hf (%" PRIx64 ") not supported",
 			     req_dist_set);
 		rte_free(p_params);
 		return ret;
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 09a11d6..fd5897e 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -445,7 +445,7 @@  dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	memset(&cfg, 0, sizeof(struct dpni_queue));
 
 	options = options | DPNI_QUEUE_OPT_USER_CTX;
-	cfg.user_context = (uint64_t)(dpaa2_q);
+	cfg.user_context = (size_t)(dpaa2_q);
 
 	/*if ls2088 or rev2 device, enable the stashing */
 
@@ -560,7 +560,7 @@  dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		 */
 		cong_notif_cfg.threshold_exit = CONG_EXIT_TX_THRESHOLD;
 		cong_notif_cfg.message_ctx = 0;
-		cong_notif_cfg.message_iova = (uint64_t)dpaa2_q->cscn;
+		cong_notif_cfg.message_iova = (size_t)dpaa2_q->cscn;
 		cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE;
 		cong_notif_cfg.notification_mode =
 					 DPNI_CONG_OPT_WRITE_MEM_ON_ENTER |
@@ -1702,7 +1702,7 @@  int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
 	}
 
 	options |= DPNI_QUEUE_OPT_USER_CTX;
-	cfg.user_context = (uint64_t)(dpaa2_ethq);
+	cfg.user_context = (size_t)(dpaa2_ethq);
 
 	ret = dpni_set_queue(dpni, CMD_PRI_LOW, eth_priv->token, DPNI_QUEUE_RX,
 			     dpaa2_ethq->tc_index, flow_id, options, &cfg);
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index 183293c..21a08b6 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -21,7 +21,6 @@ 
 #include <dpaa2_hw_pvt.h>
 #include <dpaa2_hw_dpio.h>
 #include <dpaa2_hw_mempool.h>
-#include <dpaa2_eventdev.h>
 
 #include "dpaa2_ethdev.h"
 #include "base/dpaa2_hw_dpni_annot.h"
@@ -104,13 +103,11 @@  dpaa2_dev_rx_parse_frc(struct rte_mbuf *m, uint16_t frc)
 }
 
 static inline uint32_t __attribute__((hot))
-dpaa2_dev_rx_parse_slow(uint64_t hw_annot_addr)
+dpaa2_dev_rx_parse_slow(struct dpaa2_annot_hdr *annotation)
 {
 	uint32_t pkt_type = RTE_PTYPE_UNKNOWN;
-	struct dpaa2_annot_hdr *annotation =
-			(struct dpaa2_annot_hdr *)hw_annot_addr;
 
-	PMD_RX_LOG(DEBUG, "annotation = 0x%lx   ", annotation->word4);
+	PMD_RX_LOG(DEBUG, "annotation = 0x%" PRIx64, annotation->word4);
 	if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) {
 		pkt_type = RTE_PTYPE_L2_ETHER_ARP;
 		goto parse_done;
@@ -167,12 +164,12 @@  dpaa2_dev_rx_parse_slow(uint64_t hw_annot_addr)
 }
 
 static inline uint32_t __attribute__((hot))
-dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, uint64_t hw_annot_addr)
+dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr)
 {
 	struct dpaa2_annot_hdr *annotation =
 			(struct dpaa2_annot_hdr *)hw_annot_addr;
 
-	PMD_RX_LOG(DEBUG, "annotation = 0x%lx   ", annotation->word4);
+	PMD_RX_LOG(DEBUG, "annotation = 0x%" PRIx64, annotation->word4);
 
 	/* Check offloads first */
 	if (BIT_ISSET_AT_POS(annotation->word3,
@@ -207,25 +204,24 @@  dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, uint64_t hw_annot_addr)
 		break;
 	}
 
-	return dpaa2_dev_rx_parse_slow(hw_annot_addr);
+	return dpaa2_dev_rx_parse_slow(annotation);
 }
 
 static inline struct rte_mbuf *__attribute__((hot))
 eth_sg_fd_to_mbuf(const struct qbman_fd *fd)
 {
 	struct qbman_sge *sgt, *sge;
-	dma_addr_t sg_addr;
+	size_t sg_addr, fd_addr;
 	int i = 0;
-	uint64_t fd_addr;
 	struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp;
 
-	fd_addr = (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
+	fd_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
 
 	/* Get Scatter gather table address */
 	sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd));
 
 	sge = &sgt[i++];
-	sg_addr = (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge));
+	sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge));
 
 	/* First Scatter gather entry */
 	first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
@@ -243,14 +239,14 @@  eth_sg_fd_to_mbuf(const struct qbman_fd *fd)
 				DPAA2_GET_FD_FRC_PARSE_SUM(fd));
 	else
 		first_seg->packet_type = dpaa2_dev_rx_parse(first_seg,
-			 (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
-			 + DPAA2_FD_PTA_SIZE);
+			(void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
+			 + DPAA2_FD_PTA_SIZE));
 
 	rte_mbuf_refcnt_set(first_seg, 1);
 	cur_seg = first_seg;
 	while (!DPAA2_SG_IS_FINAL(sge)) {
 		sge = &sgt[i++];
-		sg_addr = (uint64_t)DPAA2_IOVA_TO_VADDR(
+		sg_addr = (size_t)DPAA2_IOVA_TO_VADDR(
 				DPAA2_GET_FLE_ADDR(sge));
 		next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
 			rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size);
@@ -299,12 +295,12 @@  eth_fd_to_mbuf(const struct qbman_fd *fd)
 		dpaa2_dev_rx_parse_frc(mbuf, DPAA2_GET_FD_FRC_PARSE_SUM(fd));
 	else
 		mbuf->packet_type = dpaa2_dev_rx_parse(mbuf,
-			(uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
-			 + DPAA2_FD_PTA_SIZE);
+			(void *)((size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
+			 + DPAA2_FD_PTA_SIZE));
 
 	PMD_RX_LOG(DEBUG, "to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
-		"fd_off=%d fd =%lx, meta = %d  bpid =%d, len=%d\n",
-		mbuf, mbuf->buf_addr, mbuf->data_off,
+		"fd_off=%d fd =%" PRIx64 ", meta = %d  bpid =%d, len=%d\n",
+		(void *)mbuf, (void *)mbuf->buf_addr, mbuf->data_off,
 		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
 		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
@@ -340,7 +336,7 @@  eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
 	DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg);
 	/*Set Scatter gather table and Scatter gather entries*/
 	sgt = (struct qbman_sge *)(
-			(uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
+			(size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
 			+ DPAA2_GET_FD_OFFSET(fd));
 
 	for (i = 0; i < mbuf->nb_segs; i++) {
@@ -402,8 +398,8 @@  eth_mbuf_to_fd(struct rte_mbuf *mbuf,
 	DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid);
 
 	PMD_TX_LOG(DEBUG, "mbuf =%p, mbuf->buf_addr =%p, off = %d,"
-		"fd_off=%d fd =%lx, meta = %d  bpid =%d, len=%d\n",
-		mbuf, mbuf->buf_addr, mbuf->data_off,
+		"fd_off=%d fd =%" PRIx64 ", meta = %d  bpid =%d, len=%d\n",
+		(void *)mbuf, mbuf->buf_addr, mbuf->data_off,
 		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
 		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
 		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
@@ -458,11 +454,12 @@  eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
 	PMD_TX_LOG(DEBUG, " mbuf %p BMAN buf addr %p",
 		   (void *)mbuf, mbuf->buf_addr);
 
-	PMD_TX_LOG(DEBUG, " fdaddr =%lx bpid =%d meta =%d off =%d, len =%d",
-		   DPAA2_GET_FD_ADDR(fd),
-		DPAA2_GET_FD_BPID(fd),
-		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
+	PMD_TX_LOG(DEBUG,
+		"fd_off=%d fd =%" PRIx64 ", meta = %d  bpid =%d, len=%d\n",
 		DPAA2_GET_FD_OFFSET(fd),
+		DPAA2_GET_FD_ADDR(fd),
+		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
+		DPAA2_GET_FD_BPID(fd),
 		DPAA2_GET_FD_LEN(fd));
 
 	return 0;
@@ -523,8 +520,8 @@  dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	}
 
 	dq_storage = q_storage->active_dqs;
-	rte_prefetch0((void *)((uint64_t)(dq_storage)));
-	rte_prefetch0((void *)((uint64_t)(dq_storage + 1)));
+	rte_prefetch0((void *)(size_t)(dq_storage));
+	rte_prefetch0((void *)(size_t)(dq_storage + 1));
 
 	/* Prepare next pull descriptor. This will give space for the
 	 * prefething done on DQRR entries
@@ -554,7 +551,7 @@  dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		 */
 		while (!qbman_check_new_result(dq_storage))
 			;
-		rte_prefetch0((void *)((uint64_t)(dq_storage + 2)));
+		rte_prefetch0((void *)((size_t)(dq_storage + 2)));
 		/* Check whether Last Pull command is Expired and
 		 * setting Condition for Loop termination
 		 */
@@ -569,7 +566,7 @@  dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 
 		next_fd = qbman_result_DQ_fd(dq_storage + 1);
 		/* Prefetch Annotation address for the parse results */
-		rte_prefetch0((void *)(DPAA2_GET_FD_ADDR(next_fd)
+		rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(next_fd)
 				+ DPAA2_FD_PTA_SIZE + 16));
 
 		if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
@@ -616,7 +613,7 @@  dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
 				 struct dpaa2_queue *rxq,
 				 struct rte_event *ev)
 {
-	rte_prefetch0((void *)(DPAA2_GET_FD_ADDR(fd) +
+	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
 		DPAA2_FD_PTA_SIZE + 16));
 
 	ev->flow_id = rxq->ev.flow_id;
@@ -641,7 +638,7 @@  dpaa2_dev_process_atomic_event(struct qbman_swp *swp __attribute__((unused)),
 {
 	uint8_t dqrr_index;
 
-	rte_prefetch0((void *)(DPAA2_GET_FD_ADDR(fd) +
+	rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd) +
 		DPAA2_FD_PTA_SIZE + 16));
 
 	ev->flow_id = rxq->ev.flow_id;
@@ -726,7 +723,7 @@  dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 
 			fd_arr[loop].simple.frc = 0;
 			DPAA2_RESET_FD_CTRL((&fd_arr[loop]));
-			DPAA2_SET_FD_FLC((&fd_arr[loop]), NULL);
+			DPAA2_SET_FD_FLC((&fd_arr[loop]), (size_t)NULL);
 			if (likely(RTE_MBUF_DIRECT(*bufs))) {
 				mp = (*bufs)->pool;
 				/* Check the basic scenario and set