
[v3,4/8] baseband/la12xx: add queue and modem config support

Message ID 20210413051715.26430-5-hemant.agrawal@nxp.com
State New
Series baseband: add NXP LA12xx driver

Commit Message

Hemant Agrawal April 13, 2021, 5:17 a.m. UTC
This patch adds support for connecting with the modem and creating
the IPC channels as queues with the modem for the exchange of data.

Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>

---
 drivers/baseband/la12xx/bbdev_la12xx.c     | 564 ++++++++++++++++++++-
 drivers/baseband/la12xx/bbdev_la12xx.h     |   5 +-
 drivers/baseband/la12xx/bbdev_la12xx_ipc.h | 180 +++++++
 3 files changed, 746 insertions(+), 3 deletions(-)

-- 
2.17.1

Comments

Chautru, Nicolas April 14, 2021, 12:41 a.m. UTC | #1
> -----Original Message-----
> From: Hemant Agrawal <hemant.agrawal@nxp.com>
> Sent: Monday, April 12, 2021 10:17 PM
> 
> This patch adds support for connecting with the modem and creating
> the IPC channels as queues with the modem for the exchange of data.
> 
> Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
> Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
> ---
>  drivers/baseband/la12xx/bbdev_la12xx.c     | 564 ++++++++++++++++++++-
>  drivers/baseband/la12xx/bbdev_la12xx.h     |   5 +-
>  drivers/baseband/la12xx/bbdev_la12xx_ipc.h | 180 +++++++
>  3 files changed, 746 insertions(+), 3 deletions(-)
> 
> [...]
> 
> +static const struct rte_bbdev_op_cap bbdev_capabilities[] = {
> +	{
> +		.type   = RTE_BBDEV_OP_LDPC_ENC,
> +		.cap.ldpc_enc = {
> +			.capability_flags =
> +					RTE_BBDEV_LDPC_CRC_24A_ATTACH |
> +					RTE_BBDEV_LDPC_CRC_24B_ATTACH,


I believe you also support RTE_BBDEV_LDPC_RATE_MATCH based on future commits.
Still, in this very commit I don't believe you support anything yet.
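
For reference, a sketch of how the encoder entry might then read
(assuming the generic RTE_BBDEV_LDPC_RATE_MATCH flag; untested, and not
the driver's confirmed capability set):

	{
		.type   = RTE_BBDEV_OP_LDPC_ENC,
		.cap.ldpc_enc = {
			.capability_flags =
					RTE_BBDEV_LDPC_RATE_MATCH |
					RTE_BBDEV_LDPC_CRC_24A_ATTACH |
					RTE_BBDEV_LDPC_CRC_24B_ATTACH,
			.num_buffers_src =
					RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
			.num_buffers_dst =
					RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
		}
	},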


> +			.num_buffers_src =
> +					RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
> +			.num_buffers_dst =
> +					RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
> +		}
> +	},
> +	{
> +		.type   = RTE_BBDEV_OP_LDPC_DEC,
> +		.cap.ldpc_dec = {
> +			.capability_flags =
> +				RTE_BBDEV_LDPC_CRC_TYPE_24A_CHECK |
> +					RTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK |
> +					RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP,


Similar comment: in this very commit I don't believe you support anything yet.
Future commits and the documentation suggest you support more than these.


> +			.num_buffers_src =
> +					RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
> +			.num_buffers_hard_out =
> +					RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
> +		}
> +	},
> +	RTE_BBDEV_END_OF_CAPABILITIES_LIST()
> +};
> 
> [...]
> 
> +/* Release queue */
> +static int
> +la12xx_queue_release(struct rte_bbdev *dev, uint16_t q_id)
> +{
> +	RTE_SET_USED(dev);
> +	RTE_SET_USED(q_id);
> +
> +	PMD_INIT_FUNC_TRACE();
> +
> +	/* TODO: Implement */


I believe this should be implemented in this commit; it is not added later in the series from what I could see.
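
A minimal sketch of what that could look like (untested; it only frees
the per-queue private data that la12xx_queue_setup() allocates, and the
host_params and per-BD message buffers would need the same treatment):

static int
la12xx_queue_release(struct rte_bbdev *dev, uint16_t q_id)
{
	struct bbdev_la12xx_q_priv *q_priv =
		dev->data->queues[q_id].queue_private;

	PMD_INIT_FUNC_TRACE();

	/* rte_free() is NULL-safe, so no separate check is needed */
	rte_free(q_priv);
	dev->data->queues[q_id].queue_private = NULL;

	return 0;
}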

> [...]
> 
> +	for (i = 0; i < q_priv->queue_size; i++) {
> +		vaddr = rte_malloc(NULL, msg_size, RTE_CACHE_LINE_SIZE);
> +		if (!vaddr)
> +			return IPC_HOST_BUF_ALLOC_FAIL;
> +		/* Only offset now */
> +		ch->bd[i].modem_ptr =
> +			rte_cpu_to_be_32(HUGEPG_OFFSET(vaddr));
> +		ch->bd[i].host_virt_l = lower_32_bits(vaddr);
> +		ch->bd[i].host_virt_h = upper_32_bits(vaddr);
> +		q_priv->msg_ch_vaddr[i] = vaddr;
> +		/* Not sure use of this len may be for CRC*/


What is that comment referring to? Is that a TODO?

> +		ch->bd[i].len = 0;
> +	}
> 
> [...]
> 
> +static struct hugepage_info *
> +get_hugepage_info(void)
> +{
> +	struct hugepage_info *hp_info;
> +	struct rte_memseg *mseg;
> +
> +	PMD_INIT_FUNC_TRACE();
> +
> +	/* TODO - find a better way */
> +	hp_info = rte_malloc(NULL, sizeof(struct hugepage_info), 0);
> +	if (!hp_info) {
> +		BBDEV_LA12XX_PMD_ERR("Unable to allocate on local heap");
> +		return NULL;
> +	}
> +
> +	mseg = rte_mem_virt2memseg(hp_info, NULL);
> +	hp_info->vaddr = mseg->addr;
> +	hp_info->paddr = rte_mem_virt2phy(mseg->addr);
> +	hp_info->len = mseg->len;
> +
> +	return hp_info;
> +}


I was not sure what this is trying to do. Is it assuming a single hugepage is being used, and keeping track of it for future DMA activity? Is that valid?
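
If more than one memseg can back the process, the assumption could at
least be verified with the existing iterator API, e.g. (untested
sketch):

/* Callback for rte_memseg_walk(): count the memsegs backing this
 * process; the single-hugepage-region assumption only holds when
 * exactly one segment is found.
 */
static int
count_memseg_cb(const struct rte_memseg *ms, void *arg)
{
	int *count = arg;

	RTE_SET_USED(ms);
	(*count)++;
	return 0;
}

	/* ... at init time ... */
	int nb_segs = 0;

	rte_memseg_walk(count_memseg_cb, &nb_segs);
	if (nb_segs != 1)
		BBDEV_LA12XX_PMD_WARN("expected a single memseg, got %d",
				      nb_segs);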


> [...]
> 
> +	/* Send IOCTL to get system map */
> +	/* Send IOCTL to put hugepg_start map */
> +	ret = ioctl(ipc_priv->dev_ipc, IOCTL_GUL_IPC_GET_SYS_MAP,
> +		    &ipc_priv->sys_map);
> +	if (ret) {
> +		BBDEV_LA12XX_PMD_ERR(
> +			"IOCTL_GUL_IPC_GET_SYS_MAP ioctl failed");
> +		goto err;
> +	}
> +
> +	phy_align = (ipc_priv->sys_map.mhif_start.host_phys % 0x1000);


I would replace the 0x1000 magic number used for alignment.
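
For instance (sketch; assuming the 0x1000 is meant to be the host page
size, sysconf(_SC_PAGESIZE) or a named constant would carry the
intent):

#define LA12XX_MAP_ALIGN 0x1000	/* mmap() offset granularity */

	phy_align = (ipc_priv->sys_map.mhif_start.host_phys %
			LA12XX_MAP_ALIGN);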

> +	ipc_priv->mhif_start.host_vaddr =
> +		mmap(0, ipc_priv->sys_map.mhif_start.size + phy_align,
> +		     (PROT_READ | PROT_WRITE), MAP_SHARED, ipc_priv->dev_mem,
> +		     (ipc_priv->sys_map.mhif_start.host_phys - phy_align));
> 
> [...]
> 
> +/** IPC buffer descriptor */
> +typedef struct ipc_buffer_desc {
> +	union {
> +		uint64_t host_virt;	/**< msg's host virtual address */
> +		struct {
> +			uint32_t host_virt_l;
> +			uint32_t host_virt_h;
> +		};
> +	};
> +	uint64_t host_phy;	/**< msg's host physical address */


No intention to use rte_iova_t here? 
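
i.e. a sketch of the same field using the DPDK IOVA typedef instead of
a raw integer (rte_iova_t is a uint64_t, so the packed layout would not
change):

	rte_iova_t host_phy;	/**< msg's host IOVA/physical address */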

> +	uint32_t modem_ptr;	/**< msg's modem physical address */
> +	uint32_t len;		/**< msg len */
> +	uint64_t crc;		/**< crc */
> +} __rte_packed ipc_bd_t;
> 
> [...]
> 
> +/** Structure specifying enqueue operation (enqueue at LA1224) */
> +struct bbdev_ipc_enqueue_op {
> +	/** Status of operation that was performed */
> +	int32_t status;
> +	/** CRC Status of SD operation that was performed */
> +	int32_t crc_stat_addr;
> +	/** HARQ Output buffer memory length for Shared Decode.
> +	 * Filled by LA12xx.
> +	 */


What is this? Is HARQ supported or not? Is this memory accessible?

> +	uint32_t out_len;
> +	/** Reserved (for 8 byte alignment) */
> +	uint32_t rsvd;
> +};
> 
> [...]

Patch

diff --git a/drivers/baseband/la12xx/bbdev_la12xx.c b/drivers/baseband/la12xx/bbdev_la12xx.c
index 7e9be74bb4..0a68686205 100644
--- a/drivers/baseband/la12xx/bbdev_la12xx.c
+++ b/drivers/baseband/la12xx/bbdev_la12xx.c
@@ -3,6 +3,11 @@ 
  */
 
 #include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <dirent.h>
 
 #include <rte_common.h>
 #include <rte_bus_vdev.h>
@@ -31,11 +36,561 @@  struct bbdev_la12xx_params {
 #define BBDEV_LA12XX_VDEV_MODEM_ID_ARG	"modem"
 #define LA12XX_MAX_MODEM 4
 
+#define LA12XX_MAX_CORES	4
+#define BBDEV_LA12XX_LDPC_ENC_CORE	0
+#define BBDEV_LA12XX_LDPC_DEC_CORE	1
+
 static const char * const bbdev_la12xx_valid_params[] = {
 	BBDEV_LA12XX_MAX_NB_QUEUES_ARG,
 	BBDEV_LA12XX_VDEV_MODEM_ID_ARG,
 };
 
+static const struct rte_bbdev_op_cap bbdev_capabilities[] = {
+	{
+		.type   = RTE_BBDEV_OP_LDPC_ENC,
+		.cap.ldpc_enc = {
+			.capability_flags =
+					RTE_BBDEV_LDPC_CRC_24A_ATTACH |
+					RTE_BBDEV_LDPC_CRC_24B_ATTACH,
+			.num_buffers_src =
+					RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
+			.num_buffers_dst =
+					RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
+		}
+	},
+	{
+		.type   = RTE_BBDEV_OP_LDPC_DEC,
+		.cap.ldpc_dec = {
+			.capability_flags =
+				RTE_BBDEV_LDPC_CRC_TYPE_24A_CHECK |
+					RTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK |
+					RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP,
+			.num_buffers_src =
+					RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
+			.num_buffers_hard_out =
+					RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
+		}
+	},
+	RTE_BBDEV_END_OF_CAPABILITIES_LIST()
+};
+
+static struct rte_bbdev_queue_conf default_queue_conf = {
+	.queue_size = MAX_CHANNEL_DEPTH,
+};
+
+/* Get device info */
+static void
+la12xx_info_get(struct rte_bbdev *dev,
+		struct rte_bbdev_driver_info *dev_info)
+{
+	PMD_INIT_FUNC_TRACE();
+
+	dev_info->driver_name = RTE_STR(DRIVER_NAME);
+	dev_info->max_num_queues = LA12XX_MAX_QUEUES;
+	dev_info->queue_size_lim = MAX_CHANNEL_DEPTH;
+	dev_info->hardware_accelerated = true;
+	dev_info->max_dl_queue_priority = 0;
+	dev_info->max_ul_queue_priority = 0;
+	dev_info->default_queue_conf = default_queue_conf;
+	dev_info->capabilities = bbdev_capabilities;
+	dev_info->cpu_flag_reqs = NULL;
+	dev_info->min_alignment = 64;
+
+	BBDEV_LA12XX_PMD_DEBUG("got device info from %u", dev->data->dev_id);
+}
+
+/* Release queue */
+static int
+la12xx_queue_release(struct rte_bbdev *dev, uint16_t q_id)
+{
+	RTE_SET_USED(dev);
+	RTE_SET_USED(q_id);
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* TODO: Implement */
+
+	return 0;
+}
+
+#define HUGEPG_OFFSET(A) \
+		((uint64_t) ((unsigned long) (A) \
+		- ((uint64_t)ipc_priv->hugepg_start.host_vaddr)))
+
+static int ipc_queue_configure(uint32_t channel_id,
+		ipc_t instance, struct bbdev_la12xx_q_priv *q_priv)
+{
+	ipc_userspace_t *ipc_priv = (ipc_userspace_t *)instance;
+	ipc_instance_t *ipc_instance = ipc_priv->instance;
+	ipc_ch_t *ch;
+	void *vaddr;
+	uint32_t i = 0;
+	uint32_t msg_size = sizeof(struct bbdev_ipc_enqueue_op);
+
+	PMD_INIT_FUNC_TRACE();
+
+	BBDEV_LA12XX_PMD_DEBUG("%x %p", ipc_instance->initialized,
+		ipc_priv->instance);
+	ch = &(ipc_instance->ch_list[channel_id]);
+
+	BBDEV_LA12XX_PMD_DEBUG("channel: %u, depth: %u, msg size: %u",
+		channel_id, q_priv->queue_size, msg_size);
+
+	/* Start init of channel */
+	ch->md.ring_size = rte_cpu_to_be_32(q_priv->queue_size);
+	ch->md.pi = 0;
+	ch->md.ci = 0;
+	ch->md.msg_size = msg_size;
+	for (i = 0; i < q_priv->queue_size; i++) {
+		vaddr = rte_malloc(NULL, msg_size, RTE_CACHE_LINE_SIZE);
+		if (!vaddr)
+			return IPC_HOST_BUF_ALLOC_FAIL;
+		/* Only offset now */
+		ch->bd[i].modem_ptr =
+			rte_cpu_to_be_32(HUGEPG_OFFSET(vaddr));
+		ch->bd[i].host_virt_l = lower_32_bits(vaddr);
+		ch->bd[i].host_virt_h = upper_32_bits(vaddr);
+		q_priv->msg_ch_vaddr[i] = vaddr;
+		/* Not sure use of this len may be for CRC*/
+		ch->bd[i].len = 0;
+	}
+	ch->host_ipc_params =
+		rte_cpu_to_be_32(HUGEPG_OFFSET(q_priv->host_params));
+	ch->bl_initialized = 1;
+
+	BBDEV_LA12XX_PMD_DEBUG("Channel configured");
+	return IPC_SUCCESS;
+}
+
+static int
+la12xx_e200_queue_setup(struct rte_bbdev *dev,
+		struct bbdev_la12xx_q_priv *q_priv)
+{
+	struct bbdev_la12xx_private *priv = dev->data->dev_private;
+	ipc_userspace_t *ipc_priv = priv->ipc_priv;
+	struct gul_hif *mhif;
+	ipc_metadata_t *ipc_md;
+	ipc_ch_t *ch;
+	int instance_id = 0, i;
+	int ret;
+
+	PMD_INIT_FUNC_TRACE();
+
+	switch (q_priv->op_type) {
+	case RTE_BBDEV_OP_LDPC_ENC:
+		q_priv->la12xx_core_id = BBDEV_LA12XX_LDPC_ENC_CORE;
+		break;
+	case RTE_BBDEV_OP_LDPC_DEC:
+		q_priv->la12xx_core_id = BBDEV_LA12XX_LDPC_DEC_CORE;
+		break;
+	default:
+		BBDEV_LA12XX_PMD_ERR("Unsupported op type\n");
+		return -1;
+	}
+
+	mhif = (struct gul_hif *)ipc_priv->mhif_start.host_vaddr;
+	/* offset is from start of PEB */
+	ipc_md = (ipc_metadata_t *)((size_t)ipc_priv->peb_start.host_vaddr +
+		mhif->ipc_regs.ipc_mdata_offset);
+	ch = &ipc_md->instance_list[instance_id].ch_list[q_priv->q_id];
+
+	if (q_priv->q_id < priv->num_valid_queues) {
+		ipc_br_md_t *md, *host_md;
+		ipc_ch_t *host_rx_ch;
+
+		host_rx_ch =
+			&ipc_md->instance_list[instance_id].ch_list[q_priv->q_id
+				+ HOST_RX_QUEUEID_OFFSET];
+		md = &(ch->md);
+		host_md = &(host_rx_ch->md);
+
+		q_priv->feca_blk_id = rte_cpu_to_be_32(ch->feca_blk_id);
+		q_priv->feca_blk_id_be32 = ch->feca_blk_id;
+		q_priv->host_pi = rte_be_to_cpu_32(host_md->pi);
+		q_priv->host_ci = rte_be_to_cpu_32(md->ci);
+		q_priv->host_params = (host_ipc_params_t *)
+			(rte_be_to_cpu_32(ch->host_ipc_params) +
+			((size_t)ipc_priv->hugepg_start.host_vaddr));
+
+		for (i = 0; i < q_priv->queue_size; i++) {
+			uint32_t h, l;
+
+			h = host_rx_ch->bd[i].host_virt_h;
+			l = host_rx_ch->bd[i].host_virt_l;
+			q_priv->msg_ch_vaddr[i] = (void *)join_32_bits(h, l);
+		}
+
+		BBDEV_LA12XX_PMD_WARN(
+			"Queue [%d] already configured, not configuring again",
+			q_priv->q_id);
+		return 0;
+	}
+
+	BBDEV_LA12XX_PMD_DEBUG("setting up queue %d", q_priv->q_id);
+
+	q_priv->host_params = rte_zmalloc(NULL, sizeof(host_ipc_params_t),
+			RTE_CACHE_LINE_SIZE);
+	ch->host_ipc_params =
+		rte_cpu_to_be_32(HUGEPG_OFFSET(q_priv->host_params));
+
+	/* Call ipc_configure_channel */
+	ret = ipc_queue_configure((q_priv->q_id + HOST_RX_QUEUEID_OFFSET),
+				  ipc_priv, q_priv);
+	if (ret) {
+		BBDEV_LA12XX_PMD_ERR("Unable to setup queue (%d) (err=%d)",
+		       q_priv->q_id, ret);
+		return ret;
+	}
+
+	/* Set queue properties for LA12xx device */
+	switch (q_priv->op_type) {
+	case RTE_BBDEV_OP_LDPC_ENC:
+		if (priv->num_ldpc_enc_queues >= MAX_LDPC_ENC_FECA_QUEUES) {
+			BBDEV_LA12XX_PMD_ERR(
+				"num_ldpc_enc_queues reached max value");
+			return -1;
+		}
+		ch->la12xx_core_id =
+			rte_cpu_to_be_32(BBDEV_LA12XX_LDPC_ENC_CORE);
+		ch->feca_blk_id = rte_cpu_to_be_32(priv->num_ldpc_enc_queues++);
+		break;
+	case RTE_BBDEV_OP_LDPC_DEC:
+		if (priv->num_ldpc_dec_queues >= MAX_LDPC_DEC_FECA_QUEUES) {
+			BBDEV_LA12XX_PMD_ERR(
+				"num_ldpc_dec_queues reached max value");
+			return -1;
+		}
+		ch->la12xx_core_id =
+			rte_cpu_to_be_32(BBDEV_LA12XX_LDPC_DEC_CORE);
+		ch->feca_blk_id = rte_cpu_to_be_32(priv->num_ldpc_dec_queues++);
+		break;
+	default:
+		BBDEV_LA12XX_PMD_ERR("Not supported op type\n");
+		return -1;
+	}
+	ch->op_type = rte_cpu_to_be_32(q_priv->op_type);
+	ch->depth = rte_cpu_to_be_32(q_priv->queue_size);
+
+	/* Store queue config here */
+	q_priv->feca_blk_id = rte_cpu_to_be_32(ch->feca_blk_id);
+	q_priv->feca_blk_id_be32 = ch->feca_blk_id;
+
+	return 0;
+}
+
+/* Setup a queue */
+static int
+la12xx_queue_setup(struct rte_bbdev *dev, uint16_t q_id,
+		const struct rte_bbdev_queue_conf *queue_conf)
+{
+	struct bbdev_la12xx_private *priv = dev->data->dev_private;
+	struct rte_bbdev_queue_data *q_data;
+	struct bbdev_la12xx_q_priv *q_priv;
+	int ret;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* Move to setup_queues callback */
+	q_data = &dev->data->queues[q_id];
+	q_data->queue_private = rte_zmalloc(NULL,
+		sizeof(struct bbdev_la12xx_q_priv), 0);
+	if (!q_data->queue_private) {
+		BBDEV_LA12XX_PMD_ERR("Memory allocation failed for qpriv");
+		return -ENOMEM;
+	}
+	q_priv = q_data->queue_private;
+	q_priv->q_id = q_id;
+	q_priv->bbdev_priv = dev->data->dev_private;
+	q_priv->queue_size = queue_conf->queue_size;
+	q_priv->op_type = queue_conf->op_type;
+
+	ret = la12xx_e200_queue_setup(dev, q_priv);
+	if (ret) {
+		BBDEV_LA12XX_PMD_ERR("e200_queue_setup failed for qid: %d",
+				     q_id);
+		return ret;
+	}
+
+	/* Store queue config here */
+	priv->num_valid_queues++;
+
+	return 0;
+}
+
+static int
+la12xx_start(struct rte_bbdev *dev)
+{
+	struct bbdev_la12xx_private *priv = dev->data->dev_private;
+	ipc_userspace_t *ipc_priv = priv->ipc_priv;
+	int ready = 0;
+	struct gul_hif *hif_start;
+
+	PMD_INIT_FUNC_TRACE();
+
+	hif_start = (struct gul_hif *)ipc_priv->mhif_start.host_vaddr;
+
+	/* Set Host Ready bit */
+	SET_HIF_HOST_RDY(hif_start, HIF_HOST_READY_IPC_APP);
+
+	/* Now wait for modem ready bit */
+	while (!ready)
+		ready = CHK_HIF_MOD_RDY(hif_start, HIF_MOD_READY_IPC_APP);
+
+	return 0;
+}
+
+static const struct rte_bbdev_ops pmd_ops = {
+	.info_get = la12xx_info_get,
+	.queue_setup = la12xx_queue_setup,
+	.queue_release = la12xx_queue_release,
+	.start = la12xx_start
+};
+static struct hugepage_info *
+get_hugepage_info(void)
+{
+	struct hugepage_info *hp_info;
+	struct rte_memseg *mseg;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* TODO - find a better way */
+	hp_info = rte_malloc(NULL, sizeof(struct hugepage_info), 0);
+	if (!hp_info) {
+		BBDEV_LA12XX_PMD_ERR("Unable to allocate on local heap");
+		return NULL;
+	}
+
+	mseg = rte_mem_virt2memseg(hp_info, NULL);
+	hp_info->vaddr = mseg->addr;
+	hp_info->paddr = rte_mem_virt2phy(mseg->addr);
+	hp_info->len = mseg->len;
+
+	return hp_info;
+}
+
+static int open_ipc_dev(int modem_id)
+{
+	char dev_initials[16], dev_path[PATH_MAX];
+	struct dirent *entry;
+	int dev_ipc = 0;
+	DIR *dir;
+
+	dir = opendir("/dev/");
+	if (!dir) {
+		BBDEV_LA12XX_PMD_ERR("Unable to open /dev/");
+		return -1;
+	}
+
+	sprintf(dev_initials, "gulipcgul%d", modem_id);
+
+	while ((entry = readdir(dir)) != NULL) {
+		if (!strncmp(dev_initials, entry->d_name,
+		    sizeof(dev_initials) - 1))
+			break;
+	}
+
+	if (!entry) {
+		BBDEV_LA12XX_PMD_ERR("Error: No gulipcgul%d device", modem_id);
+		return -1;
+	}
+
+	sprintf(dev_path, "/dev/%s", entry->d_name);
+	dev_ipc = open(dev_path, O_RDWR);
+	if (dev_ipc  < 0) {
+		BBDEV_LA12XX_PMD_ERR("Error: Cannot open %s", dev_path);
+		return -errno;
+	}
+
+	return dev_ipc;
+}
+
+static int
+setup_la12xx_dev(struct rte_bbdev *dev)
+{
+	struct bbdev_la12xx_private *priv = dev->data->dev_private;
+	ipc_userspace_t *ipc_priv = priv->ipc_priv;
+	struct hugepage_info *hp = NULL;
+	ipc_channel_us_t *ipc_priv_ch = NULL;
+	int dev_ipc = 0, dev_mem = 0, i;
+	ipc_metadata_t *ipc_md;
+	struct gul_hif *mhif;
+	uint32_t phy_align = 0;
+	int ret;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (!ipc_priv) {
+		/* TODO - get a better way */
+		/* Get the hugepage info against it */
+		hp = get_hugepage_info();
+		if (!hp) {
+			BBDEV_LA12XX_PMD_ERR("Unable to get hugepage info");
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		BBDEV_LA12XX_PMD_DEBUG("0x%" PRIx64 " %p %x",
+				hp->paddr, hp->vaddr, (int)hp->len);
+
+		ipc_priv = rte_zmalloc(0, sizeof(ipc_userspace_t), 0);
+		if (ipc_priv == NULL) {
+			BBDEV_LA12XX_PMD_ERR(
+				"Unable to allocate memory for ipc priv");
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		for (i = 0; i < IPC_MAX_CHANNEL_COUNT; i++) {
+			ipc_priv_ch = rte_zmalloc(0,
+				sizeof(ipc_channel_us_t), 0);
+			if (ipc_priv_ch == NULL) {
+				BBDEV_LA12XX_PMD_ERR(
+					"Unable to allocate memory for channels");
+				ret = -ENOMEM;
+			}
+			ipc_priv->channels[i] = ipc_priv_ch;
+		}
+
+		dev_mem = open("/dev/mem", O_RDWR);
+		if (dev_mem < 0) {
+			BBDEV_LA12XX_PMD_ERR("Error: Cannot open /dev/mem");
+			ret = -errno;
+			goto err;
+		}
+
+		ipc_priv->instance_id = 0;
+		ipc_priv->dev_mem = dev_mem;
+
+		ipc_priv->sys_map.hugepg_start.host_phys = hp->paddr;
+		ipc_priv->sys_map.hugepg_start.size = hp->len;
+
+		ipc_priv->hugepg_start.host_phys = hp->paddr;
+		ipc_priv->hugepg_start.host_vaddr = hp->vaddr;
+		ipc_priv->hugepg_start.size = hp->len;
+
+		rte_free(hp);
+	}
+
+	dev_ipc = open_ipc_dev(priv->modem_id);
+	if (dev_ipc < 0) {
+		BBDEV_LA12XX_PMD_ERR("Error: open_ipc_dev failed");
+		goto err;
+	}
+	ipc_priv->dev_ipc = dev_ipc;
+
+	/* Send IOCTL to get system map */
+	/* Send IOCTL to put hugepg_start map */
+	ret = ioctl(ipc_priv->dev_ipc, IOCTL_GUL_IPC_GET_SYS_MAP,
+		    &ipc_priv->sys_map);
+	if (ret) {
+		BBDEV_LA12XX_PMD_ERR(
+			"IOCTL_GUL_IPC_GET_SYS_MAP ioctl failed");
+		goto err;
+	}
+
+	phy_align = (ipc_priv->sys_map.mhif_start.host_phys % 0x1000);
+	ipc_priv->mhif_start.host_vaddr =
+		mmap(0, ipc_priv->sys_map.mhif_start.size + phy_align,
+		     (PROT_READ | PROT_WRITE), MAP_SHARED, ipc_priv->dev_mem,
+		     (ipc_priv->sys_map.mhif_start.host_phys - phy_align));
+	if (ipc_priv->mhif_start.host_vaddr == MAP_FAILED) {
+		BBDEV_LA12XX_PMD_ERR("MAP failed:");
+		ret = -errno;
+		goto err;
+	}
+
+	ipc_priv->mhif_start.host_vaddr = (void *)((size_t)
+		(ipc_priv->mhif_start.host_vaddr) + phy_align);
+
+	phy_align = (ipc_priv->sys_map.peb_start.host_phys % 0x1000);
+	ipc_priv->peb_start.host_vaddr =
+		mmap(0, ipc_priv->sys_map.peb_start.size + phy_align,
+		     (PROT_READ | PROT_WRITE), MAP_SHARED, ipc_priv->dev_mem,
+		     (ipc_priv->sys_map.peb_start.host_phys - phy_align));
+	if (ipc_priv->peb_start.host_vaddr == MAP_FAILED) {
+		BBDEV_LA12XX_PMD_ERR("MAP failed:");
+		ret = -errno;
+		goto err;
+	}
+
+	ipc_priv->peb_start.host_vaddr = (void *)((size_t)
+		(ipc_priv->peb_start.host_vaddr) + phy_align);
+
+	phy_align = (ipc_priv->sys_map.modem_ccsrbar.host_phys % 0x1000);
+	ipc_priv->modem_ccsrbar.host_vaddr =
+		mmap(0, ipc_priv->sys_map.modem_ccsrbar.size + phy_align,
+		     (PROT_READ | PROT_WRITE), MAP_SHARED, ipc_priv->dev_mem,
+		     (ipc_priv->sys_map.modem_ccsrbar.host_phys - phy_align));
+	if (ipc_priv->modem_ccsrbar.host_vaddr == MAP_FAILED) {
+		BBDEV_LA12XX_PMD_ERR("MAP failed:");
+		ret = -errno;
+		goto err;
+	}
+
+	ipc_priv->modem_ccsrbar.host_vaddr = (void *)((size_t)
+		(ipc_priv->modem_ccsrbar.host_vaddr) + phy_align);
+
+	ipc_priv->hugepg_start.modem_phys =
+		ipc_priv->sys_map.hugepg_start.modem_phys;
+
+	ipc_priv->mhif_start.host_phys =
+		ipc_priv->sys_map.mhif_start.host_phys;
+	ipc_priv->mhif_start.size = ipc_priv->sys_map.mhif_start.size;
+	ipc_priv->peb_start.host_phys = ipc_priv->sys_map.peb_start.host_phys;
+	ipc_priv->peb_start.size = ipc_priv->sys_map.peb_start.size;
+
+	BBDEV_LA12XX_PMD_INFO("peb 0x%" PRIx64 " %p %x",
+			ipc_priv->peb_start.host_phys,
+			ipc_priv->peb_start.host_vaddr,
+			ipc_priv->peb_start.size);
+	BBDEV_LA12XX_PMD_INFO("hugepg 0x%" PRIx64 " %p %x",
+			ipc_priv->hugepg_start.host_phys,
+			ipc_priv->hugepg_start.host_vaddr,
+			ipc_priv->hugepg_start.size);
+	BBDEV_LA12XX_PMD_INFO("mhif 0x%" PRIx64 " %p %x",
+			ipc_priv->mhif_start.host_phys,
+			ipc_priv->mhif_start.host_vaddr,
+			ipc_priv->mhif_start.size);
+	mhif = (struct gul_hif *)ipc_priv->mhif_start.host_vaddr;
+
+	/* offset is from start of PEB */
+	ipc_md = (ipc_metadata_t *)((size_t)ipc_priv->peb_start.host_vaddr +
+			mhif->ipc_regs.ipc_mdata_offset);
+
+	if (sizeof(ipc_metadata_t) != mhif->ipc_regs.ipc_mdata_size) {
+		BBDEV_LA12XX_PMD_ERR(
+			"\n ipc_metadata_t=%d mhif->ipc_regs.ipc_mdata_size=%x",
+			(int)sizeof(ipc_metadata_t),
+			mhif->ipc_regs.ipc_mdata_size);
+		BBDEV_LA12XX_PMD_ERR(
+			"--> mhif->ipc_regs.ipc_mdata_offset= %x",
+			mhif->ipc_regs.ipc_mdata_offset);
+		BBDEV_LA12XX_PMD_ERR(
+			"gul_hif size=%d", (int)sizeof(struct gul_hif));
+		return IPC_MD_SZ_MISS_MATCH;
+	}
+
+	ipc_priv->instance = (ipc_instance_t *)
+		(&ipc_md->instance_list[ipc_priv->instance_id]);
+
+	BBDEV_LA12XX_PMD_DEBUG("finish host init");
+
+	priv->ipc_priv = ipc_priv;
+
+	return 0;
+
+err:
+	rte_free(hp);
+	rte_free(ipc_priv);
+	rte_free(ipc_priv_ch);
+	if (dev_mem)
+		close(dev_mem);
+	if (dev_ipc)
+		close(dev_ipc);
+
+	return ret;
+}
+
 static inline int
 parse_u16_arg(const char *key, const char *value, void *extra_args)
 {
@@ -123,6 +678,7 @@  la12xx_bbdev_create(struct rte_vdev_device *vdev,
 	struct rte_bbdev *bbdev;
 	const char *name = rte_vdev_device_name(vdev);
 	struct bbdev_la12xx_private *priv;
+	int ret;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -152,7 +708,13 @@  la12xx_bbdev_create(struct rte_vdev_device *vdev,
 
 	BBDEV_LA12XX_PMD_INFO("Setting Up %s: DevId=%d, ModemId=%d",
 				name, bbdev->data->dev_id, priv->modem_id);
-	bbdev->dev_ops = NULL;
+	ret = setup_la12xx_dev(bbdev);
+	if (ret) {
+		BBDEV_LA12XX_PMD_ERR("IPC Setup failed for %s", name);
+		rte_free(bbdev->data->dev_private);
+		return ret;
+	}
+	bbdev->dev_ops = &pmd_ops;
 	bbdev->device = &vdev->device;
 	bbdev->data->socket_id = 0;
 	bbdev->intr_handle = NULL;
diff --git a/drivers/baseband/la12xx/bbdev_la12xx.h b/drivers/baseband/la12xx/bbdev_la12xx.h
index 5228502331..c94f08e059 100644
--- a/drivers/baseband/la12xx/bbdev_la12xx.h
+++ b/drivers/baseband/la12xx/bbdev_la12xx.h
@@ -14,7 +14,7 @@ 
 #define MAX_CHANNEL_DEPTH 16
 /* private data structure */
 struct bbdev_la12xx_private {
-	void *ipc_priv;
+	ipc_userspace_t *ipc_priv;
 	uint8_t num_valid_queues;
 	uint8_t max_nb_queues;
 	uint8_t num_ldpc_enc_queues;
@@ -52,5 +52,6 @@  struct bbdev_la12xx_q_priv {
 
 #define lower_32_bits(x) ((uint32_t)((uint64_t)x))
 #define upper_32_bits(x) ((uint32_t)(((uint64_t)(x) >> 16) >> 16))
-
+#define join_32_bits(upper, lower) \
+	((size_t)(((uint64_t)(upper) << 32) | (uint32_t)(lower)))
 #endif
diff --git a/drivers/baseband/la12xx/bbdev_la12xx_ipc.h b/drivers/baseband/la12xx/bbdev_la12xx_ipc.h
index 9aa5562981..9d5789f726 100644
--- a/drivers/baseband/la12xx/bbdev_la12xx_ipc.h
+++ b/drivers/baseband/la12xx/bbdev_la12xx_ipc.h
@@ -4,9 +4,175 @@ 
 #ifndef __BBDEV_LA12XX_IPC_H__
 #define __BBDEV_LA12XX_IPC_H__
 
+#define LA12XX_MAX_QUEUES 20
+
+/** No. of max channel per instance */
+#define IPC_MAX_CHANNEL_COUNT	(64)
+
 /** No. of max channel per instance */
 #define IPC_MAX_DEPTH	(16)
 
+/** No. of max IPC instance per modem */
+#define IPC_MAX_INSTANCE_COUNT	(1)
+
+#define HOST_RX_QUEUEID_OFFSET LA12XX_MAX_QUEUES
+
+#define MAX_MEM_POOL_COUNT 8
+
+/** Error codes */
+#define IPC_SUCCESS		(0) /** IPC operation success */
+#define IPC_INPUT_INVALID	(-1) /** Invalid input to API */
+#define IPC_CH_INVALID		(-2) /** Channel no is invalid */
+#define IPC_INSTANCE_INVALID	(-3) /** Instance no is invalid */
+#define IPC_MEM_INVALID		(-4) /** Insufficient memory */
+#define IPC_CH_FULL		(-5) /** Channel is full */
+#define IPC_CH_EMPTY		(-6) /** Channel is empty */
+#define IPC_BL_EMPTY		(-7) /** Free buffer list is empty */
+#define IPC_BL_FULL		(-8) /** Free buffer list is full */
+#define IPC_HOST_BUF_ALLOC_FAIL	(-9) /** DPDK malloc fail */
+#define IPC_MD_SZ_MISS_MATCH	(-10) /** Metadata size in mhif mismatched */
+#define IPC_MALLOC_FAIL		(-11) /** system malloc fail */
+#define IPC_IOCTL_FAIL		(-12) /** IOCTL call failed */
+#define IPC_MMAP_FAIL		(-14) /** MMAP fail */
+#define IPC_OPEN_FAIL		(-15) /** OPEN fail */
+#define IPC_EVENTFD_FAIL	(-16) /** eventfd initialization failed */
+#define IPC_NOT_IMPLEMENTED	(-17) /** IPC feature is not implemented yet */
+
+#define SET_HIF_HOST_RDY(hif, RDY_MASK) (hif->host_ready |= RDY_MASK)
+#define CHK_HIF_MOD_RDY(hif, RDY_MASK) (hif->mod_ready & RDY_MASK)
+
+/* Host Ready bits */
+#define HIF_HOST_READY_HOST_REGIONS	(1 << 0)
+#define HIF_HOST_READY_IPC_LIB		(1 << 12)
+#define HIF_HOST_READY_IPC_APP		(1 << 13)
+#define HIF_HOST_READY_FECA		(1 << 14)
+
+/* Modem Ready bits */
+#define HIF_MOD_READY_IPC_LIB		(1 << 5)
+#define HIF_MOD_READY_IPC_APP		(1 << 6)
+#define HIF_MOD_READY_FECA		(1 << 7)
+
+typedef void *ipc_t;
+
+struct ipc_msg {
+	int chid;
+	void *addr;
+	uint32_t len;
+	uint8_t flags;
+};
+
+typedef struct {
+	uint64_t host_phys;
+	uint32_t modem_phys;
+	void    *host_vaddr;
+	uint32_t size;
+} mem_range_t;
+
+#define GUL_IPC_MAGIC	'R'
+
+#define IOCTL_GUL_IPC_GET_SYS_MAP _IOW(GUL_IPC_MAGIC, 1, struct ipc_msg *)
+#define IOCTL_GUL_IPC_CHANNEL_REGISTER _IOWR(GUL_IPC_MAGIC, 4, struct ipc_msg *)
+#define IOCTL_GUL_IPC_CHANNEL_DEREGISTER \
+	_IOWR(GUL_IPC_MAGIC, 5, struct ipc_msg *)
+#define IOCTL_GUL_IPC_CHANNEL_RAISE_INTERRUPT _IOW(GUL_IPC_MAGIC, 6, int *)
+
+/** buffer ring common metadata */
+typedef struct ipc_bd_ring_md {
+	volatile uint32_t pi;		/**< Producer index and flag (MSB)
+					  *  which flip for each Ring wrapping
+					  */
+	volatile uint32_t ci;		/**< Consumer index and flag (MSB)
+					  *  which flip for each Ring wrapping
+					  */
+	uint32_t ring_size;	/**< depth (Used to roll-over pi/ci) */
+	uint32_t msg_size;	/**< Size of the each buffer */
+} __rte_packed ipc_br_md_t;
+
+/** IPC buffer descriptor */
+typedef struct ipc_buffer_desc {
+	union {
+		uint64_t host_virt;	/**< msg's host virtual address */
+		struct {
+			uint32_t host_virt_l;
+			uint32_t host_virt_h;
+		};
+	};
+	uint64_t host_phy;	/**< msg's host physical address */
+	uint32_t modem_ptr;	/**< msg's modem physical address */
+	uint32_t len;		/**< msg len */
+	uint64_t crc;		/**< crc */
+} __rte_packed ipc_bd_t;
+
+typedef struct ipc_channel {
+	uint32_t ch_id;		/**< Channel id */
+	uint32_t bl_initialized;/**< Set when buffer list is initialized */
+	ipc_br_md_t md;
+	ipc_bd_t bd[IPC_MAX_DEPTH];
+	uint32_t op_type;/* BBDEV operation supported on this channel */
+	uint32_t depth;	/* Channel depth */
+	uint32_t feca_blk_id;/* FECA Transport Block ID for processing */
+	uint32_t la12xx_core_id;/* LA12xx core ID to schedule work on */
+	uint32_t host_ipc_params;/* Address for host IPC parameters */
+} __rte_packed ipc_ch_t;
+
+typedef struct ipc_instance {
+	uint32_t initialized;		/**< Set in ipc_init */
+	ipc_ch_t ch_list[IPC_MAX_CHANNEL_COUNT];
+		/**< Channel descriptors in this instance */
+} __rte_packed ipc_instance_t;
+
+typedef struct ipc_metadata {
+	ipc_instance_t instance_list[IPC_MAX_INSTANCE_COUNT];
+} __rte_packed ipc_metadata_t;
+
+typedef struct ipc_channel_us_priv {
+	int32_t		eventfd;
+	uint32_t	channel_id;
+	/* In flight packets status for buffer list. */
+	uint8_t		bufs_inflight[IPC_MAX_DEPTH];
+} ipc_channel_us_t;
+
+typedef struct {
+	uint64_t host_phys;
+	uint32_t modem_phys;
+	uint32_t size;
+} mem_strt_addr_t;
+
+typedef struct {
+	mem_strt_addr_t modem_ccsrbar;
+	mem_strt_addr_t peb_start; /* PEB meta data */
+	mem_strt_addr_t mhif_start; /* MHIF meta data */
+	mem_strt_addr_t hugepg_start; /* Modem to access hugepage */
+} sys_map_t;
+
+typedef struct ipc_priv_t {
+	int instance_id;
+	int dev_ipc;
+	int dev_mem;
+	struct rte_mempool *rtemempool[MAX_MEM_POOL_COUNT];
+	sys_map_t sys_map;
+	mem_range_t modem_ccsrbar;
+	mem_range_t peb_start;
+	mem_range_t mhif_start;
+	mem_range_t hugepg_start;
+	ipc_channel_us_t *channels[IPC_MAX_CHANNEL_COUNT];
+	ipc_instance_t	*instance;
+} ipc_userspace_t;
+
+/** Structure specifying enqueue operation (enqueue at LA1224) */
+struct bbdev_ipc_enqueue_op {
+	/** Status of operation that was performed */
+	int32_t status;
+	/** CRC Status of SD operation that was performed */
+	int32_t crc_stat_addr;
+	/** HARQ Output buffer memory length for Shared Decode.
+	 * Filled by LA12xx.
+	 */
+	uint32_t out_len;
+	/** Reserved (for 8 byte alignment) */
+	uint32_t rsvd;
+};
+
 /* This shared memory would be on the host side which have copy of some
  * of the parameters which are also part of Shared BD ring. Read access
  * of these parameters from the host side would not be over PCI.
@@ -17,4 +183,18 @@  typedef struct host_ipc_params {
 	volatile uint32_t modem_ptr[IPC_MAX_DEPTH];
 } __rte_packed host_ipc_params_t;
 
+struct hif_ipc_regs {
+	uint32_t ipc_mdata_offset;
+	uint32_t ipc_mdata_size;
+} __rte_packed;
+
+struct gul_hif {
+	uint32_t ver;
+	uint32_t hif_ver;
+	uint32_t status;
+	volatile uint32_t host_ready;
+	volatile uint32_t mod_ready;
+	struct hif_ipc_regs ipc_regs;
+} __rte_packed;
+
 #endif