@@ -35,6 +35,16 @@ int mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
bhi_vec->size = mhi_buf->len;
}
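+ /*
+ * When the RDDM image is allocated on demand, the vector table was
+ * kzalloc'd rather than taken from the coherent pool, so it must be
+ * streaming-mapped towards the device before BHIe is programmed with
+ * its address.
+ */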
+ if (!mhi_cntrl->rddm_prealloc) {
+ mhi_buf->dma_addr = dma_map_single(mhi_cntrl->cntrl_dev,
+ mhi_buf->buf, mhi_buf->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(mhi_cntrl->cntrl_dev, mhi_buf->dma_addr)) {
+ dev_err(dev, "dma mapping failed\n");
+ return -ENOMEM;
+ }
+ }
+
dev_dbg(dev, "BHIe programming for RDDM\n");
mhi_write_reg(mhi_cntrl, base, BHIE_RXVECADDR_HIGH_OFFS,
@@ -158,10 +168,35 @@ int mhi_download_rddm_image(struct mhi_controller *mhi_cntrl, bool in_panic)
{
void __iomem *base = mhi_cntrl->bhie;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ struct mhi_buf *mhi_buf = NULL;
u32 rx_status;
+ int ret;
- if (in_panic)
- return __mhi_download_rddm_in_panic(mhi_cntrl);
+ /*
+ * Allocate RDDM table if specified, this table is for debugging purpose
+ */
+ if (!mhi_cntrl->rddm_prealloc && mhi_cntrl->rddm_size) {
+ ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image,
+ mhi_cntrl->rddm_size, IMG_TYPE_RDDM);
+ if (ret) {
+ dev_err(dev, "Failed to allocate RDDM table memory\n");
+ return ret;
+ }
+
+ /* Set up the RX vector table */
+ ret = mhi_rddm_prepare(mhi_cntrl, mhi_cntrl->rddm_image);
+ if (ret) {
+ dev_err(dev, "Failed to prepare RDDM\n");
+ mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image,
+ IMG_TYPE_RDDM);
+ mhi_cntrl->rddm_image = NULL;
+ return ret;
+ }
+ }
+
+ if (in_panic) {
+ ret = __mhi_download_rddm_in_panic(mhi_cntrl);
+ goto out;
+ }
dev_dbg(dev, "Waiting for RDDM image download via BHIe\n");
@@ -173,7 +208,16 @@ int mhi_download_rddm_image(struct mhi_controller *mhi_cntrl, bool in_panic)
&rx_status) || rx_status,
msecs_to_jiffies(mhi_cntrl->timeout_ms));
- return (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO;
+ ret = (rx_status == BHIE_RXVECSTATUS_STATUS_XFER_COMPL) ? 0 : -EIO;
+
+out:
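+ /*
+ * The last entry holds the vector table, which was streaming-mapped
+ * in mhi_rddm_prepare(); release that mapping now that the download
+ * has finished.
+ */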
+ mhi_buf = &mhi_cntrl->rddm_image->mhi_buf[mhi_cntrl->rddm_image->entries - 1];
+
+ if (!mhi_cntrl->rddm_prealloc)
+ dma_unmap_single(mhi_cntrl->cntrl_dev, mhi_buf->dma_addr,
+ mhi_buf->len, DMA_TO_DEVICE);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(mhi_download_rddm_image);
@@ -297,14 +341,25 @@ static int mhi_fw_load_bhi(struct mhi_controller *mhi_cntrl,
}
void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
- struct image_info *image_info)
+ struct image_info *image_info,
+ enum image_type img_type)
{
int i;
struct mhi_buf *mhi_buf = image_info->mhi_buf;
- for (i = 0; i < image_info->entries; i++, mhi_buf++)
- mhi_fw_free_coherent(mhi_cntrl, mhi_buf->len,
- mhi_buf->buf, mhi_buf->dma_addr);
+ for (i = 0; i < image_info->entries; i++, mhi_buf++) {
+ if (img_type == IMG_TYPE_RDDM && !mhi_cntrl->rddm_prealloc) {
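+ /*
+ * Only the data segments (all entries but the last) were
+ * streaming-mapped at allocation time; the vector table in
+ * the last entry is mapped and unmapped around the RDDM
+ * download itself.
+ */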
+ if (i != (image_info->entries - 1))
+ dma_unmap_single(mhi_cntrl->cntrl_dev,
+ mhi_buf->dma_addr,
+ mhi_buf->len,
+ DMA_FROM_DEVICE);
+ kfree(mhi_buf->buf);
+ } else {
+ mhi_fw_free_coherent(mhi_cntrl, mhi_buf->len,
+ mhi_buf->buf, mhi_buf->dma_addr);
+ }
+ }
kfree(image_info->mhi_buf);
kfree(image_info);
@@ -312,21 +367,31 @@ void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
struct image_info **image_info,
- size_t alloc_size)
+ size_t alloc_size, enum image_type img_type)
{
size_t seg_size = mhi_cntrl->seg_len;
- int segments = DIV_ROUND_UP(alloc_size, seg_size) + 1;
+ int segments;
int i;
struct image_info *img_info;
struct mhi_buf *mhi_buf;
+ /*
+ * Mask out __GFP_DIRECT_RECLAIM in non-interrupt context to avoid
+ * kmalloc sleeping in an RCU section during a kernel panic.
+ */
+ gfp_t gfp = (in_interrupt() ? GFP_ATOMIC :
+ ((GFP_KERNEL | __GFP_NORETRY) & ~__GFP_DIRECT_RECLAIM));
+
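+ /* RDDM images use the controller-specific RDDM segment length */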
+ if (img_type == IMG_TYPE_RDDM)
+ seg_size = mhi_cntrl->rddm_seg_len;
- img_info = kzalloc(sizeof(*img_info), GFP_KERNEL);
+ segments = DIV_ROUND_UP(alloc_size, seg_size) + 1;
+
+ img_info = kzalloc(sizeof(*img_info), gfp);
if (!img_info)
return -ENOMEM;
/* Allocate memory for entries */
img_info->mhi_buf = kcalloc(segments, sizeof(*img_info->mhi_buf),
- GFP_KERNEL);
+ gfp);
if (!img_info->mhi_buf)
goto error_alloc_mhi_buf;
@@ -340,11 +405,42 @@ int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
vec_size = sizeof(struct bhi_vec_entry) * i;
mhi_buf->len = vec_size;
- mhi_buf->buf = mhi_fw_alloc_coherent(mhi_cntrl, vec_size,
- &mhi_buf->dma_addr,
- GFP_KERNEL);
- if (!mhi_buf->buf)
- goto error_alloc_segment;
+
+ if (img_type == IMG_TYPE_RDDM && !mhi_cntrl->rddm_prealloc) {
+ /* Vector table is the last entry */
+ if (i == segments - 1) {
+ mhi_buf->buf = kzalloc(PAGE_ALIGN(vec_size),
+ gfp);
+ if (!mhi_buf->buf)
+ goto error_alloc_segment;
+
+ /*
+ * The vector table entry is dma_map'ed in
+ * mhi_rddm_prepare() with DMA_TO_DEVICE and unmapped
+ * once the target completes the RDDM transfer.
+ */
+ continue;
+ }
+ mhi_buf->buf = kmalloc(vec_size, gfp);
+ if (!mhi_buf->buf)
+ goto error_alloc_segment;
+
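+ /*
+ * Data segments are streaming-mapped DMA_FROM_DEVICE so
+ * the device can write the RDDM dump into them.
+ */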
+ mhi_buf->dma_addr = dma_map_single(mhi_cntrl->cntrl_dev,
+ mhi_buf->buf,
+ vec_size,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(mhi_cntrl->cntrl_dev,
+ mhi_buf->dma_addr)) {
+ kfree(mhi_buf->buf);
+ goto error_alloc_segment;
+ }
+ } else {
+ mhi_buf->buf = mhi_fw_alloc_coherent(mhi_cntrl,
+ vec_size,
+ &mhi_buf->dma_addr,
+ GFP_KERNEL);
+ if (!mhi_buf->buf)
+ goto error_alloc_segment;
+ }
}
img_info->bhi_vec = img_info->mhi_buf[segments - 1].buf;
@@ -354,9 +450,18 @@ int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
return 0;
error_alloc_segment:
- for (--i, --mhi_buf; i >= 0; i--, mhi_buf--)
- mhi_fw_free_coherent(mhi_cntrl, mhi_buf->len,
- mhi_buf->buf, mhi_buf->dma_addr);
+ for (--i, --mhi_buf; i >= 0; i--, mhi_buf--) {
+ if (img_type == IMG_TYPE_RDDM && !mhi_cntrl->rddm_prealloc) {
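+ /*
+ * The unwind only ever covers data segments, since the
+ * vector table is allocated last; all of them were
+ * streaming-mapped at allocation time.
+ */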
+ dma_unmap_single(mhi_cntrl->cntrl_dev,
+ mhi_buf->dma_addr, mhi_buf->len,
+ DMA_FROM_DEVICE);
+ kfree(mhi_buf->buf);
+ } else {
+ mhi_fw_free_coherent(mhi_cntrl, mhi_buf->len,
+ mhi_buf->buf, mhi_buf->dma_addr);
+ }
+ }
error_alloc_mhi_buf:
kfree(img_info);
@@ -485,7 +590,8 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
* device transitioning into MHI READY state
*/
if (mhi_cntrl->fbc_download) {
- ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image, fw_sz);
+ ret = mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->fbc_image, fw_sz,
+ IMG_TYPE_FBC);
if (ret) {
release_firmware(firmware);
goto error_fw_load;
@@ -510,7 +616,8 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
error_ready_state:
if (mhi_cntrl->fbc_download) {
- mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
+ mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image,
+ IMG_TYPE_FBC);
mhi_cntrl->fbc_image = NULL;
}
@@ -1058,6 +1058,9 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
mhi_cntrl->unmap_single = mhi_unmap_single_no_bb;
}
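+ /* Preallocate the RDDM image by default; controllers may clear this */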
+ mhi_cntrl->rddm_prealloc = true;
+ mhi_cntrl->rddm_seg_len = mhi_cntrl->seg_len;
+
mhi_cntrl->index = ida_alloc(&mhi_controller_ida, GFP_KERNEL);
if (mhi_cntrl->index < 0) {
ret = mhi_cntrl->index;
@@ -1224,14 +1227,18 @@ int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
/*
* Allocate RDDM table for debugging purpose if specified
*/
- mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image,
- mhi_cntrl->rddm_size);
+ if (mhi_cntrl->rddm_prealloc)
+ mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image,
+ mhi_cntrl->rddm_size,
+ IMG_TYPE_RDDM);
+
if (mhi_cntrl->rddm_image) {
ret = mhi_rddm_prepare(mhi_cntrl,
mhi_cntrl->rddm_image);
if (ret) {
mhi_free_bhie_table(mhi_cntrl,
- mhi_cntrl->rddm_image);
+ mhi_cntrl->rddm_image,
+ IMG_TYPE_RDDM);
goto error_reg_offset;
}
}
@@ -1254,12 +1261,14 @@ EXPORT_SYMBOL_GPL(mhi_prepare_for_power_up);
void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl)
{
if (mhi_cntrl->fbc_image) {
- mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
+ mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image,
+ IMG_TYPE_FBC);
mhi_cntrl->fbc_image = NULL;
}
if (mhi_cntrl->rddm_image) {
- mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
+ mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image,
+ IMG_TYPE_RDDM);
mhi_cntrl->rddm_image = NULL;
}
@@ -176,6 +176,11 @@ enum mhi_er_type {
MHI_ER_TYPE_VALID = 0x1,
};
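+/* BHIe image types, used to pick the RDDM-specific allocation scheme */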
+enum image_type {
+ IMG_TYPE_FBC,
+ IMG_TYPE_RDDM,
+};
+
struct db_cfg {
bool reset_req;
bool db_mode;
@@ -314,9 +319,11 @@ int mhi_destroy_device(struct device *dev, void *data);
void mhi_create_devices(struct mhi_controller *mhi_cntrl);
int mhi_alloc_bhie_table(struct mhi_controller *mhi_cntrl,
- struct image_info **image_info, size_t alloc_size);
+ struct image_info **image_info, size_t alloc_size,
+ enum image_type img_type);
void mhi_free_bhie_table(struct mhi_controller *mhi_cntrl,
- struct image_info *image_info);
+ struct image_info *image_info,
+ enum image_type img_type);
/* Power management APIs */
enum mhi_pm_state __must_check mhi_tryset_pm_state(
@@ -503,13 +503,13 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
}
write_unlock_irq(&mhi_cntrl->pm_lock);
- if (pm_state != MHI_PM_SYS_ERR_DETECT)
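+ /* Proceed for RDDM even if no SYS_ERROR transition was detected */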
+ if (pm_state != MHI_PM_SYS_ERR_DETECT && ee != MHI_EE_RDDM)
goto exit_intvec;
switch (ee) {
case MHI_EE_RDDM:
/* proceed if power down is not already in progress */
- if (mhi_cntrl->rddm_image && mhi_is_active(mhi_cntrl)) {
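+ /*
+ * With on-demand allocation the RDDM image may not exist
+ * yet, so check rddm_size to determine RDDM support.
+ */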
+ if (mhi_cntrl->rddm_size && mhi_is_active(mhi_cntrl)) {
mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
mhi_cntrl->ee = ee;
wake_up_all(&mhi_cntrl->state_event);
@@ -767,7 +767,7 @@ void mhi_pm_sys_err_handler(struct mhi_controller *mhi_cntrl)
struct device *dev = &mhi_cntrl->mhi_dev->dev;
/* skip if controller supports RDDM */
- if (mhi_cntrl->rddm_image) {
+ if (mhi_cntrl->rddm_size) {
dev_dbg(dev, "Controller supports RDDM, skip SYS_ERROR\n");
return;
}
@@ -452,6 +452,8 @@ struct mhi_controller {
struct device_node *cma_node;
phys_addr_t cma_base;
size_t cma_size;
+ bool rddm_prealloc;
+ u32 rddm_seg_len;
};
/**