
[v3,09/11] drm/mediatek: Add secure flow support to mediatek-drm

Message ID 20231223182932.27683-10-jason-jh.lin@mediatek.com
State New
Series Add mediatek-drm secure flow for SVP

Commit Message

Jason-JH Lin (林睿祥) Dec. 23, 2023, 6:29 p.m. UTC
To add secure flow support for mediatek-drm, each crtc has to
create a secure cmdq mailbox channel. Then cmdq packets carrying the
display HW configuration will be sent to the secure cmdq mailbox
channel and configured in the secure world.

Each crtc has to use the secure cmdq interface to configure some
secure settings for the display HW before sending cmdq packets to the
secure cmdq mailbox channel.

If any fb taken from the current drm_atomic_state is secure, the crtc
will switch to the secure flow to configure the display HW.
If none of the fbs in the current drm_atomic_state is secure, the crtc
will switch to the normal flow.

TODO:
1. Remove the get-sec-larb-port interface from ddp_comp, ovl and
   ovl_adaptor.
2. Verify the instructions for enabling/disabling dapc and larb ports
   in the TEE, then drop the sec_engine flags in the normal world.

Signed-off-by: Jason-JH.Lin <jason-jh.lin@mediatek.com>
---
 drivers/gpu/drm/mediatek/mtk_drm_crtc.c  | 272 ++++++++++++++++++++++-
 drivers/gpu/drm/mediatek/mtk_drm_crtc.h  |   1 +
 drivers/gpu/drm/mediatek/mtk_drm_plane.c |   7 +
 3 files changed, 269 insertions(+), 11 deletions(-)

Comments

CK Hu (胡俊光) Dec. 26, 2023, 5:43 a.m. UTC | #1
Hi, Jason:

On Sun, 2023-12-24 at 02:29 +0800, Jason-JH.Lin wrote:
> To add secure flow support for mediatek-drm, each crtc has to
> create a secure cmdq mailbox channel. Then cmdq packets carrying
> the display HW configuration will be sent to the secure cmdq
> mailbox channel and configured in the secure world.
> 
> Each crtc has to use the secure cmdq interface to configure some
> secure settings for the display HW before sending cmdq packets to
> the secure cmdq mailbox channel.
> 
> If any fb taken from the current drm_atomic_state is secure, the
> crtc will switch to the secure flow to configure the display HW.
> If none of the fbs in the current drm_atomic_state is secure, the
> crtc will switch to the normal flow.
> 
> TODO:
> 1. Remove the get-sec-larb-port interface from ddp_comp, ovl and
>    ovl_adaptor.
> 2. Verify the instructions for enabling/disabling dapc and larb
>    ports in the TEE, then drop the sec_engine flags in the normal
>    world.
> 
> Signed-off-by: Jason-JH.Lin <jason-jh.lin@mediatek.com>
> 

[snip]

> @@ -1091,14 +1292,63 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
>  			if (ret) {
>  				dev_dbg(dev, "mtk_crtc %d failed to create cmdq packet\n",
>  					drm_crtc_index(&mtk_crtc->base));
> -				mbox_free_channel(mtk_crtc->cmdq_client.chan);
> -				mtk_crtc->cmdq_client.chan = NULL;
> +				goto cmdq_err;
>  			}
>  		}
>  
>  		/* for sending blocking cmd in crtc disable */
>  		init_waitqueue_head(&mtk_crtc->cb_blocking_queue);
>  	}
> +
> +	mtk_crtc->sec_cmdq_client.client.dev = mtk_crtc->mmsys_dev;
> +	mtk_crtc->sec_cmdq_client.client.tx_block = false;
> +	mtk_crtc->sec_cmdq_client.client.knows_txdone = true;
> +	mtk_crtc->sec_cmdq_client.client.rx_callback = ddp_cmdq_cb;
> +	mtk_crtc->sec_cmdq_client.chan =
> +			mbox_request_channel(&mtk_crtc->sec_cmdq_client.client, i + 1);
> +	if (IS_ERR(mtk_crtc->sec_cmdq_client.chan)) {
> +		dev_err(dev, "mtk_crtc %d failed to create sec mailbox client\n",
> +			drm_crtc_index(&mtk_crtc->base));
> +		mtk_crtc->sec_cmdq_client.chan = NULL;
> +	}
> +
> +	if (mtk_crtc->sec_cmdq_client.chan) {

I would like to use the secure channel to replace the normal channel.
That way no extra channel is required: just change the original normal
channel into a secure channel. The secure channel can process both
normal buffers and secure buffers, so you would not need to switch
channels.
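
Something like this rough sketch is what I mean (illustrative only,
reusing the names from this series; not a finished implementation):

	/*
	 * Keep the one cmdq channel requested at creation time and
	 * make it secure-capable.  The packet is tagged secure only
	 * when the atomic state actually carries a secure fb, so no
	 * channel switch is needed.
	 */
	static void mtk_crtc_submit(struct mtk_drm_crtc *mtk_crtc,
				    u64 sec_engine, u64 sec_port,
				    enum cmdq_sec_scenario sec_scn)
	{
		struct cmdq_pkt *handle = &mtk_crtc->cmdq_handle;

		if (mtk_crtc->sec_on)
			cmdq_sec_pkt_set_data(handle, sec_engine,
					      sec_port, sec_scn);

		mbox_send_message(mtk_crtc->cmdq_client.chan, handle);
		mbox_client_txdone(mtk_crtc->cmdq_client.chan, 0);
	}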

Regards,
CK

> +		struct device_link *link;
> +
> +		/* add devlink to cmdq dev to make sure suspend/resume order is correct */
> +		link = device_link_add(priv->dev, mtk_crtc->sec_cmdq_client.chan->mbox->dev,
> +				       DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);
> +		if (!link) {
> +			dev_err(priv->dev, "Unable to link dev=%s\n",
> +				dev_name(mtk_crtc->sec_cmdq_client.chan->mbox->dev));
> +			ret = -ENODEV;
> +			goto cmdq_err;
> +		}
> +
> +		ret = mtk_drm_cmdq_pkt_create(&mtk_crtc->sec_cmdq_client,
> +					      &mtk_crtc->sec_cmdq_handle,
> +					      PAGE_SIZE);
> +		if (ret) {
> +			dev_dbg(dev, "mtk_crtc %d failed to create cmdq secure packet\n",
> +				drm_crtc_index(&mtk_crtc->base));
> +			goto cmdq_err;
> +		}
> +
> +		/* for sending blocking cmd in crtc disable */
> +		init_waitqueue_head(&mtk_crtc->sec_cb_blocking_queue);
> +	}
> +
> +cmdq_err:
> +	if (ret) {
> +		if (mtk_crtc->cmdq_client.chan) {
> +			mbox_free_channel(mtk_crtc->cmdq_client.chan);
> +			mtk_crtc->cmdq_client.chan = NULL;
> +		}
> +		if (mtk_crtc->sec_cmdq_client.chan) {
> +			mbox_free_channel(mtk_crtc->sec_cmdq_client.chan);
> +			mtk_crtc->sec_cmdq_client.chan = NULL;
> +		}
> +	}
>  #endif
>  
>  	if (conn_routes) {
> diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
> index 1f988ff1bf9f..cf8433846108 100644
> --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
> +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
> @@ -21,6 +21,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
>  			int priv_data_index,
>  			const struct mtk_drm_route *conn_routes,
>  			unsigned int num_conn_routes);
> +void mtk_crtc_disable_secure_state(struct drm_crtc *crtc);
>  int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
>  			     struct mtk_plane_state *state);
>  void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
> diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
> index d4d515627ca4..96293c632d67 100644
> --- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
> +++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
> @@ -287,6 +287,13 @@ static void mtk_plane_atomic_disable(struct drm_plane *plane,
>  	mtk_plane_state->pending.enable = false;
>  	wmb(); /* Make sure the above parameter is set before update */
>  	mtk_plane_state->pending.dirty = true;
> +
> +	if (mtk_plane_state->pending.is_secure) {
> +		struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane);
> +
> +		if (old_state->crtc)
> +			mtk_crtc_disable_secure_state(old_state->crtc);
> +	}
>  }
>  
>  static void mtk_plane_atomic_update(struct drm_plane *plane,

Jason-JH Lin (林睿祥) Dec. 27, 2023, 7:05 a.m. UTC | #2
Hi CK,

Thanks for the reviews.

On Tue, 2023-12-26 at 05:43 +0000, CK Hu (胡俊光) wrote:
> Hi, Jason:
> 
> On Sun, 2023-12-24 at 02:29 +0800, Jason-JH.Lin wrote:
> > To add secure flow support for mediatek-drm, each crtc has to
> > create a secure cmdq mailbox channel. Then cmdq packets carrying
> > the display HW configuration will be sent to the secure cmdq
> > mailbox channel and configured in the secure world.
> > 
> > Each crtc has to use the secure cmdq interface to configure some
> > secure settings for the display HW before sending cmdq packets to
> > the secure cmdq mailbox channel.
> > 
> > If any fb taken from the current drm_atomic_state is secure, the
> > crtc will switch to the secure flow to configure the display HW.
> > If none of the fbs in the current drm_atomic_state is secure, the
> > crtc will switch to the normal flow.
> > 
> > TODO:
> > 1. Remove the get-sec-larb-port interface from ddp_comp, ovl and
> >    ovl_adaptor.
> > 2. Verify the instructions for enabling/disabling dapc and larb
> >    ports in the TEE, then drop the sec_engine flags in the normal
> >    world.
> > 
> > Signed-off-by: Jason-JH.Lin <jason-jh.lin@mediatek.com>
> > 
> 
> [snip]
> 
> > @@ -1091,14 +1292,63 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
> >  			if (ret) {
> >  				dev_dbg(dev, "mtk_crtc %d failed to create cmdq packet\n",
> >  					drm_crtc_index(&mtk_crtc->base));
> > -				mbox_free_channel(mtk_crtc->cmdq_client.chan);
> > -				mtk_crtc->cmdq_client.chan = NULL;
> > +				goto cmdq_err;
> >  			}
> >  		}
> >  
> >  		/* for sending blocking cmd in crtc disable */
> >  		init_waitqueue_head(&mtk_crtc->cb_blocking_queue);
> >  	}
> > +
> > +	mtk_crtc->sec_cmdq_client.client.dev = mtk_crtc->mmsys_dev;
> > +	mtk_crtc->sec_cmdq_client.client.tx_block = false;
> > +	mtk_crtc->sec_cmdq_client.client.knows_txdone = true;
> > +	mtk_crtc->sec_cmdq_client.client.rx_callback = ddp_cmdq_cb;
> > +	mtk_crtc->sec_cmdq_client.chan =
> > +			mbox_request_channel(&mtk_crtc->sec_cmdq_client.client, i + 1);
> > +	if (IS_ERR(mtk_crtc->sec_cmdq_client.chan)) {
> > +		dev_err(dev, "mtk_crtc %d failed to create sec mailbox client\n",
> > +			drm_crtc_index(&mtk_crtc->base));
> > +		mtk_crtc->sec_cmdq_client.chan = NULL;
> > +	}
> > +
> > +	if (mtk_crtc->sec_cmdq_client.chan) {
> 
> I would like to use the secure channel to replace the normal channel.
> That way no extra channel is required: just change the original normal
> channel into a secure channel. The secure channel can process both
> normal buffers and secure buffers, so you would not need to switch
> channels.
> 
> Regards,
> CK

It sounds quite reasonable!

If the platform or project supports OPTEE, we can use the secure
channel by default to handle both normal and secure buffers.
I will try to refine this and make sure frequent OPTEE transactions
won't cause latency issues.
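
For example, a rough sketch of the gating I have in mind (note that
cmdq_sec_available() is a made-up helper here for illustration, not an
existing API):

	/*
	 * Sketch only: request the secure-capable channel when the
	 * TEE path is usable, otherwise fall back to the normal one.
	 */
	if (IS_ENABLED(CONFIG_OPTEE) && cmdq_sec_available())
		index = i + 1;	/* secure channel index, as in this patch */
	else
		index = i;	/* normal channel index */

	mtk_crtc->cmdq_client.chan =
		mbox_request_channel(&mtk_crtc->cmdq_client.client, index);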

Regards,
Jason-JH.Lin

Patch

diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index db43f9dff912..79617c0f016d 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -56,6 +56,11 @@ struct mtk_drm_crtc {
 	u32				cmdq_event;
 	u32				cmdq_vblank_cnt;
 	wait_queue_head_t		cb_blocking_queue;
+
+	struct cmdq_client		sec_cmdq_client;
+	struct cmdq_pkt			sec_cmdq_handle;
+	bool				sec_cmdq_working;
+	wait_queue_head_t		sec_cb_blocking_queue;
 #endif
 
 	struct device			*mmsys_dev;
@@ -69,6 +74,7 @@ struct mtk_drm_crtc {
 	/* lock for display hardware access */
 	struct mutex			hw_lock;
 	bool				config_updating;
+	bool				sec_on;
 };
 
 struct mtk_crtc_state {
@@ -111,6 +117,154 @@ static void mtk_drm_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
 	}
 }
 
+void mtk_crtc_disable_secure_state(struct drm_crtc *crtc)
+{
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+	enum cmdq_sec_scenario sec_scn = CMDQ_SEC_SCNR_MAX;
+	int i;
+	struct mtk_ddp_comp *ddp_first_comp;
+	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+	u64 sec_engine = 0; /* for hw engine write output secure fb */
+	u64 sec_port = 0; /* for larb port read input secure fb */
+
+	mutex_lock(&mtk_crtc->hw_lock);
+
+	if (!mtk_crtc->sec_cmdq_client.chan) {
+		pr_err("crtc-%d secure mbox channel is NULL\n", drm_crtc_index(crtc));
+		goto err;
+	}
+
+	if (!mtk_crtc->sec_on) {
+		pr_debug("crtc-%d is already disabled!\n", drm_crtc_index(crtc));
+		goto err;
+	}
+
+	mbox_flush(mtk_crtc->sec_cmdq_client.chan, 0);
+	mtk_crtc->sec_cmdq_handle.cmd_buf_size = 0;
+
+	if (mtk_crtc->sec_cmdq_handle.sec_data) {
+		struct cmdq_sec_data *sec_data;
+
+		sec_data = mtk_crtc->sec_cmdq_handle.sec_data;
+		sec_data->addr_metadata_cnt = 0;
+		sec_data->addr_metadatas = (uintptr_t)NULL;
+	}
+
+	/*
+	 * Secure path only support DL mode, so we just wait
+	 * the first path frame done here
+	 */
+	cmdq_pkt_wfe(&mtk_crtc->sec_cmdq_handle, mtk_crtc->cmdq_event, false);
+
+	ddp_first_comp = mtk_crtc->ddp_comp[0];
+	for (i = 0; i < mtk_crtc->layer_nr; i++) {
+		struct drm_plane *plane = &mtk_crtc->planes[i];
+
+		sec_port |= mtk_ddp_comp_layer_get_sec_port(ddp_first_comp, i);
+
+		/* make sure secure layer off before switching secure state */
+		if (!mtk_plane_fb_is_secure(plane->state->fb)) {
+			struct mtk_plane_state *plane_state = to_mtk_plane_state(plane->state);
+
+			plane_state->pending.enable = false;
+			mtk_ddp_comp_layer_config(ddp_first_comp, i, plane_state,
+						  &mtk_crtc->sec_cmdq_handle);
+		}
+	}
+
+	/* Disable secure path */
+	if (drm_crtc_index(crtc) == 0)
+		sec_scn = CMDQ_SEC_SCNR_PRIMARY_DISP_DISABLE;
+	else if (drm_crtc_index(crtc) == 1)
+		sec_scn = CMDQ_SEC_SCNR_SUB_DISP_DISABLE;
+
+	cmdq_sec_pkt_set_data(&mtk_crtc->sec_cmdq_handle, sec_engine, sec_engine, sec_scn);
+
+	cmdq_pkt_finalize(&mtk_crtc->sec_cmdq_handle);
+	dma_sync_single_for_device(mtk_crtc->sec_cmdq_client.chan->mbox->dev,
+				   mtk_crtc->sec_cmdq_handle.pa_base,
+				   mtk_crtc->sec_cmdq_handle.cmd_buf_size,
+				   DMA_TO_DEVICE);
+
+	mtk_crtc->sec_cmdq_working = true;
+	mbox_send_message(mtk_crtc->sec_cmdq_client.chan, &mtk_crtc->sec_cmdq_handle);
+	mbox_client_txdone(mtk_crtc->sec_cmdq_client.chan, 0);
+
+	/* Wait for sec state to be disabled by cmdq */
+	wait_event_timeout(mtk_crtc->sec_cb_blocking_queue,
+			   !mtk_crtc->sec_cmdq_working,
+			   msecs_to_jiffies(500));
+
+	mtk_crtc->sec_on = false;
+	pr_debug("crtc-%d disable secure plane!\n", drm_crtc_index(crtc));
+
+err:
+	mutex_unlock(&mtk_crtc->hw_lock);
+#endif
+}
+
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+static void mtk_crtc_enable_secure_state(struct drm_crtc *crtc)
+{
+	enum cmdq_sec_scenario sec_scn = CMDQ_SEC_SCNR_MAX;
+	int i;
+	struct mtk_ddp_comp *ddp_first_comp;
+	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+	u64 sec_engine = 0; /* for hw engine write output secure fb */
+	u64 sec_port = 0; /* for larb port read input secure fb */
+
+	cmdq_pkt_wfe(&mtk_crtc->sec_cmdq_handle, mtk_crtc->cmdq_event, false);
+
+	ddp_first_comp = mtk_crtc->ddp_comp[0];
+	for (i = 0; i < mtk_crtc->layer_nr; i++)
+		if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_CURSOR)
+			sec_port |= mtk_ddp_comp_layer_get_sec_port(ddp_first_comp, i);
+
+	if (drm_crtc_index(crtc) == 0)
+		sec_scn = CMDQ_SEC_SCNR_PRIMARY_DISP;
+	else if (drm_crtc_index(crtc) == 1)
+		sec_scn = CMDQ_SEC_SCNR_SUB_DISP;
+
+	cmdq_sec_pkt_set_data(&mtk_crtc->sec_cmdq_handle, sec_engine, sec_port, sec_scn);
+
+	pr_debug("crtc-%d enable secure plane!\n", drm_crtc_index(crtc));
+}
+#endif
+
+static void mtk_drm_crtc_plane_switch_sec_state(struct drm_crtc *crtc,
+						struct drm_atomic_state *state)
+{
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+	bool sec_on[MAX_CRTC] = {0};
+	int i;
+	struct drm_crtc_state *crtc_state;
+	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+	struct drm_plane *plane;
+	struct drm_plane_state *old_plane_state;
+
+	for_each_old_plane_in_state(state, plane, old_plane_state, i) {
+		if (!plane->state->crtc)
+			continue;
+
+		if (plane->state->fb &&
+		    mtk_plane_fb_is_secure(plane->state->fb) &&
+		    mtk_crtc->sec_cmdq_client.chan)
+			sec_on[drm_crtc_index(plane->state->crtc)] = true;
+	}
+
+	for_each_old_crtc_in_state(state, crtc, crtc_state, i) {
+		mtk_crtc = to_mtk_crtc(crtc);
+
+		if (!sec_on[i])
+			mtk_crtc_disable_secure_state(crtc);
+
+		mutex_lock(&mtk_crtc->hw_lock);
+		mtk_crtc->sec_on = sec_on[i];
+		mutex_unlock(&mtk_crtc->hw_lock);
+	}
+#endif
+}
+
 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
 static int mtk_drm_cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *pkt,
 				   size_t size)
@@ -146,22 +300,33 @@ static void mtk_drm_cmdq_pkt_destroy(struct cmdq_pkt *pkt)
 	dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
 			 DMA_TO_DEVICE);
 	kfree(pkt->va_base);
+	kfree(pkt->sec_data);
 }
 #endif
 
 static void mtk_drm_crtc_destroy(struct drm_crtc *crtc)
 {
 	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+	struct mtk_drm_private *priv = crtc->dev->dev_private;
 	int i;
 
+	priv = priv->all_drm_private[drm_crtc_index(crtc)];
+
 	mtk_mutex_put(mtk_crtc->mutex);
 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
 	mtk_drm_cmdq_pkt_destroy(&mtk_crtc->cmdq_handle);
+	mtk_drm_cmdq_pkt_destroy(&mtk_crtc->sec_cmdq_handle);
 
 	if (mtk_crtc->cmdq_client.chan) {
 		mbox_free_channel(mtk_crtc->cmdq_client.chan);
 		mtk_crtc->cmdq_client.chan = NULL;
 	}
+
+	if (mtk_crtc->sec_cmdq_client.chan) {
+		device_link_remove(priv->dev, mtk_crtc->sec_cmdq_client.chan->mbox->dev);
+		mbox_free_channel(mtk_crtc->sec_cmdq_client.chan);
+		mtk_crtc->sec_cmdq_client.chan = NULL;
+	}
 #endif
 
 	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
@@ -288,13 +453,18 @@ static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
 {
 	struct cmdq_cb_data *data = mssg;
 	struct cmdq_client *cmdq_cl = container_of(cl, struct cmdq_client, client);
-	struct mtk_drm_crtc *mtk_crtc = container_of(cmdq_cl, struct mtk_drm_crtc, cmdq_client);
+	struct mtk_drm_crtc *mtk_crtc;
 	struct mtk_crtc_state *state;
 	unsigned int i;
 
 	if (data->sta < 0)
 		return;
 
+	if (!data->pkt || !data->pkt->sec_data)
+		mtk_crtc = container_of(cmdq_cl, struct mtk_drm_crtc, cmdq_client);
+	else
+		mtk_crtc = container_of(cmdq_cl, struct mtk_drm_crtc, sec_cmdq_client);
+
 	state = to_mtk_crtc_state(mtk_crtc->base.state);
 
 	state->pending_config = false;
@@ -323,6 +493,11 @@ static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
 		mtk_crtc->pending_async_planes = false;
 	}
 
+	if (mtk_crtc->sec_cmdq_working) {
+		mtk_crtc->sec_cmdq_working = false;
+		wake_up(&mtk_crtc->sec_cb_blocking_queue);
+	}
+
 	mtk_crtc->cmdq_vblank_cnt = 0;
 	wake_up(&mtk_crtc->cb_blocking_queue);
 }
@@ -549,7 +724,8 @@ static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc,
 				       bool needs_vblank)
 {
 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
-	struct cmdq_pkt *cmdq_handle = &mtk_crtc->cmdq_handle;
+	struct cmdq_client cmdq_client = {};
+	struct cmdq_pkt *cmdq_handle;
 #endif
 	struct drm_crtc *crtc = &mtk_crtc->base;
 	struct mtk_drm_private *priv = crtc->dev->dev_private;
@@ -587,14 +763,36 @@ static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc,
 		mtk_mutex_release(mtk_crtc->mutex);
 	}
 #if IS_REACHABLE(CONFIG_MTK_CMDQ)
-	if (mtk_crtc->cmdq_client.chan) {
+	if (mtk_crtc->sec_on) {
+		mbox_flush(mtk_crtc->sec_cmdq_client.chan, 0);
+		mtk_crtc->sec_cmdq_handle.cmd_buf_size = 0;
+
+		if (mtk_crtc->sec_cmdq_handle.sec_data) {
+			struct cmdq_sec_data *sec_data;
+
+			sec_data = mtk_crtc->sec_cmdq_handle.sec_data;
+			sec_data->addr_metadata_cnt = 0;
+			sec_data->addr_metadatas = (uintptr_t)NULL;
+		}
+
+		mtk_crtc_enable_secure_state(crtc);
+
+		cmdq_client = mtk_crtc->sec_cmdq_client;
+		cmdq_handle = &mtk_crtc->sec_cmdq_handle;
+	} else if (mtk_crtc->cmdq_client.chan) {
 		mbox_flush(mtk_crtc->cmdq_client.chan, 2000);
-		cmdq_handle->cmd_buf_size = 0;
+		mtk_crtc->cmdq_handle.cmd_buf_size = 0;
+
+		cmdq_client = mtk_crtc->cmdq_client;
+		cmdq_handle = &mtk_crtc->cmdq_handle;
+	}
+
+	if (cmdq_client.chan) {
 		cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
 		cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false);
 		mtk_crtc_ddp_config(crtc, cmdq_handle);
 		cmdq_pkt_finalize(cmdq_handle);
-		dma_sync_single_for_device(mtk_crtc->cmdq_client.chan->mbox->dev,
+		dma_sync_single_for_device(cmdq_client.chan->mbox->dev,
 					   cmdq_handle->pa_base,
 					   cmdq_handle->cmd_buf_size,
 					   DMA_TO_DEVICE);
@@ -607,8 +805,8 @@ static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc,
 		 */
 		mtk_crtc->cmdq_vblank_cnt = 3;
 
-		mbox_send_message(mtk_crtc->cmdq_client.chan, cmdq_handle);
-		mbox_client_txdone(mtk_crtc->cmdq_client.chan, 0);
+		mbox_send_message(cmdq_client.chan, cmdq_handle);
+		mbox_client_txdone(cmdq_client.chan, 0);
 	}
 #endif
 	mtk_crtc->config_updating = false;
@@ -750,6 +948,8 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
 	if (!mtk_crtc->enabled)
 		return;
 
+	mtk_crtc_disable_secure_state(crtc);
+
 	/* Set all pending plane state to disabled */
 	for (i = 0; i < mtk_crtc->layer_nr; i++) {
 		struct drm_plane *plane = &mtk_crtc->planes[i];
@@ -790,6 +990,8 @@ static void mtk_drm_crtc_atomic_begin(struct drm_crtc *crtc,
 	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
 	unsigned long flags;
 
+	mtk_drm_crtc_plane_switch_sec_state(crtc, state);
+
 	if (mtk_crtc->event && mtk_crtc_state->base.event)
 		DRM_ERROR("new event while there is still a pending event\n");
 
@@ -1082,8 +1284,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
 		if (ret) {
 			dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n",
 				drm_crtc_index(&mtk_crtc->base));
-			mbox_free_channel(mtk_crtc->cmdq_client.chan);
-			mtk_crtc->cmdq_client.chan = NULL;
+			goto cmdq_err;
 		} else {
 			ret = mtk_drm_cmdq_pkt_create(&mtk_crtc->cmdq_client,
 						      &mtk_crtc->cmdq_handle,
@@ -1091,14 +1292,63 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
 			if (ret) {
 				dev_dbg(dev, "mtk_crtc %d failed to create cmdq packet\n",
 					drm_crtc_index(&mtk_crtc->base));
-				mbox_free_channel(mtk_crtc->cmdq_client.chan);
-				mtk_crtc->cmdq_client.chan = NULL;
+				goto cmdq_err;
 			}
 		}
 
 		/* for sending blocking cmd in crtc disable */
 		init_waitqueue_head(&mtk_crtc->cb_blocking_queue);
 	}
+
+	mtk_crtc->sec_cmdq_client.client.dev = mtk_crtc->mmsys_dev;
+	mtk_crtc->sec_cmdq_client.client.tx_block = false;
+	mtk_crtc->sec_cmdq_client.client.knows_txdone = true;
+	mtk_crtc->sec_cmdq_client.client.rx_callback = ddp_cmdq_cb;
+	mtk_crtc->sec_cmdq_client.chan =
+			mbox_request_channel(&mtk_crtc->sec_cmdq_client.client, i + 1);
+	if (IS_ERR(mtk_crtc->sec_cmdq_client.chan)) {
+		dev_err(dev, "mtk_crtc %d failed to create sec mailbox client\n",
+			drm_crtc_index(&mtk_crtc->base));
+		mtk_crtc->sec_cmdq_client.chan = NULL;
+	}
+
+	if (mtk_crtc->sec_cmdq_client.chan) {
+		struct device_link *link;
+
+		/* add devlink to cmdq dev to make sure suspend/resume order is correct */
+		link = device_link_add(priv->dev, mtk_crtc->sec_cmdq_client.chan->mbox->dev,
+				       DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);
+		if (!link) {
+			dev_err(priv->dev, "Unable to link dev=%s\n",
+				dev_name(mtk_crtc->sec_cmdq_client.chan->mbox->dev));
+			ret = -ENODEV;
+			goto cmdq_err;
+		}
+
+		ret = mtk_drm_cmdq_pkt_create(&mtk_crtc->sec_cmdq_client,
+					      &mtk_crtc->sec_cmdq_handle,
+					      PAGE_SIZE);
+		if (ret) {
+			dev_dbg(dev, "mtk_crtc %d failed to create cmdq secure packet\n",
+				drm_crtc_index(&mtk_crtc->base));
+			goto cmdq_err;
+		}
+
+		/* for sending blocking cmd in crtc disable */
+		init_waitqueue_head(&mtk_crtc->sec_cb_blocking_queue);
+	}
+
+cmdq_err:
+	if (ret) {
+		if (mtk_crtc->cmdq_client.chan) {
+			mbox_free_channel(mtk_crtc->cmdq_client.chan);
+			mtk_crtc->cmdq_client.chan = NULL;
+		}
+		if (mtk_crtc->sec_cmdq_client.chan) {
+			mbox_free_channel(mtk_crtc->sec_cmdq_client.chan);
+			mtk_crtc->sec_cmdq_client.chan = NULL;
+		}
+	}
 #endif
 
 	if (conn_routes) {
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
index 1f988ff1bf9f..cf8433846108 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
@@ -21,6 +21,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
 			int priv_data_index,
 			const struct mtk_drm_route *conn_routes,
 			unsigned int num_conn_routes);
+void mtk_crtc_disable_secure_state(struct drm_crtc *crtc);
 int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
 			     struct mtk_plane_state *state);
 void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
index d4d515627ca4..96293c632d67 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
@@ -287,6 +287,13 @@ static void mtk_plane_atomic_disable(struct drm_plane *plane,
 	mtk_plane_state->pending.enable = false;
 	wmb(); /* Make sure the above parameter is set before update */
 	mtk_plane_state->pending.dirty = true;
+
+	if (mtk_plane_state->pending.is_secure) {
+		struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane);
+
+		if (old_state->crtc)
+			mtk_crtc_disable_secure_state(old_state->crtc);
+	}
 }
 
 static void mtk_plane_atomic_update(struct drm_plane *plane,