
[v3] serial: samsung: use dma_ops of DMA if attached

Message ID 20210622091710.44032-1-m.shams@samsung.com
State New
Series [v3] serial: samsung: use dma_ops of DMA if attached

Commit Message

Tamseel Shams June 22, 2021, 9:17 a.m. UTC
When DMA is used for TX and RX by the serial driver, it should
pass the DMA device pointer to the DMA API instead of the UART
device pointer. The DMA device must be used with the DMA API
because only the DMA device knows how it is connected to memory:
there might be an extra level of address translation due to an
SMMU attached to the DMA device. When the serial device pointer
is used with the DMA API, the DMA API has no knowledge of the
SMMU attached to the DMA device.

This patch is necessary to fix the SMMU page faults that are
observed when a DMA controller (with SMMU enabled) is attached
to the UART for transfers.

Signed-off-by: Tamseel Shams <m.shams@samsung.com>
Signed-off-by: Ajay Kumar <ajaykumar.rs@samsung.com>
---
Changes since v1:
1. Rebased the patch on "tty-next" branch of TTY driver tree

Changes since v2:
1. Updated the commit message.
2. Changed the comment description

 drivers/tty/serial/samsung_tty.c | 62 +++++++++++++++++++++++++-------
 1 file changed, 50 insertions(+), 12 deletions(-)
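
To illustrate the idea (this fragment is not part of the patch; the helper
name and its placement are made up for illustration), the mappings end up
using the DMA engine's struct device rather than the UART's:

#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>

/*
 * Sketch: map the RX bounce buffer with the device that actually masters
 * the bus -- the DMA engine behind the channel -- so the DMA API uses its
 * dma_ops (and any SMMU translation) instead of the UART's.  Assumes the
 * driver's struct s3c24xx_uart_dma with the rx_chan/rx_buf/rx_size fields
 * used elsewhere in this patch.
 */
static dma_addr_t uart_map_rx_buf(struct s3c24xx_uart_dma *dma)
{
	struct device *dma_dev = dma->rx_chan->device->dev;

	return dma_map_single(dma_dev, dma->rx_buf,
			      dma->rx_size, DMA_FROM_DEVICE);
}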

Comments

Krzysztof Kozlowski June 23, 2021, 6:10 a.m. UTC | #1
On 22/06/2021 11:17, Tamseel Shams wrote:
> When DMA is used for TX and RX by serial driver, it should

> pass the DMA device pointer to DMA API instead of UART device

> pointer. DMA device should be used for DMA API because only

> the DMA device is aware of how the device connects to the memory.

> There might be an extra level of address translation due to a

> SMMU attached to the DMA device. When serial device pointer device

> is used for DMA API, the DMA API will have no clue of the SMMU

> attached to the DMA device.

> 

> This patch is necessary to fix the SMMU page faults

> which is observed when a DMA(with SMMU enabled) is attached

> to UART for transfer.

> 

> Signed-off-by: Tamseel Shams <m.shams@samsung.com>

> Signed-off-by: Ajay Kumar <ajaykumar.rs@samsung.com>

> ---

> Changes since v1:

> 1. Rebased the patch on "tty-next" branch of TTY driver tree

> 

> Changes since v2:

> 1. Updated the commit message.

> 2. Changed the comment description

> 

>  drivers/tty/serial/samsung_tty.c | 62 +++++++++++++++++++++++++-------

>  1 file changed, 50 insertions(+), 12 deletions(-)

> 

> diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c

> index 9fbc61151c2e..7dd648cb7237 100644

> --- a/drivers/tty/serial/samsung_tty.c

> +++ b/drivers/tty/serial/samsung_tty.c

> @@ -284,8 +284,13 @@ static void s3c24xx_serial_stop_tx(struct uart_port *port)

>  	struct s3c24xx_uart_dma *dma = ourport->dma;

>  	struct circ_buf *xmit = &port->state->xmit;

>  	struct dma_tx_state state;

> +	struct device *dma_map_ops_dev = ourport->port.dev;

>  	int count;

>  

> +	/* Null pointer check for "dma" and "dma->tx_chan" */


The comment is not useful.

> +	if (dma && dma->tx_chan)

> +		dma_map_ops_dev = dma->tx_chan->device->dev;

> +

>  	if (!ourport->tx_enabled)

>  		return;

>  

> @@ -305,7 +310,7 @@ static void s3c24xx_serial_stop_tx(struct uart_port *port)

>  		dmaengine_pause(dma->tx_chan);

>  		dmaengine_tx_status(dma->tx_chan, dma->tx_cookie, &state);

>  		dmaengine_terminate_all(dma->tx_chan);

> -		dma_sync_single_for_cpu(ourport->port.dev,

> +		dma_sync_single_for_cpu(dma_map_ops_dev,

>  			dma->tx_transfer_addr, dma->tx_size, DMA_TO_DEVICE);

>  		async_tx_ack(dma->tx_desc);

>  		count = dma->tx_bytes_requested - state.residue;

> @@ -331,14 +336,19 @@ static void s3c24xx_serial_tx_dma_complete(void *args)

>  	struct circ_buf *xmit = &port->state->xmit;

>  	struct s3c24xx_uart_dma *dma = ourport->dma;

>  	struct dma_tx_state state;

> +	struct device *dma_map_ops_dev = ourport->port.dev;

>  	unsigned long flags;

>  	int count;

>  

> +	/* Null pointer check for "dma" and "dma->tx_chan" */

> +	if (dma && dma->tx_chan)



What is the "else" here? How is it even possible? Why have this "if"
at all?

> +		dma_map_ops_dev = dma->tx_chan->device->dev;

> +

>  	dmaengine_tx_status(dma->tx_chan, dma->tx_cookie, &state);

>  	count = dma->tx_bytes_requested - state.residue;

>  	async_tx_ack(dma->tx_desc);

>  

> -	dma_sync_single_for_cpu(ourport->port.dev, dma->tx_transfer_addr,

> +	dma_sync_single_for_cpu(dma_map_ops_dev, dma->tx_transfer_addr,

>  				dma->tx_size, DMA_TO_DEVICE);

>  

>  	spin_lock_irqsave(&port->lock, flags);

> @@ -436,6 +446,11 @@ static int s3c24xx_serial_start_tx_dma(struct s3c24xx_uart_port *ourport,

>  	struct uart_port *port = &ourport->port;

>  	struct circ_buf *xmit = &port->state->xmit;

>  	struct s3c24xx_uart_dma *dma = ourport->dma;

> +	struct device *dma_map_ops_dev = ourport->port.dev;

> +

> +	/* Null pointer check for "dma" and "dma->tx_chan" */

> +	if (dma && dma->tx_chan)

> +		dma_map_ops_dev = dma->tx_chan->device->dev;

>  

>  	if (ourport->tx_mode != S3C24XX_TX_DMA)

>  		enable_tx_dma(ourport);

> @@ -443,7 +458,7 @@ static int s3c24xx_serial_start_tx_dma(struct s3c24xx_uart_port *ourport,

>  	dma->tx_size = count & ~(dma_get_cache_alignment() - 1);

>  	dma->tx_transfer_addr = dma->tx_addr + xmit->tail;

>  

> -	dma_sync_single_for_device(ourport->port.dev, dma->tx_transfer_addr,

> +	dma_sync_single_for_device(dma_map_ops_dev, dma->tx_transfer_addr,

>  				dma->tx_size, DMA_TO_DEVICE);

>  

>  	dma->tx_desc = dmaengine_prep_slave_single(dma->tx_chan,

> @@ -510,12 +525,17 @@ static void s3c24xx_uart_copy_rx_to_tty(struct s3c24xx_uart_port *ourport,

>  		struct tty_port *tty, int count)

>  {

>  	struct s3c24xx_uart_dma *dma = ourport->dma;

> +	struct device *dma_map_ops_dev = ourport->port.dev;

>  	int copied;

>  

> +	/* Null pointer check for "dma" and "dma->rx_chan" */

> +	if (dma && dma->rx_chan)

> +		dma_map_ops_dev = dma->rx_chan->device->dev;

> +

>  	if (!count)

>  		return;

>  

> -	dma_sync_single_for_cpu(ourport->port.dev, dma->rx_addr,

> +	dma_sync_single_for_cpu(dma_map_ops_dev, dma->rx_addr,

>  				dma->rx_size, DMA_FROM_DEVICE);

>  

>  	ourport->port.icount.rx += count;

> @@ -635,8 +655,13 @@ static void s3c24xx_serial_rx_dma_complete(void *args)

>  static void s3c64xx_start_rx_dma(struct s3c24xx_uart_port *ourport)

>  {

>  	struct s3c24xx_uart_dma *dma = ourport->dma;

> +	struct device *dma_map_ops_dev = ourport->port.dev;

> +

> +	/* Null pointer check for "dma" and "dma->rx_chan" */

> +	if (dma && dma->rx_chan)

> +		dma_map_ops_dev = dma->rx_chan->device->dev;

>  

> -	dma_sync_single_for_device(ourport->port.dev, dma->rx_addr,

> +	dma_sync_single_for_device(dma_map_ops_dev, dma->rx_addr,

>  				dma->rx_size, DMA_FROM_DEVICE);

>  

>  	dma->rx_desc = dmaengine_prep_slave_single(dma->rx_chan,

> @@ -1045,6 +1070,7 @@ static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p)

>  	struct s3c24xx_uart_dma	*dma = p->dma;

>  	struct dma_slave_caps dma_caps;

>  	const char *reason = NULL;

> +	struct device *dma_map_ops_dev = p->port.dev;

>  	int ret;

>  

>  	/* Default slave configuration parameters */

> @@ -1102,18 +1128,25 @@ static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p)

>  		goto err_release_tx;

>  	}

>  

> -	dma->rx_addr = dma_map_single(p->port.dev, dma->rx_buf,

> +	/* Null pointer check for "dma" and "dma->rx_chan" */

> +	if (dma && dma->rx_chan)

> +		dma_map_ops_dev = dma->rx_chan->device->dev;

> +

> +	dma->rx_addr = dma_map_single(dma_map_ops_dev, dma->rx_buf,

>  				dma->rx_size, DMA_FROM_DEVICE);

> -	if (dma_mapping_error(p->port.dev, dma->rx_addr)) {

> +	if (dma_mapping_error(dma_map_ops_dev, dma->rx_addr)) {

>  		reason = "DMA mapping error for RX buffer";

>  		ret = -EIO;

>  		goto err_free_rx;

>  	}

>  

> +	/* Null pointer check for "dma" and "dma->tx_chan" */

> +	if (dma && dma->tx_chan)

> +		dma_map_ops_dev = dma->tx_chan->device->dev;

>  	/* TX buffer */

> -	dma->tx_addr = dma_map_single(p->port.dev, p->port.state->xmit.buf,

> +	dma->tx_addr = dma_map_single(dma_map_ops_dev, p->port.state->xmit.buf,

>  				UART_XMIT_SIZE, DMA_TO_DEVICE);

> -	if (dma_mapping_error(p->port.dev, dma->tx_addr)) {

> +	if (dma_mapping_error(dma_map_ops_dev, dma->tx_addr)) {

>  		reason = "DMA mapping error for TX buffer";

>  		ret = -EIO;

>  		goto err_unmap_rx;

> @@ -1122,7 +1155,9 @@ static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p)

>  	return 0;

>  

>  err_unmap_rx:

> -	dma_unmap_single(p->port.dev, dma->rx_addr, dma->rx_size,

> +	if (dma->rx_chan)

> +		dma_map_ops_dev = dma->rx_chan->device->dev;

> +	dma_unmap_single(dma_map_ops_dev, dma->rx_addr, dma->rx_size,

>  			 DMA_FROM_DEVICE);

>  err_free_rx:

>  	kfree(dma->rx_buf);

> @@ -1139,10 +1174,12 @@ static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p)

>  static void s3c24xx_serial_release_dma(struct s3c24xx_uart_port *p)

>  {

>  	struct s3c24xx_uart_dma	*dma = p->dma;

> +	struct device *dma_map_ops_dev = p->port.dev;

>  

>  	if (dma->rx_chan) {

> +		dma_map_ops_dev = dma->rx_chan->device->dev;

>  		dmaengine_terminate_all(dma->rx_chan);

> -		dma_unmap_single(p->port.dev, dma->rx_addr,

> +		dma_unmap_single(dma_map_ops_dev, dma->rx_addr,

>  				dma->rx_size, DMA_FROM_DEVICE);

>  		kfree(dma->rx_buf);

>  		dma_release_channel(dma->rx_chan);

> @@ -1150,8 +1187,9 @@ static void s3c24xx_serial_release_dma(struct s3c24xx_uart_port *p)

>  	}

>  

>  	if (dma->tx_chan) {

> +		dma_map_ops_dev = dma->tx_chan->device->dev;

>  		dmaengine_terminate_all(dma->tx_chan);

> -		dma_unmap_single(p->port.dev, dma->tx_addr,

> +		dma_unmap_single(dma_map_ops_dev, dma->tx_addr,

>  				UART_XMIT_SIZE, DMA_TO_DEVICE);

>  		dma_release_channel(dma->tx_chan);

>  		dma->tx_chan = NULL;

> 



Best regards,
Krzysztof
Tamseel Shams June 23, 2021, 6:29 a.m. UTC | #2
> > When DMA is used for TX and RX by serial driver, it should pass the

> > DMA device pointer to DMA API instead of UART device pointer. DMA

> > device should be used for DMA API because only the DMA device is aware

> > of how the device connects to the memory.

> > There might be an extra level of address translation due to a SMMU

> > attached to the DMA device. When serial device pointer device is used

> > for DMA API, the DMA API will have no clue of the SMMU attached to the

> > DMA device.

> >

> > This patch is necessary to fix the SMMU page faults which is observed

> > when a DMA(with SMMU enabled) is attached to UART for transfer.

> >

> > Signed-off-by: Tamseel Shams <m.shams@samsung.com>

> > Signed-off-by: Ajay Kumar <ajaykumar.rs@samsung.com>

> > ---

> > Changes since v1:

> > 1. Rebased the patch on "tty-next" branch of TTY driver tree

> >

> > Changes since v2:

> > 1. Updated the commit message.

> > 2. Changed the comment description

> >

> >  drivers/tty/serial/samsung_tty.c | 62 +++++++++++++++++++++++++-------

> >  1 file changed, 50 insertions(+), 12 deletions(-)

> >

> > diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c

> > index 9fbc61151c2e..7dd648cb7237 100644

> > --- a/drivers/tty/serial/samsung_tty.c

> > +++ b/drivers/tty/serial/samsung_tty.c

> > @@ -284,8 +284,13 @@ static void s3c24xx_serial_stop_tx(struct uart_port *port)

> >  	struct s3c24xx_uart_dma *dma = ourport->dma;

> >  	struct circ_buf *xmit = &port->state->xmit;

> >  	struct dma_tx_state state;

> > +	struct device *dma_map_ops_dev = ourport->port.dev;

> >  	int count;

> >

> > +	/* Null pointer check for "dma" and "dma->tx_chan" */

> 

> The comment is not useful.

> 

I will remove these comments.
> > +	if (dma && dma->tx_chan)

> > +		dma_map_ops_dev = dma->tx_chan->device->dev;

> > +

> >  	if (!ourport->tx_enabled)

> >  		return;

> >

> > @@ -305,7 +310,7 @@ static void s3c24xx_serial_stop_tx(struct uart_port *port)

> >  		dmaengine_pause(dma->tx_chan);

> >  		dmaengine_tx_status(dma->tx_chan, dma->tx_cookie, &state);

> >  		dmaengine_terminate_all(dma->tx_chan);

> > -		dma_sync_single_for_cpu(ourport->port.dev,

> > +		dma_sync_single_for_cpu(dma_map_ops_dev,

> >  			dma->tx_transfer_addr, dma->tx_size, DMA_TO_DEVICE);

> >  		async_tx_ack(dma->tx_desc);

> >  		count = dma->tx_bytes_requested - state.residue;

> > @@ -331,14 +336,19 @@ static void s3c24xx_serial_tx_dma_complete(void *args)

> >  	struct circ_buf *xmit = &port->state->xmit;

> >  	struct s3c24xx_uart_dma *dma = ourport->dma;

> >  	struct dma_tx_state state;

> > +	struct device *dma_map_ops_dev = ourport->port.dev;

> >  	unsigned long flags;

> >  	int count;

> >

> > +	/* Null pointer check for "dma" and "dma->tx_chan" */

> > +	if (dma && dma->tx_chan)

> 

> 

> What is the "else" here? How is it even possible? Why having this "if"

> at all?

> 

As mentioned in my reply (https://lkml.org/lkml/2021/6/23/6) to
Robin's comments on patch version 2, I will remove these "if"
conditions and assign the DMA device pointers directly.

> > +		dma_map_ops_dev = dma->tx_chan->device->dev;

> > +

> >  	dmaengine_tx_status(dma->tx_chan, dma->tx_cookie, &state);

> >  	count = dma->tx_bytes_requested - state.residue;

> >  	async_tx_ack(dma->tx_desc);

> >

> > -	dma_sync_single_for_cpu(ourport->port.dev, dma->tx_transfer_addr,

> > +	dma_sync_single_for_cpu(dma_map_ops_dev, dma->tx_transfer_addr,

> >  				dma->tx_size, DMA_TO_DEVICE);

> >

> >  	spin_lock_irqsave(&port->lock, flags);

> > @@ -436,6 +446,11 @@ static int s3c24xx_serial_start_tx_dma(struct s3c24xx_uart_port *ourport,

> >  	struct uart_port *port = &ourport->port;

> >  	struct circ_buf *xmit = &port->state->xmit;

> >  	struct s3c24xx_uart_dma *dma = ourport->dma;

> > +	struct device *dma_map_ops_dev = ourport->port.dev;

> > +

> > +	/* Null pointer check for "dma" and "dma->tx_chan" */

> > +	if (dma && dma->tx_chan)

> > +		dma_map_ops_dev = dma->tx_chan->device->dev;

> >

> >  	if (ourport->tx_mode != S3C24XX_TX_DMA)

> >  		enable_tx_dma(ourport);

> > @@ -443,7 +458,7 @@ static int s3c24xx_serial_start_tx_dma(struct s3c24xx_uart_port *ourport,

> >  	dma->tx_size = count & ~(dma_get_cache_alignment() - 1);

> >  	dma->tx_transfer_addr = dma->tx_addr + xmit->tail;

> >

> > -	dma_sync_single_for_device(ourport->port.dev, dma->tx_transfer_addr,

> > +	dma_sync_single_for_device(dma_map_ops_dev, dma->tx_transfer_addr,

> >  				dma->tx_size, DMA_TO_DEVICE);

> >

> >  	dma->tx_desc = dmaengine_prep_slave_single(dma->tx_chan,

> > @@ -510,12 +525,17 @@ static void s3c24xx_uart_copy_rx_to_tty(struct s3c24xx_uart_port *ourport,

> >  		struct tty_port *tty, int count)

> >  {

> >  	struct s3c24xx_uart_dma *dma = ourport->dma;

> > +	struct device *dma_map_ops_dev = ourport->port.dev;

> >  	int copied;

> >

> > +	/* Null pointer check for "dma" and "dma->rx_chan" */

> > +	if (dma && dma->rx_chan)

> > +		dma_map_ops_dev = dma->rx_chan->device->dev;

> > +

> >  	if (!count)

> >  		return;

> >

> > -	dma_sync_single_for_cpu(ourport->port.dev, dma->rx_addr,

> > +	dma_sync_single_for_cpu(dma_map_ops_dev, dma->rx_addr,

> >  				dma->rx_size, DMA_FROM_DEVICE);

> >

> >  	ourport->port.icount.rx += count;

> > @@ -635,8 +655,13 @@ static void s3c24xx_serial_rx_dma_complete(void *args)

> >  static void s3c64xx_start_rx_dma(struct s3c24xx_uart_port *ourport)

> >  {

> >  	struct s3c24xx_uart_dma *dma = ourport->dma;

> > +	struct device *dma_map_ops_dev = ourport->port.dev;

> > +

> > +	/* Null pointer check for "dma" and "dma->rx_chan" */

> > +	if (dma && dma->rx_chan)

> > +		dma_map_ops_dev = dma->rx_chan->device->dev;

> >

> > -	dma_sync_single_for_device(ourport->port.dev, dma->rx_addr,

> > +	dma_sync_single_for_device(dma_map_ops_dev, dma->rx_addr,

> >  				dma->rx_size, DMA_FROM_DEVICE);

> >

> >  	dma->rx_desc = dmaengine_prep_slave_single(dma->rx_chan,

> > @@ -1045,6 +1070,7 @@ static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p)

> >  	struct s3c24xx_uart_dma	*dma = p->dma;

> >  	struct dma_slave_caps dma_caps;

> >  	const char *reason = NULL;

> > +	struct device *dma_map_ops_dev = p->port.dev;

> >  	int ret;

> >

> >  	/* Default slave configuration parameters */

> > @@ -1102,18 +1128,25 @@ static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p)

> >  		goto err_release_tx;

> >  	}

> >

> > -	dma->rx_addr = dma_map_single(p->port.dev, dma->rx_buf,

> > +	/* Null pointer check for "dma" and "dma->rx_chan" */

> > +	if (dma && dma->rx_chan)

> > +		dma_map_ops_dev = dma->rx_chan->device->dev;

> > +

> > +	dma->rx_addr = dma_map_single(dma_map_ops_dev, dma->rx_buf,

> >  				dma->rx_size, DMA_FROM_DEVICE);

> > -	if (dma_mapping_error(p->port.dev, dma->rx_addr)) {

> > +	if (dma_mapping_error(dma_map_ops_dev, dma->rx_addr)) {

> >  		reason = "DMA mapping error for RX buffer";

> >  		ret = -EIO;

> >  		goto err_free_rx;

> >  	}

> >

> > +	/* Null pointer check for "dma" and "dma->tx_chan" */

> > +	if (dma && dma->tx_chan)

> > +		dma_map_ops_dev = dma->tx_chan->device->dev;

> >  	/* TX buffer */

> > -	dma->tx_addr = dma_map_single(p->port.dev, p->port.state->xmit.buf,

> > +	dma->tx_addr = dma_map_single(dma_map_ops_dev, p->port.state->xmit.buf,

> >  				UART_XMIT_SIZE, DMA_TO_DEVICE);

> > -	if (dma_mapping_error(p->port.dev, dma->tx_addr)) {

> > +	if (dma_mapping_error(dma_map_ops_dev, dma->tx_addr)) {

> >  		reason = "DMA mapping error for TX buffer";

> >  		ret = -EIO;

> >  		goto err_unmap_rx;

> > @@ -1122,7 +1155,9 @@ static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p)

> >  	return 0;

> >

> >  err_unmap_rx:

> > -	dma_unmap_single(p->port.dev, dma->rx_addr, dma->rx_size,

> > +	if (dma->rx_chan)

> > +		dma_map_ops_dev = dma->rx_chan->device->dev;

> > +	dma_unmap_single(dma_map_ops_dev, dma->rx_addr, dma->rx_size,

> >  			 DMA_FROM_DEVICE);

> >  err_free_rx:

> >  	kfree(dma->rx_buf);

> > @@ -1139,10 +1174,12 @@ static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p)

> >  static void s3c24xx_serial_release_dma(struct s3c24xx_uart_port *p)

> >  {

> >  	struct s3c24xx_uart_dma	*dma = p->dma;

> > +	struct device *dma_map_ops_dev = p->port.dev;

> >

> >  	if (dma->rx_chan) {

> > +		dma_map_ops_dev = dma->rx_chan->device->dev;

> >  		dmaengine_terminate_all(dma->rx_chan);

> > -		dma_unmap_single(p->port.dev, dma->rx_addr,

> > +		dma_unmap_single(dma_map_ops_dev, dma->rx_addr,

> >  				dma->rx_size, DMA_FROM_DEVICE);

> >  		kfree(dma->rx_buf);

> >  		dma_release_channel(dma->rx_chan);

> > @@ -1150,8 +1187,9 @@ static void s3c24xx_serial_release_dma(struct s3c24xx_uart_port *p)

> >  	}

> >

> >  	if (dma->tx_chan) {

> > +		dma_map_ops_dev = dma->tx_chan->device->dev;

> >  		dmaengine_terminate_all(dma->tx_chan);

> > -		dma_unmap_single(p->port.dev, dma->tx_addr,

> > +		dma_unmap_single(dma_map_ops_dev, dma->tx_addr,

> >  				UART_XMIT_SIZE, DMA_TO_DEVICE);

> >  		dma_release_channel(dma->tx_chan);

> >  		dma->tx_chan = NULL;

> >

> 

> 

> Best regards,

> Krzysztof


Thanks & Regards,
Tamseel Shams

Patch

diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c
index 9fbc61151c2e..7dd648cb7237 100644
--- a/drivers/tty/serial/samsung_tty.c
+++ b/drivers/tty/serial/samsung_tty.c
@@ -284,8 +284,13 @@  static void s3c24xx_serial_stop_tx(struct uart_port *port)
 	struct s3c24xx_uart_dma *dma = ourport->dma;
 	struct circ_buf *xmit = &port->state->xmit;
 	struct dma_tx_state state;
+	struct device *dma_map_ops_dev = ourport->port.dev;
 	int count;
 
+	/* Null pointer check for "dma" and "dma->tx_chan" */
+	if (dma && dma->tx_chan)
+		dma_map_ops_dev = dma->tx_chan->device->dev;
+
 	if (!ourport->tx_enabled)
 		return;
 
@@ -305,7 +310,7 @@  static void s3c24xx_serial_stop_tx(struct uart_port *port)
 		dmaengine_pause(dma->tx_chan);
 		dmaengine_tx_status(dma->tx_chan, dma->tx_cookie, &state);
 		dmaengine_terminate_all(dma->tx_chan);
-		dma_sync_single_for_cpu(ourport->port.dev,
+		dma_sync_single_for_cpu(dma_map_ops_dev,
 			dma->tx_transfer_addr, dma->tx_size, DMA_TO_DEVICE);
 		async_tx_ack(dma->tx_desc);
 		count = dma->tx_bytes_requested - state.residue;
@@ -331,14 +336,19 @@  static void s3c24xx_serial_tx_dma_complete(void *args)
 	struct circ_buf *xmit = &port->state->xmit;
 	struct s3c24xx_uart_dma *dma = ourport->dma;
 	struct dma_tx_state state;
+	struct device *dma_map_ops_dev = ourport->port.dev;
 	unsigned long flags;
 	int count;
 
+	/* Null pointer check for "dma" and "dma->tx_chan" */
+	if (dma && dma->tx_chan)
+		dma_map_ops_dev = dma->tx_chan->device->dev;
+
 	dmaengine_tx_status(dma->tx_chan, dma->tx_cookie, &state);
 	count = dma->tx_bytes_requested - state.residue;
 	async_tx_ack(dma->tx_desc);
 
-	dma_sync_single_for_cpu(ourport->port.dev, dma->tx_transfer_addr,
+	dma_sync_single_for_cpu(dma_map_ops_dev, dma->tx_transfer_addr,
 				dma->tx_size, DMA_TO_DEVICE);
 
 	spin_lock_irqsave(&port->lock, flags);
@@ -436,6 +446,11 @@  static int s3c24xx_serial_start_tx_dma(struct s3c24xx_uart_port *ourport,
 	struct uart_port *port = &ourport->port;
 	struct circ_buf *xmit = &port->state->xmit;
 	struct s3c24xx_uart_dma *dma = ourport->dma;
+	struct device *dma_map_ops_dev = ourport->port.dev;
+
+	/* Null pointer check for "dma" and "dma->tx_chan" */
+	if (dma && dma->tx_chan)
+		dma_map_ops_dev = dma->tx_chan->device->dev;
 
 	if (ourport->tx_mode != S3C24XX_TX_DMA)
 		enable_tx_dma(ourport);
@@ -443,7 +458,7 @@  static int s3c24xx_serial_start_tx_dma(struct s3c24xx_uart_port *ourport,
 	dma->tx_size = count & ~(dma_get_cache_alignment() - 1);
 	dma->tx_transfer_addr = dma->tx_addr + xmit->tail;
 
-	dma_sync_single_for_device(ourport->port.dev, dma->tx_transfer_addr,
+	dma_sync_single_for_device(dma_map_ops_dev, dma->tx_transfer_addr,
 				dma->tx_size, DMA_TO_DEVICE);
 
 	dma->tx_desc = dmaengine_prep_slave_single(dma->tx_chan,
@@ -510,12 +525,17 @@  static void s3c24xx_uart_copy_rx_to_tty(struct s3c24xx_uart_port *ourport,
 		struct tty_port *tty, int count)
 {
 	struct s3c24xx_uart_dma *dma = ourport->dma;
+	struct device *dma_map_ops_dev = ourport->port.dev;
 	int copied;
 
+	/* Null pointer check for "dma" and "dma->rx_chan" */
+	if (dma && dma->rx_chan)
+		dma_map_ops_dev = dma->rx_chan->device->dev;
+
 	if (!count)
 		return;
 
-	dma_sync_single_for_cpu(ourport->port.dev, dma->rx_addr,
+	dma_sync_single_for_cpu(dma_map_ops_dev, dma->rx_addr,
 				dma->rx_size, DMA_FROM_DEVICE);
 
 	ourport->port.icount.rx += count;
@@ -635,8 +655,13 @@  static void s3c24xx_serial_rx_dma_complete(void *args)
 static void s3c64xx_start_rx_dma(struct s3c24xx_uart_port *ourport)
 {
 	struct s3c24xx_uart_dma *dma = ourport->dma;
+	struct device *dma_map_ops_dev = ourport->port.dev;
+
+	/* Null pointer check for "dma" and "dma->rx_chan" */
+	if (dma && dma->rx_chan)
+		dma_map_ops_dev = dma->rx_chan->device->dev;
 
-	dma_sync_single_for_device(ourport->port.dev, dma->rx_addr,
+	dma_sync_single_for_device(dma_map_ops_dev, dma->rx_addr,
 				dma->rx_size, DMA_FROM_DEVICE);
 
 	dma->rx_desc = dmaengine_prep_slave_single(dma->rx_chan,
@@ -1045,6 +1070,7 @@  static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p)
 	struct s3c24xx_uart_dma	*dma = p->dma;
 	struct dma_slave_caps dma_caps;
 	const char *reason = NULL;
+	struct device *dma_map_ops_dev = p->port.dev;
 	int ret;
 
 	/* Default slave configuration parameters */
@@ -1102,18 +1128,25 @@  static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p)
 		goto err_release_tx;
 	}
 
-	dma->rx_addr = dma_map_single(p->port.dev, dma->rx_buf,
+	/* Null pointer check for "dma" and "dma->rx_chan" */
+	if (dma && dma->rx_chan)
+		dma_map_ops_dev = dma->rx_chan->device->dev;
+
+	dma->rx_addr = dma_map_single(dma_map_ops_dev, dma->rx_buf,
 				dma->rx_size, DMA_FROM_DEVICE);
-	if (dma_mapping_error(p->port.dev, dma->rx_addr)) {
+	if (dma_mapping_error(dma_map_ops_dev, dma->rx_addr)) {
 		reason = "DMA mapping error for RX buffer";
 		ret = -EIO;
 		goto err_free_rx;
 	}
 
+	/* Null pointer check for "dma" and "dma->tx_chan" */
+	if (dma && dma->tx_chan)
+		dma_map_ops_dev = dma->tx_chan->device->dev;
 	/* TX buffer */
-	dma->tx_addr = dma_map_single(p->port.dev, p->port.state->xmit.buf,
+	dma->tx_addr = dma_map_single(dma_map_ops_dev, p->port.state->xmit.buf,
 				UART_XMIT_SIZE, DMA_TO_DEVICE);
-	if (dma_mapping_error(p->port.dev, dma->tx_addr)) {
+	if (dma_mapping_error(dma_map_ops_dev, dma->tx_addr)) {
 		reason = "DMA mapping error for TX buffer";
 		ret = -EIO;
 		goto err_unmap_rx;
@@ -1122,7 +1155,9 @@  static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p)
 	return 0;
 
 err_unmap_rx:
-	dma_unmap_single(p->port.dev, dma->rx_addr, dma->rx_size,
+	if (dma->rx_chan)
+		dma_map_ops_dev = dma->rx_chan->device->dev;
+	dma_unmap_single(dma_map_ops_dev, dma->rx_addr, dma->rx_size,
 			 DMA_FROM_DEVICE);
 err_free_rx:
 	kfree(dma->rx_buf);
@@ -1139,10 +1174,12 @@  static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p)
 static void s3c24xx_serial_release_dma(struct s3c24xx_uart_port *p)
 {
 	struct s3c24xx_uart_dma	*dma = p->dma;
+	struct device *dma_map_ops_dev = p->port.dev;
 
 	if (dma->rx_chan) {
+		dma_map_ops_dev = dma->rx_chan->device->dev;
 		dmaengine_terminate_all(dma->rx_chan);
-		dma_unmap_single(p->port.dev, dma->rx_addr,
+		dma_unmap_single(dma_map_ops_dev, dma->rx_addr,
 				dma->rx_size, DMA_FROM_DEVICE);
 		kfree(dma->rx_buf);
 		dma_release_channel(dma->rx_chan);
@@ -1150,8 +1187,9 @@  static void s3c24xx_serial_release_dma(struct s3c24xx_uart_port *p)
 	}
 
 	if (dma->tx_chan) {
+		dma_map_ops_dev = dma->tx_chan->device->dev;
 		dmaengine_terminate_all(dma->tx_chan);
-		dma_unmap_single(p->port.dev, dma->tx_addr,
+		dma_unmap_single(dma_map_ops_dev, dma->tx_addr,
 				UART_XMIT_SIZE, DMA_TO_DEVICE);
 		dma_release_channel(dma->tx_chan);
 		dma->tx_chan = NULL;