
[v10,30/30] videobuf2: use sgtable-based scatterlist wrappers

Message ID 20200904131711.12950-31-m.szyprowski@samsung.com
State Accepted
Commit 8b7c0280ab03ddf465c948cb510debb85eb4e8ac
Series DRM: fix struct sg_table nents vs. orig_nents misuse

Commit Message

Marek Szyprowski Sept. 4, 2020, 1:17 p.m. UTC
Use recently introduced common wrappers operating directly on the struct
sg_table objects and scatterlist page iterators to make the code a bit
more compact, robust, easier to follow and copy/paste safe.

No functional change, because the code already properly did all the
scatterlist related calls.

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>

Reviewed-by: Robin Murphy <robin.murphy@arm.com>

---
 .../common/videobuf2/videobuf2-dma-contig.c   | 34 ++++++++-----------
 .../media/common/videobuf2/videobuf2-dma-sg.c | 32 +++++++----------
 .../common/videobuf2/videobuf2-vmalloc.c      | 12 +++----
 3 files changed, 31 insertions(+), 47 deletions(-)

-- 
2.17.1
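
For reference, the conversion applied throughout this patch follows roughly the shape below. This is an illustrative sketch only (total_len_old() and total_len_new() are made-up names), not an excerpt from the diff:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Before: the caller has to remember that DMA-mapped entries are walked
 * with sgt->nents, while CPU-side calls take sgt->orig_nents. */
static unsigned long total_len_old(struct device *dev, struct sg_table *sgt,
                                   enum dma_data_direction dir)
{
        struct scatterlist *s;
        unsigned long size = 0;
        unsigned int i;

        dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
        for_each_sg(sgt->sgl, s, sgt->nents, i)
                size += sg_dma_len(s);
        return size;
}

/* After: the sgtable wrappers pick the right entry count internally. */
static unsigned long total_len_new(struct device *dev, struct sg_table *sgt,
                                   enum dma_data_direction dir)
{
        struct scatterlist *s;
        unsigned long size = 0;
        unsigned int i;

        dma_sync_sgtable_for_device(dev, sgt, dir);
        for_each_sgtable_dma_sg(sgt, s, i)
                size += sg_dma_len(s);
        return size;
}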


Comments

Tomasz Figa Sept. 7, 2020, 1:07 p.m. UTC | #1
Hi Marek,

On Fri, Sep 4, 2020 at 3:35 PM Marek Szyprowski
<m.szyprowski@samsung.com> wrote:
>
> Use recently introduced common wrappers operating directly on the struct
> sg_table objects and scatterlist page iterators to make the code a bit
> more compact, robust, easier to follow and copy/paste safe.
>
> No functional change, because the code already properly did all the
> scatterlist related calls.
>
> Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
> Reviewed-by: Robin Murphy <robin.murphy@arm.com>
> ---
>  .../common/videobuf2/videobuf2-dma-contig.c   | 34 ++++++++-----------
>  .../media/common/videobuf2/videobuf2-dma-sg.c | 32 +++++++----------
>  .../common/videobuf2/videobuf2-vmalloc.c      | 12 +++----
>  3 files changed, 31 insertions(+), 47 deletions(-)
>

Thanks for the patch! Please see my comments inline.

> diff --git a/drivers/media/common/videobuf2/videobuf2-dma-contig.c b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
> index ec3446cc45b8..1b242d844dde 100644
> --- a/drivers/media/common/videobuf2/videobuf2-dma-contig.c
> +++ b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
> ...
> @@ -310,9 +309,8 @@ static struct sg_table *vb2_dc_dmabuf_ops_map(
>          * mapping to the client with new direction, no cache sync
>          * required see comment in vb2_dc_dmabuf_ops_detach()
>          */
> -       sgt->nents = dma_map_sg_attrs(db_attach->dev, sgt->sgl, sgt->orig_nents,
> -                                     dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
> -       if (!sgt->nents) {
> +       if (dma_map_sgtable(db_attach->dev, sgt, dma_dir,
> +                           DMA_ATTR_SKIP_CPU_SYNC)) {
>                 pr_err("failed to map scatterlist\n");
>                 mutex_unlock(lock);
>                 return ERR_PTR(-EIO);


As opposed to dma_map_sg_attrs(), dma_map_sgtable() now returns an
error code on its own. Is it expected to ignore it and return -EIO?

> ...
> @@ -553,9 +551,8 @@ static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
>          * No need to sync to the device, this will happen later when the
>          * prepare() memop is called.
>          */
> -       sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
> -                                     buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
> -       if (sgt->nents <= 0) {
> +       if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
> +                           DMA_ATTR_SKIP_CPU_SYNC)) {
>                 pr_err("failed to map scatterlist\n");
>                 ret = -EIO;


Ditto.

>                 goto fail_sgt_init;
> ...
> diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
> index 0a40e00f0d7e..0dd3b19025e0 100644
> --- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c
> +++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
> @@ -148,9 +148,8 @@ static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
>          * No need to sync to the device, this will happen later when the
>          * prepare() memop is called.
>          */
> -       sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
> -                                     buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
> -       if (!sgt->nents)
> +       if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
> +                           DMA_ATTR_SKIP_CPU_SYNC))
>                 goto fail_map;

>


Ditto.

>         buf->handler.refcount = &buf->refcount;
> ...
> @@ -256,9 +254,8 @@ static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr,
>          * No need to sync to the device, this will happen later when the
>          * prepare() memop is called.
>          */
> -       sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
> -                                     buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
> -       if (!sgt->nents)
> +       if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
> +                           DMA_ATTR_SKIP_CPU_SYNC))
>                 goto userptr_fail_map;

>


Ditto.

>         return buf;
> ...
> @@ -434,15 +429,12 @@ static struct sg_table *vb2_dma_sg_dmabuf_ops_map(
> ...
>         /* mapping to the client with new direction */
> -       sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
> -                               dma_dir);
> -       if (!sgt->nents) {
> +       if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
>                 pr_err("failed to map scatterlist\n");
>                 mutex_unlock(lock);
>                 return ERR_PTR(-EIO);


Ditto.

> diff --git a/drivers/media/common/videobuf2/videobuf2-vmalloc.c b/drivers/media/common/videobuf2/videobuf2-vmalloc.c
> index c66fda4a65e4..bf5ac63a5742 100644
> --- a/drivers/media/common/videobuf2/videobuf2-vmalloc.c
> +++ b/drivers/media/common/videobuf2/videobuf2-vmalloc.c
> ...
> @@ -285,15 +284,12 @@ static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
> ...
>         /* mapping to the client with new direction */
> -       sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
> -                               dma_dir);
> -       if (!sgt->nents) {
> +       if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
>                 pr_err("failed to map scatterlist\n");
>                 mutex_unlock(lock);
>                 return ERR_PTR(-EIO);


Ditto.

Best regards,
Tomasz
Marek Szyprowski Sept. 7, 2020, 2:02 p.m. UTC | #2
Hi Tomasz,

On 07.09.2020 15:07, Tomasz Figa wrote:
> On Fri, Sep 4, 2020 at 3:35 PM Marek Szyprowski
> <m.szyprowski@samsung.com> wrote:
>> Use recently introduced common wrappers operating directly on the struct
>> sg_table objects and scatterlist page iterators to make the code a bit
>> more compact, robust, easier to follow and copy/paste safe.
>> ...
>> -       sgt->nents = dma_map_sg_attrs(db_attach->dev, sgt->sgl, sgt->orig_nents,
>> -                                     dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
>> -       if (!sgt->nents) {
>> +       if (dma_map_sgtable(db_attach->dev, sgt, dma_dir,
>> +                           DMA_ATTR_SKIP_CPU_SYNC)) {
>>                  pr_err("failed to map scatterlist\n");
>>                  mutex_unlock(lock);
>>                  return ERR_PTR(-EIO);
> As opposed to dma_map_sg_attrs(), dma_map_sgtable() now returns an
> error code on its own. Is it expected to ignore it and return -EIO?


Those errors are more or less propagated to userspace, and -EIO has
already been widely documented in the V4L2 documentation as the error
code for most of the V4L2 ioctls. I don't want to change it. A possible
-EINVAL returned from dma_map_sgtable() was just one of the 'generic'
error codes, not very descriptive in that case. Probably the main
problem here is that dma_map_sg() and friends don't return any error
codes...
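
To make the difference between the two conventions concrete, a
simplified sketch (map_old() and map_new() are made-up names, not vb2
functions):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

/* dma_map_sg_attrs() reports failure by returning 0 mapped entries, so
 * the caller has to pick its own error code; the vb2 code uses -EIO. */
static int map_old(struct device *dev, struct sg_table *sgt,
                   enum dma_data_direction dir)
{
        sgt->nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents,
                                      dir, DMA_ATTR_SKIP_CPU_SYNC);
        return sgt->nents ? 0 : -EIO;
}

/* dma_map_sgtable() returns 0 or a negative errno (e.g. -EINVAL), which
 * the caller may propagate or, as this patch does, replace with -EIO. */
static int map_new(struct device *dev, struct sg_table *sgt,
                   enum dma_data_direction dir)
{
        int ret = dma_map_sgtable(dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC);

        return ret ? -EIO : 0;
}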

 > ...


Best regards
-- 
Marek Szyprowski, PhD
Samsung R&D Institute Poland

Tomasz Figa Sept. 7, 2020, 3:51 p.m. UTC | #3
On Mon, Sep 7, 2020 at 4:02 PM Marek Szyprowski
<m.szyprowski@samsung.com> wrote:
>
> Hi Tomasz,
>
> On 07.09.2020 15:07, Tomasz Figa wrote:
> > On Fri, Sep 4, 2020 at 3:35 PM Marek Szyprowski
> > <m.szyprowski@samsung.com> wrote:
> > ...
> >> -       sgt->nents = dma_map_sg_attrs(db_attach->dev, sgt->sgl, sgt->orig_nents,
> >> -                                     dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
> >> -       if (!sgt->nents) {
> >> +       if (dma_map_sgtable(db_attach->dev, sgt, dma_dir,
> >> +                           DMA_ATTR_SKIP_CPU_SYNC)) {
> >>                  pr_err("failed to map scatterlist\n");
> >>                  mutex_unlock(lock);
> >>                  return ERR_PTR(-EIO);
> > As opposed to dma_map_sg_attrs(), dma_map_sgtable() now returns an
> > error code on its own. Is it expected to ignore it and return -EIO?
>
> Those errors are more or less propagated to userspace, and -EIO has
> already been widely documented in the V4L2 documentation as the error
> code for most of the V4L2 ioctls. I don't want to change it. A possible
> -EINVAL returned from dma_map_sgtable() was just one of the 'generic'
> error codes, not very descriptive in that case. Probably the main
> problem here is that dma_map_sg() and friends don't return any error
> codes...


True for the alloc/get_*() callbacks, but the dmabuf_ops_map() ones
are used for the in-kernel DMA-buf exporter ops, called by DMA-buf
importers.

As a side note, returning user-facing error codes from deep internals
of vb2 and having userspace rely on particular values sounds quite
fragile. For example, I see vb2_dc_attach_dmabuf() returning a value
coming from dma_buf_attach() directly and __prepare_dmabuf()
propagating it back to __buf_prepare(), which can just return that
back to userspace. I guess we might have to do some follow-up work
to clean it up.
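
For instance, such a follow-up could let the exporter side forward the
errno instead of hard-coding -EIO. A hypothetical sketch, not part of
this series (map_for_importer() is a made-up helper):

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Hypothetical dmabuf_ops_map-style helper that passes the errno from
 * dma_map_sgtable() back to the in-kernel DMA-buf importer. */
static struct sg_table *map_for_importer(struct device *dev,
                                         struct sg_table *sgt,
                                         enum dma_data_direction dir)
{
        int ret;

        ret = dma_map_sgtable(dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC);
        if (ret)
                return ERR_PTR(ret);    /* e.g. -EINVAL or -ENOMEM */

        return sgt;
}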

Best regards,
Tomasz
Hans Verkuil Sept. 10, 2020, 9:17 a.m. UTC | #4
On 04/09/2020 15:17, Marek Szyprowski wrote:
> Use recently introduced common wrappers operating directly on the struct
> sg_table objects and scatterlist page iterators to make the code a bit
> more compact, robust, easier to follow and copy/paste safe.
>
> No functional change, because the code already properly did all the
> scatterlist related calls.
>
> Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
> Reviewed-by: Robin Murphy <robin.murphy@arm.com>


Acked-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>


Note that I agree with Marek to keep returning -EIO. If we want to propagate
low-level errors, then that should be done in a separate patch. But I think EIO
is fine.

Regards,

	Hans

> ...


Tomasz Figa Sept. 10, 2020, 9:47 a.m. UTC | #5
On Thu, Sep 10, 2020 at 11:17 AM Hans Verkuil <hverkuil@xs4all.nl> wrote:
>
> On 04/09/2020 15:17, Marek Szyprowski wrote:
> > Use recently introduced common wrappers operating directly on the struct
> > sg_table objects and scatterlist page iterators to make the code a bit
> > more compact, robust, easier to follow and copy/paste safe.
> >
> > No functional change, because the code already properly did all the
> > scatterlist related calls.
> >
> > Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
> > Reviewed-by: Robin Murphy <robin.murphy@arm.com>
>
> Acked-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
>
> Note that I agree with Marek to keep returning -EIO. If we want to propagate
> low-level errors, then that should be done in a separate patch. But I think EIO
> is fine.


As I mentioned, there are 2 different cases here - UAPI and kAPI. I
agree that we should keep -EIO for UAPI, but kAPI is another story.
But if we're convinced that -EIO is also fine for the latter, I'm fine
with that.

Best regards,
Tomasz

> ...


Patch

diff --git a/drivers/media/common/videobuf2/videobuf2-dma-contig.c b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
index ec3446cc45b8..1b242d844dde 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-contig.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
@@ -58,10 +58,10 @@  static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
 	unsigned int i;
 	unsigned long size = 0;
 
-	for_each_sg(sgt->sgl, s, sgt->nents, i) {
+	for_each_sgtable_dma_sg(sgt, s, i) {
 		if (sg_dma_address(s) != expected)
 			break;
-		expected = sg_dma_address(s) + sg_dma_len(s);
+		expected += sg_dma_len(s);
 		size += sg_dma_len(s);
 	}
 	return size;
@@ -103,8 +103,7 @@  static void vb2_dc_prepare(void *buf_priv)
 	if (!sgt)
 		return;
 
-	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
-			       buf->dma_dir);
+	dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
 }
 
 static void vb2_dc_finish(void *buf_priv)
@@ -115,7 +114,7 @@  static void vb2_dc_finish(void *buf_priv)
 	if (!sgt)
 		return;
 
-	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+	dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
 }
 
 /*********************************************/
@@ -275,8 +274,8 @@  static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
 		 * memory locations do not require any explicit cache
 		 * maintenance prior or after being used by the device.
 		 */
-		dma_unmap_sg_attrs(db_attach->dev, sgt->sgl, sgt->orig_nents,
-				   attach->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir,
+				  DMA_ATTR_SKIP_CPU_SYNC);
 	sg_free_table(sgt);
 	kfree(attach);
 	db_attach->priv = NULL;
@@ -301,8 +300,8 @@  static struct sg_table *vb2_dc_dmabuf_ops_map(
 
 	/* release any previous cache */
 	if (attach->dma_dir != DMA_NONE) {
-		dma_unmap_sg_attrs(db_attach->dev, sgt->sgl, sgt->orig_nents,
-				   attach->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir,
+				  DMA_ATTR_SKIP_CPU_SYNC);
 		attach->dma_dir = DMA_NONE;
 	}
 
@@ -310,9 +309,8 @@  static struct sg_table *vb2_dc_dmabuf_ops_map(
 	 * mapping to the client with new direction, no cache sync
 	 * required see comment in vb2_dc_dmabuf_ops_detach()
 	 */
-	sgt->nents = dma_map_sg_attrs(db_attach->dev, sgt->sgl, sgt->orig_nents,
-				      dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
-	if (!sgt->nents) {
+	if (dma_map_sgtable(db_attach->dev, sgt, dma_dir,
+			    DMA_ATTR_SKIP_CPU_SYNC)) {
 		pr_err("failed to map scatterlist\n");
 		mutex_unlock(lock);
 		return ERR_PTR(-EIO);
@@ -455,8 +453,8 @@  static void vb2_dc_put_userptr(void *buf_priv)
 		 * No need to sync to CPU, it's already synced to the CPU
 		 * since the finish() memop will have been called before this.
 		 */
-		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
-				   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+		dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
+				  DMA_ATTR_SKIP_CPU_SYNC);
 		pages = frame_vector_pages(buf->vec);
 		/* sgt should exist only if vector contains pages... */
 		BUG_ON(IS_ERR(pages));
@@ -553,9 +551,8 @@  static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
 	 * No need to sync to the device, this will happen later when the
 	 * prepare() memop is called.
 	 */
-	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
-				      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
-	if (sgt->nents <= 0) {
+	if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
+			    DMA_ATTR_SKIP_CPU_SYNC)) {
 		pr_err("failed to map scatterlist\n");
 		ret = -EIO;
 		goto fail_sgt_init;
@@ -577,8 +574,7 @@  static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
 	return buf;
 
 fail_map_sg:
-	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
-			   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+	dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
 
 fail_sgt_init:
 	sg_free_table(sgt);
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
index 0a40e00f0d7e..0dd3b19025e0 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
@@ -148,9 +148,8 @@  static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
 	 * No need to sync to the device, this will happen later when the
 	 * prepare() memop is called.
 	 */
-	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
-				      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
-	if (!sgt->nents)
+	if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
+			    DMA_ATTR_SKIP_CPU_SYNC))
 		goto fail_map;
 
 	buf->handler.refcount = &buf->refcount;
@@ -186,8 +185,8 @@  static void vb2_dma_sg_put(void *buf_priv)
 	if (refcount_dec_and_test(&buf->refcount)) {
 		dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
 			buf->num_pages);
-		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
-				   buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+		dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
+				  DMA_ATTR_SKIP_CPU_SYNC);
 		if (buf->vaddr)
 			vm_unmap_ram(buf->vaddr, buf->num_pages);
 		sg_free_table(buf->dma_sgt);
@@ -204,8 +203,7 @@  static void vb2_dma_sg_prepare(void *buf_priv)
 	struct vb2_dma_sg_buf *buf = buf_priv;
 	struct sg_table *sgt = buf->dma_sgt;
 
-	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
-			       buf->dma_dir);
+	dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
 }
 
 static void vb2_dma_sg_finish(void *buf_priv)
@@ -213,7 +211,7 @@  static void vb2_dma_sg_finish(void *buf_priv)
 	struct vb2_dma_sg_buf *buf = buf_priv;
 	struct sg_table *sgt = buf->dma_sgt;
 
-	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+	dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
 }
 
 static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr,
@@ -256,9 +254,8 @@  static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr,
 	 * No need to sync to the device, this will happen later when the
 	 * prepare() memop is called.
 	 */
-	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
-				      buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
-	if (!sgt->nents)
+	if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
+			    DMA_ATTR_SKIP_CPU_SYNC))
 		goto userptr_fail_map;
 
 	return buf;
@@ -284,8 +281,7 @@  static void vb2_dma_sg_put_userptr(void *buf_priv)
 
 	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
 	       __func__, buf->num_pages);
-	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir,
-			   DMA_ATTR_SKIP_CPU_SYNC);
+	dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
 	if (buf->vaddr)
 		vm_unmap_ram(buf->vaddr, buf->num_pages);
 	sg_free_table(buf->dma_sgt);
@@ -408,8 +404,7 @@  static void vb2_dma_sg_dmabuf_ops_detach(struct dma_buf *dbuf,
 
 	/* release the scatterlist cache */
 	if (attach->dma_dir != DMA_NONE)
-		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
-			attach->dma_dir);
+		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
 	sg_free_table(sgt);
 	kfree(attach);
 	db_attach->priv = NULL;
@@ -434,15 +429,12 @@  static struct sg_table *vb2_dma_sg_dmabuf_ops_map(
 
 	/* release any previous cache */
 	if (attach->dma_dir != DMA_NONE) {
-		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
-			attach->dma_dir);
+		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
 		attach->dma_dir = DMA_NONE;
 	}
 
 	/* mapping to the client with new direction */
-	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
-				dma_dir);
-	if (!sgt->nents) {
+	if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
 		pr_err("failed to map scatterlist\n");
 		mutex_unlock(lock);
 		return ERR_PTR(-EIO);
diff --git a/drivers/media/common/videobuf2/videobuf2-vmalloc.c b/drivers/media/common/videobuf2/videobuf2-vmalloc.c
index c66fda4a65e4..bf5ac63a5742 100644
--- a/drivers/media/common/videobuf2/videobuf2-vmalloc.c
+++ b/drivers/media/common/videobuf2/videobuf2-vmalloc.c
@@ -229,7 +229,7 @@  static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf,
 		kfree(attach);
 		return ret;
 	}
-	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+	for_each_sgtable_sg(sgt, sg, i) {
 		struct page *page = vmalloc_to_page(vaddr);
 
 		if (!page) {
@@ -259,8 +259,7 @@  static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
 
 	/* release the scatterlist cache */
 	if (attach->dma_dir != DMA_NONE)
-		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
-			attach->dma_dir);
+		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
 	sg_free_table(sgt);
 	kfree(attach);
 	db_attach->priv = NULL;
@@ -285,15 +284,12 @@  static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
 
 	/* release any previous cache */
 	if (attach->dma_dir != DMA_NONE) {
-		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
-			attach->dma_dir);
+		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
 		attach->dma_dir = DMA_NONE;
 	}
 
 	/* mapping to the client with new direction */
-	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
-				dma_dir);
-	if (!sgt->nents) {
+	if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
 		pr_err("failed to map scatterlist\n");
 		mutex_unlock(lock);
 		return ERR_PTR(-EIO);