
[v5,05/38] drm: prime: use sgtable iterators in drm_prime_sg_to_page_addr_arrays()

Message ID 20200513133245.6408-5-m.szyprowski@samsung.com
State New
Series DRM: fix struct sg_table nents vs. orig_nents misuse

Commit Message

Marek Szyprowski May 13, 2020, 1:32 p.m. UTC
Replace the current hand-crafted code for extracting pages and DMA
addresses from the given scatterlist with much more robust code based
on the generic scatterlist iterators and the recently introduced
sg_table-based wrappers. The resulting code is simple and easy to
understand, so the comment describing the old code is no longer
needed.

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>

---
For more information, see '[PATCH v5 00/38] DRM: fix struct sg_table nents
vs. orig_nents misuse' thread:
https://lore.kernel.org/linux-iommu/20200513132114.6046-1-m.szyprowski@samsung.com/T/
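
For reference, the sg_table iterator pattern this patch switches to can be
sketched roughly as below. This is an illustration only, not part of the
change; count_cpu_pages() and count_dma_pages() are made-up helper names.

	#include <linux/scatterlist.h>

	/* CPU side: for_each_sgtable_page() advances one PAGE_SIZE chunk at a
	 * time over all sgt->orig_nents entries; sg_page_iter_page() yields
	 * the struct page for the current step. */
	static unsigned int count_cpu_pages(struct sg_table *sgt)
	{
		struct sg_page_iter piter;
		unsigned int n = 0;

		for_each_sgtable_page(sgt, &piter, 0)
			n++;
		return n;
	}

	/* DMA side: for_each_sgtable_dma_page() also advances per PAGE_SIZE,
	 * but only over the sgt->nents entries created by dma_map_sgtable();
	 * sg_page_iter_dma_address() yields the dma_addr_t for each step. */
	static unsigned int count_dma_pages(struct sg_table *sgt)
	{
		struct sg_dma_page_iter diter;
		unsigned int n = 0;

		for_each_sgtable_dma_page(sgt, &diter, 0)
			n++;
		return n;
	}
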
---
 drivers/gpu/drm/drm_prime.c | 47 ++++++++++++++-------------------------------
 1 file changed, 14 insertions(+), 33 deletions(-)

-- 
1.9.1

Comments

Alex Goins Sept. 21, 2020, 11:15 p.m. UTC | #1
Tested-by: Alex Goins <agoins@nvidia.com>


This change fixes a regression with drm_prime_sg_to_page_addr_arrays() and
AMDGPU in v5.9.

Commit 39913934 similarly revamped AMDGPU to use sgtable helper functions. When
it changed from dma_map_sg_attrs() to dma_map_sgtable(), as a side effect it
started correctly updating sgt->nents to the return value of dma_map_sg_attrs().
However, drm_prime_sg_to_page_addr_arrays() incorrectly uses sgt->nents to
iterate over pages, rather than sgt->orig_nents, resulting in it now returning
the incorrect number of pages on AMDGPU.
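
As a rough, self-contained illustration of that distinction (a sketch only;
demo_nents_vs_orig_nents(), dev and the DMA direction are hypothetical):
dma_map_sgtable() stores the mapped entry count in sgt->nents, while
sgt->orig_nents keeps describing the CPU-side list, so a page walk bounded by
nents can cover fewer pages than the buffer really has whenever an IOMMU
merges entries:

	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>

	static int demo_nents_vs_orig_nents(struct device *dev, struct sg_table *sgt)
	{
		struct scatterlist *sg;
		unsigned int i, cpu_pages = 0, nents_bounded = 0;
		int diff, ret;

		ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
		if (ret)
			return ret;

		/* Correct CPU-page walk: bounded by sgt->orig_nents. */
		for_each_sgtable_sg(sgt, sg, i)
			cpu_pages += sg->length >> PAGE_SHIFT;

		/* Pre-fix pattern: bounded by sgt->nents, so it can undercount
		 * once the IOMMU has coalesced entries and nents < orig_nents. */
		for_each_sg(sgt->sgl, sg, sgt->nents, i)
			nents_bounded += sg->length >> PAGE_SHIFT;

		diff = cpu_pages - nents_bounded;
		dma_unmap_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);

		/* A non-zero result here is exactly the miscount seen on AMDGPU. */
		return diff;
	}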

I had written a patch that changes drm_prime_sg_to_page_addr_arrays() to use
for_each_sgtable_sg() instead of for_each_sg(), iterating using sgt->orig_nents:

-       for_each_sg(sgt->sgl, sg, sgt->nents, count) {
+       for_each_sgtable_sg(sgt, sg, count) {

This patch takes it further, but still has the effect of fixing the number of
pages that drm_prime_sg_to_page_addr_arrays() returns. Something like this
should be included in v5.9 to prevent a regression with AMDGPU.

Thanks,
Alex

On Wed, 13 May 2020, Marek Szyprowski wrote:

> Replace the current hand-crafted code for extracting pages and DMA
> addresses from the given scatterlist by the much more robust
> code based on the generic scatterlist iterators and recently
> introduced sg_table-based wrappers. The resulting code is simple and
> easy to understand, so the comment describing the old code is no
> longer needed.
> 
> Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
> ---
> For more information, see '[PATCH v5 00/38] DRM: fix struct sg_table nents
> vs. orig_nents misuse' thread:
> https://lore.kernel.org/linux-iommu/20200513132114.6046-1-m.szyprowski@samsung.com/T/
> ---
>  drivers/gpu/drm/drm_prime.c | 47 ++++++++++++++-------------------------------
>  1 file changed, 14 insertions(+), 33 deletions(-)
> 
> diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
> index 1d2e5fe..dfdf4d4 100644
> --- a/drivers/gpu/drm/drm_prime.c
> +++ b/drivers/gpu/drm/drm_prime.c
> @@ -985,45 +985,26 @@ struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
>  int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
>  				     dma_addr_t *addrs, int max_entries)
>  {
> -	unsigned count;
> -	struct scatterlist *sg;
> -	struct page *page;
> -	u32 page_len, page_index;
> -	dma_addr_t addr;
> -	u32 dma_len, dma_index;
> +	struct sg_dma_page_iter dma_iter;
> +	struct sg_page_iter page_iter;
> +	struct page **p = pages;
> +	dma_addr_t *a = addrs;
>  
> -	/*
> -	 * Scatterlist elements contains both pages and DMA addresses, but
> -	 * one shoud not assume 1:1 relation between them. The sg->length is
> -	 * the size of the physical memory chunk described by the sg->page,
> -	 * while sg_dma_len(sg) is the size of the DMA (IO virtual) chunk
> -	 * described by the sg_dma_address(sg).
> -	 */
> -	page_index = 0;
> -	dma_index = 0;
> -	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
> -		page_len = sg->length;
> -		page = sg_page(sg);
> -		dma_len = sg_dma_len(sg);
> -		addr = sg_dma_address(sg);
> -
> -		while (pages && page_len > 0) {
> -			if (WARN_ON(page_index >= max_entries))
> +	if (pages) {
> +		for_each_sgtable_page(sgt, &page_iter, 0) {
> +			if (p - pages >= max_entries)
>  				return -1;
> -			pages[page_index] = page;
> -			page++;
> -			page_len -= PAGE_SIZE;
> -			page_index++;
> +			*p++ = sg_page_iter_page(&page_iter);
>  		}
> -		while (addrs && dma_len > 0) {
> -			if (WARN_ON(dma_index >= max_entries))
> +	}
> +	if (addrs) {
> +		for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
> +			if (a - addrs >= max_entries)
>  				return -1;
> -			addrs[dma_index] = addr;
> -			addr += PAGE_SIZE;
> -			dma_len -= PAGE_SIZE;
> -			dma_index++;
> +			*a++ = sg_page_iter_dma_address(&dma_iter);
>  		}
>  	}
> +
>  	return 0;
>  }
>  EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
> -- 
> 1.9.1

Alex Goins Sept. 22, 2020, 9:12 p.m. UTC | #2
Hi Marek,

On Tue, 22 Sep 2020, Marek Szyprowski wrote:

> Hi Alex,
>
> On 22.09.2020 01:15, Alex Goins wrote:
> > Tested-by: Alex Goins <agoins@nvidia.com>
> >
> > This change fixes a regression with drm_prime_sg_to_page_addr_arrays() and
> > AMDGPU in v5.9.
>
> Thanks for testing!
>
> > Commit 39913934 similarly revamped AMDGPU to use sgtable helper functions. When
> > it changed from dma_map_sg_attrs() to dma_map_sgtable(), as a side effect it
> > started correctly updating sgt->nents to the return value of dma_map_sg_attrs().
> > However, drm_prime_sg_to_page_addr_arrays() incorrectly uses sgt->nents to
> > iterate over pages, rather than sgt->orig_nents, resulting in it now returning
> > the incorrect number of pages on AMDGPU.
> >
> > I had written a patch that changes drm_prime_sg_to_page_addr_arrays() to use
> > for_each_sgtable_sg() instead of for_each_sg(), iterating using sgt->orig_nents:
> >
> > -       for_each_sg(sgt->sgl, sg, sgt->nents, count) {
> > +       for_each_sgtable_sg(sgt, sg, count) {
> >
> > This patch takes it further, but still has the effect of fixing the number of
> > pages that drm_prime_sg_to_page_addr_arrays() returns. Something like this
> > should be included in v5.9 to prevent a regression with AMDGPU.
>
> Probably the easiest way to handle a fix for v5.9 would be to simply
> merge the latest version of this patch also to v5.9-rcX:
> https://lore.kernel.org/dri-devel/20200904131711.12950-3-m.szyprowski@samsung.com/

Tested-by: Alex Goins <agoins@nvidia.com> that version too.

> This way we would get it fixed and avoid possible conflict in the -next.
> Do you have any AMDGPU fixes for v5.9 in the queue? Maybe you can add that
> patch to the queue?

I don't have any more AMDGPU fixes, just want to ensure that this makes it in.

Thanks,
Alex

> Dave: would it be okay that way?
>
> Best regards
> --
> Marek Szyprowski, PhD
> Samsung R&D Institute Poland

Alex Deucher Sept. 25, 2020, 9:23 p.m. UTC | #3
On Tue, Sep 22, 2020 at 2:28 AM Marek Szyprowski
<m.szyprowski@samsung.com> wrote:
> Hi Alex,
>
> On 22.09.2020 01:15, Alex Goins wrote:
> > Tested-by: Alex Goins <agoins@nvidia.com>
> >
> > This change fixes a regression with drm_prime_sg_to_page_addr_arrays() and
> > AMDGPU in v5.9.
>
> Thanks for testing!
>
> > Commit 39913934 similarly revamped AMDGPU to use sgtable helper functions. When
> > it changed from dma_map_sg_attrs() to dma_map_sgtable(), as a side effect it
> > started correctly updating sgt->nents to the return value of dma_map_sg_attrs().
> > However, drm_prime_sg_to_page_addr_arrays() incorrectly uses sgt->nents to
> > iterate over pages, rather than sgt->orig_nents, resulting in it now returning
> > the incorrect number of pages on AMDGPU.
> >
> > I had written a patch that changes drm_prime_sg_to_page_addr_arrays() to use
> > for_each_sgtable_sg() instead of for_each_sg(), iterating using sgt->orig_nents:
> >
> > -       for_each_sg(sgt->sgl, sg, sgt->nents, count) {
> > +       for_each_sgtable_sg(sgt, sg, count) {
> >
> > This patch takes it further, but still has the effect of fixing the number of
> > pages that drm_prime_sg_to_page_addr_arrays() returns. Something like this
> > should be included in v5.9 to prevent a regression with AMDGPU.
>
> Probably the easiest way to handle a fix for v5.9 would be to simply
> merge the latest version of this patch also to v5.9-rcX:
> https://lore.kernel.org/dri-devel/20200904131711.12950-3-m.szyprowski@samsung.com/
>
> This way we would get it fixed and avoid possible conflict in the -next.
> Do you have any AMDGPU fixes for v5.9 in the queue? Maybe you can add
> that patch to the queue? Dave: would it be okay that way?

I think this should go into drm-misc for 5.9 since it's an update to
drm_prime.c.  Is that patch ready to merge?
Acked-by: Alex Deucher <alexander.deucher@amd.com>

Alex

>
> Best regards
> --
> Marek Szyprowski, PhD
> Samsung R&D Institute Poland

Marek Szyprowski Sept. 30, 2020, 7:15 a.m. UTC | #4
Hi All,

On 25.09.2020 23:23, Alex Deucher wrote:
> On Tue, Sep 22, 2020 at 2:28 AM Marek Szyprowski
> <m.szyprowski@samsung.com> wrote:
>> On 22.09.2020 01:15, Alex Goins wrote:
>>> Tested-by: Alex Goins <agoins@nvidia.com>
>>>
>>> This change fixes a regression with drm_prime_sg_to_page_addr_arrays() and
>>> AMDGPU in v5.9.
>> Thanks for testing!
>>
>>> Commit 39913934 similarly revamped AMDGPU to use sgtable helper functions. When
>>> it changed from dma_map_sg_attrs() to dma_map_sgtable(), as a side effect it
>>> started correctly updating sgt->nents to the return value of dma_map_sg_attrs().
>>> However, drm_prime_sg_to_page_addr_arrays() incorrectly uses sgt->nents to
>>> iterate over pages, rather than sgt->orig_nents, resulting in it now returning
>>> the incorrect number of pages on AMDGPU.
>>>
>>> I had written a patch that changes drm_prime_sg_to_page_addr_arrays() to use
>>> for_each_sgtable_sg() instead of for_each_sg(), iterating using sgt->orig_nents:
>>>
>>> -       for_each_sg(sgt->sgl, sg, sgt->nents, count) {
>>> +       for_each_sgtable_sg(sgt, sg, count) {
>>>
>>> This patch takes it further, but still has the effect of fixing the number of
>>> pages that drm_prime_sg_to_page_addr_arrays() returns. Something like this
>>> should be included in v5.9 to prevent a regression with AMDGPU.
>> Probably the easiest way to handle a fix for v5.9 would be to simply
>> merge the latest version of this patch also to v5.9-rcX:
>> https://lore.kernel.org/dri-devel/20200904131711.12950-3-m.szyprowski@samsung.com/
>>
>> This way we would get it fixed and avoid possible conflict in the -next.
>> Do you have any AMDGPU fixes for v5.9 in the queue? Maybe you can add
>> that patch to the queue? Dave: would it be okay that way?
> I think this should go into drm-misc for 5.9 since it's an update to
> drm_prime.c.  Is that patch ready to merge?
> Acked-by: Alex Deucher <alexander.deucher@amd.com>

Maarten, Maxime or Thomas: could you take this one:

https://lore.kernel.org/dri-devel/20200904131711.12950-3-m.szyprowski@samsung.com/

also to drm-misc-fixes for v5.9-rc?

Best regards
-- 
Marek Szyprowski, PhD
Samsung R&D Institute Poland


Patch

diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 1d2e5fe..dfdf4d4 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -985,45 +985,26 @@  struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
 int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
 				     dma_addr_t *addrs, int max_entries)
 {
-	unsigned count;
-	struct scatterlist *sg;
-	struct page *page;
-	u32 page_len, page_index;
-	dma_addr_t addr;
-	u32 dma_len, dma_index;
+	struct sg_dma_page_iter dma_iter;
+	struct sg_page_iter page_iter;
+	struct page **p = pages;
+	dma_addr_t *a = addrs;
 
-	/*
-	 * Scatterlist elements contains both pages and DMA addresses, but
-	 * one shoud not assume 1:1 relation between them. The sg->length is
-	 * the size of the physical memory chunk described by the sg->page,
-	 * while sg_dma_len(sg) is the size of the DMA (IO virtual) chunk
-	 * described by the sg_dma_address(sg).
-	 */
-	page_index = 0;
-	dma_index = 0;
-	for_each_sg(sgt->sgl, sg, sgt->nents, count) {
-		page_len = sg->length;
-		page = sg_page(sg);
-		dma_len = sg_dma_len(sg);
-		addr = sg_dma_address(sg);
-
-		while (pages && page_len > 0) {
-			if (WARN_ON(page_index >= max_entries))
+	if (pages) {
+		for_each_sgtable_page(sgt, &page_iter, 0) {
+			if (p - pages >= max_entries)
 				return -1;
-			pages[page_index] = page;
-			page++;
-			page_len -= PAGE_SIZE;
-			page_index++;
+			*p++ = sg_page_iter_page(&page_iter);
 		}
-		while (addrs && dma_len > 0) {
-			if (WARN_ON(dma_index >= max_entries))
+	}
+	if (addrs) {
+		for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
+			if (a - addrs >= max_entries)
 				return -1;
-			addrs[dma_index] = addr;
-			addr += PAGE_SIZE;
-			dma_len -= PAGE_SIZE;
-			dma_index++;
+			*a++ = sg_page_iter_dma_address(&dma_iter);
 		}
 	}
+
 	return 0;
 }
 EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays);
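
For context, a hedged sketch of how a caller might consume this helper once the
patch is applied (illustration only; demo_import(), its size parameter and the
trimmed error handling are made up, not taken from any in-tree driver):

	#include <drm/drm_prime.h>
	#include <linux/mm.h>

	static int demo_import(struct sg_table *sgt, size_t size)
	{
		unsigned int npages = size >> PAGE_SHIFT;
		struct page **pages;
		dma_addr_t *addrs;
		int ret;

		pages = kvmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
		addrs = kvmalloc_array(npages, sizeof(*addrs), GFP_KERNEL);
		if (!pages || !addrs) {
			ret = -ENOMEM;
			goto out;
		}

		/* Fills at most 'npages' entries of each array and returns -1 if
		 * the scatterlist describes more pages than that. */
		ret = drm_prime_sg_to_page_addr_arrays(sgt, pages, addrs, npages);
	out:
		/* A real importer would keep the arrays; they are freed here only
		 * to keep the sketch self-contained. */
		kvfree(pages);
		kvfree(addrs);
		return ret;
	}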