diff mbox

[v2,2/3] gpu: ion: carveout_heap page-wise cache flush

Message ID 1346746935-1149-3-git-send-email-zhangfei.gao@marvell.com
State Rejected
Headers show

Commit Message

Zhangfei Gao Sept. 4, 2012, 8:22 a.m. UTC
Extend the dirty bit to track one bit per PAGE_SIZE
Page-wise cache flush is supported and only takes effect for dirty buffers

Signed-off-by: Zhangfei Gao <zhangfei.gao@marvell.com>
---
 drivers/gpu/ion/ion_carveout_heap.c |   23 +++++++++++++++++------
 1 files changed, 17 insertions(+), 6 deletions(-)

Comments

Nishanth Peethambaran Sept. 4, 2012, 9:18 a.m. UTC | #1
On Tue, Sep 4, 2012 at 1:52 PM, Zhangfei Gao <zhangfei.gao@marvell.com> wrote:
> Extend dirty bit per PAGE_SIZE
> Page wised cache flush is supported and only takes effect for dirty buffer
>
> Signed-off-by: Zhangfei Gao <zhangfei.gao@marvell.com>
> ---
>  drivers/gpu/ion/ion_carveout_heap.c |   23 +++++++++++++++++------
>  1 files changed, 17 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/gpu/ion/ion_carveout_heap.c b/drivers/gpu/ion/ion_carveout_heap.c
> index 13f6e8d..60e97e5 100644
> --- a/drivers/gpu/ion/ion_carveout_heap.c
> +++ b/drivers/gpu/ion/ion_carveout_heap.c
> @@ -88,25 +88,36 @@ struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
>                                               struct ion_buffer *buffer)
>  {
>         struct sg_table *table;
> -       int ret;
> +       struct scatterlist *sg;
> +       int ret, i;
> +       int nents = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
> +       struct page *page = phys_to_page(buffer->priv_phys);
>
>         table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
>         if (!table)
>                 return ERR_PTR(-ENOMEM);
> -       ret = sg_alloc_table(table, 1, GFP_KERNEL);
> +
> +       ret = sg_alloc_table(table, nents, GFP_KERNEL);
>         if (ret) {
>                 kfree(table);
>                 return ERR_PTR(ret);
>         }
> -       sg_set_page(table->sgl, phys_to_page(buffer->priv_phys), buffer->size,
> -                   0);
> +
> +       sg = table->sgl;
> +       for (i = 0; i < nents; i++) {
> +               sg_set_page(sg, page + i, PAGE_SIZE, 0);
> +               sg = sg_next(sg);
> +       }
> +
>         return table;
>  }

It would need page-wise sg_table only if buffer is allocated as cached.

>
>  void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
>                                  struct ion_buffer *buffer)
>  {
> -       sg_free_table(buffer->sg_table);
> +       if (buffer->sg_table)
> +               sg_free_table(buffer->sg_table);
> +       kfree(buffer->sg_table);
>  }
>
>  void *ion_carveout_heap_map_kernel(struct ion_heap *heap,
> @@ -157,7 +168,7 @@ struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
>         if (!carveout_heap)
>                 return ERR_PTR(-ENOMEM);
>
> -       carveout_heap->pool = gen_pool_create(12, -1);
> +       carveout_heap->pool = gen_pool_create(PAGE_SHIFT, -1);
>         if (!carveout_heap->pool) {
>                 kfree(carveout_heap);
>                 return ERR_PTR(-ENOMEM);
> --
> 1.7.1
>
>
> _______________________________________________
> Linaro-mm-sig mailing list
> Linaro-mm-sig@lists.linaro.org
> http://lists.linaro.org/mailman/listinfo/linaro-mm-sig
Zhangfei Gao Sept. 4, 2012, 10:10 a.m. UTC | #2
>> @@ -88,25 +88,36 @@ struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
>>                                               struct ion_buffer *buffer)
>>  {
>>         struct sg_table *table;
>> -       int ret;
>> +       struct scatterlist *sg;
>> +       int ret, i;
>> +       int nents = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
>> +       struct page *page = phys_to_page(buffer->priv_phys);
>>
>>         table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
>>         if (!table)
>>                 return ERR_PTR(-ENOMEM);
>> -       ret = sg_alloc_table(table, 1, GFP_KERNEL);
>> +
>> +       ret = sg_alloc_table(table, nents, GFP_KERNEL);
>>         if (ret) {
>>                 kfree(table);
>>                 return ERR_PTR(ret);
>>         }
>> -       sg_set_page(table->sgl, phys_to_page(buffer->priv_phys), buffer->size,
>> -                   0);
>> +
>> +       sg = table->sgl;
>> +       for (i = 0; i < nents; i++) {
>> +               sg_set_page(sg, page + i, PAGE_SIZE, 0);
>> +               sg = sg_next(sg);
>> +       }
>> +
>>         return table;
>>  }
>
> It would need page-wise sg_table only if buffer is allocated as cached.
>
This refers to ion_system_heap.c; personally, I think it is fine.
Nishanth Peethambaran Sept. 4, 2012, 10:59 a.m. UTC | #3
On Tue, Sep 4, 2012 at 3:40 PM, zhangfei gao <zhangfei.gao@gmail.com> wrote:
>>> @@ -88,25 +88,36 @@ struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
>>>                                               struct ion_buffer *buffer)
>>>  {
>>>         struct sg_table *table;
>>> -       int ret;
>>> +       struct scatterlist *sg;
>>> +       int ret, i;
>>> +       int nents = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
>>> +       struct page *page = phys_to_page(buffer->priv_phys);
>>>
>>>         table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
>>>         if (!table)
>>>                 return ERR_PTR(-ENOMEM);
>>> -       ret = sg_alloc_table(table, 1, GFP_KERNEL);
>>> +
>>> +       ret = sg_alloc_table(table, nents, GFP_KERNEL);
>>>         if (ret) {
>>>                 kfree(table);
>>>                 return ERR_PTR(ret);
>>>         }
>>> -       sg_set_page(table->sgl, phys_to_page(buffer->priv_phys), buffer->size,
>>> -                   0);
>>> +
>>> +       sg = table->sgl;
>>> +       for (i = 0; i < nents; i++) {
>>> +               sg_set_page(sg, page + i, PAGE_SIZE, 0);
>>> +               sg = sg_next(sg);
>>> +       }
>>> +
>>>         return table;
>>>  }
>>
>> It would need page-wise sg_table only if buffer is allocated as cached.
>>
> This refers to ion_system_heap.c, personally, I think it is fine.

AFAIU the purpose of making page-wise sg_table is for dirty page
tracking by ion and so is needed only if ION_FLAG_CACHED is set for
the buffer.
If buffer->flags does not have ION_FLAG_CACHED, sg_table with single
nents should be fine. Right?
Zhangfei Gao Sept. 5, 2012, 4:54 a.m. UTC | #4
On Tue, Sep 4, 2012 at 6:59 PM, Nishanth Peethambaran
<nishanth.peethu@gmail.com> wrote:
> On Tue, Sep 4, 2012 at 3:40 PM, zhangfei gao <zhangfei.gao@gmail.com> wrote:
>>>> @@ -88,25 +88,36 @@ struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
>>>>                                               struct ion_buffer *buffer)
>>>>  {
>>>>         struct sg_table *table;
>>>> -       int ret;
>>>> +       struct scatterlist *sg;
>>>> +       int ret, i;
>>>> +       int nents = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
>>>> +       struct page *page = phys_to_page(buffer->priv_phys);
>>>>
>>>>         table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
>>>>         if (!table)
>>>>                 return ERR_PTR(-ENOMEM);
>>>> -       ret = sg_alloc_table(table, 1, GFP_KERNEL);
>>>> +
>>>> +       ret = sg_alloc_table(table, nents, GFP_KERNEL);
>>>>         if (ret) {
>>>>                 kfree(table);
>>>>                 return ERR_PTR(ret);
>>>>         }
>>>> -       sg_set_page(table->sgl, phys_to_page(buffer->priv_phys), buffer->size,
>>>> -                   0);
>>>> +
>>>> +       sg = table->sgl;
>>>> +       for (i = 0; i < nents; i++) {
>>>> +               sg_set_page(sg, page + i, PAGE_SIZE, 0);
>>>> +               sg = sg_next(sg);
>>>> +       }
>>>> +
>>>>         return table;
>>>>  }
>>>
>>> It would need page-wise sg_table only if buffer is allocated as cached.
>>>
>> This refers to ion_system_heap.c, personally, I think it is fine.
>
> AFAIU the purpose of making page-wise sg_table is for dirty page
> tracking by ion and so is needed only if ION_FLAG_CACHED is set for
> the buffer.
> If buffer->flags does not have ION_FLAG_CACHED, sg_table with single
> nents should be fine. Right?

Yes, it is workable to use single nents when no ION_FLAG_CACHED.
However, do you really think it is meaningful to distinguish them?
diff mbox

Patch

diff --git a/drivers/gpu/ion/ion_carveout_heap.c b/drivers/gpu/ion/ion_carveout_heap.c
index 13f6e8d..60e97e5 100644
--- a/drivers/gpu/ion/ion_carveout_heap.c
+++ b/drivers/gpu/ion/ion_carveout_heap.c
@@ -88,25 +88,36 @@  struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
 					      struct ion_buffer *buffer)
 {
 	struct sg_table *table;
-	int ret;
+	struct scatterlist *sg;
+	int ret, i;
+	int nents = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
+	struct page *page = phys_to_page(buffer->priv_phys);
 
 	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
 	if (!table)
 		return ERR_PTR(-ENOMEM);
-	ret = sg_alloc_table(table, 1, GFP_KERNEL);
+
+	ret = sg_alloc_table(table, nents, GFP_KERNEL);
 	if (ret) {
 		kfree(table);
 		return ERR_PTR(ret);
 	}
-	sg_set_page(table->sgl, phys_to_page(buffer->priv_phys), buffer->size,
-		    0);
+
+	sg = table->sgl;
+	for (i = 0; i < nents; i++) {
+		sg_set_page(sg, page + i, PAGE_SIZE, 0);
+		sg = sg_next(sg);
+	}
+
 	return table;
 }
 
 void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
 				 struct ion_buffer *buffer)
 {
-	sg_free_table(buffer->sg_table);
+	if (buffer->sg_table)
+		sg_free_table(buffer->sg_table);
+	kfree(buffer->sg_table);
 }
 
 void *ion_carveout_heap_map_kernel(struct ion_heap *heap,
@@ -157,7 +168,7 @@  struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
 	if (!carveout_heap)
 		return ERR_PTR(-ENOMEM);
 
-	carveout_heap->pool = gen_pool_create(12, -1);
+	carveout_heap->pool = gen_pool_create(PAGE_SHIFT, -1);
 	if (!carveout_heap->pool) {
 		kfree(carveout_heap);
 		return ERR_PTR(-ENOMEM);