
[2/3] gpu: ion: carveout_heap page-wise cache flush

Message ID 1346212340-25236-3-git-send-email-zhangfei.gao@marvell.com
State Rejected

Commit Message

Zhangfei Gao Aug. 29, 2012, 3:52 a.m. UTC
Extend the dirty bit to per-PAGE_SIZE granularity.
Page-wise cache flush is supported and takes effect only for dirty buffers.

Signed-off-by: Zhangfei Gao <zhangfei.gao@marvell.com>
---
 drivers/gpu/ion/ion_carveout_heap.c |   21 ++++++++++++++++-----
 1 files changed, 16 insertions(+), 5 deletions(-)

Patch

diff --git a/drivers/gpu/ion/ion_carveout_heap.c b/drivers/gpu/ion/ion_carveout_heap.c
index 13f6e8d..24f8ef2 100644
--- a/drivers/gpu/ion/ion_carveout_heap.c
+++ b/drivers/gpu/ion/ion_carveout_heap.c
@@ -88,25 +88,36 @@  struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
 					      struct ion_buffer *buffer)
 {
 	struct sg_table *table;
-	int ret;
+	struct scatterlist *sg;
+	int ret, i;
+	int nents = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
+	struct page *page = phys_to_page(buffer->priv_phys);
 
 	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
 	if (!table)
 		return ERR_PTR(-ENOMEM);
-	ret = sg_alloc_table(table, 1, GFP_KERNEL);
+
+	ret = sg_alloc_table(table, nents, GFP_KERNEL);
 	if (ret) {
 		kfree(table);
 		return ERR_PTR(ret);
 	}
-	sg_set_page(table->sgl, phys_to_page(buffer->priv_phys), buffer->size,
-		    0);
+
+	sg = table->sgl;
+	for (i = 0; i < nents; i++) {
+		sg_set_page(sg, page + i, PAGE_SIZE, 0);
+		sg = sg_next(sg);
+	}
+
 	return table;
 }
 
 void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
 				 struct ion_buffer *buffer)
 {
-	sg_free_table(buffer->sg_table);
+	if (buffer->sg_table)
+		sg_free_table(buffer->sg_table);
+	kfree(buffer->sg_table);
 }
 
 void *ion_carveout_heap_map_kernel(struct ion_heap *heap,
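
For reference, here is how the per-page scatterlist built above could be consumed on the sync path. This is a minimal sketch, not part of the patch: it assumes the per-PAGE_SIZE dirty bitmap mentioned in the commit message is reachable as buffer->dirty (one bit per page; the actual field is introduced elsewhere in this series), and ion_buffer_sync_dirty is a hypothetical helper name. Only the standard streaming-DMA and bitops APIs are used:

	#include <linux/device.h>
	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>
	#include <linux/bitops.h>
	#include "ion_priv.h"	/* struct ion_buffer */

	static void ion_buffer_sync_dirty(struct device *dev,
					  struct ion_buffer *buffer,
					  struct sg_table *table)
	{
		struct scatterlist *sg;
		int i;

		/* One sg entry per page, so the sg index doubles as the page index. */
		for_each_sg(table->sgl, sg, table->nents, i) {
			if (!test_bit(i, buffer->dirty))	/* assumed bitmap from patch 1/3 */
				continue;			/* clean page: skip the flush */
			dma_sync_sg_for_device(dev, sg, 1, DMA_TO_DEVICE);
			clear_bit(i, buffer->dirty);
		}
	}

With a single sg entry spanning the whole carveout buffer (the old code), the finest flush granularity is the entire buffer; splitting the table into per-page entries, as this patch does, is what makes a selective dirty-page flush like the above possible.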