[v7,3/3] gpu: ion: add CMA heap

Message ID 50c094cf.c361b40a.59fe.4212@mx.google.com
State New
Headers show

Commit Message

Benjamin Gaignard Dec. 6, 2012, 12:50 p.m.
From: Benjamin Gaignard <benjamin.gaignard@linaro.org>

New heap type ION_HEAP_TYPE_DMA where allocation is done with the dma_alloc_coherent API.
The device coherent_dma_mask must be set to DMA_BIT_MASK(32).
The ion_platform_heap private field is used to retrieve the device linked to CMA;
if NULL, the default CMA area is used.
ion_cma_get_sgtable is a copy of the dma_common_get_sgtable function, which should
be available in kernel 3.5

Signed-off-by: Benjamin Gaignard <benjamin.gaignard@linaro.org>
---
 drivers/gpu/ion/Kconfig        |    5 ++
 drivers/gpu/ion/Makefile       |    1 +
 drivers/gpu/ion/ion_cma_heap.c |  187 ++++++++++++++++++++++++++++++++++++++++
 drivers/gpu/ion/ion_heap.c     |    6 ++
 drivers/gpu/ion/ion_priv.h     |   14 +++
 include/linux/ion.h            |    3 +
 6 files changed, 216 insertions(+)
 create mode 100644 drivers/gpu/ion/ion_cma_heap.c

Comments

Nishanth Peethambaran Dec. 6, 2012, 1:35 p.m. | #1
Hello Benjamin,

On Thu, Dec 6, 2012 at 6:20 PM,  <benjamin.gaignard@linaro.org> wrote:
> From: Benjamin Gaignard <benjamin.gaignard@linaro.org>
>
> New heap type ION_HEAP_TYPE_DMA where allocation is done with dma_alloc_coherent API.
> device coherent_dma_mask must be set to DMA_BIT_MASK(32).
> ion_platform_heap private field is used to retrieve the device linked to CMA,
> if NULL the default CMA area is used.
> ion_cma_get_sgtable is a copy of dma_common_get_sgtable function which should
> be in kernel 3.5
>
> Signed-off-by: Benjamin Gaignard <benjamin.gaignard@linaro.org>
> ---
>  drivers/gpu/ion/Kconfig        |    5 ++
>  drivers/gpu/ion/Makefile       |    1 +
>  drivers/gpu/ion/ion_cma_heap.c |  187 ++++++++++++++++++++++++++++++++++++++++
>  drivers/gpu/ion/ion_heap.c     |    6 ++
>  drivers/gpu/ion/ion_priv.h     |   14 +++
>  include/linux/ion.h            |    3 +
>  6 files changed, 216 insertions(+)
>  create mode 100644 drivers/gpu/ion/ion_cma_heap.c
>
> diff --git a/drivers/gpu/ion/Kconfig b/drivers/gpu/ion/Kconfig
> index b5bfdb4..ea0a896 100644
> --- a/drivers/gpu/ion/Kconfig
> +++ b/drivers/gpu/ion/Kconfig
> @@ -5,6 +5,11 @@ menuconfig ION
>         help
>           Chose this option to enable the ION Memory Manager.
>
> +config ION_CMA
> +       bool
> +       default y
> +       depends on CMA && ION
> +
>  config ION_TEGRA
>         tristate "Ion for Tegra"
>         depends on ARCH_TEGRA && ION
> diff --git a/drivers/gpu/ion/Makefile b/drivers/gpu/ion/Makefile
> index d1ddebb..fddb85c 100644
> --- a/drivers/gpu/ion/Makefile
> +++ b/drivers/gpu/ion/Makefile
> @@ -1,3 +1,4 @@
>  obj-$(CONFIG_ION) +=   ion.o ion_heap.o ion_page_pool.o ion_system_heap.o \
>                         ion_carveout_heap.o
> +obj-$(CONFIG_ION_CMA) += ion_cma_heap.o
>  obj-$(CONFIG_ION_TEGRA) += tegra/
> diff --git a/drivers/gpu/ion/ion_cma_heap.c b/drivers/gpu/ion/ion_cma_heap.c
> new file mode 100644
> index 0000000..543f891
> --- /dev/null
> +++ b/drivers/gpu/ion/ion_cma_heap.c
> @@ -0,0 +1,187 @@
> +/*
> + * drivers/gpu/ion/ion_cma_heap.c
> + *
> + * Copyright (C) Linaro 2012
> + * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
> + *
> + * This software is licensed under the terms of the GNU General Public
> + * License version 2, as published by the Free Software Foundation, and
> + * may be copied, distributed, and modified under those terms.
> + *
> + * This program is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
> + * GNU General Public License for more details.
> + *
> + */
> +
> +#include <linux/device.h>
> +#include <linux/ion.h>
> +#include <linux/slab.h>
> +#include <linux/errno.h>
> +#include <linux/err.h>
> +#include <linux/dma-mapping.h>
> +
> +/* for ion_heap_ops structure */
> +#include "ion_priv.h"
> +
> +#define ION_CMA_ALLOCATE_FAILED -1
> +
> +struct ion_cma_buffer_info {
> +       void *cpu_addr;
> +       dma_addr_t handle;
> +       struct sg_table *table;
> +};
> +
> +/*
> + * Create scatter-list for the already allocated DMA buffer.
> + * This function could be replaced by dma_common_get_sgtable
> + * as soon as it will be available.
> + */
> +int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
> +                       void *cpu_addr, dma_addr_t handle, size_t size)
> +{
> +       struct page *page = virt_to_page(cpu_addr);
> +       int ret;
> +
> +       ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
> +       if (unlikely(ret))
> +               return ret;
> +
> +       sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
> +       return 0;
> +}
> +
> +/* ION CMA heap operations functions */
> +static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
> +                           unsigned long len, unsigned long align,
> +                           unsigned long flags)
> +{
> +       struct device *dev = heap->priv;
> +       struct ion_cma_buffer_info *info;
> +
> +       dev_dbg(dev, "Request buffer allocation len %ld\n", len);
> +
> +       info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL);
> +       if (!info) {
> +               dev_err(dev, "Can't allocate buffer info\n");
> +               return ION_CMA_ALLOCATE_FAILED;
> +       }
> +
> +       info->cpu_addr = dma_alloc_coherent(dev, len, &(info->handle), 0);
> +
> +       if (!info->cpu_addr) {
> +               dev_err(dev, "Fail to allocate buffer\n");
> +               goto err;
> +       }
> +
> +       info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
> +       if (!info->table) {
> +               dev_err(dev, "Fail to allocate sg table\n");
> +               goto err;
> +       }
> +
> +       ion_cma_get_sgtable(dev,
> +                       info->table, info->cpu_addr, info->handle, len);

Better to check the return value of ion_cma_get_sgtable and free up
the cma and info->table memory.

> +
> +       /* keep this for memory release */
> +       buffer->priv_virt = info;
> +       dev_dbg(dev, "Allocate buffer %p\n", buffer);
> +       return 0;
> +
> +err:
> +       kfree(info);
> +       return ION_CMA_ALLOCATE_FAILED;
> +}
> +
> +static void ion_cma_free(struct ion_buffer *buffer)
> +{
> +       struct device *dev = buffer->heap->priv;
> +       struct ion_cma_buffer_info *info = buffer->priv_virt;
> +
> +       dev_dbg(dev, "Release buffer %p\n", buffer);
> +       /* release memory */
> +       dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle);
> +       /* release sg table */

sg_free_table(info->table);

> +       kfree(info->table);
> +       kfree(info);
> +}
> +
> +/* return physical address in addr */
> +static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
> +                       ion_phys_addr_t *addr, size_t *len)
> +{
> +       struct device *dev = heap->priv;
> +       struct ion_cma_buffer_info *info = buffer->priv_virt;
> +
> +       dev_dbg(dev, "Return buffer %p physical address 0x%x\n", buffer,
> +               virt_to_phys(info->cpu_addr));
> +
> +       *addr = virt_to_phys(info->cpu_addr);
> +       *len = buffer->size;
> +
> +       return 0;
> +}
> +
> +struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
> +                                        struct ion_buffer *buffer)
> +{
> +       struct ion_cma_buffer_info *info = buffer->priv_virt;
> +
> +       return info->table;
> +}
> +
> +void ion_cma_heap_unmap_dma(struct ion_heap *heap,
> +                              struct ion_buffer *buffer)
> +{
> +       return;
> +}
> +
> +static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
> +                       struct vm_area_struct *vma)
> +{
> +       struct device *dev = buffer->heap->priv;
> +       struct ion_cma_buffer_info *info = buffer->priv_virt;
> +
> +       return dma_mmap_coherent(dev, vma, info->cpu_addr, info->handle,
> +                                buffer->size);
> +}
> +
> +void *ion_cma_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer)
> +{
> +       struct ion_cma_buffer_info *info = buffer->priv_virt;
> +       /* kernel memory mapping has been done at allocation time */
> +       return info->cpu_addr;
> +}
> +
> +static struct ion_heap_ops ion_cma_ops = {
> +       .allocate = ion_cma_allocate,
> +       .free = ion_cma_free,
> +       .map_dma = ion_cma_heap_map_dma,
> +       .unmap_dma = ion_cma_heap_unmap_dma,
> +       .phys = ion_cma_phys,
> +       .map_user = ion_cma_mmap,
> +       .map_kernel = ion_cma_map_kernel,
> +};
> +
> +struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data)
> +{
> +       struct ion_heap *heap;
> +
> +       heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
> +
> +       if (!heap)
> +               return ERR_PTR(-ENOMEM);
> +
> +       heap->ops = &ion_cma_ops;
> +       /* set device as private heaps data, later it will be
> +        * used to make the link with reserved CMA memory */
> +       heap->priv = data->priv;
> +       heap->type = ION_HEAP_TYPE_DMA;
> +       return heap;
> +}
> +
> +void ion_cma_heap_destroy(struct ion_heap *heap)
> +{
> +       kfree(heap);
> +}
> diff --git a/drivers/gpu/ion/ion_heap.c b/drivers/gpu/ion/ion_heap.c
> index f9ab1df..c960230 100644
> --- a/drivers/gpu/ion/ion_heap.c
> +++ b/drivers/gpu/ion/ion_heap.c
> @@ -32,6 +32,9 @@ struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
>         case ION_HEAP_TYPE_CARVEOUT:
>                 heap = ion_carveout_heap_create(heap_data);
>                 break;
> +       case ION_HEAP_TYPE_DMA:
> +               heap = ion_cma_heap_create(heap_data);
> +               break;
>         default:
>                 pr_err("%s: Invalid heap type %d\n", __func__,
>                        heap_data->type);
> @@ -66,6 +69,9 @@ void ion_heap_destroy(struct ion_heap *heap)
>         case ION_HEAP_TYPE_CARVEOUT:
>                 ion_carveout_heap_destroy(heap);
>                 break;
> +       case ION_HEAP_TYPE_DMA:
> +               ion_cma_heap_destroy(heap);
> +               break;
>         default:
>                 pr_err("%s: Invalid heap type %d\n", __func__,
>                        heap->type);
> diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h
> index a31203b..046f1a2 100644
> --- a/drivers/gpu/ion/ion_priv.h
> +++ b/drivers/gpu/ion/ion_priv.h
> @@ -203,6 +203,20 @@ ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size,
>                                       unsigned long align);
>  void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
>                        unsigned long size);
> +
> +#ifdef CONFIG_CMA
> +struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *heap);
> +void ion_cma_heap_destroy(struct ion_heap *heap);
> +#else
> +static inline struct ion_heap *ion_cma_heap_create(struct ion_platform_heap
> +                                                  *heap)
> +{
> +       return NULL;
> +}
> +
> +static inline void ion_cma_heap_destroy(struct ion_heap *heap) {};
> +#endif
> +
>  /**
>   * The carveout heap returns physical addresses, since 0 may be a valid
>   * physical address, this is used to indicate allocation failed
> diff --git a/include/linux/ion.h b/include/linux/ion.h
> index b85f4b1..3ccc75a 100644
> --- a/include/linux/ion.h
> +++ b/include/linux/ion.h
> @@ -27,6 +27,7 @@ struct ion_handle;
>   * @ION_HEAP_TYPE_CARVEOUT:     memory allocated from a prereserved
>   *                              carveout heap, allocations are physically
>   *                              contiguous
> + * @ION_HEAP_TYPE_DMA:          memory allocated via DMA API
>   * @ION_NUM_HEAPS:              helper for iterating over heaps, a bit mask
>   *                              is used to identify the heaps, so only 32
>   *                              total heap types are supported
> @@ -35,6 +36,7 @@ enum ion_heap_type {
>         ION_HEAP_TYPE_SYSTEM,
>         ION_HEAP_TYPE_SYSTEM_CONTIG,
>         ION_HEAP_TYPE_CARVEOUT,
> +       ION_HEAP_TYPE_DMA,
>         ION_HEAP_TYPE_CUSTOM, /* must be last so device specific heaps always
>                                  are at the end of this enum */
>         ION_NUM_HEAPS = 16,
> @@ -43,6 +45,7 @@ enum ion_heap_type {
>  #define ION_HEAP_SYSTEM_MASK           (1 << ION_HEAP_TYPE_SYSTEM)
>  #define ION_HEAP_SYSTEM_CONTIG_MASK    (1 << ION_HEAP_TYPE_SYSTEM_CONTIG)
>  #define ION_HEAP_CARVEOUT_MASK         (1 << ION_HEAP_TYPE_CARVEOUT)
> +#define ION_HEAP_TYPE_DMA_MASK         (1 << ION_HEAP_TYPE_DMA)
>
>  /**
>   * heap flags - the lower 16 bits are used by core ion, the upper 16
> --
> 1.7.10
>
>
> _______________________________________________
> Linaro-mm-sig mailing list
> Linaro-mm-sig@lists.linaro.org
> http://lists.linaro.org/mailman/listinfo/linaro-mm-sig


- Nishanth Peethambaran
Laura Abbott Dec. 6, 2012, 9:24 p.m. | #2
Hi,

On 12/6/2012 4:50 AM, benjamin.gaignard@linaro.org wrote:
> From: Benjamin Gaignard <benjamin.gaignard@linaro.org>

<snip>

> +
> +/* return physical address in addr */
> +static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
> +			ion_phys_addr_t *addr, size_t *len)
> +{
> +	struct device *dev = heap->priv;
> +	struct ion_cma_buffer_info *info = buffer->priv_virt;
> +
> +	dev_dbg(dev, "Return buffer %p physical address 0x%x\n", buffer,
> +		virt_to_phys(info->cpu_addr));
> +
> +	*addr = virt_to_phys(info->cpu_addr);
> +	*len = buffer->size;
> +
> +	return 0;
> +}
> +
info->handle should give back the physical address so we should be able 
to use that directly instead of calling virt_to_phys

> +struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
> +					 struct ion_buffer *buffer)
> +{
> +	struct ion_cma_buffer_info *info = buffer->priv_virt;
> +
> +	return info->table;
> +}
> +
> +void ion_cma_heap_unmap_dma(struct ion_heap *heap,
> +			       struct ion_buffer *buffer)
> +{
> +	return;
> +}
> +
> +static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
> +			struct vm_area_struct *vma)
> +{
> +	struct device *dev = buffer->heap->priv;
> +	struct ion_cma_buffer_info *info = buffer->priv_virt;
> +
> +	return dma_mmap_coherent(dev, vma, info->cpu_addr, info->handle,
> +				 buffer->size);
> +}
> +
> +void *ion_cma_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer)
> +{
> +	struct ion_cma_buffer_info *info = buffer->priv_virt;
> +	/* kernel memory mapping has been done at allocation time */
> +	return info->cpu_addr;
> +}
> +
> +static struct ion_heap_ops ion_cma_ops = {
> +	.allocate = ion_cma_allocate,
> +	.free = ion_cma_free,
> +	.map_dma = ion_cma_heap_map_dma,
> +	.unmap_dma = ion_cma_heap_unmap_dma,
> +	.phys = ion_cma_phys,
> +	.map_user = ion_cma_mmap,
> +	.map_kernel = ion_cma_map_kernel,
> +};
> +

Missing the unmap_kernel?


Thanks,
Laura
Benjamin Gaignard Dec. 7, 2012, 8:54 a.m. | #3
Hi,

The kernel mapping is implicit with dma_alloc_coherent;
consequently, ion_cma_map_kernel only has to return the address of the mapped
memory (info->cpu_addr). unmap_kernel is unnecessary in this context.

When DMA_ATTR_NO_KERNEL_MAPPING becomes available in the Android kernel, we
could set it when calling dma_alloc_coherent, and the mapping would become
explicit, with map_kernel and unmap_kernel doing the map/unmap operations.

Regards,
Benjamin


2012/12/6 Laura Abbott <lauraa@codeaurora.org>

> Hi,
>
>
> On 12/6/2012 4:50 AM, benjamin.gaignard@linaro.org wrote:
>
>> From: Benjamin Gaignard <benjamin.gaignard@linaro.org>
>>
>
> <snip>
>
>
>  +
>> +/* return physical address in addr */
>> +static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
>> +                       ion_phys_addr_t *addr, size_t *len)
>> +{
>> +       struct device *dev = heap->priv;
>> +       struct ion_cma_buffer_info *info = buffer->priv_virt;
>> +
>> +       dev_dbg(dev, "Return buffer %p physical address 0x%x\n", buffer,
>> +               virt_to_phys(info->cpu_addr));
>> +
>> +       *addr = virt_to_phys(info->cpu_addr);
>> +       *len = buffer->size;
>> +
>> +       return 0;
>> +}
>> +
>>
> info->handle should give back the physical address so we should be able to
> use that directly instead of calling virt_to_phys
>
>
>  +struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
>> +                                        struct ion_buffer *buffer)
>> +{
>> +       struct ion_cma_buffer_info *info = buffer->priv_virt;
>> +
>> +       return info->table;
>> +}
>> +
>> +void ion_cma_heap_unmap_dma(struct ion_heap *heap,
>> +                              struct ion_buffer *buffer)
>> +{
>> +       return;
>> +}
>> +
>> +static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer
>> *buffer,
>> +                       struct vm_area_struct *vma)
>> +{
>> +       struct device *dev = buffer->heap->priv;
>> +       struct ion_cma_buffer_info *info = buffer->priv_virt;
>> +
>> +       return dma_mmap_coherent(dev, vma, info->cpu_addr, info->handle,
>> +                                buffer->size);
>> +}
>> +
>> +void *ion_cma_map_kernel(struct ion_heap *heap, struct ion_buffer
>> *buffer)
>> +{
>> +       struct ion_cma_buffer_info *info = buffer->priv_virt;
>> +       /* kernel memory mapping has been done at allocation time */
>> +       return info->cpu_addr;
>> +}
>> +
>> +static struct ion_heap_ops ion_cma_ops = {
>> +       .allocate = ion_cma_allocate,
>> +       .free = ion_cma_free,
>> +       .map_dma = ion_cma_heap_map_dma,
>> +       .unmap_dma = ion_cma_heap_unmap_dma,
>> +       .phys = ion_cma_phys,
>> +       .map_user = ion_cma_mmap,
>> +       .map_kernel = ion_cma_map_kernel,
>> +};
>> +
>>
>
> Missing the unmap_kernel?
>
>
> Thanks,
> Laura
>
> --
> Qualcomm Innovation Center, Inc. is a member of Code Aurora Forum,
> hosted by The Linux Foundation
>

Patch hide | download patch | download mbox

diff --git a/drivers/gpu/ion/Kconfig b/drivers/gpu/ion/Kconfig
index b5bfdb4..ea0a896 100644
--- a/drivers/gpu/ion/Kconfig
+++ b/drivers/gpu/ion/Kconfig
@@ -5,6 +5,11 @@  menuconfig ION
 	help
 	  Chose this option to enable the ION Memory Manager.
 
+config ION_CMA
+	bool
+	default y
+	depends on CMA && ION
+
 config ION_TEGRA
 	tristate "Ion for Tegra"
 	depends on ARCH_TEGRA && ION
diff --git a/drivers/gpu/ion/Makefile b/drivers/gpu/ion/Makefile
index d1ddebb..fddb85c 100644
--- a/drivers/gpu/ion/Makefile
+++ b/drivers/gpu/ion/Makefile
@@ -1,3 +1,4 @@ 
 obj-$(CONFIG_ION) +=	ion.o ion_heap.o ion_page_pool.o ion_system_heap.o \
 			ion_carveout_heap.o
+obj-$(CONFIG_ION_CMA) += ion_cma_heap.o
 obj-$(CONFIG_ION_TEGRA) += tegra/
diff --git a/drivers/gpu/ion/ion_cma_heap.c b/drivers/gpu/ion/ion_cma_heap.c
new file mode 100644
index 0000000..543f891
--- /dev/null
+++ b/drivers/gpu/ion/ion_cma_heap.c
@@ -0,0 +1,187 @@ 
+/*
+ * drivers/gpu/ion/ion_cma_heap.c
+ *
+ * Copyright (C) Linaro 2012
+ * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/ion.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+
+/* for ion_heap_ops structure */
+#include "ion_priv.h"
+
+#define ION_CMA_ALLOCATE_FAILED -1
+
+struct ion_cma_buffer_info {
+	void *cpu_addr;
+	dma_addr_t handle;
+	struct sg_table *table;
+};
+
+/*
+ * Create scatter-list for the already allocated DMA buffer.
+ * This function could be replaced by dma_common_get_sgtable
+ * as soon as it will be available.
+ */
+int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
+			void *cpu_addr, dma_addr_t handle, size_t size)
+{
+	struct page *page = virt_to_page(cpu_addr);
+	int ret;
+
+	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+	if (unlikely(ret))
+		return ret;
+
+	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
+	return 0;
+}
+
+/* ION CMA heap operations functions */
+static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
+			    unsigned long len, unsigned long align,
+			    unsigned long flags)
+{
+	struct device *dev = heap->priv;
+	struct ion_cma_buffer_info *info;
+
+	dev_dbg(dev, "Request buffer allocation len %ld\n", len);
+
+	info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL);
+	if (!info) {
+		dev_err(dev, "Can't allocate buffer info\n");
+		return ION_CMA_ALLOCATE_FAILED;
+	}
+
+	info->cpu_addr = dma_alloc_coherent(dev, len, &(info->handle), 0);
+
+	if (!info->cpu_addr) {
+		dev_err(dev, "Fail to allocate buffer\n");
+		goto err;
+	}
+
+	info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!info->table) {
+		dev_err(dev, "Fail to allocate sg table\n");
+		goto err;
+	}
+
+	ion_cma_get_sgtable(dev,
+			info->table, info->cpu_addr, info->handle, len);
+
+	/* keep this for memory release */
+	buffer->priv_virt = info;
+	dev_dbg(dev, "Allocate buffer %p\n", buffer);
+	return 0;
+
+err:
+	kfree(info);
+	return ION_CMA_ALLOCATE_FAILED;
+}
+
+static void ion_cma_free(struct ion_buffer *buffer)
+{
+	struct device *dev = buffer->heap->priv;
+	struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+	dev_dbg(dev, "Release buffer %p\n", buffer);
+	/* release memory */
+	dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle);
+	/* release sg table */
+	kfree(info->table);
+	kfree(info);
+}
+
+/* return physical address in addr */
+static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
+			ion_phys_addr_t *addr, size_t *len)
+{
+	struct device *dev = heap->priv;
+	struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+	dev_dbg(dev, "Return buffer %p physical address 0x%x\n", buffer,
+		virt_to_phys(info->cpu_addr));
+
+	*addr = virt_to_phys(info->cpu_addr);
+	*len = buffer->size;
+
+	return 0;
+}
+
+struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
+					 struct ion_buffer *buffer)
+{
+	struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+	return info->table;
+}
+
+void ion_cma_heap_unmap_dma(struct ion_heap *heap,
+			       struct ion_buffer *buffer)
+{
+	return;
+}
+
+static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
+			struct vm_area_struct *vma)
+{
+	struct device *dev = buffer->heap->priv;
+	struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+	return dma_mmap_coherent(dev, vma, info->cpu_addr, info->handle,
+				 buffer->size);
+}
+
+void *ion_cma_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer)
+{
+	struct ion_cma_buffer_info *info = buffer->priv_virt;
+	/* kernel memory mapping has been done at allocation time */
+	return info->cpu_addr;
+}
+
+static struct ion_heap_ops ion_cma_ops = {
+	.allocate = ion_cma_allocate,
+	.free = ion_cma_free,
+	.map_dma = ion_cma_heap_map_dma,
+	.unmap_dma = ion_cma_heap_unmap_dma,
+	.phys = ion_cma_phys,
+	.map_user = ion_cma_mmap,
+	.map_kernel = ion_cma_map_kernel,
+};
+
+struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data)
+{
+	struct ion_heap *heap;
+
+	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
+
+	if (!heap)
+		return ERR_PTR(-ENOMEM);
+
+	heap->ops = &ion_cma_ops;
+	/* set device as private heaps data, later it will be
+	 * used to make the link with reserved CMA memory */
+	heap->priv = data->priv;
+	heap->type = ION_HEAP_TYPE_DMA;
+	return heap;
+}
+
+void ion_cma_heap_destroy(struct ion_heap *heap)
+{
+	kfree(heap);
+}
diff --git a/drivers/gpu/ion/ion_heap.c b/drivers/gpu/ion/ion_heap.c
index f9ab1df..c960230 100644
--- a/drivers/gpu/ion/ion_heap.c
+++ b/drivers/gpu/ion/ion_heap.c
@@ -32,6 +32,9 @@  struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
 	case ION_HEAP_TYPE_CARVEOUT:
 		heap = ion_carveout_heap_create(heap_data);
 		break;
+	case ION_HEAP_TYPE_DMA:
+		heap = ion_cma_heap_create(heap_data);
+		break;
 	default:
 		pr_err("%s: Invalid heap type %d\n", __func__,
 		       heap_data->type);
@@ -66,6 +69,9 @@  void ion_heap_destroy(struct ion_heap *heap)
 	case ION_HEAP_TYPE_CARVEOUT:
 		ion_carveout_heap_destroy(heap);
 		break;
+	case ION_HEAP_TYPE_DMA:
+		ion_cma_heap_destroy(heap);
+		break;
 	default:
 		pr_err("%s: Invalid heap type %d\n", __func__,
 		       heap->type);
diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h
index a31203b..046f1a2 100644
--- a/drivers/gpu/ion/ion_priv.h
+++ b/drivers/gpu/ion/ion_priv.h
@@ -203,6 +203,20 @@  ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size,
 				      unsigned long align);
 void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
 		       unsigned long size);
+
+#ifdef CONFIG_CMA
+struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *heap);
+void ion_cma_heap_destroy(struct ion_heap *heap);
+#else
+static inline struct ion_heap *ion_cma_heap_create(struct ion_platform_heap
+						   *heap)
+{
+	return NULL;
+}
+
+static inline void ion_cma_heap_destroy(struct ion_heap *heap) {};
+#endif
+
 /**
  * The carveout heap returns physical addresses, since 0 may be a valid
  * physical address, this is used to indicate allocation failed
diff --git a/include/linux/ion.h b/include/linux/ion.h
index b85f4b1..3ccc75a 100644
--- a/include/linux/ion.h
+++ b/include/linux/ion.h
@@ -27,6 +27,7 @@  struct ion_handle;
  * @ION_HEAP_TYPE_CARVEOUT:	 memory allocated from a prereserved
  * 				 carveout heap, allocations are physically
  * 				 contiguous
+ * @ION_HEAP_TYPE_DMA:		 memory allocated via DMA API
  * @ION_NUM_HEAPS:		 helper for iterating over heaps, a bit mask
  * 				 is used to identify the heaps, so only 32
  * 				 total heap types are supported
@@ -35,6 +36,7 @@  enum ion_heap_type {
 	ION_HEAP_TYPE_SYSTEM,
 	ION_HEAP_TYPE_SYSTEM_CONTIG,
 	ION_HEAP_TYPE_CARVEOUT,
+	ION_HEAP_TYPE_DMA,
 	ION_HEAP_TYPE_CUSTOM, /* must be last so device specific heaps always
 				 are at the end of this enum */
 	ION_NUM_HEAPS = 16,
@@ -43,6 +45,7 @@  enum ion_heap_type {
 #define ION_HEAP_SYSTEM_MASK		(1 << ION_HEAP_TYPE_SYSTEM)
 #define ION_HEAP_SYSTEM_CONTIG_MASK	(1 << ION_HEAP_TYPE_SYSTEM_CONTIG)
 #define ION_HEAP_CARVEOUT_MASK		(1 << ION_HEAP_TYPE_CARVEOUT)
+#define ION_HEAP_TYPE_DMA_MASK		(1 << ION_HEAP_TYPE_DMA)
 
 /**
  * heap flags - the lower 16 bits are used by core ion, the upper 16