@@ -4900,6 +4900,24 @@ F: include/linux/*fence.h
F: Documentation/driver-api/dma-buf.rst
T: git git://anongit.freedesktop.org/drm/drm-misc
+DMA-BUF HEAPS FRAMEWORK
+M: Sumit Semwal <sumit.semwal@linaro.org>
+R: Andrew F. Davis <afd@ti.com>
+R: Benjamin Gaignard <benjamin.gaignard@linaro.org>
+R: Liam Mark <lmark@codeaurora.org>
+R: Laura Abbott <labbott@redhat.com>
+R: Brian Starkey <Brian.Starkey@arm.com>
+R: John Stultz <john.stultz@linaro.org>
+S: Maintained
+L: linux-media@vger.kernel.org
+L: dri-devel@lists.freedesktop.org
+L: linaro-mm-sig@lists.linaro.org (moderated for non-subscribers)
+F: include/uapi/linux/dma-heap.h
+F: include/linux/dma-heap.h
+F: drivers/dma-buf/dma-heap.c
+F: drivers/dma-buf/heaps/*
+T: git git://anongit.freedesktop.org/drm/drm-misc
+
DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
M: Vinod Koul <vkoul@kernel.org>
L: dmaengine@vger.kernel.org
@@ -39,4 +39,13 @@ config UDMABUF
A driver to let userspace turn memfd regions into dma-bufs.
Qemu can use this to create host dmabufs for guest framebuffers.
+menuconfig DMABUF_HEAPS
+ bool "DMA-BUF Userland Memory Heaps"
+ select DMA_SHARED_BUFFER
+ help
+ Choose this option to enable the DMA-BUF userland memory heaps.
+ This option creates a chardev per heap in /dev/dma_heap/ which
+ userspace can use to allocate dma-bufs that can be shared
+ between drivers.
+
endmenu
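
For context, a minimal userspace sketch of the allocation path these chardevs expose; the heap path in the trailing comment is only an assumption, while the chardev layout, ioctl and struct come from the UAPI header added later in this patch:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/dma-heap.h>

/* Returns a dma-buf fd on success, -1 on error. */
static int heap_alloc_dmabuf(const char *heap_path, size_t len)
{
        struct dma_heap_allocation_data data = {
                .len = len,
                .fd_flags = O_RDWR | O_CLOEXEC,
                /* fd, heap_flags and reserved fields must stay zero */
        };
        int heap_fd, ret;

        heap_fd = open(heap_path, O_RDONLY | O_CLOEXEC);
        if (heap_fd < 0)
                return -1;

        ret = ioctl(heap_fd, DMA_HEAP_IOC_ALLOC, &data);
        close(heap_fd);

        return ret < 0 ? -1 : (int)data.fd;
}

/* e.g. heap_alloc_dmabuf("/dev/dma_heap/system", 4096); ("system" is hypothetical) */
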
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \
reservation.o seqno-fence.o
+obj-$(CONFIG_DMABUF_HEAPS) += dma-heap.o
obj-$(CONFIG_SYNC_FILE) += sync_file.o
obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o
obj-$(CONFIG_UDMABUF) += udmabuf.o
new file mode 100644
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Framework for userspace DMA-BUF allocations
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (C) 2019 Linaro Ltd.
+ */
+
+#include <linux/cdev.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/err.h>
+#include <linux/xarray.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/syscalls.h>
+#include <linux/dma-heap.h>
+#include <uapi/linux/dma-heap.h>
+
+#define DEVNAME "dma_heap"
+
+#define NUM_HEAP_MINORS 128
+
+/**
+ * struct dma_heap - represents a dmabuf heap in the system
+ * @name: used for debugging/device-node name
+ * @ops: ops struct for this heap
+ * @priv: private data for this heap
+ * @minor: minor number of this heap device
+ * @heap_devt: heap device node
+ * @heap_cdev: heap char device
+ *
+ * Represents a heap of memory from which buffers can be made.
+ */
+struct dma_heap {
+ const char *name;
+ struct dma_heap_ops *ops;
+ void *priv;
+ unsigned int minor;
+ dev_t heap_devt;
+ struct cdev heap_cdev;
+};
+
+static dev_t dma_heap_devt;
+static struct class *dma_heap_class;
+static DEFINE_XARRAY_ALLOC(dma_heap_minors);
+
+static int dma_heap_buffer_alloc(struct dma_heap *heap, size_t len,
+ unsigned int fd_flags,
+ unsigned int heap_flags)
+{
+ /*
+ * Allocations from all heaps have to begin
+ * and end on page boundaries.
+ */
+ len = PAGE_ALIGN(len);
+ if (!len)
+ return -EINVAL;
+
+ return heap->ops->allocate(heap, len, fd_flags, heap_flags);
+}
+
+static int dma_heap_open(struct inode *inode, struct file *file)
+{
+ struct dma_heap *heap;
+
+ heap = xa_load(&dma_heap_minors, iminor(inode));
+ if (!heap) {
+ pr_err("dma_heap: minor %d unknown.\n", iminor(inode));
+ return -ENODEV;
+ }
+
+ /* instance data as context */
+ file->private_data = heap;
+ nonseekable_open(inode, file);
+
+ return 0;
+}
+
+static long dma_heap_ioctl_allocate(struct file *file, unsigned long arg)
+{
+ struct dma_heap_allocation_data heap_allocation;
+ struct dma_heap *heap = file->private_data;
+ int fd;
+
+ if (copy_from_user(&heap_allocation, (void __user *)arg,
+ sizeof(heap_allocation)))
+ return -EFAULT;
+
+ if (heap_allocation.fd ||
+ heap_allocation.reserved0 ||
+ heap_allocation.reserved1) {
+ pr_warn_once("dma_heap: ioctl data not valid\n");
+ return -EINVAL;
+ }
+
+ if (heap_allocation.fd_flags & ~DMA_HEAP_VALID_FD_FLAGS) {
+ pr_warn_once("dma_heap: fd_flags has invalid or unsupported flags set\n");
+ return -EINVAL;
+ }
+
+ if (heap_allocation.heap_flags & ~DMA_HEAP_VALID_HEAP_FLAGS) {
+ pr_warn_once("dma_heap: heap flags has invalid or unsupported flags set\n");
+ return -EINVAL;
+ }
+
+ fd = dma_heap_buffer_alloc(heap, heap_allocation.len,
+ heap_allocation.fd_flags,
+ heap_allocation.heap_flags);
+ if (fd < 0)
+ return fd;
+
+ heap_allocation.fd = fd;
+
+ if (copy_to_user((void __user *)arg, &heap_allocation,
+ sizeof(heap_allocation))) {
+ ksys_close(fd);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static long dma_heap_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret = 0;
+
+ switch (cmd) {
+ case DMA_HEAP_IOC_ALLOC:
+ ret = dma_heap_ioctl_allocate(file, arg);
+ break;
+ default:
+ return -ENOTTY;
+ }
+
+ return ret;
+}
+
+static const struct file_operations dma_heap_fops = {
+ .owner = THIS_MODULE,
+ .open = dma_heap_open,
+ .unlocked_ioctl = dma_heap_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = dma_heap_ioctl,
+#endif
+};
+
+/**
+ * dma_heap_get_data() - get per-subdriver data for the heap
+ * @heap: DMA-Heap to retrieve private data for
+ *
+ * Returns:
+ * The per-subdriver data for the heap.
+ */
+void *dma_heap_get_data(struct dma_heap *heap)
+{
+ return heap->priv;
+}
+
+struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
+{
+ struct dma_heap *heap, *err_ret;
+ struct device *dev_ret;
+ int ret;
+
+ if (!exp_info->name || !strcmp(exp_info->name, "")) {
+ pr_err("dma_heap: Cannot add heap without a name\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!exp_info->ops || !exp_info->ops->allocate) {
+ pr_err("dma_heap: Cannot add heap with invalid ops struct\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ heap = kzalloc(sizeof(*heap), GFP_KERNEL);
+ if (!heap)
+ return ERR_PTR(-ENOMEM);
+
+ heap->name = exp_info->name;
+ heap->ops = exp_info->ops;
+ heap->priv = exp_info->priv;
+
+ /* Find unused minor number */
+ ret = xa_alloc(&dma_heap_minors, &heap->minor, heap,
+ XA_LIMIT(0, NUM_HEAP_MINORS - 1), GFP_KERNEL);
+ if (ret < 0) {
+ pr_err("dma_heap: Unable to get minor number for heap\n");
+ err_ret = ERR_PTR(ret);
+ goto err0;
+ }
+
+ /* Create device */
+ heap->heap_devt = MKDEV(MAJOR(dma_heap_devt), heap->minor);
+
+ cdev_init(&heap->heap_cdev, &dma_heap_fops);
+ ret = cdev_add(&heap->heap_cdev, heap->heap_devt, 1);
+ if (ret < 0) {
+ pr_err("dma_heap: Unable to add char device\n");
+ err_ret = ERR_PTR(ret);
+ goto err1;
+ }
+
+ dev_ret = device_create(dma_heap_class,
+ NULL,
+ heap->heap_devt,
+ NULL,
+ heap->name);
+ if (IS_ERR(dev_ret)) {
+ pr_err("dma_heap: Unable to create device\n");
+ err_ret = ERR_CAST(dev_ret);
+ goto err2;
+ }
+
+ return heap;
+
+err2:
+ cdev_del(&heap->heap_cdev);
+err1:
+ xa_erase(&dma_heap_minors, heap->minor);
+err0:
+ kfree(heap);
+ return err_ret;
+}
+
+static char *dma_heap_devnode(struct device *dev, umode_t *mode)
+{
+ return kasprintf(GFP_KERNEL, "dma_heap/%s", dev_name(dev));
+}
+
+static int dma_heap_init(void)
+{
+ int ret;
+
+ ret = alloc_chrdev_region(&dma_heap_devt, 0, NUM_HEAP_MINORS, DEVNAME);
+ if (ret)
+ return ret;
+
+ dma_heap_class = class_create(THIS_MODULE, DEVNAME);
+ if (IS_ERR(dma_heap_class)) {
+ unregister_chrdev_region(dma_heap_devt, NUM_HEAP_MINORS);
+ return PTR_ERR(dma_heap_class);
+ }
+ dma_heap_class->devnode = dma_heap_devnode;
+
+ return 0;
+}
+subsys_initcall(dma_heap_init);
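
To show how an exporter is expected to plug into this framework, a hedged sketch of a heap driver registering itself at init time follows; my_heap_allocate and my_heap_init are hypothetical names, only struct dma_heap_ops, struct dma_heap_export_info and dma_heap_add() come from this patch:

#include <linux/dma-heap.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>

/* Hypothetical allocate op: a real heap would build a dma-buf and return its fd. */
static int my_heap_allocate(struct dma_heap *heap, unsigned long len,
                            unsigned long fd_flags, unsigned long heap_flags)
{
        return -ENOMEM; /* placeholder */
}

static struct dma_heap_ops my_heap_ops = {
        .allocate = my_heap_allocate,
};

static int __init my_heap_init(void)
{
        struct dma_heap_export_info exp_info = {
                .name = "my_heap",      /* becomes /dev/dma_heap/my_heap */
                .ops = &my_heap_ops,
                .priv = NULL,           /* retrievable later via dma_heap_get_data() */
        };
        struct dma_heap *heap;

        heap = dma_heap_add(&exp_info);
        if (IS_ERR(heap))
                return PTR_ERR(heap);
        return 0;
}
module_init(my_heap_init);
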
new file mode 100644
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * DMABUF Heaps Allocation Infrastructure
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (C) 2019 Linaro Ltd.
+ */
+
+#ifndef _DMA_HEAPS_H
+#define _DMA_HEAPS_H
+
+#include <linux/cdev.h>
+#include <linux/types.h>
+
+struct dma_heap;
+
+/**
+ * struct dma_heap_ops - ops to operate on a given heap
+ * @allocate: allocate dmabuf and return fd
+ *
+ * allocate returns dmabuf fd on success, -errno on error.
+ */
+struct dma_heap_ops {
+ int (*allocate)(struct dma_heap *heap,
+ unsigned long len,
+ unsigned long fd_flags,
+ unsigned long heap_flags);
+};
+
+/**
+ * struct dma_heap_export_info - information needed to export a new dmabuf heap
+ * @name: used for debugging/device-node name
+ * @ops: ops struct for this heap
+ * @priv: heap exporter private data
+ *
+ * Information needed to export a new dmabuf heap.
+ */
+struct dma_heap_export_info {
+ const char *name;
+ struct dma_heap_ops *ops;
+ void *priv;
+};
+
+/**
+ * dma_heap_get_data() - get per-heap driver data
+ * @heap: DMA-Heap to retrieve private data for
+ *
+ * Returns:
+ * The per-heap data for the heap.
+ */
+void *dma_heap_get_data(struct dma_heap *heap);
+
+/**
+ * dma_heap_add() - adds a heap to dmabuf heaps
+ * @exp_info: information needed to register this heap
+ *
+ * Returns:
+ * The newly created dma_heap on success, ERR_PTR(-errno) on failure.
+ */
+struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info);
+
+#endif /* _DMA_HEAPS_H */
new file mode 100644
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * DMABUF Heaps Userspace API
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (C) 2019 Linaro Ltd.
+ */
+#ifndef _UAPI_LINUX_DMABUF_HEAP_H
+#define _UAPI_LINUX_DMABUF_HEAP_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/**
+ * DOC: DMABUF Heaps Userspace API
+ */
+
+/* Valid FD_FLAGS are O_CLOEXEC, O_RDONLY, O_WRONLY, O_RDWR */
+#define DMA_HEAP_VALID_FD_FLAGS (O_CLOEXEC | O_ACCMODE)
+
+/* Currently no heap flags */
+#define DMA_HEAP_VALID_HEAP_FLAGS (0)
+
+/**
+ * struct dma_heap_allocation_data - metadata passed from userspace for
+ * allocations
+ * @len: size of the allocation
+ * @fd: will be populated with a fd which provides the
+ * handle to the allocated dma-buf
+ * @fd_flags: file descriptor flags used when allocating
+ * @heap_flags: flags passed to heap
+ * @reserved0: reserved for future use, must be zero
+ * @reserved1: reserved for future use, must be zero
+ *
+ * Provided by userspace as an argument to the ioctl.
+ */
+struct dma_heap_allocation_data {
+ __u64 len;
+ __u32 fd;
+ __u32 fd_flags;
+ __u64 heap_flags;
+ __u32 reserved0;
+ __u32 reserved1;
+};
+
+#define DMA_HEAP_IOC_MAGIC 'H'
+
+/**
+ * DOC: DMA_HEAP_IOC_ALLOC - allocate memory from a heap
+ *
+ * Takes a dma_heap_allocation_data struct and returns it with the fd field
+ * populated with the dmabuf handle of the allocation.
+ */
+#define DMA_HEAP_IOC_ALLOC _IOWR(DMA_HEAP_IOC_MAGIC, 0, \
+ struct dma_heap_allocation_data)
+
+#endif /* _UAPI_LINUX_DMABUF_HEAP_H */
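
The fd written back in @fd is an ordinary dma-buf file descriptor, so userspace can hand it to any driver that imports dma-bufs, or map it for CPU access where the heap's exporter supports mmap; a minimal, hedged sketch:

#include <stddef.h>
#include <sys/mman.h>

/* Map an allocated dma-buf fd for CPU access (only if the heap allows mmap). */
static void *map_dmabuf(int dmabuf_fd, size_t len)
{
        void *vaddr = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_SHARED, dmabuf_fd, 0);

        return vaddr == MAP_FAILED ? NULL : vaddr;
}
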