@@ -1,3 +1,6 @@
+config DMABUF_HEAPS_DEFERRED_FREE
+ tristate
+
config DMABUF_HEAPS_SYSTEM
bool "DMA-BUF System Heap"
depends on DMABUF_HEAPS
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_DMABUF_HEAPS_DEFERRED_FREE) += deferred-free-helper.o
obj-$(CONFIG_DMABUF_HEAPS_SYSTEM) += system_heap.o
obj-$(CONFIG_DMABUF_HEAPS_CMA) += cma_heap.o
obj-$(CONFIG_DMABUF_HEAPS_DSP) += dsp_heap.o
new file mode 100644
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Deferred dmabuf freeing helper
+ *
+ * Copyright (C) 2020 Linaro, Ltd.
+ *
+ * Based on the ION page pool code
+ * Copyright (C) 2011 Google, Inc.
+ */
+
+#include <linux/freezer.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/sched/signal.h>
+#include <linux/shrinker.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/swap.h>
+#include <linux/wait.h>
+
+#include "deferred-free-helper.h"
+
+static LIST_HEAD(free_list);
+static size_t list_nr_pages;
+static wait_queue_head_t freelist_waitqueue;
+static struct task_struct *freelist_task;
+static DEFINE_SPINLOCK(free_list_lock);
+
+void deferred_free(struct deferred_freelist_item *item,
+ void (*free)(struct deferred_freelist_item *,
+ enum df_reason),
+ size_t nr_pages)
+{
+ unsigned long flags;
+
+ if (!nr_pages)
+ return;
+
+ INIT_LIST_HEAD(&item->list);
+ item->nr_pages = nr_pages;
+ item->free = free;
+
+ spin_lock_irqsave(&free_list_lock, flags);
+ list_add(&item->list, &free_list);
+ list_nr_pages += nr_pages;
+ spin_unlock_irqrestore(&free_list_lock, flags);
+ wake_up(&freelist_waitqueue);
+}
+EXPORT_SYMBOL_GPL(deferred_free);
+
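+/*
+ * Pop one item off the freelist and call its free callback with the
+ * given reason. Returns the number of pages the item covered, or 0 if
+ * the list was empty.
+ */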
+static size_t free_one_item(enum df_reason reason)
+{
+ unsigned long flags;
+ size_t nr_pages;
+ struct deferred_freelist_item *item;
+
+ spin_lock_irqsave(&free_list_lock, flags);
+ if (list_empty(&free_list)) {
+ spin_unlock_irqrestore(&free_list_lock, flags);
+ return 0;
+ }
+ item = list_first_entry(&free_list, struct deferred_freelist_item, list);
+ list_del(&item->list);
+ nr_pages = item->nr_pages;
+ list_nr_pages -= nr_pages;
+ spin_unlock_irqrestore(&free_list_lock, flags);
+
+ item->free(item, reason);
+ return nr_pages;
+}
+
+static unsigned long get_freelist_nr_pages(void)
+{
+ unsigned long nr_pages;
+ unsigned long flags;
+
+ spin_lock_irqsave(&free_list_lock, flags);
+ nr_pages = list_nr_pages;
+ spin_unlock_irqrestore(&free_list_lock, flags);
+ return nr_pages;
+}
+
+static unsigned long freelist_shrink_count(struct shrinker *shrinker,
+ struct shrink_control *sc)
+{
+ return get_freelist_nr_pages();
+}
+
+static unsigned long freelist_shrink_scan(struct shrinker *shrinker,
+ struct shrink_control *sc)
+{
+ unsigned long total_freed = 0;
+
+ while (total_freed < sc->nr_to_scan) {
+ size_t pages_freed = free_one_item(DF_UNDER_PRESSURE);
+
+ if (!pages_freed)
+ break;
+
+ total_freed += pages_freed;
+ }
+
+ return total_freed;
+}
+
+static struct shrinker freelist_shrinker = {
+ .count_objects = freelist_shrink_count,
+ .scan_objects = freelist_shrink_scan,
+ .seeks = DEFAULT_SEEKS,
+ .batch = 0,
+};
+
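+/*
+ * Background worker: drains the freelist one item at a time whenever it
+ * is non-empty. It is set to nice 19 at init, so normal deferred frees
+ * run at low priority in the background; the shrinker above handles the
+ * memory-pressure path.
+ */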
+static int deferred_free_thread(void *data)
+{
+ while (true) {
+ wait_event_freezable(freelist_waitqueue,
+ get_freelist_nr_pages() > 0);
+
+ free_one_item(DF_NORMAL);
+ }
+
+ return 0;
+}
+
+static int deferred_freelist_init(void)
+{
+ init_waitqueue_head(&freelist_waitqueue);
+ freelist_task = kthread_run(deferred_free_thread, NULL,
+ "%s", "dmabuf-deferred-free-worker");
+ if (IS_ERR(freelist_task)) {
+ pr_err("Failed to create deferred free thread\n");
+ return PTR_ERR(freelist_task);
+ }
+ sched_set_normal(freelist_task, 19);
+
+ return register_shrinker(&freelist_shrinker);
+}
+module_init(deferred_freelist_init);
+MODULE_LICENSE("GPL v2");
new file mode 100644
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Deferred dmabuf freeing helper
+ *
+ * Copyright (C) 2020 Linaro, Ltd.
+ *
+ * Based on the ION page pool code
+ * Copyright (C) 2011 Google, Inc.
+ */
+
+#ifndef DEFERRED_FREE_HELPER_H
+#define DEFERRED_FREE_HELPER_H
+
+#include <linux/list.h>
+#include <linux/types.h>
+
+/**
+ * enum df_reason - reason the free callback was invoked
+ *
+ * Indicates why the free function was called on the item. This is
+ * useful when deferred_free is used in combination with a page pool,
+ * so that under memory pressure pages can be released back to the
+ * system immediately instead of being returned to the pool.
+ *
+ * @DF_NORMAL: Normal deferred free
+ *
+ * @DF_UNDER_PRESSURE: Free was called because the system is under
+ * memory pressure, usually from a shrinker. Avoid allocating memory
+ * in the free call, as it may fail.
+ */
+enum df_reason {
+ DF_NORMAL,
+ DF_UNDER_PRESSURE,
+};
+
+/**
+ * struct deferred_freelist_item - item structure for the deferred freelist
+ *
+ * Embed this in any structure whose freeing you want to defer.
+ *
+ * @nr_pages: number of pages used by item to be freed
+ * @free: function pointer to be called when freeing the item
+ * @list: list entry for the deferred list
+ */
+struct deferred_freelist_item {
+ size_t nr_pages;
+ void (*free)(struct deferred_freelist_item *i,
+ enum df_reason reason);
+ struct list_head list;
+};
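+
+/*
+ * Illustrative sketch (not part of this helper): a heap can embed the
+ * item in its buffer structure and branch on the reason in its free
+ * callback. "my_heap_buffer", "my_heap_buffer_free" and "my_pool_add"
+ * are hypothetical names used only for this example.
+ *
+ *   struct my_heap_buffer {
+ *           struct page *page;
+ *           struct deferred_freelist_item deferred_free;
+ *   };
+ *
+ *   static void my_heap_buffer_free(struct deferred_freelist_item *item,
+ *                                   enum df_reason reason)
+ *   {
+ *           struct my_heap_buffer *buf =
+ *                   container_of(item, struct my_heap_buffer, deferred_free);
+ *
+ *           if (reason == DF_UNDER_PRESSURE)
+ *                   __free_page(buf->page);   // return the page now
+ *           else
+ *                   my_pool_add(buf->page);   // DF_NORMAL: pooling is fine
+ *           kfree(buf);
+ *   }
+ */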
+
+/**
+ * deferred_free - add an item to the deferred free list
+ *
+ * @item: Pointer to the deferred_freelist_item field embedded in a structure
+ * @free: Function called to actually free the item
+ * @nr_pages: Number of pages to be freed
+ */
+void deferred_free(struct deferred_freelist_item *item,
+ void (*free)(struct deferred_freelist_item *i,
+ enum df_reason reason),
+ size_t nr_pages);
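+
+/*
+ * Minimal call-site sketch, reusing the hypothetical my_heap_buffer and
+ * my_heap_buffer_free from the example above: rather than freeing
+ * synchronously in the dma-buf release path, queue the item and let the
+ * worker thread (or the shrinker, under memory pressure) invoke the
+ * callback later.
+ *
+ *   static void my_heap_release(struct my_heap_buffer *buf)
+ *   {
+ *           deferred_free(&buf->deferred_free, my_heap_buffer_free, 1);
+ *   }
+ */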
+#endif