@@ -1374,6 +1374,54 @@ void dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
}
EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, DMA_BUF);

+/**
+ * dma_buf_charge_transfer - Change the GPU cgroup to which the provided dma_buf
+ * is charged.
+ * @dmabuf: [in] buffer whose charge will be migrated to a different GPU
+ * cgroup
+ * @gpucg: [in] the destination GPU cgroup for dmabuf's charge
+ *
+ * Only a task that belongs to the cgroup the buffer is currently charged to
+ * may call this function; any other caller receives -EPERM.
+ *
+ * Returns 0 on success, or a negative errno code otherwise.
+ */
+int dma_buf_charge_transfer(struct dma_buf *dmabuf, struct gpucg *gpucg)
+{
+#ifdef CONFIG_CGROUP_GPU
+ struct gpucg *current_gpucg;
+ int ret = 0;
+
+ /*
+ * Verify that the cgroup of the process requesting the transfer is the
+ * same as the one the buffer is currently charged to.
+ */
+ current_gpucg = gpucg_get(current);
+ mutex_lock(&dmabuf->lock);
+ if (current_gpucg != dmabuf->gpucg) {
+ ret = -EPERM;
+ goto err;
+ }
+
+ ret = gpucg_try_charge(gpucg, dmabuf->gpucg_dev, dmabuf->size);
+ if (ret)
+ goto err;
+
+ dmabuf->gpucg = gpucg;
+
+	/* Uncharge the buffer from the cgroup it was previously charged to. */
+ gpucg_uncharge(current_gpucg, dmabuf->gpucg_dev, dmabuf->size);
+
+err:
+ mutex_unlock(&dmabuf->lock);
+ gpucg_put(current_gpucg);
+ return ret;
+#else
+ return 0;
+#endif /* CONFIG_CGROUP_GPU */
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_charge_transfer, DMA_BUF);
+
#ifdef CONFIG_DEBUG_FS
static int dma_buf_debug_show(struct seq_file *s, void *unused)
{
@@ -646,4 +646,6 @@ int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
unsigned long);
int dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map);
void dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map);
+
+int dma_buf_charge_transfer(struct dma_buf *dmabuf, struct gpucg *gpucg);
#endif /* __DMA_BUF_H__ */
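
For context, a minimal usage sketch (illustrative only, not part of this
patch): a buffer-sharing service that allocated a dma-buf, and is therefore
in the cgroup currently charged for it, hands the charge over to the cgroup
of the process it is exporting the buffer to. The wrapper function and the
target_task parameter are assumptions for illustration; only gpucg_get(),
gpucg_put() and dma_buf_charge_transfer() are interfaces shown above.

/*
 * Hypothetical caller: runs in the process the buffer is currently charged
 * to (e.g. a centralized allocator service) and moves the charge to the
 * cgroup of the task that is receiving the buffer.
 */
static int example_transfer_charge(struct dma_buf *dmabuf,
				   struct task_struct *target_task)
{
	struct gpucg *dst = gpucg_get(target_task);
	int ret;

	/* Fails with -EPERM unless the caller's cgroup matches dmabuf's. */
	ret = dma_buf_charge_transfer(dmabuf, dst);
	if (ret)
		gpucg_put(dst);

	/*
	 * On success the reference on dst is assumed to stay with the
	 * buffer's new charge; how it is eventually dropped depends on the
	 * rest of the GPU cgroup series and is not shown here.
	 */
	return ret;
}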