@@ -38,6 +38,7 @@ static const struct drm_gem_object_funcs drm_gem_cma_default_funcs = {
.print_info = drm_gem_cma_print_info,
.get_sg_table = drm_gem_cma_get_sg_table,
.vmap = drm_gem_cma_vmap,
+ .vmap_local = drm_gem_cma_vmap_local,
.mmap = drm_gem_cma_mmap,
.vm_ops = &drm_gem_cma_vm_ops,
};
@@ -471,6 +472,32 @@ int drm_gem_cma_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
}
EXPORT_SYMBOL_GPL(drm_gem_cma_vmap);

+/**
+ * drm_gem_cma_vmap_local - map a CMA GEM object into the kernel's virtual
+ * address space
+ * @obj: GEM object
+ * @map: Returns the kernel virtual address of the CMA GEM object's backing
+ * store.
+ *
+ * This function maps a CMA GEM object into the kernel's virtual address
+ * space. Since CMA buffers are already mapped into the kernel virtual
+ * address space, this simply returns the cached virtual address. Drivers
+ * using the CMA helpers should set this as their DRM driver's
+ * &drm_gem_object_funcs.vmap_local callback.
+ *
+ * Returns:
+ * 0 on success, or a negative error code otherwise.
+ */
+int drm_gem_cma_vmap_local(struct drm_gem_object *obj, struct dma_buf_map *map)
+{
+ struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
+
+ dma_buf_map_set_vaddr(map, cma_obj->vaddr);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_vmap_local);
+
/**
* drm_gem_cma_mmap - memory-map an exported CMA GEM object
* @obj: GEM object
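The helper above just hands back the CMA object's cached kernel mapping; the point of the separate vmap_local entry point is a short-term mapping that the caller sets up and tears down while holding the buffer's reservation lock, rather than a long-lived pin. A minimal sketch of how a caller might exercise this path follows. drm_gem_vmap_local() and drm_gem_vunmap_local() are assumed here to be the object-level wrappers over &drm_gem_object_funcs.vmap_local from the same proposed series (their final names and signatures may differ); dma_resv_lock()/dma_resv_unlock() and struct dma_buf_map are existing mainline interfaces.

/*
 * Sketch only, not part of the patch: touch a CMA-backed GEM object
 * through the vmap_local path while holding its reservation lock.
 * drm_gem_vmap_local()/drm_gem_vunmap_local() are assumed to be the
 * object-level wrappers from the same proposed series.
 */
#include <linux/dma-buf-map.h>
#include <linux/dma-resv.h>
#include <linux/string.h>

#include <drm/drm_gem.h>

static int example_clear_bo(struct drm_gem_object *obj)
{
	struct dma_buf_map map;
	int ret;

	ret = dma_resv_lock(obj->resv, NULL);
	if (ret)
		return ret;

	ret = drm_gem_vmap_local(obj, &map);	/* dispatches to drm_gem_cma_vmap_local() for CMA-based drivers */
	if (ret)
		goto out_unlock;

	/* The mapping is only meant to stay valid while the lock is held. */
	memset(map.vaddr, 0, obj->size);

	drm_gem_vunmap_local(obj, &map);

out_unlock:
	dma_resv_unlock(obj->resv);
	return ret;
}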
@@ -387,6 +387,7 @@ static const struct drm_gem_object_funcs vc4_gem_object_funcs = {
.export = vc4_prime_export,
.get_sg_table = drm_gem_cma_get_sg_table,
.vmap = vc4_prime_vmap,
+ .vmap_local = drm_gem_cma_vmap_local,
.vm_ops = &vc4_vm_ops,
};

@@ -99,6 +99,7 @@ drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sgt);
int drm_gem_cma_vmap(struct drm_gem_object *obj, struct dma_buf_map *map);
+int drm_gem_cma_vmap_local(struct drm_gem_object *obj, struct dma_buf_map *map);
int drm_gem_cma_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);

/**