
[PATCHv8,20/26] v4l: vb2-dma-contig: add support for DMABUF exporting

Message ID 1344958496-9373-21-git-send-email-t.stanislaws@samsung.com
State New

Commit Message

Tomasz Stanislawski Aug. 14, 2012, 3:34 p.m. UTC
This patch adds support for exporting a dma-contig buffer using the
DMABUF interface.

Signed-off-by: Tomasz Stanislawski <t.stanislaws@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
---
 drivers/media/video/videobuf2-dma-contig.c |  204 ++++++++++++++++++++++++++++
 1 file changed, 204 insertions(+)

Comments

Laurent Pinchart Aug. 21, 2012, 10:03 a.m. UTC | #1
Hi Tomasz,

Thanks for the patch.

Just a couple of small comments below.

On Tuesday 14 August 2012 17:34:50 Tomasz Stanislawski wrote:
> This patch adds support for exporting a dma-contig buffer using the
> DMABUF interface.
> 
> Signed-off-by: Tomasz Stanislawski <t.stanislaws@samsung.com>
> Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
> ---
>  drivers/media/video/videobuf2-dma-contig.c |  204 +++++++++++++++++++++++++
>  1 file changed, 204 insertions(+)
> 
> diff --git a/drivers/media/video/videobuf2-dma-contig.c
> b/drivers/media/video/videobuf2-dma-contig.c index 7fc71a0..bb2b4ac8 100644
> --- a/drivers/media/video/videobuf2-dma-contig.c
> +++ b/drivers/media/video/videobuf2-dma-contig.c

[snip]

> +static struct sg_table *vb2_dc_dmabuf_ops_map(
> +	struct dma_buf_attachment *db_attach, enum dma_data_direction dir)
> +{
> +	struct vb2_dc_attachment *attach = db_attach->priv;
> +	/* stealing dmabuf mutex to serialize map/unmap operations */

Why isn't this operation serialized by the dma-buf core itself ?

> +	struct mutex *lock = &db_attach->dmabuf->lock;
> +	struct sg_table *sgt;
> +	int ret;
> +
> +	mutex_lock(lock);
> +
> +	sgt = &attach->sgt;
> +	/* return previously mapped sg table */
> +	if (attach->dir == dir) {
> +		mutex_unlock(lock);
> +		return sgt;
> +	}
> +
> +	/* release any previous cache */
> +	if (attach->dir != DMA_NONE) {
> +		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
> +			attach->dir);
> +		attach->dir = DMA_NONE;
> +	}
> +
> +	/* mapping to the client with new direction */
> +	ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dir);
> +	if (ret <= 0) {
> +		pr_err("failed to map scatterlist\n");
> +		mutex_unlock(lock);
> +		return ERR_PTR(-EIO);
> +	}
> +
> +	attach->dir = dir;
> +
> +	mutex_unlock(lock);
> +
> +	return sgt;
> +}

[snip]

> +static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
> +	struct vm_area_struct *vma)
> +{
> +	/* Dummy support for mmap */
> +	return -ENOTTY;

What about calling the dma-contig mmap handler here ? Is there a specific 
reason why you haven't implemented mmap support for dmabuf ?

> +}
> +
> +static struct dma_buf_ops vb2_dc_dmabuf_ops = {
> +	.attach = vb2_dc_dmabuf_ops_attach,
> +	.detach = vb2_dc_dmabuf_ops_detach,
> +	.map_dma_buf = vb2_dc_dmabuf_ops_map,
> +	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
> +	.kmap = vb2_dc_dmabuf_ops_kmap,
> +	.kmap_atomic = vb2_dc_dmabuf_ops_kmap,
> +	.vmap = vb2_dc_dmabuf_ops_vmap,
> +	.mmap = vb2_dc_dmabuf_ops_mmap,
> +	.release = vb2_dc_dmabuf_ops_release,
> +};
> +
> +static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
> +{
> +	int ret;
> +	struct sg_table *sgt;
> +
> +	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
> +	if (!sgt) {
> +		dev_err(buf->dev, "failed to alloc sg table\n");
> +		return ERR_PTR(-ENOMEM);
> +	}
> +
> +	ret = dma_get_sgtable(buf->dev, sgt, buf->vaddr, buf->dma_addr,
> +		buf->size);
> +	if (ret < 0) {
> +		dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
> +		kfree(sgt);
> +		return ERR_PTR(ret);
> +	}

As this function is only used below, where the exact value of the error code 
is ignored, what about just returning NULL on failure ? Another option is to 
return the error code in vb2_dc_get_dmabuf (not sure if that would be useful 
though).

> +
> +	return sgt;
> +}
> +
> +static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv)
> +{
> +	struct vb2_dc_buf *buf = buf_priv;
> +	struct dma_buf *dbuf;
> +	struct sg_table *sgt = buf->sgt_base;
> +
> +	if (!sgt)
> +		sgt = vb2_dc_get_base_sgt(buf);
> +	if (WARN_ON(IS_ERR(sgt)))
> +		return NULL;
> +
> +	/* cache base sgt for future use */
> +	buf->sgt_base = sgt;

You can move this assignment inside the first if, there's no need to execute
it every time. The WARN_ON can also be moved inside the first if, as
buf->sgt_base will either be NULL or valid. You can then get rid of the sgt
variable initialization by testing if (!buf->sgt_base).

> +	dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, 0);
> +	if (IS_ERR(dbuf))
> +		return NULL;
> +
> +	/* dmabuf keeps reference to vb2 buffer */
> +	atomic_inc(&buf->refcount);
> +
> +	return dbuf;
> +}
Tomasz Stanislawski Aug. 21, 2012, 1:47 p.m. UTC | #2
Hi Laurent,
Thank you for your comments.

On 08/21/2012 12:03 PM, Laurent Pinchart wrote:
> Hi Tomasz,
> 
> Thanks for the patch.
> 
> Just a couple of small comments below.
> 
> On Tuesday 14 August 2012 17:34:50 Tomasz Stanislawski wrote:
>> This patch adds support for exporting a dma-contig buffer using the
>> DMABUF interface.
>>
>> Signed-off-by: Tomasz Stanislawski <t.stanislaws@samsung.com>
>> Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
>> ---
>>  drivers/media/video/videobuf2-dma-contig.c |  204 +++++++++++++++++++++++++
>>  1 file changed, 204 insertions(+)
>>
>> diff --git a/drivers/media/video/videobuf2-dma-contig.c
>> b/drivers/media/video/videobuf2-dma-contig.c index 7fc71a0..bb2b4ac8 100644
>> --- a/drivers/media/video/videobuf2-dma-contig.c
>> +++ b/drivers/media/video/videobuf2-dma-contig.c
> 
> [snip]
> 
>> +static struct sg_table *vb2_dc_dmabuf_ops_map(
>> +	struct dma_buf_attachment *db_attach, enum dma_data_direction dir)
>> +{
>> +	struct vb2_dc_attachment *attach = db_attach->priv;
>> +	/* stealing dmabuf mutex to serialize map/unmap operations */
> 
> Why isn't this operation serialized by the dma-buf core itself ?
> 

Indeed, it is a very good question. The lock was introduced in RFCv3 of
the DMABUF patches. It was dedicated to serializing attach/detach calls.
No requirements for map/unmap serialization were stated, so serialization
was delegated to the exporter.

A deadlock could occur if dma_buf_map_attachment() is called from inside
the attach op. IMO, such an operation is invalid because the attachment
list is not in a valid state while the attach op is being processed.

Do you think that stealing a lock from dma-buf internals is too hacky?
I prefer not to introduce any extra locks in the dma-contig allocator,
but it is not a big deal to add one.
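
For illustration, a minimal sketch of that alternative: a dedicated mutex
kept in the attachment itself, since the state being protected (attach->sgt
and attach->dir) is per-attachment. The added lock field and its
initialization are assumptions here, not part of the posted patch.

	struct vb2_dc_attachment {
		struct sg_table sgt;
		enum dma_data_direction dir;
		/* serializes map/unmap on this attachment */
		struct mutex lock;
	};

	/* the attach op would do mutex_init(&attach->lock) after kzalloc() */

	static struct sg_table *vb2_dc_dmabuf_ops_map(
		struct dma_buf_attachment *db_attach, enum dma_data_direction dir)
	{
		struct vb2_dc_attachment *attach = db_attach->priv;
		struct sg_table *sgt = &attach->sgt;
		int ret;

		mutex_lock(&attach->lock);

		/* return previously mapped sg table */
		if (attach->dir == dir) {
			mutex_unlock(&attach->lock);
			return sgt;
		}

		/* release any previous mapping */
		if (attach->dir != DMA_NONE) {
			dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
				attach->dir);
			attach->dir = DMA_NONE;
		}

		/* map to the client with the new direction */
		ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dir);
		if (ret <= 0) {
			pr_err("failed to map scatterlist\n");
			mutex_unlock(&attach->lock);
			return ERR_PTR(-EIO);
		}

		attach->dir = dir;

		mutex_unlock(&attach->lock);

		return sgt;
	}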

>> +	struct mutex *lock = &db_attach->dmabuf->lock;
>> +	struct sg_table *sgt;
>> +	int ret;
>> +
>> +	mutex_lock(lock);
>> +
>> +	sgt = &attach->sgt;
>> +	/* return previously mapped sg table */
>> +	if (attach->dir == dir) {
>> +		mutex_unlock(lock);
>> +		return sgt;
>> +	}
>> +
>> +	/* release any previous cache */
>> +	if (attach->dir != DMA_NONE) {
>> +		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
>> +			attach->dir);
>> +		attach->dir = DMA_NONE;
>> +	}
>> +
>> +	/* mapping to the client with new direction */
>> +	ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dir);
>> +	if (ret <= 0) {
>> +		pr_err("failed to map scatterlist\n");
>> +		mutex_unlock(lock);
>> +		return ERR_PTR(-EIO);
>> +	}
>> +
>> +	attach->dir = dir;
>> +
>> +	mutex_unlock(lock);
>> +
>> +	return sgt;
>> +}
> 
> [snip]
> 
>> +static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
>> +	struct vm_area_struct *vma)
>> +{
>> +	/* Dummy support for mmap */
>> +	return -ENOTTY;
> 
> What about calling the dma-contig mmap handler here ? Is there a specific 
> reason why you haven't implemented mmap support for dmabuf ?
> 

The mmap op is mandatory in the latest DMABUF API.
I added a stub function to make dma-contig work with DMABUF without much
effort. Calling vb2_dc_mmap from the mmap op seems to be a simple and
safe way to handle the mmap functionality. Thank you for spotting this :)
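
For reference, a minimal sketch of that change, assuming (as in this patch)
that the private data passed to dma_buf_export() is the struct vb2_dc_buf:

	static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
		struct vm_area_struct *vma)
	{
		/* reuse the existing dma-contig mmap handler */
		return vb2_dc_mmap(dbuf->priv, vma);
	}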

>> +}
>> +
>> +static struct dma_buf_ops vb2_dc_dmabuf_ops = {
>> +	.attach = vb2_dc_dmabuf_ops_attach,
>> +	.detach = vb2_dc_dmabuf_ops_detach,
>> +	.map_dma_buf = vb2_dc_dmabuf_ops_map,
>> +	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
>> +	.kmap = vb2_dc_dmabuf_ops_kmap,
>> +	.kmap_atomic = vb2_dc_dmabuf_ops_kmap,
>> +	.vmap = vb2_dc_dmabuf_ops_vmap,
>> +	.mmap = vb2_dc_dmabuf_ops_mmap,
>> +	.release = vb2_dc_dmabuf_ops_release,
>> +};
>> +
>> +static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
>> +{
>> +	int ret;
>> +	struct sg_table *sgt;
>> +
>> +	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
>> +	if (!sgt) {
>> +		dev_err(buf->dev, "failed to alloc sg table\n");
>> +		return ERR_PTR(-ENOMEM);
>> +	}
>> +
>> +	ret = dma_get_sgtable(buf->dev, sgt, buf->vaddr, buf->dma_addr,
>> +		buf->size);
>> +	if (ret < 0) {
>> +		dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
>> +		kfree(sgt);
>> +		return ERR_PTR(ret);
>> +	}
> 
> As this function is only used below, where the exact value of the error code 
> is ignored, what about just returning NULL on failure ? Another option is to 
> return the error code in vb2_dc_get_dmabuf (not sure if that would be useful 
> though).
> 
>> +
>> +	return sgt;
>> +}
>> +
>> +static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv)
>> +{
>> +	struct vb2_dc_buf *buf = buf_priv;
>> +	struct dma_buf *dbuf;
>> +	struct sg_table *sgt = buf->sgt_base;
>> +
>> +	if (!sgt)
>> +		sgt = vb2_dc_get_base_sgt(buf);
>> +	if (WARN_ON(IS_ERR(sgt)))
>> +		return NULL;
>> +
>> +	/* cache base sgt for future use */
>> +	buf->sgt_base = sgt;
> 
> You can move this assignment inside the first if, there's no need to execute
> it every time. The WARN_ON can also be moved inside the first if, as
> buf->sgt_base will either be NULL or valid. You can then get rid of the sgt
> variable initialization by testing if (!buf->sgt_base).

I agree. I will apply this fix in v9.
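
With both suggestions applied, and assuming vb2_dc_get_base_sgt() is also
changed to return NULL on failure as proposed above, the exporter could look
roughly like this sketch:

	static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv)
	{
		struct vb2_dc_buf *buf = buf_priv;
		struct dma_buf *dbuf;

		/* cache the base sgt on first export */
		if (!buf->sgt_base)
			buf->sgt_base = vb2_dc_get_base_sgt(buf);

		if (WARN_ON(!buf->sgt_base))
			return NULL;

		dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, 0);
		if (IS_ERR(dbuf))
			return NULL;

		/* dmabuf keeps a reference to the vb2 buffer */
		atomic_inc(&buf->refcount);

		return dbuf;
	}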

> 
>> +	dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, 0);
>> +	if (IS_ERR(dbuf))
>> +		return NULL;
>> +
>> +	/* dmabuf keeps reference to vb2 buffer */
>> +	atomic_inc(&buf->refcount);
>> +
>> +	return dbuf;
>> +}
>

Regards,
Tomasz Stanislawski
Laurent Pinchart Aug. 21, 2012, 2:07 p.m. UTC | #3
Hi Tomasz,

On Tuesday 21 August 2012 15:47:13 Tomasz Stanislawski wrote:
> On 08/21/2012 12:03 PM, Laurent Pinchart wrote:
> > On Tuesday 14 August 2012 17:34:50 Tomasz Stanislawski wrote:
> >> This patch adds support for exporting a dma-contig buffer using the
> >> DMABUF interface.
> >> 
> >> Signed-off-by: Tomasz Stanislawski <t.stanislaws@samsung.com>
> >> Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
> >> ---
> >> 
> >>  drivers/media/video/videobuf2-dma-contig.c |  204 ++++++++++++++++++++++
> >>  1 file changed, 204 insertions(+)
> >> 
> >> diff --git a/drivers/media/video/videobuf2-dma-contig.c
> >> b/drivers/media/video/videobuf2-dma-contig.c index 7fc71a0..bb2b4ac8
> >> 100644
> >> --- a/drivers/media/video/videobuf2-dma-contig.c
> >> +++ b/drivers/media/video/videobuf2-dma-contig.c
> > 
> > [snip]
> > 
> >> +static struct sg_table *vb2_dc_dmabuf_ops_map(
> >> +	struct dma_buf_attachment *db_attach, enum dma_data_direction dir)
> >> +{
> >> +	struct vb2_dc_attachment *attach = db_attach->priv;
> >> +	/* stealing dmabuf mutex to serialize map/unmap operations */
> > 
> > Why isn't this operation serialized by the dma-buf core itself ?
> 
> Indeed, it is a very good question. The lock was introduced in RFCv3 of
> the DMABUF patches. It was dedicated to serializing attach/detach calls.
> No requirements for map/unmap serialization were stated, so serialization
> was delegated to the exporter.
> 
> A deadlock could occur if dma_buf_map_attachment() is called from inside
> the attach op. IMO, such an operation is invalid because the attachment
> list is not in a valid state while the attach op is being processed.
> 
> Do you think that stealing a lock from dma-buf internals is too hacky?

No, I would be OK with that, but I'd like to make sure that it won't bite us 
back later. If there's a specific reason why the lock is not taken by the 
dmabuf core around map/unmap calls, stealing the same lock might cause 
unforeseen problems. That's why I would like to understand why the core 
doesn't perform locking on its own.

> I prefer not to introduce any extra locks in the dma-contig allocator,

Agreed.

> but it is not a big deal to add one.
> 
> >> +	struct mutex *lock = &db_attach->dmabuf->lock;
> >> +	struct sg_table *sgt;
> >> +	int ret;
> >> +
> >> +	mutex_lock(lock);
> >> +
> >> +	sgt = &attach->sgt;
> >> +	/* return previously mapped sg table */
> >> +	if (attach->dir == dir) {
> >> +		mutex_unlock(lock);
> >> +		return sgt;
> >> +	}
> >> +
> >> +	/* release any previous cache */
> >> +	if (attach->dir != DMA_NONE) {
> >> +		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
> >> +			attach->dir);
> >> +		attach->dir = DMA_NONE;
> >> +	}
> >> +
> >> +	/* mapping to the client with new direction */
> >> +	ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dir);
> >> +	if (ret <= 0) {
> >> +		pr_err("failed to map scatterlist\n");
> >> +		mutex_unlock(lock);
> >> +		return ERR_PTR(-EIO);
> >> +	}
> >> +
> >> +	attach->dir = dir;
> >> +
> >> +	mutex_unlock(lock);
> >> +
> >> +	return sgt;
> >> +}

Patch

diff --git a/drivers/media/video/videobuf2-dma-contig.c b/drivers/media/video/videobuf2-dma-contig.c
index 7fc71a0..bb2b4ac8 100644
--- a/drivers/media/video/videobuf2-dma-contig.c
+++ b/drivers/media/video/videobuf2-dma-contig.c
@@ -36,6 +36,7 @@  struct vb2_dc_buf {
 	/* MMAP related */
 	struct vb2_vmarea_handler	handler;
 	atomic_t			refcount;
+	struct sg_table			*sgt_base;
 
 	/* USERPTR related */
 	struct vm_area_struct		*vma;
@@ -142,6 +143,10 @@  static void vb2_dc_put(void *buf_priv)
 	if (!atomic_dec_and_test(&buf->refcount))
 		return;
 
+	if (buf->sgt_base) {
+		sg_free_table(buf->sgt_base);
+		kfree(buf->sgt_base);
+	}
 	dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr);
 	kfree(buf);
 }
@@ -213,6 +218,204 @@  static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
 }
 
 /*********************************************/
+/*         DMABUF ops for exporters          */
+/*********************************************/
+
+struct vb2_dc_attachment {
+	struct sg_table sgt;
+	enum dma_data_direction dir;
+};
+
+static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
+	struct dma_buf_attachment *dbuf_attach)
+{
+	struct vb2_dc_attachment *attach;
+	unsigned int i;
+	struct scatterlist *rd, *wr;
+	struct sg_table *sgt;
+	struct vb2_dc_buf *buf = dbuf->priv;
+	int ret;
+
+	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
+	if (!attach)
+		return -ENOMEM;
+
+	sgt = &attach->sgt;
+	/* Copy the buf->base_sgt scatter list to the attachment, as we can't
+	 * map the same scatter list to multiple attachments at the same time.
+	 */
+	ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
+	if (ret) {
+		kfree(attach);
+		return -ENOMEM;
+	}
+
+	rd = buf->sgt_base->sgl;
+	wr = sgt->sgl;
+	for (i = 0; i < sgt->orig_nents; ++i) {
+		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
+		rd = sg_next(rd);
+		wr = sg_next(wr);
+	}
+
+	attach->dir = DMA_NONE;
+	dbuf_attach->priv = attach;
+
+	return 0;
+}
+
+static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
+	struct dma_buf_attachment *db_attach)
+{
+	struct vb2_dc_attachment *attach = db_attach->priv;
+	struct sg_table *sgt;
+
+	if (!attach)
+		return;
+
+	sgt = &attach->sgt;
+
+	/* release the scatterlist cache */
+	if (attach->dir != DMA_NONE)
+		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+			attach->dir);
+	sg_free_table(sgt);
+	kfree(attach);
+	db_attach->priv = NULL;
+}
+
+static struct sg_table *vb2_dc_dmabuf_ops_map(
+	struct dma_buf_attachment *db_attach, enum dma_data_direction dir)
+{
+	struct vb2_dc_attachment *attach = db_attach->priv;
+	/* stealing dmabuf mutex to serialize map/unmap operations */
+	struct mutex *lock = &db_attach->dmabuf->lock;
+	struct sg_table *sgt;
+	int ret;
+
+	mutex_lock(lock);
+
+	sgt = &attach->sgt;
+	/* return previously mapped sg table */
+	if (attach->dir == dir) {
+		mutex_unlock(lock);
+		return sgt;
+	}
+
+	/* release any previous cache */
+	if (attach->dir != DMA_NONE) {
+		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
+			attach->dir);
+		attach->dir = DMA_NONE;
+	}
+
+	/* mapping to the client with new direction */
+	ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dir);
+	if (ret <= 0) {
+		pr_err("failed to map scatterlist\n");
+		mutex_unlock(lock);
+		return ERR_PTR(-EIO);
+	}
+
+	attach->dir = dir;
+
+	mutex_unlock(lock);
+
+	return sgt;
+}
+
+static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
+	struct sg_table *sgt, enum dma_data_direction dir)
+{
+	/* nothing to be done here */
+}
+
+static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
+{
+	/* drop reference obtained in vb2_dc_get_dmabuf */
+	vb2_dc_put(dbuf->priv);
+}
+
+static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
+{
+	struct vb2_dc_buf *buf = dbuf->priv;
+
+	return buf->vaddr + pgnum * PAGE_SIZE;
+}
+
+static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
+{
+	struct vb2_dc_buf *buf = dbuf->priv;
+
+	return buf->vaddr;
+}
+
+static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
+	struct vm_area_struct *vma)
+{
+	/* Dummy support for mmap */
+	return -ENOTTY;
+}
+
+static struct dma_buf_ops vb2_dc_dmabuf_ops = {
+	.attach = vb2_dc_dmabuf_ops_attach,
+	.detach = vb2_dc_dmabuf_ops_detach,
+	.map_dma_buf = vb2_dc_dmabuf_ops_map,
+	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
+	.kmap = vb2_dc_dmabuf_ops_kmap,
+	.kmap_atomic = vb2_dc_dmabuf_ops_kmap,
+	.vmap = vb2_dc_dmabuf_ops_vmap,
+	.mmap = vb2_dc_dmabuf_ops_mmap,
+	.release = vb2_dc_dmabuf_ops_release,
+};
+
+static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
+{
+	int ret;
+	struct sg_table *sgt;
+
+	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+	if (!sgt) {
+		dev_err(buf->dev, "failed to alloc sg table\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	ret = dma_get_sgtable(buf->dev, sgt, buf->vaddr, buf->dma_addr,
+		buf->size);
+	if (ret < 0) {
+		dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
+		kfree(sgt);
+		return ERR_PTR(ret);
+	}
+
+	return sgt;
+}
+
+static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv)
+{
+	struct vb2_dc_buf *buf = buf_priv;
+	struct dma_buf *dbuf;
+	struct sg_table *sgt = buf->sgt_base;
+
+	if (!sgt)
+		sgt = vb2_dc_get_base_sgt(buf);
+	if (WARN_ON(IS_ERR(sgt)))
+		return NULL;
+
+	/* cache base sgt for future use */
+	buf->sgt_base = sgt;
+
+	dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, 0);
+	if (IS_ERR(dbuf))
+		return NULL;
+
+	/* dmabuf keeps reference to vb2 buffer */
+	atomic_inc(&buf->refcount);
+
+	return dbuf;
+}
+
+/*********************************************/
 /*       callbacks for USERPTR buffers       */
 /*********************************************/
 
@@ -519,6 +722,7 @@  static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
 const struct vb2_mem_ops vb2_dma_contig_memops = {
 	.alloc		= vb2_dc_alloc,
 	.put		= vb2_dc_put,
+	.get_dmabuf	= vb2_dc_get_dmabuf,
 	.cookie		= vb2_dc_cookie,
 	.vaddr		= vb2_dc_vaddr,
 	.mmap		= vb2_dc_mmap,