
RFC: dma-buf: userspace mmap support

Message ID 1331775148-5001-1-git-send-email-rob.clark@linaro.org
State New

Commit Message

Rob Clark March 15, 2012, 1:32 a.m. UTC
From: Rob Clark <rob@ti.com>

Enable optional userspace access to dma-buf buffers via mmap() on the
dma-buf file descriptor.  Userspace access to the buffer should be
bracketed with DMA_BUF_IOCTL_{PREPARE,FINISH}_ACCESS ioctl calls to
give the exporting driver a chance to deal with cache synchronization
and such for cached userspace mappings without resorting to page
faulting tricks.  The reasoning behind this is that, while drm
drivers tend to have all the mechanisms in place for dealing with
page faulting tricks, other driver subsystems may not.  And in
addition, while page faulting tricks make userspace simpler, there
are some associated overheads.

In all cases, the mmap() call is allowed to fail, and the associated
dma_buf_ops are optional (mmap() will fail if the exporter does not
implement the mmap() op, but in either case the
{prepare,finish}_access() ops are optional).

For now the prepare/finish access ioctls are kept simple with no
argument, although there is the possibility of adding additional ioctls
(or simply changing the existing ioctls from _IO() to _IOW()) later
as an optimization to allow userspace to specify a region of
interest.
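
To illustrate the intent, a minimal sketch of the expected userspace usage
might look like the following (error handling omitted; this assumes the
ioctl definitions eventually end up in a userspace-visible header, and that
the application already knows the buffer size):

    #include <string.h>
    #include <sys/mman.h>
    #include <sys/ioctl.h>
    #include <linux/ioctl.h>

    /* assumed to come from the (not yet split out) userspace dma-buf header */
    #define DMA_BUF_IOCTL_PREPARE_ACCESS   _IO('Z', 0)
    #define DMA_BUF_IOCTL_FINISH_ACCESS    _IO('Z', 1)

    static void fill_buffer(int dmabuf_fd, size_t size)
    {
            /* a dma-buf fd always maps the whole buffer at offset 0 */
            char *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
                            MAP_SHARED, dmabuf_fd, 0);

            /* bracket CPU access so the exporter can do cache maintenance */
            ioctl(dmabuf_fd, DMA_BUF_IOCTL_PREPARE_ACCESS);
            memset(ptr, 0xff, size);
            ioctl(dmabuf_fd, DMA_BUF_IOCTL_FINISH_ACCESS);

            munmap(ptr, size);
    }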

For a final patch, dma-buf.h would need to be split into what is
exported to userspace, and what is kernel private, but I wanted to
get feedback on the idea of requiring userspace to bracket access
first (vs. limiting this to coherent mappings or exporters who play
page faulting plus PTE shoot-down games) before I split the header
which would cause conflicts with other pending dma-buf patches.  So
flame-on!
---
 drivers/base/dma-buf.c  |   42 ++++++++++++++++++++++++++++++++++++++++++
 include/linux/dma-buf.h |   22 ++++++++++++++++++++++
 2 files changed, 64 insertions(+), 0 deletions(-)

Comments

Abhinav Kochhar March 15, 2012, 7:16 a.m. UTC | #1
Do we need to pass the dmabuf object to dmabuf->ops->mmap(dmabuf, file,
vma)?

The dmabuf object can be retrieved from file->private_data:

"dmabuf = file->private_data"

Removing dmabuf from the function arguments would keep it consistent with
the basic "mmap" definition:

"static int xxxx_mmap(struct file *file, struct vm_area_struct *vma)"
On Thu, Mar 15, 2012 at 10:32 AM, Rob Clark <rob.clark@linaro.org> wrote:

> From: Rob Clark <rob@ti.com>
>
> Enable optional userspace access to dma-buf buffers via mmap() on the
> dma-buf file descriptor.  Userspace access to the buffer should be
> bracketed with DMA_BUF_IOCTL_{PREPARE,FINISH}_ACCESS ioctl calls to
> give the exporting driver a chance to deal with cache synchronization
> and such for cached userspace mappings without resorting to page
> faulting tricks.  The reasoning behind this is that, while drm
> drivers tend to have all the mechanisms in place for dealing with
> page faulting tricks, other driver subsystems may not.  And in
> addition, while page faulting tricks make userspace simpler, there
> are some associated overheads.
>
> In all cases, the mmap() call is allowed to fail, and the associated
> dma_buf_ops are optional (mmap() will fail if at least the mmap()
> op is not implemented by the exporter, but in either case the
> {prepare,finish}_access() ops are optional).
>
> For now the prepare/finish access ioctls are kept simple with no
> argument, although there is possibility to add additional ioctls
> (or simply change the existing ioctls from _IO() to _IOW()) later
> to provide optimization to allow userspace to specify a region of
> interest.
>
> For a final patch, dma-buf.h would need to be split into what is
> exported to userspace, and what is kernel private, but I wanted to
> get feedback on the idea of requiring userspace to bracket access
> first (vs. limiting this to coherent mappings or exporters who play
> page faltings plus PTE shoot-down games) before I split the header
> which would cause conflicts with other pending dma-buf patches.  So
> flame-on!
> ---
>  drivers/base/dma-buf.c  |   42 ++++++++++++++++++++++++++++++++++++++++++
>  include/linux/dma-buf.h |   22 ++++++++++++++++++++++
>  2 files changed, 64 insertions(+), 0 deletions(-)
>
> diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
> index c9a945f..382b78a 100644
> --- a/drivers/base/dma-buf.c
> +++ b/drivers/base/dma-buf.c
> @@ -30,6 +30,46 @@
>
>  static inline int is_dma_buf_file(struct file *);
>
> +static int dma_buf_mmap(struct file *file, struct vm_area_struct *vma)
> +{
> +       struct dma_buf *dmabuf;
> +
> +       if (!is_dma_buf_file(file))
> +               return -EINVAL;
> +
> +       dmabuf = file->private_data;
> +
> +       if (dmabuf->ops->mmap)
> +               return dmabuf->ops->mmap(dmabuf, file, vma);
> +
> +       return -ENODEV;
> +}
> +
> +static long dma_buf_ioctl(struct file *file, unsigned int cmd,
> +               unsigned long arg)
> +{
> +       struct dma_buf *dmabuf;
> +
> +       if (!is_dma_buf_file(file))
> +               return -EINVAL;
> +
> +       dmabuf = file->private_data;
> +
> +       switch (_IOC_NR(cmd)) {
> +       case _IOC_NR(DMA_BUF_IOCTL_PREPARE_ACCESS):
> +               if (dmabuf->ops->prepare_access)
> +                       return dmabuf->ops->prepare_access(dmabuf);
> +               return 0;
> +       case _IOC_NR(DMA_BUF_IOCTL_FINISH_ACCESS):
> +               if (dmabuf->ops->finish_access)
> +                       return dmabuf->ops->finish_access(dmabuf);
> +               return 0;
> +       default:
> +               return -EINVAL;
> +       }
> +}
> +
> +
>  static int dma_buf_release(struct inode *inode, struct file *file)
>  {
>        struct dma_buf *dmabuf;
> @@ -45,6 +85,8 @@ static int dma_buf_release(struct inode *inode, struct
> file *file)
>  }
>
>  static const struct file_operations dma_buf_fops = {
> +       .mmap           = dma_buf_mmap,
> +       .unlocked_ioctl = dma_buf_ioctl,
>        .release        = dma_buf_release,
>  };
>
> diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
> index a885b26..cbdff81 100644
> --- a/include/linux/dma-buf.h
> +++ b/include/linux/dma-buf.h
> @@ -34,6 +34,17 @@
>  struct dma_buf;
>  struct dma_buf_attachment;
>
> +/* TODO: dma-buf.h should be the userspace visible header, and
> dma-buf-priv.h (?)
> + * the kernel internal header.. for now just stuff these here to avoid
> conflicting
> + * with other patches..
> + *
> + * For now, no arg to keep things simple, but we could consider adding an
> + * optional region of interest later.
> + */
> +#define DMA_BUF_IOCTL_PREPARE_ACCESS   _IO('Z', 0)
> +#define DMA_BUF_IOCTL_FINISH_ACCESS    _IO('Z', 1)
> +
> +
>  /**
>  * struct dma_buf_ops - operations possible on struct dma_buf
>  * @attach: [optional] allows different devices to 'attach' themselves to
> the
> @@ -49,6 +60,13 @@ struct dma_buf_attachment;
>  * @unmap_dma_buf: decreases usecount of buffer, might deallocate scatter
>  *                pages.
>  * @release: release this buffer; to be called after the last dma_buf_put.
> + * @mmap: [optional, allowed to fail] operation called if userspace calls
> + *              mmap() on the dmabuf fd.  Note that userspace should use
> the
> + *              DMA_BUF_PREPARE_ACCESS / DMA_BUF_FINISH_ACCESS ioctls
> before/after
> + *              sw access to the buffer, to give the exporter an
> opportunity to
> + *              deal with cache maintenance.
> + * @prepare_access: [optional] handler for PREPARE_ACCESS ioctl.
> + * @finish_access: [optional] handler for FINISH_ACCESS ioctl.
>  */
>  struct dma_buf_ops {
>        int (*attach)(struct dma_buf *, struct device *,
> @@ -72,6 +90,10 @@ struct dma_buf_ops {
>        /* after final dma_buf_put() */
>        void (*release)(struct dma_buf *);
>
> +       int (*mmap)(struct dma_buf *, struct file *, struct vm_area_struct
> *);
> +       int (*prepare_access)(struct dma_buf *);
> +       int (*finish_access)(struct dma_buf *);
> +
>  };
>
>  /**
> --
> 1.7.5.4
>
>
> _______________________________________________
> Linaro-mm-sig mailing list
> Linaro-mm-sig@lists.linaro.org
> http://lists.linaro.org/mailman/listinfo/linaro-mm-sig
>
Rob Clark March 15, 2012, 12:27 p.m. UTC | #2
On Thu, Mar 15, 2012 at 2:16 AM, Abhinav Kochhar
<kochhar.abhinav@gmail.com> wrote:
> do we need to pass the dmabuf object to dmabuf->ops->mmap(dmabuf, file,
> vma)?
>
> as file->private_data can retrieve the dmabuf object.
>
> "dmabuf = file->private_data"
>
> removing dmabuf from the function arguments will keep it consistent with
> basic "mmap" definitions:
>
> "static int xxxx_mmap(struct file *file, struct vm_area_struct *vma)"
>

The deviation from the standard mmap fxn signature was intentional..  it
was to avoid encouraging incorrect use.

What I mean is, most subsystems interested in participating in dmabuf
support mmap'ing multiple buffers on a single chrdev, with some
offset->object mapping mechanism.  But in the case of a dmabuf fd, the
buffer userspace mmaps would be at offset=0.  So you wouldn't get
the expected results if you just directly plugged v4l2_mmap or
drm_gem_mmap, etc, into your dmabuf-ops.  Not to mention that
file->private_data wouldn't be what you expected.  So basically I was
just trying to follow principle-of-least-surprise ;-)
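
For example (purely illustrative, with 'struct my_buffer' and its fields
invented here), an exporter of physically contiguous buffers might implement
the op roughly like this:

    /* sketch only: the buffer always maps at offset 0 on the dma-buf fd */
    static int my_exporter_mmap(struct dma_buf *dmabuf, struct file *file,
                    struct vm_area_struct *vma)
    {
            struct my_buffer *buf = dmabuf->priv;
            unsigned long size = vma->vm_end - vma->vm_start;

            if (vma->vm_pgoff != 0 || size > buf->size)
                    return -EINVAL;

            return remap_pfn_range(vma, vma->vm_start,
                            buf->paddr >> PAGE_SHIFT, size,
                            vma->vm_page_prot);
    }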

BR,
-R

> On Thu, Mar 15, 2012 at 10:32 AM, Rob Clark <rob.clark@linaro.org> wrote:
>>
>> From: Rob Clark <rob@ti.com>
>>
>> Enable optional userspace access to dma-buf buffers via mmap() on the
>> dma-buf file descriptor.  Userspace access to the buffer should be
>> bracketed with DMA_BUF_IOCTL_{PREPARE,FINISH}_ACCESS ioctl calls to
>> give the exporting driver a chance to deal with cache synchronization
>> and such for cached userspace mappings without resorting to page
>> faulting tricks.  The reasoning behind this is that, while drm
>> drivers tend to have all the mechanisms in place for dealing with
>> page faulting tricks, other driver subsystems may not.  And in
>> addition, while page faulting tricks make userspace simpler, there
>> are some associated overheads.
>>
>> In all cases, the mmap() call is allowed to fail, and the associated
>> dma_buf_ops are optional (mmap() will fail if at least the mmap()
>> op is not implemented by the exporter, but in either case the
>> {prepare,finish}_access() ops are optional).
>>
>> For now the prepare/finish access ioctls are kept simple with no
>> argument, although there is possibility to add additional ioctls
>> (or simply change the existing ioctls from _IO() to _IOW()) later
>> to provide optimization to allow userspace to specify a region of
>> interest.
>>
>> For a final patch, dma-buf.h would need to be split into what is
>> exported to userspace, and what is kernel private, but I wanted to
>> get feedback on the idea of requiring userspace to bracket access
>> first (vs. limiting this to coherent mappings or exporters who play
>> page faltings plus PTE shoot-down games) before I split the header
>> which would cause conflicts with other pending dma-buf patches.  So
>> flame-on!
>> ---
>>  drivers/base/dma-buf.c  |   42 ++++++++++++++++++++++++++++++++++++++++++
>>  include/linux/dma-buf.h |   22 ++++++++++++++++++++++
>>  2 files changed, 64 insertions(+), 0 deletions(-)
>>
>> diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
>> index c9a945f..382b78a 100644
>> --- a/drivers/base/dma-buf.c
>> +++ b/drivers/base/dma-buf.c
>> @@ -30,6 +30,46 @@
>>
>>  static inline int is_dma_buf_file(struct file *);
>>
>> +static int dma_buf_mmap(struct file *file, struct vm_area_struct *vma)
>> +{
>> +       struct dma_buf *dmabuf;
>> +
>> +       if (!is_dma_buf_file(file))
>> +               return -EINVAL;
>> +
>> +       dmabuf = file->private_data;
>> +
>> +       if (dmabuf->ops->mmap)
>> +               return dmabuf->ops->mmap(dmabuf, file, vma);
>> +
>> +       return -ENODEV;
>> +}
>> +
>> +static long dma_buf_ioctl(struct file *file, unsigned int cmd,
>> +               unsigned long arg)
>> +{
>> +       struct dma_buf *dmabuf;
>> +
>> +       if (!is_dma_buf_file(file))
>> +               return -EINVAL;
>> +
>> +       dmabuf = file->private_data;
>> +
>> +       switch (_IOC_NR(cmd)) {
>> +       case _IOC_NR(DMA_BUF_IOCTL_PREPARE_ACCESS):
>> +               if (dmabuf->ops->prepare_access)
>> +                       return dmabuf->ops->prepare_access(dmabuf);
>> +               return 0;
>> +       case _IOC_NR(DMA_BUF_IOCTL_FINISH_ACCESS):
>> +               if (dmabuf->ops->finish_access)
>> +                       return dmabuf->ops->finish_access(dmabuf);
>> +               return 0;
>> +       default:
>> +               return -EINVAL;
>> +       }
>> +}
>> +
>> +
>>  static int dma_buf_release(struct inode *inode, struct file *file)
>>  {
>>        struct dma_buf *dmabuf;
>> @@ -45,6 +85,8 @@ static int dma_buf_release(struct inode *inode, struct
>> file *file)
>>  }
>>
>>  static const struct file_operations dma_buf_fops = {
>> +       .mmap           = dma_buf_mmap,
>> +       .unlocked_ioctl = dma_buf_ioctl,
>>        .release        = dma_buf_release,
>>  };
>>
>> diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
>> index a885b26..cbdff81 100644
>> --- a/include/linux/dma-buf.h
>> +++ b/include/linux/dma-buf.h
>> @@ -34,6 +34,17 @@
>>  struct dma_buf;
>>  struct dma_buf_attachment;
>>
>> +/* TODO: dma-buf.h should be the userspace visible header, and
>> dma-buf-priv.h (?)
>> + * the kernel internal header.. for now just stuff these here to avoid
>> conflicting
>> + * with other patches..
>> + *
>> + * For now, no arg to keep things simple, but we could consider adding an
>> + * optional region of interest later.
>> + */
>> +#define DMA_BUF_IOCTL_PREPARE_ACCESS   _IO('Z', 0)
>> +#define DMA_BUF_IOCTL_FINISH_ACCESS    _IO('Z', 1)
>> +
>> +
>>  /**
>>  * struct dma_buf_ops - operations possible on struct dma_buf
>>  * @attach: [optional] allows different devices to 'attach' themselves to
>> the
>> @@ -49,6 +60,13 @@ struct dma_buf_attachment;
>>  * @unmap_dma_buf: decreases usecount of buffer, might deallocate scatter
>>  *                pages.
>>  * @release: release this buffer; to be called after the last dma_buf_put.
>> + * @mmap: [optional, allowed to fail] operation called if userspace calls
>> + *              mmap() on the dmabuf fd.  Note that userspace should use
>> the
>> + *              DMA_BUF_PREPARE_ACCESS / DMA_BUF_FINISH_ACCESS ioctls
>> before/after
>> + *              sw access to the buffer, to give the exporter an
>> opportunity to
>> + *              deal with cache maintenance.
>> + * @prepare_access: [optional] handler for PREPARE_ACCESS ioctl.
>> + * @finish_access: [optional] handler for FINISH_ACCESS ioctl.
>>  */
>>  struct dma_buf_ops {
>>        int (*attach)(struct dma_buf *, struct device *,
>> @@ -72,6 +90,10 @@ struct dma_buf_ops {
>>        /* after final dma_buf_put() */
>>        void (*release)(struct dma_buf *);
>>
>> +       int (*mmap)(struct dma_buf *, struct file *, struct vm_area_struct
>> *);
>> +       int (*prepare_access)(struct dma_buf *);
>> +       int (*finish_access)(struct dma_buf *);
>> +
>>  };
>>
>>  /**
>> --
>> 1.7.5.4
>>
>>
>> _______________________________________________
>> Linaro-mm-sig mailing list
>> Linaro-mm-sig@lists.linaro.org
>> http://lists.linaro.org/mailman/listinfo/linaro-mm-sig
>
>
Rebecca Schultz Zavin March 15, 2012, 8:30 p.m. UTC | #3
This looks perfect to me and will close what is really the last remaining
blocking issue for converting ion to be a dma-buf exporter.  Assuming there
are no major objections to this I'll post some patches to the list next week
that make that change to ion.  Looking forward to meeting in the middle on
this!  Thanks Rob!

I'm happy with the bracketing calls as well.  We've been discussing a
similar api to ion to manage the caches.  It'll allow us to enable debug
features like remapping the buffers read only when a process says it's
finished with them to catch cache offenders.

Rebecca

On Thu, Mar 15, 2012 at 5:27 AM, Rob Clark <rob.clark@linaro.org> wrote:

> On Thu, Mar 15, 2012 at 2:16 AM, Abhinav Kochhar
> <kochhar.abhinav@gmail.com> wrote:
> > do we need to pass the dmabuf object to dmabuf->ops->mmap(dmabuf, file,
> > vma)?
> >
> > as file->private_data can retrieve the dmabuf object.
> >
> > "dmabuf = file->private_data"
> >
> > removing dmabuf from the function arguments will keep it consistent with
> > basic "mmap" definitions:
> >
> > "static int xxxx_mmap(struct file *file, struct vm_area_struct *vma)"
> >
>
> This was intentional to deviate from standard mmap fxn signature..  it
> was to avoid encouraging incorrect use.
>
> What I mean is, most subsystems interested in participating in dmabuf
> support mmap'ing multiple buffers on a single chrdev, with some
> offset->object mapping mechanism.  But in the case of a dmabuf fd, the
> buffer userspace would mmap would be at offset=0.  So you wouldn't get
> the expected results if you just directly plugged v4l2_mmap or
> drm_gem_mmap, etc, into your dmabuf-ops.  Not to mention the
> file->file_private wouldn't be what you expected.  So basically I was
> just trying to follow principle-of-least-surprise ;-)
>
> BR,
> -R
>
> > On Thu, Mar 15, 2012 at 10:32 AM, Rob Clark <rob.clark@linaro.org>
> wrote:
> >>
> >> From: Rob Clark <rob@ti.com>
> >>
> >> Enable optional userspace access to dma-buf buffers via mmap() on the
> >> dma-buf file descriptor.  Userspace access to the buffer should be
> >> bracketed with DMA_BUF_IOCTL_{PREPARE,FINISH}_ACCESS ioctl calls to
> >> give the exporting driver a chance to deal with cache synchronization
> >> and such for cached userspace mappings without resorting to page
> >> faulting tricks.  The reasoning behind this is that, while drm
> >> drivers tend to have all the mechanisms in place for dealing with
> >> page faulting tricks, other driver subsystems may not.  And in
> >> addition, while page faulting tricks make userspace simpler, there
> >> are some associated overheads.
> >>
> >> In all cases, the mmap() call is allowed to fail, and the associated
> >> dma_buf_ops are optional (mmap() will fail if at least the mmap()
> >> op is not implemented by the exporter, but in either case the
> >> {prepare,finish}_access() ops are optional).
> >>
> >> For now the prepare/finish access ioctls are kept simple with no
> >> argument, although there is possibility to add additional ioctls
> >> (or simply change the existing ioctls from _IO() to _IOW()) later
> >> to provide optimization to allow userspace to specify a region of
> >> interest.
> >>
> >> For a final patch, dma-buf.h would need to be split into what is
> >> exported to userspace, and what is kernel private, but I wanted to
> >> get feedback on the idea of requiring userspace to bracket access
> >> first (vs. limiting this to coherent mappings or exporters who play
> >> page faltings plus PTE shoot-down games) before I split the header
> >> which would cause conflicts with other pending dma-buf patches.  So
> >> flame-on!
> >> ---
> >>  drivers/base/dma-buf.c  |   42
> ++++++++++++++++++++++++++++++++++++++++++
> >>  include/linux/dma-buf.h |   22 ++++++++++++++++++++++
> >>  2 files changed, 64 insertions(+), 0 deletions(-)
> >>
> >> diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
> >> index c9a945f..382b78a 100644
> >> --- a/drivers/base/dma-buf.c
> >> +++ b/drivers/base/dma-buf.c
> >> @@ -30,6 +30,46 @@
> >>
> >>  static inline int is_dma_buf_file(struct file *);
> >>
> >> +static int dma_buf_mmap(struct file *file, struct vm_area_struct *vma)
> >> +{
> >> +       struct dma_buf *dmabuf;
> >> +
> >> +       if (!is_dma_buf_file(file))
> >> +               return -EINVAL;
> >> +
> >> +       dmabuf = file->private_data;
> >> +
> >> +       if (dmabuf->ops->mmap)
> >> +               return dmabuf->ops->mmap(dmabuf, file, vma);
> >> +
> >> +       return -ENODEV;
> >> +}
> >> +
> >> +static long dma_buf_ioctl(struct file *file, unsigned int cmd,
> >> +               unsigned long arg)
> >> +{
> >> +       struct dma_buf *dmabuf;
> >> +
> >> +       if (!is_dma_buf_file(file))
> >> +               return -EINVAL;
> >> +
> >> +       dmabuf = file->private_data;
> >> +
> >> +       switch (_IOC_NR(cmd)) {
> >> +       case _IOC_NR(DMA_BUF_IOCTL_PREPARE_ACCESS):
> >> +               if (dmabuf->ops->prepare_access)
> >> +                       return dmabuf->ops->prepare_access(dmabuf);
> >> +               return 0;
> >> +       case _IOC_NR(DMA_BUF_IOCTL_FINISH_ACCESS):
> >> +               if (dmabuf->ops->finish_access)
> >> +                       return dmabuf->ops->finish_access(dmabuf);
> >> +               return 0;
> >> +       default:
> >> +               return -EINVAL;
> >> +       }
> >> +}
> >> +
> >> +
> >>  static int dma_buf_release(struct inode *inode, struct file *file)
> >>  {
> >>        struct dma_buf *dmabuf;
> >> @@ -45,6 +85,8 @@ static int dma_buf_release(struct inode *inode, struct
> >> file *file)
> >>  }
> >>
> >>  static const struct file_operations dma_buf_fops = {
> >> +       .mmap           = dma_buf_mmap,
> >> +       .unlocked_ioctl = dma_buf_ioctl,
> >>        .release        = dma_buf_release,
> >>  };
> >>
> >> diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
> >> index a885b26..cbdff81 100644
> >> --- a/include/linux/dma-buf.h
> >> +++ b/include/linux/dma-buf.h
> >> @@ -34,6 +34,17 @@
> >>  struct dma_buf;
> >>  struct dma_buf_attachment;
> >>
> >> +/* TODO: dma-buf.h should be the userspace visible header, and
> >> dma-buf-priv.h (?)
> >> + * the kernel internal header.. for now just stuff these here to avoid
> >> conflicting
> >> + * with other patches..
> >> + *
> >> + * For now, no arg to keep things simple, but we could consider adding
> an
> >> + * optional region of interest later.
> >> + */
> >> +#define DMA_BUF_IOCTL_PREPARE_ACCESS   _IO('Z', 0)
> >> +#define DMA_BUF_IOCTL_FINISH_ACCESS    _IO('Z', 1)
> >> +
> >> +
> >>  /**
> >>  * struct dma_buf_ops - operations possible on struct dma_buf
> >>  * @attach: [optional] allows different devices to 'attach' themselves
> to
> >> the
> >> @@ -49,6 +60,13 @@ struct dma_buf_attachment;
> >>  * @unmap_dma_buf: decreases usecount of buffer, might deallocate
> scatter
> >>  *                pages.
> >>  * @release: release this buffer; to be called after the last
> dma_buf_put.
> >> + * @mmap: [optional, allowed to fail] operation called if userspace
> calls
> >> + *              mmap() on the dmabuf fd.  Note that userspace should
> use
> >> the
> >> + *              DMA_BUF_PREPARE_ACCESS / DMA_BUF_FINISH_ACCESS ioctls
> >> before/after
> >> + *              sw access to the buffer, to give the exporter an
> >> opportunity to
> >> + *              deal with cache maintenance.
> >> + * @prepare_access: [optional] handler for PREPARE_ACCESS ioctl.
> >> + * @finish_access: [optional] handler for FINISH_ACCESS ioctl.
> >>  */
> >>  struct dma_buf_ops {
> >>        int (*attach)(struct dma_buf *, struct device *,
> >> @@ -72,6 +90,10 @@ struct dma_buf_ops {
> >>        /* after final dma_buf_put() */
> >>        void (*release)(struct dma_buf *);
> >>
> >> +       int (*mmap)(struct dma_buf *, struct file *, struct
> vm_area_struct
> >> *);
> >> +       int (*prepare_access)(struct dma_buf *);
> >> +       int (*finish_access)(struct dma_buf *);
> >> +
> >>  };
> >>
> >>  /**
> >> --
> >> 1.7.5.4
> >>
> >>
> >> _______________________________________________
> >> Linaro-mm-sig mailing list
> >> Linaro-mm-sig@lists.linaro.org
> >> http://lists.linaro.org/mailman/listinfo/linaro-mm-sig
> >
> >
>
Marcus Lorentzon March 16, 2012, 10:50 a.m. UTC | #5
On 03/15/2012 02:32 AM, Rob Clark wrote:
> From: Rob Clark<rob@ti.com>
> [snip]
> In all cases, the mmap() call is allowed to fail, and the associated
> dma_buf_ops are optional (mmap() will fail if at least the mmap()
> op is not implemented by the exporter, but in either case the
> {prepare,finish}_access() ops are optional).
I sort of understand this approach. It allows some implementations
(ARM/Android) to move forward. But how would an application act if mmap
fails? What is the alternative? How can the application detect whether mmap
is possible or not? Or is this mmap only supposed to be used from
device-specific libs like libdrm-xx/libv4l2-xx/libgralloc?

Can mmap fail for one buffer, but not another? Can it fail for a buffer
that has successfully been mmapped once before (except for the usual
ENOMEM/EAGAIN etc)?
> For now the prepare/finish access ioctls are kept simple with no
> argument, although there is possibility to add additional ioctls
> (or simply change the existing ioctls from _IO() to _IOW()) later
> to provide optimization to allow userspace to specify a region of
> interest.
I like the idea of simple, assume-the-worst, no-args versions of
begin/end access. But once we move forward, I would like not just the
region but also the access type (R/W). R/W info allows the driver to make
cache management optimizations that are otherwise impossible. For example,
if a CPU without allocate-on-write has only written, a write buffer flush
is enough to switch to a HW read. And (at least on ARM) a cache clean can
be done on the whole cache for large areas, but invalidate has to be done
line by line. Eliminating the need to invalidate, especially for a small
region, compared to invalidating the entire buffer line by line, can make
a huge difference.
But I would like these in a separate ioctl to keep the normal case
simple. Maybe even as a separate patch.
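
Purely as an illustration of the kind of thing I mean (none of these names
exist in the patch), an extended variant could carry the access type and a
region of interest:

    /* hypothetical _IOW() variants, not part of this patch */
    struct dma_buf_access_arg {
            __u32 flags;    /* DMA_BUF_ACCESS_READ and/or DMA_BUF_ACCESS_WRITE */
            __u32 pad;
            __u64 offset;   /* start of region of interest, in bytes */
            __u64 length;   /* 0 means the whole buffer */
    };

    #define DMA_BUF_ACCESS_READ    (1 << 0)
    #define DMA_BUF_ACCESS_WRITE   (1 << 1)

    #define DMA_BUF_IOCTL_PREPARE_ACCESS_RGN \
            _IOW('Z', 2, struct dma_buf_access_arg)
    #define DMA_BUF_IOCTL_FINISH_ACCESS_RGN \
            _IOW('Z', 3, struct dma_buf_access_arg)
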
>
> For a final patch, dma-buf.h would need to be split into what is
> exported to userspace, and what is kernel private, but I wanted to
> get feedback on the idea of requiring userspace to bracket access
> first (vs. limiting this to coherent mappings or exporters who play
> page faltings plus PTE shoot-down games) before I split the header
> which would cause conflicts with other pending dma-buf patches.  So
> flame-on!
Why not just guard the kernel parts with __KERNEL__ or something? Or are
there guidelines preventing this?

> [snip]
>
> +
> +static long dma_buf_ioctl(struct file *file, unsigned int cmd,
> +		unsigned long arg)
> +{
> +	struct dma_buf *dmabuf;
> +
> +	if (!is_dma_buf_file(file))
> +		return -EINVAL;
> +
> +	dmabuf = file->private_data;
> +
> +	switch (_IOC_NR(cmd)) {
> +	case _IOC_NR(DMA_BUF_IOCTL_PREPARE_ACCESS):
> +		if (dmabuf->ops->prepare_access)
> +			return dmabuf->ops->prepare_access(dmabuf);
> +		return 0;
> +	case _IOC_NR(DMA_BUF_IOCTL_FINISH_ACCESS):
> +		if (dmabuf->ops->finish_access)
> +			return dmabuf->ops->finish_access(dmabuf);
> +		return 0;
> +	default:
> +		return -EINVAL;
> +	}
> +}
> +
> +
Multiple empty lines
>   static int dma_buf_release(struct inode *inode, struct file *file)
>   {
>   	struct dma_buf *dmabuf;
> @@ -45,6 +85,8 @@ static int dma_buf_release(struct inode *inode, struct file *file)
>   }
>
>   static const struct file_operations dma_buf_fops = {
> +	.mmap 		= dma_buf_mmap,
> +	.unlocked_ioctl = dma_buf_ioctl,
>   	.release	= dma_buf_release,
>   };
>
> diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
> index a885b26..cbdff81 100644
> --- a/include/linux/dma-buf.h
> +++ b/include/linux/dma-buf.h
> @@ -34,6 +34,17 @@
>   struct dma_buf;
>   struct dma_buf_attachment;
>
> +/* TODO: dma-buf.h should be the userspace visible header, and dma-buf-priv.h (?)
> + * the kernel internal header.. for now just stuff these here to avoid conflicting
> + * with other patches..
> + *
> + * For now, no arg to keep things simple, but we could consider adding an
> + * optional region of interest later.
> + */
> +#define DMA_BUF_IOCTL_PREPARE_ACCESS   _IO('Z', 0)
> +#define DMA_BUF_IOCTL_FINISH_ACCESS    _IO('Z', 1)
> +
> +
Multiple empty lines
>   /**
>    * struct dma_buf_ops - operations possible on struct dma_buf
>    * @attach: [optional] allows different devices to 'attach' themselves to the
> @@ -49,6 +60,13 @@ struct dma_buf_attachment;
>    * @unmap_dma_buf: decreases usecount of buffer, might deallocate scatter
>    *		   pages.
>    * @release: release this buffer; to be called after the last dma_buf_put.
> + * @mmap: [optional, allowed to fail] operation called if userspace calls
> + *		 mmap() on the dmabuf fd.  Note that userspace should use the
> + *		 DMA_BUF_PREPARE_ACCESS / DMA_BUF_FINISH_ACCESS ioctls before/after
> + *		 sw access to the buffer, to give the exporter an opportunity to
> + *		 deal with cache maintenance.
> + * @prepare_access: [optional] handler for PREPARE_ACCESS ioctl.
> + * @finish_access: [optional] handler for FINISH_ACCESS ioctl.
xx_access should only be optional if you don't implement mmap. Otherwise 
it will be very hard to implement cache sync in dma_buf (the cpu2dev and 
dev2cpu parts). Introducing cache sync in dma_buf should be a way to 
remove it from dma_buf clients. An option would be for the cache sync 
code to assume the worst for each cpu2dev sync. Even if the CPU has not 
touched anything.

In short, very welcome patch ...

/BR
/Marcus
Rob Clark March 16, 2012, 2:19 p.m. UTC | #6
On Fri, Mar 16, 2012 at 5:50 AM, Marcus Lorentzon
<marcus.xm.lorentzon@stericsson.com> wrote:
> On 03/15/2012 02:32 AM, Rob Clark wrote:
>>
>> From: Rob Clark<rob@ti.com>
>> [snip]
>>
>> In all cases, the mmap() call is allowed to fail, and the associated
>> dma_buf_ops are optional (mmap() will fail if at least the mmap()
>> op is not implemented by the exporter, but in either case the
>> {prepare,finish}_access() ops are optional).
>
> I sort of understand this approach. It allowes some implementations
> (ARM/Android) to move forward. But how would an application act if mmap
> fails? What is the option? How can the application detect if mmap is
> possible or not? Or is this mmap only supposed to be used from device
> specific libs like libdrm-xx/libv4l2-xx/libgralloc?

The most sane fail path probably depends on the use case.  For
example, if you have some code that is writing subtitle overlay into
frames of video, then the fallback that makes sense is probably just
to skip the buffer.  In other cases, showing error message and exiting
gracefully might be what makes sense.

I expect it is most likely that a particular exporting device will
either support mmap or not.  But I expect there would at least some
special cases, for example in product kernels with secure/protected
content.

> Can mmap fail for one buffer, but not another? Can it fail for a buffer that
> have successfully been mmapped once before (except for the usual
> ENOMEM/EAGAIN etc)?

Well, this is always possible regardless, for example if the client
process's virtual address space is exhausted.

>> For now the prepare/finish access ioctls are kept simple with no
>> argument, although there is possibility to add additional ioctls
>> (or simply change the existing ioctls from _IO() to _IOW()) later
>> to provide optimization to allow userspace to specify a region of
>> interest.
>
> I like the idea of simple, assume the worst, no args, versions of begin/end
> access. But once we move forward, I don't just like the region, but also
> access type (R/W). R/W info allows the driver to make cache management
> optimizations otherwise impossible. Like if CPU with no alloc-on-write just
> write, a write buffer flush is enough to switch to a HW read. And (at least
> on ARM) cache clean can be done for all cache for large areas, but
> invalidate has to be done line by line. Eliminating the need to do
> invalidate, especially if region is small, compared to invalidate entire
> buffer line by line can make a huge difference.
> But I would like these in a separate ioctl to keep the normal case simple.
> Maybe as a separate patch even.

yeah, I expect that there will be some proposals and discussions about
that, so I wanted to get basic support in first :-)

>>
>> For a final patch, dma-buf.h would need to be split into what is
>> exported to userspace, and what is kernel private, but I wanted to
>> get feedback on the idea of requiring userspace to bracket access
>> first (vs. limiting this to coherent mappings or exporters who play
>> page faltings plus PTE shoot-down games) before I split the header
>> which would cause conflicts with other pending dma-buf patches.  So
>> flame-on!
>
> Why not just guard the kernel parts with __KERNEL__ or something? Or there
> are guidelines preventing this?

I think that is at least not how things are normally done, and seems a bit ugly

BR,
-R

>> [snip]
>>
>>
>> +
>> +static long dma_buf_ioctl(struct file *file, unsigned int cmd,
>> +               unsigned long arg)
>> +{
>> +       struct dma_buf *dmabuf;
>> +
>> +       if (!is_dma_buf_file(file))
>> +               return -EINVAL;
>> +
>> +       dmabuf = file->private_data;
>> +
>> +       switch (_IOC_NR(cmd)) {
>> +       case _IOC_NR(DMA_BUF_IOCTL_PREPARE_ACCESS):
>> +               if (dmabuf->ops->prepare_access)
>> +                       return dmabuf->ops->prepare_access(dmabuf);
>> +               return 0;
>> +       case _IOC_NR(DMA_BUF_IOCTL_FINISH_ACCESS):
>> +               if (dmabuf->ops->finish_access)
>> +                       return dmabuf->ops->finish_access(dmabuf);
>> +               return 0;
>> +       default:
>> +               return -EINVAL;
>> +       }
>> +}
>> +
>> +
>
> Multiple empty lines
>
>>  static int dma_buf_release(struct inode *inode, struct file *file)
>>  {
>>        struct dma_buf *dmabuf;
>> @@ -45,6 +85,8 @@ static int dma_buf_release(struct inode *inode, struct
>> file *file)
>>  }
>>
>>  static const struct file_operations dma_buf_fops = {
>> +       .mmap           = dma_buf_mmap,
>> +       .unlocked_ioctl = dma_buf_ioctl,
>>        .release        = dma_buf_release,
>>  };
>>
>> diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
>> index a885b26..cbdff81 100644
>> --- a/include/linux/dma-buf.h
>> +++ b/include/linux/dma-buf.h
>> @@ -34,6 +34,17 @@
>>  struct dma_buf;
>>  struct dma_buf_attachment;
>>
>> +/* TODO: dma-buf.h should be the userspace visible header, and
>> dma-buf-priv.h (?)
>> + * the kernel internal header.. for now just stuff these here to avoid
>> conflicting
>> + * with other patches..
>> + *
>> + * For now, no arg to keep things simple, but we could consider adding an
>> + * optional region of interest later.
>> + */
>> +#define DMA_BUF_IOCTL_PREPARE_ACCESS   _IO('Z', 0)
>> +#define DMA_BUF_IOCTL_FINISH_ACCESS    _IO('Z', 1)
>> +
>> +
>
> Multiple empty lines
>
>>  /**
>>   * struct dma_buf_ops - operations possible on struct dma_buf
>>   * @attach: [optional] allows different devices to 'attach' themselves to
>> the
>> @@ -49,6 +60,13 @@ struct dma_buf_attachment;
>>   * @unmap_dma_buf: decreases usecount of buffer, might deallocate scatter
>>   *               pages.
>>   * @release: release this buffer; to be called after the last
>> dma_buf_put.
>> + * @mmap: [optional, allowed to fail] operation called if userspace calls
>> + *              mmap() on the dmabuf fd.  Note that userspace should use
>> the
>> + *              DMA_BUF_PREPARE_ACCESS / DMA_BUF_FINISH_ACCESS ioctls
>> before/after
>> + *              sw access to the buffer, to give the exporter an
>> opportunity to
>> + *              deal with cache maintenance.
>> + * @prepare_access: [optional] handler for PREPARE_ACCESS ioctl.
>> + * @finish_access: [optional] handler for FINISH_ACCESS ioctl.
>
> xx_access should only be optional if you don't implement mmap. Otherwise it
> will be very hard to implement cache sync in dma_buf (the cpu2dev and
> dev2cpu parts). Introducing cache sync in dma_buf should be a way to remove
> it from dma_buf clients. An option would be for the cache sync code to
> assume the worst for each cpu2dev sync. Even if the CPU has not touched
> anything.
>
> In short, very welcome patch ...
>
> /BR
> /Marcus
>
>
> --
> To unsubscribe from this list: send the line "unsubscribe linux-media" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html
Tom Cooksey March 16, 2012, 5:24 p.m. UTC | #7
> From: Rob Clark <rob@ti.com>
> 
> Enable optional userspace access to dma-buf buffers via mmap() on the
> dma-buf file descriptor.  Userspace access to the buffer should be
> bracketed with DMA_BUF_IOCTL_{PREPARE,FINISH}_ACCESS ioctl calls to
> give the exporting driver a chance to deal with cache synchronization
> and such for cached userspace mappings without resorting to page
> faulting tricks.  The reasoning behind this is that, while drm
> drivers tend to have all the mechanisms in place for dealing with
> page faulting tricks, other driver subsystems may not.  And in
> addition, while page faulting tricks make userspace simpler, there
> are some associated overheads.

Speaking for the ARM Mali T6xx driver point of view, this API looks
good for us. Our use-case for mmap is glReadPixels and
glTex[Sub]Image2D on buffers the driver has imported via dma_buf. In
the case of glReadPixels, the finish ioctl isn't strictly necessary
as the CPU won't have written to the buffer and so doesn't need
flushing. As such, we'd get an additional cache flush which isn't
really necessary. But hey, it's glReadPixels - it's supposed to be
slow. :-)

I think requiring the finish ioctl in the API contract is a good
idea, even if the CPU has only done a ro access as it allows future
enhancements*. To "fix" the unnecessary flush in glReadPixels, I
think we'd like to keep the finish but see an "access type"
parameter added to prepare ioctl indicating if the access is ro or
rw to allow the cache flush in finish to be skipped if the access
was ro. As Rebecca says, a debug feature could even be added to
re-map the pages as ro in prepare(ro) to catch naughty accesses. I'd
also go as far as to say the debug feature should completely unmap
the pages after finish too. Though for us, both the access-type
parameter and debug features are "nice to haves" - we can make
progress with the code as it currently stands (assuming exporters
start using the API that is).

Something which also came up when discussing internally is the topic
of mmap APIs of the importing device driver. For example, I believe
DRM has an mmap API on GEM buffer objects. If a new dma_buf import
ioctl was added to GEM (maybe the PRIME patches already add this),
how would GEM's bo mmap API work?


* Future enhancements: The prepare/finish bracketing could be used
as part of a wider synchronization scheme with other devices.
E.g. If another device was writing to the buffer, the prepare ioctl
could block until that device had finished accessing that buffer.
In the same way, another device could be blocked from accessing that
buffer until the client process called finish. We have already
started playing with such a scheme in the T6xx driver stack we're
terming "kernel dependency system". In this scheme each buffer has a
FIFO of "buffer consumers" waiting to access a buffer. The idea
being that a "buffer consumer" is fairly abstract and could be any
device or userspace process participating in the synchronization
scheme. Examples would be GPU jobs, display controller "scan-out"
jobs, etc.

So for example, a userspace application could dispatch a GPU
fragment shading job into the GPU's kernel driver which will write
to a KMS scanout buffer. The application then immediately issues a
drm_mode_crtc_page_flip ioctl on the display controller's DRM driver
to display the soon-to-be-rendered buffer. Inside the kernel, the
GPU driver adds the fragment job to the dma_buf's FIFO. As the FIFO
was empty, dma_buf calls into the GPU kernel driver to tell it it
"owns" access to the buffer and the GPU driver schedules the job to
run on the GPU. Upon receiving the drm_mode_crtc_page_flip ioctl,
the DRM/KMS driver adds a scan-out job to the buffer's FIFO.
However, the FIFO already has the GPU's fragment shading job in it
so nothing happens until the GPU job completes. When the GPU job
completes, the GPU driver calls into dma_buf to mark its job
complete. dma_buf then takes the next job in its FIFO which the KMS
driver's scanout job, calls into the KMS driver to schedule the
pageflip. The result? A buffer gets scanned out as soon as it has
finished being rendered without needing a round-trip to userspace.
Sure, there are easier ways to achieve that goal, but the idea is
that the mechanism can be used to synchronize access across multiple
devices, which makes it useful for lots of other use-cases too.
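
To make that a little more concrete, here is a very rough sketch of the
per-buffer FIFO idea (all names are invented for illustration; this is not
the code from our driver stack, and it ignores details such as activating a
consumer from interrupt context):

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct buf_consumer {
            struct list_head link;
            void (*activate)(struct buf_consumer *c); /* run when at FIFO head */
            void *priv;
    };

    struct buf_sync_queue {
            spinlock_t lock;
            struct list_head fifo;
    };

    /* a device queues itself; if nothing is ahead of it, it runs at once */
    static void buf_queue_consumer(struct buf_sync_queue *q,
                    struct buf_consumer *c)
    {
            bool first;

            spin_lock(&q->lock);
            first = list_empty(&q->fifo);
            list_add_tail(&c->link, &q->fifo);
            spin_unlock(&q->lock);

            if (first)
                    c->activate(c);
    }

    /* when a consumer finishes, the next one in the FIFO gets activated */
    static void buf_consumer_done(struct buf_sync_queue *q,
                    struct buf_consumer *c)
    {
            struct buf_consumer *next = NULL;

            spin_lock(&q->lock);
            list_del(&c->link);
            if (!list_empty(&q->fifo))
                    next = list_first_entry(&q->fifo, struct buf_consumer, link);
            spin_unlock(&q->lock);

            if (next)
                    next->activate(next);
    }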


As I say, we have already implemented something which works as I
describe but where the buffers are abstract resources not linked to
dma_buf. I'd like to discuss the finer points of the mechanisms
further, but if it's looking like there's interest in this approach
we'll start re-writing the code we have to sit on-top of dma_buf
and posting it as RFCs to the various lists. The intention is to get
this to mainline, if mainline wants it. :-)

Personally, what I particularly like about this approach to
synchronization is that it doesn't require any interfaces to be
modified. I think/hope that makes it easier to port existing drivers
and sub-systems to take advantage of it. The buffer itself is the
synchronization object and interfaces already pass buffers around so
don't need modification. There are of course some limitations with
this approach, the main one we can think of being that it can't
really be used for A/V sync. It kinda assumes "jobs" in the FIFO
should be run as soon as the preceding job completes, which isn't
the case when streaming real-time video. Though nothing precludes
more explicit sync objects being used in conjunction with this
approach.


Cheers,

Tom
Rob Clark March 16, 2012, 6:55 p.m. UTC | #8
On Fri, Mar 16, 2012 at 12:24 PM, Tom Cooksey <tom.cooksey@arm.com> wrote:
>
>> From: Rob Clark <rob@ti.com>
>>
>> Enable optional userspace access to dma-buf buffers via mmap() on the
>> dma-buf file descriptor.  Userspace access to the buffer should be
>> bracketed with DMA_BUF_IOCTL_{PREPARE,FINISH}_ACCESS ioctl calls to
>> give the exporting driver a chance to deal with cache synchronization
>> and such for cached userspace mappings without resorting to page
>> faulting tricks.  The reasoning behind this is that, while drm
>> drivers tend to have all the mechanisms in place for dealing with
>> page faulting tricks, other driver subsystems may not.  And in
>> addition, while page faulting tricks make userspace simpler, there
>> are some associated overheads.
>
> Speaking for the ARM Mali T6xx driver point of view, this API looks
> good for us. Our use-case for mmap is glReadPixels and
> glTex[Sub]Image2D on buffers the driver has imported via dma_buf. In
> the case of glReadPixels, the finish ioctl isn't strictly necessary
> as the CPU won't have written to the buffer and so doesn't need
> flushing. As such, we'd get an additional cache flush which isn't
> really necessary. But hey, it's glReadPixels - it's supposed to be
> slow. :-)
>
> I think requiring the finish ioctl in the API contract is a good
> idea, even if the CPU has only done a ro access as it allows future
> enhancements*. To "fix" the unnecessary flush in glReadPixels, I
> think we'd like to keep the finish but see an "access type"
> parameter added to prepare ioctl indicating if the access is ro or
> rw to allow the cache flush in finish to be skipped if the access
> was ro. As Rebecca says, a debug feature could even be added to
> re-map the pages as ro in prepare(ro) to catch naughty accesses. I'd
> also go as far as to say the debug feature should completely unmap
> the pages after finish too. Though for us, both the access-type
> parameter and debug features are "nice to haves" - we can make
> progress with the code as it currently stands (assuming exporters
> start using the API that is).

Perhaps it isn't a bad idea to include an access-type bitmask in the
first version.  It would help optimize the cache operations a bit.

> Something which also came up when discussing internally is the topic
> of mmap APIs of the importing device driver. For example, I believe
> DRM has an mmap API on GEM buffer objects. If a new dma_buf import
> ioctl was added to GEM (maybe the PRIME patches already add this),
> how would GEM's bo mmap API work?

My first thought is maybe we should just disallow this for now until
we have a chance to see if there are any possible issues with an
importer mmap'ing the buffer to userspace.  We could possibly have a
helper dma_buf_mmap() fxn which in turn calls dmabuf ops->mmap() so
the mmap'ing is actually performed by the exporter on behalf of the
importer.
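
Something roughly along these lines (sketch only; the name is invented and
would need to avoid clashing with the fops handler in this patch):

    /* importer-side helper sketch: forward the importer's mmap to the exporter */
    int dma_buf_mmap_exporter(struct dma_buf *dmabuf, struct file *file,
                    struct vm_area_struct *vma)
    {
            if (!dmabuf->ops->mmap)
                    return -ENODEV;

            return dmabuf->ops->mmap(dmabuf, file, vma);
    }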

>
> * Future enhancements: The prepare/finish bracketing could be used
> as part of a wider synchronization scheme with other devices.
> E.g. If another device was writing to the buffer, the prepare ioctl
> could block until that device had finished accessing that buffer.
> In the same way, another device could be blocked from accessing that
> buffer until the client process called finish. We have already
> started playing with such a scheme in the T6xx driver stack we're
> terming "kernel dependency system". In this scheme each buffer has a
> FIFO of "buffer consumers" waiting to access a buffer. The idea
> being that a "buffer consumer" is fairly abstract and could be any
> device or userspace process participating in the synchronization
> scheme. Examples would be GPU jobs, display controller "scan-out"
> jobs, etc.
>
> So for example, a userspace application could dispatch a GPU
> fragment shading job into the GPU's kernel driver which will write
> to a KMS scanout buffer. The application then immediately issues a
> drm_mode_crtc_page_flip ioctl on the display controller's DRM driver
> to display the soon-to-be-rendered buffer. Inside the kernel, the
> GPU driver adds the fragment job to the dma_buf's FIFO. As the FIFO
> was empty, dma_buf calls into the GPU kernel driver to tell it it
> "owns" access to the buffer and the GPU driver schedules the job to
> run on the GPU. Upon receiving the drm_mode_crtc_page_flip ioctl,
> the DRM/KMS driver adds a scan-out job to the buffer's FIFO.
> However, the FIFO already has the GPU's fragment shading job in it
> so nothing happens until the GPU job completes. When the GPU job
> completes, the GPU driver calls into dma_buf to mark its job
> complete. dma_buf then takes the next job in its FIFO which the KMS
> driver's scanout job, calls into the KMS driver to schedule the
> pageflip. The result? A buffer gets scanned out as soon as it has
> finished being rendered without needing a round-trip to userspace.
> Sure, there are easier ways to achieve that goal, but the idea is
> that the mechanism can be used to synchronize access across multiple
> devices, which makes it useful for lots of other use-cases too.
>
>
> As I say, we have already implemented something which works as I
> describe but where the buffers are abstract resources not linked to
> dma_buf. I'd like to discuss the finer points of the mechanisms
> further, but if it's looking like there's interest in this approach
> we'll start re-writing the code we have to sit on-top of dma_buf
> and posting it as RFCs to the various lists. The intention is to get
> this to mainline, if mainline wants it. :-)

I think we do need some sort of 'sync object' (which might really just
be a 'synchronization queue' object) in the kernel.  It probably
shouldn't be built-in to dma-buf, but I expect we'd want the dma_buf
struct to have a 'struct sync_queue *' (or whatever it ends up being
called).

The sync-queue seems like a reasonable approach for pure cpu-sw based
synchronization.  The only thing I'm not sure about is how to also deal with
hw that supports any sort of auto synchronization without cpu sw
involvement.

BR,
-R

> Personally, what I particularly like about this approach to
> synchronization is that it doesn't require any interfaces to be
> modified. I think/hope that makes it easier to port existing drivers
> and sub-systems to take advantage of it. The buffer itself is the
> synchronization object and interfaces already pass buffers around so
> don't need modification. There are of course some limitations with
> this approach, the main one we can think of being that it can't
> really be used for A/V sync. It kinda assumes "jobs" in the FIFO
> should be run as soon as the preceding job completes, which isn't
> the case when streaming real-time video. Though nothing precludes
> more explicit sync objects being used in conjunction with this
> approach.
>
>
> Cheers,
>
> Tom
>
>
>
>
>
> _______________________________________________
> dri-devel mailing list
> dri-devel@lists.freedesktop.org
> http://lists.freedesktop.org/mailman/listinfo/dri-devel
Alan Cox March 17, 2012, 8:17 p.m. UTC | #9
> > dma-buf file descriptor.  Userspace access to the buffer should be
> > bracketed with DMA_BUF_IOCTL_{PREPARE,FINISH}_ACCESS ioctl calls to
> > give the exporting driver a chance to deal with cache synchronization
> > and such for cached userspace mappings without resorting to page

There should be flags indicating if this is necessary. We don't want extra
syscalls on hardware that doesn't need it. The other question is what
info is needed as you may only want to poke a few pages out of cache and
the prepare/finish on its own gives no info.

> E.g. If another device was writing to the buffer, the prepare ioctl
> could block until that device had finished accessing that buffer.

How do you avoid deadlocks on this ? We need very clear ways to ensure
things always complete in some form given multiple buffer
owner/requestors and the fact this API has no "prepare-multiple-buffers"
support.

Alan
Rob Clark March 17, 2012, 10:12 p.m. UTC | #10
On Sat, Mar 17, 2012 at 3:17 PM, Alan Cox <alan@lxorguk.ukuu.org.uk> wrote:
>> > dma-buf file descriptor.  Userspace access to the buffer should be
>> > bracketed with DMA_BUF_IOCTL_{PREPARE,FINISH}_ACCESS ioctl calls to
>> > give the exporting driver a chance to deal with cache synchronization
>> > and such for cached userspace mappings without resorting to page
>
> There should be flags indicating if this is necessary. We don't want extra
> syscalls on hardware that doesn't need it. The other question is what
> info is needed as you may only want to poke a few pages out of cache and
> the prepare/finish on its own gives no info.

Well, there isn't really a convenient way to know, for some random
code that is just handed a dmabuf fd, what the flags are without
passing around an extra param in userspace.  So I'd tend to say, just
live with the syscall even if it is a no-op (because if you are doing
sw access to the buffer, that is a slow/legacy path anyway).  But
I'm open to suggestions.
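
To be clear, the userspace side I'm picturing is just something like
the below (untested sketch; the ioctl defines are copied from the
patch, since the uapi header split hasn't happened yet):

#include <string.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>

#define DMA_BUF_IOCTL_PREPARE_ACCESS	_IO('Z', 0)
#define DMA_BUF_IOCTL_FINISH_ACCESS	_IO('Z', 1)

/* error handling omitted for brevity */
static void fill_buffer(int dmabuf_fd, size_t size)
{
	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
			 dmabuf_fd, 0);

	ioctl(dmabuf_fd, DMA_BUF_IOCTL_PREPARE_ACCESS);
	memset(ptr, 0, size);		/* all sw access goes in here */
	ioctl(dmabuf_fd, DMA_BUF_IOCTL_FINISH_ACCESS);

	munmap(ptr, size);
}

On coherent hardware the two ioctls would just be no-ops, which is the
cost I'm suggesting we live with.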

As far as just peeking/poking a few pages, that is where some later
ioctls or additional params could come in, to give some hints.  But I
wanted to keep it simple to start.
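
e.g. something along these lines could be added later without breaking
the simple no-arg version (completely hypothetical, just to show the
kind of hint I mean):

#include <linux/types.h>
#include <linux/ioctl.h>

struct dma_buf_access_region {
	__u64 offset;	/* start of the region userspace will touch */
	__u64 len;	/* length in bytes; 0 could mean "whole buffer" */
	__u32 flags;	/* reserved for read/write hints etc. */
	__u32 pad;
};

#define DMA_BUF_IOCTL_PREPARE_ACCESS_REGION \
	_IOW('Z', 2, struct dma_buf_access_region)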

>> E.g. If another device was writing to the buffer, the prepare ioctl
>> could block until that device had finished accessing that buffer.
>
> How do you avoid deadlocks on this ? We need very clear ways to ensure
> things always complete in some form given multiple buffer
> owner/requestors and the fact this API has no "prepare-multiple-buffers"
> support.

Probably some separate synchronization is needed... I'm not really sure
if prepare/finish (or map/unmap on the kernel side) is the right way to
handle that.

BR,
-R

> Alan
> _______________________________________________
> dri-devel mailing list
> dri-devel@lists.freedesktop.org
> http://lists.freedesktop.org/mailman/listinfo/dri-devel
Tom Cooksey March 19, 2012, 4:42 p.m. UTC | #11
> -----Original Message-----
> From: Alan Cox [mailto:alan@lxorguk.ukuu.org.uk]
> Sent: 17 March 2012 20:17
> To: Tom Cooksey
> Cc: 'Rob Clark'; linaro-mm-sig@lists.linaro.org; dri-
> devel@lists.freedesktop.org; linux-media@vger.kernel.org;
> rschultz@google.com; Rob Clark; sumit.semwal@linaro.org;
> patches@linaro.org
> Subject: Re: [PATCH] RFC: dma-buf: userspace mmap support
> 
> > > dma-buf file descriptor.  Userspace access to the buffer should be
> > > bracketed with DMA_BUF_IOCTL_{PREPARE,FINISH}_ACCESS ioctl calls to
> > > give the exporting driver a chance to deal with cache
> synchronization
> > > and such for cached userspace mappings without resorting to page
> 
> There should be flags indicating if this is necessary. We don't want
> extra
> syscalls on hardware that doesn't need it. The other question is what
> info is needed as you may only want to poke a few pages out of cache
> and
> the prepare/finish on its own gives no info.
> 
> > E.g. If another device was writing to the buffer, the prepare ioctl
> > could block until that device had finished accessing that buffer.
> 
> How do you avoid deadlocks on this ? We need very clear ways to ensure
> things always complete in some form given multiple buffer
> owner/requestors and the fact this API has no "prepare-multiple-
> buffers"
> support.

Yes, good point.

If the API was to also be used for synchronization it would have to
include an atomic "prepare multiple" ioctl which blocked until all
the buffers listed by the application were available. In the same
way, the kernel interface would also need to allow drivers to pass a
list of buffers a job will access in an atomic "add job" operation.
Actually, our current "KDS" (Kernel Dependency System) implementation
already works like this.
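
Just to make the shape of that concrete -- this is hypothetical, not
what KDS exposes today, and every name below is made up:

#include <linux/types.h>
#include <linux/ioctl.h>

struct dma_buf_prepare_multiple {
	__u32 count;	/* number of entries in the fds array */
	__u32 flags;
	__u64 fds;	/* userspace pointer to an array of __s32 dma-buf fds */
};

/* blocks until *all* of the listed buffers are available, then claims
 * them as one atomic operation */
#define DMA_BUF_IOCTL_PREPARE_MULTIPLE \
	_IOW('Z', 3, struct dma_buf_prepare_multiple)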

This might be a good argument for keeping synchronization and cache
maintenance separate, though even ignoring synchronization I would
think being able to issue cache maintenance operations for multiple
buffers in a single ioctl might present some small efficiency gains.
However as Rob points out, CPU access is already in slow/legacy
territory.

Note: Making the ioctl a "prepare multiple" would at least prevent
accidental dead-locks due to cross-dependencies, etc., but I think
some kind of watchdog/timeout would be useful on userspace locks to
stop a malicious application from preventing other devices and
processes from using buffers indefinitely.

Finally, it's probably worth mentioning that when we implemented
KDS we differentiated jobs which needed "exclusive access" to a
buffer and jobs which needed "shared access" to a buffer. Multiple
jobs could access a buffer at the same time if those jobs all
indicated they only needed shared access. Typically this would be
a job which will only read a buffer, such as a display controller
or texture read. The main use-case for this was implementing EGL's
preserved swap behaviour when using "buffer flipping". Here, the
display controller will be reading the front buffer, but the GPU
might also need to read that front buffer. So perhaps adding
"read-only" & "read-write" access flags to prepare could also be
interpreted as shared & exclusive accesses, if we went down this
route for synchronization that is. :-)
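
In other words something like the below -- again hypothetical, and it
turns the existing _IO() into an _IOW(), which Rob already said could
happen later anyway:

#include <linux/types.h>
#include <linux/ioctl.h>

#define DMA_BUF_ACCESS_READ	(1 << 0)	/* shared: scan-out, texture reads */
#define DMA_BUF_ACCESS_WRITE	(1 << 1)	/* exclusive: wait for all others */

#define DMA_BUF_IOCTL_PREPARE_ACCESS	_IOW('Z', 0, __u32)	/* access flags */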


Cheers,

Tom
Alan Cox March 19, 2012, 4:56 p.m. UTC | #12
> If the API was to also be used for synchronization it would have to
> include an atomic "prepare multiple" ioctl which blocked until all
> the buffers listed by the application were available. In the same

Too slow already. You are now serializing stuff while what we want to do
really is

	nobody_else_gets_buffers_next([list])
	on available(buffer)
		dispatch_work(buffer)

so that you can maximise parallelism without allowing deadlocks. If
you've got high memory bandwidth and 8+ cores the 'stop everything'
model isn't great.
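
In kernel terms, something like (hypothetical, names invented):

struct dma_buf;
struct buf_reservation;

typedef void (*bufs_ready_cb)(struct buf_reservation *rsv, void *data);

/* Queue the caller on every buffer in the list and return immediately.
 * cb fires (possibly straight away) once this reservation reaches the
 * head of all of the per-buffer queues; nothing blocks in the meantime. */
int reserve_buffers_async(struct dma_buf **bufs, unsigned int nr,
			  bufs_ready_cb cb, void *data);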

> This might be a good argument for keeping synchronization and cache
> maintenance separate, though even ignoring synchronization I would
> think being able to issue cache maintenance operations for multiple
> buffers in a single ioctl might present some small efficiency gains.
> However as Rob points out, CPU access is already in slow/legacy
> territory.

Dangerous assumption. I do think they should be separate. For one, it
makes the case where synchronization is needed but cache management is
handled in hardware much easier to split cleanly. Assuming CPU access
is slow/legacy reflects
a certain model of relatively slow CPU and accelerators where falling off
the acceleration path is bad. On a higher end processor falling off the
acceleration path isn't a performance matter so much as a power concern.

> KDS we differentiated jobs which needed "exclusive access" to a
> buffer and jobs which needed "shared access" to a buffer. Multiple
> jobs could access a buffer at the same time if those jobs all

Makes sense as it's a reader/writer lock and it reflects MESI/MOESI
caching and cache policy in some hardware/software assists.

> display controller will be reading the front buffer, but the GPU
> might also need to read that front buffer. So perhaps adding
> "read-only" & "read-write" access flags to prepare could also be
> interpreted as shared & exclusive accesses, if we went down this
> route for synchronization that is. :-)

mmap includes read/write info so probably using that works out. It also
means that you have the stuff mapped in a way that will bus error or
segfault anyone who goofs rather than give them the usual 'deep
weirdness' behaviour you get with mishandling of caching bits.

Alan
Marcus Lorentzon March 19, 2012, 5:10 p.m. UTC | #13
On 03/19/2012 05:56 PM, Alan Cox wrote:
>> display controller will be reading the front buffer, but the GPU
>> >  might also need to read that front buffer. So perhaps adding
>> >  "read-only"&  "read-write" access flags to prepare could also be
>> >  interpreted as shared&  exclusive accesses, if we went down this
>> >  route for synchronization that is.:-)
> mmap includes read/write info so probably using that works out. It also
> means that you have the stuff mapped in a way that will bus error or
> segfault anyone who goofs rather than give them the usual 'deep
> weirdness' behaviour you get with mishandling of caching bits.
>
> Alan
mmap only gives you this info at the time of the mmap call. prepare/finish
would give you this info for each CPU access to the buffer (assuming the
mmap lasts multiple frames). That means you can optimize and have zero
cache syncs for frames where the CPU doesn't touch the buffer at all. If
you use the mmap info, then you would be forced to sync the cache before
each GPU access to the buffer. For example, sub-texture updates in glyph
caches: they will only rarely change, so you don't want to sync the CPU
cache each time the buffer is used by the GPU, just because mmap says the
CPU has write access.

/Marcus
Tom Cooksey March 19, 2012, 6:44 p.m. UTC | #14
> -----Original Message-----
> From: Alan Cox [mailto:alan@lxorguk.ukuu.org.uk]
> Sent: 19 March 2012 16:57
> To: Tom Cooksey
> Cc: 'Rob Clark'; linaro-mm-sig@lists.linaro.org; dri-
> devel@lists.freedesktop.org; linux-media@vger.kernel.org;
> rschultz@google.com; Rob Clark; sumit.semwal@linaro.org;
> patches@linaro.org
> Subject: Re: [PATCH] RFC: dma-buf: userspace mmap support
> 
> > If the API was to also be used for synchronization it would have to
> > include an atomic "prepare multiple" ioctl which blocked until all
> > the buffers listed by the application were available. In the same
> 
> Too slow already. You are now serializing stuff while what we want to
> do
> really is
> 
> 	nobody_else_gets_buffers_next([list])
> 	on available(buffer)
> 		dispatch_work(buffer)
> 
> so that you can maximise parallelism without allowing deadlocks. If
> you've got a high memory bandwith and 8+ cores the 'stop everything'
> model isn't great.

Yes, sorry I wasn't clear here. By atomic I meant that a job starts
using all its buffers at the same time, once they are all available.
You are right, a job waiting for a list of buffers to become available
should not prevent other jobs from running or new jobs from being
queued (eughh). We actually
have the option of using asynchronous call-backs in KDS: A driver lists
all the buffers it needs when adding a job and that job gets added to
the FIFO of each buffer as an atomic operation. However, once the job
is added to all the FIFOs, that atomic operation is complete and another
job can be "queued" up. When a job completes, it is removed from each
buffer's FIFO. At that point, all the "next" jobs in each buffer's FIFO
are evaluated to see if they can run. If they can run, the job's
"start" call-back is called. There's also a synchronous mode of
operation where a blocked thread is "woken up" instead of calling a
call-back function. It is this synchronous mode I would imagine
would be used for user-space access.
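
For illustration, the rough shape of the data structures is something
like this (not the actual KDS definitions):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct kds_resource {				/* one per buffer */
	struct list_head	waiters;	/* FIFO of pending jobs */
	spinlock_t		lock;
};

struct kds_job {
	/* async mode: called once the job is at the head of the FIFO of
	 * every resource it listed when it was added */
	void			(*start)(struct kds_job *job);
	/* sync mode: wake this instead of calling start() */
	wait_queue_head_t	*wq;
	unsigned int		nr_res;
	struct kds_resource	**res;	/* every buffer the job touches */
};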


> > This might be a good argument for keeping synchronization and cache
> > maintenance separate, though even ignoring synchronization I would
> > think being able to issue cache maintenance operations for multiple
> > buffers in a single ioctl might present some small efficiency gains.
> > However as Rob points out, CPU access is already in slow/legacy
> > territory.
> 
> Dangerous assumption. I do think they should be separate. For one it
> makes the case of synchronization needed but hardware cache management
> much easier to split cleanly. Assuming CPU access is slow/legacy
> reflects a certain model of relatively slow CPU and accelerators
> where falling off the acceleration path is bad. On a higher end
> processor falling off the acceleration path isn't a performance
> matter so much as a power concern.

On some GPU architectures, glReadPixels is a _very_ heavy-weight
operation, so it is very much a performance issue and I think it always
will be. However, I think this might be a special case for certain
GPUs: other GPU architectures or device types might be able to
share data with the CPU without such a large impact on performance.
Writing subtitles onto a video frame decoded by a v4l2 hardware
codec seems a good example of that.

> > KDS we differentiated jobs which needed "exclusive access" to a
> > buffer and jobs which needed "shared access" to a buffer. Multiple
> > jobs could access a buffer at the same time if those jobs all
> 
> Makes sense as it's a reader/writer lock and it reflects MESI/MOESI
> caching and cache policy in some hardware/software assists.

Actually, this got me thinking... Several ARM implementations rely
on CPU/NEON to perform X.Org's 2D operations and those tend to
operate directly on the framebuffer. So in that case, both the CPU
and display controller need to access the same buffer at the same
time, even though one of them is writing to the buffer. This is
the main reason we called it shared/exclusive access in KDS rather
than read-only/read-write access. In such scenarios, you'd still
want to do a CPU cache flush after CPU-based 2D drawing is complete
to make sure the display controller "saw" those changes. So yes,
perhaps there's actually a use-case where synchronization must be
kept separate from cache maintenance? In which case, is it worth
making the proposed prepare/finish API more explicit, in that it is
a CPU cache invalidate and CPU cache flush operation only? Or are
there other things one might want to do in prepare/finish?
Automatic cache domain tracking for example?
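
i.e. would an exporter's prepare/finish be limited to roughly the
below (sketch only -- the foo_buf wrapper and its fields are made up,
the dma_sync_* calls are the normal streaming-DMA API):

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

struct foo_buf {
	struct device	*dev;
	struct sg_table	*sgt;
};

static int foo_prepare_access(struct dma_buf *dmabuf)
{
	struct foo_buf *fb = dmabuf->priv;

	/* invalidate, so CPU reads see what the devices wrote */
	dma_sync_sg_for_cpu(fb->dev, fb->sgt->sgl, fb->sgt->nents,
			    DMA_BIDIRECTIONAL);
	return 0;
}

static int foo_finish_access(struct dma_buf *dmabuf)
{
	struct foo_buf *fb = dmabuf->priv;

	/* clean/flush, so the devices see what the CPU wrote */
	dma_sync_sg_for_device(fb->dev, fb->sgt->sgl, fb->sgt->nents,
			       DMA_BIDIRECTIONAL);
	return 0;
}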


> > display controller will be reading the front buffer, but the GPU
> > might also need to read that front buffer. So perhaps adding
> > "read-only" & "read-write" access flags to prepare could also be
> > interpreted as shared & exclusive accesses, if we went down this
> > route for synchronization that is. :-)
> 
> mmap includes read/write info so probably using that works out. It also
> means that you have the stuff mapped in a way that will bus error or
> segfault anyone who goofs rather than give them the usual 'deep
> weirdness' behaviour you get with mishandling of caching bits.

I think it might be possible to make the case for caching user-space
mappings. In which case, you might want to always mmap read-write
but sometimes do a read operation and sometimes a write. So I think
we'd prefer not to make the read-only/read-write decision at mmap
time.


Cheers,

Tom
diff mbox

Patch

diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
index c9a945f..382b78a 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/base/dma-buf.c
@@ -30,6 +30,46 @@ 
 
 static inline int is_dma_buf_file(struct file *);
 
+static int dma_buf_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	struct dma_buf *dmabuf;
+
+	if (!is_dma_buf_file(file))
+		return -EINVAL;
+
+	dmabuf = file->private_data;
+
+	if (dmabuf->ops->mmap)
+		return dmabuf->ops->mmap(dmabuf, file, vma);
+
+	return -ENODEV;
+}
+
+static long dma_buf_ioctl(struct file *file, unsigned int cmd,
+		unsigned long arg)
+{
+	struct dma_buf *dmabuf;
+
+	if (!is_dma_buf_file(file))
+		return -EINVAL;
+
+	dmabuf = file->private_data;
+
+	switch (_IOC_NR(cmd)) {
+	case _IOC_NR(DMA_BUF_IOCTL_PREPARE_ACCESS):
+		if (dmabuf->ops->prepare_access)
+			return dmabuf->ops->prepare_access(dmabuf);
+		return 0;
+	case _IOC_NR(DMA_BUF_IOCTL_FINISH_ACCESS):
+		if (dmabuf->ops->finish_access)
+			return dmabuf->ops->finish_access(dmabuf);
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+
 static int dma_buf_release(struct inode *inode, struct file *file)
 {
 	struct dma_buf *dmabuf;
@@ -45,6 +85,8 @@  static int dma_buf_release(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations dma_buf_fops = {
+	.mmap 		= dma_buf_mmap,
+	.unlocked_ioctl = dma_buf_ioctl,
 	.release	= dma_buf_release,
 };
 
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index a885b26..cbdff81 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -34,6 +34,17 @@ 
 struct dma_buf;
 struct dma_buf_attachment;
 
+/* TODO: dma-buf.h should be the userspace visible header, and dma-buf-priv.h (?)
+ * the kernel internal header.. for now just stuff these here to avoid conflicting
+ * with other patches..
+ *
+ * For now, no arg to keep things simple, but we could consider adding an
+ * optional region of interest later.
+ */
+#define DMA_BUF_IOCTL_PREPARE_ACCESS   _IO('Z', 0)
+#define DMA_BUF_IOCTL_FINISH_ACCESS    _IO('Z', 1)
+
+
 /**
  * struct dma_buf_ops - operations possible on struct dma_buf
  * @attach: [optional] allows different devices to 'attach' themselves to the
@@ -49,6 +60,13 @@  struct dma_buf_attachment;
  * @unmap_dma_buf: decreases usecount of buffer, might deallocate scatter
  *		   pages.
  * @release: release this buffer; to be called after the last dma_buf_put.
+ * @mmap: [optional, allowed to fail] operation called if userspace calls
+ *		 mmap() on the dmabuf fd.  Note that userspace should use the
+ *		 DMA_BUF_PREPARE_ACCESS / DMA_BUF_FINISH_ACCESS ioctls before/after
+ *		 sw access to the buffer, to give the exporter an opportunity to
+ *		 deal with cache maintenance.
+ * @prepare_access: [optional] handler for PREPARE_ACCESS ioctl.
+ * @finish_access: [optional] handler for FINISH_ACCESS ioctl.
  */
 struct dma_buf_ops {
 	int (*attach)(struct dma_buf *, struct device *,
@@ -72,6 +90,10 @@  struct dma_buf_ops {
 	/* after final dma_buf_put() */
 	void (*release)(struct dma_buf *);
 
+	int (*mmap)(struct dma_buf *, struct file *, struct vm_area_struct *);
+	int (*prepare_access)(struct dma_buf *);
+	int (*finish_access)(struct dma_buf *);
+
 };
 
 /**