
drm/gem: add functions to get/put pages

Message ID 1316126878-29262-1-git-send-email-rob.clark@linaro.org
State New

Commit Message

Rob Clark Sept. 15, 2011, 10:47 p.m. UTC
From: Rob Clark <rob@ti.com>

This factors out common code from psb_gtt_attach_pages()/
i915_gem_object_get_pages_gtt() and psb_gtt_detach_pages()/
i915_gem_object_put_pages_gtt().

Signed-off-by: Rob Clark <rob@ti.com>
---
 drivers/gpu/drm/drm_gem.c |   87 +++++++++++++++++++++++++++++++++++++++++++++
 include/drm/drmP.h        |    3 ++
 2 files changed, 90 insertions(+), 0 deletions(-)
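
For illustration, a driver built on these helpers might pin and release
its backing pages roughly as follows.  This is a sketch only -- the
foo_* names and the pages field are hypothetical, not part of this
patch:

	static int foo_gem_pin(struct foo_gem_object *foo_obj)
	{
		/* Passing 0 works because the helper ORs in the shmem
		 * mapping's own gfp mask; a driver that needs low memory
		 * could pass __GFP_DMA32 instead. */
		struct page **pages = drm_gem_get_pages(&foo_obj->base, 0);

		if (IS_ERR(pages))
			return PTR_ERR(pages);

		foo_obj->pages = pages;
		return 0;
	}

	static void foo_gem_unpin(struct foo_gem_object *foo_obj)
	{
		/* dirty/accessed tell the VM the pages were written and
		 * touched, so contents are preserved if they get swapped. */
		drm_gem_put_pages(&foo_obj->base, foo_obj->pages, true, true);
		foo_obj->pages = NULL;
	}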

Comments

Rob Clark Sept. 26, 2011, 6:18 p.m. UTC | #1
On Thu, Sep 15, 2011 at 5:47 PM, Rob Clark <rob.clark@linaro.org> wrote:
> +/**
> + * drm_gem_get_pages - helper to allocate backing pages for a GEM object
> + * @obj: obj in question
> + * @gfpmask: gfp mask of requested pages
> + */
> +struct page ** drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
> +{


Hmm, in working through tiled buffer support over the weekend, I think
I've hit a case where I want to decouple the physical size (in terms
of pages) from virtual size.. which means I don't want to rely on the
same obj->size value for mmap offset creation as for determining # of
pages to allocate.

Since the patch for drm_gem_{get,put}_pages() doesn't seem to be on
drm-core-next yet, I think the more straightforward thing to do is add
a size (or numpages) arg to the get/put_pages functions and resubmit
this patch..

BR,
-R
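
A size-taking variant along those lines might look roughly like this
(a sketch with a hypothetical name, not what was eventually merged):

	/* Like drm_gem_get_pages(), but the caller chooses how much of
	 * the object to back with pages instead of using obj->size. */
	struct page **drm_gem_get_pages_size(struct drm_gem_object *obj,
			size_t size, gfp_t gfpmask)
	{
		int npages = size >> PAGE_SHIFT;
		/* ... body identical to drm_gem_get_pages(), with npages
		 * derived from the passed-in size ... */
	}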

> +       struct inode *inode;
> +       struct address_space *mapping;
> +       struct page *p, **pages;
> +       int i, npages;
> +
> +       /* This is the shared memory object that backs the GEM resource */
> +       inode = obj->filp->f_path.dentry->d_inode;
> +       mapping = inode->i_mapping;
> +
> +       npages = obj->size >> PAGE_SHIFT;
> +
> +       pages = drm_malloc_ab(npages, sizeof(struct page *));
> +       if (pages == NULL)
> +               return ERR_PTR(-ENOMEM);
> +
> +       gfpmask |= mapping_gfp_mask(mapping);
> +
> +       for (i = 0; i < npages; i++) {
> +               p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
> +               if (IS_ERR(p))
> +                       goto fail;
> +               pages[i] = p;
> +
> +               /* There is a hypothetical issue w/ drivers that require
> +                * buffer memory in the low 4GB.. if the pages are un-
> +                * pinned, and swapped out, they can end up swapped back
> +                * in above 4GB.  If pages are already in memory, then
> +                * shmem_read_mapping_page_gfp will ignore the gfpmask,
> +                * even if the already in-memory page disobeys the mask.
> +                *
> +                * It is only a theoretical issue today, because none of
> +                * the devices with this limitation can be populated with
> +                * enough memory to trigger the issue.  But this BUG_ON()
> +                * is here as a reminder in case the problem with
> +                * shmem_read_mapping_page_gfp() isn't solved by the time
> +                * it does become a real issue.
> +                *
> +                * See this thread: http://lkml.org/lkml/2011/7/11/238
> +                */
> +               BUG_ON((gfpmask & __GFP_DMA32) &&
> +                               (page_to_pfn(p) >= 0x00100000UL));
> +       }
> +
> +       return pages;
> +
> +fail:
> +       while (i--) {
> +               page_cache_release(pages[i]);
> +       }
> +       drm_free_large(pages);
> +       return ERR_PTR(PTR_ERR(p));
> +}
> +EXPORT_SYMBOL(drm_gem_get_pages);
> +
> +/**
> + * drm_gem_put_pages - helper to free backing pages for a GEM object
> + * @obj: obj in question
> + * @pages: pages to free
> + */
> +void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
> +               bool dirty, bool accessed)
> +{
> +       int i, npages;
> +
> +       npages = obj->size >> PAGE_SHIFT;
> +
> +       for (i = 0; i < npages; i++) {
> +               if (dirty)
> +                       set_page_dirty(pages[i]);
> +
> +               if (accessed)
> +                       mark_page_accessed(pages[i]);
> +
> +               /* Undo the reference we took when populating the table */
> +               page_cache_release(pages[i]);
> +       }
> +
> +       drm_free_large(pages);
> +}
> +EXPORT_SYMBOL(drm_gem_put_pages);
>
>  /**
>  * drm_gem_free_mmap_offset - release a fake mmap offset for an object
Daniel Vetter Sept. 26, 2011, 7:43 p.m. UTC | #2
On Mon, Sep 26, 2011 at 01:18:40PM -0500, Rob Clark wrote:
> On Thu, Sep 15, 2011 at 5:47 PM, Rob Clark <rob.clark@linaro.org> wrote:
> > +/**
> > + * drm_gem_get_pages - helper to allocate backing pages for a GEM object
> > + * @obj: obj in question
> > + * @gfpmask: gfp mask of requested pages
> > + */
> > +struct page ** drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
> > +{
> 
> 
> Hmm, in working through tiled buffer support over the weekend, I think
> I've hit a case where I want to decouple the physical size (in terms
> of pages) from virtual size.. which means I don't want to rely on the
> same obj->size value for mmap offset creation as for determining # of
> pages to allocate.
> 
> Since the patch for drm_gem_{get,put}_pages() doesn't seem to be on
> drm-core-next yet, I think the more straightforward thing to do is add
> a size (or numpages) arg to the get/put_pages functions and resubmit
> this patch..

I think making obj->size not be the size of the backing storage is the
wrong thing. In i915 we have a similar issue with tiled regions on gen2/3:
The minimal sizes for these are pretty large, so objects are smaller than
the tiled region. So backing storage and maps are both obj->size large,
but the area the object occupies in the gtt may be much larger. To put
some backing storage behind the not-used region (which sounds like what
you're trying to do here) we simply use one global dummy page. Userspace
should never use this, so that's fine.

Or is this a case of insane arm hw, and you can't actually put the same
physical page at different gtt locations?

-Daniel
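
The dummy-page scheme described here amounts to something like the
following (pseudocode-level sketch; insert_gtt_page() and the variables
are illustrative, not real i915 code):

	/* Back only obj->size worth of real pages; every GTT slot past
	 * the end of the object points at one shared dummy page that
	 * userspace never sees. */
	for (i = 0; i < gtt_region_npages; i++) {
		struct page *p = (i < obj_npages) ? pages[i] : dummy_page;
		insert_gtt_page(gtt, region_start + i, p);
	}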
Rob Clark Sept. 26, 2011, 7:56 p.m. UTC | #3
On Mon, Sep 26, 2011 at 2:43 PM, Daniel Vetter <daniel@ffwll.ch> wrote:
> On Mon, Sep 26, 2011 at 01:18:40PM -0500, Rob Clark wrote:
>> On Thu, Sep 15, 2011 at 5:47 PM, Rob Clark <rob.clark@linaro.org> wrote:
>> > +/**
>> > + * drm_gem_get_pages - helper to allocate backing pages for a GEM object
>> > + * @obj: obj in question
>> > + * @gfpmask: gfp mask of requested pages
>> > + */
>> > +struct page ** drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
>> > +{
>>
>>
>> Hmm, in working through tiled buffer support over the weekend, I think
>> I've hit a case where I want to decouple the physical size (in terms
>> of pages) from virtual size.. which means I don't want to rely on the
>> same obj->size value for mmap offset creation as for determining # of
>> pages to allocate.
>>
>> Since the patch for drm_gem_{get,put}_pages() doesn't seem to be on
>> drm-core-next yet, I think the more straightforward thing to do is add
>> a size (or numpages) arg to the get/put_pages functions and resubmit
>> this patch..
>
> I think making obj->size not be the size of the backing storage is the
> wrong thing. In i915 we have a similar issue with tiled regions on gen2/3:
> The minimal sizes for these are pretty large, so objects are smaller than
> the tiled region. So backing storage and maps are both obj->size large,
> but the area the object occupies in the gtt may be much larger. To put
> some backing storage behind the not-used region (which sounds like what
> you're trying to do here) we simply use one global dummy page. Userspace
> should never use this, so that's fine.
>
> Or is this a case of insane arm hw, and you can't actually put the same
> physical page at different gtt locations?

Well, the "GART" in our case is a bit restrictive.. we can have same
page in multiple locations, but not arbitrary locations.  Things need
to go to slot boundaries.  So it isn't really possible to get row n+1
to appear directly after row n.  To handle this we just round the
buffer stride up to next 4kb boundary and insert some dummy page the
the remaining slots to pad out to 4kb.

The only other way I could think of to handle that would be to have a
separate vsize and psize in 'struct drm_gem_object'..

BR,
-R
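
The rounding described above is essentially the following (illustrative
only; the names are made up):

	/* Round the stride up to the GART's 4KB slot granularity; rows
	 * then start on slot boundaries, and slots not covered by real
	 * buffer pages get the dummy page. */
	padded_stride = ALIGN(stride, SZ_4K);
	npages_virt = (padded_stride * height) >> PAGE_SHIFT; /* mmap size */
	npages_phys = DIV_ROUND_UP(stride * height, PAGE_SIZE); /* real pages */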


>
> -Daniel
Daniel Vetter Sept. 26, 2011, 8:22 p.m. UTC | #4
On Mon, Sep 26, 2011 at 21:56, Rob Clark <rob.clark@linaro.org> wrote:
> On Mon, Sep 26, 2011 at 2:43 PM, Daniel Vetter <daniel@ffwll.ch> wrote:
>> On Mon, Sep 26, 2011 at 01:18:40PM -0500, Rob Clark wrote:
>>> On Thu, Sep 15, 2011 at 5:47 PM, Rob Clark <rob.clark@linaro.org> wrote:
>>> > +/**
>>> > + * drm_gem_get_pages - helper to allocate backing pages for a GEM object
>>> > + * @obj: obj in question
>>> > + * @gfpmask: gfp mask of requested pages
>>> > + */
>>> > +struct page ** drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
>>> > +{
>>>
>>>
>>> Hmm, in working through tiled buffer support over the weekend, I think
>>> I've hit a case where I want to decouple the physical size (in terms
>>> of pages) from virtual size.. which means I don't want to rely on the
>>> same obj->size value for mmap offset creation as for determining # of
>>> pages to allocate.
>>>
>>> Since the patch for drm_gem_{get,put}_pages() doesn't seem to be on
>>> drm-core-next yet, I think the more straightforward thing to do is add
>>> a size (or numpages) arg to the get/put_pages functions and resubmit
>>> this patch..
>>
>> I think making obj->size not be the size of the backing storage is the
>> wrong thing. In i915 we have a similar issue with tiled regions on gen2/3:
>> The minimal sizes for these are pretty large, so objects are smaller than
>> the tiled region. So backing storage and maps are both obj->size large,
>> but the area the object occupies in the gtt may be much larger. To put
>> some backing storage behind the not-used region (which sounds like what
>> you're trying to do here) we simply use one global dummy page. Userspace
>> should never use this, so that's fine.
>>
>> Or is this a case of insane arm hw, and you can't actually put the same
>> physical page at different gtt locations?
>
> Well, the "GART" in our case is a bit restrictive.. we can have same
> page in multiple locations, but not arbitrary locations.  Things need
> to go to slot boundaries.  So it isn't really possible to get row n+1
> to appear directly after row n.  To handle this we just round the
> buffer stride up to next 4kb boundary and insert some dummy page the
> the remaining slots to pad out to 4kb.
>
> The only other way I could think of to handle that would be to have a
> separate vsize and psize in 'struct drm_gem_object'..

Well I think for this case the solution is simple: Tiling not allowed
if userspace is too dumb to properly round the buffer up so it
fulfills whatever odd requirement the hw has. I think hiding the fact
that certain buffers need more backing storage than a naive userspace
might assume is ripe for ugly problems down the road.
-Daniel
Rob Clark Sept. 26, 2011, 9:01 p.m. UTC | #5
On Mon, Sep 26, 2011 at 3:22 PM, Daniel Vetter <daniel@ffwll.ch> wrote:
> On Mon, Sep 26, 2011 at 21:56, Rob Clark <rob.clark@linaro.org> wrote:
>> On Mon, Sep 26, 2011 at 2:43 PM, Daniel Vetter <daniel@ffwll.ch> wrote:
>>> On Mon, Sep 26, 2011 at 01:18:40PM -0500, Rob Clark wrote:
>>>> On Thu, Sep 15, 2011 at 5:47 PM, Rob Clark <rob.clark@linaro.org> wrote:
>>>> > +/**
>>>> > + * drm_gem_get_pages - helper to allocate backing pages for a GEM object
>>>> > + * @obj: obj in question
>>>> > + * @gfpmask: gfp mask of requested pages
>>>> > + */
>>>> > +struct page ** drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
>>>> > +{
>>>>
>>>>
>>>> Hmm, in working through tiled buffer support over the weekend, I think
>>>> I've hit a case where I want to decouple the physical size (in terms
>>>> of pages) from virtual size.. which means I don't want to rely on the
>>>> same obj->size value for mmap offset creation as for determining # of
>>>> pages to allocate.
>>>>
>>>> Since the patch for drm_gem_{get,put}_pages() doesn't seem to be on
>>>> drm-core-next yet, I think the more straightforward thing to do is add
>>>> a size (or numpages) arg to the get/put_pages functions and resubmit
>>>> this patch..
>>>
>>> I think making obj->size not be the size of the backing storage is the
>>> wrong thing. In i915 we have a similar issue with tiled regions on gen2/3:
>>> The minimal sizes for these are pretty large, so objects are smaller than
>>> the tiled region. So backing storage and maps are both obj->size large,
>>> but the area the object occupies in the gtt may be much larger. To put
>>> some backing storage behind the not-used region (which sounds like what
>>> you're trying to do here) we simply use one global dummy page. Userspace
>>> should never use this, so that's fine.
>>>
>>> Or is this a case of insane arm hw, and you can't actually put the same
>>> physical page at different gtt locations?
>>
>> Well, the "GART" in our case is a bit restrictive.. we can have same
>> page in multiple locations, but not arbitrary locations.  Things need
>> to go to slot boundaries.  So it isn't really possible to get row n+1
>> to appear directly after row n.  To handle this we just round the
>> buffer stride up to next 4kb boundary and insert some dummy page the
>> the remaining slots to pad out to 4kb.
>>
>> The only other way I could think of to handle that would be to have a
>> separate vsize and psize in 'struct drm_gem_object'..
>
> Well I think for this case the solution is simple: Tiling not allowed
> if userspace is too dumb to properly round the buffer up so it
> fulfills whatever odd requirement the hw has. I think hiding the fact
> that certain buffers need more backing storage than a naive userspace
> might assume is ripe for ugly problems down the road.

I don't think this is quite the issue.. in the case of tiled buffers,
the way I had set up the interface, userspace passes width/height and
the kernel side calculates the sizes..

It is just that if we require the size in # of backing pages and the
size of the userspace mapping to be the same, then we end up allocating
a lot of extra pages: for every tiled buffer that might end up being
mapped to userspace, we have to round the size up to a stride aligned
to 4KB.. whereas everything beyond the nearest slot boundary doesn't
really need a backing page.

BR,
-R
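
(For a sense of the overhead, with illustrative numbers: a tiled buffer
with a 2560-byte stride rounds up to a 4096-byte padded stride, so
backing every mapped slot with a real page would waste 1536 bytes --
roughly 37% -- of each row.)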

> -Daniel
Alan Cox Sept. 27, 2011, 9:35 a.m. UTC | #6
> Well I think for this case the solution is simple: Tiling not allowed
> if userspace is too dumb to properly round the buffer up so it
> fulfills whatever odd requirement the hw has. I think hiding the fact
> that certain buffers need more backing storage than a naive userspace
> might assume is ripe for ugly problems down the road.

That depends a lot upon the interface. One good reason for hiding it for
example is that if you have hardware where a limit goes away (or worse
yet appears) in some rev of the device or an erratum you don't have to
issue a new X server.

For some of the other interfaces like the dumb fb api it's even more
important that the code doesn't know.

I don't however think the helper should know about padding, because I
think a driver can implement its own function which wraps the helper
and then adds the padding itself?

Alan
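
The wrapping suggested here could look something like this (hypothetical
driver code, not from any tree):

	/* Driver-private wrapper: fetch the real pages via the generic
	 * helper, then apply the hardware's padding rules on top. */
	static struct page **foo_get_pages(struct drm_gem_object *obj)
	{
		struct page **pages = drm_gem_get_pages(obj, 0);

		if (IS_ERR(pages))
			return pages;

		/* driver-specific: point any padding slots at a dummy
		 * page, program the GART, etc. */
		return pages;
	}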
Rob Clark Sept. 27, 2011, 1:03 p.m. UTC | #7
On Tue, Sep 27, 2011 at 4:35 AM, Alan Cox <alan@lxorguk.ukuu.org.uk> wrote:
>> Well I think for this case the solution is simple: Tiling not allowed
>> if userspace is too dumb to properly round the buffer up so it
>> fulfills whatever odd requirement the hw has. I think hiding the fact
>> that certain buffers need more backing storage than a naive userspace
>> might assume is ripe for ugly problems down the road.
>
> That depends a lot upon the interface. One good reason for hiding it for
> example is that if you have hardware where a limit goes away (or worse
> yet appears) in some rev of the device or an erratum you don't have to
> issue a new X server.
>
> For some of the other interfaces like the dumb fb api it's even more
> important that the code doesn't know.
>
> I don't however think the helper should know about padding, because I
> think a driver can implement its own function which wraps the helper
> and then adds the padding itself?


fwiw, Daniel convinced me to go a slightly different route, and keep
get/put_pages() as-is, but instead go with a variant of
drm_gem_create_mmap_offset() that takes a size parameter.. ie. roughly
like:

int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
...
}

I'll just call drm_gem_create_mmap_offset_size() directly; normal
drivers can just use drm_gem_create_mmap_offset().  That seems like it
should work..

BR,
-R
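
With that split, a driver whose userspace mapping is larger than its
backing storage would do roughly (sketch; vsize is the padded virtual
size, a hypothetical variable):

	/* expose vsize (>= obj->size) through the fake mmap offset */
	ret = drm_gem_create_mmap_offset_size(obj, vsize);
	if (ret)
		return ret;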

> Alan

Patch

diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 396e60c..821ba8a 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -285,6 +285,93 @@  again:
 }
 EXPORT_SYMBOL(drm_gem_handle_create);
 
+/**
+ * drm_gem_get_pages - helper to allocate backing pages for a GEM object
+ * @obj: obj in question
+ * @gfpmask: gfp mask of requested pages
+ */
+struct page ** drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
+{
+	struct inode *inode;
+	struct address_space *mapping;
+	struct page *p, **pages;
+	int i, npages;
+
+	/* This is the shared memory object that backs the GEM resource */
+	inode = obj->filp->f_path.dentry->d_inode;
+	mapping = inode->i_mapping;
+
+	npages = obj->size >> PAGE_SHIFT;
+
+	pages = drm_malloc_ab(npages, sizeof(struct page *));
+	if (pages == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	gfpmask |= mapping_gfp_mask(mapping);
+
+	for (i = 0; i < npages; i++) {
+		p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
+		if (IS_ERR(p))
+			goto fail;
+		pages[i] = p;
+
+		/* There is a hypothetical issue w/ drivers that require
+		 * buffer memory in the low 4GB.. if the pages are un-
+		 * pinned, and swapped out, they can end up swapped back
+		 * in above 4GB.  If pages are already in memory, then
+		 * shmem_read_mapping_page_gfp will ignore the gfpmask,
+		 * even if the already in-memory page disobeys the mask.
+		 *
+		 * It is only a theoretical issue today, because none of
+		 * the devices with this limitation can be populated with
+		 * enough memory to trigger the issue.  But this BUG_ON()
+		 * is here as a reminder in case the problem with
+		 * shmem_read_mapping_page_gfp() isn't solved by the time
+		 * it does become a real issue.
+		 *
+		 * See this thread: http://lkml.org/lkml/2011/7/11/238
+		 */
+		BUG_ON((gfpmask & __GFP_DMA32) &&
+				(page_to_pfn(p) >= 0x00100000UL));
+	}
+
+	return pages;
+
+fail:
+	while (i--) {
+		page_cache_release(pages[i]);
+	}
+	drm_free_large(pages);
+	return ERR_PTR(PTR_ERR(p));
+}
+EXPORT_SYMBOL(drm_gem_get_pages);
+
+/**
+ * drm_gem_put_pages - helper to free backing pages for a GEM object
+ * @obj: obj in question
+ * @pages: pages to free
+ */
+void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
+		bool dirty, bool accessed)
+{
+	int i, npages;
+
+	npages = obj->size >> PAGE_SHIFT;
+
+	for (i = 0; i < npages; i++) {
+		if (dirty)
+			set_page_dirty(pages[i]);
+
+		if (accessed)
+			mark_page_accessed(pages[i]);
+
+		/* Undo the reference we took when populating the table */
+		page_cache_release(pages[i]);
+	}
+
+	drm_free_large(pages);
+}
+EXPORT_SYMBOL(drm_gem_put_pages);
 
 /**
  * drm_gem_free_mmap_offset - release a fake mmap offset for an object
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 43538b6..a62d8fe 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1624,6 +1624,9 @@  drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
 	drm_gem_object_unreference_unlocked(obj);
 }
 
+struct page ** drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
+void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
+		bool dirty, bool accessed);
 void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
 int drm_gem_create_mmap_offset(struct drm_gem_object *obj);