@@ -26,6 +26,11 @@ EXPORT_TRACEPOINT_SYMBOL(dma_fence_signaled);
static DEFINE_SPINLOCK(dma_fence_stub_lock);
static struct dma_fence dma_fence_stub;

+struct drm_fence_private_stub {
+ struct dma_fence base;
+ spinlock_t lock;
+};
+
/*
* fence context counter: each execution context should have its own
* fence context, this allows checking if fences belong to the same
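A note on the wrapper struct added in the hunk above: the dma_fence is deliberately its first member. Assuming dma_fence_stub_ops leaves .release unset (as the existing shared-stub ops do), the final dma_fence_put() on a private stub falls through to dma_fence_free(), which kfree_rcu()s the dma_fence itself; with base at offset zero that pointer is also the address kzalloc() returned for the whole wrapper, so the embedded spinlock is reclaimed along with it. A hedged, illustrative way to pin that invariant down next to the struct (not part of the patch):

#include <linux/build_bug.h>
#include <linux/stddef.h>

/* Illustrative only: the default dma-fence release path frees the
 * dma_fence pointer itself, so the wrapper is reclaimed as a whole
 * only while 'base' stays the first member.
 */
static_assert(offsetof(struct drm_fence_private_stub, base) == 0,
	      "dma_fence must remain the first member of the private stub");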
@@ -123,7 +128,9 @@ static const struct dma_fence_ops dma_fence_stub_ops = {
/**
* dma_fence_get_stub - return a signaled fence
*
- * Return a stub fence which is already signaled.
+ * Return a stub fence which is already signaled. The fence's
+ * timestamp corresponds to the first time after boot this
+ * function is called.
*/
struct dma_fence *dma_fence_get_stub(void)
{
@@ -141,6 +148,30 @@ struct dma_fence *dma_fence_get_stub(void)
}
EXPORT_SYMBOL(dma_fence_get_stub);

+/**
+ * dma_fence_allocate_private_stub - return a private, signaled fence
+ *
+ * Return a newly allocated and signaled stub fence.
+ */
+struct dma_fence *dma_fence_allocate_private_stub(void)
+{
+ struct drm_fence_private_stub *fence;
+
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (fence == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&fence->lock);
+ dma_fence_init(&fence->base,
+ &dma_fence_stub_ops,
+ &fence->lock,
+ 0, 0);
+ dma_fence_signal(&fence->base);
+
+ return &fence->base;
+}
+EXPORT_SYMBOL(dma_fence_allocate_private_stub);
+
/**
* dma_fence_context_alloc - allocate an array of fence contexts
* @num: amount of contexts to allocate
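The new export is used much like the existing stub helper, except that the call can fail and the caller owns the single reference on the returned fence. A minimal usage sketch under those assumptions (the surrounding function and its @slot parameter are illustrative, not part of the patch):

#include <linux/dma-fence.h>
#include <linux/err.h>

/* Stash a freshly allocated, already-signaled fence in @slot.  Unlike
 * dma_fence_get_stub(), every call returns its own fence with its own
 * refcount and a signal timestamp taken at allocation time.
 */
static int example_install_signaled_fence(struct dma_fence **slot)
{
	struct dma_fence *fence = dma_fence_allocate_private_stub();

	if (IS_ERR(fence))
		return PTR_ERR(fence);	/* -ENOMEM in this patch */

	*slot = fence;	/* transfer the reference to the caller's slot */
	return 0;	/* released later with dma_fence_put(*slot) */
}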
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -350,12 +350,16 @@ EXPORT_SYMBOL(drm_syncobj_replace_fence);
*
* Assign an already signaled stub fence to the sync object.
*/
-static void drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
+static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
{
- struct dma_fence *fence = dma_fence_get_stub();
+ struct dma_fence *fence = dma_fence_allocate_private_stub();
+
+ if (IS_ERR(fence))
+ return PTR_ERR(fence);

drm_syncobj_replace_fence(syncobj, fence);
dma_fence_put(fence);
+ return 0;
}

/* 5s default for wait submission */
@@ -469,6 +473,7 @@ EXPORT_SYMBOL(drm_syncobj_free);
int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
struct dma_fence *fence)
{
+ int ret;
struct drm_syncobj *syncobj;

syncobj = kzalloc(sizeof(struct drm_syncobj), GFP_KERNEL);
@@ -479,8 +484,13 @@ int drm_syncobj_create(struct drm_syncobj **out_syncobj, uint32_t flags,
INIT_LIST_HEAD(&syncobj->cb_list);
spin_lock_init(&syncobj->lock);

- if (flags & DRM_SYNCOBJ_CREATE_SIGNALED)
- drm_syncobj_assign_null_handle(syncobj);
+ if (flags & DRM_SYNCOBJ_CREATE_SIGNALED) {
+ ret = drm_syncobj_assign_null_handle(syncobj);
+ if (ret < 0) {
+ drm_syncobj_put(syncobj);
+ return ret;
+ }
+ }

if (fence)
drm_syncobj_replace_fence(syncobj, fence);
@@ -1322,8 +1332,11 @@ drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
if (ret < 0)
return ret;

- for (i = 0; i < args->count_handles; i++)
- drm_syncobj_assign_null_handle(syncobjs[i]);
+ for (i = 0; i < args->count_handles; i++) {
+ ret = drm_syncobj_assign_null_handle(syncobjs[i]);
+ if (ret < 0)
+ break;
+ }

drm_syncobj_array_free(syncobjs, args->count_handles);
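Because the drm_syncobj paths above now allocate a stub fence per call, both creating a pre-signaled sync object and signaling one can fail under memory pressure, and userspace has to be prepared for that. A hedged userspace sketch using the libdrm wrappers (drmSyncobjCreate()/drmSyncobjSignal()/drmSyncobjDestroy() from xf86drm.h; the demo function itself is illustrative):

#include <stdint.h>
#include <stdio.h>
#include <xf86drm.h>

/* Create a sync object that starts out signaled, signal it once more,
 * then destroy it.  After this patch either the create or the signal
 * step may fail if the kernel cannot allocate the private stub fence.
 */
static int demo_signaled_syncobj(int drm_fd)
{
	uint32_t handle;
	int ret;

	ret = drmSyncobjCreate(drm_fd, DRM_SYNCOBJ_CREATE_SIGNALED, &handle);
	if (ret) {
		fprintf(stderr, "syncobj create failed (%d)\n", ret);
		return ret;
	}

	ret = drmSyncobjSignal(drm_fd, &handle, 1);
	if (ret)
		fprintf(stderr, "syncobj signal failed (%d)\n", ret);

	drmSyncobjDestroy(drm_fd, handle);
	return ret;
}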
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -587,6 +587,7 @@ static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr)
}

struct dma_fence *dma_fence_get_stub(void);
+struct dma_fence *dma_fence_allocate_private_stub(void);
u64 dma_fence_context_alloc(unsigned num);

#define DMA_FENCE_TRACE(f, fmt, args...) \