@@ -71,7 +71,7 @@ __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
* it returns true, the eventfd_signal() call should be deferred to a
* safe context.
*/
- if (WARN_ON_ONCE(this_cpu_read(eventfd_wake_count)))
+ if (WARN_ON_ONCE(this_cpu_read(eventfd_wake_count) > EFD_WAKE_DEPTH))
return 0;
spin_lock_irqsave(&ctx->wqh.lock, flags);
@@ -29,6 +29,9 @@
#define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
#define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE)
+/* Maximum allowed nesting depth for eventfd_signal() wakeups (tracked per CPU) */
+#define EFD_WAKE_DEPTH 1
+
struct eventfd_ctx;
struct file;
@@ -47,7 +50,7 @@ DECLARE_PER_CPU(int, eventfd_wake_count);
static inline bool eventfd_signal_count(void)
{
- return this_cpu_read(eventfd_wake_count);
+ return this_cpu_read(eventfd_wake_count) > EFD_WAKE_DEPTH;
}
#else /* CONFIG_EVENTFD */