diff mbox series

[5.10,2/5] io_uring: fix racy IOPOLL completions

Message ID 3a8d4b11f6cbb28c5067fd77ba35c2995f4658be.1607293068.git.asml.silence@gmail.com
State Accepted
Commit 31bff9a51b264df6d144931a6a5f1d6cc815ed4b
Headers show
Series [5.10,1/5] io_uring: always let io_iopoll_complete() complete polled io. | expand

Commit Message

Pavel Begunkov Dec. 6, 2020, 10:22 p.m. UTC
IOPOLL allows buffer remove/provide requests, but they don't
synchronise by the rules of IOPOLL, namely they have to hold uring_lock.

Cc: <stable@vger.kernel.org> # 5.7+
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 fs/io_uring.c | 23 ++++++++++++++++++-----
 1 file changed, 18 insertions(+), 5 deletions(-)

Comments

Pavel Begunkov Dec. 7, 2020, 6:31 p.m. UTC | #1
On 06/12/2020 22:22, Pavel Begunkov wrote:
> IOPOLL allows buffer remove/provide requests, but they don't

> synchronise by the rules of IOPOLL, namely they have to hold uring_lock.

> 

> Cc: <stable@vger.kernel.org> # 5.7+

> Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>

> ---

>  fs/io_uring.c | 23 ++++++++++++++++++-----

>  1 file changed, 18 insertions(+), 5 deletions(-)

> 

> diff --git a/fs/io_uring.c b/fs/io_uring.c

> index c895a306f919..4fac02ea5f4c 100644

> --- a/fs/io_uring.c

> +++ b/fs/io_uring.c

> @@ -3948,11 +3948,17 @@ static int io_remove_buffers(struct io_kiocb *req, bool force_nonblock,

>  	head = idr_find(&ctx->io_buffer_idr, p->bgid);

>  	if (head)

>  		ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);

> -

> -	io_ring_submit_lock(ctx, !force_nonblock);


Here there should be an unlock(), not a second lock(). I didn't notice
it at first, but the patch fixes it. Maybe for 5.10?

>  	if (ret < 0)

>  		req_set_fail_links(req);

> -	__io_req_complete(req, ret, 0, cs);

> +

> +	/* need to hold the lock to complete IOPOLL requests */

> +	if (ctx->flags & IORING_SETUP_IOPOLL) {

> +		__io_req_complete(req, ret, 0, cs);

> +		io_ring_submit_unlock(ctx, !force_nonblock);

> +	} else {

> +		io_ring_submit_unlock(ctx, !force_nonblock);

> +		__io_req_complete(req, ret, 0, cs);

> +	}

>  	return 0;

>  }



-- 
Pavel Begunkov
diff mbox series

Patch

diff --git a/fs/io_uring.c b/fs/io_uring.c
index c895a306f919..4fac02ea5f4c 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -3948,11 +3948,17 @@  static int io_remove_buffers(struct io_kiocb *req, bool force_nonblock,
 	head = idr_find(&ctx->io_buffer_idr, p->bgid);
 	if (head)
 		ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);
-
-	io_ring_submit_lock(ctx, !force_nonblock);
 	if (ret < 0)
 		req_set_fail_links(req);
-	__io_req_complete(req, ret, 0, cs);
+
+	/* need to hold the lock to complete IOPOLL requests */
+	if (ctx->flags & IORING_SETUP_IOPOLL) {
+		__io_req_complete(req, ret, 0, cs);
+		io_ring_submit_unlock(ctx, !force_nonblock);
+	} else {
+		io_ring_submit_unlock(ctx, !force_nonblock);
+		__io_req_complete(req, ret, 0, cs);
+	}
 	return 0;
 }
 
@@ -4037,10 +4043,17 @@  static int io_provide_buffers(struct io_kiocb *req, bool force_nonblock,
 		}
 	}
 out:
-	io_ring_submit_unlock(ctx, !force_nonblock);
 	if (ret < 0)
 		req_set_fail_links(req);
-	__io_req_complete(req, ret, 0, cs);
+
+	/* need to hold the lock to complete IOPOLL requests */
+	if (ctx->flags & IORING_SETUP_IOPOLL) {
+		__io_req_complete(req, ret, 0, cs);
+		io_ring_submit_unlock(ctx, !force_nonblock);
+	} else {
+		io_ring_submit_unlock(ctx, !force_nonblock);
+		__io_req_complete(req, ret, 0, cs);
+	}
 	return 0;
 }