author		Jens Axboe <axboe@kernel.dk>	2019-10-25 12:06:15 -0400
committer	Jens Axboe <axboe@kernel.dk>	2019-10-25 12:58:53 -0400
commit		2b2ed9750fc9d040b9f6d076afcef6f00b6f1f7c (patch)
tree		e7688dd8e1ca4f2b2beabda1b9cc9d824724aa76 /fs/io_uring.c
parent		498ccd9eda49117c34e0041563d0da6ac40e52b8 (diff)
io_uring: fix bad inflight accounting for SETUP_IOPOLL|SETUP_SQTHREAD
We currently assume that submissions from the sqthread are successful, and if IO polling is enabled, we use that value for knowing how many completions to look for. But if we overflowed the CQ ring or some requests simply got errored and already completed, they won't be available for polling.

For the case of IO polling and SQTHREAD usage, look at the pending poll list. If it ever hits empty then we know that we don't have any more pollable requests inflight. For that case, simply reset the inflight count to zero.

Reported-by: Pavel Begunkov <asml.silence@gmail.com>
Reviewed-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
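In code terms, the patch splits io_iopoll_check() into a bare __io_iopoll_check() plus a locking wrapper, so the sqthread can test the poll list and reap completions under a single uring_lock acquisition. Below is a condensed sketch of the patched branch in io_sq_thread(), taken from the diff that follows; the surrounding submission loop and the inflight bookkeeping after this branch are elided:

	/* Condensed from the io_sq_thread() hunk below; not a standalone unit. */
	if (ctx->flags & IORING_SETUP_IOPOLL) {
		unsigned nr_events = 0;

		mutex_lock(&ctx->uring_lock);
		if (!list_empty(&ctx->poll_list)) {
			/* Pollable requests are still in flight: reap completions. */
			__io_iopoll_check(ctx, &nr_events, 0);
		} else {
			/*
			 * Nothing is queued for polling; whatever is still
			 * counted as inflight was dropped or already completed
			 * through the CQ ring, so stop waiting for it.
			 */
			inflight = 0;
		}
		mutex_unlock(&ctx->uring_lock);
	}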
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--	fs/io_uring.c	44
1 file changed, 32 insertions(+), 12 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 292c4c733cbe..a30c4f622cb3 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -874,19 +874,11 @@ static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
 	mutex_unlock(&ctx->uring_lock);
 }
 
-static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
-			   long min)
+static int __io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
+			     long min)
 {
-	int iters, ret = 0;
+	int iters = 0, ret = 0;
 
-	/*
-	 * We disallow the app entering submit/complete with polling, but we
-	 * still need to lock the ring to prevent racing with polled issue
-	 * that got punted to a workqueue.
-	 */
-	mutex_lock(&ctx->uring_lock);
-
-	iters = 0;
 	do {
 		int tmin = 0;
 
@@ -922,6 +914,21 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
 		ret = 0;
 	} while (min && !*nr_events && !need_resched());
 
+	return ret;
+}
+
+static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
+			   long min)
+{
+	int ret;
+
+	/*
+	 * We disallow the app entering submit/complete with polling, but we
+	 * still need to lock the ring to prevent racing with polled issue
+	 * that got punted to a workqueue.
+	 */
+	mutex_lock(&ctx->uring_lock);
+	ret = __io_iopoll_check(ctx, nr_events, min);
 	mutex_unlock(&ctx->uring_lock);
 	return ret;
 }
@@ -2657,7 +2664,20 @@ static int io_sq_thread(void *data)
 		unsigned nr_events = 0;
 
 		if (ctx->flags & IORING_SETUP_IOPOLL) {
-			io_iopoll_check(ctx, &nr_events, 0);
+			/*
+			 * inflight is the count of the maximum possible
+			 * entries we submitted, but it can be smaller
+			 * if we dropped some of them. If we don't have
+			 * poll entries available, then we know that we
+			 * have nothing left to poll for. Reset the
+			 * inflight count to zero in that case.
+			 */
+			mutex_lock(&ctx->uring_lock);
+			if (!list_empty(&ctx->poll_list))
+				__io_iopoll_check(ctx, &nr_events, 0);
+			else
+				inflight = 0;
+			mutex_unlock(&ctx->uring_lock);
 		} else {
 			/*
 			 * Normal IO, just pretend everything completed.