summaryrefslogtreecommitdiffstats
path: root/fs/io_uring.c
diff options
context:
space:
mode:
authorzhangyi (F) <yi.zhang@huawei.com>2019-10-23 03:10:08 -0400
committerJens Axboe <axboe@kernel.dk>2019-10-24 00:09:56 -0400
commitef03681ae8df770745978148a7fb84796ae99cba (patch)
tree3112e7810ebfb63944675e06320061f98b723c02 /fs/io_uring.c
parentbc808bced39f4e4b626c5ea8c63d5e41fce7205a (diff)
io_uring: correct timeout req sequence when waiting timeout
The sequence number of reqs on the timeout_list before the timeout req should be adjusted in io_timeout_fn(), because the current timeout req will consume a slot in the cq_ring and the cq_tail pointer will be increased, otherwise other timeout reqs may return in advance without waiting for enough wait_nr. Signed-off-by: zhangyi (F) <yi.zhang@huawei.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--fs/io_uring.c11
1 files changed, 10 insertions, 1 deletions
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 08c2c428e212..b65a68582a7c 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1877,7 +1877,7 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1877static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer) 1877static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
1878{ 1878{
1879 struct io_ring_ctx *ctx; 1879 struct io_ring_ctx *ctx;
1880 struct io_kiocb *req; 1880 struct io_kiocb *req, *prev;
1881 unsigned long flags; 1881 unsigned long flags;
1882 1882
1883 req = container_of(timer, struct io_kiocb, timeout.timer); 1883 req = container_of(timer, struct io_kiocb, timeout.timer);
@@ -1885,6 +1885,15 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
1885 atomic_inc(&ctx->cq_timeouts); 1885 atomic_inc(&ctx->cq_timeouts);
1886 1886
1887 spin_lock_irqsave(&ctx->completion_lock, flags); 1887 spin_lock_irqsave(&ctx->completion_lock, flags);
1888 /*
1889 * Adjust the reqs sequence before the current one because it
1890 * will consume a slot in the cq_ring and the cq_tail pointer
1891 * will be increased, otherwise other timeout reqs may return in
1892 * advance without waiting for enough wait_nr.
1893 */
1894 prev = req;
1895 list_for_each_entry_continue_reverse(prev, &ctx->timeout_list, list)
1896 prev->sequence++;
1888 list_del(&req->list); 1897 list_del(&req->list);
1889 1898
1890 io_cqring_fill_event(ctx, req->user_data, -ETIME); 1899 io_cqring_fill_event(ctx, req->user_data, -ETIME);