author	yangerkun <yangerkun@huawei.com>	2019-10-15 09:59:29 -0400
committer	Jens Axboe <axboe@kernel.dk>	2019-10-15 10:55:50 -0400
commit	5da0fb1ab34ccfe6d49210b4f5a739c59fcbf25e (patch)
tree	c9eaf517afbc8c7dc8e2f9633ec17d6f0416e647 /fs
parent	7a7c5e715e722c86d602c56a09e77f000364e263 (diff)
io_uring: consider the overflow of sequence for timeout req
Now we recalculate the sequence of a timeout with 'req->sequence = ctx->cached_sq_head + count - 1', and judge the right place to insert it into timeout_list by comparing the number of requests we still expect to complete. But we have not considered overflow:

1. ctx->cached_sq_head + count - 1 may overflow, so a bigger count for the new timeout req can produce a smaller req->sequence.

2. The current cached_sq_head may have overflowed relative to an earlier req's, which likewise leaves the new timeout req with a smaller req->sequence.

Either overflow misorders timeout_list, which can make the timeout requests complete in the wrong order. Fix it by reusing req->submit.sequence to store the count, and adjust the insertion-sort logic in io_timeout accordingly.

Signed-off-by: yangerkun <yangerkun@huawei.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
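To make case 1 concrete, here is a minimal userspace sketch (illustrative only, not kernel code): a timeout armed near UINT_MAX with a larger count wraps to a numerically smaller 32-bit sequence, while widening the arithmetic to long long, as the patch does, keeps the targets in order.

#include <stdio.h>
#include <limits.h>

int main(void)
{
	unsigned cached_sq_head = UINT_MAX - 1;	/* sq head about to wrap */
	unsigned count_a = 1, count_b = 10;	/* b must expire after a */

	unsigned seq_a = cached_sq_head + count_a - 1;	/* 4294967294 */
	unsigned seq_b = cached_sq_head + count_b - 1;	/* wraps to 7 */
	printf("seq_a=%u seq_b=%u\n", seq_a, seq_b);	/* b sorts first: wrong */

	/* Widened arithmetic cannot wrap, so the order survives. */
	long long tmp_a = (long long)cached_sq_head + count_a - 1;
	long long tmp_b = (long long)cached_sq_head + count_b - 1;
	printf("tmp_a=%lld tmp_b=%lld\n", tmp_a, tmp_b);
	return 0;
}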
Diffstat (limited to 'fs')
-rw-r--r--	fs/io_uring.c	27
1 file changed, 21 insertions(+), 6 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 38d274fc0f25..d2cb277da2f4 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1884,7 +1884,7 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
 
 static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-	unsigned count, req_dist, tail_index;
+	unsigned count;
 	struct io_ring_ctx *ctx = req->ctx;
 	struct list_head *entry;
 	struct timespec64 ts;
@@ -1907,21 +1907,36 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 		count = 1;
 
 	req->sequence = ctx->cached_sq_head + count - 1;
+	/* reuse it to store the count */
+	req->submit.sequence = count;
 	req->flags |= REQ_F_TIMEOUT;
 
 	/*
 	 * Insertion sort, ensuring the first entry in the list is always
 	 * the one we need first.
 	 */
-	tail_index = ctx->cached_cq_tail - ctx->rings->sq_dropped;
-	req_dist = req->sequence - tail_index;
 	spin_lock_irq(&ctx->completion_lock);
 	list_for_each_prev(entry, &ctx->timeout_list) {
 		struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list);
-		unsigned dist;
+		unsigned nxt_sq_head;
+		long long tmp, tmp_nxt;
 
-		dist = nxt->sequence - tail_index;
-		if (req_dist >= dist)
+		/*
+		 * Since cached_sq_head + count - 1 can overflow, use type long
+		 * long to store it.
+		 */
+		tmp = (long long)ctx->cached_sq_head + count - 1;
+		nxt_sq_head = nxt->sequence - nxt->submit.sequence + 1;
+		tmp_nxt = (long long)nxt_sq_head + nxt->submit.sequence - 1;
+
+		/*
+		 * cached_sq_head may overflow, and it will never overflow twice
+		 * once there is some timeout req still be valid.
+		 */
+		if (ctx->cached_sq_head < nxt_sq_head)
+			tmp_nxt += UINT_MAX;
+
+		if (tmp >= tmp_nxt)
 			break;
 	}
 	list_add(&req->list, entry);
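The insertion sort above leans on the stored count to undo the wraparound: since 'sequence = sq_head + count - 1' is exact modulo 2^32, 'sequence - count + 1' recovers the sq head a request was queued at even when the addition overflowed, which is what the nxt_sq_head line computes. A standalone sketch of just that identity (illustrative, not kernel code):

#include <stdio.h>
#include <limits.h>

int main(void)
{
	unsigned sq_head = UINT_MAX - 2;	/* head near the 32-bit limit */
	unsigned count = 10;

	unsigned sequence = sq_head + count - 1;	/* wraps to 6 */
	unsigned recovered = sequence - count + 1;	/* mod 2^32 arithmetic */

	printf("sequence=%u recovered=%u match=%d\n",
	       sequence, recovered, recovered == sq_head);	/* match=1 */
	return 0;
}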