| | | |
|---|---|---|
| author | Stefan Bühler <source@stbuehler.de> | 2019-05-11 13:08:01 -0400 |
| committer | Jens Axboe <axboe@kernel.dk> | 2019-05-13 11:15:42 -0400 |
| commit | e2033e33cb3821c26d4f9e70677910827d3b7885 | |
| tree | 9a4c0da34b35916590b1104581ccd89339ea6d94 | |
| parent | a13f0655503a4a89df67fdc7cac6a7810795d4b3 | |
io_uring: fix race condition reading SQE data
When punting to workers, the SQE gets copied after the initial try. There is a race condition between reading SQE data for the initial try and copying it when punting to the workers.

For example, io_rw_done calls kiocb->ki_complete even if the request was actually prepared for IORING_OP_FSYNC (where ki_complete would still be NULL).

The easiest solution for now is to always prepare again in the worker. req->file is safe to prepare, though, as long as it is checked before use.
Signed-off-by: Stefan Bühler <source@stbuehler.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
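To make the ordering easier to see, here is a minimal user-space sketch of the race the commit message describes. Everything in it (`fake_sqe`, `fake_req`, `prep`, `rw_complete`, the `F_PREPPED` constant) is invented for illustration and heavily simplified from the real io_uring code; it only models the pre-patch pattern in which a REQ_F_PREPPED-style flag makes the worker skip re-preparation.

```c
/* Illustrative only: not kernel code, names are made up for this sketch. */
#include <stdio.h>

#define OP_READV  1
#define OP_FSYNC  3
#define F_PREPPED 16   /* stands in for REQ_F_PREPPED, which the patch removes */

struct fake_sqe {
	int opcode;
};

struct fake_req {
	int flags;
	void (*ki_complete)(struct fake_req *req);  /* only read/write prep sets this */
};

static void rw_complete(struct fake_req *req)
{
	(void)req;
	puts("read/write completion ran");
}

/* Pre-patch shape: prep reads the SQE once, then refuses to run again. */
static void prep(struct fake_req *req, const struct fake_sqe *sqe)
{
	if (req->flags & F_PREPPED)          /* the early return this patch deletes */
		return;
	if (sqe->opcode == OP_READV)
		req->ki_complete = rw_complete;
	/* fsync prep intentionally leaves ki_complete NULL */
	req->flags |= F_PREPPED;
}

int main(void)
{
	struct fake_sqe shared = { .opcode = OP_FSYNC };  /* shared SQ ring slot */
	struct fake_req req = { 0 };

	/* initial try: SQE read directly from the shared slot, prepped as FSYNC */
	prep(&req, &shared);

	/* the slot's contents change before the punt copies the SQE ... */
	shared.opcode = OP_READV;
	struct fake_sqe copy = shared;       /* worker's private copy now says READV */

	/* worker retry: with F_PREPPED still set, prep() is skipped entirely */
	prep(&req, &copy);

	if (copy.opcode == OP_READV) {
		if (req.ki_complete)
			req.ki_complete(&req);
		else
			puts("BUG: read/write path with NULL ki_complete (stale FSYNC prep)");
	}
	return 0;
}
```

With the early return removed, as the patch below does, the second prep() call runs against the worker's private copy, so the completion callback is always consistent with the opcode the worker actually executes.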
-rw-r--r-- fs/io_uring.c | 17

1 file changed, 2 insertions(+), 15 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 48ea3977012a..576d9c652b4c 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -329,9 +329,8 @@ struct io_kiocb {
 #define REQ_F_IOPOLL_COMPLETED	2	/* polled IO has completed */
 #define REQ_F_FIXED_FILE	4	/* ctx owns file */
 #define REQ_F_SEQ_PREV		8	/* sequential with previous */
-#define REQ_F_PREPPED		16	/* prep already done */
-#define REQ_F_IO_DRAIN		32	/* drain existing IO first */
-#define REQ_F_IO_DRAINED	64	/* drain done */
+#define REQ_F_IO_DRAIN		16	/* drain existing IO first */
+#define REQ_F_IO_DRAINED	32	/* drain done */
 	u64			user_data;
 	u32			error;	/* iopoll result from callback */
 	u32			sequence;
@@ -896,9 +895,6 @@ static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
 
 	if (!req->file)
 		return -EBADF;
-	/* For -EAGAIN retry, everything is already prepped */
-	if (req->flags & REQ_F_PREPPED)
-		return 0;
 
 	if (force_nonblock && !io_file_supports_async(req->file))
 		force_nonblock = false;
@@ -941,7 +937,6 @@ static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
 			return -EINVAL;
 		kiocb->ki_complete = io_complete_rw;
 	}
-	req->flags |= REQ_F_PREPPED;
 	return 0;
 }
 
@@ -1227,16 +1222,12 @@ static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
 	if (!req->file)
 		return -EBADF;
-	/* Prep already done (EAGAIN retry) */
-	if (req->flags & REQ_F_PREPPED)
-		return 0;
 
 	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
 		return -EINVAL;
 	if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
 		return -EINVAL;
 
-	req->flags |= REQ_F_PREPPED;
 	return 0;
 }
 
@@ -1277,16 +1268,12 @@ static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
 	if (!req->file)
 		return -EBADF;
-	/* Prep already done (EAGAIN retry) */
-	if (req->flags & REQ_F_PREPPED)
-		return 0;
 
 	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
 		return -EINVAL;
 	if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
 		return -EINVAL;
 
-	req->flags |= REQ_F_PREPPED;
 	return ret;
 }
 
