author    Jens Axboe <axboe@kernel.dk>    2019-04-23 10:17:58 -0400
committer Jens Axboe <axboe@kernel.dk>    2019-04-23 10:17:58 -0400
commit    8358e3a8264a228cf2dfb6f3a05c0328f4118f12 (patch)
tree      f310ade889e169f6654ef4ba5747a7bb581e859c
parent    fb775faa9e46ff481e4ced11116c9bd45359cb43 (diff)
io_uring: remove 'state' argument from io_{read,write} path
Since commit 09bb839434b we don't use the state argument for any sort of
on-stack caching in the io read and write path. Remove the stale and unused
argument from them, and bubble it up to __io_submit_sqe() and down to
io_prep_rw().

Signed-off-by: Jens Axboe <axboe@kernel.dk>
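The shape of the change is easy to see in isolation. The sketch below is a
minimal illustration of the narrowed prototype, not kernel code: the
_old/_new suffixes are invented for side-by-side comparison (the kernel
function keeps its original name), and the forward-declared structs stand in
for the real definitions in fs/io_uring.c.

    #include <stdbool.h>

    /* Opaque stand-ins; the real definitions live in fs/io_uring.c. */
    struct io_kiocb;
    struct sqe_submit;
    struct io_submit_state;

    /* Before: a 'state' pointer was threaded through the whole
     * read/write path even though nothing dereferenced it anymore. */
    static int io_prep_rw_old(struct io_kiocb *req,
                              const struct sqe_submit *s,
                              bool force_nonblock,
                              struct io_submit_state *state);

    /* After: the dead parameter is dropped here and at every caller,
     * up through io_read()/io_write() and __io_submit_sqe(). */
    static int io_prep_rw_new(struct io_kiocb *req,
                              const struct sqe_submit *s,
                              bool force_nonblock);

The same trimming repeats at each level of the call chain in the diff below.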
 fs/io_uring.c | 25 ++++++++++++-------------
 1 file changed, 12 insertions(+), 13 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index b998e98acd01..0e9fb2cb1984 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -740,7 +740,7 @@ static bool io_file_supports_async(struct file *file)
 }
 
 static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
-		      bool force_nonblock, struct io_submit_state *state)
+		      bool force_nonblock)
 {
 	const struct io_uring_sqe *sqe = s->sqe;
 	struct io_ring_ctx *ctx = req->ctx;
@@ -938,7 +938,7 @@ static void io_async_list_note(int rw, struct io_kiocb *req, size_t len)
 }
 
 static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
-		   bool force_nonblock, struct io_submit_state *state)
+		   bool force_nonblock)
 {
 	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
 	struct kiocb *kiocb = &req->rw;
@@ -947,7 +947,7 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
 	size_t iov_count;
 	int ret;
 
-	ret = io_prep_rw(req, s, force_nonblock, state);
+	ret = io_prep_rw(req, s, force_nonblock);
 	if (ret)
 		return ret;
 	file = kiocb->ki_filp;
@@ -985,7 +985,7 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
 }
 
 static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
-		    bool force_nonblock, struct io_submit_state *state)
+		    bool force_nonblock)
 {
 	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
 	struct kiocb *kiocb = &req->rw;
@@ -994,7 +994,7 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
 	size_t iov_count;
 	int ret;
 
-	ret = io_prep_rw(req, s, force_nonblock, state);
+	ret = io_prep_rw(req, s, force_nonblock);
 	if (ret)
 		return ret;
 
@@ -1336,8 +1336,7 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 }
 
 static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
-			   const struct sqe_submit *s, bool force_nonblock,
-			   struct io_submit_state *state)
+			   const struct sqe_submit *s, bool force_nonblock)
 {
 	int ret, opcode;
 
@@ -1353,18 +1352,18 @@ static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
 	case IORING_OP_READV:
 		if (unlikely(s->sqe->buf_index))
 			return -EINVAL;
-		ret = io_read(req, s, force_nonblock, state);
+		ret = io_read(req, s, force_nonblock);
 		break;
 	case IORING_OP_WRITEV:
 		if (unlikely(s->sqe->buf_index))
 			return -EINVAL;
-		ret = io_write(req, s, force_nonblock, state);
+		ret = io_write(req, s, force_nonblock);
 		break;
 	case IORING_OP_READ_FIXED:
-		ret = io_read(req, s, force_nonblock, state);
+		ret = io_read(req, s, force_nonblock);
 		break;
 	case IORING_OP_WRITE_FIXED:
-		ret = io_write(req, s, force_nonblock, state);
+		ret = io_write(req, s, force_nonblock);
 		break;
 	case IORING_OP_FSYNC:
 		ret = io_fsync(req, s->sqe, force_nonblock);
@@ -1457,7 +1456,7 @@ restart:
 			s->has_user = cur_mm != NULL;
 			s->needs_lock = true;
 			do {
-				ret = __io_submit_sqe(ctx, req, s, false, NULL);
+				ret = __io_submit_sqe(ctx, req, s, false);
 				/*
 				 * We can get EAGAIN for polled IO even though
 				 * we're forcing a sync submission from here,
@@ -1623,7 +1622,7 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
 	if (unlikely(ret))
 		goto out;
 
-	ret = __io_submit_sqe(ctx, req, s, true, state);
+	ret = __io_submit_sqe(ctx, req, s, true);
 	if (ret == -EAGAIN) {
 		struct io_uring_sqe *sqe_copy;
 