summaryrefslogtreecommitdiffstats
path: root/fs/io_uring.c
diff options
context:
space:
mode:
authorJens Axboe <axboe@kernel.dk>2019-09-10 11:13:05 -0400
committerJens Axboe <axboe@kernel.dk>2019-09-10 11:13:05 -0400
commit18d9be1a970c3704366df902b00871bea88d9f14 (patch)
tree35ea440ed8b0d7cd43c487261823cba1e002aac2 /fs/io_uring.c
parentc576666863b788c2d7e8ab4ef4edd0e9059cb47b (diff)
io_uring: add io_queue_async_work() helper
Add a helper for queueing a request for async execution, in preparation for optimizing it. No functional change in this patch. Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'fs/io_uring.c')
-rw-r--r--fs/io_uring.c16
1 file changed, 11 insertions, 5 deletions
diff --git a/fs/io_uring.c b/fs/io_uring.c
index b2f88c2dc2fd..41840bf26d3b 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -443,6 +443,12 @@ static void __io_commit_cqring(struct io_ring_ctx *ctx)
443 } 443 }
444} 444}
445 445
446static inline void io_queue_async_work(struct io_ring_ctx *ctx,
447 struct io_kiocb *req)
448{
449 queue_work(ctx->sqo_wq, &req->work);
450}
451
446static void io_commit_cqring(struct io_ring_ctx *ctx) 452static void io_commit_cqring(struct io_ring_ctx *ctx)
447{ 453{
448 struct io_kiocb *req; 454 struct io_kiocb *req;
@@ -456,7 +462,7 @@ static void io_commit_cqring(struct io_ring_ctx *ctx)
456 continue; 462 continue;
457 } 463 }
458 req->flags |= REQ_F_IO_DRAINED; 464 req->flags |= REQ_F_IO_DRAINED;
459 queue_work(ctx->sqo_wq, &req->work); 465 io_queue_async_work(ctx, req);
460 } 466 }
461} 467}
462 468
@@ -619,7 +625,7 @@ static void io_req_link_next(struct io_kiocb *req)
619 625
620 nxt->flags |= REQ_F_LINK_DONE; 626 nxt->flags |= REQ_F_LINK_DONE;
621 INIT_WORK(&nxt->work, io_sq_wq_submit_work); 627 INIT_WORK(&nxt->work, io_sq_wq_submit_work);
622 queue_work(req->ctx->sqo_wq, &nxt->work); 628 io_queue_async_work(req->ctx, nxt);
623 } 629 }
624} 630}
625 631
@@ -1519,7 +1525,7 @@ static void io_poll_remove_one(struct io_kiocb *req)
1519 WRITE_ONCE(poll->canceled, true); 1525 WRITE_ONCE(poll->canceled, true);
1520 if (!list_empty(&poll->wait.entry)) { 1526 if (!list_empty(&poll->wait.entry)) {
1521 list_del_init(&poll->wait.entry); 1527 list_del_init(&poll->wait.entry);
1522 queue_work(req->ctx->sqo_wq, &req->work); 1528 io_queue_async_work(req->ctx, req);
1523 } 1529 }
1524 spin_unlock(&poll->head->lock); 1530 spin_unlock(&poll->head->lock);
1525 1531
@@ -1633,7 +1639,7 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
1633 io_cqring_ev_posted(ctx); 1639 io_cqring_ev_posted(ctx);
1634 io_put_req(req); 1640 io_put_req(req);
1635 } else { 1641 } else {
1636 queue_work(ctx->sqo_wq, &req->work); 1642 io_queue_async_work(ctx, req);
1637 } 1643 }
1638 1644
1639 return 1; 1645 return 1;
@@ -2073,7 +2079,7 @@ static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
2073 if (list) 2079 if (list)
2074 atomic_inc(&list->cnt); 2080 atomic_inc(&list->cnt);
2075 INIT_WORK(&req->work, io_sq_wq_submit_work); 2081 INIT_WORK(&req->work, io_sq_wq_submit_work);
2076 queue_work(ctx->sqo_wq, &req->work); 2082 io_queue_async_work(ctx, req);
2077 } 2083 }
2078 2084
2079 /* 2085 /*