about summary refs log tree commit diff stats
path: root/fs/aio.c
diff options
context:
space:
mode:
authorChristoph Hellwig <hch@lst.de>2018-11-19 17:57:42 -0500
committerJens Axboe <axboe@kernel.dk>2018-12-18 10:29:58 -0500
commit432c79978c33ecef91b1b04cea6936c20810da29 (patch)
tree63b7dd99d723848d5559e13779cabd5805184ad0 /fs/aio.c
parentbc9bff61624ac33b7c95861abea1af24ee7a94fc (diff)
aio: separate out ring reservation from req allocation
This is in preparation for certain types of IO not needing a ring reservation.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'fs/aio.c')
-rw-r--r--fs/aio.c30
1 file changed, 17 insertions, 13 deletions
diff --git a/fs/aio.c b/fs/aio.c
index 6f32d30d7f45..7556709b9b8d 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -902,7 +902,7 @@ static void put_reqs_available(struct kioctx *ctx, unsigned nr)
902 local_irq_restore(flags); 902 local_irq_restore(flags);
903} 903}
904 904
905static bool get_reqs_available(struct kioctx *ctx) 905static bool __get_reqs_available(struct kioctx *ctx)
906{ 906{
907 struct kioctx_cpu *kcpu; 907 struct kioctx_cpu *kcpu;
908 bool ret = false; 908 bool ret = false;
@@ -994,6 +994,14 @@ static void user_refill_reqs_available(struct kioctx *ctx)
994 spin_unlock_irq(&ctx->completion_lock); 994 spin_unlock_irq(&ctx->completion_lock);
995} 995}
996 996
997static bool get_reqs_available(struct kioctx *ctx)
998{
999 if (__get_reqs_available(ctx))
1000 return true;
1001 user_refill_reqs_available(ctx);
1002 return __get_reqs_available(ctx);
1003}
1004
997/* aio_get_req 1005/* aio_get_req
998 * Allocate a slot for an aio request. 1006 * Allocate a slot for an aio request.
999 * Returns NULL if no requests are free. 1007 * Returns NULL if no requests are free.
@@ -1002,24 +1010,15 @@ static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
1002{ 1010{
1003 struct aio_kiocb *req; 1011 struct aio_kiocb *req;
1004 1012
1005 if (!get_reqs_available(ctx)) {
1006 user_refill_reqs_available(ctx);
1007 if (!get_reqs_available(ctx))
1008 return NULL;
1009 }
1010
1011 req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO); 1013 req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO);
1012 if (unlikely(!req)) 1014 if (unlikely(!req))
1013 goto out_put; 1015 return NULL;
1014 1016
1015 percpu_ref_get(&ctx->reqs); 1017 percpu_ref_get(&ctx->reqs);
1016 INIT_LIST_HEAD(&req->ki_list); 1018 INIT_LIST_HEAD(&req->ki_list);
1017 refcount_set(&req->ki_refcnt, 0); 1019 refcount_set(&req->ki_refcnt, 0);
1018 req->ki_ctx = ctx; 1020 req->ki_ctx = ctx;
1019 return req; 1021 return req;
1020out_put:
1021 put_reqs_available(ctx, 1);
1022 return NULL;
1023} 1022}
1024 1023
1025static struct kioctx *lookup_ioctx(unsigned long ctx_id) 1024static struct kioctx *lookup_ioctx(unsigned long ctx_id)
@@ -1807,9 +1806,13 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
1807 return -EINVAL; 1806 return -EINVAL;
1808 } 1807 }
1809 1808
1809 if (!get_reqs_available(ctx))
1810 return -EAGAIN;
1811
1812 ret = -EAGAIN;
1810 req = aio_get_req(ctx); 1813 req = aio_get_req(ctx);
1811 if (unlikely(!req)) 1814 if (unlikely(!req))
1812 return -EAGAIN; 1815 goto out_put_reqs_available;
1813 1816
1814 if (iocb.aio_flags & IOCB_FLAG_RESFD) { 1817 if (iocb.aio_flags & IOCB_FLAG_RESFD) {
1815 /* 1818 /*
@@ -1872,11 +1875,12 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
1872 goto out_put_req; 1875 goto out_put_req;
1873 return 0; 1876 return 0;
1874out_put_req: 1877out_put_req:
1875 put_reqs_available(ctx, 1);
1876 percpu_ref_put(&ctx->reqs); 1878 percpu_ref_put(&ctx->reqs);
1877 if (req->ki_eventfd) 1879 if (req->ki_eventfd)
1878 eventfd_ctx_put(req->ki_eventfd); 1880 eventfd_ctx_put(req->ki_eventfd);
1879 kmem_cache_free(kiocb_cachep, req); 1881 kmem_cache_free(kiocb_cachep, req);
1882out_put_reqs_available:
1883 put_reqs_available(ctx, 1);
1880 return ret; 1884 return ret;
1881} 1885}
1882 1886