| author | Linus Torvalds <torvalds@linux-foundation.org> | 2019-03-06 20:22:54 -0500 |
|---|---|---|
| committer | Al Viro <viro@zeniv.linux.org.uk> | 2019-03-17 20:52:24 -0400 |
| commit | b53119f13a04879c3bf502828d99d13726639ead | |
| tree | 6ada7dfb2a552754f28b7c883992b2417c4b4b3f | /fs |
| parent | 9e98c678c2d6ae3a17cb2de55d17f69dddaa231b | |
pin iocb through aio.
aio_poll() is not the only case that needs file pinned; worse, while
aio_read()/aio_write() can live without pinning iocb itself, the
proof is rather brittle and can easily break on later changes.
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Diffstat (limited to 'fs')
-rw-r--r-- | fs/aio.c | 37
1 file changed, 21 insertions, 16 deletions
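
To set the stage for the hunks below, here is a rough userspace model of the rule this patch adopts (everything in it, including the toy_* names, is invented for illustration and is not code from fs/aio.c): each request is created holding two references, one for the submitting code and one for the asynchronous completion, and the backing file is released only when the last of those references is dropped, which is what keeps both the iocb and its file pinned for the whole operation.

```c
/* Toy model only: this struct and these functions are invented for the
 * sketch and do not exist in fs/aio.c. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_iocb {
        atomic_int refcnt;      /* plays the role of ki_refcnt */
        FILE *filp;             /* plays the role of ki_filp   */
};

/* Like aio_get_req() after this patch: born with two references,
 * one for the async completion and one for the submitter. */
static struct toy_iocb *toy_get_req(FILE *filp)
{
        struct toy_iocb *req = malloc(sizeof(*req));

        if (!req)
                return NULL;
        atomic_init(&req->refcnt, 2);
        req->filp = filp;
        return req;
}

/* Unconditional teardown: the file is dropped only here, so it stays
 * pinned for as long as any reference to the request exists. */
static void toy_destroy(struct toy_iocb *req)
{
        if (req->filp)
                fclose(req->filp);
        free(req);
}

/* Drop one reference; whoever drops the last one destroys the request. */
static void toy_put(struct toy_iocb *req)
{
        if (atomic_fetch_sub(&req->refcnt, 1) == 1)
                toy_destroy(req);
}

int main(void)
{
        struct toy_iocb *req = toy_get_req(fopen("/dev/null", "r"));

        if (!req)
                return 1;
        toy_put(req);   /* the completion side drops its reference   */
        toy_put(req);   /* the submitter drops its own; now freed    */
        return 0;
}
```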
```diff
@@ -1022,6 +1022,9 @@ static bool get_reqs_available(struct kioctx *ctx)
 /* aio_get_req
  * Allocate a slot for an aio request.
  * Returns NULL if no requests are free.
+ *
+ * The refcount is initialized to 2 - one for the async op completion,
+ * one for the synchronous code that does this.
  */
 static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
 {
@@ -1034,7 +1037,7 @@ static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
        percpu_ref_get(&ctx->reqs);
        req->ki_ctx = ctx;
        INIT_LIST_HEAD(&req->ki_list);
-       refcount_set(&req->ki_refcnt, 0);
+       refcount_set(&req->ki_refcnt, 2);
        req->ki_eventfd = NULL;
        return req;
 }
@@ -1067,15 +1070,18 @@ out:
        return ret;
 }
 
+static inline void iocb_destroy(struct aio_kiocb *iocb)
+{
+       if (iocb->ki_filp)
+               fput(iocb->ki_filp);
+       percpu_ref_put(&iocb->ki_ctx->reqs);
+       kmem_cache_free(kiocb_cachep, iocb);
+}
+
 static inline void iocb_put(struct aio_kiocb *iocb)
 {
-       if (refcount_read(&iocb->ki_refcnt) == 0 ||
-           refcount_dec_and_test(&iocb->ki_refcnt)) {
-               if (iocb->ki_filp)
-                       fput(iocb->ki_filp);
-               percpu_ref_put(&iocb->ki_ctx->reqs);
-               kmem_cache_free(kiocb_cachep, iocb);
-       }
+       if (refcount_dec_and_test(&iocb->ki_refcnt))
+               iocb_destroy(iocb);
 }
 
 static void aio_fill_event(struct io_event *ev, struct aio_kiocb *iocb,
```
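
This hunk splits teardown out of the reference drop: iocb_destroy() unconditionally releases the file, the ctx reference and the slab object, while iocb_put() merely drops one reference and destroys on the last one. Because every iocb is now born holding two references, the old "refcount_read() == 0" shortcut (for requests that never took a reference at all) is gone, and the put can be a plain dec-and-test, which stays safe even when the submitter and the completion drop their references concurrently: exactly one of them sees the count hit zero. A small pthread sketch of that property (the toy_req names are invented for illustration, not kernel code):

```c
/* Illustration only: two "owners" drop their references concurrently and
 * exactly one of them performs the teardown. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_req {
        atomic_int refcnt;
};

static void toy_req_destroy(struct toy_req *req)
{
        /* Runs exactly once, in whichever thread dropped the last reference. */
        printf("request freed exactly once\n");
        free(req);
}

static void toy_req_put(struct toy_req *req)
{
        /* atomic_fetch_sub() returns the old value: only the caller that
         * takes the count from 1 to 0 sees 1 here and tears the object down. */
        if (atomic_fetch_sub(&req->refcnt, 1) == 1)
                toy_req_destroy(req);
}

static void *owner(void *arg)
{
        toy_req_put(arg);
        return NULL;
}

int main(void)
{
        struct toy_req *req = malloc(sizeof(*req));
        pthread_t a, b;

        if (!req)
                return 1;
        atomic_init(&req->refcnt, 2);   /* two owners, as after this patch */
        pthread_create(&a, NULL, owner, req);
        pthread_create(&b, NULL, owner, req);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        return 0;
}
```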
```diff
@@ -1749,9 +1755,6 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
        INIT_LIST_HEAD(&req->wait.entry);
        init_waitqueue_func_entry(&req->wait, aio_poll_wake);
 
-       /* one for removal from waitqueue, one for this function */
-       refcount_set(&aiocb->ki_refcnt, 2);
-
        mask = vfs_poll(req->file, &apt.pt) & req->events;
        if (unlikely(!req->head)) {
                /* we did not manage to set up a waitqueue, done */
@@ -1782,7 +1785,6 @@ out:
 
        if (mask)
                aio_poll_complete(aiocb, mask);
-       iocb_put(aiocb);
        return 0;
 }
 
@@ -1873,18 +1875,21 @@ static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
                break;
        }
 
+       /* Done with the synchronous reference */
+       iocb_put(req);
+
        /*
         * If ret is 0, we'd either done aio_complete() ourselves or have
         * arranged for that to be done asynchronously.  Anything non-zero
         * means that we need to destroy req ourselves.
         */
-       if (ret)
-               goto out_put_req;
-       return 0;
+       if (!ret)
+               return 0;
+
 out_put_req:
        if (req->ki_eventfd)
                eventfd_ctx_put(req->ki_eventfd);
-       iocb_put(req);
+       iocb_destroy(req);
 out_put_reqs_available:
        put_reqs_available(ctx, 1);
        return ret;
```
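
The reshuffled tail of __io_submit_one() reads as: drop the synchronous reference as soon as the opcode handler has returned, bail out on success (the remaining reference now belongs to whoever completes the request), and tear the request down directly when the handler failed, since in that case no completion will ever run. A compressed userspace sketch of that control flow, with invented toy_* helpers standing in for the real ones and a stub dispatcher in place of the opcode switch:

```c
/* Control-flow sketch only: all toy_* names are invented, and toy_dispatch()
 * is a stub that either pretends the async op ran or reports failure. */
#include <errno.h>
#include <stdatomic.h>
#include <stdlib.h>

struct toy_req {
        atomic_int refcnt;
};

static void toy_destroy(struct toy_req *req)   /* like iocb_destroy() */
{
        free(req);
}

static void toy_put(struct toy_req *req)       /* like iocb_put() */
{
        if (atomic_fetch_sub(&req->refcnt, 1) == 1)
                toy_destroy(req);
}

/* Pretend async completion: drops the reference the completion side owns. */
static void toy_complete(struct toy_req *req)
{
        toy_put(req);
}

/* Stub dispatcher: on success, pretend the op ran and completed at once;
 * on failure, report an error without ever touching req. */
static int toy_dispatch(struct toy_req *req, int should_fail)
{
        if (should_fail)
                return -EINVAL;
        toy_complete(req);
        return 0;
}

static int toy_submit_one(int should_fail)
{
        struct toy_req *req = malloc(sizeof(*req));
        int ret;

        if (!req)
                return -ENOMEM;
        atomic_init(&req->refcnt, 2);           /* submitter + completion */

        ret = toy_dispatch(req, should_fail);

        /* Done with the synchronous reference, success or not. */
        toy_put(req);

        if (!ret)
                return 0;   /* the completion side drops the last reference */

        /* The op never started, so nothing else can reach req: tear it down
         * directly, mirroring the iocb_destroy() call on the error path. */
        toy_destroy(req);
        return ret;
}

int main(void)
{
        if (toy_submit_one(0) != 0)
                return 1;
        if (toy_submit_one(1) != -EINVAL)
                return 1;
        return 0;
}
```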