author:    Benjamin LaHaise <bcrl@kvack.org>  2014-04-29 12:45:17 -0400
committer: Benjamin LaHaise <bcrl@kvack.org>  2014-04-29 12:45:17 -0400
commit:    fb2d44838320b78e6e3b5eb2e35b70f62f262e4c
tree:      bb88488c0bcb41cd7e1267afa85a5d8871eb37b3 /fs/aio.c
parent:    d52a8f9ead60338306c4f03e9ce575c5f23a4b65
aio: report error from io_destroy() when threads race in io_destroy()
As reported by Anatol Pomozov, io_destroy() fails to report an error when it loses the race to destroy a given ioctx. Since there is a difference in behaviour between the thread that wins the race (which blocks on outstanding io requests) and the thread that loses (which returns immediately), wire up a return code from kill_ioctx() to the io_destroy() syscall.
Signed-off-by: Benjamin LaHaise <bcrl@kvack.org>
Cc: Anatol Pomozov <anatol.pomozov@gmail.com>
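To make the behaviour concrete, here is a minimal userspace sketch, not part of the commit, that provokes the race the message describes: two threads issue io_destroy(2) on the same context via raw syscalls. The thread function name and the nr_events value of 128 are illustrative choices. With this patch the losing thread gets -1 with errno set to EINVAL instead of a silent success; the exact interleaving is of course timing-dependent. Build with -pthread.

```c
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/aio_abi.h>   /* aio_context_t */

static aio_context_t ctx;    /* static, so zero-initialized as io_setup(2) requires */

static void *destroyer(void *arg)
{
	long ret = syscall(SYS_io_destroy, ctx);

	/* Winner: ret == 0 once outstanding requests are done.
	 * Loser:  ret == -1 with errno == EINVAL (post-patch). */
	printf("io_destroy: ret=%ld errno=%d\n", ret, ret ? errno : 0);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	if (syscall(SYS_io_setup, 128, &ctx)) {  /* room for 128 in-flight events */
		perror("io_setup");
		return 1;
	}
	pthread_create(&a, NULL, destroyer, NULL);
	pthread_create(&b, NULL, destroyer, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}
```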
Diffstat (limited to 'fs/aio.c')

 fs/aio.c | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)
```diff
@@ -727,7 +727,7 @@ err:
  * when the processes owning a context have all exited to encourage
  * the rapid destruction of the kioctx.
  */
-static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
+static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
 		struct completion *requests_done)
 {
 	if (!atomic_xchg(&ctx->dead, 1)) {
@@ -759,10 +759,10 @@ static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
 
 		ctx->requests_done = requests_done;
 		percpu_ref_kill(&ctx->users);
-	} else {
-		if (requests_done)
-			complete(requests_done);
+		return 0;
 	}
+
+	return -EINVAL;
 }
 
 /* wait_on_sync_kiocb:
```
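The return wiring above hinges on atomic_xchg() returning the previous value of ctx->dead, so exactly one caller observes the 0 -> 1 transition. A minimal C11 model of that convention follows; the struct and function names are illustrative, not kernel code, and -EINVAL mirrors the kernel's negative-errno return style.

```c
#include <assert.h>
#include <errno.h>
#include <stdatomic.h>

struct kioctx_model {
	atomic_int dead;  /* 0 = live, 1 = teardown already claimed */
};

static int kill_ioctx_model(struct kioctx_model *ctx)
{
	/* atomic_exchange returns the prior value: only the first
	 * caller sees 0 and is allowed to start the teardown. */
	if (!atomic_exchange(&ctx->dead, 1))
		return 0;          /* winner: begin teardown */

	return -EINVAL;            /* loser: someone else got here first */
}

int main(void)
{
	struct kioctx_model ctx = { 0 };

	assert(kill_ioctx_model(&ctx) == 0);        /* first caller wins */
	assert(kill_ioctx_model(&ctx) == -EINVAL);  /* every later caller loses */
	return 0;
}
```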
```diff
@@ -1219,21 +1219,23 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
 	if (likely(NULL != ioctx)) {
 		struct completion requests_done =
 			COMPLETION_INITIALIZER_ONSTACK(requests_done);
+		int ret;
 
 		/* Pass requests_done to kill_ioctx() where it can be set
 		 * in a thread-safe way. If we try to set it here then we have
 		 * a race condition if two io_destroy() called simultaneously.
 		 */
-		kill_ioctx(current->mm, ioctx, &requests_done);
+		ret = kill_ioctx(current->mm, ioctx, &requests_done);
 		percpu_ref_put(&ioctx->users);
 
 		/* Wait until all IO for the context are done. Otherwise kernel
 		 * keep using user-space buffers even if user thinks the context
 		 * is destroyed.
 		 */
-		wait_for_completion(&requests_done);
+		if (!ret)
+			wait_for_completion(&requests_done);
 
-		return 0;
+		return ret;
 	}
 	pr_debug("EINVAL: io_destroy: invalid context id\n");
 	return -EINVAL;
```
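One subtlety in the final hunk: kill_ioctx() assigns ctx->requests_done only on the winning path, so the losing thread's on-stack completion is never registered and nothing would ever complete it. The old code papered over this by calling complete() on the loser's completion immediately; the new code instead skips the wait with the `if (!ret)` guard and propagates -EINVAL. Waiting unconditionally after this change would leave the losing thread blocked forever, which is exactly why the loser must return at once.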