author     Linus Torvalds <torvalds@linux-foundation.org>   2014-05-01 11:54:03 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2014-05-01 11:54:03 -0400
commit     98794f932138c81bfaa609fda21ddd7e97227abd
tree       8d98bdfec881e25d15728cba5873b9f50ca0b5e6 /fs
parent     8aa9e85adac609588eeec356e5a85059b3b819ba
parent     754320d6e166d3a12cb4810a452bde00afbd4e9a
Merge git://git.kvack.org/~bcrl/aio-fixes
Pull aio fixes from Ben LaHaise:
"The first change from Anatol fixes a regression where io_destroy() no
longer waits for outstanding aios to complete. The second corrects a
memory leak in an error path for vectored aio operations.
Both of these bug fixes should be queued up for stable as well"
* git://git.kvack.org/~bcrl/aio-fixes:
aio: fix potential leak in aio_run_iocb().
aio: block io_destroy() until all context requests are completed
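
To make the first fix concrete, here is a minimal user-space sketch. It is not part of this merge; it assumes libaio is installed (build with -laio) and uses a placeholder test file. It shows the contract the patch restores: io_destroy() must not return while a submitted request is still in flight, otherwise the kernel may keep writing into a buffer the application has already freed or reused.

/* Illustrative only - not from the kernel tree. Build with: gcc demo.c -laio */
#define _GNU_SOURCE
#include <fcntl.h>
#include <libaio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
        io_context_t ctx = 0;
        struct iocb cb, *cbs[1] = { &cb };
        void *buf;
        int fd;

        if (posix_memalign(&buf, 4096, 4096))          /* O_DIRECT wants alignment */
                return 1;
        fd = open("/tmp/aio-test", O_RDONLY | O_DIRECT);  /* placeholder path */
        if (fd < 0 || io_setup(8, &ctx) < 0)
                return 1;

        io_prep_pread(&cb, fd, buf, 4096, 0);          /* queue one asynchronous read */
        if (io_submit(ctx, 1, cbs) != 1)
                return 1;

        /*
         * With the regression, io_destroy() could return while the read was
         * still in flight; with the fix it blocks until the request finishes,
         * so freeing buf afterwards cannot race with the kernel's completion.
         */
        io_destroy(ctx);
        free(buf);
        close(fd);
        return 0;
}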
Diffstat (limited to 'fs')
 -rw-r--r--  fs/aio.c | 42
 1 file changed, 34 insertions(+), 8 deletions(-)
diff --git a/fs/aio.c b/fs/aio.c
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -112,6 +112,11 @@ struct kioctx {
 
        struct work_struct      free_work;
 
+       /*
+        * signals when all in-flight requests are done
+        */
+       struct completion *requests_done;
+
        struct {
                /*
                 * This counts the number of available slots in the ringbuffer,
@@ -508,6 +513,10 @@ static void free_ioctx_reqs(struct percpu_ref *ref)
 {
        struct kioctx *ctx = container_of(ref, struct kioctx, reqs);
 
+       /* At this point we know that there are no any in-flight requests */
+       if (ctx->requests_done)
+               complete(ctx->requests_done);
+
        INIT_WORK(&ctx->free_work, free_ioctx);
        schedule_work(&ctx->free_work);
 }
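
free_ioctx_reqs() is the release callback of the context's reqs percpu_ref, so by the time it runs every outstanding request has dropped its reference; completing requests_done here is therefore a reliable "no more in-flight I/O" signal. A hedged sketch of that general pattern follows (illustrative names only; the init call is elided because its signature has changed across kernel versions):

#include <linux/kernel.h>
#include <linux/percpu-refcount.h>

struct box {
        struct percpu_ref inflight;     /* one reference held per outstanding request */
};

/*
 * Registered as the percpu_ref release callback: it runs exactly once,
 * only after percpu_ref_kill() has been called and every get() has been
 * matched by a put() - i.e. nothing can still be in flight.
 */
static void box_idle(struct percpu_ref *ref)
{
        struct box *b = container_of(ref, struct box, inflight);

        /* safe point to signal waiters or free b */
        (void)b;
}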
@@ -718,7 +727,8 @@ err:
  * when the processes owning a context have all exited to encourage
  * the rapid destruction of the kioctx.
  */
-static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx)
+static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
+               struct completion *requests_done)
 {
        if (!atomic_xchg(&ctx->dead, 1)) {
                struct kioctx_table *table;
@@ -747,7 +757,11 @@ static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx)
                if (ctx->mmap_size)
                        vm_munmap(ctx->mmap_base, ctx->mmap_size);
 
+               ctx->requests_done = requests_done;
                percpu_ref_kill(&ctx->users);
+       } else {
+               if (requests_done)
+                       complete(requests_done);
        }
 }
 
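
The else branch is what keeps a second, concurrent io_destroy() from sleeping forever: atomic_xchg(&ctx->dead, 1) lets exactly one caller install requests_done and tear the context down, and every later caller completes its own waiter immediately instead. A generic sketch of this claim-once pattern (illustrative, not the kernel code itself):

#include <linux/atomic.h>
#include <linux/completion.h>

static void teardown_once(atomic_t *dead, struct completion *waiter)
{
        if (!atomic_xchg(dead, 1)) {
                /* first caller wins: do the real teardown and hand
                 * "waiter" to whatever finishes last */
        } else if (waiter) {
                /* losers wake their own waiter, so no caller blocks
                 * on a completion nobody will ever complete */
                complete(waiter);
        }
}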
@@ -809,7 +823,7 @@ void exit_aio(struct mm_struct *mm)
                 */
                ctx->mmap_size = 0;
 
-               kill_ioctx(mm, ctx);
+               kill_ioctx(mm, ctx, NULL);
        }
 }
 
@@ -1185,7 +1199,7 @@ SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
        if (!IS_ERR(ioctx)) {
                ret = put_user(ioctx->user_id, ctxp);
                if (ret)
-                       kill_ioctx(current->mm, ioctx);
+                       kill_ioctx(current->mm, ioctx, NULL);
                percpu_ref_put(&ioctx->users);
        }
 
@@ -1203,8 +1217,22 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
 {
        struct kioctx *ioctx = lookup_ioctx(ctx);
        if (likely(NULL != ioctx)) {
-               kill_ioctx(current->mm, ioctx);
+               struct completion requests_done =
+                       COMPLETION_INITIALIZER_ONSTACK(requests_done);
+
+               /* Pass requests_done to kill_ioctx() where it can be set
+                * in a thread-safe way. If we try to set it here then we have
+                * a race condition if two io_destroy() called simultaneously.
+                */
+               kill_ioctx(current->mm, ioctx, &requests_done);
                percpu_ref_put(&ioctx->users);
+
+               /* Wait until all IO for the context are done. Otherwise kernel
+                * keep using user-space buffers even if user thinks the context
+                * is destroyed.
+                */
+               wait_for_completion(&requests_done);
+
                return 0;
        }
        pr_debug("EINVAL: io_destroy: invalid context id\n");
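
io_destroy() now uses the standard on-stack completion handshake: the waiter owns the completion on its stack, publishes a pointer to it, and sleeps in wait_for_completion() until the last piece of asynchronous work calls complete(). A minimal sketch of that pattern with a hypothetical deferred job (not the aio code itself):

#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct job {
        struct work_struct work;
        struct completion *done;        /* points into the waiter's stack */
};

static void job_fn(struct work_struct *work)
{
        struct job *job = container_of(work, struct job, work);

        /* ... asynchronous part runs here ... */
        complete(job->done);            /* wake the sleeping caller */
}

static void run_and_wait(struct job *job)
{
        DECLARE_COMPLETION_ONSTACK(done);

        job->done = &done;
        INIT_WORK(&job->work, job_fn);
        schedule_work(&job->work);

        /* the stack slot stays valid because we do not return until
         * job_fn() has signalled it */
        wait_for_completion(&done);
}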
@@ -1299,10 +1327,8 @@ rw_common:
                                                &iovec, compat)
                        : aio_setup_single_vector(req, rw, buf, &nr_segs,
                                                  iovec);
-               if (ret)
-                       return ret;
-
-               ret = rw_verify_area(rw, file, &req->ki_pos, req->ki_nbytes);
+               if (!ret)
+                       ret = rw_verify_area(rw, file, &req->ki_pos, req->ki_nbytes);
                if (ret < 0) {
                        if (iovec != &inline_vec)
                                kfree(iovec);
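
The second patch is about error-path ownership of iovec: aio_setup_vectored_rw() can fail after rw_copy_check_uvector() has already allocated the vector, and the old early return skipped the kfree() below, leaking it. Chaining the two calls with if (!ret) makes both failure cases fall through to the single cleanup branch. A plain-C sketch of the same idea, where the hypothetical step_one()/step_two() stand in for the setup and rw_verify_area() calls:

#include <stdlib.h>

/* Both steps return 0 on success or a negative value on failure,
 * and either may fail after buf has been allocated. */
static int step_one(char *buf) { (void)buf; return 0; }
static int step_two(char *buf) { (void)buf; return 0; }

int run_steps(void)
{
        char *buf = malloc(64);
        int ret;

        if (!buf)
                return -1;

        ret = step_one(buf);
        if (!ret)                       /* chain: only verify if setup succeeded */
                ret = step_two(buf);

        if (ret < 0) {                  /* one cleanup point covers both failures */
                free(buf);
                return ret;
        }

        /* ... use buf ... */
        free(buf);
        return 0;
}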