author     Al Viro <viro@ZenIV.linux.org.uk>                2012-03-08 12:51:19 -0500
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2012-03-19 11:57:43 -0400
commit     e808b6adb5d0cfef4ab82a210bc680003b200e77 (patch)
tree       dabf98b619b3d52f743d5368d3f8716111476be2
parent     8ef749e355dfb8932d4ae6d4aa8eacf3b0ab4444 (diff)
aio: fix the "too late munmap()" race
commit c7b285550544c22bc005ec20978472c9ac7138c6 upstream.
Current code has put_ioctx() called asynchronously from aio_fput_routine();
that's done *after* we have killed the request that used to pin ioctx,
so there's nothing to stop io_destroy() waiting in wait_for_all_aios()
from progressing. As a result, we can end up with the async call of
put_ioctx() being the last one, possibly happening during exit_mmap()
or elf_core_dump(), neither of which expects a stray munmap() being done
to it...
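The interleaving, sketched (only the calls named in this message and in the
hunks below are shown; the munmap() in the subject is the teardown of the aio
ring mapping performed as part of the final put):

  aio_fput_routine() worker                   io_destroy() / exit_aio() caller
  -------------------------                   --------------------------------
  fput(req->ki_filp);
  spin_lock_irq(&ctx->ctx_lock);
  really_put_req(ctx, req);                   wait_for_all_aios(ctx)
      /* last request gone */                     sees no requests left, returns
  spin_unlock_irq(&ctx->ctx_lock);
                                              exit_mmap() or elf_core_dump()
                                              starts tearing down the mm
  put_ioctx(ctx);
      /* may drop the final reference;
         the ioctx teardown, including
         the "stray munmap()" of the aio
         ring, now hits that mm */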
We do need to prevent _freeing_ the ioctx until aio_fput_routine() is done
with it, but that's all we care about - neither io_destroy() nor
exit_aio() will progress past wait_for_all_aios() until aio_fput_routine()
does really_put_req(), so the ioctx teardown won't be done until then
and we don't care about the contents of ioctx past that point.
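A minimal model of that ordering; the function and field names follow the
fs/aio.c of this era, but the bodies are deliberately simplified and are not
the actual source:

/* simplified model, not the real fs/aio.c */
static void really_put_req(struct kioctx *ctx, struct kiocb *req)
{
	/* called with ctx->ctx_lock held */
	list_del(&req->ki_list);
	ctx->reqs_active--;
	if (!ctx->reqs_active)
		wake_up_all(&ctx->wait);	/* lets wait_for_all_aios() finish */
	kmem_cache_free(kiocb_cachep, req);
}

static void wait_for_all_aios(struct kioctx *ctx)
{
	/*
	 * io_destroy() and exit_aio() sit here until the last request has
	 * gone through really_put_req(), so the ioctx contents stay valid
	 * for aio_fput_routine() up to that point; only the freeing of the
	 * ioctx itself needs to be held off.
	 */
	wait_event(ctx->wait, ctx->reqs_active == 0);
}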
Since actual freeing of these suckers is RCU-delayed, we don't need to
bump the ioctx refcount when a request goes into the list for async removal.
All we need is rcu_read_lock() held just over the ->ctx_lock-protected
area in aio_fput_routine().
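In generic form (a hypothetical struct foo, not the aio code), the pattern the
patch relies on looks like the sketch below: because the object is freed
through call_rcu(), a read-side critical section is enough to keep the memory
around for the lock-protected work, and no reference count has to be taken for
the async path.

struct foo {
	spinlock_t lock;
	struct rcu_head rcu;
	/* ... */
};

static void foo_rcu_free(struct rcu_head *head)
{
	kfree(container_of(head, struct foo, rcu));
}

/* last reference dropped: freeing is deferred past all RCU readers */
static void foo_release(struct foo *f)
{
	call_rcu(&f->rcu, foo_rcu_free);
}

/*
 * Async worker: something else (here, the analogue of wait_for_all_aios())
 * guarantees f has not been freed before we get here; rcu_read_lock() then
 * keeps it addressable until we are done, even if the last reference is
 * dropped meanwhile.
 */
static void foo_async_cleanup(struct foo *f)
{
	rcu_read_lock();
	spin_lock(&f->lock);
	/* touch f: it may already be dead, but it cannot be freed yet */
	spin_unlock(&f->lock);
	rcu_read_unlock();	/* only after this may foo_rcu_free() run */
}

This mirrors the change below: __put_ioctx() already ends in
call_rcu(&ctx->rcu_head, ctx_rcu_free), so aio_fput_routine() only needs
rcu_read_lock()/rcu_read_unlock() around its ->ctx_lock section instead of
the get_ioctx()/put_ioctx() pair.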
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Reviewed-by: Jeff Moyer <jmoyer@redhat.com>
Acked-by: Benjamin LaHaise <bcrl@kvack.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r--   fs/aio.c | 14 ++++++--------
1 file changed, 6 insertions(+), 8 deletions(-)
diff --git a/fs/aio.c b/fs/aio.c
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -228,12 +228,6 @@ static void __put_ioctx(struct kioctx *ctx)
 	call_rcu(&ctx->rcu_head, ctx_rcu_free);
 }
 
-static inline void get_ioctx(struct kioctx *kioctx)
-{
-	BUG_ON(atomic_read(&kioctx->users) <= 0);
-	atomic_inc(&kioctx->users);
-}
-
 static inline int try_get_ioctx(struct kioctx *kioctx)
 {
 	return atomic_inc_not_zero(&kioctx->users);
@@ -527,11 +521,16 @@ static void aio_fput_routine(struct work_struct *data)
 		fput(req->ki_filp);
 
 		/* Link the iocb into the context's free list */
+		rcu_read_lock();
 		spin_lock_irq(&ctx->ctx_lock);
 		really_put_req(ctx, req);
+		/*
+		 * at that point ctx might've been killed, but actual
+		 * freeing is RCU'd
+		 */
 		spin_unlock_irq(&ctx->ctx_lock);
+		rcu_read_unlock();
 
-		put_ioctx(ctx);
 		spin_lock_irq(&fput_lock);
 	}
 	spin_unlock_irq(&fput_lock);
@@ -562,7 +561,6 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
 	 * this function will be executed w/out any aio kthread wakeup.
 	 */
 	if (unlikely(!fput_atomic(req->ki_filp))) {
-		get_ioctx(ctx);
 		spin_lock(&fput_lock);
 		list_add(&req->ki_list, &fput_head);
 		spin_unlock(&fput_lock);