 fs/aio.c | 36 ++++++++++++++++--------------------
 1 file changed, 16 insertions(+), 20 deletions(-)

diff --git a/fs/aio.c b/fs/aio.c
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -141,9 +141,6 @@ static void aio_free_ring(struct kioctx *ctx)
 	for (i = 0; i < ctx->nr_pages; i++)
 		put_page(ctx->ring_pages[i]);
 
-	if (ctx->mmap_size)
-		vm_munmap(ctx->mmap_base, ctx->mmap_size);
-
 	if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages)
 		kfree(ctx->ring_pages);
 }
@@ -322,11 +319,6 @@ static void free_ioctx(struct kioctx *ctx)
 
 	aio_free_ring(ctx);
 
-	spin_lock(&aio_nr_lock);
-	BUG_ON(aio_nr - ctx->max_reqs > aio_nr);
-	aio_nr -= ctx->max_reqs;
-	spin_unlock(&aio_nr_lock);
-
 	pr_debug("freeing %p\n", ctx);
 
 	/*
@@ -435,17 +427,24 @@ static void kill_ioctx(struct kioctx *ctx)
 {
 	if (!atomic_xchg(&ctx->dead, 1)) {
 		hlist_del_rcu(&ctx->list);
-		/* Between hlist_del_rcu() and dropping the initial ref */
-		synchronize_rcu();
 
 		/*
-		 * We can't punt to workqueue here because put_ioctx() ->
-		 * free_ioctx() will unmap the ringbuffer, and that has to be
-		 * done in the original process's context. kill_ioctx_rcu/work()
-		 * exist for exit_aio(), as in that path free_ioctx() won't do
-		 * the unmap.
+		 * It'd be more correct to do this in free_ioctx(), after all
+		 * the outstanding kiocbs have finished - but by then io_destroy
+		 * has already returned, so io_setup() could potentially return
+		 * -EAGAIN with no ioctxs actually in use (as far as userspace
+		 * could tell).
 		 */
-		kill_ioctx_work(&ctx->rcu_work);
+		spin_lock(&aio_nr_lock);
+		BUG_ON(aio_nr - ctx->max_reqs > aio_nr);
+		aio_nr -= ctx->max_reqs;
+		spin_unlock(&aio_nr_lock);
+
+		if (ctx->mmap_size)
+			vm_munmap(ctx->mmap_base, ctx->mmap_size);
+
+		/* Between hlist_del_rcu() and dropping the initial ref */
+		call_rcu(&ctx->rcu_head, kill_ioctx_rcu);
 	}
 }
 
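The new comment above is the heart of the patch: aio_nr is the system-wide count of reserved AIO event slots, and io_setup() refuses a new ioctx once the total would exceed aio_max_nr. If the decrement stayed in free_ioctx(), it would only run after all outstanding kiocbs completed, possibly well after io_destroy() had returned. A minimal userspace model of that accounting (a simplified sketch, not the kernel code itself; the model_* names are made up):

#include <stdio.h>

/* Simplified userspace model of the global accounting in fs/aio.c.
 * aio_nr counts reserved event slots across all ioctxs; io_setup()
 * fails with -EAGAIN once the total would exceed aio_max_nr. */
static unsigned long aio_nr;
static unsigned long aio_max_nr = 0x10000;

/* Admission check done at io_setup() time (cf. ioctx_alloc()). */
static int model_io_setup(unsigned long nr_events)
{
	if (aio_nr + nr_events > aio_max_nr ||
	    aio_nr + nr_events < aio_nr)	/* overflow guard */
		return -1;			/* -EAGAIN */
	aio_nr += nr_events;
	return 0;
}

/* After this patch the slots come back in kill_ioctx(), i.e. before
 * io_destroy() returns to userspace. */
static void model_io_destroy(unsigned long nr_events)
{
	aio_nr -= nr_events;
}

int main(void)
{
	if (model_io_setup(aio_max_nr))		/* reserve the full limit */
		return 1;
	model_io_destroy(aio_max_nr);		/* give it all back */
	/* Succeeds only because destroy returned the slots synchronously;
	 * with the decrement deferred to free_ioctx(), this call could
	 * still see the old count and report -EAGAIN. */
	printf("re-setup: %s\n",
	       model_io_setup(aio_max_nr) ? "-EAGAIN" : "ok");
	return 0;
}

With the decrement moved into kill_ioctx(), the slots are returned before io_destroy() returns, so a destroy immediately followed by a setup behaves as userspace would expect.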
@@ -495,10 +494,7 @@ void exit_aio(struct mm_struct *mm)
 		 */
 		ctx->mmap_size = 0;
 
-		if (!atomic_xchg(&ctx->dead, 1)) {
-			hlist_del_rcu(&ctx->list);
-			call_rcu(&ctx->rcu_head, kill_ioctx_rcu);
-		}
+		kill_ioctx(ctx);
 	}
 }
 
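exit_aio() can now call kill_ioctx() directly because the atomic_xchg(&ctx->dead, 1) at its top makes teardown run exactly once even when io_destroy() races with process exit; note that exit_aio() zeroes ctx->mmap_size first, so the vm_munmap() call in kill_ioctx() is skipped on that path (the whole address space is being torn down anyway). A minimal sketch of that run-once idiom using C11 atomics (a userspace model, names hypothetical):

#include <stdatomic.h>
#include <stdio.h>

struct ctx {
	atomic_int dead;
};

/* Only the caller that flips dead from 0 to 1 performs teardown;
 * any later caller sees 1 and leaves the context alone. This is
 * what lets both io_destroy() and exit_aio() funnel into a single
 * kill function without double-freeing. */
static void kill_once(struct ctx *c)
{
	if (!atomic_exchange(&c->dead, 1))
		printf("tearing down %p\n", (void *)c);
	else
		printf("%p already dead, nothing to do\n", (void *)c);
}

int main(void)
{
	struct ctx c = { .dead = 0 };

	kill_once(&c);	/* e.g. io_destroy(): does the work */
	kill_once(&c);	/* e.g. a racing exit_aio(): no-op */
	return 0;
}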