diff options
author | Benjamin LaHaise <bcrl@kvack.org> | 2014-04-29 12:55:48 -0400 |
---|---|---|
committer | Benjamin LaHaise <bcrl@kvack.org> | 2014-04-29 12:55:48 -0400 |
commit | fa88b6f8803c87c4ced5aac11261ced7cedaa05e (patch) | |
tree | 81333e517558899cda357538a2289f2005f60609 /fs/aio.c | |
parent | fb2d44838320b78e6e3b5eb2e35b70f62f262e4c (diff) |
aio: cleanup: flatten kill_ioctx()
There is no need to have most of the code in kill_ioctx() indented. Flatten
it.
Signed-off-by: Benjamin LaHaise <bcrl@kvack.org>
Diffstat (limited to 'fs/aio.c')
-rw-r--r-- | fs/aio.c | 52 |
1 file changed, 26 insertions, 26 deletions
@@ -730,39 +730,39 @@ err: | |||
730 | static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx, | 730 | static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx, |
731 | struct completion *requests_done) | 731 | struct completion *requests_done) |
732 | { | 732 | { |
733 | if (!atomic_xchg(&ctx->dead, 1)) { | 733 | struct kioctx_table *table; |
734 | struct kioctx_table *table; | ||
735 | 734 | ||
736 | spin_lock(&mm->ioctx_lock); | 735 | if (atomic_xchg(&ctx->dead, 1)) |
737 | rcu_read_lock(); | 736 | return -EINVAL; |
738 | table = rcu_dereference(mm->ioctx_table); | ||
739 | 737 | ||
740 | WARN_ON(ctx != table->table[ctx->id]); | ||
741 | table->table[ctx->id] = NULL; | ||
742 | rcu_read_unlock(); | ||
743 | spin_unlock(&mm->ioctx_lock); | ||
744 | 738 | ||
745 | /* percpu_ref_kill() will do the necessary call_rcu() */ | 739 | spin_lock(&mm->ioctx_lock); |
746 | wake_up_all(&ctx->wait); | 740 | rcu_read_lock(); |
741 | table = rcu_dereference(mm->ioctx_table); | ||
742 | |||
743 | WARN_ON(ctx != table->table[ctx->id]); | ||
744 | table->table[ctx->id] = NULL; | ||
745 | rcu_read_unlock(); | ||
746 | spin_unlock(&mm->ioctx_lock); | ||
747 | 747 | ||
748 | /* | 748 | /* percpu_ref_kill() will do the necessary call_rcu() */ |
749 | * It'd be more correct to do this in free_ioctx(), after all | 749 | wake_up_all(&ctx->wait); |
750 | * the outstanding kiocbs have finished - but by then io_destroy | ||
751 | * has already returned, so io_setup() could potentially return | ||
752 | * -EAGAIN with no ioctxs actually in use (as far as userspace | ||
753 | * could tell). | ||
754 | */ | ||
755 | aio_nr_sub(ctx->max_reqs); | ||
756 | 750 | ||
757 | if (ctx->mmap_size) | 751 | /* |
758 | vm_munmap(ctx->mmap_base, ctx->mmap_size); | 752 | * It'd be more correct to do this in free_ioctx(), after all |
753 | * the outstanding kiocbs have finished - but by then io_destroy | ||
754 | * has already returned, so io_setup() could potentially return | ||
755 | * -EAGAIN with no ioctxs actually in use (as far as userspace | ||
756 | * could tell). | ||
757 | */ | ||
758 | aio_nr_sub(ctx->max_reqs); | ||
759 | 759 | ||
760 | ctx->requests_done = requests_done; | 760 | if (ctx->mmap_size) |
761 | percpu_ref_kill(&ctx->users); | 761 | vm_munmap(ctx->mmap_base, ctx->mmap_size); |
762 | return 0; | ||
763 | } | ||
764 | 762 | ||
765 | return -EINVAL; | 763 | ctx->requests_done = requests_done; |
764 | percpu_ref_kill(&ctx->users); | ||
765 | return 0; | ||
766 | } | 766 | } |
767 | 767 | ||
768 | /* wait_on_sync_kiocb: | 768 | /* wait_on_sync_kiocb: |