author	Linus Torvalds <torvalds@linux-foundation.org>	2014-06-14 20:43:27 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-06-14 20:43:27 -0400
commit	a311c480384c5aa9aaae195b89c3ec89c3b66379 (patch)
tree	9e3f141bb7649278e0fe30e68d965f2243c32bb9 /fs
parent	05064084e82d057f8d74590c51581650e060fbb8 (diff)
parent	fa88b6f8803c87c4ced5aac11261ced7cedaa05e (diff)
Merge git://git.kvack.org/~bcrl/aio-next
Pull aio fix and cleanups from Ben LaHaise:
 "This consists of a couple of code cleanups plus a minor bug fix"

* git://git.kvack.org/~bcrl/aio-next:
  aio: cleanup: flatten kill_ioctx()
  aio: report error from io_destroy() when threads race in io_destroy()
  fs/aio.c: Remove ctx parameter in kiocb_cancel
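
The user-visible part of this pull is the io_destroy() change: when two threads race to destroy the same AIO context, the losing thread now gets -EINVAL back instead of returning 0 before the context has actually been drained. The following is a minimal illustrative sketch (not part of this merge) of that race from userspace; it uses raw syscall(2) wrappers because glibc does not wrap the native AIO interface, and it assumes the aio_context_t definition from linux/aio_abi.h.

/* Hedged sketch, not from this commit: two threads race io_destroy()
 * on one context. After this merge the losing thread sees EINVAL,
 * either from the atomic_xchg(&ctx->dead, 1) check in kill_ioctx()
 * or from lookup_ioctx() once the winner has unhooked the context. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/aio_abi.h>

static aio_context_t ctx;	/* zero-initialized, as io_setup() requires */

static void *destroyer(void *arg)
{
	long ret = syscall(SYS_io_destroy, ctx);

	if (ret < 0)
		printf("thread %ld: io_destroy failed: %m\n", (long)arg);
	else
		printf("thread %ld: io_destroy returned %ld\n", (long)arg, ret);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	if (syscall(SYS_io_setup, 128, &ctx) < 0) {
		perror("io_setup");
		return 1;
	}

	/* Both threads call io_destroy() on the same context. */
	pthread_create(&a, NULL, destroyer, (void *)1L);
	pthread_create(&b, NULL, destroyer, (void *)2L);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

Build with gcc -pthread. Depending on timing the loser fails either on the ctx->dead exchange inside kill_ioctx() or earlier in lookup_ioctx(), but in both cases it now reports EINVAL rather than claiming success before outstanding requests are done.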
Diffstat (limited to 'fs')
 fs/aio.c | 70 ++++++++++++++++++++++++++++++++++++----------------------------------
 1 file changed, 36 insertions(+), 34 deletions(-)
diff --git a/fs/aio.c b/fs/aio.c
index 56b28607c32d..4f078c054b41 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -477,7 +477,7 @@ void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel)
 }
 EXPORT_SYMBOL(kiocb_set_cancel_fn);
 
-static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb)
+static int kiocb_cancel(struct kiocb *kiocb)
 {
 	kiocb_cancel_fn *old, *cancel;
 
@@ -538,7 +538,7 @@ static void free_ioctx_users(struct percpu_ref *ref)
 				struct kiocb, ki_list);
 
 		list_del_init(&req->ki_list);
-		kiocb_cancel(ctx, req);
+		kiocb_cancel(req);
 	}
 
 	spin_unlock_irq(&ctx->ctx_lock);
@@ -727,42 +727,42 @@ err:
  * when the processes owning a context have all exited to encourage
  * the rapid destruction of the kioctx.
  */
-static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
+static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
 		struct completion *requests_done)
 {
-	if (!atomic_xchg(&ctx->dead, 1)) {
-		struct kioctx_table *table;
+	struct kioctx_table *table;
 
-		spin_lock(&mm->ioctx_lock);
-		rcu_read_lock();
-		table = rcu_dereference(mm->ioctx_table);
+	if (atomic_xchg(&ctx->dead, 1))
+		return -EINVAL;
 
-		WARN_ON(ctx != table->table[ctx->id]);
-		table->table[ctx->id] = NULL;
-		rcu_read_unlock();
-		spin_unlock(&mm->ioctx_lock);
 
-		/* percpu_ref_kill() will do the necessary call_rcu() */
-		wake_up_all(&ctx->wait);
+	spin_lock(&mm->ioctx_lock);
+	rcu_read_lock();
+	table = rcu_dereference(mm->ioctx_table);
 
-		/*
-		 * It'd be more correct to do this in free_ioctx(), after all
-		 * the outstanding kiocbs have finished - but by then io_destroy
-		 * has already returned, so io_setup() could potentially return
-		 * -EAGAIN with no ioctxs actually in use (as far as userspace
-		 *  could tell).
-		 */
-		aio_nr_sub(ctx->max_reqs);
+	WARN_ON(ctx != table->table[ctx->id]);
+	table->table[ctx->id] = NULL;
+	rcu_read_unlock();
+	spin_unlock(&mm->ioctx_lock);
 
-		if (ctx->mmap_size)
-			vm_munmap(ctx->mmap_base, ctx->mmap_size);
+	/* percpu_ref_kill() will do the necessary call_rcu() */
+	wake_up_all(&ctx->wait);
 
-		ctx->requests_done = requests_done;
-		percpu_ref_kill(&ctx->users);
-	} else {
-		if (requests_done)
-			complete(requests_done);
-	}
+	/*
+	 * It'd be more correct to do this in free_ioctx(), after all
+	 * the outstanding kiocbs have finished - but by then io_destroy
+	 * has already returned, so io_setup() could potentially return
+	 * -EAGAIN with no ioctxs actually in use (as far as userspace
+	 *  could tell).
+	 */
+	aio_nr_sub(ctx->max_reqs);
+
+	if (ctx->mmap_size)
+		vm_munmap(ctx->mmap_base, ctx->mmap_size);
+
+	ctx->requests_done = requests_done;
+	percpu_ref_kill(&ctx->users);
+	return 0;
 }
 
 /* wait_on_sync_kiocb:
@@ -1219,21 +1219,23 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
 	if (likely(NULL != ioctx)) {
 		struct completion requests_done =
 			COMPLETION_INITIALIZER_ONSTACK(requests_done);
+		int ret;
 
 		/* Pass requests_done to kill_ioctx() where it can be set
 		 * in a thread-safe way. If we try to set it here then we have
 		 * a race condition if two io_destroy() called simultaneously.
 		 */
-		kill_ioctx(current->mm, ioctx, &requests_done);
+		ret = kill_ioctx(current->mm, ioctx, &requests_done);
 		percpu_ref_put(&ioctx->users);
 
 		/* Wait until all IO for the context are done. Otherwise kernel
 		 * keep using user-space buffers even if user thinks the context
 		 * is destroyed.
 		 */
-		wait_for_completion(&requests_done);
+		if (!ret)
+			wait_for_completion(&requests_done);
 
-		return 0;
+		return ret;
 	}
 	pr_debug("EINVAL: io_destroy: invalid context id\n");
 	return -EINVAL;
@@ -1595,7 +1597,7 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
 
 	kiocb = lookup_kiocb(ctx, iocb, key);
 	if (kiocb)
-		ret = kiocb_cancel(ctx, kiocb);
+		ret = kiocb_cancel(kiocb);
 	else
 		ret = -EINVAL;
 