diff options
author | Kent Overstreet <koverstreet@google.com> | 2013-05-13 16:42:52 -0400 |
---|---|---|
committer | Benjamin LaHaise <bcrl@kvack.org> | 2013-07-30 11:53:12 -0400 |
commit | 57282d8fd744072d6d6f18fa6ebe3cc1149015bf (patch) | |
tree | fd01d088c8eb9e15ed4b1d486c9868b206b179c7 /fs/aio.c | |
parent | 8bc92afcf7f5c598001dd04e62d88f57f6e89e51 (diff) |
aio: Kill ki_users
The kiocb refcount is only needed for cancellation - to ensure a kiocb
isn't freed while a ki_cancel callback is running. But if we restrict
ki_cancel callbacks to not block (which they currently don't), we can
simply drop the refcount.
Signed-off-by: Kent Overstreet <koverstreet@google.com>
Cc: Zach Brown <zab@redhat.com>
Cc: Felipe Balbi <balbi@ti.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Mark Fasheh <mfasheh@suse.com>
Cc: Joel Becker <jlbec@evilplan.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Asai Thambi S P <asamymuthupa@micron.com>
Cc: Selvan Mani <smani@micron.com>
Cc: Sam Bradshaw <sbradshaw@micron.com>
Cc: Jeff Moyer <jmoyer@redhat.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Benjamin LaHaise <bcrl@kvack.org>
Cc: Theodore Ts'o <tytso@mit.edu>
Signed-off-by: Benjamin LaHaise <bcrl@kvack.org>
Diffstat (limited to 'fs/aio.c')
-rw-r--r-- | fs/aio.c | 47 |
1 file changed, 12 insertions, 35 deletions
@@ -361,7 +361,6 @@ EXPORT_SYMBOL(kiocb_set_cancel_fn); | |||
361 | static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb) | 361 | static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb) |
362 | { | 362 | { |
363 | kiocb_cancel_fn *old, *cancel; | 363 | kiocb_cancel_fn *old, *cancel; |
364 | int ret = -EINVAL; | ||
365 | 364 | ||
366 | /* | 365 | /* |
367 | * Don't want to set kiocb->ki_cancel = KIOCB_CANCELLED unless it | 366 | * Don't want to set kiocb->ki_cancel = KIOCB_CANCELLED unless it |
@@ -371,21 +370,13 @@ static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb) | |||
371 | cancel = ACCESS_ONCE(kiocb->ki_cancel); | 370 | cancel = ACCESS_ONCE(kiocb->ki_cancel); |
372 | do { | 371 | do { |
373 | if (!cancel || cancel == KIOCB_CANCELLED) | 372 | if (!cancel || cancel == KIOCB_CANCELLED) |
374 | return ret; | 373 | return -EINVAL; |
375 | 374 | ||
376 | old = cancel; | 375 | old = cancel; |
377 | cancel = cmpxchg(&kiocb->ki_cancel, old, KIOCB_CANCELLED); | 376 | cancel = cmpxchg(&kiocb->ki_cancel, old, KIOCB_CANCELLED); |
378 | } while (cancel != old); | 377 | } while (cancel != old); |
379 | 378 | ||
380 | atomic_inc(&kiocb->ki_users); | 379 | return cancel(kiocb); |
381 | spin_unlock_irq(&ctx->ctx_lock); | ||
382 | |||
383 | ret = cancel(kiocb); | ||
384 | |||
385 | spin_lock_irq(&ctx->ctx_lock); | ||
386 | aio_put_req(kiocb); | ||
387 | |||
388 | return ret; | ||
389 | } | 380 | } |
390 | 381 | ||
391 | static void free_ioctx_rcu(struct rcu_head *head) | 382 | static void free_ioctx_rcu(struct rcu_head *head) |
@@ -599,16 +590,16 @@ static void kill_ioctx(struct kioctx *ctx) | |||
599 | /* wait_on_sync_kiocb: | 590 | /* wait_on_sync_kiocb: |
600 | * Waits on the given sync kiocb to complete. | 591 | * Waits on the given sync kiocb to complete. |
601 | */ | 592 | */ |
602 | ssize_t wait_on_sync_kiocb(struct kiocb *iocb) | 593 | ssize_t wait_on_sync_kiocb(struct kiocb *req) |
603 | { | 594 | { |
604 | while (atomic_read(&iocb->ki_users)) { | 595 | while (!req->ki_ctx) { |
605 | set_current_state(TASK_UNINTERRUPTIBLE); | 596 | set_current_state(TASK_UNINTERRUPTIBLE); |
606 | if (!atomic_read(&iocb->ki_users)) | 597 | if (req->ki_ctx) |
607 | break; | 598 | break; |
608 | io_schedule(); | 599 | io_schedule(); |
609 | } | 600 | } |
610 | __set_current_state(TASK_RUNNING); | 601 | __set_current_state(TASK_RUNNING); |
611 | return iocb->ki_user_data; | 602 | return req->ki_user_data; |
612 | } | 603 | } |
613 | EXPORT_SYMBOL(wait_on_sync_kiocb); | 604 | EXPORT_SYMBOL(wait_on_sync_kiocb); |
614 | 605 | ||
@@ -687,14 +678,8 @@ out: | |||
687 | } | 678 | } |
688 | 679 | ||
689 | /* aio_get_req | 680 | /* aio_get_req |
690 | * Allocate a slot for an aio request. Increments the ki_users count | 681 | * Allocate a slot for an aio request. |
691 | * of the kioctx so that the kioctx stays around until all requests are | 682 | * Returns NULL if no requests are free. |
692 | * complete. Returns NULL if no requests are free. | ||
693 | * | ||
694 | * Returns with kiocb->ki_users set to 2. The io submit code path holds | ||
695 | * an extra reference while submitting the i/o. | ||
696 | * This prevents races between the aio code path referencing the | ||
697 | * req (after submitting it) and aio_complete() freeing the req. | ||
698 | */ | 683 | */ |
699 | static inline struct kiocb *aio_get_req(struct kioctx *ctx) | 684 | static inline struct kiocb *aio_get_req(struct kioctx *ctx) |
700 | { | 685 | { |
@@ -707,7 +692,6 @@ static inline struct kiocb *aio_get_req(struct kioctx *ctx) | |||
707 | if (unlikely(!req)) | 692 | if (unlikely(!req)) |
708 | goto out_put; | 693 | goto out_put; |
709 | 694 | ||
710 | atomic_set(&req->ki_users, 1); | ||
711 | req->ki_ctx = ctx; | 695 | req->ki_ctx = ctx; |
712 | return req; | 696 | return req; |
713 | out_put: | 697 | out_put: |
@@ -726,13 +710,6 @@ static void kiocb_free(struct kiocb *req) | |||
726 | kmem_cache_free(kiocb_cachep, req); | 710 | kmem_cache_free(kiocb_cachep, req); |
727 | } | 711 | } |
728 | 712 | ||
729 | void aio_put_req(struct kiocb *req) | ||
730 | { | ||
731 | if (atomic_dec_and_test(&req->ki_users)) | ||
732 | kiocb_free(req); | ||
733 | } | ||
734 | EXPORT_SYMBOL(aio_put_req); | ||
735 | |||
736 | static struct kioctx *lookup_ioctx(unsigned long ctx_id) | 713 | static struct kioctx *lookup_ioctx(unsigned long ctx_id) |
737 | { | 714 | { |
738 | struct mm_struct *mm = current->mm; | 715 | struct mm_struct *mm = current->mm; |
@@ -771,9 +748,9 @@ void aio_complete(struct kiocb *iocb, long res, long res2) | |||
771 | * - the sync task helpfully left a reference to itself in the iocb | 748 | * - the sync task helpfully left a reference to itself in the iocb |
772 | */ | 749 | */ |
773 | if (is_sync_kiocb(iocb)) { | 750 | if (is_sync_kiocb(iocb)) { |
774 | BUG_ON(atomic_read(&iocb->ki_users) != 1); | ||
775 | iocb->ki_user_data = res; | 751 | iocb->ki_user_data = res; |
776 | atomic_set(&iocb->ki_users, 0); | 752 | smp_wmb(); |
753 | iocb->ki_ctx = ERR_PTR(-EXDEV); | ||
777 | wake_up_process(iocb->ki_obj.tsk); | 754 | wake_up_process(iocb->ki_obj.tsk); |
778 | return; | 755 | return; |
779 | } | 756 | } |
@@ -845,7 +822,7 @@ void aio_complete(struct kiocb *iocb, long res, long res2) | |||
845 | eventfd_signal(iocb->ki_eventfd, 1); | 822 | eventfd_signal(iocb->ki_eventfd, 1); |
846 | 823 | ||
847 | /* everything turned out well, dispose of the aiocb. */ | 824 | /* everything turned out well, dispose of the aiocb. */ |
848 | aio_put_req(iocb); | 825 | kiocb_free(iocb); |
849 | 826 | ||
850 | /* | 827 | /* |
851 | * We have to order our ring_info tail store above and test | 828 | * We have to order our ring_info tail store above and test |
@@ -1269,7 +1246,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, | |||
1269 | return 0; | 1246 | return 0; |
1270 | out_put_req: | 1247 | out_put_req: |
1271 | put_reqs_available(ctx, 1); | 1248 | put_reqs_available(ctx, 1); |
1272 | aio_put_req(req); | 1249 | kiocb_free(req); |
1273 | return ret; | 1250 | return ret; |
1274 | } | 1251 | } |
1275 | 1252 | ||