about summary refs log tree commit diff stats
path: root/fs/aio.c
diff options
context:
space:
mode:
authorKent Overstreet <koverstreet@google.com>2013-05-28 18:14:48 -0400
committerBenjamin LaHaise <bcrl@kvack.org>2013-07-30 11:53:11 -0400
commit723be6e39d14254bb5bb9f422b434566d359fa6e (patch)
tree69f910a77986812c2a9f095a102083bf9c66be8b /fs/aio.c
parente1bdd5f27a5b14e24a658d5511bebceb67679d83 (diff)
aio: percpu ioctx refcount
This just converts the ioctx refcount to the new generic dynamic percpu refcount code.

Signed-off-by: Kent Overstreet <koverstreet@google.com>
Cc: Zach Brown <zab@redhat.com>
Cc: Felipe Balbi <balbi@ti.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Mark Fasheh <mfasheh@suse.com>
Cc: Joel Becker <jlbec@evilplan.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Asai Thambi S P <asamymuthupa@micron.com>
Cc: Selvan Mani <smani@micron.com>
Cc: Sam Bradshaw <sbradshaw@micron.com>
Cc: Jeff Moyer <jmoyer@redhat.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Benjamin LaHaise <bcrl@kvack.org>
Reviewed-by: "Theodore Ts'o" <tytso@mit.edu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Benjamin LaHaise <bcrl@kvack.org>
Diffstat (limited to 'fs/aio.c')
-rw-r--r--fs/aio.c66
1 files changed, 27 insertions, 39 deletions
diff --git a/fs/aio.c b/fs/aio.c
index bb1a6c433110..7b470bfbf891 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -39,6 +39,7 @@
39#include <linux/anon_inodes.h> 39#include <linux/anon_inodes.h>
40#include <linux/migrate.h> 40#include <linux/migrate.h>
41#include <linux/ramfs.h> 41#include <linux/ramfs.h>
42#include <linux/percpu-refcount.h>
42 43
43#include <asm/kmap_types.h> 44#include <asm/kmap_types.h>
44#include <asm/uaccess.h> 45#include <asm/uaccess.h>
@@ -70,7 +71,7 @@ struct kioctx_cpu {
70}; 71};
71 72
72struct kioctx { 73struct kioctx {
73 atomic_t users; 74 struct percpu_ref users;
74 atomic_t dead; 75 atomic_t dead;
75 76
76 /* This needs improving */ 77 /* This needs improving */
@@ -103,7 +104,7 @@ struct kioctx {
103 long nr_pages; 104 long nr_pages;
104 105
105 struct rcu_head rcu_head; 106 struct rcu_head rcu_head;
106 struct work_struct rcu_work; 107 struct work_struct free_work;
107 108
108 struct { 109 struct {
109 /* 110 /*
@@ -403,8 +404,9 @@ static void free_ioctx_rcu(struct rcu_head *head)
403 * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted - 404 * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
404 * now it's safe to cancel any that need to be. 405 * now it's safe to cancel any that need to be.
405 */ 406 */
406static void free_ioctx(struct kioctx *ctx) 407static void free_ioctx(struct work_struct *work)
407{ 408{
409 struct kioctx *ctx = container_of(work, struct kioctx, free_work);
408 struct aio_ring *ring; 410 struct aio_ring *ring;
409 struct io_event res; 411 struct io_event res;
410 struct kiocb *req; 412 struct kiocb *req;
@@ -462,10 +464,12 @@ static void free_ioctx(struct kioctx *ctx)
462 call_rcu(&ctx->rcu_head, free_ioctx_rcu); 464 call_rcu(&ctx->rcu_head, free_ioctx_rcu);
463} 465}
464 466
465static void put_ioctx(struct kioctx *ctx) 467static void free_ioctx_ref(struct percpu_ref *ref)
466{ 468{
467 if (unlikely(atomic_dec_and_test(&ctx->users))) 469 struct kioctx *ctx = container_of(ref, struct kioctx, users);
468 free_ioctx(ctx); 470
471 INIT_WORK(&ctx->free_work, free_ioctx);
472 schedule_work(&ctx->free_work);
469} 473}
470 474
471/* ioctx_alloc 475/* ioctx_alloc
@@ -505,8 +509,9 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
505 509
506 ctx->max_reqs = nr_events; 510 ctx->max_reqs = nr_events;
507 511
508 atomic_set(&ctx->users, 2); 512 if (percpu_ref_init(&ctx->users, free_ioctx_ref))
509 atomic_set(&ctx->dead, 0); 513 goto out_freectx;
514
510 spin_lock_init(&ctx->ctx_lock); 515 spin_lock_init(&ctx->ctx_lock);
511 spin_lock_init(&ctx->completion_lock); 516 spin_lock_init(&ctx->completion_lock);
512 mutex_init(&ctx->ring_lock); 517 mutex_init(&ctx->ring_lock);
@@ -516,7 +521,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
516 521
517 ctx->cpu = alloc_percpu(struct kioctx_cpu); 522 ctx->cpu = alloc_percpu(struct kioctx_cpu);
518 if (!ctx->cpu) 523 if (!ctx->cpu)
519 goto out_freectx; 524 goto out_freeref;
520 525
521 if (aio_setup_ring(ctx) < 0) 526 if (aio_setup_ring(ctx) < 0)
522 goto out_freepcpu; 527 goto out_freepcpu;
@@ -535,6 +540,8 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
535 aio_nr += ctx->max_reqs; 540 aio_nr += ctx->max_reqs;
536 spin_unlock(&aio_nr_lock); 541 spin_unlock(&aio_nr_lock);
537 542
543 percpu_ref_get(&ctx->users); /* io_setup() will drop this ref */
544
538 /* now link into global list. */ 545 /* now link into global list. */
539 spin_lock(&mm->ioctx_lock); 546 spin_lock(&mm->ioctx_lock);
540 hlist_add_head_rcu(&ctx->list, &mm->ioctx_list); 547 hlist_add_head_rcu(&ctx->list, &mm->ioctx_list);
@@ -549,6 +556,8 @@ out_cleanup:
549 aio_free_ring(ctx); 556 aio_free_ring(ctx);
550out_freepcpu: 557out_freepcpu:
551 free_percpu(ctx->cpu); 558 free_percpu(ctx->cpu);
559out_freeref:
560 free_percpu(ctx->users.pcpu_count);
552out_freectx: 561out_freectx:
553 if (ctx->aio_ring_file) 562 if (ctx->aio_ring_file)
554 fput(ctx->aio_ring_file); 563 fput(ctx->aio_ring_file);
@@ -557,22 +566,6 @@ out_freectx:
557 return ERR_PTR(err); 566 return ERR_PTR(err);
558} 567}
559 568
560static void kill_ioctx_work(struct work_struct *work)
561{
562 struct kioctx *ctx = container_of(work, struct kioctx, rcu_work);
563
564 wake_up_all(&ctx->wait);
565 put_ioctx(ctx);
566}
567
568static void kill_ioctx_rcu(struct rcu_head *head)
569{
570 struct kioctx *ctx = container_of(head, struct kioctx, rcu_head);
571
572 INIT_WORK(&ctx->rcu_work, kill_ioctx_work);
573 schedule_work(&ctx->rcu_work);
574}
575
576/* kill_ioctx 569/* kill_ioctx
577 * Cancels all outstanding aio requests on an aio context. Used 570 * Cancels all outstanding aio requests on an aio context. Used
578 * when the processes owning a context have all exited to encourage 571 * when the processes owning a context have all exited to encourage
@@ -582,6 +575,8 @@ static void kill_ioctx(struct kioctx *ctx)
582{ 575{
583 if (!atomic_xchg(&ctx->dead, 1)) { 576 if (!atomic_xchg(&ctx->dead, 1)) {
584 hlist_del_rcu(&ctx->list); 577 hlist_del_rcu(&ctx->list);
578 /* percpu_ref_kill() will do the necessary call_rcu() */
579 wake_up_all(&ctx->wait);
585 580
586 /* 581 /*
587 * It'd be more correct to do this in free_ioctx(), after all 582 * It'd be more correct to do this in free_ioctx(), after all
@@ -598,8 +593,7 @@ static void kill_ioctx(struct kioctx *ctx)
598 if (ctx->mmap_size) 593 if (ctx->mmap_size)
599 vm_munmap(ctx->mmap_base, ctx->mmap_size); 594 vm_munmap(ctx->mmap_base, ctx->mmap_size);
600 595
601 /* Between hlist_del_rcu() and dropping the initial ref */ 596 percpu_ref_kill(&ctx->users);
602 call_rcu(&ctx->rcu_head, kill_ioctx_rcu);
603 } 597 }
604} 598}
605 599
@@ -633,12 +627,6 @@ void exit_aio(struct mm_struct *mm)
633 struct hlist_node *n; 627 struct hlist_node *n;
634 628
635 hlist_for_each_entry_safe(ctx, n, &mm->ioctx_list, list) { 629 hlist_for_each_entry_safe(ctx, n, &mm->ioctx_list, list) {
636 if (1 != atomic_read(&ctx->users))
637 printk(KERN_DEBUG
638 "exit_aio:ioctx still alive: %d %d %d\n",
639 atomic_read(&ctx->users),
640 atomic_read(&ctx->dead),
641 atomic_read(&ctx->reqs_available));
642 /* 630 /*
643 * We don't need to bother with munmap() here - 631 * We don't need to bother with munmap() here -
644 * exit_mmap(mm) is coming and it'll unmap everything. 632 * exit_mmap(mm) is coming and it'll unmap everything.
@@ -757,7 +745,7 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
757 745
758 hlist_for_each_entry_rcu(ctx, &mm->ioctx_list, list) { 746 hlist_for_each_entry_rcu(ctx, &mm->ioctx_list, list) {
759 if (ctx->user_id == ctx_id) { 747 if (ctx->user_id == ctx_id) {
760 atomic_inc(&ctx->users); 748 percpu_ref_get(&ctx->users);
761 ret = ctx; 749 ret = ctx;
762 break; 750 break;
763 } 751 }
@@ -1054,7 +1042,7 @@ SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
1054 ret = put_user(ioctx->user_id, ctxp); 1042 ret = put_user(ioctx->user_id, ctxp);
1055 if (ret) 1043 if (ret)
1056 kill_ioctx(ioctx); 1044 kill_ioctx(ioctx);
1057 put_ioctx(ioctx); 1045 percpu_ref_put(&ioctx->users);
1058 } 1046 }
1059 1047
1060out: 1048out:
@@ -1072,7 +1060,7 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
1072 struct kioctx *ioctx = lookup_ioctx(ctx); 1060 struct kioctx *ioctx = lookup_ioctx(ctx);
1073 if (likely(NULL != ioctx)) { 1061 if (likely(NULL != ioctx)) {
1074 kill_ioctx(ioctx); 1062 kill_ioctx(ioctx);
1075 put_ioctx(ioctx); 1063 percpu_ref_put(&ioctx->users);
1076 return 0; 1064 return 0;
1077 } 1065 }
1078 pr_debug("EINVAL: io_destroy: invalid context id\n"); 1066 pr_debug("EINVAL: io_destroy: invalid context id\n");
@@ -1394,7 +1382,7 @@ long do_io_submit(aio_context_t ctx_id, long nr,
1394 } 1382 }
1395 blk_finish_plug(&plug); 1383 blk_finish_plug(&plug);
1396 1384
1397 put_ioctx(ctx); 1385 percpu_ref_put(&ctx->users);
1398 return i ? i : ret; 1386 return i ? i : ret;
1399} 1387}
1400 1388
@@ -1483,7 +1471,7 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
1483 ret = -EFAULT; 1471 ret = -EFAULT;
1484 } 1472 }
1485 1473
1486 put_ioctx(ctx); 1474 percpu_ref_put(&ctx->users);
1487 1475
1488 return ret; 1476 return ret;
1489} 1477}
@@ -1512,7 +1500,7 @@ SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
1512 if (likely(ioctx)) { 1500 if (likely(ioctx)) {
1513 if (likely(min_nr <= nr && min_nr >= 0)) 1501 if (likely(min_nr <= nr && min_nr >= 0))
1514 ret = read_events(ioctx, min_nr, nr, events, timeout); 1502 ret = read_events(ioctx, min_nr, nr, events, timeout);
1515 put_ioctx(ioctx); 1503 percpu_ref_put(&ioctx->users);
1516 } 1504 }
1517 return ret; 1505 return ret;
1518} 1506}