author     Kent Overstreet <koverstreet@google.com>    2013-04-25 20:58:39 -0400
committer  Benjamin LaHaise <bcrl@kvack.org>           2013-07-30 11:53:11 -0400
commit     34e83fc618085e00dc9803286c581f51966673bd (patch)
tree       9c5054b4b9fc6946765f7d61a8eb47885ef2a424 /fs/aio.c
parent     0c45355fc7c48c82db151bf0e7ca305d513e639e (diff)
aio: reqs_active -> reqs_available
The number of outstanding kiocbs is one of the few shared things left that
has to be touched for every kiocb - it'd be nice to make it percpu.

We can make it per cpu by treating it like an allocation problem: we have a
maximum number of kiocbs that can be outstanding (i.e. slots) - then we just
allocate and free slots, and we know how to write per cpu allocators.

So as prep work for that, we convert reqs_active to reqs_available.

Signed-off-by: Kent Overstreet <koverstreet@google.com>
Cc: Zach Brown <zab@redhat.com>
Cc: Felipe Balbi <balbi@ti.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Mark Fasheh <mfasheh@suse.com>
Cc: Joel Becker <jlbec@evilplan.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Asai Thambi S P <asamymuthupa@micron.com>
Cc: Selvan Mani <smani@micron.com>
Cc: Sam Bradshaw <sbradshaw@micron.com>
Cc: Jeff Moyer <jmoyer@redhat.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Benjamin LaHaise <bcrl@kvack.org>
Reviewed-by: "Theodore Ts'o" <tytso@mit.edu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Benjamin LaHaise <bcrl@kvack.org>
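The slot-allocation model the message describes can be sketched in plain C11 atomics (a rough sketch with hypothetical names, not the kernel's atomic_t API and not part of this patch): the counter starts at the ring capacity minus one, is decremented only while positive to claim a slot, and is incremented again once the corresponding io_event has been pulled off the ring.

	#include <stdatomic.h>
	#include <stdbool.h>

	/* Free completion-ring slots, initialised to capacity - 1. */
	static atomic_int slots_available;

	/* Claim a slot before starting an operation; fail if the ring is full.
	 * Loosely analogous to atomic_dec_if_positive() in the patch. */
	static bool slot_get(void)
	{
		int old = atomic_load(&slots_available);

		while (old > 0) {
			/* On failure, old is reloaded with the current value. */
			if (atomic_compare_exchange_weak(&slots_available, &old, old - 1))
				return true;
		}
		return false;
	}

	/* Return a slot once its io_event has been consumed from the ring. */
	static void slot_put(void)
	{
		atomic_fetch_add(&slots_available, 1);
	}

Counting free slots rather than active requests turns the counter into a resource pool, which is what makes the later percpu conversion look like an ordinary percpu allocator.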
Diffstat (limited to 'fs/aio.c')
-rw-r--r--  fs/aio.c  |  38
1 file changed, 22 insertions(+), 16 deletions(-)
diff --git a/fs/aio.c b/fs/aio.c
index dedeea01e4e4..0e23dfa77b0e 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -94,7 +94,13 @@ struct kioctx {
 	struct work_struct	rcu_work;
 
 	struct {
-		atomic_t	reqs_active;
+		/*
+		 * This counts the number of available slots in the ringbuffer,
+		 * so we avoid overflowing it: it's decremented (if positive)
+		 * when allocating a kiocb and incremented when the resulting
+		 * io_event is pulled off the ringbuffer.
+		 */
+		atomic_t	reqs_available;
 	} ____cacheline_aligned_in_smp;
 
 	struct {
@@ -404,19 +410,20 @@ static void free_ioctx(struct kioctx *ctx)
 	head = ring->head;
 	kunmap_atomic(ring);
 
-	while (atomic_read(&ctx->reqs_active) > 0) {
+	while (atomic_read(&ctx->reqs_available) < ctx->nr_events - 1) {
 		wait_event(ctx->wait,
-			   head != ctx->tail ||
-			   atomic_read(&ctx->reqs_active) <= 0);
+			   (head != ctx->tail) ||
+			   (atomic_read(&ctx->reqs_available) >=
+			    ctx->nr_events - 1));
 
 		avail = (head <= ctx->tail ? ctx->tail : ctx->nr_events) - head;
 
-		atomic_sub(avail, &ctx->reqs_active);
+		atomic_add(avail, &ctx->reqs_available);
 		head += avail;
 		head %= ctx->nr_events;
 	}
 
-	WARN_ON(atomic_read(&ctx->reqs_active) < 0);
+	WARN_ON(atomic_read(&ctx->reqs_available) > ctx->nr_events - 1);
 
 	aio_free_ring(ctx);
 
@@ -475,6 +482,8 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 	if (aio_setup_ring(ctx) < 0)
 		goto out_freectx;
 
+	atomic_set(&ctx->reqs_available, ctx->nr_events - 1);
+
 	/* limit the number of system wide aios */
 	spin_lock(&aio_nr_lock);
 	if (aio_nr + nr_events > aio_max_nr ||
@@ -586,7 +595,7 @@ void exit_aio(struct mm_struct *mm)
586 "exit_aio:ioctx still alive: %d %d %d\n", 595 "exit_aio:ioctx still alive: %d %d %d\n",
587 atomic_read(&ctx->users), 596 atomic_read(&ctx->users),
588 atomic_read(&ctx->dead), 597 atomic_read(&ctx->dead),
589 atomic_read(&ctx->reqs_active)); 598 atomic_read(&ctx->reqs_available));
590 /* 599 /*
591 * We don't need to bother with munmap() here - 600 * We don't need to bother with munmap() here -
592 * exit_mmap(mm) is coming and it'll unmap everything. 601 * exit_mmap(mm) is coming and it'll unmap everything.
@@ -615,12 +624,9 @@ static inline struct kiocb *aio_get_req(struct kioctx *ctx)
 {
 	struct kiocb *req;
 
-	if (atomic_read(&ctx->reqs_active) >= ctx->nr_events)
+	if (atomic_dec_if_positive(&ctx->reqs_available) <= 0)
 		return NULL;
 
-	if (atomic_inc_return(&ctx->reqs_active) > ctx->nr_events - 1)
-		goto out_put;
-
 	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO);
 	if (unlikely(!req))
 		goto out_put;
@@ -630,7 +636,7 @@ static inline struct kiocb *aio_get_req(struct kioctx *ctx)
 
 	return req;
 out_put:
-	atomic_dec(&ctx->reqs_active);
+	atomic_inc(&ctx->reqs_available);
 	return NULL;
 }
 
@@ -701,7 +707,7 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
 
 	/*
 	 * Take rcu_read_lock() in case the kioctx is being destroyed, as we
-	 * need to issue a wakeup after decrementing reqs_active.
+	 * need to issue a wakeup after incrementing reqs_available.
 	 */
 	rcu_read_lock();
 
@@ -719,7 +725,7 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
 	 */
 	if (unlikely(xchg(&iocb->ki_cancel,
 			  KIOCB_CANCELLED) == KIOCB_CANCELLED)) {
-		atomic_dec(&ctx->reqs_active);
+		atomic_inc(&ctx->reqs_available);
 		/* Still need the wake_up in case free_ioctx is waiting */
 		goto put_rq;
 	}
@@ -857,7 +863,7 @@ static long aio_read_events_ring(struct kioctx *ctx,
 
 	pr_debug("%li h%u t%u\n", ret, head, ctx->tail);
 
-	atomic_sub(ret, &ctx->reqs_active);
+	atomic_add(ret, &ctx->reqs_available);
 out:
 	mutex_unlock(&ctx->ring_lock);
 
@@ -1241,7 +1247,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 	aio_put_req(req);	/* drop extra ref to req */
 	return 0;
 out_put_req:
-	atomic_dec(&ctx->reqs_active);
+	atomic_inc(&ctx->reqs_available);
 	aio_put_req(req);	/* drop extra ref to req */
 	aio_put_req(req);	/* drop i/o ref to req */
 	return ret;