author		Kent Overstreet <koverstreet@google.com>	2013-05-09 18:36:07 -0400
committer	Benjamin LaHaise <bcrl@kvack.org>	2013-07-30 11:53:11 -0400
commit		5ffac122dbda89fbb29885f35a5d47b0edb8936d
tree		35c6ee1e0b439983ba6d163e37d7cc66783fef9f
parent		bec68faaf3ba74ed0dcd5dc3a881b30aec542973
aio: Don't use ctx->tail unnecessarily
aio_complete() (arguably) needs to keep its own trusted copy of the tail
pointer, but io_getevents() doesn't have to use it - it's already using the
head pointer from the ring buffer.

So convert it to use the tail from the ring buffer so it touches fewer
cachelines and doesn't contend with the cacheline aio_complete() needs.

Signed-off-by: Kent Overstreet <koverstreet@google.com>
Cc: Zach Brown <zab@redhat.com>
Cc: Felipe Balbi <balbi@ti.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Mark Fasheh <mfasheh@suse.com>
Cc: Joel Becker <jlbec@evilplan.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Asai Thambi S P <asamymuthupa@micron.com>
Cc: Selvan Mani <smani@micron.com>
Cc: Sam Bradshaw <sbradshaw@micron.com>
Cc: Jeff Moyer <jmoyer@redhat.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Benjamin LaHaise <bcrl@kvack.org>
Signed-off-by: Benjamin LaHaise <bcrl@kvack.org>
-rw-r--r--	fs/aio.c	42
1 file changed, 23 insertions, 19 deletions
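For illustration only (a minimal userspace sketch, not part of the patch): the
wrap-around arithmetic that free_ioctx() now applies to the ring's own head and
tail fields can be checked in isolation. The function and test values below are
hypothetical.

#include <assert.h>
#include <stdio.h>

/* Events available in a circular buffer of nr_events slots, given a
 * head (consumer) and tail (producer) index. This is the same ternary
 * the patch computes from ring->head and ring->tail. */
static unsigned ring_avail(unsigned head, unsigned tail, unsigned nr_events)
{
	return (head <= tail) ? tail - head
			      : nr_events - head + tail;
}

int main(void)
{
	assert(ring_avail(2, 5, 8) == 3);	/* no wrap: slots 2..4 */
	assert(ring_avail(6, 1, 8) == 3);	/* wrapped: slots 6, 7, 0 */
	printf("ring_avail checks passed\n");
	return 0;
}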
diff --git a/fs/aio.c b/fs/aio.c
index 12b37689dd2c..57b02791d04e 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -406,7 +406,8 @@ static void free_ioctx(struct work_struct *work)
 	struct kioctx *ctx = container_of(work, struct kioctx, free_work);
 	struct aio_ring *ring;
 	struct kiocb *req;
-	unsigned cpu, head, avail;
+	unsigned cpu, avail;
+	DEFINE_WAIT(wait);
 
 	spin_lock_irq(&ctx->ctx_lock);
 
@@ -427,22 +428,24 @@ static void free_ioctx(struct work_struct *work)
 		kcpu->reqs_available = 0;
 	}
 
-	ring = kmap_atomic(ctx->ring_pages[0]);
-	head = ring->head;
-	kunmap_atomic(ring);
+	while (1) {
+		prepare_to_wait(&ctx->wait, &wait, TASK_UNINTERRUPTIBLE);
 
-	while (atomic_read(&ctx->reqs_available) < ctx->nr_events - 1) {
-		wait_event(ctx->wait,
-			   (head != ctx->tail) ||
-			   (atomic_read(&ctx->reqs_available) >=
-			    ctx->nr_events - 1));
-
-		avail = (head <= ctx->tail ? ctx->tail : ctx->nr_events) - head;
+		ring = kmap_atomic(ctx->ring_pages[0]);
+		avail = (ring->head <= ring->tail)
+			? ring->tail - ring->head
+			: ctx->nr_events - ring->head + ring->tail;
 
 		atomic_add(avail, &ctx->reqs_available);
-		head += avail;
-		head %= ctx->nr_events;
+		ring->head = ring->tail;
+		kunmap_atomic(ring);
+
+		if (atomic_read(&ctx->reqs_available) >= ctx->nr_events - 1)
+			break;
+
+		schedule();
 	}
+	finish_wait(&ctx->wait, &wait);
 
 	WARN_ON(atomic_read(&ctx->reqs_available) > ctx->nr_events - 1);
 
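A note on the hunk above: the new loop open-codes the wait_event() pattern with
prepare_to_wait()/schedule()/finish_wait(), since the condition must be
re-sampled from the ring under kmap_atomic() on every pass. As a rough
userspace analogue of the lost-wakeup-safe idiom (a condition variable standing
in for the kernel wait queue; every name here is hypothetical):

#include <pthread.h>

struct ioctx_sketch {
	pthread_mutex_t lock;
	pthread_cond_t wait;	/* stands in for ctx->wait */
	int reqs_available;	/* stands in for the atomic counter */
	int nr_events;
};

/* Register as a waiter (take the lock), re-check the condition, and
 * only then block, so a wakeup arriving between the check and the
 * sleep cannot be lost. The atomic release-and-sleep inside
 * pthread_cond_wait() plays the role of prepare_to_wait() followed
 * by schedule(). */
static void wait_until_quiesced(struct ioctx_sketch *ctx)
{
	pthread_mutex_lock(&ctx->lock);
	while (ctx->reqs_available < ctx->nr_events - 1)
		pthread_cond_wait(&ctx->wait, &ctx->lock);
	pthread_mutex_unlock(&ctx->lock);	/* ~ finish_wait() */
}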
@@ -869,7 +872,7 @@ static long aio_read_events_ring(struct kioctx *ctx,
 				 struct io_event __user *event, long nr)
 {
 	struct aio_ring *ring;
-	unsigned head, pos;
+	unsigned head, tail, pos;
 	long ret = 0;
 	int copy_ret;
 
@@ -877,11 +880,12 @@ static long aio_read_events_ring(struct kioctx *ctx,
 
 	ring = kmap_atomic(ctx->ring_pages[0]);
 	head = ring->head;
+	tail = ring->tail;
 	kunmap_atomic(ring);
 
-	pr_debug("h%u t%u m%u\n", head, ctx->tail, ctx->nr_events);
+	pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);
 
-	if (head == ctx->tail)
+	if (head == tail)
 		goto out;
 
 	while (ret < nr) {
@@ -889,8 +893,8 @@ static long aio_read_events_ring(struct kioctx *ctx,
 		struct io_event *ev;
 		struct page *page;
 
-		avail = (head <= ctx->tail ? ctx->tail : ctx->nr_events) - head;
-		if (head == ctx->tail)
+		avail = (head <= tail ? tail : ctx->nr_events) - head;
+		if (head == tail)
 			break;
 
 		avail = min(avail, nr - ret);
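For illustration only (again a hypothetical userspace sketch): the avail
expression in the hunk above counts only the events stored contiguously from
head, stopping at tail or at the end of the ring, so a wrapped ring is drained
in two passes of the copy loop.

#include <assert.h>
#include <stdio.h>

/* Number of events contiguous in the ring starting at head: up to
 * tail when there is no wrap, otherwise up to the last slot, leaving
 * the wrapped remainder for the next loop iteration. */
static unsigned contig_avail(unsigned head, unsigned tail, unsigned nr_events)
{
	return (head <= tail ? tail : nr_events) - head;
}

int main(void)
{
	assert(contig_avail(2, 5, 8) == 3);	/* one pass: slots 2..4 */
	assert(contig_avail(6, 1, 8) == 2);	/* first pass: slots 6..7 */
	assert(contig_avail(0, 1, 8) == 1);	/* second pass: slot 0 */
	printf("contig_avail checks passed\n");
	return 0;
}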
@@ -921,7 +925,7 @@ static long aio_read_events_ring(struct kioctx *ctx,
 	kunmap_atomic(ring);
 	flush_dcache_page(ctx->ring_pages[0]);
 
-	pr_debug("%li h%u t%u\n", ret, head, ctx->tail);
+	pr_debug("%li h%u t%u\n", ret, head, tail);
 
 	put_reqs_available(ctx, ret);
 out: