author		Kent Overstreet <koverstreet@google.com>	2013-05-07 19:18:53 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-05-07 21:38:29 -0400
commit		a1c8eae75ea3b0168fca93788db1b5aef2424921 (patch)
tree		9f22b400935bf4a58e676a0515822d674efddf52 /fs/aio.c
parent		3e845ce01a391d7c5d59ff2f28db5381bf02fa27 (diff)
aio: kill batch allocation
Previously, allocating a kiocb required touching quite a few global
(well, per kioctx) cachelines... so batching up allocation to amortize
those was worthwhile. But we've gotten rid of some of those, and in
another couple of patches kiocb allocation won't require writing to any
shared cachelines, so that means we can just rip this code out.

Signed-off-by: Kent Overstreet <koverstreet@google.com>
Cc: Zach Brown <zab@redhat.com>
Cc: Felipe Balbi <balbi@ti.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Mark Fasheh <mfasheh@suse.com>
Cc: Joel Becker <jlbec@evilplan.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Asai Thambi S P <asamymuthupa@micron.com>
Cc: Selvan Mani <smani@micron.com>
Cc: Sam Bradshaw <sbradshaw@micron.com>
Cc: Jeff Moyer <jmoyer@redhat.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Benjamin LaHaise <bcrl@kvack.org>
Reviewed-by: "Theodore Ts'o" <tytso@mit.edu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
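An illustration of the reasoning above (editor's sketch, not part of the patch): reserving a ring slot for each request means one write to a hot, shared per-kioctx counter per kiocb, and the batching code existed purely to amortize that write; once the per-request cost of touching shared cachelines goes away, the one-at-a-time path is sufficient. The names below (reserve_one, reserve_batch, RING_NR, BATCH, reqs_active) are hypothetical stand-ins for ctx->reqs_active, ctx->ring_info.nr and KIOCB_BATCH_SIZE, and the batch variant is simplified: the real code allocated first and trimmed the batch under ctx_lock.

/*
 * Illustration only -- userspace C, not kernel code, not part of this patch.
 * reserve_one() mirrors the shape of the new aio_get_req(): one atomic
 * (shared-cacheline) update per request, rolled back when the ring is full.
 * reserve_batch() shows why the removed code existed: one shared update
 * covers up to BATCH requests.
 */
#include <stdatomic.h>
#include <stdbool.h>

#define RING_NR	128			/* stands in for ctx->ring_info.nr */
#define BATCH	32			/* stands in for KIOCB_BATCH_SIZE  */

static atomic_long reqs_active;		/* the hot per-context counter */

static bool reserve_one(void)
{
	if (atomic_load(&reqs_active) >= RING_NR)
		return false;
	if (atomic_fetch_add(&reqs_active, 1) + 1 > RING_NR - 1) {
		atomic_fetch_sub(&reqs_active, 1);	/* roll back, ring is full */
		return false;
	}
	return true;
}

static long reserve_batch(long want)
{
	long n = want < BATCH ? want : BATCH;

	if (atomic_fetch_add(&reqs_active, n) + n > RING_NR - 1) {
		atomic_fetch_sub(&reqs_active, n);	/* over-reserved, give it back */
		return 0;
	}
	return n;			/* caller hands out the n slots locally */
}

The patch keeps only the reserve_one() shape (the new aio_get_req() in the diff below); per the message above, follow-up patches remove the remaining shared-cacheline writes entirely.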
Diffstat (limited to 'fs/aio.c')
-rw-r--r--	fs/aio.c	116
1 file changed, 15 insertions(+), 101 deletions(-)
diff --git a/fs/aio.c b/fs/aio.c
index d3bff60b5fe6..263ebce940c0 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -510,108 +510,27 @@ void exit_aio(struct mm_struct *mm)
  * This prevents races between the aio code path referencing the
  * req (after submitting it) and aio_complete() freeing the req.
  */
-static struct kiocb *__aio_get_req(struct kioctx *ctx)
+static inline struct kiocb *aio_get_req(struct kioctx *ctx)
 {
-	struct kiocb *req = NULL;
+	struct kiocb *req;
+
+	if (atomic_read(&ctx->reqs_active) >= ctx->ring_info.nr)
+		return NULL;
+
+	if (atomic_inc_return(&ctx->reqs_active) > ctx->ring_info.nr - 1)
+		goto out_put;
 
 	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO);
 	if (unlikely(!req))
-		return NULL;
+		goto out_put;
 
 	atomic_set(&req->ki_users, 2);
 	req->ki_ctx = ctx;
 
 	return req;
-}
-
-/*
- * struct kiocb's are allocated in batches to reduce the number of
- * times the ctx lock is acquired and released.
- */
-#define KIOCB_BATCH_SIZE	32L
-struct kiocb_batch {
-	struct list_head head;
-	long count; /* number of requests left to allocate */
-};
-
-static void kiocb_batch_init(struct kiocb_batch *batch, long total)
-{
-	INIT_LIST_HEAD(&batch->head);
-	batch->count = total;
-}
-
-static void kiocb_batch_free(struct kioctx *ctx, struct kiocb_batch *batch)
-{
-	struct kiocb *req, *n;
-
-	if (list_empty(&batch->head))
-		return;
-
-	spin_lock_irq(&ctx->ctx_lock);
-	list_for_each_entry_safe(req, n, &batch->head, ki_batch) {
-		list_del(&req->ki_batch);
-		kmem_cache_free(kiocb_cachep, req);
-		atomic_dec(&ctx->reqs_active);
-	}
-	spin_unlock_irq(&ctx->ctx_lock);
-}
-
-/*
- * Allocate a batch of kiocbs. This avoids taking and dropping the
- * context lock a lot during setup.
- */
-static int kiocb_batch_refill(struct kioctx *ctx, struct kiocb_batch *batch)
-{
-	unsigned short allocated, to_alloc;
-	long avail;
-	struct kiocb *req, *n;
-
-	to_alloc = min(batch->count, KIOCB_BATCH_SIZE);
-	for (allocated = 0; allocated < to_alloc; allocated++) {
-		req = __aio_get_req(ctx);
-		if (!req)
-			/* allocation failed, go with what we've got */
-			break;
-		list_add(&req->ki_batch, &batch->head);
-	}
-
-	if (allocated == 0)
-		goto out;
-
-	spin_lock_irq(&ctx->ctx_lock);
-
-	avail = ctx->ring_info.nr - atomic_read(&ctx->reqs_active) - 1;
-	BUG_ON(avail < 0);
-	if (avail < allocated) {
-		/* Trim back the number of requests. */
-		list_for_each_entry_safe(req, n, &batch->head, ki_batch) {
-			list_del(&req->ki_batch);
-			kmem_cache_free(kiocb_cachep, req);
-			if (--allocated <= avail)
-				break;
-		}
-	}
-
-	batch->count -= allocated;
-	atomic_add(allocated, &ctx->reqs_active);
-
-	spin_unlock_irq(&ctx->ctx_lock);
-
-out:
-	return allocated;
-}
-
-static inline struct kiocb *aio_get_req(struct kioctx *ctx,
-					struct kiocb_batch *batch)
-{
-	struct kiocb *req;
-
-	if (list_empty(&batch->head))
-		if (kiocb_batch_refill(ctx, batch) == 0)
-			return NULL;
-	req = list_first_entry(&batch->head, struct kiocb, ki_batch);
-	list_del(&req->ki_batch);
-	return req;
+out_put:
+	atomic_dec(&ctx->reqs_active);
+	return NULL;
 }
 
 static void kiocb_free(struct kiocb *req)
@@ -1198,8 +1117,7 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
 }
 
 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
-			 struct iocb *iocb, struct kiocb_batch *batch,
-			 bool compat)
+			 struct iocb *iocb, bool compat)
 {
 	struct kiocb *req;
 	ssize_t ret;
@@ -1220,7 +1138,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 		return -EINVAL;
 	}
 
-	req = aio_get_req(ctx, batch);	/* returns with 2 references to req */
+	req = aio_get_req(ctx);		/* returns with 2 references to req */
 	if (unlikely(!req))
 		return -EAGAIN;
 
@@ -1293,7 +1211,6 @@ long do_io_submit(aio_context_t ctx_id, long nr,
 	long ret = 0;
 	int i = 0;
 	struct blk_plug plug;
-	struct kiocb_batch batch;
 
 	if (unlikely(nr < 0))
 		return -EINVAL;
@@ -1310,8 +1227,6 @@ long do_io_submit(aio_context_t ctx_id, long nr,
 		return -EINVAL;
 	}
 
-	kiocb_batch_init(&batch, nr);
-
 	blk_start_plug(&plug);
 
 	/*
@@ -1332,13 +1247,12 @@ long do_io_submit(aio_context_t ctx_id, long nr,
 			break;
 		}
 
-		ret = io_submit_one(ctx, user_iocb, &tmp, &batch, compat);
+		ret = io_submit_one(ctx, user_iocb, &tmp, compat);
 		if (ret)
 			break;
 	}
 	blk_finish_plug(&plug);
 
-	kiocb_batch_free(ctx, &batch);
 	put_ioctx(ctx);
 	return i ? i : ret;
 }
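For reference, a minimal userspace sketch (editor's addition, not part of the patch) of the path this diff touches: each iocb handed to io_submit(2) reaches do_io_submit() -> io_submit_one() -> aio_get_req(), so after this change every submitted request allocates its kiocb directly instead of drawing from a per-io_submit() batch. The file name "testfile" and the single 4 KiB read are arbitrary, and error handling is trimmed.

/* Build: cc -o aio_submit aio_submit.c  (Linux only, raw syscalls) */
#include <fcntl.h>
#include <linux/aio_abi.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	aio_context_t ctx = 0;
	struct iocb cb;
	struct iocb *cbs[1] = { &cb };
	struct io_event ev;
	char buf[4096];
	int fd = open("testfile", O_RDONLY);

	if (fd < 0 || syscall(SYS_io_setup, 128, &ctx) < 0)
		return 1;

	memset(&cb, 0, sizeof(cb));
	cb.aio_lio_opcode = IOCB_CMD_PREAD;
	cb.aio_fildes     = fd;
	cb.aio_buf        = (__u64)(unsigned long)buf;
	cb.aio_nbytes     = sizeof(buf);
	cb.aio_offset     = 0;

	/* one kiocb is allocated (aio_get_req) for this one iocb */
	if (syscall(SYS_io_submit, ctx, 1, cbs) != 1)
		return 1;

	/* wait for completion; aio_complete() then frees the request */
	syscall(SYS_io_getevents, ctx, 1, 1, &ev, NULL);
	printf("read returned %lld\n", (long long)ev.res);

	syscall(SYS_io_destroy, ctx);
	close(fd);
	return 0;
}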