author    Jens Axboe <jaxboe@fusionio.com>    2011-03-02 20:12:18 -0500
committer Jens Axboe <jaxboe@fusionio.com>    2011-03-10 02:52:27 -0500
commit    cf15900e1209d5b46ec2d24643adbf561830935f (patch)
tree      f66ed809657402d7c0a082b19a1a64745b604fe7
parent    9f5b9425468c85a901d863d241ba5c5dff9b23b8 (diff)
aio: remove request submission batching
This should be useless now that we have on-stack plugging. So let's just kill it.

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
-rw-r--r--    fs/aio.c    75
1 file changed, 3 insertions, 72 deletions
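For context, the "on-stack plugging" the commit message refers to is the blk_start_plug()/blk_finish_plug() pair that do_io_submit() already wraps around its submission loop: the struct blk_plug declaration and the blk_finish_plug() call are visible as unchanged context in the last two hunks below, and the matching blk_start_plug() call precedes the loop in the part of the function the patch does not touch. A minimal sketch of that pattern follows; submit_batch(), submit_one() and the void * request array are illustrative names, not fs/aio.c code, and only struct blk_plug, blk_start_plug() and blk_finish_plug() from <linux/blkdev.h> are real kernel API.

#include <linux/blkdev.h>

/*
 * Sketch only: requests issued between blk_start_plug() and
 * blk_finish_plug() are collected on the task's plug list and handed
 * to the block layer in one batch when the plug is finished.  Per the
 * commit message, this is what makes the explicit aio_batch_*
 * machinery removed below redundant.  submit_batch()/submit_one() and
 * the request type are hypothetical.
 */
static long submit_batch(void **reqs, long nr, long (*submit_one)(void *))
{
        struct blk_plug plug;
        long i, ret = 0;

        blk_start_plug(&plug);          /* start collecting I/O for this task */
        for (i = 0; i < nr; i++) {
                ret = submit_one(reqs[i]);
                if (ret)
                        break;
        }
        blk_finish_plug(&plug);         /* flush the queued requests in one go */

        return i ? i : ret;             /* same return convention as do_io_submit() */
}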
diff --git a/fs/aio.c b/fs/aio.c
index 1476bed1c5fb..020de5cb4a67 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -34,8 +34,6 @@
 #include <linux/security.h>
 #include <linux/eventfd.h>
 #include <linux/blkdev.h>
-#include <linux/mempool.h>
-#include <linux/hash.h>
 #include <linux/compat.h>
 
 #include <asm/kmap_types.h>
@@ -65,14 +63,6 @@ static DECLARE_WORK(fput_work, aio_fput_routine);
 static DEFINE_SPINLOCK(fput_lock);
 static LIST_HEAD(fput_head);
 
-#define AIO_BATCH_HASH_BITS     3 /* allocated on-stack, so don't go crazy */
-#define AIO_BATCH_HASH_SIZE     (1 << AIO_BATCH_HASH_BITS)
-struct aio_batch_entry {
-        struct hlist_node list;
-        struct address_space *mapping;
-};
-mempool_t *abe_pool;
-
 static void aio_kick_handler(struct work_struct *);
 static void aio_queue_work(struct kioctx *);
 
@@ -86,8 +76,7 @@ static int __init aio_setup(void)
         kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);
 
         aio_wq = create_workqueue("aio");
-        abe_pool = mempool_create_kmalloc_pool(1, sizeof(struct aio_batch_entry));
-        BUG_ON(!aio_wq || !abe_pool);
+        BUG_ON(!aio_wq);
 
         pr_debug("aio_setup: sizeof(struct page) = %d\n", (int)sizeof(struct page));
 
@@ -1512,59 +1501,8 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
         return 0;
 }
 
-static void aio_batch_add(struct address_space *mapping,
-                          struct hlist_head *batch_hash)
-{
-        struct aio_batch_entry *abe;
-        struct hlist_node *pos;
-        unsigned bucket;
-
-        bucket = hash_ptr(mapping, AIO_BATCH_HASH_BITS);
-        hlist_for_each_entry(abe, pos, &batch_hash[bucket], list) {
-                if (abe->mapping == mapping)
-                        return;
-        }
-
-        abe = mempool_alloc(abe_pool, GFP_KERNEL);
-
-        /*
-         * we should be using igrab here, but
-         * we don't want to hammer on the global
-         * inode spinlock just to take an extra
-         * reference on a file that we must already
-         * have a reference to.
-         *
-         * When we're called, we always have a reference
-         * on the file, so we must always have a reference
-         * on the inode, so ihold() is safe here.
-         */
-        ihold(mapping->host);
-        abe->mapping = mapping;
-        hlist_add_head(&abe->list, &batch_hash[bucket]);
-        return;
-}
-
-static void aio_batch_free(struct hlist_head *batch_hash)
-{
-        struct aio_batch_entry *abe;
-        struct hlist_node *pos, *n;
-        int i;
-
-        /*
-         * TODO: kill this
-         */
-        for (i = 0; i < AIO_BATCH_HASH_SIZE; i++) {
-                hlist_for_each_entry_safe(abe, pos, n, &batch_hash[i], list) {
-                        iput(abe->mapping->host);
-                        hlist_del(&abe->list);
-                        mempool_free(abe, abe_pool);
-                }
-        }
-}
-
 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
-                         struct iocb *iocb, struct hlist_head *batch_hash,
-                         bool compat)
+                         struct iocb *iocb, bool compat)
 {
         struct kiocb *req;
         struct file *file;
@@ -1638,11 +1576,6 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
                 ;
         }
         spin_unlock_irq(&ctx->ctx_lock);
-        if (req->ki_opcode == IOCB_CMD_PREAD ||
-            req->ki_opcode == IOCB_CMD_PREADV ||
-            req->ki_opcode == IOCB_CMD_PWRITE ||
-            req->ki_opcode == IOCB_CMD_PWRITEV)
-                aio_batch_add(file->f_mapping, batch_hash);
 
         aio_put_req(req);       /* drop extra ref to req */
         return 0;
@@ -1659,7 +1592,6 @@ long do_io_submit(aio_context_t ctx_id, long nr,
         struct kioctx *ctx;
         long ret = 0;
         int i;
-        struct hlist_head batch_hash[AIO_BATCH_HASH_SIZE] = { { 0, }, };
         struct blk_plug plug;
 
         if (unlikely(nr < 0))
@@ -1697,12 +1629,11 @@ long do_io_submit(aio_context_t ctx_id, long nr,
                         break;
                 }
 
-                ret = io_submit_one(ctx, user_iocb, &tmp, batch_hash, compat);
+                ret = io_submit_one(ctx, user_iocb, &tmp, compat);
                 if (ret)
                         break;
         }
         blk_finish_plug(&plug);
-        aio_batch_free(batch_hash);
 
         put_ioctx(ctx);
         return i ? i : ret;