Diffstat (limited to 'fs/aio.c')
-rw-r--r--	fs/aio.c	62
1 file changed, 60 insertions, 2 deletions
@@ -15,6 +15,7 @@
 #include <linux/aio_abi.h>
 #include <linux/module.h>
 #include <linux/syscalls.h>
+#include <linux/backing-dev.h>
 #include <linux/uio.h>
 
 #define DEBUG 0
@@ -32,6 +33,9 @@
 #include <linux/workqueue.h>
 #include <linux/security.h>
 #include <linux/eventfd.h>
+#include <linux/blkdev.h>
+#include <linux/mempool.h>
+#include <linux/hash.h>
 
 #include <asm/kmap_types.h>
 #include <asm/uaccess.h>
@@ -60,6 +64,14 @@ static DECLARE_WORK(fput_work, aio_fput_routine);
 static DEFINE_SPINLOCK(fput_lock);
 static LIST_HEAD(fput_head);
 
+#define AIO_BATCH_HASH_BITS	3 /* allocated on-stack, so don't go crazy */
+#define AIO_BATCH_HASH_SIZE	(1 << AIO_BATCH_HASH_BITS)
+struct aio_batch_entry {
+	struct hlist_node list;
+	struct address_space *mapping;
+};
+mempool_t *abe_pool;
+
 static void aio_kick_handler(struct work_struct *);
 static void aio_queue_work(struct kioctx *);
 
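Not part of the patch: the table is kept tiny because, as the comment on AIO_BATCH_HASH_BITS warns, io_submit() places it on the kernel stack. A rough footprint sketch, assuming a 64-bit build where an hlist_head holds a single pointer:

/* Illustration only: per-io_submit() stack cost of the batch table on a
 * 64-bit kernel, where struct hlist_head is one pointer (8 bytes). */
struct hlist_head batch_hash[AIO_BATCH_HASH_SIZE];	/* 8 buckets */
/* sizeof(batch_hash) == 8 * sizeof(struct hlist_head) == 64 bytes */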
@@ -73,6 +85,8 @@ static int __init aio_setup(void)
 	kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);
 
 	aio_wq = create_workqueue("aio");
+	abe_pool = mempool_create_kmalloc_pool(1, sizeof(struct aio_batch_entry));
+	BUG_ON(!abe_pool);
 
 	pr_debug("aio_setup: sizeof(struct page) = %d\n", (int)sizeof(struct page));
 
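Not part of the patch: a pool of a single reserved entry is enough here because mempool_alloc() with GFP_KERNEL waits for the reserve to be refilled rather than failing, so aio_batch_add() always gets an entry. For reference, mempool_create_kmalloc_pool() is a thin convenience wrapper over mempool_create(); a sketch of its shape (see include/linux/mempool.h for the authoritative definition):

/* Sketch of the helper's shape; the kernel provides this as a static inline
 * in <linux/mempool.h>, backed by mempool_kmalloc()/mempool_kfree(). */
static inline mempool_t *mempool_create_kmalloc_pool(int min_nr, size_t size)
{
	return mempool_create(min_nr, mempool_kmalloc, mempool_kfree,
			      (void *)size);
}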
@@ -1531,8 +1545,44 @@ static int aio_wake_function(wait_queue_t *wait, unsigned mode,
 	return 1;
 }
 
+static void aio_batch_add(struct address_space *mapping,
+			  struct hlist_head *batch_hash)
+{
+	struct aio_batch_entry *abe;
+	struct hlist_node *pos;
+	unsigned bucket;
+
+	bucket = hash_ptr(mapping, AIO_BATCH_HASH_BITS);
+	hlist_for_each_entry(abe, pos, &batch_hash[bucket], list) {
+		if (abe->mapping == mapping)
+			return;
+	}
+
+	abe = mempool_alloc(abe_pool, GFP_KERNEL);
+	BUG_ON(!igrab(mapping->host));
+	abe->mapping = mapping;
+	hlist_add_head(&abe->list, &batch_hash[bucket]);
+	return;
+}
+
+static void aio_batch_free(struct hlist_head *batch_hash)
+{
+	struct aio_batch_entry *abe;
+	struct hlist_node *pos, *n;
+	int i;
+
+	for (i = 0; i < AIO_BATCH_HASH_SIZE; i++) {
+		hlist_for_each_entry_safe(abe, pos, n, &batch_hash[i], list) {
+			blk_run_address_space(abe->mapping);
+			iput(abe->mapping->host);
+			hlist_del(&abe->list);
+			mempool_free(abe, abe_pool);
+		}
+	}
+}
+
 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
-			 struct iocb *iocb)
+			 struct iocb *iocb, struct hlist_head *batch_hash)
 {
 	struct kiocb *req;
 	struct file *file;
@@ -1608,6 +1658,12 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 		;
 	}
 	spin_unlock_irq(&ctx->ctx_lock);
+	if (req->ki_opcode == IOCB_CMD_PREAD ||
+	    req->ki_opcode == IOCB_CMD_PREADV ||
+	    req->ki_opcode == IOCB_CMD_PWRITE ||
+	    req->ki_opcode == IOCB_CMD_PWRITEV)
+		aio_batch_add(file->f_mapping, batch_hash);
+
 	aio_put_req(req);	/* drop extra ref to req */
 	return 0;
 
@@ -1635,6 +1691,7 @@ SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
 	struct kioctx *ctx;
 	long ret = 0;
 	int i;
+	struct hlist_head batch_hash[AIO_BATCH_HASH_SIZE] = { { 0, }, };
 
 	if (unlikely(nr < 0))
 		return -EINVAL;
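Not part of the patch: the aggregate zero-initializer above leaves every bucket an empty hlist (first == NULL). An equivalent, more explicit form would be:

/* Equivalent explicit initialization of the on-stack buckets (sketch only;
 * the patch relies on the aggregate zero-initializer instead). */
int n;

for (n = 0; n < AIO_BATCH_HASH_SIZE; n++)
	INIT_HLIST_HEAD(&batch_hash[n]);	/* sets ->first = NULL */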
@@ -1666,10 +1723,11 @@ SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
 			break;
 		}
 
-		ret = io_submit_one(ctx, user_iocb, &tmp);
+		ret = io_submit_one(ctx, user_iocb, &tmp, batch_hash);
		if (ret)
 			break;
 	}
+	aio_batch_free(batch_hash);
 
 	put_ioctx(ctx);
 	return i ? i : ret;
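Not part of the patch: the batching only matters when a single io_submit() call carries several iocbs. A minimal userspace sketch using libaio (link with -laio); the device path, buffer size, and request count are made up for illustration:

/* Sketch: batched submission from userspace with libaio.
 * All names and sizes here are illustrative, not from the patch. */
#define _GNU_SOURCE		/* for O_DIRECT */
#include <libaio.h>
#include <fcntl.h>
#include <stdlib.h>
#include <err.h>

#define NR_IOS	16
#define IOSZ	4096

int main(void)
{
	io_context_t ctx = 0;
	struct iocb iocbs[NR_IOS], *iocbps[NR_IOS];
	struct io_event events[NR_IOS];
	void *bufs[NR_IOS];
	int fd, i;

	fd = open("/dev/sdb", O_RDONLY | O_DIRECT);	/* hypothetical device */
	if (fd < 0)
		err(1, "open");
	if (io_setup(NR_IOS, &ctx) < 0)
		errx(1, "io_setup");

	for (i = 0; i < NR_IOS; i++) {
		if (posix_memalign(&bufs[i], IOSZ, IOSZ))
			errx(1, "posix_memalign");
		io_prep_pread(&iocbs[i], fd, bufs[i], IOSZ, (long long)i * IOSZ);
		iocbps[i] = &iocbs[i];
	}

	/* One syscall queues all sixteen requests. */
	if (io_submit(ctx, NR_IOS, iocbps) != NR_IOS)
		errx(1, "io_submit");
	if (io_getevents(ctx, NR_IOS, NR_IOS, events, NULL) != NR_IOS)
		errx(1, "io_getevents");

	io_destroy(ctx);
	return 0;
}

With the patch applied, a submission like this kicks the backing device via blk_run_address_space() once per unique mapping after the whole batch is queued, rather than once per request.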
