author    Jens Axboe <jens.axboe@oracle.com>    2008-07-28 07:06:00 -0400
committer Jens Axboe <jens.axboe@oracle.com>    2008-10-09 02:56:09 -0400
commit    b646fc59b332ef307895558c9cd1359dc2d25813 (patch)
tree      47d703053144c099abee8326e52ed0d3b05920c6 /block/blk-core.c
parent    0835da67c11e879ed5dc23160934d8970470a2ce (diff)
block: split softirq handling into blk-softirq.c
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
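
This commit is pure code movement: the per-CPU completion list, the BLOCK_SOFTIRQ handler, the CPU-hotplug notifier, and blk_complete_request() all leave blk-core.c. The diff below is limited to block/blk-core.c, so only the deletions are visible; the matching additions land in the new block/blk-softirq.c. The idiom being relocated is splice-then-process: completions are queued on a per-CPU list with interrupts off, and the softirq handler swaps that list for an empty one before walking the entries, so the shared list is only ever held for a few instructions. Below is a minimal, runnable userspace sketch of the same idiom; a mutex stands in for interrupt disabling and a singly linked list for the per-CPU blk_cpu_done list, and every name in it is illustrative rather than kernel API.

/* splice_done.c - userspace sketch of the splice-then-process idiom
 * moved by this commit. Illustration only, not kernel code. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct request {
	int id;
	struct request *next;
};

static struct request *done_list;		/* blk_cpu_done stand-in */
static pthread_mutex_t done_lock = PTHREAD_MUTEX_INITIALIZER;

/* blk_complete_request() analogue: queue the request; the kernel would
 * then raise_softirq_irqoff(BLOCK_SOFTIRQ). The kernel keeps FIFO order
 * with list_add_tail(); this sketch pushes at the head for brevity. */
static void complete_request(struct request *rq)
{
	pthread_mutex_lock(&done_lock);		/* local_irq_save() stand-in */
	rq->next = done_list;
	done_list = rq;
	pthread_mutex_unlock(&done_lock);	/* local_irq_restore() stand-in */
}

/* blk_done_softirq() analogue: swap the shared list for an empty one
 * (list_replace_init() in the kernel), then walk the private copy with
 * no lock held. */
static void done_softirq(void)
{
	struct request *local;

	pthread_mutex_lock(&done_lock);
	local = done_list;
	done_list = NULL;
	pthread_mutex_unlock(&done_lock);

	while (local) {
		struct request *rq = local;

		local = rq->next;
		printf("completed request %d\n", rq->id);	/* softirq_done_fn() */
		free(rq);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct request *rq = malloc(sizeof(*rq));

		rq->id = i;
		complete_request(rq);
	}
	done_softirq();
	return 0;
}

The kernel version needs no lock at all: the list is strictly per-CPU and is only raced against by local interrupts, so local_irq_disable() around list_replace_init() is sufficient.
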
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--   block/blk-core.c   88
1 file changed, 0 insertions(+), 88 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 98138f002524..527b3382a610 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -26,8 +26,6 @@
 #include <linux/swap.h>
 #include <linux/writeback.h>
 #include <linux/task_io_accounting_ops.h>
-#include <linux/interrupt.h>
-#include <linux/cpu.h>
 #include <linux/blktrace_api.h>
 #include <linux/fault-inject.h>
 
@@ -50,8 +48,6 @@ struct kmem_cache *blk_requestq_cachep;
  */
 static struct workqueue_struct *kblockd_workqueue;
 
-static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
-
 static void drive_stat_acct(struct request *rq, int new_io)
 {
 	struct hd_struct *part;
@@ -1644,82 +1640,6 @@ static int __end_that_request_first(struct request *req, int error,
 }
 
 /*
- * splice the completion data to a local structure and hand off to
- * process_completion_queue() to complete the requests
- */
-static void blk_done_softirq(struct softirq_action *h)
-{
-	struct list_head *cpu_list, local_list;
-
-	local_irq_disable();
-	cpu_list = &__get_cpu_var(blk_cpu_done);
-	list_replace_init(cpu_list, &local_list);
-	local_irq_enable();
-
-	while (!list_empty(&local_list)) {
-		struct request *rq;
-
-		rq = list_entry(local_list.next, struct request, donelist);
-		list_del_init(&rq->donelist);
-		rq->q->softirq_done_fn(rq);
-	}
-}
-
-static int __cpuinit blk_cpu_notify(struct notifier_block *self,
-				    unsigned long action, void *hcpu)
-{
-	/*
-	 * If a CPU goes away, splice its entries to the current CPU
-	 * and trigger a run of the softirq
-	 */
-	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
-		int cpu = (unsigned long) hcpu;
-
-		local_irq_disable();
-		list_splice_init(&per_cpu(blk_cpu_done, cpu),
-				 &__get_cpu_var(blk_cpu_done));
-		raise_softirq_irqoff(BLOCK_SOFTIRQ);
-		local_irq_enable();
-	}
-
-	return NOTIFY_OK;
-}
-
-
-static struct notifier_block blk_cpu_notifier __cpuinitdata = {
-	.notifier_call	= blk_cpu_notify,
-};
-
-/**
- * blk_complete_request - end I/O on a request
- * @req: the request being processed
- *
- * Description:
- *     Ends all I/O on a request. It does not handle partial completions,
- *     unless the driver actually implements this in its completion callback
- *     through requeueing. The actual completion happens out-of-order,
- *     through a softirq handler. The user must have registered a completion
- *     callback through blk_queue_softirq_done().
- **/
-
-void blk_complete_request(struct request *req)
-{
-	struct list_head *cpu_list;
-	unsigned long flags;
-
-	BUG_ON(!req->q->softirq_done_fn);
-
-	local_irq_save(flags);
-
-	cpu_list = &__get_cpu_var(blk_cpu_done);
-	list_add_tail(&req->donelist, cpu_list);
-	raise_softirq_irqoff(BLOCK_SOFTIRQ);
-
-	local_irq_restore(flags);
-}
-EXPORT_SYMBOL(blk_complete_request);
-
 /*
  * queue lock must be held
  */
 static void end_that_request_last(struct request *req, int error)
@@ -2053,8 +1973,6 @@ EXPORT_SYMBOL(kblockd_flush_work);
 
 int __init blk_dev_init(void)
 {
-	int i;
-
 	kblockd_workqueue = create_workqueue("kblockd");
 	if (!kblockd_workqueue)
 		panic("Failed to create kblockd\n");
@@ -2065,12 +1983,6 @@ int __init blk_dev_init(void)
 	blk_requestq_cachep = kmem_cache_create("blkdev_queue",
 			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
 
-	for_each_possible_cpu(i)
-		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
-
-	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
-	register_hotcpu_notifier(&blk_cpu_notifier);
-
 	return 0;
 }
 
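The driver-facing contract is untouched by the split: blk_complete_request() stays exported, and, as the removed kernel-doc above notes, a driver must have registered a completion callback with blk_queue_softirq_done() beforehand. A sketch of the usual call pattern in a driver of this era follows; my_softirq_done, my_irq_handler, my_init_queue, and my_fetch_completed are hypothetical driver code, not kernel API.

#include <linux/blkdev.h>
#include <linux/interrupt.h>

/* Hypothetical hardware access: pull one finished request off the
 * controller's completion ring (driver-specific, sketched only). */
static struct request *my_fetch_completed(void *dev_id);

/* Runs in BLOCK_SOFTIRQ context: the heavier end-of-request work
 * (ending the request, releasing driver resources) goes here. */
static void my_softirq_done(struct request *rq)
{
}

/* Hard-IRQ handler: do the minimum, then defer to the block softirq. */
static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	struct request *rq = my_fetch_completed(dev_id);

	if (!rq)
		return IRQ_NONE;

	blk_complete_request(rq);	/* queue on blk_cpu_done, raise BLOCK_SOFTIRQ */
	return IRQ_HANDLED;
}

/* At queue-setup time, register the completion callback. */
static int my_init_queue(struct request_queue *q)
{
	blk_queue_softirq_done(q, my_softirq_done);
	return 0;
}

The hotplug notifier travels with the rest of the machinery for the same reason it existed here: per-CPU completion lists would otherwise strand entries on a dead CPU, so CPU_DEAD splices them onto the current CPU and re-raises BLOCK_SOFTIRQ rather than losing the requests.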