 block/Makefile      |   4 ++--
 block/blk-core.c    |  88 --------------------
 block/blk-softirq.c | 103 ++++++++++++++++++++
 3 files changed, 105 insertions(+), 90 deletions(-)
diff --git a/block/Makefile b/block/Makefile
index 208000b0750d..0da976ce67dd 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -4,8 +4,8 @@
 
 obj-$(CONFIG_BLOCK) := elevator.o blk-core.o blk-tag.o blk-sysfs.o \
 			blk-barrier.o blk-settings.o blk-ioc.o blk-map.o \
-			blk-exec.o blk-merge.o ioctl.o genhd.o scsi_ioctl.o \
-			cmd-filter.o
+			blk-exec.o blk-merge.o blk-softirq.o ioctl.o genhd.o \
+			scsi_ioctl.o cmd-filter.o
 
 obj-$(CONFIG_BLK_DEV_BSG)	+= bsg.o
 obj-$(CONFIG_IOSCHED_NOOP)	+= noop-iosched.o
diff --git a/block/blk-core.c b/block/blk-core.c
index 98138f002524..527b3382a610 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -26,8 +26,6 @@
 #include <linux/swap.h>
 #include <linux/writeback.h>
 #include <linux/task_io_accounting_ops.h>
-#include <linux/interrupt.h>
-#include <linux/cpu.h>
 #include <linux/blktrace_api.h>
 #include <linux/fault-inject.h>
 
@@ -50,8 +48,6 @@ struct kmem_cache *blk_requestq_cachep;
  */
 static struct workqueue_struct *kblockd_workqueue;
 
-static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
-
 static void drive_stat_acct(struct request *rq, int new_io)
 {
 	struct hd_struct *part;
@@ -1644,82 +1640,6 @@ static int __end_that_request_first(struct request *req, int error,
 }
 
 /*
- * splice the completion data to a local structure and hand off to
- * process_completion_queue() to complete the requests
- */
-static void blk_done_softirq(struct softirq_action *h)
-{
-	struct list_head *cpu_list, local_list;
-
-	local_irq_disable();
-	cpu_list = &__get_cpu_var(blk_cpu_done);
-	list_replace_init(cpu_list, &local_list);
-	local_irq_enable();
-
-	while (!list_empty(&local_list)) {
-		struct request *rq;
-
-		rq = list_entry(local_list.next, struct request, donelist);
-		list_del_init(&rq->donelist);
-		rq->q->softirq_done_fn(rq);
-	}
-}
-
-static int __cpuinit blk_cpu_notify(struct notifier_block *self,
-				    unsigned long action, void *hcpu)
-{
-	/*
-	 * If a CPU goes away, splice its entries to the current CPU
-	 * and trigger a run of the softirq
-	 */
-	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
-		int cpu = (unsigned long) hcpu;
-
-		local_irq_disable();
-		list_splice_init(&per_cpu(blk_cpu_done, cpu),
-				 &__get_cpu_var(blk_cpu_done));
-		raise_softirq_irqoff(BLOCK_SOFTIRQ);
-		local_irq_enable();
-	}
-
-	return NOTIFY_OK;
-}
-
-
-static struct notifier_block blk_cpu_notifier __cpuinitdata = {
-	.notifier_call	= blk_cpu_notify,
-};
-
-/**
- * blk_complete_request - end I/O on a request
- * @req:      the request being processed
- *
- * Description:
- *     Ends all I/O on a request. It does not handle partial completions,
- *     unless the driver actually implements this in its completion callback
- *     through requeueing. The actual completion happens out-of-order,
- *     through a softirq handler. The user must have registered a completion
- *     callback through blk_queue_softirq_done().
- **/
-
-void blk_complete_request(struct request *req)
-{
-	struct list_head *cpu_list;
-	unsigned long flags;
-
-	BUG_ON(!req->q->softirq_done_fn);
-
-	local_irq_save(flags);
-
-	cpu_list = &__get_cpu_var(blk_cpu_done);
-	list_add_tail(&req->donelist, cpu_list);
-	raise_softirq_irqoff(BLOCK_SOFTIRQ);
-
-	local_irq_restore(flags);
-}
-EXPORT_SYMBOL(blk_complete_request);
-
-/*
  * queue lock must be held
  */
 static void end_that_request_last(struct request *req, int error)
@@ -2053,8 +1973,6 @@ EXPORT_SYMBOL(kblockd_flush_work);
 
 int __init blk_dev_init(void)
 {
-	int i;
-
 	kblockd_workqueue = create_workqueue("kblockd");
 	if (!kblockd_workqueue)
 		panic("Failed to create kblockd\n");
@@ -2065,12 +1983,6 @@ int __init blk_dev_init(void)
 	blk_requestq_cachep = kmem_cache_create("blkdev_queue",
 			sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
 
-	for_each_possible_cpu(i)
-		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
-
-	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
-	register_hotcpu_notifier(&blk_cpu_notifier);
-
 	return 0;
 }
 
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
new file mode 100644
index 000000000000..9e1c43bff662
--- /dev/null
+++ b/block/blk-softirq.c
@@ -0,0 +1,103 @@
+/*
+ * Functions related to softirq rq completions
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/interrupt.h>
+#include <linux/cpu.h>
+
+#include "blk.h"
+
+static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
+
+static int __cpuinit blk_cpu_notify(struct notifier_block *self,
+				    unsigned long action, void *hcpu)
+{
+	/*
+	 * If a CPU goes away, splice its entries to the current CPU
+	 * and trigger a run of the softirq
+	 */
+	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
+		int cpu = (unsigned long) hcpu;
+
+		local_irq_disable();
+		list_splice_init(&per_cpu(blk_cpu_done, cpu),
+				 &__get_cpu_var(blk_cpu_done));
+		raise_softirq_irqoff(BLOCK_SOFTIRQ);
+		local_irq_enable();
+	}
+
+	return NOTIFY_OK;
+}
+
+
+static struct notifier_block blk_cpu_notifier __cpuinitdata = {
+	.notifier_call	= blk_cpu_notify,
+};
+
+/*
+ * splice the completion data to a local structure and hand off to
+ * process_completion_queue() to complete the requests
+ */
+static void blk_done_softirq(struct softirq_action *h)
+{
+	struct list_head *cpu_list, local_list;
+
+	local_irq_disable();
+	cpu_list = &__get_cpu_var(blk_cpu_done);
+	list_replace_init(cpu_list, &local_list);
+	local_irq_enable();
+
+	while (!list_empty(&local_list)) {
+		struct request *rq;
+
+		rq = list_entry(local_list.next, struct request, donelist);
+		list_del_init(&rq->donelist);
+		rq->q->softirq_done_fn(rq);
+	}
+}
+
+/**
+ * blk_complete_request - end I/O on a request
+ * @req:      the request being processed
+ *
+ * Description:
+ *     Ends all I/O on a request. It does not handle partial completions,
+ *     unless the driver actually implements this in its completion callback
+ *     through requeueing. The actual completion happens out-of-order,
+ *     through a softirq handler. The user must have registered a completion
+ *     callback through blk_queue_softirq_done().
+ **/
+
+void blk_complete_request(struct request *req)
+{
+	struct list_head *cpu_list;
+	unsigned long flags;
+
+	BUG_ON(!req->q->softirq_done_fn);
+
+	local_irq_save(flags);
+
+	cpu_list = &__get_cpu_var(blk_cpu_done);
+	list_add_tail(&req->donelist, cpu_list);
+	raise_softirq_irqoff(BLOCK_SOFTIRQ);
+
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL(blk_complete_request);
+
+int __init blk_softirq_init(void)
+{
+	int i;
+
+	for_each_possible_cpu(i)
+		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
+
+	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
+	register_hotcpu_notifier(&blk_cpu_notifier);
+	return 0;
+}
+subsys_initcall(blk_softirq_init);
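
For context, a minimal sketch of how a block driver of this era consumes the interface above. The mydrv_* names are invented for illustration; blk_queue_softirq_done(), blk_complete_request() and blk_end_request() are the real block-layer calls involved (blk_end_request() is not part of this patch):

/*
 * Hypothetical usage sketch -- not part of this patch.
 */
#include <linux/blkdev.h>
#include <linux/errno.h>

/*
 * Runs in softirq context: blk_done_softirq() dequeues the request
 * from the per-CPU list and invokes q->softirq_done_fn, i.e. this.
 * blk_end_request() takes the queue lock itself, so it is safe here.
 */
static void mydrv_softirq_done(struct request *rq)
{
	int error = rq->errors ? -EIO : 0;

	blk_end_request(rq, error, rq->hard_nr_sectors << 9);
}

/*
 * Called from the driver's hard-IRQ handler when the hardware reports
 * a request finished: just hand it to the softirq machinery above.
 */
static void mydrv_hw_complete(struct request *rq)
{
	blk_complete_request(rq);
}

/*
 * At queue setup the completion callback must be registered, otherwise
 * the BUG_ON(!req->q->softirq_done_fn) in blk_complete_request() fires.
 */
static void mydrv_init_queue(struct request_queue *q)
{
	blk_queue_softirq_done(q, mydrv_softirq_done);
}

The point of the split is visible here: the hard-IRQ path only appends the request to the per-CPU blk_cpu_done list and raises BLOCK_SOFTIRQ, while the bio completion and queue-lock work happen later in blk_done_softirq() through the registered callback.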