Diffstat (limited to 'block/blk-softirq.c')
-rw-r--r--	block/blk-softirq.c	175
 1 file changed, 175 insertions(+), 0 deletions(-)
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
new file mode 100644
index 000000000000..e660d26ca656
--- /dev/null
+++ b/block/blk-softirq.c
@@ -0,0 +1,175 @@
/*
 * Functions related to softirq rq completions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>

#include "blk.h"

static DEFINE_PER_CPU(struct list_head, blk_cpu_done);

/*
 * Softirq action handler - move entries to a local list and loop over them
 * while passing them to the queue's registered completion handler.
 */
static void blk_done_softirq(struct softirq_action *h)
{
	struct list_head *cpu_list, local_list;

	local_irq_disable();
	cpu_list = &__get_cpu_var(blk_cpu_done);
	list_replace_init(cpu_list, &local_list);
	local_irq_enable();

	while (!list_empty(&local_list)) {
		struct request *rq;

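		/* requests are chained through their call_single_data list node */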
		rq = list_entry(local_list.next, struct request, csd.list);
		list_del_init(&rq->csd.list);
		rq->q->softirq_done_fn(rq);
	}
}

#if defined(CONFIG_SMP) && defined(CONFIG_USE_GENERIC_SMP_HELPERS)
static void trigger_softirq(void *data)
{
	struct request *rq = data;
	unsigned long flags;
	struct list_head *list;

	local_irq_save(flags);
	list = &__get_cpu_var(blk_cpu_done);
	list_add_tail(&rq->csd.list, list);

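	/*
	 * Only raise the softirq if our request made the list non-empty;
	 * otherwise a raise is already pending and will pick up our entry.
	 */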
	if (list->next == &rq->csd.list)
		raise_softirq_irqoff(BLOCK_SOFTIRQ);

	local_irq_restore(flags);
}

/*
 * Set up and invoke a run of 'trigger_softirq' on the given cpu.
 */
static int raise_blk_irq(int cpu, struct request *rq)
{
	if (cpu_online(cpu)) {
		struct call_single_data *data = &rq->csd;

		data->func = trigger_softirq;
		data->info = rq;
		data->flags = 0;

		__smp_call_function_single(cpu, data);
		return 0;
	}

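	/* @cpu is offline, so the caller must complete the request locally */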
	return 1;
}
#else /* CONFIG_SMP && CONFIG_USE_GENERIC_SMP_HELPERS */
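/*
 * Without the generic SMP helpers there is no way to IPI another CPU,
 * so always fall back to local completion.
 */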
static int raise_blk_irq(int cpu, struct request *rq)
{
	return 1;
}
#endif

static int __cpuinit blk_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	/*
	 * If a CPU goes away, splice its entries to the current CPU
	 * and trigger a run of the softirq
	 */
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		int cpu = (unsigned long) hcpu;

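		/* the dead CPU is offline, so its list can be spliced without locking */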
		local_irq_disable();
		list_splice_init(&per_cpu(blk_cpu_done, cpu),
				 &__get_cpu_var(blk_cpu_done));
		raise_softirq_irqoff(BLOCK_SOFTIRQ);
		local_irq_enable();
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata blk_cpu_notifier = {
	.notifier_call = blk_cpu_notify,
};

void __blk_complete_request(struct request *req)
{
	struct request_queue *q = req->q;
	unsigned long flags;
	int ccpu, cpu, group_cpu;

	BUG_ON(!q->softirq_done_fn);

	local_irq_save(flags);
	cpu = smp_processor_id();
	group_cpu = blk_cpu_to_group(cpu);

	/*
	 * Select completion CPU: the submitting CPU if the queue asks for
	 * same-CPU completion and it is known, otherwise the local CPU.
	 */
	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) && req->cpu != -1)
		ccpu = req->cpu;
	else
		ccpu = cpu;

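	/* completing locally is equivalent if the target shares our CPU group */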
	if (ccpu == cpu || ccpu == group_cpu) {
		struct list_head *list;
do_local:
		list = &__get_cpu_var(blk_cpu_done);
		list_add_tail(&req->csd.list, list);

		/*
		 * If the list only contains our just added request, signal
		 * a raise of the softirq.  If there are already entries
		 * there, the softirq has already been raised and simply
		 * hasn't run yet.
		 */
		if (list->next == &req->csd.list)
			raise_softirq_irqoff(BLOCK_SOFTIRQ);
	} else if (raise_blk_irq(ccpu, req))
		goto do_local;

	local_irq_restore(flags);
}

/**
 * blk_complete_request - end I/O on a request
 * @req:	the request being processed
 *
 * Description:
 *     Ends all I/O on a request. It does not handle partial completions,
 *     unless the driver actually implements this in its completion callback
 *     through requeueing. The actual completion happens out-of-order,
 *     through a softirq handler. The user must have registered a completion
 *     callback through blk_queue_softirq_done().
 **/
void blk_complete_request(struct request *req)
{
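	/* fault injection can swallow the completion to exercise the timeout path */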
	if (unlikely(blk_should_fake_timeout(req->q)))
		return;
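	/* complete only once; the timeout handler may race with this path */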
	if (!blk_mark_rq_complete(req))
		__blk_complete_request(req);
}
EXPORT_SYMBOL(blk_complete_request);

__init int blk_softirq_init(void)
{
	int i;

	for_each_possible_cpu(i)
		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));

	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
	register_hotcpu_notifier(&blk_cpu_notifier);
	return 0;
}
subsys_initcall(blk_softirq_init);
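
For context, the sketch below shows how a block driver is expected to consume this interface, following the blk_complete_request() description above. It is a minimal illustration and not part of this patch: the device structure and function names (mydev, mydev_isr, mydev_softirq_done, mydev_init_queue) are hypothetical, locking and real hardware handling are omitted, and it assumes the contemporary single-queue helpers blk_queue_softirq_done(), blk_end_request() and blk_rq_bytes().

#include <linux/blkdev.h>
#include <linux/interrupt.h>

struct mydev {
	struct request_queue *queue;	/* hypothetical driver state */
	struct request *rq_in_flight;
};

/* Invoked from blk_done_softirq() in BLOCK_SOFTIRQ context */
static void mydev_softirq_done(struct request *rq)
{
	/* do the heavy lifting now that we are out of hard IRQ context */
	blk_end_request(rq, 0, blk_rq_bytes(rq));
}

/* Hard IRQ handler: stay short and defer completion to the softirq */
static irqreturn_t mydev_isr(int irq, void *dev_id)
{
	struct mydev *dev = dev_id;
	struct request *rq = dev->rq_in_flight;

	if (!rq)
		return IRQ_NONE;

	dev->rq_in_flight = NULL;
	blk_complete_request(rq);	/* queues rq and raises BLOCK_SOFTIRQ */
	return IRQ_HANDLED;
}

static void mydev_init_queue(struct mydev *dev)
{
	/* register the softirq_done_fn that blk_done_softirq() will call */
	blk_queue_softirq_done(dev->queue, mydev_softirq_done);
}

Deferring blk_end_request() to the softirq keeps interrupt-off time short, and with rq_affinity enabled (QUEUE_FLAG_SAME_COMP set and a valid req->cpu) the completion work runs on the CPU that submitted the request, where its data is most likely still cache-hot.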