author     Jens Axboe <jens.axboe@oracle.com>    2008-09-13 14:26:01 -0400
committer  Jens Axboe <jens.axboe@oracle.com>    2008-10-09 02:56:09 -0400
commit     c7c22e4d5c1fdebfac4dba76de7d0338c2b0d832
tree       ecc3d2517b3471ccc35d4cb4e3b48d4b57205061 /block/blk-softirq.c
parent     18887ad910e56066233a07fd3cfb2fa11338b782
block: add support for IO CPU affinity
This patch adds support for controlling the IO completion CPU of
either all requests on a queue, or on a per-request basis. We export
a sysfs variable (rq_affinity) which, if set, migrates completions
of requests to the CPU that originally submitted them. A bio helper
(bio_set_completion_cpu()) is also added, so that queuers can ask
for completion on a specific CPU.

In testing, this has been shown to cut system time by as much as
20-40% on synthetic workloads where CPU affinity is desired.
This requires a little help from the architecture, so it'll only
work as designed for archs that are using the new generic smp
helper infrastructure.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
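
The queue-wide knob needs no driver changes: writing 1 to
/sys/block/<dev>/queue/rq_affinity sets the QUEUE_FLAG_SAME_COMP flag that
blk_complete_request() tests below. Per-request control goes through the bio
helper named above. A minimal sketch of that side, assuming the
bio_set_completion_cpu() helper added elsewhere in this series takes the bio
and a CPU number:

	/*
	 * Hypothetical submitter asking for completion on the submitting
	 * CPU; only bio_set_completion_cpu() and submit_bio() are real
	 * kernel interfaces here.
	 */
	static void submit_with_affinity(struct bio *bio)
	{
		int cpu = get_cpu();	/* pin the CPU while recording it */

		bio_set_completion_cpu(bio, cpu);
		put_cpu();

		submit_bio(READ, bio);
	}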
Diffstat (limited to 'block/blk-softirq.c')
-rw-r--r--    block/blk-softirq.c    126
1 file changed, 95 insertions, 31 deletions
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index 9e1c43bff662..3a1af551191e 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -13,6 +13,70 @@
 
 static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
 
+/*
+ * Softirq action handler - move entries to local list and loop over them
+ * while passing them to the queue registered handler.
+ */
+static void blk_done_softirq(struct softirq_action *h)
+{
+	struct list_head *cpu_list, local_list;
+
+	local_irq_disable();
+	cpu_list = &__get_cpu_var(blk_cpu_done);
+	list_replace_init(cpu_list, &local_list);
+	local_irq_enable();
+
+	while (!list_empty(&local_list)) {
+		struct request *rq;
+
+		rq = list_entry(local_list.next, struct request, csd.list);
+		list_del_init(&rq->csd.list);
+		rq->q->softirq_done_fn(rq);
+	}
+}
+
+#if defined(CONFIG_SMP) && defined(CONFIG_USE_GENERIC_SMP_HELPERS)
+static void trigger_softirq(void *data)
+{
+	struct request *rq = data;
+	unsigned long flags;
+	struct list_head *list;
+
+	local_irq_save(flags);
+	list = &__get_cpu_var(blk_cpu_done);
+	list_add_tail(&rq->csd.list, list);
+
+	if (list->next == &rq->csd.list)
+		raise_softirq_irqoff(BLOCK_SOFTIRQ);
+
+	local_irq_restore(flags);
+}
+
+/*
+ * Setup and invoke a run of 'trigger_softirq' on the given cpu.
+ */
+static int raise_blk_irq(int cpu, struct request *rq)
+{
+	if (cpu_online(cpu)) {
+		struct call_single_data *data = &rq->csd;
+
+		data->func = trigger_softirq;
+		data->info = rq;
+		data->flags = 0;
+
+		__smp_call_function_single(cpu, data);
+		return 0;
+	}
+
+	return 1;
+}
+#else /* CONFIG_SMP && CONFIG_USE_GENERIC_SMP_HELPERS */
+static int raise_blk_irq(int cpu, struct request *rq)
+{
+	return 1;
+}
+#endif
+
 static int __cpuinit blk_cpu_notify(struct notifier_block *self,
 				    unsigned long action, void *hcpu)
 {
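
The handler above only processes requests whose queue registered a softirq
completion callback, and blk_complete_request() is the producer that feeds
the per-cpu lists. A rough sketch of the driver side under those assumptions
(my_softirq_done(), my_irq_handler() and fetch_completed_request() are
hypothetical names; blk_queue_softirq_done(), blk_complete_request() and
blk_end_request() are the real entry points):

	#include <linux/blkdev.h>
	#include <linux/interrupt.h>

	/* Runs in softirq context, on the CPU blk_complete_request() chose */
	static void my_softirq_done(struct request *rq)
	{
		/* finish the request; error and byte count are driver-specific */
		blk_end_request(rq, 0, blk_rq_bytes(rq));
	}

	/* Hard irq handler: defer the heavy completion work to the softirq */
	static irqreturn_t my_irq_handler(int irq, void *dev_id)
	{
		struct request *rq = fetch_completed_request(dev_id); /* hypothetical */

		blk_complete_request(rq);
		return IRQ_HANDLED;
	}

	/* Queue setup: register the softirq completion callback */
	static void my_init_queue(struct request_queue *q)
	{
		blk_queue_softirq_done(q, my_softirq_done);
	}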
@@ -33,33 +97,10 @@ static int __cpuinit blk_cpu_notify(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
-
-static struct notifier_block blk_cpu_notifier __cpuinitdata = {
+static struct notifier_block __cpuinitdata blk_cpu_notifier = {
 	.notifier_call	= blk_cpu_notify,
 };
 
-/*
- * splice the completion data to a local structure and hand off to
- * process_completion_queue() to complete the requests
- */
-static void blk_done_softirq(struct softirq_action *h)
-{
-	struct list_head *cpu_list, local_list;
-
-	local_irq_disable();
-	cpu_list = &__get_cpu_var(blk_cpu_done);
-	list_replace_init(cpu_list, &local_list);
-	local_irq_enable();
-
-	while (!list_empty(&local_list)) {
-		struct request *rq;
-
-		rq = list_entry(local_list.next, struct request, donelist);
-		list_del_init(&rq->donelist);
-		rq->q->softirq_done_fn(rq);
-	}
-}
-
 /**
  * blk_complete_request - end I/O on a request
  * @req:      the request being processed
@@ -71,25 +112,48 @@ static void blk_done_softirq(struct softirq_action *h)
  * through a softirq handler. The user must have registered a completion
  * callback through blk_queue_softirq_done().
  **/
-
 void blk_complete_request(struct request *req)
 {
-	struct list_head *cpu_list;
+	struct request_queue *q = req->q;
 	unsigned long flags;
+	int ccpu, cpu, group_cpu;
 
-	BUG_ON(!req->q->softirq_done_fn);
+	BUG_ON(!q->softirq_done_fn);
 
 	local_irq_save(flags);
+	cpu = smp_processor_id();
+	group_cpu = blk_cpu_to_group(cpu);
 
-	cpu_list = &__get_cpu_var(blk_cpu_done);
-	list_add_tail(&req->donelist, cpu_list);
-	raise_softirq_irqoff(BLOCK_SOFTIRQ);
+	/*
+	 * Select completion CPU
+	 */
+	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) && req->cpu != -1)
+		ccpu = req->cpu;
+	else
+		ccpu = cpu;
+
+	if (ccpu == cpu || ccpu == group_cpu) {
+		struct list_head *list;
+do_local:
+		list = &__get_cpu_var(blk_cpu_done);
+		list_add_tail(&req->csd.list, list);
+
+		/*
+		 * if the list only contains our just added request,
+		 * signal a raise of the softirq. If there are already
+		 * entries there, someone already raised the irq but it
+		 * hasn't run yet.
+		 */
+		if (list->next == &req->csd.list)
+			raise_softirq_irqoff(BLOCK_SOFTIRQ);
+	} else if (raise_blk_irq(ccpu, req))
+		goto do_local;
 
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(blk_complete_request);
 
-int __init blk_softirq_init(void)
+__init int blk_softirq_init(void)
 {
 	int i;
 
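
One subtlety in the rewritten blk_complete_request(): a completion is treated
as local not only when ccpu == cpu, but also when the two CPUs fall in the
same topology group, so no IPI is issued between siblings that already share
cache. The blk_cpu_to_group() mapping comes from block/blk.h in this same
series; roughly (a sketch keyed off the scheduler's multi-core/SMT config
options):

	static inline int blk_cpu_to_group(int cpu)
	{
	#ifdef CONFIG_SCHED_MC
		cpumask_t mask = cpu_coregroup_map(cpu);
		return first_cpu(mask);
	#elif defined(CONFIG_SCHED_SMT)
		return first_cpu(per_cpu(cpu_sibling_map, cpu));
	#else
		return cpu;
	#endif
	}

Without CONFIG_SCHED_MC or CONFIG_SCHED_SMT this degenerates to the identity
mapping, and the group test adds nothing over the plain ccpu == cpu check.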