commit     39be350127ec60a078edffe5b4915dafba4ba514
author:    Peter Zijlstra <a.p.zijlstra@chello.nl>  2012-01-26 06:44:34 -0500
committer: Ingo Molnar <mingo@elte.hu>              2012-01-27 07:28:48 -0500
tree:      9b1ad6ee75c3b5842434b697b96ccdfbe1a40a2f  /block/blk-softirq.c
parent:    cb297a3e433dbdcf7ad81e0564e7b804c941ff0d
sched, block: Unify cache detection
The block layer has some code trying to determine if two CPUs share a
cache, the scheduler has a similar function. Expose the function used
by the scheduler and make the block layer use it, thereby removing the
block layer's usage of CONFIG_SCHED* and topology bits.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Jens Axboe <axboe@kernel.dk>
Link: http://lkml.kernel.org/r/1327579450.2446.95.camel@twins
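For context: the diffstat below is limited to block/blk-softirq.c, so the other half of the change is not shown here. The commit drops the block layer's private topology guess (blk_cpu_to_group() in block/blk.h, built on CONFIG_SCHED_MC/CONFIG_SCHED_SMT) and exposes the scheduler's cache knowledge through <linux/sched.h> instead. A sketch of the two sides, simplified from the commit rather than quoted verbatim:

/* Removed: the block layer's own CONFIG_SCHED*-based group lookup. */
static inline int blk_cpu_to_group(int cpu)
{
#ifdef CONFIG_SCHED_MC
	const struct cpumask *mask = cpu_coregroup_mask(cpu);
	return cpumask_first(mask);
#elif defined(CONFIG_SCHED_SMT)
	return cpumask_first(topology_thread_cpumask(cpu));
#else
	return cpu;
#endif
}

/*
 * Added: the scheduler's helper, declared in <linux/sched.h>. Two CPUs
 * share a cache iff they map to the same last-level-cache domain id
 * (sd_llc_id is filled in when the sched domains are built).
 */
bool cpus_share_cache(int this_cpu, int that_cpu)
{
	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
}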
Diffstat (limited to 'block/blk-softirq.c')
 block/blk-softirq.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index 1366a89d8e66..467c8de88642 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -8,6 +8,7 @@
 #include <linux/blkdev.h>
 #include <linux/interrupt.h>
 #include <linux/cpu.h>
+#include <linux/sched.h>
 
 #include "blk.h"
 
@@ -103,9 +104,10 @@ static struct notifier_block __cpuinitdata blk_cpu_notifier = {
 
 void __blk_complete_request(struct request *req)
 {
-	int ccpu, cpu, group_cpu = NR_CPUS;
+	int ccpu, cpu;
 	struct request_queue *q = req->q;
 	unsigned long flags;
+	bool shared = false;
 
 	BUG_ON(!q->softirq_done_fn);
 
@@ -117,22 +119,20 @@ void __blk_complete_request(struct request *req)
 	 */
 	if (req->cpu != -1) {
 		ccpu = req->cpu;
-		if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags)) {
-			ccpu = blk_cpu_to_group(ccpu);
-			group_cpu = blk_cpu_to_group(cpu);
-		}
+		if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags))
+			shared = cpus_share_cache(cpu, ccpu);
 	} else
 		ccpu = cpu;
 
 	/*
-	 * If current CPU and requested CPU are in the same group, running
-	 * softirq in current CPU. One might concern this is just like
+	 * If current CPU and requested CPU share a cache, run the softirq on
+	 * the current CPU. One might concern this is just like
 	 * QUEUE_FLAG_SAME_FORCE, but actually not. blk_complete_request() is
 	 * running in interrupt handler, and currently I/O controller doesn't
 	 * support multiple interrupts, so current CPU is unique actually. This
 	 * avoids IPI sending from current CPU to the first CPU of a group.
 	 */
-	if (ccpu == cpu || ccpu == group_cpu) {
+	if (ccpu == cpu || shared) {
 		struct list_head *list;
 do_local:
 		list = &__get_cpu_var(blk_cpu_done);
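The hunk ends just inside the local-completion branch. For context, the function continues roughly as follows in the tree (a sketch of the surrounding, unchanged code, not part of this diff):

		/* Local completion: queue onto this CPU's done list. */
		list_add_tail(&req->csd.list, list);

		/*
		 * Only raise BLOCK_SOFTIRQ if our request is the first
		 * entry on the list; otherwise an earlier completion
		 * already raised it and it simply hasn't run yet.
		 */
		if (list->next == &req->csd.list)
			raise_softirq_irqoff(BLOCK_SOFTIRQ);
	} else if (raise_blk_irq(ccpu, req))
		/* Target CPU has gone offline: complete locally instead. */
		goto do_local;

	local_irq_restore(flags);

Note the semantics preserved by the rewrite: with QUEUE_FLAG_SAME_FORCE set, shared stays false, so completion is still forced onto req->cpu via the IPI path unless that already is the current CPU.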