author     Linus Torvalds <torvalds@linux-foundation.org>    2012-03-20 13:31:44 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2012-03-20 13:31:44 -0400
commit     2ba68940c893c8f0bfc8573c041254251bb6aeab (patch)
tree       fa83ebb01d32abd98123fa28f9f6f0b3eaeee25d /block
parent     9c2b957db1772ebf942ae7a9346b14eba6c8ca66 (diff)
parent     600e145882802d6ccbfe2c4aea243d97caeb91a9 (diff)
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler changes for v3.4 from Ingo Molnar
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (27 commits)
printk: Make it compile with !CONFIG_PRINTK
sched/x86: Fix overflow in cyc2ns_offset
sched: Fix nohz load accounting -- again!
sched: Update yield() docs
printk/sched: Introduce special printk_sched() for those awkward moments
sched/nohz: Correctly initialize 'next_balance' in 'nohz' idle balancer
sched: Cleanup cpu_active madness
sched: Fix load-balance wreckage
sched: Clean up parameter passing of proc_sched_autogroup_set_nice()
sched: Ditch per cgroup task lists for load-balancing
sched: Rename load-balancing fields
sched: Move load-balancing arguments into helper struct
sched/rt: Do not submit new work when PI-blocked
sched/rt: Prevent idle task boosting
sched/wait: Add __wake_up_all_locked() API
sched/rt: Document scheduler related skip-resched-check sites
sched/rt: Use schedule_preempt_disabled()
sched/rt: Add schedule_preempt_disabled()
sched/rt: Do not throttle when PI boosting
sched/rt: Keep period timer ticking when rt throttling is active
...
Diffstat (limited to 'block')
-rw-r--r--  block/blk-softirq.c  16
-rw-r--r--  block/blk.h          16
2 files changed, 8 insertions, 24 deletions
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index 1366a89d8e66..467c8de88642 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -8,6 +8,7 @@
 #include <linux/blkdev.h>
 #include <linux/interrupt.h>
 #include <linux/cpu.h>
+#include <linux/sched.h>
 
 #include "blk.h"
 
@@ -103,9 +104,10 @@ static struct notifier_block __cpuinitdata blk_cpu_notifier = {
 
 void __blk_complete_request(struct request *req)
 {
-	int ccpu, cpu, group_cpu = NR_CPUS;
+	int ccpu, cpu;
 	struct request_queue *q = req->q;
 	unsigned long flags;
+	bool shared = false;
 
 	BUG_ON(!q->softirq_done_fn);
 
@@ -117,22 +119,20 @@ void __blk_complete_request(struct request *req)
 	 */
 	if (req->cpu != -1) {
 		ccpu = req->cpu;
-		if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags)) {
-			ccpu = blk_cpu_to_group(ccpu);
-			group_cpu = blk_cpu_to_group(cpu);
-		}
+		if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags))
+			shared = cpus_share_cache(cpu, ccpu);
 	} else
 		ccpu = cpu;
 
 	/*
-	 * If current CPU and requested CPU are in the same group, running
-	 * softirq in current CPU. One might concern this is just like
+	 * If current CPU and requested CPU share a cache, run the softirq on
+	 * the current CPU. One might concern this is just like
 	 * QUEUE_FLAG_SAME_FORCE, but actually not. blk_complete_request() is
 	 * running in interrupt handler, and currently I/O controller doesn't
 	 * support multiple interrupts, so current CPU is unique actually. This
 	 * avoids IPI sending from current CPU to the first CPU of a group.
 	 */
-	if (ccpu == cpu || ccpu == group_cpu) {
+	if (ccpu == cpu || shared) {
 		struct list_head *list;
 do_local:
 		list = &__get_cpu_var(blk_cpu_done);
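
The hunk above swaps the block layer's per-group comparison for a single cpus_share_cache() query from the scheduler. As a rough, non-authoritative illustration of the decision __blk_complete_request() now makes, here is a small standalone C sketch (userspace toy, not kernel code: cpus_share_cache_stub() and its two-CPUs-per-cache rule are assumptions made up for the example):

/* Standalone sketch of the new completion-routing decision; the stub and
 * its "two CPUs per cache" rule are illustrative assumptions only. */
#include <stdbool.h>
#include <stdio.h>

/* stand-in for cpus_share_cache(); the kernel answers this from its
 * scheduler-domain topology data */
static bool cpus_share_cache_stub(int a, int b)
{
	return (a / 2) == (b / 2);
}

/* true: run the completion softirq locally; false: raise an IPI to ccpu */
static bool complete_locally(int cpu, int req_cpu, bool same_force)
{
	int ccpu = (req_cpu != -1) ? req_cpu : cpu;
	bool shared = false;

	if (req_cpu != -1 && !same_force)
		shared = cpus_share_cache_stub(cpu, ccpu);

	return ccpu == cpu || shared;
}

int main(void)
{
	printf("%d\n", complete_locally(0, 1, false)); /* 1: same cache, stay local */
	printf("%d\n", complete_locally(0, 3, false)); /* 0: different cache, IPI */
	printf("%d\n", complete_locally(0, 1, true));  /* 0: SAME_FORCE honours req->cpu */
	return 0;
}

The point of the change is visible in the stub: the block layer no longer maintains its own notion of a CPU group, it only asks whether completing locally keeps the work on the same cache as the submitting CPU.
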
diff --git a/block/blk.h b/block/blk.h
index 9c12f80882b0..d45be871329e 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -166,22 +166,6 @@ static inline int queue_congestion_off_threshold(struct request_queue *q)
 	return q->nr_congestion_off;
 }
 
-static inline int blk_cpu_to_group(int cpu)
-{
-	int group = NR_CPUS;
-#ifdef CONFIG_SCHED_MC
-	const struct cpumask *mask = cpu_coregroup_mask(cpu);
-	group = cpumask_first(mask);
-#elif defined(CONFIG_SCHED_SMT)
-	group = cpumask_first(topology_thread_cpumask(cpu));
-#else
-	return cpu;
-#endif
-	if (likely(group < NR_CPUS))
-		return group;
-	return cpu;
-}
-
 /*
  * Contribute to IO statistics IFF:
  *
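
The helper removed above derived a "group leader" CPU from the MC/SMT topology so that callers could compare leaders. A minimal sketch of the old-versus-new check (toy userspace code; the cache_group[] table is invented for illustration) shows why a direct shared-cache predicate makes the intermediate group id unnecessary:

#include <stdbool.h>
#include <stdio.h>

/* toy topology: cache_group[cpu] plays the role of cpu_coregroup_mask() */
static const int cache_group[4] = { 0, 0, 1, 1 };

/* old style: map each CPU to the first CPU of its group, then compare */
static int cpu_to_group(int cpu)
{
	for (int i = 0; i < 4; i++)
		if (cache_group[i] == cache_group[cpu])
			return i;	/* first CPU found in the same group */
	return cpu;
}

/* new style: ask the question directly */
static bool share_cache(int a, int b)
{
	return cache_group[a] == cache_group[b];
}

int main(void)
{
	int cpu = 0, ccpu = 1;
	printf("%d %d\n",
	       cpu_to_group(cpu) == cpu_to_group(ccpu),
	       share_cache(cpu, ccpu));	/* both print 1 */
	return 0;
}
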