author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2012-01-26 06:44:34 -0500
committer	Ingo Molnar <mingo@elte.hu>	2012-01-27 07:28:48 -0500
commit	39be350127ec60a078edffe5b4915dafba4ba514 (patch)
tree	9b1ad6ee75c3b5842434b697b96ccdfbe1a40a2f /kernel/sched
parent	cb297a3e433dbdcf7ad81e0564e7b804c941ff0d (diff)
sched, block: Unify cache detection
The block layer has some code trying to determine if two CPUs share a cache; the scheduler has a similar function. Expose the function used by the scheduler and make the block layer use it, thereby removing the block layer's usage of CONFIG_SCHED* and topology bits.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Jens Axboe <axboe@kernel.dk>
Link: http://lkml.kernel.org/r/1327579450.2446.95.camel@twins
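The diffstat on this page is limited to kernel/sched, so neither the block-layer callers nor the header change that makes the helper visible outside the scheduler appear in the hunks below. As an illustration only, the export would roughly amount to a declaration plus a trivial !SMP fallback; this sketch is an assumption about the companion change, not one of the hunks shown here:

/* Illustrative sketch, assuming an include/linux/sched.h style placement;
 * not part of the kernel/sched hunks shown below.
 */
#ifdef CONFIG_SMP
bool cpus_share_cache(int this_cpu, int that_cpu);
#else
static inline bool cpus_share_cache(int this_cpu, int that_cpu)
{
	return true;	/* a single CPU trivially shares its own cache */
}
#endif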
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/core.c	6
1 file changed, 3 insertions, 3 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5255c9d2e05..d7c43227311 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1507,7 +1507,7 @@ static int ttwu_activate_remote(struct task_struct *p, int wake_flags)
 }
 #endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
 
-static inline int ttwu_share_cache(int this_cpu, int that_cpu)
+bool cpus_share_cache(int this_cpu, int that_cpu)
 {
 	return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
 }
@@ -1518,7 +1518,7 @@ static void ttwu_queue(struct task_struct *p, int cpu)
 	struct rq *rq = cpu_rq(cpu);
 
 #if defined(CONFIG_SMP)
-	if (sched_feat(TTWU_QUEUE) && !ttwu_share_cache(smp_processor_id(), cpu)) {
+	if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
 		sched_clock_cpu(cpu); /* sync clocks x-cpu */
 		ttwu_queue_remote(p, cpu);
 		return;
@@ -5754,7 +5754,7 @@ static void destroy_sched_domains(struct sched_domain *sd, int cpu)
  *
  * Also keep a unique ID per domain (we use the first cpu number in
  * the cpumask of the domain), this allows us to quickly tell if
- * two cpus are in the same cache domain, see ttwu_share_cache().
+ * two cpus are in the same cache domain, see cpus_share_cache().
  */
 DEFINE_PER_CPU(struct sched_domain *, sd_llc);
 DEFINE_PER_CPU(int, sd_llc_id);
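The block-layer half of the patch is outside this diffstat, but the intent of the export is that a caller which used to consult CONFIG_SCHED* and topology bits can now just ask the scheduler before paying for a cross-CPU IPI. A minimal sketch of such a caller, with a hypothetical helper name and the surrounding completion logic stripped away:

/* Hypothetical caller sketch; the function name is illustrative and the
 * real block-layer code is not shown in this diffstat.
 */
static bool want_remote_completion(int submit_cpu)
{
	int this_cpu = smp_processor_id();	/* assumes preemption is disabled */

	/* Same last-level cache: completing locally is cheap, skip the IPI. */
	if (cpus_share_cache(this_cpu, submit_cpu))
		return false;

	return true;
}

The cache-domain knowledge stays inside the scheduler as the per-CPU sd_llc_id, so callers no longer need their own topology probing.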