diff options
author | Tejun Heo <tj@kernel.org> | 2009-10-29 09:34:13 -0400 |
---|---|---|
committer | Tejun Heo <tj@kernel.org> | 2009-10-29 09:34:13 -0400 |
commit | 1871e52c76dd95895caeb772f845a1718dcbcd75 (patch) | |
tree | 49e8148326f65353e673204f427bd4545eb26c16 /kernel/sched.c | |
parent | 0f5e4816dbf38ce9488e611ca2296925c1e90d5e (diff) |
percpu: make percpu symbols under kernel/ and mm/ unique
This patch updates percpu related symbols under kernel/ and mm/ such
that percpu symbols are unique and don't clash with local symbols.
This serves two purposes of decreasing the possibility of global
percpu symbol collision and allowing dropping per_cpu__ prefix from
percpu symbols.
* kernel/lockdep.c: s/lock_stats/cpu_lock_stats/
* kernel/sched.c: s/init_rt_rq/init_rt_rq_var/ (any better idea?)
s/sched_group_cpus/sched_groups/
* kernel/softirq.c: s/ksoftirqd/run_ksoftirqd/
* kernel/softlockup.c: s/(*)_timestamp/softlockup_\1_ts/
s/watchdog_task/softlockup_watchdog/
s/timestamp/ts/ for local variables
* kernel/time/timer_stats: s/lookup_lock/tstats_lookup_lock/
* mm/slab.c: s/reap_work/slab_reap_work/
s/reap_node/slab_reap_node/
* mm/vmstat.c: local variable changed to avoid collision with vmstat_work
Partly based on Rusty Russell's "alloc_percpu: rename percpu vars
which cause name clashes" patch.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: (slab/vmstat) Christoph Lameter <cl@linux-foundation.org>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Nick Piggin <npiggin@suse.de>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 8 |
1 file changed, 4 insertions, 4 deletions
diff --git a/kernel/sched.c b/kernel/sched.c index 1535f3884b88..854ab418fd42 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -298,7 +298,7 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct cfs_rq, init_tg_cfs_rq); | |||
298 | 298 | ||
299 | #ifdef CONFIG_RT_GROUP_SCHED | 299 | #ifdef CONFIG_RT_GROUP_SCHED |
300 | static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity); | 300 | static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity); |
301 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq); | 301 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq_var); |
302 | #endif /* CONFIG_RT_GROUP_SCHED */ | 302 | #endif /* CONFIG_RT_GROUP_SCHED */ |
303 | #else /* !CONFIG_USER_SCHED */ | 303 | #else /* !CONFIG_USER_SCHED */ |
304 | #define root_task_group init_task_group | 304 | #define root_task_group init_task_group |
@@ -8199,14 +8199,14 @@ enum s_alloc { | |||
8199 | */ | 8199 | */ |
8200 | #ifdef CONFIG_SCHED_SMT | 8200 | #ifdef CONFIG_SCHED_SMT |
8201 | static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains); | 8201 | static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains); |
8202 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_cpus); | 8202 | static DEFINE_PER_CPU(struct static_sched_group, sched_groups); |
8203 | 8203 | ||
8204 | static int | 8204 | static int |
8205 | cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map, | 8205 | cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map, |
8206 | struct sched_group **sg, struct cpumask *unused) | 8206 | struct sched_group **sg, struct cpumask *unused) |
8207 | { | 8207 | { |
8208 | if (sg) | 8208 | if (sg) |
8209 | *sg = &per_cpu(sched_group_cpus, cpu).sg; | 8209 | *sg = &per_cpu(sched_groups, cpu).sg; |
8210 | return cpu; | 8210 | return cpu; |
8211 | } | 8211 | } |
8212 | #endif /* CONFIG_SCHED_SMT */ | 8212 | #endif /* CONFIG_SCHED_SMT */ |
@@ -9470,7 +9470,7 @@ void __init sched_init(void) | |||
9470 | #elif defined CONFIG_USER_SCHED | 9470 | #elif defined CONFIG_USER_SCHED |
9471 | init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, 0, NULL); | 9471 | init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, 0, NULL); |
9472 | init_tg_rt_entry(&init_task_group, | 9472 | init_tg_rt_entry(&init_task_group, |
9473 | &per_cpu(init_rt_rq, i), | 9473 | &per_cpu(init_rt_rq_var, i), |
9474 | &per_cpu(init_sched_rt_entity, i), i, 1, | 9474 | &per_cpu(init_sched_rt_entity, i), i, 1, |
9475 | root_task_group.rt_se[i]); | 9475 | root_task_group.rt_se[i]); |
9476 | #endif | 9476 | #endif |