author     Tejun Heo <tj@kernel.org>    2009-10-29 09:34:13 -0400
committer  Tejun Heo <tj@kernel.org>    2009-10-29 09:34:13 -0400
commit     1871e52c76dd95895caeb772f845a1718dcbcd75 (patch)
tree       49e8148326f65353e673204f427bd4545eb26c16 /mm
parent     0f5e4816dbf38ce9488e611ca2296925c1e90d5e (diff)
percpu: make percpu symbols under kernel/ and mm/ unique
This patch updates percpu-related symbols under kernel/ and mm/ so
that percpu symbols are unique and don't clash with local symbols.
This serves two purposes: it decreases the possibility of global
percpu symbol collisions, and it allows the per_cpu__ prefix to be
dropped from percpu symbols.
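To make the clash concrete, here is a minimal sketch based on the
pre-rename mm/slab.c code (abridged for illustration, not the patched
code): per_cpu() historically token-pasted a per_cpu__ prefix onto its
argument, so the symbol behind per_cpu(reap_work, cpu) was
per_cpu__reap_work and the identically named local pointer could not
collide with it. Once that prefix is dropped, the macro references the
plain symbol reap_work, which the local declaration shadows, so the two
names have to be made distinct.

static DEFINE_PER_CPU(struct delayed_work, reap_work);	/* percpu symbol */

static void start_cpu_timer(int cpu)
{
	/* Local variable with the same name as the percpu symbol above. */
	struct delayed_work *reap_work = &per_cpu(reap_work, cpu);

	if (keventd_up() && reap_work->work.func == NULL) {
		init_reap_node(cpu);
		INIT_DELAYED_WORK_DEFERRABLE(reap_work, cache_reap);
		schedule_delayed_work_on(cpu, reap_work,
					 __round_jiffies_relative(HZ, cpu));
	}
}

After the rename, the percpu symbol is slab_reap_work while the local
pointer keeps its short name, so dropping the per_cpu__ prefix later is
safe. The renames, file by file: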
* kernel/lockdep.c: s/lock_stats/cpu_lock_stats/
* kernel/sched.c: s/init_rq_rt/init_rt_rq_var/ (any better idea?)
                  s/sched_group_cpus/sched_groups/
* kernel/softirq.c: s/ksoftirqd/run_ksoftirqd/
* kernel/softlockup.c: s/(*)_timestamp/softlockup_\1_ts/
                       s/watchdog_task/softlockup_watchdog/
                       s/timestamp/ts/ for local variables
* kernel/time/timer_stats: s/lookup_lock/tstats_lookup_lock/
* mm/slab.c: s/reap_work/slab_reap_work/
             s/reap_node/slab_reap_node/
* mm/vmstat.c: local variable changed to avoid collision with vmstat_work
Partly based on Rusty Russell's "alloc_percpu: rename percpu vars
which cause name clashes" patch.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: (slab/vmstat) Christoph Lameter <cl@linux-foundation.org>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Nick Piggin <npiggin@suse.de>
Diffstat (limited to 'mm')
 -rw-r--r--  mm/slab.c   | 18
 -rw-r--r--  mm/vmstat.c |  7
 2 files changed, 12 insertions(+), 13 deletions(-)
@@ -685,7 +685,7 @@ int slab_is_available(void)
 	return g_cpucache_up >= EARLY;
 }
 
-static DEFINE_PER_CPU(struct delayed_work, reap_work);
+static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
 
 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 {
@@ -826,7 +826,7 @@ __setup("noaliencache", noaliencache_setup);
  * objects freed on different nodes from which they were allocated) and the
  * flushing of remote pcps by calling drain_node_pages.
  */
-static DEFINE_PER_CPU(unsigned long, reap_node);
+static DEFINE_PER_CPU(unsigned long, slab_reap_node);
 
 static void init_reap_node(int cpu)
 {
@@ -836,17 +836,17 @@ static void init_reap_node(int cpu)
 	if (node == MAX_NUMNODES)
 		node = first_node(node_online_map);
 
-	per_cpu(reap_node, cpu) = node;
+	per_cpu(slab_reap_node, cpu) = node;
 }
 
 static void next_reap_node(void)
 {
-	int node = __get_cpu_var(reap_node);
+	int node = __get_cpu_var(slab_reap_node);
 
 	node = next_node(node, node_online_map);
 	if (unlikely(node >= MAX_NUMNODES))
 		node = first_node(node_online_map);
-	__get_cpu_var(reap_node) = node;
+	__get_cpu_var(slab_reap_node) = node;
 }
 
 #else
@@ -863,7 +863,7 @@ static void next_reap_node(void)
  */
 static void __cpuinit start_cpu_timer(int cpu)
 {
-	struct delayed_work *reap_work = &per_cpu(reap_work, cpu);
+	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
 
 	/*
 	 * When this gets called from do_initcalls via cpucache_init(),
@@ -1027,7 +1027,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
  */
 static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
 {
-	int node = __get_cpu_var(reap_node);
+	int node = __get_cpu_var(slab_reap_node);
 
 	if (l3->alien) {
 		struct array_cache *ac = l3->alien[node];
@@ -1286,9 +1286,9 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 		 * anything expensive but will only modify reap_work
 		 * and reschedule the timer.
 		 */
-		cancel_rearming_delayed_work(&per_cpu(reap_work, cpu));
+		cancel_rearming_delayed_work(&per_cpu(slab_reap_work, cpu));
 		/* Now the cache_reaper is guaranteed to be not running. */
-		per_cpu(reap_work, cpu).work.func = NULL;
+		per_cpu(slab_reap_work, cpu).work.func = NULL;
 		break;
 	case CPU_DOWN_FAILED:
 	case CPU_DOWN_FAILED_FROZEN:
diff --git a/mm/vmstat.c b/mm/vmstat.c
index c81321f9feec..dad2327e4580 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -883,11 +883,10 @@ static void vmstat_update(struct work_struct *w)
 
 static void __cpuinit start_cpu_timer(int cpu)
 {
-	struct delayed_work *vmstat_work = &per_cpu(vmstat_work, cpu);
+	struct delayed_work *work = &per_cpu(vmstat_work, cpu);
 
-	INIT_DELAYED_WORK_DEFERRABLE(vmstat_work, vmstat_update);
-	schedule_delayed_work_on(cpu, vmstat_work,
-		__round_jiffies_relative(HZ, cpu));
+	INIT_DELAYED_WORK_DEFERRABLE(work, vmstat_update);
+	schedule_delayed_work_on(cpu, work, __round_jiffies_relative(HZ, cpu));
 }
 
 /*