author	Tejun Heo <tj@kernel.org>	2009-10-29 09:34:13 -0400
committer	Tejun Heo <tj@kernel.org>	2009-10-29 09:34:13 -0400
commit	1871e52c76dd95895caeb772f845a1718dcbcd75 (patch)
tree	49e8148326f65353e673204f427bd4545eb26c16
parent	0f5e4816dbf38ce9488e611ca2296925c1e90d5e (diff)
percpu: make percpu symbols under kernel/ and mm/ unique
This patch updates percpu related symbols under kernel/ and mm/ such
that percpu symbols are unique and don't clash with local symbols.
This serves two purposes: decreasing the possibility of global percpu
symbol collisions, and allowing the per_cpu__ prefix to be dropped
from percpu symbols.

* kernel/lockdep.c: s/lock_stats/cpu_lock_stats/

* kernel/sched.c: s/init_rt_rq/init_rt_rq_var/ (any better idea?)
                  s/sched_group_cpus/sched_groups/

* kernel/softirq.c: s/ksoftirqd/run_ksoftirqd/

* kernel/softlockup.c: s/(*)_timestamp/softlockup_\1_ts/
                       s/watchdog_task/softlockup_watchdog/
                       s/timestamp/ts/ for local variables

* kernel/time/timer_stats.c: s/lookup_lock/tstats_lookup_lock/

* mm/slab.c: s/reap_work/slab_reap_work/
             s/reap_node/slab_reap_node/

* mm/vmstat.c: local variable renamed to avoid collision with
               vmstat_work

Partly based on Rusty Russell's "alloc_percpu: rename percpu vars
which cause name clashes" patch.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: (slab/vmstat) Christoph Lameter <cl@linux-foundation.org>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Nick Piggin <npiggin@suse.de>
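
For background, a minimal sketch (hypothetical, not part of the patch)
of the failure mode these renames head off once the per_cpu__ prefix is
gone. With the prefix, the local variable below is harmless because the
accessor token-pastes per_cpu__ onto the name; without it, the accessor
resolves against the local instead of the percpu variable:

	#include <linux/percpu.h>

	/* percpu symbol with an overly generic name */
	static DEFINE_PER_CPU(unsigned long, reap_node);

	static unsigned long example(int cpu)
	{
		/* a local reusing the name shadows the percpu symbol
		 * once per_cpu() no longer pastes a prefix onto it */
		unsigned long reap_node = per_cpu(reap_node, cpu);

		return reap_node;
	}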
-rw-r--r--	kernel/lockdep.c	11
-rw-r--r--	kernel/sched.c	8
-rw-r--r--	kernel/softirq.c	4
-rw-r--r--	kernel/softlockup.c	54
-rw-r--r--	kernel/time/timer_stats.c	11
-rw-r--r--	mm/slab.c	18
-rw-r--r--	mm/vmstat.c	7
7 files changed, 57 insertions(+), 56 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 3815ac1d58b2..8631320a50d0 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -140,7 +140,8 @@ static inline struct lock_class *hlock_class(struct held_lock *hlock)
 }
 
 #ifdef CONFIG_LOCK_STAT
-static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
+static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS],
+		      cpu_lock_stats);
 
 static int lock_point(unsigned long points[], unsigned long ip)
 {
@@ -186,7 +187,7 @@ struct lock_class_stats lock_stats(struct lock_class *class)
 	memset(&stats, 0, sizeof(struct lock_class_stats));
 	for_each_possible_cpu(cpu) {
 		struct lock_class_stats *pcs =
-			&per_cpu(lock_stats, cpu)[class - lock_classes];
+			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
 
 		for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
 			stats.contention_point[i] += pcs->contention_point[i];
@@ -213,7 +214,7 @@ void clear_lock_stats(struct lock_class *class)
 
 	for_each_possible_cpu(cpu) {
 		struct lock_class_stats *cpu_stats =
-			&per_cpu(lock_stats, cpu)[class - lock_classes];
+			&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
 
 		memset(cpu_stats, 0, sizeof(struct lock_class_stats));
 	}
@@ -223,12 +224,12 @@ void clear_lock_stats(struct lock_class *class)
 
 static struct lock_class_stats *get_lock_stats(struct lock_class *class)
 {
-	return &get_cpu_var(lock_stats)[class - lock_classes];
+	return &get_cpu_var(cpu_lock_stats)[class - lock_classes];
 }
 
 static void put_lock_stats(struct lock_class_stats *stats)
 {
-	put_cpu_var(lock_stats);
+	put_cpu_var(cpu_lock_stats);
 }
 
 static void lock_release_holdtime(struct held_lock *hlock)
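
The get_lock_stats()/put_lock_stats() pair above is the standard
get_cpu_var()/put_cpu_var() bracket. A minimal sketch of the idiom with
a hypothetical variable (demo_counter is made up):

	#include <linux/percpu.h>

	static DEFINE_PER_CPU(unsigned long, demo_counter);

	static void demo_inc(void)
	{
		/* get_cpu_var() disables preemption and yields this
		 * CPU's instance; put_cpu_var() re-enables it, so the
		 * task cannot migrate between the two calls. */
		get_cpu_var(demo_counter)++;
		put_cpu_var(demo_counter);
	}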
diff --git a/kernel/sched.c b/kernel/sched.c
index 1535f3884b88..854ab418fd42 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -298,7 +298,7 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct cfs_rq, init_tg_cfs_rq);
 
 #ifdef CONFIG_RT_GROUP_SCHED
 static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq_var);
 #endif /* CONFIG_RT_GROUP_SCHED */
 #else /* !CONFIG_USER_SCHED */
 #define root_task_group init_task_group
@@ -8199,14 +8199,14 @@ enum s_alloc {
  */
 #ifdef CONFIG_SCHED_SMT
 static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains);
-static DEFINE_PER_CPU(struct static_sched_group, sched_group_cpus);
+static DEFINE_PER_CPU(struct static_sched_group, sched_groups);
 
 static int
 cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map,
 		 struct sched_group **sg, struct cpumask *unused)
 {
 	if (sg)
-		*sg = &per_cpu(sched_group_cpus, cpu).sg;
+		*sg = &per_cpu(sched_groups, cpu).sg;
 	return cpu;
 }
 #endif /* CONFIG_SCHED_SMT */
@@ -9470,7 +9470,7 @@ void __init sched_init(void)
 #elif defined CONFIG_USER_SCHED
 		init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, 0, NULL);
 		init_tg_rt_entry(&init_task_group,
-				&per_cpu(init_rt_rq, i),
+				&per_cpu(init_rt_rq_var, i),
 				&per_cpu(init_sched_rt_entity, i), i, 1,
 				root_task_group.rt_se[i]);
 #endif
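
Note that init_rt_rq_var keeps the DEFINE_PER_CPU_SHARED_ALIGNED
variant, which places each CPU's copy in the cacheline-aligned percpu
section so hot per-CPU data does not false-share a cache line with a
neighbouring CPU's copy. A hypothetical sketch of the same pattern
(demo_rq/demo_runqueues are made up):

	#include <linux/percpu.h>

	struct demo_rq {
		unsigned long nr_running;
		unsigned long clock;
	};
	/* one cacheline-aligned instance per possible CPU */
	static DEFINE_PER_CPU_SHARED_ALIGNED(struct demo_rq, demo_runqueues);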
diff --git a/kernel/softirq.c b/kernel/softirq.c
index f8749e5216e0..0740dfd55c51 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -697,7 +697,7 @@ void __init softirq_init(void)
 	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
 }
 
-static int ksoftirqd(void * __bind_cpu)
+static int run_ksoftirqd(void * __bind_cpu)
 {
 	set_current_state(TASK_INTERRUPTIBLE);
 
@@ -810,7 +810,7 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
 	switch (action) {
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
-		p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
+		p = kthread_create(run_ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
 		if (IS_ERR(p)) {
 			printk("ksoftirqd for %i failed\n", hotcpu);
 			return NOTIFY_BAD;
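
Renaming the thread function to run_ksoftirqd() frees the bare name
ksoftirqd for percpu use. The surrounding hotplug pattern, a per-CPU
kthread created and then bound to the incoming CPU, looks roughly like
this sketch (hypothetical names; demo_task, demo_fn and
demo_up_prepare are made up):

	#include <linux/err.h>
	#include <linux/kthread.h>
	#include <linux/percpu.h>

	static DEFINE_PER_CPU(struct task_struct *, demo_task);

	static int demo_up_prepare(int hotcpu, int (*demo_fn)(void *))
	{
		struct task_struct *p;

		p = kthread_create(demo_fn, NULL, "demo/%d", hotcpu);
		if (IS_ERR(p))
			return PTR_ERR(p);
		kthread_bind(p, hotcpu);	/* pin to the incoming CPU */
		per_cpu(demo_task, hotcpu) = p;
		return 0;
	}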
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 81324d12eb35..d22579087e27 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -22,9 +22,9 @@
 
 static DEFINE_SPINLOCK(print_lock);
 
-static DEFINE_PER_CPU(unsigned long, touch_timestamp);
-static DEFINE_PER_CPU(unsigned long, print_timestamp);
-static DEFINE_PER_CPU(struct task_struct *, watchdog_task);
+static DEFINE_PER_CPU(unsigned long, softlockup_touch_ts); /* touch timestamp */
+static DEFINE_PER_CPU(unsigned long, softlockup_print_ts); /* print timestamp */
+static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
 
 static int __read_mostly did_panic;
 int __read_mostly softlockup_thresh = 60;
@@ -70,12 +70,12 @@ static void __touch_softlockup_watchdog(void)
 {
 	int this_cpu = raw_smp_processor_id();
 
-	__raw_get_cpu_var(touch_timestamp) = get_timestamp(this_cpu);
+	__raw_get_cpu_var(softlockup_touch_ts) = get_timestamp(this_cpu);
 }
 
 void touch_softlockup_watchdog(void)
 {
-	__raw_get_cpu_var(touch_timestamp) = 0;
+	__raw_get_cpu_var(softlockup_touch_ts) = 0;
 }
 EXPORT_SYMBOL(touch_softlockup_watchdog);
 
@@ -85,7 +85,7 @@ void touch_all_softlockup_watchdogs(void)
 
 	/* Cause each CPU to re-update its timestamp rather than complain */
 	for_each_online_cpu(cpu)
-		per_cpu(touch_timestamp, cpu) = 0;
+		per_cpu(softlockup_touch_ts, cpu) = 0;
 }
 EXPORT_SYMBOL(touch_all_softlockup_watchdogs);
 
@@ -104,28 +104,28 @@ int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
 void softlockup_tick(void)
 {
 	int this_cpu = smp_processor_id();
-	unsigned long touch_timestamp = per_cpu(touch_timestamp, this_cpu);
-	unsigned long print_timestamp;
+	unsigned long touch_ts = per_cpu(softlockup_touch_ts, this_cpu);
+	unsigned long print_ts;
 	struct pt_regs *regs = get_irq_regs();
 	unsigned long now;
 
 	/* Is detection switched off? */
-	if (!per_cpu(watchdog_task, this_cpu) || softlockup_thresh <= 0) {
+	if (!per_cpu(softlockup_watchdog, this_cpu) || softlockup_thresh <= 0) {
 		/* Be sure we don't false trigger if switched back on */
-		if (touch_timestamp)
-			per_cpu(touch_timestamp, this_cpu) = 0;
+		if (touch_ts)
+			per_cpu(softlockup_touch_ts, this_cpu) = 0;
 		return;
 	}
 
-	if (touch_timestamp == 0) {
+	if (touch_ts == 0) {
 		__touch_softlockup_watchdog();
 		return;
 	}
 
-	print_timestamp = per_cpu(print_timestamp, this_cpu);
+	print_ts = per_cpu(softlockup_print_ts, this_cpu);
 
 	/* report at most once a second */
-	if (print_timestamp == touch_timestamp || did_panic)
+	if (print_ts == touch_ts || did_panic)
 		return;
 
 	/* do not print during early bootup: */
@@ -140,18 +140,18 @@ void softlockup_tick(void)
 	 * Wake up the high-prio watchdog task twice per
 	 * threshold timespan.
 	 */
-	if (now > touch_timestamp + softlockup_thresh/2)
-		wake_up_process(per_cpu(watchdog_task, this_cpu));
+	if (now > touch_ts + softlockup_thresh/2)
+		wake_up_process(per_cpu(softlockup_watchdog, this_cpu));
 
 	/* Warn about unreasonable delays: */
-	if (now <= (touch_timestamp + softlockup_thresh))
+	if (now <= (touch_ts + softlockup_thresh))
 		return;
 
-	per_cpu(print_timestamp, this_cpu) = touch_timestamp;
+	per_cpu(softlockup_print_ts, this_cpu) = touch_ts;
 
 	spin_lock(&print_lock);
 	printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %lus! [%s:%d]\n",
-			this_cpu, now - touch_timestamp,
+			this_cpu, now - touch_ts,
 			current->comm, task_pid_nr(current));
 	print_modules();
 	print_irqtrace_events(current);
@@ -209,32 +209,32 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 	switch (action) {
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
-		BUG_ON(per_cpu(watchdog_task, hotcpu));
+		BUG_ON(per_cpu(softlockup_watchdog, hotcpu));
 		p = kthread_create(watchdog, hcpu, "watchdog/%d", hotcpu);
 		if (IS_ERR(p)) {
 			printk(KERN_ERR "watchdog for %i failed\n", hotcpu);
 			return NOTIFY_BAD;
 		}
-		per_cpu(touch_timestamp, hotcpu) = 0;
-		per_cpu(watchdog_task, hotcpu) = p;
+		per_cpu(softlockup_touch_ts, hotcpu) = 0;
+		per_cpu(softlockup_watchdog, hotcpu) = p;
 		kthread_bind(p, hotcpu);
 		break;
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
-		wake_up_process(per_cpu(watchdog_task, hotcpu));
+		wake_up_process(per_cpu(softlockup_watchdog, hotcpu));
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_UP_CANCELED:
 	case CPU_UP_CANCELED_FROZEN:
-		if (!per_cpu(watchdog_task, hotcpu))
+		if (!per_cpu(softlockup_watchdog, hotcpu))
 			break;
 		/* Unbind so it can run.  Fall thru. */
-		kthread_bind(per_cpu(watchdog_task, hotcpu),
+		kthread_bind(per_cpu(softlockup_watchdog, hotcpu),
 			     cpumask_any(cpu_online_mask));
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
-		p = per_cpu(watchdog_task, hotcpu);
-		per_cpu(watchdog_task, hotcpu) = NULL;
+		p = per_cpu(softlockup_watchdog, hotcpu);
+		per_cpu(softlockup_watchdog, hotcpu) = NULL;
 		kthread_stop(p);
 		break;
 #endif /* CONFIG_HOTPLUG_CPU */
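
The renamed timestamps drive two thresholds in softlockup_tick() above.
A worked example with softlockup_thresh = 60 and touch_ts = 100 (all in
seconds, values made up): once now exceeds 130 (touch_ts + thresh/2)
the per-CPU watchdog task is woken so it can refresh the timestamp;
only if now also exceeds 160 (touch_ts + thresh) is the soft-lockup BUG
printed, and softlockup_print_ts is then set to touch_ts so the same
stall is reported only once.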
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
index ee5681f8d7ec..63b117e9eba1 100644
--- a/kernel/time/timer_stats.c
+++ b/kernel/time/timer_stats.c
@@ -86,7 +86,7 @@ static DEFINE_SPINLOCK(table_lock);
 /*
  * Per-CPU lookup locks for fast hash lookup:
  */
-static DEFINE_PER_CPU(spinlock_t, lookup_lock);
+static DEFINE_PER_CPU(spinlock_t, tstats_lookup_lock);
 
 /*
  * Mutex to serialize state changes with show-stats activities:
@@ -245,7 +245,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
 	if (likely(!timer_stats_active))
 		return;
 
-	lock = &per_cpu(lookup_lock, raw_smp_processor_id());
+	lock = &per_cpu(tstats_lookup_lock, raw_smp_processor_id());
 
 	input.timer = timer;
 	input.start_func = startf;
@@ -348,9 +348,10 @@ static void sync_access(void)
 	int cpu;
 
 	for_each_online_cpu(cpu) {
-		spin_lock_irqsave(&per_cpu(lookup_lock, cpu), flags);
+		spinlock_t *lock = &per_cpu(tstats_lookup_lock, cpu);
+		spin_lock_irqsave(lock, flags);
 		/* nothing */
-		spin_unlock_irqrestore(&per_cpu(lookup_lock, cpu), flags);
+		spin_unlock_irqrestore(lock, flags);
 	}
 }
 
@@ -408,7 +409,7 @@ void __init init_timer_stats(void)
 	int cpu;
 
 	for_each_possible_cpu(cpu)
-		spin_lock_init(&per_cpu(lookup_lock, cpu));
+		spin_lock_init(&per_cpu(tstats_lookup_lock, cpu));
 }
 
 static int __init init_tstats_procfs(void)
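
The sync_access() hunk above also illustrates a quiescence trick: every
updater runs under its own CPU's tstats_lookup_lock, so taking and
immediately releasing each lock in turn guarantees that any update
which started before the sweep has finished by the end of it. A minimal
sketch with a hypothetical lock (demo_lock and demo_wait_for_updaters
are made up):

	#include <linux/percpu.h>
	#include <linux/spinlock.h>

	static DEFINE_PER_CPU(spinlock_t, demo_lock);

	static void demo_wait_for_updaters(void)
	{
		unsigned long flags;
		int cpu;

		for_each_online_cpu(cpu) {
			spinlock_t *lock = &per_cpu(demo_lock, cpu);

			/* acquiring the lock is itself the barrier */
			spin_lock_irqsave(lock, flags);
			spin_unlock_irqrestore(lock, flags);
		}
	}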
diff --git a/mm/slab.c b/mm/slab.c
index 7dfa481c96ba..211b1746c63c 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -685,7 +685,7 @@ int slab_is_available(void)
 	return g_cpucache_up >= EARLY;
 }
 
-static DEFINE_PER_CPU(struct delayed_work, reap_work);
+static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
 
 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 {
@@ -826,7 +826,7 @@ __setup("noaliencache", noaliencache_setup);
  * objects freed on different nodes from which they were allocated) and the
  * flushing of remote pcps by calling drain_node_pages.
  */
-static DEFINE_PER_CPU(unsigned long, reap_node);
+static DEFINE_PER_CPU(unsigned long, slab_reap_node);
 
 static void init_reap_node(int cpu)
 {
@@ -836,17 +836,17 @@ static void init_reap_node(int cpu)
 	if (node == MAX_NUMNODES)
 		node = first_node(node_online_map);
 
-	per_cpu(reap_node, cpu) = node;
+	per_cpu(slab_reap_node, cpu) = node;
 }
 
 static void next_reap_node(void)
 {
-	int node = __get_cpu_var(reap_node);
+	int node = __get_cpu_var(slab_reap_node);
 
 	node = next_node(node, node_online_map);
 	if (unlikely(node >= MAX_NUMNODES))
 		node = first_node(node_online_map);
-	__get_cpu_var(reap_node) = node;
+	__get_cpu_var(slab_reap_node) = node;
 }
 
 #else
@@ -863,7 +863,7 @@ static void next_reap_node(void)
  */
 static void __cpuinit start_cpu_timer(int cpu)
 {
-	struct delayed_work *reap_work = &per_cpu(reap_work, cpu);
+	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
 
 	/*
 	 * When this gets called from do_initcalls via cpucache_init(),
@@ -1027,7 +1027,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
  */
 static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
 {
-	int node = __get_cpu_var(reap_node);
+	int node = __get_cpu_var(slab_reap_node);
 
 	if (l3->alien) {
 		struct array_cache *ac = l3->alien[node];
@@ -1286,9 +1286,9 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 		 * anything expensive but will only modify reap_work
 		 * and reschedule the timer.
 		 */
-		cancel_rearming_delayed_work(&per_cpu(reap_work, cpu));
+		cancel_rearming_delayed_work(&per_cpu(slab_reap_work, cpu));
 		/* Now the cache_reaper is guaranteed to be not running. */
-		per_cpu(reap_work, cpu).work.func = NULL;
+		per_cpu(slab_reap_work, cpu).work.func = NULL;
 		break;
 	case CPU_DOWN_FAILED:
 	case CPU_DOWN_FAILED_FROZEN:
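
The slab_reap_node cursor above round-robins across online NUMA nodes
so each reap tick drains remote allocations for one node at a time. A
hypothetical standalone version of the wrap-around rule
(demo_advance_node is made up):

	#include <linux/nodemask.h>

	/* with online nodes {0, 1}: 0 -> 1 -> 0 -> 1 ... */
	static int demo_advance_node(int node)
	{
		node = next_node(node, node_online_map);
		if (node >= MAX_NUMNODES)
			node = first_node(node_online_map);
		return node;
	}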
diff --git a/mm/vmstat.c b/mm/vmstat.c
index c81321f9feec..dad2327e4580 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -883,11 +883,10 @@ static void vmstat_update(struct work_struct *w)
 
 static void __cpuinit start_cpu_timer(int cpu)
 {
-	struct delayed_work *vmstat_work = &per_cpu(vmstat_work, cpu);
+	struct delayed_work *work = &per_cpu(vmstat_work, cpu);
 
-	INIT_DELAYED_WORK_DEFERRABLE(vmstat_work, vmstat_update);
-	schedule_delayed_work_on(cpu, vmstat_work,
-		__round_jiffies_relative(HZ, cpu));
+	INIT_DELAYED_WORK_DEFERRABLE(work, vmstat_update);
+	schedule_delayed_work_on(cpu, work, __round_jiffies_relative(HZ, cpu));
 }
 
 /*
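
In the vmstat hunk, __round_jiffies_relative(HZ, cpu) rounds the
one-second delay so the timer expires on a whole-second jiffy boundary,
skewed by the CPU number so the per-CPU timers fire staggered rather
than all at once. A sketch of the same start-up pattern with a
hypothetical work item (demo_work, demo_update and demo_start_timer are
made up):

	#include <linux/percpu.h>
	#include <linux/timer.h>
	#include <linux/workqueue.h>

	static DEFINE_PER_CPU(struct delayed_work, demo_work);

	static void demo_update(struct work_struct *w)
	{
		/* periodic per-CPU work would go here */
	}

	static void demo_start_timer(int cpu)
	{
		struct delayed_work *work = &per_cpu(demo_work, cpu);

		INIT_DELAYED_WORK_DEFERRABLE(work, demo_update);
		/* queue on one specific CPU, staggered per CPU */
		schedule_delayed_work_on(cpu, work,
					 __round_jiffies_relative(HZ, cpu));
	}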