 arch/x86/kernel/cpu/mcheck/mce.c       |  8 ++++----
 arch/x86/kernel/cpu/perf_counter.c     | 14 +++++++-------
 block/as-iosched.c                     | 10 +++++-----
 block/cfq-iosched.c                    | 10 +++++-----
 drivers/cpufreq/cpufreq_conservative.c | 12 ++++++------
 drivers/cpufreq/cpufreq_ondemand.c     | 15 ++++++++-------
 drivers/xen/events.c                   |  9 +++++----
 kernel/perf_counter.c                  |  6 +++---
 kernel/trace/trace_events.c            |  6 +++---
 mm/kmemleak-test.c                     |  6 +++---
 mm/page-writeback.c                    |  5 +++--
 net/ipv4/syncookies.c                  |  5 +++--
 net/ipv6/syncookies.c                  |  5 +++--
 13 files changed, 58 insertions(+), 53 deletions(-)
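
Every hunk below applies the same mechanical change: a file-scope per-CPU variable with a generic name gains a subsystem prefix (mce_, pmc_, as_, cfq_, cs_, od_, xed_, perf_, ftrace_, kmemleak_, bdp_, ipv4_/ipv6_). Per-CPU definitions share a single global namespace, so identically named definitions in different files, such as ioc_count in both block/as-iosched.c and block/cfq-iosched.c, or cookie_scratch in both syncookie implementations, can collide; the prefixes make each symbol unique. As a minimal sketch of the idiom being renamed (example_count and the two helpers are illustrative, not from this patch):

#include <linux/percpu.h>

/* Illustrative only: "example_count" plays the role of the renamed
 * variables (mce_next_interval, as_ioc_count, ...). */
static DEFINE_PER_CPU(unsigned long, example_count);

static void bump_local(void)
{
	/* __get_cpu_var(): this CPU's copy; the caller is expected to
	 * have preemption disabled so the CPU cannot change under us. */
	__get_cpu_var(example_count)++;
}

static unsigned long read_remote(int cpu)
{
	/* per_cpu(): any CPU's copy, selected explicitly. */
	return per_cpu(example_count, cpu);
}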
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 284d1de968bc..cba8cd3e957b 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1091,7 +1091,7 @@ void mce_log_therm_throt_event(__u64 status)
  */
 static int check_interval = 5 * 60; /* 5 minutes */
 
-static DEFINE_PER_CPU(int, next_interval); /* in jiffies */
+static DEFINE_PER_CPU(int, mce_next_interval); /* in jiffies */
 static DEFINE_PER_CPU(struct timer_list, mce_timer);
 
 static void mcheck_timer(unsigned long data)
@@ -1110,7 +1110,7 @@ static void mcheck_timer(unsigned long data)
 	 * Alert userspace if needed. If we logged an MCE, reduce the
 	 * polling interval, otherwise increase the polling interval.
 	 */
-	n = &__get_cpu_var(next_interval);
+	n = &__get_cpu_var(mce_next_interval);
 	if (mce_notify_irq())
 		*n = max(*n/2, HZ/100);
 	else
@@ -1311,7 +1311,7 @@ static void mce_cpu_features(struct cpuinfo_x86 *c)
 static void mce_init_timer(void)
 {
 	struct timer_list *t = &__get_cpu_var(mce_timer);
-	int *n = &__get_cpu_var(next_interval);
+	int *n = &__get_cpu_var(mce_next_interval);
 
 	if (mce_ignore_ce)
 		return;
@@ -1914,7 +1914,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 	case CPU_DOWN_FAILED:
 	case CPU_DOWN_FAILED_FROZEN:
 		t->expires = round_jiffies(jiffies +
-				__get_cpu_var(next_interval));
+				__get_cpu_var(mce_next_interval));
 		add_timer_on(t, cpu);
 		smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
 		break;
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 4946288d6832..5fdf63aaaba1 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -862,7 +862,7 @@ amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
 	x86_pmu_disable_counter(hwc, idx);
 }
 
-static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], prev_left);
+static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
 
 /*
  * Set the next IRQ period, based on the hwc->period_left value.
@@ -901,7 +901,7 @@ x86_perf_counter_set_period(struct perf_counter *counter,
 	if (left > x86_pmu.max_period)
 		left = x86_pmu.max_period;
 
-	per_cpu(prev_left[idx], smp_processor_id()) = left;
+	per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
 
 	/*
 	 * The hw counter starts counting from this counter offset,
@@ -1089,7 +1089,7 @@ void perf_counter_print_debug(void)
 		rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
 		rdmsrl(x86_pmu.perfctr + idx, pmc_count);
 
-		prev_left = per_cpu(prev_left[idx], cpu);
+		prev_left = per_cpu(pmc_prev_left[idx], cpu);
 
 		pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n",
 			cpu, idx, pmc_ctrl);
@@ -1561,8 +1561,8 @@ void callchain_store(struct perf_callchain_entry *entry, u64 ip)
 	entry->ip[entry->nr++] = ip;
 }
 
-static DEFINE_PER_CPU(struct perf_callchain_entry, irq_entry);
-static DEFINE_PER_CPU(struct perf_callchain_entry, nmi_entry);
+static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
+static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
 
 
 static void
@@ -1709,9 +1709,9 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
 	struct perf_callchain_entry *entry;
 
 	if (in_nmi())
-		entry = &__get_cpu_var(nmi_entry);
+		entry = &__get_cpu_var(pmc_nmi_entry);
 	else
-		entry = &__get_cpu_var(irq_entry);
+		entry = &__get_cpu_var(pmc_irq_entry);
 
 	entry->nr = 0;
 
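
Note the array form used in this file: DEFINE_PER_CPU() accepts an array type, and per_cpu(pmc_prev_left[idx], cpu) selects element idx of cpu's copy. A hypothetical sketch of that pattern (example_slots and its size are invented for illustration):

#include <linux/percpu.h>

/* A per-CPU array: each CPU owns 8 u64 slots (the size is arbitrary). */
static DEFINE_PER_CPU(u64 [8], example_slots);

static u64 read_slot(int idx, int cpu)
{
	/* Index the array element first, then resolve it for one CPU. */
	return per_cpu(example_slots[idx], cpu);
}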
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 7a12cf6ee1d3..ce8ba57c6557 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -146,7 +146,7 @@ enum arq_state {
 #define RQ_STATE(rq)	((enum arq_state)(rq)->elevator_private2)
 #define RQ_SET_STATE(rq, state)	((rq)->elevator_private2 = (void *) state)
 
-static DEFINE_PER_CPU(unsigned long, ioc_count);
+static DEFINE_PER_CPU(unsigned long, as_ioc_count);
 static struct completion *ioc_gone;
 static DEFINE_SPINLOCK(ioc_gone_lock);
 
@@ -161,7 +161,7 @@ static void as_antic_stop(struct as_data *ad);
 static void free_as_io_context(struct as_io_context *aic)
 {
 	kfree(aic);
-	elv_ioc_count_dec(ioc_count);
+	elv_ioc_count_dec(as_ioc_count);
 	if (ioc_gone) {
 		/*
 		 * AS scheduler is exiting, grab exit lock and check
@@ -169,7 +169,7 @@ static void free_as_io_context(struct as_io_context *aic)
 		 * complete ioc_gone and set it back to NULL.
 		 */
 		spin_lock(&ioc_gone_lock);
-		if (ioc_gone && !elv_ioc_count_read(ioc_count)) {
+		if (ioc_gone && !elv_ioc_count_read(as_ioc_count)) {
 			complete(ioc_gone);
 			ioc_gone = NULL;
 		}
@@ -211,7 +211,7 @@ static struct as_io_context *alloc_as_io_context(void)
 		ret->seek_total = 0;
 		ret->seek_samples = 0;
 		ret->seek_mean = 0;
-		elv_ioc_count_inc(ioc_count);
+		elv_ioc_count_inc(as_ioc_count);
 	}
 
 	return ret;
@@ -1507,7 +1507,7 @@ static void __exit as_exit(void)
 	ioc_gone = &all_gone;
 	/* ioc_gone's update must be visible before reading ioc_count */
 	smp_wmb();
-	if (elv_ioc_count_read(ioc_count))
+	if (elv_ioc_count_read(as_ioc_count))
 		wait_for_completion(&all_gone);
 	synchronize_rcu();
 }
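
The as_exit() hunk sits inside a shutdown handshake that the rename leaves intact: the exit path publishes the ioc_gone completion, issues smp_wmb() so that store is visible before the count is sampled, and sleeps until the last free_as_io_context() fires it. A condensed sketch of the handshake, with a plain counter standing in for the per-CPU elv_ioc_count machinery:

#include <linux/completion.h>
#include <linux/spinlock.h>

static unsigned long obj_count;		/* stand-in for elv_ioc_count */
static struct completion *ioc_gone;
static DEFINE_SPINLOCK(ioc_gone_lock);

static void obj_release(void)
{
	obj_count--;
	if (ioc_gone) {
		/* The lock ensures exactly one releaser sees the count
		 * hit zero while ioc_gone is still set. */
		spin_lock(&ioc_gone_lock);
		if (ioc_gone && !obj_count) {
			complete(ioc_gone);
			ioc_gone = NULL;
		}
		spin_unlock(&ioc_gone_lock);
	}
}

static void exit_path(void)
{
	DECLARE_COMPLETION_ONSTACK(all_gone);

	ioc_gone = &all_gone;
	smp_wmb();	/* publish ioc_gone before sampling the count */
	if (obj_count)
		wait_for_completion(&all_gone);
}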
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 833ec18eaa63..0f1cc7d3855e 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -48,7 +48,7 @@ static int cfq_slice_idle = HZ / 125;
 static struct kmem_cache *cfq_pool;
 static struct kmem_cache *cfq_ioc_pool;
 
-static DEFINE_PER_CPU(unsigned long, ioc_count);
+static DEFINE_PER_CPU(unsigned long, cfq_ioc_count);
 static struct completion *ioc_gone;
 static DEFINE_SPINLOCK(ioc_gone_lock);
 
@@ -1422,7 +1422,7 @@ static void cfq_cic_free_rcu(struct rcu_head *head)
 	cic = container_of(head, struct cfq_io_context, rcu_head);
 
 	kmem_cache_free(cfq_ioc_pool, cic);
-	elv_ioc_count_dec(ioc_count);
+	elv_ioc_count_dec(cfq_ioc_count);
 
 	if (ioc_gone) {
 		/*
@@ -1431,7 +1431,7 @@ static void cfq_cic_free_rcu(struct rcu_head *head)
 		 * complete ioc_gone and set it back to NULL
 		 */
 		spin_lock(&ioc_gone_lock);
-		if (ioc_gone && !elv_ioc_count_read(ioc_count)) {
+		if (ioc_gone && !elv_ioc_count_read(cfq_ioc_count)) {
 			complete(ioc_gone);
 			ioc_gone = NULL;
 		}
@@ -1557,7 +1557,7 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 		INIT_HLIST_NODE(&cic->cic_list);
 		cic->dtor = cfq_free_io_context;
 		cic->exit = cfq_exit_io_context;
-		elv_ioc_count_inc(ioc_count);
+		elv_ioc_count_inc(cfq_ioc_count);
 	}
 
 	return cic;
@@ -2658,7 +2658,7 @@ static void __exit cfq_exit(void)
 	 * this also protects us from entering cfq_slab_kill() with
 	 * pending RCU callbacks
 	 */
-	if (elv_ioc_count_read(ioc_count))
+	if (elv_ioc_count_read(cfq_ioc_count))
 		wait_for_completion(&all_gone);
 	cfq_slab_kill();
 }
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 7fc58af748b4..a7ef465c83b9 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -65,7 +65,7 @@ struct cpu_dbs_info_s {
 	int cpu;
 	unsigned int enable:1;
 };
-static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
+static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info);
 
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
 
@@ -138,7 +138,7 @@ dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 			     void *data)
 {
 	struct cpufreq_freqs *freq = data;
-	struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info,
+	struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cs_cpu_dbs_info,
 							freq->cpu);
 
 	struct cpufreq_policy *policy;
@@ -298,7 +298,7 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 	/* we need to re-evaluate prev_cpu_idle */
 	for_each_online_cpu(j) {
 		struct cpu_dbs_info_s *dbs_info;
-		dbs_info = &per_cpu(cpu_dbs_info, j);
+		dbs_info = &per_cpu(cs_cpu_dbs_info, j);
 		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
 						&dbs_info->prev_cpu_wall);
 		if (dbs_tuners_ins.ignore_nice)
@@ -388,7 +388,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 		cputime64_t cur_wall_time, cur_idle_time;
 		unsigned int idle_time, wall_time;
 
-		j_dbs_info = &per_cpu(cpu_dbs_info, j);
+		j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);
 
 		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
 
@@ -528,7 +528,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 	unsigned int j;
 	int rc;
 
-	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
+	this_dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
 
 	switch (event) {
 	case CPUFREQ_GOV_START:
@@ -548,7 +548,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
 		for_each_cpu(j, policy->cpus) {
 			struct cpu_dbs_info_s *j_dbs_info;
-			j_dbs_info = &per_cpu(cpu_dbs_info, j);
+			j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);
 			j_dbs_info->cur_policy = policy;
 
 			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 1911d1729353..36f292a7bd01 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -73,7 +73,7 @@ struct cpu_dbs_info_s {
 	unsigned int enable:1,
 		sample_type:1;
 };
-static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
+static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info);
 
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
 
@@ -151,7 +151,8 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
 	unsigned int freq_hi, freq_lo;
 	unsigned int index = 0;
 	unsigned int jiffies_total, jiffies_hi, jiffies_lo;
-	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, policy->cpu);
+	struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
+						   policy->cpu);
 
 	if (!dbs_info->freq_table) {
 		dbs_info->freq_lo = 0;
@@ -196,7 +197,7 @@ static void ondemand_powersave_bias_init(void)
 {
 	int i;
 	for_each_online_cpu(i) {
-		struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, i);
+		struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, i);
 		dbs_info->freq_table = cpufreq_frequency_get_table(i);
 		dbs_info->freq_lo = 0;
 	}
@@ -297,7 +298,7 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 	/* we need to re-evaluate prev_cpu_idle */
 	for_each_online_cpu(j) {
 		struct cpu_dbs_info_s *dbs_info;
-		dbs_info = &per_cpu(cpu_dbs_info, j);
+		dbs_info = &per_cpu(od_cpu_dbs_info, j);
 		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
 						&dbs_info->prev_cpu_wall);
 		if (dbs_tuners_ins.ignore_nice)
@@ -391,7 +392,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 		unsigned int load, load_freq;
 		int freq_avg;
 
-		j_dbs_info = &per_cpu(cpu_dbs_info, j);
+		j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
 
 		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
 
@@ -548,7 +549,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 	unsigned int j;
 	int rc;
 
-	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
+	this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
 
 	switch (event) {
 	case CPUFREQ_GOV_START:
@@ -570,7 +571,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
 		for_each_cpu(j, policy->cpus) {
 			struct cpu_dbs_info_s *j_dbs_info;
-			j_dbs_info = &per_cpu(cpu_dbs_info, j);
+			j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
 			j_dbs_info->cur_policy = policy;
 
 			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index ab581fa62681..7d2987e9b1bb 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -602,6 +602,8 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+static DEFINE_PER_CPU(unsigned, xed_nesting_count);
+
 /*
  * Search the CPUs pending events bitmasks. For each one found, map
  * the event number to an irq, and feed it into do_IRQ() for
@@ -617,7 +619,6 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
 	struct pt_regs *old_regs = set_irq_regs(regs);
 	struct shared_info *s = HYPERVISOR_shared_info;
 	struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
-	static DEFINE_PER_CPU(unsigned, nesting_count);
 	unsigned count;
 
 	exit_idle();
@@ -628,7 +629,7 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
 
 	vcpu_info->evtchn_upcall_pending = 0;
 
-	if (__get_cpu_var(nesting_count)++)
+	if (__get_cpu_var(xed_nesting_count)++)
 		goto out;
 
 #ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
@@ -653,8 +654,8 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
 
 		BUG_ON(!irqs_disabled());
 
-		count = __get_cpu_var(nesting_count);
-		__get_cpu_var(nesting_count) = 0;
+		count = __get_cpu_var(xed_nesting_count);
+		__get_cpu_var(xed_nesting_count) = 0;
 	} while(count != 1);
 
 out:
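
Beyond the rename, the first two hunks in this file hoist the per-CPU definition out of xen_evtchn_do_upcall() to file scope (mm/page-writeback.c below gets the same treatment); a function-local static DEFINE_PER_CPU() is a global definition in disguise, so moving it to file scope puts the newly prefixed symbol where the variable actually lives. A simplified sketch of the nesting-counter idiom the hunks preserve (upcall_body() is a stand-in for the event-dispatch loop):

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned, xed_nesting_count);

static void upcall_body(void);		/* stand-in: dispatch pending events */

static void upcall(void)
{
	unsigned count;

	if (__get_cpu_var(xed_nesting_count)++)
		return;			/* nested entry: outer loop rescans */

	do {
		upcall_body();

		/* No atomics needed: same CPU, and the real code rereads
		 * the counter with interrupts disabled. */
		count = __get_cpu_var(xed_nesting_count);
		__get_cpu_var(xed_nesting_count) = 0;
	} while (count != 1);
}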
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 1a933a221ea4..1fd7a2e75754 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -98,16 +98,16 @@ hw_perf_group_sched_in(struct perf_counter *group_leader,
 
 void __weak perf_counter_print_debug(void) { }
 
-static DEFINE_PER_CPU(int, disable_count);
+static DEFINE_PER_CPU(int, perf_disable_count);
 
 void __perf_disable(void)
 {
-	__get_cpu_var(disable_count)++;
+	__get_cpu_var(perf_disable_count)++;
 }
 
 bool __perf_enable(void)
 {
-	return !--__get_cpu_var(disable_count);
+	return !--__get_cpu_var(perf_disable_count);
 }
 
 void perf_disable(void)
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index aa08be69a1b6..54b1de5074b6 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1318,7 +1318,7 @@ static __init void event_trace_self_tests(void)
 
 #ifdef CONFIG_FUNCTION_TRACER
 
-static DEFINE_PER_CPU(atomic_t, test_event_disable);
+static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
 
 static void
 function_test_events_call(unsigned long ip, unsigned long parent_ip)
@@ -1334,7 +1334,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
 	pc = preempt_count();
 	resched = ftrace_preempt_disable();
 	cpu = raw_smp_processor_id();
-	disabled = atomic_inc_return(&per_cpu(test_event_disable, cpu));
+	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
 
 	if (disabled != 1)
 		goto out;
@@ -1352,7 +1352,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
 	trace_nowake_buffer_unlock_commit(event, flags, pc);
 
  out:
-	atomic_dec(&per_cpu(test_event_disable, cpu));
+	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
 	ftrace_preempt_enable(resched);
 }
 
diff --git a/mm/kmemleak-test.c b/mm/kmemleak-test.c
index d5292fc6f523..177a5169bbde 100644
--- a/mm/kmemleak-test.c
+++ b/mm/kmemleak-test.c
@@ -36,7 +36,7 @@ struct test_node {
 };
 
 static LIST_HEAD(test_list);
-static DEFINE_PER_CPU(void *, test_pointer);
+static DEFINE_PER_CPU(void *, kmemleak_test_pointer);
 
 /*
  * Some very simple testing. This function needs to be extended for
@@ -86,9 +86,9 @@ static int __init kmemleak_test_init(void)
 	}
 
 	for_each_possible_cpu(i) {
-		per_cpu(test_pointer, i) = kmalloc(129, GFP_KERNEL);
+		per_cpu(kmemleak_test_pointer, i) = kmalloc(129, GFP_KERNEL);
 		pr_info("kmemleak: kmalloc(129) = %p\n",
-			per_cpu(test_pointer, i));
+			per_cpu(kmemleak_test_pointer, i));
 	}
 
 	return 0;
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 7b0dcea4935b..2c075dcf03d4 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -607,6 +607,8 @@ void set_page_dirty_balance(struct page *page, int page_mkwrite)
 	}
 }
 
+static DEFINE_PER_CPU(unsigned long, bdp_ratelimits) = 0;
+
 /**
  * balance_dirty_pages_ratelimited_nr - balance dirty memory state
  * @mapping: address_space which was dirtied
@@ -624,7 +626,6 @@ void set_page_dirty_balance(struct page *page, int page_mkwrite)
 void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
 					unsigned long nr_pages_dirtied)
 {
-	static DEFINE_PER_CPU(unsigned long, ratelimits) = 0;
 	unsigned long ratelimit;
 	unsigned long *p;
 
@@ -637,7 +638,7 @@ void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
 	 * tasks in balance_dirty_pages(). Period.
 	 */
 	preempt_disable();
-	p = &__get_cpu_var(ratelimits);
+	p = &__get_cpu_var(bdp_ratelimits);
 	*p += nr_pages_dirtied;
 	if (unlikely(*p >= ratelimit)) {
 		*p = 0;
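
Same shape as the xen change above: the counter moves from function scope to file scope and becomes bdp_ratelimits. A condensed sketch of its role (the function name and threshold argument are illustrative; the real code derives the threshold and calls balance_dirty_pages()):

#include <linux/compiler.h>
#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(unsigned long, bdp_ratelimits);

static int should_balance(unsigned long nr_pages_dirtied,
			  unsigned long ratelimit)
{
	unsigned long *p;

	preempt_disable();		/* pin this CPU's counter */
	p = &__get_cpu_var(bdp_ratelimits);
	*p += nr_pages_dirtied;
	if (unlikely(*p >= ratelimit)) {
		*p = 0;			/* threshold hit: reset and balance */
		preempt_enable();
		return 1;		/* caller runs the balancing path */
	}
	preempt_enable();
	return 0;
}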
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 84d90f2799bb..a6e0e077ac33 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -37,12 +37,13 @@ __initcall(init_syncookies);
 #define COOKIEBITS 24	/* Upper bits store count */
 #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
 
-static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS], cookie_scratch);
+static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS],
+		      ipv4_cookie_scratch);
 
 static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
 		       u32 count, int c)
 {
-	__u32 *tmp = __get_cpu_var(cookie_scratch);
+	__u32 *tmp = __get_cpu_var(ipv4_cookie_scratch);
 
 	memcpy(tmp + 4, syncookie_secret[c], sizeof(syncookie_secret[c]));
 	tmp[0] = (__force u32)saddr;
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 23d0d6db0461..6b6ae913b5d4 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -74,12 +74,13 @@ static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
 	return child;
 }
 
-static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS], cookie_scratch);
+static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS],
+		      ipv6_cookie_scratch);
 
 static u32 cookie_hash(struct in6_addr *saddr, struct in6_addr *daddr,
 			__be16 sport, __be16 dport, u32 count, int c)
 {
-	__u32 *tmp = __get_cpu_var(cookie_scratch);
+	__u32 *tmp = __get_cpu_var(ipv6_cookie_scratch);
 
 	/*
 	 * we have 320 bits of information to hash, copy in the remaining
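
Both syncookie files declare the same scratch shape, now under distinct names. Assuming the sha_transform(digest, data, workspace) interface from <linux/cryptohash.h>, the [16 + 5 + SHA_WORKSPACE_WORDS] array packs all three arguments into one per-CPU buffer: words 0..15 hold the input block, 16..20 the digest, and the remainder the workspace. A hypothetical sketch (example_scratch and example_hash are invented names):

#include <linux/cryptohash.h>
#include <linux/percpu.h>
#include <linux/string.h>

static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS],
		      example_scratch);

static u32 example_hash(const __u32 *block)	/* 16 words of input */
{
	__u32 *tmp = __get_cpu_var(example_scratch);

	memcpy(tmp, block, 16 * sizeof(__u32));	/* tmp[0..15]: data */
	sha_transform(tmp + 16,			/* tmp[16..20]: digest */
		      (const char *)tmp,
		      tmp + 16 + 5);		/* remainder: workspace */
	return tmp[17];
}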