author    Tejun Heo <tj@kernel.org>  2009-06-24 02:13:48 -0400
committer Tejun Heo <tj@kernel.org>  2009-06-24 02:13:48 -0400
commit    245b2e70eabd797932adb263a65da0bab3711753 (patch)
tree      30f0b790dadd2b70bf06e534abcf66a76e97b05a /arch/x86
parent    b9bf3121af348d9255f1c917830fe8c2df52efcb (diff)
percpu: clean up percpu variable definitions
Percpu variable definition is about to be updated such that all percpu
symbols including the static ones must be unique. Update percpu
variable definitions accordingly.

* as,cfq: rename ioc_count uniquely
* cpufreq: rename cpu_dbs_info uniquely
* xen: move nesting_count out of xen_evtchn_do_upcall() and rename it
* mm: move ratelimits out of balance_dirty_pages_ratelimited_nr() and
  rename it
* ipv4,6: rename cookie_scratch uniquely
* x86 perf_counter: rename prev_left to pmc_prev_left, irq_entry to
  pmc_irq_entry and nmi_entry to pmc_nmi_entry
* perf_counter: rename disable_count to perf_disable_count
* ftrace: rename test_event_disable to ftrace_test_event_disable
* kmemleak: rename test_pointer to kmemleak_test_pointer
* mce: rename next_interval to mce_next_interval

[ Impact: percpu usage cleanups, no duplicate static percpu var names ]

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Dave Jones <davej@redhat.com>
Cc: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: linux-mm <linux-mm@kvack.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Steven Rostedt <srostedt@redhat.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Andi Kleen <andi@firstfloor.org>
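To illustrate the collision this cleanup avoids: after the upcoming
change, static percpu variables all land in a single global percpu
symbol namespace, so two files that each define a static percpu
variable with the same name would clash at link time. A minimal sketch
of the before/after pattern follows; the second file is hypothetical,
and only next_interval/mce_next_interval come from this commit:

    /* Before: fine for ordinary statics, but percpu symbols are about
     * to share one global namespace, so two translation units doing
     * this would collide at link time. */
    static DEFINE_PER_CPU(int, next_interval);      /* mce.c */
    static DEFINE_PER_CPU(int, next_interval);      /* other.c (hypothetical) */

    /* After: prefix with the owning subsystem to keep names unique. */
    static DEFINE_PER_CPU(int, mce_next_interval);  /* mce.c */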
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c    |  8 ++++----
-rw-r--r--  arch/x86/kernel/cpu/perf_counter.c  | 14 +++++++-------
2 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 284d1de968b..cba8cd3e957 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1091,7 +1091,7 @@ void mce_log_therm_throt_event(__u64 status)
  */
 static int check_interval = 5 * 60; /* 5 minutes */
 
-static DEFINE_PER_CPU(int, next_interval); /* in jiffies */
+static DEFINE_PER_CPU(int, mce_next_interval); /* in jiffies */
 static DEFINE_PER_CPU(struct timer_list, mce_timer);
 
 static void mcheck_timer(unsigned long data)
@@ -1110,7 +1110,7 @@ static void mcheck_timer(unsigned long data)
 	 * Alert userspace if needed. If we logged an MCE, reduce the
 	 * polling interval, otherwise increase the polling interval.
 	 */
-	n = &__get_cpu_var(next_interval);
+	n = &__get_cpu_var(mce_next_interval);
 	if (mce_notify_irq())
 		*n = max(*n/2, HZ/100);
 	else
@@ -1311,7 +1311,7 @@ static void mce_cpu_features(struct cpuinfo_x86 *c)
 static void mce_init_timer(void)
 {
 	struct timer_list *t = &__get_cpu_var(mce_timer);
-	int *n = &__get_cpu_var(next_interval);
+	int *n = &__get_cpu_var(mce_next_interval);
 
 	if (mce_ignore_ce)
 		return;
@@ -1914,7 +1914,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 	case CPU_DOWN_FAILED:
 	case CPU_DOWN_FAILED_FROZEN:
 		t->expires = round_jiffies(jiffies +
-				__get_cpu_var(next_interval));
+				__get_cpu_var(mce_next_interval));
 		add_timer_on(t, cpu);
 		smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
 		break;
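The mcheck_timer() hunk above is an adaptive poll: each firing halves
the per-CPU interval when mce_notify_irq() reports logged events
(bounded below by HZ/100) and otherwise backs off. A standalone C
sketch of that policy follows; HZ, the cap, and the growth branch are
assumptions for illustration, since the hunk only shows the halving
side:

    #define HZ           1000              /* ticks/sec (assumed for the sketch) */
    #define MAX_INTERVAL (5 * 60 * HZ)     /* mirrors check_interval = 5 minutes */

    static int next_interval = MAX_INTERVAL;

    static void adapt_interval(int logged_event)
    {
            if (logged_event) {
                    /* Events seen: poll twice as fast, but no faster
                     * than HZ/100 (10 ms at HZ=1000). */
                    next_interval /= 2;
                    if (next_interval < HZ / 100)
                            next_interval = HZ / 100;
            } else {
                    /* Quiet: back off toward the cap (doubling is an
                     * assumed policy, not shown in the hunk). */
                    next_interval *= 2;
                    if (next_interval > MAX_INTERVAL)
                            next_interval = MAX_INTERVAL;
            }
    }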
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 4946288d683..5fdf63aaaba 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -862,7 +862,7 @@ amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
 	x86_pmu_disable_counter(hwc, idx);
 }
 
-static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], prev_left);
+static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
 
 /*
  * Set the next IRQ period, based on the hwc->period_left value.
@@ -901,7 +901,7 @@ x86_perf_counter_set_period(struct perf_counter *counter,
 	if (left > x86_pmu.max_period)
 		left = x86_pmu.max_period;
 
-	per_cpu(prev_left[idx], smp_processor_id()) = left;
+	per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
 
 	/*
 	 * The hw counter starts counting from this counter offset,
@@ -1089,7 +1089,7 @@ void perf_counter_print_debug(void)
 		rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
 		rdmsrl(x86_pmu.perfctr + idx, pmc_count);
 
-		prev_left = per_cpu(prev_left[idx], cpu);
+		prev_left = per_cpu(pmc_prev_left[idx], cpu);
 
 		pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n",
 			cpu, idx, pmc_ctrl);
@@ -1561,8 +1561,8 @@ void callchain_store(struct perf_callchain_entry *entry, u64 ip)
 	entry->ip[entry->nr++] = ip;
 }
 
-static DEFINE_PER_CPU(struct perf_callchain_entry, irq_entry);
-static DEFINE_PER_CPU(struct perf_callchain_entry, nmi_entry);
+static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
+static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
 
 
 static void
@@ -1709,9 +1709,9 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
 	struct perf_callchain_entry *entry;
 
 	if (in_nmi())
-		entry = &__get_cpu_var(nmi_entry);
+		entry = &__get_cpu_var(pmc_nmi_entry);
 	else
-		entry = &__get_cpu_var(irq_entry);
+		entry = &__get_cpu_var(pmc_irq_entry);
 
 	entry->nr = 0;
 
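The last two hunks also show why there are separate pmc_irq_entry and
pmc_nmi_entry buffers per CPU: an NMI can arrive while the IRQ-context
entry is still being filled, so each context needs its own scratch
space to avoid clobbering a half-built callchain. A self-contained
sketch of that selection pattern follows; the array sizes, struct
layout, and pick_entry() helper are illustrative assumptions, not the
kernel's actual code:

    #define NR_CPUS         64             /* assumed CPU count for the sketch */
    #define MAX_STACK_DEPTH 64             /* assumed callchain depth */

    struct callchain_entry {
            unsigned int  nr;
            unsigned long ip[MAX_STACK_DEPTH];
    };

    /* One buffer per CPU per context: an NMI that interrupts an
     * IRQ-context unwind must not reuse the half-filled IRQ buffer. */
    static struct callchain_entry pmc_irq_entry[NR_CPUS];
    static struct callchain_entry pmc_nmi_entry[NR_CPUS];

    static struct callchain_entry *pick_entry(int cpu, int in_nmi)
    {
            struct callchain_entry *e =
                    in_nmi ? &pmc_nmi_entry[cpu] : &pmc_irq_entry[cpu];
            e->nr = 0;                     /* reset before each walk */
            return e;
    }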