about summary refs log tree commit diff stats
path: root/arch/x86/kernel/cpu
diff options
context:
space:
mode:
author	Tejun Heo <tj@kernel.org>	2009-09-14 20:57:19 -0400
committer	Tejun Heo <tj@kernel.org>	2009-09-14 20:57:19 -0400
commit	5579fd7e6aed8860ea0c8e3f11897493153b10ad (patch)
tree	8f797ccd0f1a2c88f1605ae9e90b3ac17485de27 /arch/x86/kernel/cpu
parent	04a13c7c632e1fe04a5f6e6c83565d2559e37598 (diff)
parent	c2a7e818019f20a5cf7fb26a6eb59e212e6c0cd8 (diff)
Merge branch 'for-next' into for-linus
* pcpu_chunk_page_occupied() doesn't exist in for-next.
* pcpu_chunk_addr_search() updated to use raw_smp_processor_id().

Conflicts:
	mm/percpu.c
Diffstat (limited to 'arch/x86/kernel/cpu')
-rw-r--r--	arch/x86/kernel/cpu/cpu_debug.c	| 4
-rw-r--r--	arch/x86/kernel/cpu/mcheck/mce.c	| 8
-rw-r--r--	arch/x86/kernel/cpu/mcheck/mce_amd.c	| 2
-rw-r--r--	arch/x86/kernel/cpu/perf_counter.c	| 14
4 files changed, 14 insertions, 14 deletions
diff --git a/arch/x86/kernel/cpu/cpu_debug.c b/arch/x86/kernel/cpu/cpu_debug.c
index 6b2a52dd0403..dca325c03999 100644
--- a/arch/x86/kernel/cpu/cpu_debug.c
+++ b/arch/x86/kernel/cpu/cpu_debug.c
@@ -30,8 +30,8 @@
30#include <asm/apic.h> 30#include <asm/apic.h>
31#include <asm/desc.h> 31#include <asm/desc.h>
32 32
33static DEFINE_PER_CPU(struct cpu_cpuX_base, cpu_arr[CPU_REG_ALL_BIT]); 33static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpu_arr);
34static DEFINE_PER_CPU(struct cpu_private *, priv_arr[MAX_CPU_FILES]); 34static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], priv_arr);
35static DEFINE_PER_CPU(int, cpu_priv_count); 35static DEFINE_PER_CPU(int, cpu_priv_count);
36 36
37static DEFINE_MUTEX(cpu_debug_lock); 37static DEFINE_MUTEX(cpu_debug_lock);
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 1cfb623ce11c..14ce5d49b2ad 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1091,7 +1091,7 @@ void mce_log_therm_throt_event(__u64 status)
1091 */ 1091 */
1092static int check_interval = 5 * 60; /* 5 minutes */ 1092static int check_interval = 5 * 60; /* 5 minutes */
1093 1093
1094static DEFINE_PER_CPU(int, next_interval); /* in jiffies */ 1094static DEFINE_PER_CPU(int, mce_next_interval); /* in jiffies */
1095static DEFINE_PER_CPU(struct timer_list, mce_timer); 1095static DEFINE_PER_CPU(struct timer_list, mce_timer);
1096 1096
1097static void mcheck_timer(unsigned long data) 1097static void mcheck_timer(unsigned long data)
@@ -1110,7 +1110,7 @@ static void mcheck_timer(unsigned long data)
1110 * Alert userspace if needed. If we logged an MCE, reduce the 1110 * Alert userspace if needed. If we logged an MCE, reduce the
1111 * polling interval, otherwise increase the polling interval. 1111 * polling interval, otherwise increase the polling interval.
1112 */ 1112 */
1113 n = &__get_cpu_var(next_interval); 1113 n = &__get_cpu_var(mce_next_interval);
1114 if (mce_notify_irq()) 1114 if (mce_notify_irq())
1115 *n = max(*n/2, HZ/100); 1115 *n = max(*n/2, HZ/100);
1116 else 1116 else
@@ -1311,7 +1311,7 @@ static void mce_cpu_features(struct cpuinfo_x86 *c)
1311static void mce_init_timer(void) 1311static void mce_init_timer(void)
1312{ 1312{
1313 struct timer_list *t = &__get_cpu_var(mce_timer); 1313 struct timer_list *t = &__get_cpu_var(mce_timer);
1314 int *n = &__get_cpu_var(next_interval); 1314 int *n = &__get_cpu_var(mce_next_interval);
1315 1315
1316 if (mce_ignore_ce) 1316 if (mce_ignore_ce)
1317 return; 1317 return;
@@ -1912,7 +1912,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
1912 case CPU_DOWN_FAILED: 1912 case CPU_DOWN_FAILED:
1913 case CPU_DOWN_FAILED_FROZEN: 1913 case CPU_DOWN_FAILED_FROZEN:
1914 t->expires = round_jiffies(jiffies + 1914 t->expires = round_jiffies(jiffies +
1915 __get_cpu_var(next_interval)); 1915 __get_cpu_var(mce_next_interval));
1916 add_timer_on(t, cpu); 1916 add_timer_on(t, cpu);
1917 smp_call_function_single(cpu, mce_reenable_cpu, &action, 1); 1917 smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
1918 break; 1918 break;
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index ddae21620bda..bd2a2fa84628 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -69,7 +69,7 @@ struct threshold_bank {
69 struct threshold_block *blocks; 69 struct threshold_block *blocks;
70 cpumask_var_t cpus; 70 cpumask_var_t cpus;
71}; 71};
72static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]); 72static DEFINE_PER_CPU(struct threshold_bank * [NR_BANKS], threshold_banks);
73 73
74#ifdef CONFIG_SMP 74#ifdef CONFIG_SMP
75static unsigned char shared_bank[NR_BANKS] = { 75static unsigned char shared_bank[NR_BANKS] = {
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 900332b800f8..3d4ebbd2e129 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -976,7 +976,7 @@ amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
976 x86_pmu_disable_counter(hwc, idx); 976 x86_pmu_disable_counter(hwc, idx);
977} 977}
978 978
979static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]); 979static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
980 980
981/* 981/*
982 * Set the next IRQ period, based on the hwc->period_left value. 982 * Set the next IRQ period, based on the hwc->period_left value.
@@ -1015,7 +1015,7 @@ x86_perf_counter_set_period(struct perf_counter *counter,
1015 if (left > x86_pmu.max_period) 1015 if (left > x86_pmu.max_period)
1016 left = x86_pmu.max_period; 1016 left = x86_pmu.max_period;
1017 1017
1018 per_cpu(prev_left[idx], smp_processor_id()) = left; 1018 per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
1019 1019
1020 /* 1020 /*
1021 * The hw counter starts counting from this counter offset, 1021 * The hw counter starts counting from this counter offset,
@@ -1211,7 +1211,7 @@ void perf_counter_print_debug(void)
1211 rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl); 1211 rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
1212 rdmsrl(x86_pmu.perfctr + idx, pmc_count); 1212 rdmsrl(x86_pmu.perfctr + idx, pmc_count);
1213 1213
1214 prev_left = per_cpu(prev_left[idx], cpu); 1214 prev_left = per_cpu(pmc_prev_left[idx], cpu);
1215 1215
1216 pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n", 1216 pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n",
1217 cpu, idx, pmc_ctrl); 1217 cpu, idx, pmc_ctrl);
@@ -1798,8 +1798,8 @@ void callchain_store(struct perf_callchain_entry *entry, u64 ip)
1798 entry->ip[entry->nr++] = ip; 1798 entry->ip[entry->nr++] = ip;
1799} 1799}
1800 1800
1801static DEFINE_PER_CPU(struct perf_callchain_entry, irq_entry); 1801static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
1802static DEFINE_PER_CPU(struct perf_callchain_entry, nmi_entry); 1802static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
1803static DEFINE_PER_CPU(int, in_nmi_frame); 1803static DEFINE_PER_CPU(int, in_nmi_frame);
1804 1804
1805 1805
@@ -1952,9 +1952,9 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
1952 struct perf_callchain_entry *entry; 1952 struct perf_callchain_entry *entry;
1953 1953
1954 if (in_nmi()) 1954 if (in_nmi())
1955 entry = &__get_cpu_var(nmi_entry); 1955 entry = &__get_cpu_var(pmc_nmi_entry);
1956 else 1956 else
1957 entry = &__get_cpu_var(irq_entry); 1957 entry = &__get_cpu_var(pmc_irq_entry);
1958 1958
1959 entry->nr = 0; 1959 entry->nr = 0;
1960 1960