Diffstat (limited to 'arch/x86/kernel/cpu')

-rw-r--r--  arch/x86/kernel/cpu/amd.c                  |  4
-rw-r--r--  arch/x86/kernel/cpu/common.c               |  3
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c  | 32
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.h  |  3
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c           | 16
-rw-r--r--  arch/x86/kernel/cpu/perf_counter.c         | 30
-rw-r--r--  arch/x86/kernel/cpu/perfctr-watchdog.c     |  5

7 files changed, 42 insertions, 51 deletions
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index e5b27d8f1b4..28e5f595604 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -258,13 +258,15 @@ static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_HT
 	unsigned bits;
+	int cpu = smp_processor_id();
 
 	bits = c->x86_coreid_bits;
-
 	/* Low order bits define the core id (index of core in socket) */
 	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
 	/* Convert the initial APIC ID into the socket ID */
 	c->phys_proc_id = c->initial_apicid >> bits;
+	/* use socket ID also for last level cache */
+	per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
 #endif
 }
 
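Note on the hunk above: amd_detect_cmp() already split the initial APIC ID into a core index (low bits) and a socket ID (high bits); the addition also records that socket ID in the cpu_llc_id per-CPU variable, so code that groups CPUs by shared last-level cache sees one group per socket. A minimal standalone sketch of the bit math, using assumed sample values rather than real hardware data:

    #include <stdio.h>

    /* Decoding sketch for amd_detect_cmp(); the APIC ID and core-id
     * bit count below are hypothetical sample values. */
    int main(void)
    {
        unsigned initial_apicid = 0x13; /* hypothetical initial APIC ID */
        unsigned bits = 2;              /* stands in for c->x86_coreid_bits */

        /* Low order bits are the core id within the socket. */
        unsigned cpu_core_id = initial_apicid & ((1 << bits) - 1);  /* -> 3 */
        /* The remaining high bits are the socket id. */
        unsigned phys_proc_id = initial_apicid >> bits;             /* -> 4 */

        printf("core %u, socket %u\n", cpu_core_id, phys_proc_id);
        return 0;
    }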
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 6b26d4deada..f1961c07af9 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -848,9 +848,6 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 #if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
 	numa_add_cpu(smp_processor_id());
 #endif
-
-	/* Cap the iomem address space to what is addressable on all CPUs */
-	iomem_resource.end &= (1ULL << c->x86_phys_bits) - 1;
 }
 
 #ifdef CONFIG_X86_64
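Note on the hunk above: the deleted lines clamped iomem_resource.end to the CPU-reported physical address width; the revert restores the full resource range. A runnable illustration of what the removed mask did, with an assumed 36-bit width:

    #include <stdio.h>

    /* Illustration only: the 36-bit physical address width is an assumed
     * example value, and `end` stands in for iomem_resource.end. */
    int main(void)
    {
        unsigned x86_phys_bits = 36;
        unsigned long long end = ~0ULL;

        end &= (1ULL << x86_phys_bits) - 1;
        printf("capped end: %#llx\n", end);  /* 0xfffffffff, i.e. 64 GB - 1 */
        return 0;
    }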
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 81cbe64ed6b..2a50ef89100 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -299,7 +299,7 @@ static int transition_pstate(struct powernow_k8_data *data, u32 pstate)
 static int transition_fid_vid(struct powernow_k8_data *data,
 		u32 reqfid, u32 reqvid)
 {
-	if (core_voltage_pre_transition(data, reqvid))
+	if (core_voltage_pre_transition(data, reqvid, reqfid))
 		return 1;
 
 	if (core_frequency_transition(data, reqfid))
@@ -327,17 +327,20 @@ static int transition_fid_vid(struct powernow_k8_data *data,
 
 /* Phase 1 - core voltage transition ... setup voltage */
 static int core_voltage_pre_transition(struct powernow_k8_data *data,
-		u32 reqvid)
+		u32 reqvid, u32 reqfid)
 {
 	u32 rvosteps = data->rvo;
 	u32 savefid = data->currfid;
-	u32 maxvid, lo;
+	u32 maxvid, lo, rvomult = 1;
 
 	dprintk("ph1 (cpu%d): start, currfid 0x%x, currvid 0x%x, "
 		"reqvid 0x%x, rvo 0x%x\n",
 		smp_processor_id(),
 		data->currfid, data->currvid, reqvid, data->rvo);
 
+	if ((savefid < LO_FID_TABLE_TOP) && (reqfid < LO_FID_TABLE_TOP))
+		rvomult = 2;
+	rvosteps *= rvomult;
 	rdmsr(MSR_FIDVID_STATUS, lo, maxvid);
 	maxvid = 0x1f & (maxvid >> 16);
 	dprintk("ph1 maxvid=0x%x\n", maxvid);
@@ -351,7 +354,8 @@ static int core_voltage_pre_transition(struct powernow_k8_data *data,
 		return 1;
 	}
 
-	while ((rvosteps > 0) && ((data->rvo + data->currvid) > reqvid)) {
+	while ((rvosteps > 0) &&
+			((rvomult * data->rvo + data->currvid) > reqvid)) {
 		if (data->currvid == maxvid) {
 			rvosteps = 0;
 		} else {
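Note on the two hunks above: voltage transitions that stay inside the low fid table can now proceed instead of being rejected later as "lo-lo"; to keep voltage margins safe, the ramp voltage offset (RVO) step count is doubled for such transitions. A minimal sketch of the scaling, assuming a LO_FID_TABLE_TOP value of 7 for illustration (not taken from powernow-k8.h):

    #define LO_FID_TABLE_TOP 7  /* assumed value for illustration */

    /* Sketch of the rvomult scaling introduced above. */
    static unsigned scaled_rvo_steps(unsigned rvo, unsigned currfid, unsigned reqfid)
    {
        unsigned rvomult = 1;

        if (currfid < LO_FID_TABLE_TOP && reqfid < LO_FID_TABLE_TOP)
            rvomult = 2;    /* lo-to-lo: double the voltage ramp steps */
        return rvo * rvomult;
    }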
@@ -384,13 +388,6 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid)
 	u32 vcoreqfid, vcocurrfid, vcofiddiff;
 	u32 fid_interval, savevid = data->currvid;
 
-	if ((reqfid < HI_FID_TABLE_BOTTOM) &&
-	    (data->currfid < HI_FID_TABLE_BOTTOM)) {
-		printk(KERN_ERR PFX "ph2: illegal lo-lo transition "
-				"0x%x 0x%x\n", reqfid, data->currfid);
-		return 1;
-	}
-
 	if (data->currfid == reqfid) {
 		printk(KERN_ERR PFX "ph2 null fid transition 0x%x\n",
 			data->currfid);
@@ -407,6 +404,9 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid)
 	vcofiddiff = vcocurrfid > vcoreqfid ? vcocurrfid - vcoreqfid
 	    : vcoreqfid - vcocurrfid;
 
+	if ((reqfid <= LO_FID_TABLE_TOP) && (data->currfid <= LO_FID_TABLE_TOP))
+		vcofiddiff = 0;
+
 	while (vcofiddiff > 2) {
 		(data->currfid & 1) ? (fid_interval = 1) : (fid_interval = 2);
 
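Note on the two hunks above: the lo-lo guard in core_frequency_transition() is gone, and forcing vcofiddiff to zero when both fids sit in the low table skips the intermediate VCO stepping loop, so a lo-to-lo change jumps straight to the requested fid. A simplified sketch (it ignores the convert_fid_to_vco_fid() mapping and reuses the assumed LO_FID_TABLE_TOP from the sketch above):

    #define LO_FID_TABLE_TOP 7  /* assumed value for illustration */

    /* Simplified distance check: a lo-to-lo transition reports zero
     * VCO distance, so the caller's stepping loop never runs. */
    static unsigned vco_distance(unsigned currfid, unsigned reqfid)
    {
        unsigned diff = currfid > reqfid ? currfid - reqfid
                                         : reqfid - currfid;

        if (currfid <= LO_FID_TABLE_TOP && reqfid <= LO_FID_TABLE_TOP)
            diff = 0;
        return diff;
    }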
@@ -1081,14 +1081,6 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
 		return 0;
 	}
 
-	if ((fid < HI_FID_TABLE_BOTTOM) &&
-	    (data->currfid < HI_FID_TABLE_BOTTOM)) {
-		printk(KERN_ERR PFX
-		       "ignoring illegal change in lo freq table-%x to 0x%x\n",
-		       data->currfid, fid);
-		return 1;
-	}
-
 	dprintk("cpu %d, changing to fid 0x%x, vid 0x%x\n",
 		smp_processor_id(), fid, vid);
 	freqs.old = find_khz_freq_from_fid(data->currfid);
@@ -1267,7 +1259,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 {
 	static const char ACPI_PSS_BIOS_BUG_MSG[] =
 		KERN_ERR FW_BUG PFX "No compatible ACPI _PSS objects found.\n"
-		KERN_ERR FW_BUG PFX "Try again with latest BIOS.\n";
+		FW_BUG PFX "Try again with latest BIOS.\n";
 	struct powernow_k8_data *data;
 	struct init_on_cpu init_on_cpu;
 	int rc;
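Note on the last hunk: with the then-new printk semantics, only a KERN_* marker at the very start of the format string is interpreted as a log level; a second KERN_ERR concatenated mid-string would be emitted literally (as "<3>"), which is why the repeated prefix is dropped. A userspace model of the fixed message, assuming FW_BUG expands to "[Firmware Bug]: " and PFX to "powernow-k8: ":

    #include <stdio.h>

    /* "<3>" models KERN_ERR; only the leading marker is meaningful. */
    int main(void)
    {
        fputs("<3>[Firmware Bug]: powernow-k8: No compatible ACPI _PSS objects found.\n"
              "[Firmware Bug]: powernow-k8: Try again with latest BIOS.\n", stderr);
        return 0;
    }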
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
index c9c1190b5e1..02ce824073c 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
@@ -215,7 +215,8 @@ struct pst_s {
 
 #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "powernow-k8", msg)
 
-static int core_voltage_pre_transition(struct powernow_k8_data *data, u32 reqvid);
+static int core_voltage_pre_transition(struct powernow_k8_data *data,
+		u32 reqvid, u32 regfid);
 static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvid);
 static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid);
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 284d1de968b..484c1e5f658 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -194,14 +194,14 @@ static void print_mce(struct mce *m)
 			m->cs, m->ip);
 		if (m->cs == __KERNEL_CS)
 			print_symbol("{%s}", m->ip);
-		printk("\n");
+		printk(KERN_CONT "\n");
 	}
 	printk(KERN_EMERG "TSC %llx ", m->tsc);
 	if (m->addr)
-		printk("ADDR %llx ", m->addr);
+		printk(KERN_CONT "ADDR %llx ", m->addr);
 	if (m->misc)
-		printk("MISC %llx ", m->misc);
-	printk("\n");
+		printk(KERN_CONT "MISC %llx ", m->misc);
+	printk(KERN_CONT "\n");
 	printk(KERN_EMERG "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
 		m->cpuvendor, m->cpuid, m->time, m->socketid,
 		m->apicid);
@@ -209,13 +209,13 @@ static void print_mce(struct mce *m)
 
 static void print_mce_head(void)
 {
-	printk(KERN_EMERG "\n" KERN_EMERG "HARDWARE ERROR\n");
+	printk(KERN_EMERG "\nHARDWARE ERROR\n");
 }
 
 static void print_mce_tail(void)
 {
 	printk(KERN_EMERG "This is not a software problem!\n"
-	       KERN_EMERG "Run through mcelog --ascii to decode and contact your hardware vendor\n");
+	       "Run through mcelog --ascii to decode and contact your hardware vendor\n");
 }
 
 #define PANIC_TIMEOUT 5 /* 5 seconds */
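Note on the two mce.c hunks above: they adapt to the same printk semantics change. Fragments that continue the current line need an explicit KERN_CONT (a bare printk() would otherwise start a new default-level record), and an embedded KERN_EMERG mid-string would print literally. A minimal sketch of the continuation idiom, with a hypothetical driver message:

    #include <linux/kernel.h>

    /* Sketch: one logical log line assembled from several fragments. */
    static void report_status(int ok, int code)
    {
        printk(KERN_INFO "mydrv: status");          /* opens the line, sets the level */
        if (!ok)
            printk(KERN_CONT " error=%d", code);    /* appends to the same line */
        printk(KERN_CONT "\n");                     /* terminates it */
    }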
@@ -1117,7 +1117,7 @@ static void mcheck_timer(unsigned long data)
 	*n = min(*n*2, (int)round_jiffies_relative(check_interval*HZ));
 
 	t->expires = jiffies + *n;
-	add_timer(t);
+	add_timer_on(t, smp_processor_id());
 }
 
 static void mce_do_trigger(struct work_struct *work)
@@ -1321,7 +1321,7 @@ static void mce_init_timer(void)
 		return;
 	setup_timer(t, mcheck_timer, smp_processor_id());
 	t->expires = round_jiffies(jiffies + *n);
-	add_timer(t);
+	add_timer_on(t, smp_processor_id());
 }
 
 /*
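Note on the two timer hunks: mcheck_timer() polls per-CPU machine-check state, so the rearm must stay on the CPU whose banks are being polled; plain add_timer() may fire the callback on another CPU, while add_timer_on(t, smp_processor_id()) pins it. A sketch of the pinned per-CPU polling pattern, with hypothetical names (my_timer and my_poll stand in for the mce timer state):

    #include <linux/timer.h>
    #include <linux/percpu.h>
    #include <linux/jiffies.h>
    #include <linux/smp.h>

    static DEFINE_PER_CPU(struct timer_list, my_timer);

    /* Runs on one CPU and rearms itself on that same CPU. */
    static void my_poll(unsigned long data)
    {
        struct timer_list *t = &__get_cpu_var(my_timer);

        /* ... inspect this CPU's state ... */
        t->expires = jiffies + HZ;
        add_timer_on(t, smp_processor_id());
    }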
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 76dfef23f78..36c3dc7b899 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -401,7 +401,7 @@ static const u64 amd_hw_cache_event_ids
 		[ C(RESULT_MISS) ] = 0x0041, /* Data Cache Misses */
 	},
 	[ C(OP_WRITE) ] = {
-		[ C(RESULT_ACCESS) ] = 0x0042, /* Data Cache Refills from L2 */
+		[ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
 		[ C(RESULT_MISS) ] = 0,
 	},
 	[ C(OP_PREFETCH) ] = {
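Note on the hunk above: in these cache-event tables the low byte is the AMD event select and the next byte the unit mask, so 0x0042 -> 0x0142 keeps event 0x42 (data cache refills) but selects the unit mask for refills from system rather than from L2. A runnable decoding sketch:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t config = 0x0142;   /* Data Cache Refills: system */

        printf("event select 0x%02llx, unit mask 0x%02llx\n",
               (unsigned long long)(config & 0xff),
               (unsigned long long)((config >> 8) & 0xff));
        return 0;
    }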
@@ -912,6 +912,8 @@ x86_perf_counter_set_period(struct perf_counter *counter,
 	err = checking_wrmsrl(hwc->counter_base + idx,
 			     (u64)(-left) & x86_pmu.counter_mask);
 
+	perf_counter_update_userpage(counter);
+
 	return ret;
 }
 
@@ -969,13 +971,6 @@ fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
 	if (!x86_pmu.num_counters_fixed)
 		return -1;
 
-	/*
-	 * Quirk, IA32_FIXED_CTRs do not work on current Atom processors:
-	 */
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
-	    boot_cpu_data.x86_model == 28)
-		return -1;
-
 	event = hwc->config & ARCH_PERFMON_EVENT_MASK;
 
 	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS)))
@@ -1041,6 +1036,8 @@ try_generic:
 	x86_perf_counter_set_period(counter, hwc, idx);
 	x86_pmu.enable(hwc, idx);
 
+	perf_counter_update_userpage(counter);
+
 	return 0;
 }
 
@@ -1133,6 +1130,8 @@ static void x86_pmu_disable(struct perf_counter *counter)
 	x86_perf_counter_update(counter, hwc, idx);
 	cpuc->counters[idx] = NULL;
 	clear_bit(idx, cpuc->used_mask);
+
+	perf_counter_update_userpage(counter);
 }
 
 /*
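Note on the three perf_counter_update_userpage() additions: the counter's mmap'ed control page carries the hardware counter index and an offset that userspace adds to a raw rdpmc value, guarded by a seqlock-style generation count; refreshing the page on set-period, enable, and disable keeps self-monitoring readers from computing against stale state. A userspace-side sketch of the read loop, with a hypothetical mirror of the page layout (the real one lives in the perf_counter ABI header):

    #include <stdint.h>

    struct counter_page {               /* hypothetical field subset */
        volatile uint32_t lock;         /* generation count */
        volatile uint32_t index;        /* hw counter index */
        volatile int64_t  offset;       /* add to the raw counter value */
    };

    /* Retry until a kernel-side update is not in flight. */
    static void read_snapshot(struct counter_page *pc,
                              uint32_t *idx, int64_t *off)
    {
        uint32_t seq;

        do {
            seq = pc->lock;
            __asm__ __volatile__("" ::: "memory");
            *idx = pc->index;
            *off = pc->offset;
            __asm__ __volatile__("" ::: "memory");
        } while (pc->lock != seq);
    }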
@@ -1428,8 +1427,6 @@ static int intel_pmu_init(void)
 	 */
 	x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
 
-	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
-
 	/*
 	 * Install the hw-cache-events table:
 	 */
@@ -1499,21 +1496,22 @@ void __init init_hw_perf_counters(void)
 	pr_cont("%s PMU driver.\n", x86_pmu.name);
 
 	if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
-		x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
 		WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
 		     x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
+		x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
 	}
 	perf_counter_mask = (1 << x86_pmu.num_counters) - 1;
 	perf_max_counters = x86_pmu.num_counters;
 
 	if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
-		x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
 		WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!",
 		     x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
+		x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
 	}
 
 	perf_counter_mask |=
 		((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
+	x86_pmu.intel_ctrl = perf_counter_mask;
 
 	perf_counters_lapic_init();
 	register_die_notifier(&perf_counter_nmi_notifier);
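Note on the hunk above: two fixes in one. The WARN now fires before the value is clipped, so the message reports the out-of-range count instead of the freshly clamped one; and intel_ctrl is derived from perf_counter_mask rather than read back from MSR_CORE_PERF_GLOBAL_CTRL (that rdmsrl was removed in the intel_pmu_init() hunk above), so it no longer depends on whatever enable bits firmware happened to leave set. A runnable model of the reordered clamp:

    #include <stdio.h>

    /* Warn with the original value, then clip; the old order clipped
     * first, so the warning always printed max > max and said nothing. */
    static int clamp_with_warning(int have, int max)
    {
        if (have > max) {
            fprintf(stderr, "counters %d > max(%d), clipping!\n", have, max);
            have = max;
        }
        return have;
    }

    int main(void)
    {
        printf("using %d\n", clamp_with_warning(12, 8)); /* hypothetical counts */
        return 0;
    }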
@@ -1563,6 +1561,7 @@ void callchain_store(struct perf_callchain_entry *entry, u64 ip)
 
 static DEFINE_PER_CPU(struct perf_callchain_entry, irq_entry);
 static DEFINE_PER_CPU(struct perf_callchain_entry, nmi_entry);
+static DEFINE_PER_CPU(int, in_nmi_frame);
 
 
 static void
@@ -1578,7 +1577,9 @@ static void backtrace_warning(void *data, char *msg)
 
 static int backtrace_stack(void *data, char *name)
 {
-	/* Process all stacks: */
+	per_cpu(in_nmi_frame, smp_processor_id()) =
+			x86_is_stack_id(NMI_STACK, name);
+
 	return 0;
 }
 
@@ -1586,6 +1587,9 @@ static void backtrace_address(void *data, unsigned long addr, int reliable)
 {
 	struct perf_callchain_entry *entry = data;
 
+	if (per_cpu(in_nmi_frame, smp_processor_id()))
+		return;
+
 	if (reliable)
 		callchain_store(entry, addr);
 }
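Note on the three callchain hunks: backtrace_stack() is invoked at each stack boundary during the unwind, so recording whether the frames being walked belong to the NMI stack lets backtrace_address() drop them; a callchain sampled from the perf NMI handler then omits the handler's own frames. A userspace model of that filter, with illustrative names and addresses:

    #include <stdio.h>
    #include <string.h>

    static int in_nmi_frame;

    static int on_stack(const char *name)   /* stack-boundary callback */
    {
        in_nmi_frame = (name && strcmp(name, "NMI") == 0);
        return 0;                           /* keep walking all stacks */
    }

    static void on_address(unsigned long addr)
    {
        if (in_nmi_frame)
            return;                         /* skip NMI-stack frames */
        printf("  %#lx\n", addr);
    }

    int main(void)
    {
        on_stack("NMI");  on_address(0xffff0001UL);  /* dropped */
        on_stack("task"); on_address(0xffff0002UL);  /* kept */
        return 0;
    }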
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index 5c481f6205b..e60ed740d2b 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -803,8 +803,3 @@ int __kprobes lapic_wd_event(unsigned nmi_hz)
 	wd_ops->rearm(wd, nmi_hz);
 	return 1;
 }
-
-int lapic_watchdog_ok(void)
-{
-	return wd_ops != NULL;
-}