author		Ingo Molnar <mingo@elte.hu>	2009-08-24 06:25:44 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-08-24 06:25:54 -0400
commit		5f9ece02401116b29eb04396b99ea092acb75dd8 (patch)
tree		e10386e2dc63c275646b4eb0bed857da7bf86c6a /arch/x86/kernel/cpu
parent		9f51e24ee8b5a1595b6a5ac0c2be278a16488e75 (diff)
parent		422bef879e84104fee6dc68ded0e371dbeb5f88e (diff)
Merge commit 'v2.6.31-rc7' into x86/cleanups
Merge reason: we were on -rc1 before - go up to -rc7
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/cpu')
-rw-r--r--   arch/x86/kernel/cpu/amd.c                    9
-rw-r--r--   arch/x86/kernel/cpu/common.c                48
-rw-r--r--   arch/x86/kernel/cpu/cpufreq/powernow-k8.c   32
-rw-r--r--   arch/x86/kernel/cpu/cpufreq/powernow-k8.h    3
-rw-r--r--   arch/x86/kernel/cpu/mcheck/mce.c            37
-rw-r--r--   arch/x86/kernel/cpu/mcheck/therm_throt.c    23
-rw-r--r--   arch/x86/kernel/cpu/perf_counter.c         289
-rw-r--r--   arch/x86/kernel/cpu/perfctr-watchdog.c       5
8 files changed, 352 insertions, 94 deletions
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index c6eb02e69875..83b217c7225f 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -358,7 +358,7 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
 #endif
 #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
 	/* check CPU config space for extended APIC ID */
-	if (c->x86 >= 0xf) {
+	if (cpu_has_apic && c->x86 >= 0xf) {
 		unsigned int val;
 		val = read_pci_config(0, 24, 0, 0x68);
 		if ((val & ((1 << 17) | (1 << 18))) == ((1 << 17) | (1 << 18)))
@@ -402,6 +402,13 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 		level = cpuid_eax(1);
 		if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58)
 			set_cpu_cap(c, X86_FEATURE_REP_GOOD);
+
+		/*
+		 * Some BIOSes incorrectly force this feature, but only K8
+		 * revision D (model = 0x14) and later actually support it.
+		 */
+		if (c->x86_model < 0x14)
+			clear_cpu_cap(c, X86_FEATURE_LAHF_LM);
 	}
 	if (c->x86 == 0x10 || c->x86 == 0x11)
 		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index c96ea44928bf..734eaad93656 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -59,7 +59,30 @@ void __init setup_cpu_local_masks(void)
 	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
 }
 
-static const struct cpu_dev *this_cpu __cpuinitdata;
+static void __cpuinit default_init(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_X86_64
+	display_cacheinfo(c);
+#else
+	/* Not much we can do here... */
+	/* Check if at least it has cpuid */
+	if (c->cpuid_level == -1) {
+		/* No cpuid. It must be an ancient CPU */
+		if (c->x86 == 4)
+			strcpy(c->x86_model_id, "486");
+		else if (c->x86 == 3)
+			strcpy(c->x86_model_id, "386");
+	}
+#endif
+}
+
+static const struct cpu_dev __cpuinitconst default_cpu = {
+	.c_init		= default_init,
+	.c_vendor	= "Unknown",
+	.c_x86_vendor	= X86_VENDOR_UNKNOWN,
+};
+
+static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
 
 DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 #ifdef CONFIG_X86_64
@@ -332,29 +355,6 @@ void switch_to_new_gdt(int cpu)
 
 static const struct cpu_dev *__cpuinitdata cpu_devs[X86_VENDOR_NUM] = {};
 
-static void __cpuinit default_init(struct cpuinfo_x86 *c)
-{
-#ifdef CONFIG_X86_64
-	display_cacheinfo(c);
-#else
-	/* Not much we can do here... */
-	/* Check if at least it has cpuid */
-	if (c->cpuid_level == -1) {
-		/* No cpuid. It must be an ancient CPU */
-		if (c->x86 == 4)
-			strcpy(c->x86_model_id, "486");
-		else if (c->x86 == 3)
-			strcpy(c->x86_model_id, "386");
-	}
-#endif
-}
-
-static const struct cpu_dev __cpuinitconst default_cpu = {
-	.c_init		= default_init,
-	.c_vendor	= "Unknown",
-	.c_x86_vendor	= X86_VENDOR_UNKNOWN,
-};
-
 static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
 {
 	unsigned int *v;
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 81cbe64ed6b4..2a50ef891000 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -299,7 +299,7 @@ static int transition_pstate(struct powernow_k8_data *data, u32 pstate)
 static int transition_fid_vid(struct powernow_k8_data *data,
 		u32 reqfid, u32 reqvid)
 {
-	if (core_voltage_pre_transition(data, reqvid))
+	if (core_voltage_pre_transition(data, reqvid, reqfid))
 		return 1;
 
 	if (core_frequency_transition(data, reqfid))
@@ -327,17 +327,20 @@ static int transition_fid_vid(struct powernow_k8_data *data,
 
 /* Phase 1 - core voltage transition ... setup voltage */
 static int core_voltage_pre_transition(struct powernow_k8_data *data,
-		u32 reqvid)
+		u32 reqvid, u32 reqfid)
 {
 	u32 rvosteps = data->rvo;
 	u32 savefid = data->currfid;
-	u32 maxvid, lo;
+	u32 maxvid, lo, rvomult = 1;
 
 	dprintk("ph1 (cpu%d): start, currfid 0x%x, currvid 0x%x, "
 		"reqvid 0x%x, rvo 0x%x\n",
 		smp_processor_id(),
 		data->currfid, data->currvid, reqvid, data->rvo);
 
+	if ((savefid < LO_FID_TABLE_TOP) && (reqfid < LO_FID_TABLE_TOP))
+		rvomult = 2;
+	rvosteps *= rvomult;
 	rdmsr(MSR_FIDVID_STATUS, lo, maxvid);
 	maxvid = 0x1f & (maxvid >> 16);
 	dprintk("ph1 maxvid=0x%x\n", maxvid);
@@ -351,7 +354,8 @@ static int core_voltage_pre_transition(struct powernow_k8_data *data,
 		return 1;
 	}
 
-	while ((rvosteps > 0) && ((data->rvo + data->currvid) > reqvid)) {
+	while ((rvosteps > 0) &&
+			((rvomult * data->rvo + data->currvid) > reqvid)) {
 		if (data->currvid == maxvid) {
 			rvosteps = 0;
 		} else {
@@ -384,13 +388,6 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid)
 	u32 vcoreqfid, vcocurrfid, vcofiddiff;
 	u32 fid_interval, savevid = data->currvid;
 
-	if ((reqfid < HI_FID_TABLE_BOTTOM) &&
-	    (data->currfid < HI_FID_TABLE_BOTTOM)) {
-		printk(KERN_ERR PFX "ph2: illegal lo-lo transition "
-				"0x%x 0x%x\n", reqfid, data->currfid);
-		return 1;
-	}
-
 	if (data->currfid == reqfid) {
 		printk(KERN_ERR PFX "ph2 null fid transition 0x%x\n",
 			data->currfid);
@@ -407,6 +404,9 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid)
 	vcofiddiff = vcocurrfid > vcoreqfid ? vcocurrfid - vcoreqfid
 		: vcoreqfid - vcocurrfid;
 
+	if ((reqfid <= LO_FID_TABLE_TOP) && (data->currfid <= LO_FID_TABLE_TOP))
+		vcofiddiff = 0;
+
 	while (vcofiddiff > 2) {
 		(data->currfid & 1) ? (fid_interval = 1) : (fid_interval = 2);
 
@@ -1081,14 +1081,6 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
 		return 0;
 	}
 
-	if ((fid < HI_FID_TABLE_BOTTOM) &&
-	    (data->currfid < HI_FID_TABLE_BOTTOM)) {
-		printk(KERN_ERR PFX
-		       "ignoring illegal change in lo freq table-%x to 0x%x\n",
-		       data->currfid, fid);
-		return 1;
-	}
-
 	dprintk("cpu %d, changing to fid 0x%x, vid 0x%x\n",
 		smp_processor_id(), fid, vid);
 	freqs.old = find_khz_freq_from_fid(data->currfid);
@@ -1267,7 +1259,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 {
 	static const char ACPI_PSS_BIOS_BUG_MSG[] =
 		KERN_ERR FW_BUG PFX "No compatible ACPI _PSS objects found.\n"
-		KERN_ERR FW_BUG PFX "Try again with latest BIOS.\n";
+		FW_BUG PFX "Try again with latest BIOS.\n";
 	struct powernow_k8_data *data;
 	struct init_on_cpu init_on_cpu;
 	int rc;
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
index c9c1190b5e1f..02ce824073cb 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
@@ -215,7 +215,8 @@ struct pst_s {
 
 #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "powernow-k8", msg)
 
-static int core_voltage_pre_transition(struct powernow_k8_data *data, u32 reqvid);
+static int core_voltage_pre_transition(struct powernow_k8_data *data,
+	u32 reqvid, u32 regfid);
 static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvid);
 static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid);
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index af425b83202b..01213048f62f 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -194,14 +194,14 @@ static void print_mce(struct mce *m)
 			m->cs, m->ip);
 		if (m->cs == __KERNEL_CS)
 			print_symbol("{%s}", m->ip);
-		printk("\n");
+		printk(KERN_CONT "\n");
 	}
 	printk(KERN_EMERG "TSC %llx ", m->tsc);
 	if (m->addr)
-		printk("ADDR %llx ", m->addr);
+		printk(KERN_CONT "ADDR %llx ", m->addr);
 	if (m->misc)
-		printk("MISC %llx ", m->misc);
-	printk("\n");
+		printk(KERN_CONT "MISC %llx ", m->misc);
+	printk(KERN_CONT "\n");
 	printk(KERN_EMERG "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
 		m->cpuvendor, m->cpuid, m->time, m->socketid,
 		m->apicid);
@@ -209,13 +209,13 @@ static void print_mce(struct mce *m)
 
 static void print_mce_head(void)
 {
-	printk(KERN_EMERG "\n" KERN_EMERG "HARDWARE ERROR\n");
+	printk(KERN_EMERG "\nHARDWARE ERROR\n");
 }
 
 static void print_mce_tail(void)
 {
 	printk(KERN_EMERG "This is not a software problem!\n"
-	       KERN_EMERG "Run through mcelog --ascii to decode and contact your hardware vendor\n");
+	       "Run through mcelog --ascii to decode and contact your hardware vendor\n");
 }
 
 #define PANIC_TIMEOUT 5 /* 5 seconds */
@@ -1226,8 +1226,13 @@ static void mce_init(void)
 }
 
 /* Add per CPU specific workarounds here */
-static void mce_cpu_quirks(struct cpuinfo_x86 *c)
+static int mce_cpu_quirks(struct cpuinfo_x86 *c)
 {
+	if (c->x86_vendor == X86_VENDOR_UNKNOWN) {
+		pr_info("MCE: unknown CPU type - not enabling MCE support.\n");
+		return -EOPNOTSUPP;
+	}
+
 	/* This should be disabled by the BIOS, but isn't always */
 	if (c->x86_vendor == X86_VENDOR_AMD) {
 		if (c->x86 == 15 && banks > 4) {
@@ -1273,11 +1278,20 @@ static void mce_cpu_quirks(struct cpuinfo_x86 *c)
 		if ((c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xe)) &&
 			monarch_timeout < 0)
 			monarch_timeout = USEC_PER_SEC;
+
+		/*
+		 * There are also broken BIOSes on some Pentium M and
+		 * earlier systems:
+		 */
+		if (c->x86 == 6 && c->x86_model <= 13 && mce_bootlog < 0)
+			mce_bootlog = 0;
 	}
 	if (monarch_timeout < 0)
 		monarch_timeout = 0;
 	if (mce_bootlog != 0)
 		mce_panic_timeout = 30;
+
+	return 0;
 }
 
 static void __cpuinit mce_ancient_init(struct cpuinfo_x86 *c)
@@ -1338,11 +1352,10 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
 	if (!mce_available(c))
 		return;
 
-	if (mce_cap_init() < 0) {
+	if (mce_cap_init() < 0 || mce_cpu_quirks(c) < 0) {
 		mce_disabled = 1;
 		return;
 	}
-	mce_cpu_quirks(c);
 
 	machine_check_vector = do_machine_check;
 
@@ -1692,17 +1705,15 @@ static ssize_t set_trigger(struct sys_device *s, struct sysdev_attribute *attr,
 			    const char *buf, size_t siz)
 {
 	char *p;
-	int len;
 
 	strncpy(mce_helper, buf, sizeof(mce_helper));
 	mce_helper[sizeof(mce_helper)-1] = 0;
-	len = strlen(mce_helper);
 	p = strchr(mce_helper, '\n');
 
-	if (*p)
+	if (p)
 		*p = 0;
 
-	return len;
+	return strlen(mce_helper) + !!p;
 }
 
 static ssize_t set_ignore_ce(struct sys_device *s,
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index bff8dd191dd5..5957a93e5173 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -36,6 +36,7 @@
 
 static DEFINE_PER_CPU(__u64, next_check) = INITIAL_JIFFIES;
 static DEFINE_PER_CPU(unsigned long, thermal_throttle_count);
+static DEFINE_PER_CPU(bool, thermal_throttle_active);
 
 static atomic_t therm_throt_en = ATOMIC_INIT(0);
 
@@ -96,27 +97,33 @@ static int therm_throt_process(int curr)
 {
 	unsigned int cpu = smp_processor_id();
 	__u64 tmp_jiffs = get_jiffies_64();
+	bool was_throttled = __get_cpu_var(thermal_throttle_active);
+	bool is_throttled = __get_cpu_var(thermal_throttle_active) = curr;
 
-	if (curr)
+	if (is_throttled)
 		__get_cpu_var(thermal_throttle_count)++;
 
-	if (time_before64(tmp_jiffs, __get_cpu_var(next_check)))
+	if (!(was_throttled ^ is_throttled) &&
+	    time_before64(tmp_jiffs, __get_cpu_var(next_check)))
 		return 0;
 
 	__get_cpu_var(next_check) = tmp_jiffs + CHECK_INTERVAL;
 
 	/* if we just entered the thermal event */
-	if (curr) {
+	if (is_throttled) {
 		printk(KERN_CRIT "CPU%d: Temperature above threshold, "
-		       "cpu clock throttled (total events = %lu)\n", cpu,
-		       __get_cpu_var(thermal_throttle_count));
+		       "cpu clock throttled (total events = %lu)\n",
+		       cpu, __get_cpu_var(thermal_throttle_count));
 
 		add_taint(TAINT_MACHINE_CHECK);
-	} else {
-		printk(KERN_CRIT "CPU%d: Temperature/speed normal\n", cpu);
+		return 1;
+	}
+	if (was_throttled) {
+		printk(KERN_INFO "CPU%d: Temperature/speed normal\n", cpu);
+		return 1;
 	}
 
-	return 1;
+	return 0;
 }
 
 #ifdef CONFIG_SYSFS
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index d4cf4ce19aac..900332b800f8 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -55,6 +55,7 @@ struct x86_pmu {
 	int		num_counters_fixed;
 	int		counter_bits;
 	u64		counter_mask;
+	int		apic;
 	u64		max_period;
 	u64		intel_ctrl;
 };
@@ -66,6 +67,52 @@ static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = {
 };
 
 /*
+ * Not sure about some of these
+ */
+static const u64 p6_perfmon_event_map[] =
+{
+  [PERF_COUNT_HW_CPU_CYCLES]		= 0x0079,
+  [PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
+  [PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0f2e,
+  [PERF_COUNT_HW_CACHE_MISSES]		= 0x012e,
+  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
+  [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
+  [PERF_COUNT_HW_BUS_CYCLES]		= 0x0062,
+};
+
+static u64 p6_pmu_event_map(int event)
+{
+	return p6_perfmon_event_map[event];
+}
+
+/*
+ * Counter setting that is specified not to count anything.
+ * We use this to effectively disable a counter.
+ *
+ * L2_RQSTS with 0 MESI unit mask.
+ */
+#define P6_NOP_COUNTER			0x0000002EULL
+
+static u64 p6_pmu_raw_event(u64 event)
+{
+#define P6_EVNTSEL_EVENT_MASK		0x000000FFULL
+#define P6_EVNTSEL_UNIT_MASK		0x0000FF00ULL
+#define P6_EVNTSEL_EDGE_MASK		0x00040000ULL
+#define P6_EVNTSEL_INV_MASK		0x00800000ULL
+#define P6_EVNTSEL_COUNTER_MASK		0xFF000000ULL
+
+#define P6_EVNTSEL_MASK			\
+	(P6_EVNTSEL_EVENT_MASK |	\
+	 P6_EVNTSEL_UNIT_MASK  |	\
+	 P6_EVNTSEL_EDGE_MASK  |	\
+	 P6_EVNTSEL_INV_MASK   |	\
+	 P6_EVNTSEL_COUNTER_MASK)
+
+	return event & P6_EVNTSEL_MASK;
+}
+
+
+/*
  * Intel PerfMon v3. Used on Core2 and later.
  */
 static const u64 intel_perfmon_event_map[] =
@@ -567,6 +614,7 @@ static DEFINE_MUTEX(pmc_reserve_mutex);
 
 static bool reserve_pmc_hardware(void)
 {
+#ifdef CONFIG_X86_LOCAL_APIC
 	int i;
 
 	if (nmi_watchdog == NMI_LOCAL_APIC)
@@ -581,9 +629,11 @@ static bool reserve_pmc_hardware(void)
 		if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
 			goto eventsel_fail;
 	}
+#endif
 
 	return true;
 
+#ifdef CONFIG_X86_LOCAL_APIC
 eventsel_fail:
 	for (i--; i >= 0; i--)
 		release_evntsel_nmi(x86_pmu.eventsel + i);
@@ -598,10 +648,12 @@ perfctr_fail:
 	enable_lapic_nmi_watchdog();
 
 	return false;
+#endif
 }
 
 static void release_pmc_hardware(void)
 {
+#ifdef CONFIG_X86_LOCAL_APIC
 	int i;
 
 	for (i = 0; i < x86_pmu.num_counters; i++) {
@@ -611,6 +663,7 @@ static void release_pmc_hardware(void)
 
 	if (nmi_watchdog == NMI_LOCAL_APIC)
 		enable_lapic_nmi_watchdog();
+#endif
 }
 
 static void hw_perf_counter_destroy(struct perf_counter *counter)
@@ -666,6 +719,7 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 {
 	struct perf_counter_attr *attr = &counter->attr;
 	struct hw_perf_counter *hwc = &counter->hw;
+	u64 config;
 	int err;
 
 	if (!x86_pmu_initialized())
@@ -701,6 +755,15 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 		hwc->sample_period = x86_pmu.max_period;
 		hwc->last_period = hwc->sample_period;
 		atomic64_set(&hwc->period_left, hwc->sample_period);
+	} else {
+		/*
+		 * If we have a PMU initialized but no APIC
+		 * interrupts, we cannot sample hardware
+		 * counters (user-space has to fall back and
+		 * sample via a hrtimer based software counter):
+		 */
+		if (!x86_pmu.apic)
+			return -EOPNOTSUPP;
 	}
 
 	counter->destroy = hw_perf_counter_destroy;
@@ -718,14 +781,40 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 
 	if (attr->config >= x86_pmu.max_events)
 		return -EINVAL;
+
 	/*
 	 * The generic map:
 	 */
-	hwc->config |= x86_pmu.event_map(attr->config);
+	config = x86_pmu.event_map(attr->config);
+
+	if (config == 0)
+		return -ENOENT;
+
+	if (config == -1LL)
+		return -EINVAL;
+
+	hwc->config |= config;
 
 	return 0;
 }
 
+static void p6_pmu_disable_all(void)
+{
+	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	u64 val;
+
+	if (!cpuc->enabled)
+		return;
+
+	cpuc->enabled = 0;
+	barrier();
+
+	/* p6 only has one enable register */
+	rdmsrl(MSR_P6_EVNTSEL0, val);
+	val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+	wrmsrl(MSR_P6_EVNTSEL0, val);
+}
+
 static void intel_pmu_disable_all(void)
 {
 	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
@@ -767,6 +856,23 @@ void hw_perf_disable(void)
 	return x86_pmu.disable_all();
 }
 
+static void p6_pmu_enable_all(void)
+{
+	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	unsigned long val;
+
+	if (cpuc->enabled)
+		return;
+
+	cpuc->enabled = 1;
+	barrier();
+
+	/* p6 only has one enable register */
+	rdmsrl(MSR_P6_EVNTSEL0, val);
+	val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+	wrmsrl(MSR_P6_EVNTSEL0, val);
+}
+
 static void intel_pmu_enable_all(void)
 {
 	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
@@ -784,13 +890,13 @@ static void amd_pmu_enable_all(void)
 	barrier();
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+		struct perf_counter *counter = cpuc->counters[idx];
 		u64 val;
 
 		if (!test_bit(idx, cpuc->active_mask))
 			continue;
-		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
-		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
-			continue;
+
+		val = counter->hw.config;
 		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
 		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
 	}
@@ -819,16 +925,13 @@ static inline void intel_pmu_ack_status(u64 ack)
 
 static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 {
-	int err;
-	err = checking_wrmsrl(hwc->config_base + idx,
+	(void)checking_wrmsrl(hwc->config_base + idx,
 			      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
 }
 
 static inline void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
 {
-	int err;
-	err = checking_wrmsrl(hwc->config_base + idx,
-			      hwc->config);
+	(void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
 }
 
 static inline void
@@ -836,13 +939,24 @@ intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx)
 {
 	int idx = __idx - X86_PMC_IDX_FIXED;
 	u64 ctrl_val, mask;
-	int err;
 
 	mask = 0xfULL << (idx * 4);
 
 	rdmsrl(hwc->config_base, ctrl_val);
 	ctrl_val &= ~mask;
-	err = checking_wrmsrl(hwc->config_base, ctrl_val);
+	(void)checking_wrmsrl(hwc->config_base, ctrl_val);
+}
+
+static inline void
+p6_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
+{
+	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	u64 val = P6_NOP_COUNTER;
+
+	if (cpuc->enabled)
+		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+
+	(void)checking_wrmsrl(hwc->config_base + idx, val);
 }
 
 static inline void
@@ -943,6 +1057,19 @@ intel_pmu_enable_fixed(struct hw_perf_counter *hwc, int __idx)
 	err = checking_wrmsrl(hwc->config_base, ctrl_val);
 }
 
+static void p6_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
+{
+	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	u64 val;
+
+	val = hwc->config;
+	if (cpuc->enabled)
+		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+
+	(void)checking_wrmsrl(hwc->config_base + idx, val);
+}
+
+
 static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 {
 	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
@@ -959,8 +1086,6 @@ static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 
 	if (cpuc->enabled)
 		x86_pmu_enable_counter(hwc, idx);
-	else
-		x86_pmu_disable_counter(hwc, idx);
 }
 
 static int
@@ -1176,6 +1301,49 @@ static void intel_pmu_reset(void)
 	local_irq_restore(flags);
 }
 
+static int p6_pmu_handle_irq(struct pt_regs *regs)
+{
+	struct perf_sample_data data;
+	struct cpu_hw_counters *cpuc;
+	struct perf_counter *counter;
+	struct hw_perf_counter *hwc;
+	int idx, handled = 0;
+	u64 val;
+
+	data.regs = regs;
+	data.addr = 0;
+
+	cpuc = &__get_cpu_var(cpu_hw_counters);
+
+	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+		if (!test_bit(idx, cpuc->active_mask))
+			continue;
+
+		counter = cpuc->counters[idx];
+		hwc = &counter->hw;
+
+		val = x86_perf_counter_update(counter, hwc, idx);
+		if (val & (1ULL << (x86_pmu.counter_bits - 1)))
+			continue;
+
+		/*
+		 * counter overflow
+		 */
+		handled = 1;
+		data.period = counter->hw.last_period;
+
+		if (!x86_perf_counter_set_period(counter, hwc, idx))
+			continue;
+
+		if (perf_counter_overflow(counter, 1, &data))
+			p6_pmu_disable_counter(hwc, idx);
+	}
+
+	if (handled)
+		inc_irq_stat(apic_perf_irqs);
+
+	return handled;
+}
 
 /*
  * This handler is triggered by the local APIC, so the APIC IRQ handling
@@ -1185,14 +1353,13 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 {
 	struct perf_sample_data data;
 	struct cpu_hw_counters *cpuc;
-	int bit, cpu, loops;
+	int bit, loops;
 	u64 ack, status;
 
 	data.regs = regs;
 	data.addr = 0;
 
-	cpu = smp_processor_id();
-	cpuc = &per_cpu(cpu_hw_counters, cpu);
+	cpuc = &__get_cpu_var(cpu_hw_counters);
 
 	perf_disable();
 	status = intel_pmu_get_status();
@@ -1249,14 +1416,13 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
 	struct cpu_hw_counters *cpuc;
 	struct perf_counter *counter;
 	struct hw_perf_counter *hwc;
-	int cpu, idx, handled = 0;
+	int idx, handled = 0;
 	u64 val;
 
 	data.regs = regs;
 	data.addr = 0;
 
-	cpu = smp_processor_id();
-	cpuc = &per_cpu(cpu_hw_counters, cpu);
+	cpuc = &__get_cpu_var(cpu_hw_counters);
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 		if (!test_bit(idx, cpuc->active_mask))
@@ -1299,18 +1465,22 @@ void smp_perf_pending_interrupt(struct pt_regs *regs)
 
 void set_perf_counter_pending(void)
 {
+#ifdef CONFIG_X86_LOCAL_APIC
 	apic->send_IPI_self(LOCAL_PENDING_VECTOR);
+#endif
 }
 
 void perf_counters_lapic_init(void)
 {
-	if (!x86_pmu_initialized())
+#ifdef CONFIG_X86_LOCAL_APIC
+	if (!x86_pmu.apic || !x86_pmu_initialized())
 		return;
 
 	/*
 	 * Always use NMI for PMU
 	 */
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
+#endif
 }
 
 static int __kprobes
@@ -1334,7 +1504,9 @@ perf_counter_nmi_handler(struct notifier_block *self,
 
 	regs = args->regs;
 
+#ifdef CONFIG_X86_LOCAL_APIC
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
+#endif
 	/*
 	 * Can't rely on the handled return value to say it was our NMI, two
 	 * counters could trigger 'simultaneously' raising two back-to-back NMIs.
@@ -1353,6 +1525,33 @@ static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
 	.priority		= 1
 };
 
+static struct x86_pmu p6_pmu = {
+	.name			= "p6",
+	.handle_irq		= p6_pmu_handle_irq,
+	.disable_all		= p6_pmu_disable_all,
+	.enable_all		= p6_pmu_enable_all,
+	.enable			= p6_pmu_enable_counter,
+	.disable		= p6_pmu_disable_counter,
+	.eventsel		= MSR_P6_EVNTSEL0,
+	.perfctr		= MSR_P6_PERFCTR0,
+	.event_map		= p6_pmu_event_map,
+	.raw_event		= p6_pmu_raw_event,
+	.max_events		= ARRAY_SIZE(p6_perfmon_event_map),
+	.apic			= 1,
+	.max_period		= (1ULL << 31) - 1,
+	.version		= 0,
+	.num_counters		= 2,
+	/*
+	 * Counters have 40 bits implemented. However they are designed such
+	 * that bits [32-39] are sign extensions of bit 31. As such the
+	 * effective width of a counter for P6-like PMU is 32 bits only.
+	 *
+	 * See IA-32 Intel Architecture Software developer manual Vol 3B
+	 */
+	.counter_bits		= 32,
+	.counter_mask		= (1ULL << 32) - 1,
+};
+
 static struct x86_pmu intel_pmu = {
 	.name			= "Intel",
 	.handle_irq		= intel_pmu_handle_irq,
@@ -1365,6 +1564,7 @@ static struct x86_pmu intel_pmu = {
 	.event_map		= intel_pmu_event_map,
 	.raw_event		= intel_pmu_raw_event,
 	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
+	.apic			= 1,
 	/*
 	 * Intel PMCs cannot be accessed sanely above 32 bit width,
 	 * so we install an artificial 1<<31 period regardless of
@@ -1388,10 +1588,43 @@ static struct x86_pmu amd_pmu = {
 	.num_counters		= 4,
 	.counter_bits		= 48,
 	.counter_mask		= (1ULL << 48) - 1,
+	.apic			= 1,
 	/* use highest bit to detect overflow */
 	.max_period		= (1ULL << 47) - 1,
 };
 
+static int p6_pmu_init(void)
+{
+	switch (boot_cpu_data.x86_model) {
+	case 1:
+	case 3:  /* Pentium Pro */
+	case 5:
+	case 6:  /* Pentium II */
+	case 7:
+	case 8:
+	case 11: /* Pentium III */
+		break;
+	case 9:
+	case 13:
+		/* Pentium M */
+		break;
+	default:
+		pr_cont("unsupported p6 CPU model %d ",
+			boot_cpu_data.x86_model);
+		return -ENODEV;
+	}
+
+	x86_pmu = p6_pmu;
+
+	if (!cpu_has_apic) {
+		pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
+		pr_info("no hardware sampling interrupt available.\n");
+		x86_pmu.apic = 0;
+	}
+
+	return 0;
+}
+
 static int intel_pmu_init(void)
 {
 	union cpuid10_edx edx;
@@ -1400,8 +1633,14 @@ static int intel_pmu_init(void)
 	unsigned int ebx;
 	int version;
 
-	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
+	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
+		/* check for P6 processor family */
+		if (boot_cpu_data.x86 == 6) {
+			return p6_pmu_init();
+		} else {
 		return -ENODEV;
+		}
+	}
 
 	/*
 	 * Check whether the Architectural PerfMon supports
@@ -1561,6 +1800,7 @@ void callchain_store(struct perf_callchain_entry *entry, u64 ip)
 
 static DEFINE_PER_CPU(struct perf_callchain_entry, irq_entry);
 static DEFINE_PER_CPU(struct perf_callchain_entry, nmi_entry);
+static DEFINE_PER_CPU(int, in_nmi_frame);
 
 
 static void
@@ -1576,7 +1816,9 @@ static void backtrace_warning(void *data, char *msg)
 
 static int backtrace_stack(void *data, char *name)
 {
-	/* Process all stacks: */
+	per_cpu(in_nmi_frame, smp_processor_id()) =
+			x86_is_stack_id(NMI_STACK, name);
+
 	return 0;
 }
 
@@ -1584,6 +1826,9 @@ static void backtrace_address(void *data, unsigned long addr, int reliable)
 {
 	struct perf_callchain_entry *entry = data;
 
+	if (per_cpu(in_nmi_frame, smp_processor_id()))
+		return;
+
 	if (reliable)
 		callchain_store(entry, addr);
 }
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index 8100a29c854f..392bea43b890 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -804,8 +804,3 @@ int __kprobes lapic_wd_event(unsigned nmi_hz)
 	wd_ops->rearm(wd, nmi_hz);
 	return 1;
 }
-
-int lapic_watchdog_ok(void)
-{
-	return wd_ops != NULL;
-}