Diffstat (limited to 'arch/x86/kernel/cpu')

-rw-r--r--  arch/x86/kernel/cpu/amd.c                  |   2
-rw-r--r--  arch/x86/kernel/cpu/common.c               |   1
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c  |   4
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c      | 151
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce-inject.c    |   5
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c           |  20
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd.c       | 135
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_intel.c     |   2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/therm_throt.c   |  40
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c           | 104
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd.c       |  16
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c     |  30
-rw-r--r--  arch/x86/kernel/cpu/perf_event_p4.c        |  28
-rw-r--r--  arch/x86/kernel/cpu/perfctr-watchdog.c     | 644

14 files changed, 310 insertions, 872 deletions
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 9e093f8fe78c..7c7bedb83c5a 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -668,7 +668,7 @@ EXPORT_SYMBOL_GPL(amd_erratum_383);
 
 bool cpu_has_amd_erratum(const int *erratum)
 {
-	struct cpuinfo_x86 *cpu = &current_cpu_data;
+	struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info);
 	int osvw_id = *erratum++;
 	u32 range;
 	u32 ms;
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 4b68bda30938..1d59834396bd 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -894,7 +894,6 @@ void __init identify_boot_cpu(void)
 #else
 	vgetcpu_set_mode();
 #endif
-	init_hw_perf_events();
 }
 
 void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 491977baf6c0..35c7e65e59be 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -521,7 +521,7 @@ static void check_supported_cpu(void *_rc)
 
 	*rc = -ENODEV;
 
-	if (current_cpu_data.x86_vendor != X86_VENDOR_AMD)
+	if (__this_cpu_read(cpu_info.x86_vendor) != X86_VENDOR_AMD)
 		return;
 
 	eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
@@ -1377,7 +1377,7 @@ static int __devexit powernowk8_cpu_exit(struct cpufreq_policy *pol)
 static void query_values_on_cpu(void *_err)
 {
 	int *err = _err;
-	struct powernow_k8_data *data = __get_cpu_var(powernow_data);
+	struct powernow_k8_data *data = __this_cpu_read(powernow_data);
 
 	*err = query_current_values_with_pending_wait(data);
 }
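
The hunks above all apply the same conversion: open-coded uses of current_cpu_data and __get_cpu_var() are replaced by the __this_cpu_* accessors on the cpu_info per-CPU variable. A minimal standalone sketch of the idiom follows; it is illustrative only and not taken from this patch (the demo_* names are invented), assuming the caller already runs with preemption disabled as these accessors require.

/* Illustrative only: one per-CPU counter, old accessor vs. this_cpu API. */
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned int, demo_counter);

static void demo_update(void)
{
	/* Old style: compute the address of this CPU's copy, then use it. */
	unsigned int *p = &__get_cpu_var(demo_counter);
	(*p)++;

	/*
	 * New style: operate on the variable directly; on x86 this can be
	 * emitted as a single %gs-relative instruction, with no explicit
	 * address computation.
	 */
	__this_cpu_inc(demo_counter);
	if (__this_cpu_read(demo_counter) > 100)
		__this_cpu_write(demo_counter, 0);
}
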
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 17ad03366211..7283e98deaae 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -149,8 +149,7 @@ union _cpuid4_leaf_ecx {
 };
 
 struct amd_l3_cache {
-	struct pci_dev *dev;
-	bool can_disable;
+	struct amd_northbridge *nb;
 	unsigned indices;
 	u8 subcaches[4];
 };
@@ -266,7 +265,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
 		line_size = l2.line_size;
 		lines_per_tag = l2.lines_per_tag;
 		/* cpu_data has errata corrections for K7 applied */
-		size_in_kb = current_cpu_data.x86_cache_size;
+		size_in_kb = __this_cpu_read(cpu_info.x86_cache_size);
 		break;
 	case 3:
 		if (!l3.val)
@@ -288,7 +287,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
 	eax->split.type = types[leaf];
 	eax->split.level = levels[leaf];
 	eax->split.num_threads_sharing = 0;
-	eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;
+	eax->split.num_cores_on_die = __this_cpu_read(cpu_info.x86_max_cores) - 1;
 
 
 	if (assoc == 0xffff)
@@ -311,14 +310,12 @@ struct _cache_attr {
 /*
  * L3 cache descriptors
  */
-static struct amd_l3_cache **__cpuinitdata l3_caches;
-
 static void __cpuinit amd_calc_l3_indices(struct amd_l3_cache *l3)
 {
 	unsigned int sc0, sc1, sc2, sc3;
 	u32 val = 0;
 
-	pci_read_config_dword(l3->dev, 0x1C4, &val);
+	pci_read_config_dword(l3->nb->misc, 0x1C4, &val);
 
 	/* calculate subcache sizes */
 	l3->subcaches[0] = sc0 = !(val & BIT(0));
@@ -330,47 +327,14 @@ static void __cpuinit amd_calc_l3_indices(struct amd_l3_cache *l3)
 	l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
 }
 
-static struct amd_l3_cache * __cpuinit amd_init_l3_cache(int node)
-{
-	struct amd_l3_cache *l3;
-	struct pci_dev *dev = node_to_k8_nb_misc(node);
-
-	l3 = kzalloc(sizeof(struct amd_l3_cache), GFP_ATOMIC);
-	if (!l3) {
-		printk(KERN_WARNING "Error allocating L3 struct\n");
-		return NULL;
-	}
-
-	l3->dev = dev;
-
-	amd_calc_l3_indices(l3);
-
-	return l3;
-}
-
-static void __cpuinit amd_check_l3_disable(struct _cpuid4_info_regs *this_leaf,
-					   int index)
+static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf,
+					int index)
 {
+	static struct amd_l3_cache *__cpuinitdata l3_caches;
 	int node;
 
-	if (boot_cpu_data.x86 != 0x10)
-		return;
-
-	if (index < 3)
-		return;
-
-	/* see errata #382 and #388 */
-	if (boot_cpu_data.x86_model < 0x8)
-		return;
-
-	if ((boot_cpu_data.x86_model == 0x8 ||
-	     boot_cpu_data.x86_model == 0x9)
-		&&
-	     boot_cpu_data.x86_mask < 0x1)
-		return;
-
-	/* not in virtualized environments */
-	if (k8_northbridges.num == 0)
+	/* only for L3, and not in virtualized environments */
+	if (index < 3 || amd_nb_num() == 0)
 		return;
 
 	/*
@@ -378,7 +342,7 @@ static void __cpuinit amd_check_l3_disable(struct _cpuid4_info_regs *this_leaf,
 	 * never freed but this is done only on shutdown so it doesn't matter.
 	 */
 	if (!l3_caches) {
-		int size = k8_northbridges.num * sizeof(struct amd_l3_cache *);
+		int size = amd_nb_num() * sizeof(struct amd_l3_cache);
 
 		l3_caches = kzalloc(size, GFP_ATOMIC);
 		if (!l3_caches)
@@ -387,14 +351,12 @@ static void __cpuinit amd_check_l3_disable(struct _cpuid4_info_regs *this_leaf,
 
 	node = amd_get_nb_id(smp_processor_id());
 
-	if (!l3_caches[node]) {
-		l3_caches[node] = amd_init_l3_cache(node);
-		l3_caches[node]->can_disable = true;
+	if (!l3_caches[node].nb) {
+		l3_caches[node].nb = node_to_amd_nb(node);
+		amd_calc_l3_indices(&l3_caches[node]);
 	}
 
-	WARN_ON(!l3_caches[node]);
-
-	this_leaf->l3 = l3_caches[node];
+	this_leaf->l3 = &l3_caches[node];
 }
 
 /*
@@ -408,7 +370,7 @@ int amd_get_l3_disable_slot(struct amd_l3_cache *l3, unsigned slot)
 {
 	unsigned int reg = 0;
 
-	pci_read_config_dword(l3->dev, 0x1BC + slot * 4, &reg);
+	pci_read_config_dword(l3->nb->misc, 0x1BC + slot * 4, &reg);
 
 	/* check whether this slot is activated already */
 	if (reg & (3UL << 30))
@@ -422,7 +384,8 @@ static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
 {
 	int index;
 
-	if (!this_leaf->l3 || !this_leaf->l3->can_disable)
+	if (!this_leaf->l3 ||
+	    !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
 		return -EINVAL;
 
 	index = amd_get_l3_disable_slot(this_leaf->l3, slot);
@@ -457,7 +420,7 @@ static void amd_l3_disable_index(struct amd_l3_cache *l3, int cpu,
 		if (!l3->subcaches[i])
 			continue;
 
-		pci_write_config_dword(l3->dev, 0x1BC + slot * 4, reg);
+		pci_write_config_dword(l3->nb->misc, 0x1BC + slot * 4, reg);
 
 		/*
 		 * We need to WBINVD on a core on the node containing the L3
@@ -467,7 +430,7 @@ static void amd_l3_disable_index(struct amd_l3_cache *l3, int cpu,
 		wbinvd_on_cpu(cpu);
 
 		reg |= BIT(31);
-		pci_write_config_dword(l3->dev, 0x1BC + slot * 4, reg);
+		pci_write_config_dword(l3->nb->misc, 0x1BC + slot * 4, reg);
 	}
 }
 
@@ -524,7 +487,8 @@ static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
-	if (!this_leaf->l3 || !this_leaf->l3->can_disable)
+	if (!this_leaf->l3 ||
+	    !amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
 		return -EINVAL;
 
 	cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
@@ -545,7 +509,7 @@ static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
 #define STORE_CACHE_DISABLE(slot)					\
 static ssize_t								\
 store_cache_disable_##slot(struct _cpuid4_info *this_leaf,		\
-			    const char *buf, size_t count)		\
+			   const char *buf, size_t count)		\
 {									\
 	return store_cache_disable(this_leaf, buf, count, slot);	\
 }
@@ -558,10 +522,7 @@ static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
 		show_cache_disable_1, store_cache_disable_1);
 
 #else /* CONFIG_AMD_NB */
-static void __cpuinit
-amd_check_l3_disable(struct _cpuid4_info_regs *this_leaf, int index)
-{
-};
+#define amd_init_l3_cache(x, y)
 #endif /* CONFIG_AMD_NB */
 
 static int
@@ -575,7 +536,7 @@ __cpuinit cpuid4_cache_lookup_regs(int index,
 
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
 		amd_cpuid4(index, &eax, &ebx, &ecx);
-		amd_check_l3_disable(this_leaf, index);
+		amd_init_l3_cache(this_leaf, index);
 	} else {
 		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
 	}
@@ -983,30 +944,48 @@ define_one_ro(size);
 define_one_ro(shared_cpu_map);
 define_one_ro(shared_cpu_list);
 
-#define DEFAULT_SYSFS_CACHE_ATTRS	\
-	&type.attr,			\
-	&level.attr,			\
-	&coherency_line_size.attr,	\
-	&physical_line_partition.attr,	\
-	&ways_of_associativity.attr,	\
-	&number_of_sets.attr,		\
-	&size.attr,			\
-	&shared_cpu_map.attr,		\
-	&shared_cpu_list.attr
-
 static struct attribute *default_attrs[] = {
-	DEFAULT_SYSFS_CACHE_ATTRS,
+	&type.attr,
+	&level.attr,
+	&coherency_line_size.attr,
+	&physical_line_partition.attr,
+	&ways_of_associativity.attr,
+	&number_of_sets.attr,
+	&size.attr,
+	&shared_cpu_map.attr,
+	&shared_cpu_list.attr,
 	NULL
 };
 
-static struct attribute *default_l3_attrs[] = {
-	DEFAULT_SYSFS_CACHE_ATTRS,
 #ifdef CONFIG_AMD_NB
-	&cache_disable_0.attr,
-	&cache_disable_1.attr,
+static struct attribute ** __cpuinit amd_l3_attrs(void)
+{
+	static struct attribute **attrs;
+	int n;
+
+	if (attrs)
+		return attrs;
+
+	n = sizeof (default_attrs) / sizeof (struct attribute *);
+
+	if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
+		n += 2;
+
+	attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
+	if (attrs == NULL)
+		return attrs = default_attrs;
+
+	for (n = 0; default_attrs[n]; n++)
+		attrs[n] = default_attrs[n];
+
+	if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
+		attrs[n++] = &cache_disable_0.attr;
+		attrs[n++] = &cache_disable_1.attr;
+	}
+
+	return attrs;
+}
 #endif
-	NULL
-};
 
 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
 {
@@ -1117,11 +1096,11 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 
 		this_leaf = CPUID4_INFO_IDX(cpu, i);
 
-		if (this_leaf->l3 && this_leaf->l3->can_disable)
-			ktype_cache.default_attrs = default_l3_attrs;
-		else
-			ktype_cache.default_attrs = default_attrs;
-
+		ktype_cache.default_attrs = default_attrs;
+#ifdef CONFIG_AMD_NB
+		if (this_leaf->l3)
+			ktype_cache.default_attrs = amd_l3_attrs();
+#endif
 		retval = kobject_init_and_add(&(this_object->kobj),
 					      &ktype_cache,
 					      per_cpu(ici_cache_kobject, cpu),
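
One detail in amd_l3_attrs() above is worth spelling out: n is taken from sizeof(default_attrs), which already counts the terminating NULL slot, so allocating n + 2 entries leaves room for the two cache_disable_* attributes while the zero-filled last slot remains the NULL terminator (the copy loop stops at the NULL). A tiny standalone illustration of that invariant, with invented names and not taken from the patch:

/* Illustration of the sizing arithmetic used by amd_l3_attrs(). */
#include <linux/slab.h>
#include <linux/sysfs.h>

static struct attribute **demo_extend_attrs(struct attribute **base,
					    size_t base_slots, /* incl. NULL */
					    struct attribute *extra0,
					    struct attribute *extra1)
{
	struct attribute **attrs;
	size_t n;

	/* base_slots already counts the NULL slot, so +2 keeps one spare. */
	attrs = kzalloc((base_slots + 2) * sizeof(*attrs), GFP_KERNEL);
	if (!attrs)
		return base;		/* fall back to the unextended array */

	for (n = 0; base[n]; n++)	/* stops before the NULL terminator */
		attrs[n] = base[n];

	attrs[n++] = extra0;
	attrs[n++] = extra1;
	/* attrs[n] is still NULL thanks to kzalloc: the new terminator. */

	return attrs;
}
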
diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
index e7dbde7bfedb..a77971979564 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
@@ -25,6 +25,7 @@
 #include <linux/gfp.h>
 #include <asm/mce.h>
 #include <asm/apic.h>
+#include <asm/nmi.h>
 
 /* Update fake mce registers on current CPU. */
 static void inject_mce(struct mce *m)
@@ -83,7 +84,7 @@ static int mce_raise_notify(struct notifier_block *self,
 	struct die_args *args = (struct die_args *)data;
 	int cpu = smp_processor_id();
 	struct mce *m = &__get_cpu_var(injectm);
-	if (val != DIE_NMI_IPI || !cpumask_test_cpu(cpu, mce_inject_cpumask))
+	if (val != DIE_NMI || !cpumask_test_cpu(cpu, mce_inject_cpumask))
 		return NOTIFY_DONE;
 	cpumask_clear_cpu(cpu, mce_inject_cpumask);
 	if (m->inject_flags & MCJ_EXCEPTION)
@@ -95,7 +96,7 @@ static int mce_raise_notify(struct notifier_block *self,
 
 static struct notifier_block mce_raise_nb = {
 	.notifier_call = mce_raise_notify,
-	.priority = 1000,
+	.priority = NMI_LOCAL_NORMAL_PRIOR,
 };
 
 /* Inject mce on current CPU */
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 7a35b72d7c03..d916183b7f9c 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -326,7 +326,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
 
 static int msr_to_offset(u32 msr)
 {
-	unsigned bank = __get_cpu_var(injectm.bank);
+	unsigned bank = __this_cpu_read(injectm.bank);
 
 	if (msr == rip_msr)
 		return offsetof(struct mce, ip);
@@ -346,7 +346,7 @@ static u64 mce_rdmsrl(u32 msr)
 {
 	u64 v;
 
-	if (__get_cpu_var(injectm).finished) {
+	if (__this_cpu_read(injectm.finished)) {
 		int offset = msr_to_offset(msr);
 
 		if (offset < 0)
@@ -369,7 +369,7 @@ static u64 mce_rdmsrl(u32 msr)
 
 static void mce_wrmsrl(u32 msr, u64 v)
 {
-	if (__get_cpu_var(injectm).finished) {
+	if (__this_cpu_read(injectm.finished)) {
 		int offset = msr_to_offset(msr);
 
 		if (offset >= 0)
@@ -1159,7 +1159,7 @@ static void mce_start_timer(unsigned long data)
 
 	WARN_ON(smp_processor_id() != data);
 
-	if (mce_available(&current_cpu_data)) {
+	if (mce_available(__this_cpu_ptr(&cpu_info))) {
 		machine_check_poll(MCP_TIMESTAMP,
 				&__get_cpu_var(mce_poll_banks));
 	}
@@ -1767,7 +1767,7 @@ static int mce_shutdown(struct sys_device *dev)
 static int mce_resume(struct sys_device *dev)
 {
 	__mcheck_cpu_init_generic();
-	__mcheck_cpu_init_vendor(&current_cpu_data);
+	__mcheck_cpu_init_vendor(__this_cpu_ptr(&cpu_info));
 
 	return 0;
 }
@@ -1775,7 +1775,7 @@ static int mce_resume(struct sys_device *dev)
 static void mce_cpu_restart(void *data)
 {
 	del_timer_sync(&__get_cpu_var(mce_timer));
-	if (!mce_available(&current_cpu_data))
+	if (!mce_available(__this_cpu_ptr(&cpu_info)))
 		return;
 	__mcheck_cpu_init_generic();
 	__mcheck_cpu_init_timer();
@@ -1790,7 +1790,7 @@ static void mce_restart(void)
 /* Toggle features for corrected errors */
 static void mce_disable_ce(void *all)
 {
-	if (!mce_available(&current_cpu_data))
+	if (!mce_available(__this_cpu_ptr(&cpu_info)))
 		return;
 	if (all)
 		del_timer_sync(&__get_cpu_var(mce_timer));
@@ -1799,7 +1799,7 @@ static void mce_disable_ce(void *all)
 
 static void mce_enable_ce(void *all)
 {
-	if (!mce_available(&current_cpu_data))
+	if (!mce_available(__this_cpu_ptr(&cpu_info)))
 		return;
 	cmci_reenable();
 	cmci_recheck();
@@ -2022,7 +2022,7 @@ static void __cpuinit mce_disable_cpu(void *h)
 	unsigned long action = *(unsigned long *)h;
 	int i;
 
-	if (!mce_available(&current_cpu_data))
+	if (!mce_available(__this_cpu_ptr(&cpu_info)))
 		return;
 
 	if (!(action & CPU_TASKS_FROZEN))
@@ -2040,7 +2040,7 @@ static void __cpuinit mce_reenable_cpu(void *h)
 	unsigned long action = *(unsigned long *)h;
 	int i;
 
-	if (!mce_available(&current_cpu_data))
+	if (!mce_available(__this_cpu_ptr(&cpu_info)))
 		return;
 
 	if (!(action & CPU_TASKS_FROZEN))
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 80c482382d5c..5bf2fac52aca 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -31,8 +31,6 @@
 #include <asm/mce.h>
 #include <asm/msr.h>
 
-#define PFX		"mce_threshold: "
-#define VERSION		"version 1.1.1"
 #define NR_BANKS	6
 #define NR_BLOCKS	9
 #define THRESHOLD_MAX	0xFFF
@@ -59,12 +57,6 @@ struct threshold_block {
 	struct list_head	miscj;
 };
 
-/* defaults used early on boot */
-static struct threshold_block threshold_defaults = {
-	.interrupt_enable	= 0,
-	.threshold_limit	= THRESHOLD_MAX,
-};
-
 struct threshold_bank {
 	struct kobject		*kobj;
 	struct threshold_block	*blocks;
@@ -89,50 +81,101 @@ static void amd_threshold_interrupt(void);
 struct thresh_restart {
 	struct threshold_block	*b;
 	int			reset;
+	int			set_lvt_off;
+	int			lvt_off;
 	u16			old_limit;
 };
 
+static int lvt_off_valid(struct threshold_block *b, int apic, u32 lo, u32 hi)
+{
+	int msr = (hi & MASK_LVTOFF_HI) >> 20;
+
+	if (apic < 0) {
+		pr_err(FW_BUG "cpu %d, failed to setup threshold interrupt "
+		       "for bank %d, block %d (MSR%08X=0x%x%08x)\n", b->cpu,
+		       b->bank, b->block, b->address, hi, lo);
+		return 0;
+	}
+
+	if (apic != msr) {
+		pr_err(FW_BUG "cpu %d, invalid threshold interrupt offset %d "
+		       "for bank %d, block %d (MSR%08X=0x%x%08x)\n",
+		       b->cpu, apic, b->bank, b->block, b->address, hi, lo);
+		return 0;
+	}
+
+	return 1;
+};
+
 /* must be called with correct cpu affinity */
 /* Called via smp_call_function_single() */
 static void threshold_restart_bank(void *_tr)
 {
 	struct thresh_restart *tr = _tr;
-	u32 mci_misc_hi, mci_misc_lo;
+	u32 hi, lo;
 
-	rdmsr(tr->b->address, mci_misc_lo, mci_misc_hi);
+	rdmsr(tr->b->address, lo, hi);
 
-	if (tr->b->threshold_limit < (mci_misc_hi & THRESHOLD_MAX))
+	if (tr->b->threshold_limit < (hi & THRESHOLD_MAX))
 		tr->reset = 1;	/* limit cannot be lower than err count */
 
 	if (tr->reset) {		/* reset err count and overflow bit */
-		mci_misc_hi =
-		    (mci_misc_hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) |
+		hi =
+		    (hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) |
 		    (THRESHOLD_MAX - tr->b->threshold_limit);
 	} else if (tr->old_limit) {	/* change limit w/o reset */
-		int new_count = (mci_misc_hi & THRESHOLD_MAX) +
+		int new_count = (hi & THRESHOLD_MAX) +
 		    (tr->old_limit - tr->b->threshold_limit);
 
-		mci_misc_hi = (mci_misc_hi & ~MASK_ERR_COUNT_HI) |
+		hi = (hi & ~MASK_ERR_COUNT_HI) |
 		    (new_count & THRESHOLD_MAX);
 	}
 
+	if (tr->set_lvt_off) {
+		if (lvt_off_valid(tr->b, tr->lvt_off, lo, hi)) {
+			/* set new lvt offset */
+			hi &= ~MASK_LVTOFF_HI;
+			hi |= tr->lvt_off << 20;
+		}
+	}
+
 	tr->b->interrupt_enable ?
-	    (mci_misc_hi = (mci_misc_hi & ~MASK_INT_TYPE_HI) | INT_TYPE_APIC) :
-	    (mci_misc_hi &= ~MASK_INT_TYPE_HI);
+	    (hi = (hi & ~MASK_INT_TYPE_HI) | INT_TYPE_APIC) :
+	    (hi &= ~MASK_INT_TYPE_HI);
 
-	mci_misc_hi |= MASK_COUNT_EN_HI;
-	wrmsr(tr->b->address, mci_misc_lo, mci_misc_hi);
+	hi |= MASK_COUNT_EN_HI;
+	wrmsr(tr->b->address, lo, hi);
+}
+
+static void mce_threshold_block_init(struct threshold_block *b, int offset)
+{
+	struct thresh_restart tr = {
+		.b			= b,
+		.set_lvt_off		= 1,
+		.lvt_off		= offset,
+	};
+
+	b->threshold_limit		= THRESHOLD_MAX;
+	threshold_restart_bank(&tr);
+};
+
+static int setup_APIC_mce(int reserved, int new)
+{
+	if (reserved < 0 && !setup_APIC_eilvt(new, THRESHOLD_APIC_VECTOR,
+					      APIC_EILVT_MSG_FIX, 0))
+		return new;
+
+	return reserved;
 }
 
 /* cpu init entry point, called from mce.c with preempt off */
 void mce_amd_feature_init(struct cpuinfo_x86 *c)
 {
+	struct threshold_block b;
 	unsigned int cpu = smp_processor_id();
 	u32 low = 0, high = 0, address = 0;
 	unsigned int bank, block;
-	struct thresh_restart tr;
-	int lvt_off = -1;
-	u8 offset;
+	int offset = -1;
 
 	for (bank = 0; bank < NR_BANKS; ++bank) {
 		for (block = 0; block < NR_BLOCKS; ++block) {
@@ -163,39 +206,16 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
 			if (shared_bank[bank] && c->cpu_core_id)
 				break;
 #endif
-			offset = (high & MASK_LVTOFF_HI) >> 20;
-			if (lvt_off < 0) {
-				if (setup_APIC_eilvt(offset,
-						     THRESHOLD_APIC_VECTOR,
-						     APIC_EILVT_MSG_FIX, 0)) {
-					pr_err(FW_BUG "cpu %d, failed to "
-					       "setup threshold interrupt "
-					       "for bank %d, block %d "
-					       "(MSR%08X=0x%x%08x)",
-					       smp_processor_id(), bank, block,
-					       address, high, low);
-					continue;
-				}
-				lvt_off = offset;
-			} else if (lvt_off != offset) {
-				pr_err(FW_BUG "cpu %d, invalid threshold "
-				       "interrupt offset %d for bank %d,"
-				       "block %d (MSR%08X=0x%x%08x)",
-				       smp_processor_id(), lvt_off, bank,
-				       block, address, high, low);
-				continue;
-			}
-
-			high &= ~MASK_LVTOFF_HI;
-			high |= lvt_off << 20;
-			wrmsr(address, low, high);
+			offset = setup_APIC_mce(offset,
+						(high & MASK_LVTOFF_HI) >> 20);
 
-			threshold_defaults.address = address;
-			tr.b = &threshold_defaults;
-			tr.reset = 0;
-			tr.old_limit = 0;
-			threshold_restart_bank(&tr);
+			memset(&b, 0, sizeof(b));
+			b.cpu			= cpu;
+			b.bank			= bank;
+			b.block			= block;
+			b.address		= address;
 
+			mce_threshold_block_init(&b, offset);
 			mce_threshold_vector = amd_threshold_interrupt;
 		}
 	}
@@ -298,9 +318,8 @@ store_interrupt_enable(struct threshold_block *b, const char *buf, size_t size)
 
 	b->interrupt_enable = !!new;
 
+	memset(&tr, 0, sizeof(tr));
 	tr.b		= b;
-	tr.reset	= 0;
-	tr.old_limit	= 0;
 
 	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);
 
@@ -321,10 +340,10 @@ store_threshold_limit(struct threshold_block *b, const char *buf, size_t size)
 	if (new < 1)
 		new = 1;
 
+	memset(&tr, 0, sizeof(tr));
 	tr.old_limit = b->threshold_limit;
 	b->threshold_limit = new;
 	tr.b = b;
-	tr.reset = 0;
 
 	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);
 
@@ -603,9 +622,9 @@ static __cpuinit int threshold_create_device(unsigned int cpu)
 			continue;
 		err = threshold_create_bank(cpu, bank);
 		if (err)
-			goto out;
+			return err;
 	}
-out:
+
 	return err;
 }
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index 6fcd0936194f..8694ef56459d 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -130,7 +130,7 @@ void cmci_recheck(void)
 	unsigned long flags;
 	int banks;
 
-	if (!mce_available(&current_cpu_data) || !cmci_supported(&banks))
+	if (!mce_available(__this_cpu_ptr(&cpu_info)) || !cmci_supported(&banks))
 		return;
 	local_irq_save(flags);
 	machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 4b683267eca5..e12246ff5aa6 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -53,8 +53,13 @@ struct thermal_state {
 	struct _thermal_state core_power_limit;
 	struct _thermal_state package_throttle;
 	struct _thermal_state package_power_limit;
+	struct _thermal_state core_thresh0;
+	struct _thermal_state core_thresh1;
 };
 
+/* Callback to handle core threshold interrupts */
+int (*platform_thermal_notify)(__u64 msr_val);
+
 static DEFINE_PER_CPU(struct thermal_state, thermal_state);
 
 static atomic_t therm_throt_en = ATOMIC_INIT(0);
@@ -200,6 +205,22 @@ static int therm_throt_process(bool new_event, int event, int level)
 	return 0;
 }
 
+static int thresh_event_valid(int event)
+{
+	struct _thermal_state *state;
+	unsigned int this_cpu = smp_processor_id();
+	struct thermal_state *pstate = &per_cpu(thermal_state, this_cpu);
+	u64 now = get_jiffies_64();
+
+	state = (event == 0) ? &pstate->core_thresh0 : &pstate->core_thresh1;
+
+	if (time_before64(now, state->next_check))
+		return 0;
+
+	state->next_check = now + CHECK_INTERVAL;
+	return 1;
+}
+
 #ifdef CONFIG_SYSFS
 /* Add/Remove thermal_throttle interface for CPU device: */
 static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev,
@@ -313,6 +334,22 @@ device_initcall(thermal_throttle_init_device);
 #define PACKAGE_THROTTLED	((__u64)2 << 62)
 #define PACKAGE_POWER_LIMIT	((__u64)3 << 62)
 
+static void notify_thresholds(__u64 msr_val)
+{
+	/* check whether the interrupt handler is defined;
+	 * otherwise simply return
+	 */
+	if (!platform_thermal_notify)
+		return;
+
+	/* lower threshold reached */
+	if ((msr_val & THERM_LOG_THRESHOLD0) && thresh_event_valid(0))
+		platform_thermal_notify(msr_val);
+	/* higher threshold reached */
+	if ((msr_val & THERM_LOG_THRESHOLD1) && thresh_event_valid(1))
+		platform_thermal_notify(msr_val);
+}
+
 /* Thermal transition interrupt handler */
 static void intel_thermal_interrupt(void)
 {
@@ -321,6 +358,9 @@ static void intel_thermal_interrupt(void)
 
 	rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
 
+	/* Check for violation of core thermal thresholds*/
+	notify_thresholds(msr_val);
+
 	if (therm_throt_process(msr_val & THERM_STATUS_PROCHOT,
 				THERMAL_THROTTLING_EVENT,
 				CORE_LEVEL) != 0)
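
The new platform_thermal_notify hook above is a bare function pointer: platform code can install a callback and is then invoked from the thermal interrupt whenever one of the two core threshold log bits is set, rate-limited by thresh_event_valid(). A minimal sketch of a consumer follows; it is hypothetical, not part of this patch, and assumes the extern declaration lives in <asm/mce.h> (a modular user may also need the symbol to be exported, which this hunk does not show).

/* Hypothetical built-in consumer of the core-threshold notification hook. */
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/mce.h>	/* assumed home of the platform_thermal_notify declaration */

static int demo_core_thresh_event(__u64 msr_val)
{
	/* msr_val is the raw IA32_THERM_STATUS value read in the handler. */
	pr_info("core thermal threshold crossed, THERM_STATUS=%llx\n",
		(unsigned long long)msr_val);
	return 0;
}

static int __init demo_thermal_register(void)
{
	/* The interrupt path checks the pointer for NULL before calling it. */
	platform_thermal_notify = demo_core_thresh_event;
	return 0;
}
device_initcall(demo_thermal_register);
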
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 6d75b9145b13..9d977a2ea693 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -330,9 +330,6 @@ static bool reserve_pmc_hardware(void)
 {
 	int i;
 
-	if (nmi_watchdog == NMI_LOCAL_APIC)
-		disable_lapic_nmi_watchdog();
-
 	for (i = 0; i < x86_pmu.num_counters; i++) {
 		if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
 			goto perfctr_fail;
@@ -355,9 +352,6 @@ perfctr_fail:
 	for (i--; i >= 0; i--)
 		release_perfctr_nmi(x86_pmu.perfctr + i);
 
-	if (nmi_watchdog == NMI_LOCAL_APIC)
-		enable_lapic_nmi_watchdog();
-
 	return false;
 }
 
@@ -369,9 +363,6 @@ static void release_pmc_hardware(void)
 		release_perfctr_nmi(x86_pmu.perfctr + i);
 		release_evntsel_nmi(x86_pmu.eventsel + i);
 	}
-
-	if (nmi_watchdog == NMI_LOCAL_APIC)
-		enable_lapic_nmi_watchdog();
 }
 
 #else
@@ -384,15 +375,53 @@ static void release_pmc_hardware(void) {}
 static bool check_hw_exists(void)
 {
 	u64 val, val_new = 0;
-	int ret = 0;
+	int i, reg, ret = 0;
+
+	/*
+	 * Check to see if the BIOS enabled any of the counters, if so
+	 * complain and bail.
+	 */
+	for (i = 0; i < x86_pmu.num_counters; i++) {
+		reg = x86_pmu.eventsel + i;
+		ret = rdmsrl_safe(reg, &val);
+		if (ret)
+			goto msr_fail;
+		if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
+			goto bios_fail;
+	}
 
+	if (x86_pmu.num_counters_fixed) {
+		reg = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
+		ret = rdmsrl_safe(reg, &val);
+		if (ret)
+			goto msr_fail;
+		for (i = 0; i < x86_pmu.num_counters_fixed; i++) {
+			if (val & (0x03 << i*4))
+				goto bios_fail;
+		}
+	}
+
+	/*
+	 * Now write a value and read it back to see if it matches,
+	 * this is needed to detect certain hardware emulators (qemu/kvm)
+	 * that don't trap on the MSR access and always return 0s.
+	 */
 	val = 0xabcdUL;
-	ret |= checking_wrmsrl(x86_pmu.perfctr, val);
+	ret = checking_wrmsrl(x86_pmu.perfctr, val);
 	ret |= rdmsrl_safe(x86_pmu.perfctr, &val_new);
 	if (ret || val != val_new)
-		return false;
+		goto msr_fail;
 
 	return true;
+
+bios_fail:
+	printk(KERN_CONT "Broken BIOS detected, using software events only.\n");
+	printk(KERN_ERR FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n", reg, val);
+	return false;
+
+msr_fail:
+	printk(KERN_CONT "Broken PMU hardware detected, using software events only.\n");
+	return false;
 }
 
 static void reserve_ds_buffers(void);
@@ -451,7 +480,7 @@ static int x86_setup_perfctr(struct perf_event *event)
 	struct hw_perf_event *hwc = &event->hw;
 	u64 config;
 
-	if (!hwc->sample_period) {
+	if (!is_sampling_event(event)) {
 		hwc->sample_period = x86_pmu.max_period;
 		hwc->last_period = hwc->sample_period;
 		local64_set(&hwc->period_left, hwc->sample_period);
@@ -968,8 +997,7 @@ x86_perf_event_set_period(struct perf_event *event)
 
 static void x86_pmu_enable_event(struct perf_event *event)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-	if (cpuc->enabled)
+	if (__this_cpu_read(cpu_hw_events.enabled))
 		__x86_pmu_enable_event(&event->hw,
 				       ARCH_PERFMON_EVENTSEL_ENABLE);
 }
@@ -1239,11 +1267,10 @@ perf_event_nmi_handler(struct notifier_block *self,
 
 	switch (cmd) {
 	case DIE_NMI:
-	case DIE_NMI_IPI:
 		break;
 	case DIE_NMIUNKNOWN:
 		this_nmi = percpu_read(irq_stat.__nmi_count);
-		if (this_nmi != __get_cpu_var(pmu_nmi).marked)
+		if (this_nmi != __this_cpu_read(pmu_nmi.marked))
 			/* let the kernel handle the unknown nmi */
 			return NOTIFY_DONE;
 		/*
@@ -1267,8 +1294,8 @@ perf_event_nmi_handler(struct notifier_block *self,
 	this_nmi = percpu_read(irq_stat.__nmi_count);
 	if ((handled > 1) ||
 		/* the next nmi could be a back-to-back nmi */
-	    ((__get_cpu_var(pmu_nmi).marked == this_nmi) &&
-	     (__get_cpu_var(pmu_nmi).handled > 1))) {
+	    ((__this_cpu_read(pmu_nmi.marked) == this_nmi) &&
+	     (__this_cpu_read(pmu_nmi.handled) > 1))) {
 		/*
 		 * We could have two subsequent back-to-back nmis: The
 		 * first handles more than one counter, the 2nd
@@ -1279,8 +1306,8 @@ perf_event_nmi_handler(struct notifier_block *self,
 		 * handling more than one counter. We will mark the
 		 * next (3rd) and then drop it if unhandled.
 		 */
-		__get_cpu_var(pmu_nmi).marked	= this_nmi + 1;
-		__get_cpu_var(pmu_nmi).handled	= handled;
+		__this_cpu_write(pmu_nmi.marked, this_nmi + 1);
+		__this_cpu_write(pmu_nmi.handled, handled);
 	}
 
 	return NOTIFY_STOP;
@@ -1289,7 +1316,7 @@ perf_event_nmi_handler(struct notifier_block *self,
 static __read_mostly struct notifier_block perf_event_nmi_notifier = {
 	.notifier_call		= perf_event_nmi_handler,
 	.next			= NULL,
-	.priority		= 1
+	.priority		= NMI_LOCAL_LOW_PRIOR,
 };
 
 static struct event_constraint unconstrained;
@@ -1362,7 +1389,7 @@ static void __init pmu_check_apic(void)
 	pr_info("no hardware sampling interrupt available.\n");
 }
 
-void __init init_hw_perf_events(void)
+int __init init_hw_perf_events(void)
 {
 	struct event_constraint *c;
 	int err;
@@ -1377,20 +1404,18 @@ void __init init_hw_perf_events(void)
 		err = amd_pmu_init();
 		break;
 	default:
-		return;
+		return 0;
 	}
 	if (err != 0) {
 		pr_cont("no PMU driver, software events only.\n");
-		return;
+		return 0;
 	}
 
 	pmu_check_apic();
 
 	/* sanity check that the hardware exists or is emulated */
-	if (!check_hw_exists()) {
-		pr_cont("Broken PMU hardware detected, software events only.\n");
-		return;
-	}
+	if (!check_hw_exists())
+		return 0;
 
 	pr_cont("%s PMU driver.\n", x86_pmu.name);
 
@@ -1438,9 +1463,12 @@ void __init init_hw_perf_events(void)
 	pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed);
 	pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl);
 
-	perf_pmu_register(&pmu);
+	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
 	perf_cpu_notifier(x86_pmu_notifier);
+
+	return 0;
 }
+early_initcall(init_hw_perf_events);
 
 static inline void x86_pmu_read(struct perf_event *event)
 {
@@ -1454,11 +1482,9 @@ static inline void x86_pmu_read(struct perf_event *event)
  */
 static void x86_pmu_start_txn(struct pmu *pmu)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-
 	perf_pmu_disable(pmu);
-	cpuc->group_flag |= PERF_EVENT_TXN;
-	cpuc->n_txn = 0;
+	__this_cpu_or(cpu_hw_events.group_flag, PERF_EVENT_TXN);
+	__this_cpu_write(cpu_hw_events.n_txn, 0);
 }
 
 /*
@@ -1468,14 +1494,12 @@ static void x86_pmu_start_txn(struct pmu *pmu)
  */
 static void x86_pmu_cancel_txn(struct pmu *pmu)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-
-	cpuc->group_flag &= ~PERF_EVENT_TXN;
+	__this_cpu_and(cpu_hw_events.group_flag, ~PERF_EVENT_TXN);
 	/*
 	 * Truncate the collected events.
 	 */
-	cpuc->n_added -= cpuc->n_txn;
-	cpuc->n_events -= cpuc->n_txn;
+	__this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
+	__this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
 	perf_pmu_enable(pmu);
 }
 
@@ -1686,7 +1710,7 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 
 	perf_callchain_store(entry, regs->ip);
 
-	dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
+	dump_trace(NULL, regs, NULL, &backtrace_ops, entry);
 }
 
 #ifdef CONFIG_COMPAT
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index e421b8cd6944..67e2202a6039 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -1,7 +1,5 @@
 #ifdef CONFIG_CPU_SUP_AMD
 
-static DEFINE_RAW_SPINLOCK(amd_nb_lock);
-
 static __initconst const u64 amd_hw_cache_event_ids
 				[PERF_COUNT_HW_CACHE_MAX]
 				[PERF_COUNT_HW_CACHE_OP_MAX]
@@ -275,7 +273,7 @@ done:
 	return &emptyconstraint;
 }
 
-static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
+static struct amd_nb *amd_alloc_nb(int cpu)
 {
 	struct amd_nb *nb;
 	int i;
@@ -285,7 +283,7 @@ static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
 	if (!nb)
 		return NULL;
 
-	nb->nb_id = nb_id;
+	nb->nb_id = -1;
 
 	/*
 	 * initialize all possible NB constraints
@@ -306,7 +304,7 @@ static int amd_pmu_cpu_prepare(int cpu)
 	if (boot_cpu_data.x86_max_cores < 2)
 		return NOTIFY_OK;
 
-	cpuc->amd_nb = amd_alloc_nb(cpu, -1);
+	cpuc->amd_nb = amd_alloc_nb(cpu);
 	if (!cpuc->amd_nb)
 		return NOTIFY_BAD;
 
@@ -325,8 +323,6 @@ static void amd_pmu_cpu_starting(int cpu)
 	nb_id = amd_get_nb_id(cpu);
 	WARN_ON_ONCE(nb_id == BAD_APICID);
 
-	raw_spin_lock(&amd_nb_lock);
-
 	for_each_online_cpu(i) {
 		nb = per_cpu(cpu_hw_events, i).amd_nb;
 		if (WARN_ON_ONCE(!nb))
@@ -341,8 +337,6 @@ static void amd_pmu_cpu_starting(int cpu)
 
 	cpuc->amd_nb->nb_id = nb_id;
 	cpuc->amd_nb->refcnt++;
-
-	raw_spin_unlock(&amd_nb_lock);
 }
 
 static void amd_pmu_cpu_dead(int cpu)
@@ -354,8 +348,6 @@ static void amd_pmu_cpu_dead(int cpu)
 
 	cpuhw = &per_cpu(cpu_hw_events, cpu);
 
-	raw_spin_lock(&amd_nb_lock);
-
 	if (cpuhw->amd_nb) {
 		struct amd_nb *nb = cpuhw->amd_nb;
 
@@ -364,8 +356,6 @@ static void amd_pmu_cpu_dead(int cpu)
 
 		cpuhw->amd_nb = NULL;
 	}
-
-	raw_spin_unlock(&amd_nb_lock);
 }
 
 static __initconst const struct x86_pmu amd_pmu = {
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index c8f5c088cad1..008835c1d79c 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -649,7 +649,7 @@ static void intel_pmu_enable_event(struct perf_event *event)
 	struct hw_perf_event *hwc = &event->hw;
 
 	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
-		if (!__get_cpu_var(cpu_hw_events).enabled)
+		if (!__this_cpu_read(cpu_hw_events.enabled))
 			return;
 
 		intel_pmu_enable_bts(hwc->config);
@@ -679,7 +679,7 @@ static int intel_pmu_save_and_restart(struct perf_event *event)
 
 static void intel_pmu_reset(void)
 {
-	struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
+	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
 	unsigned long flags;
 	int idx;
 
@@ -816,6 +816,32 @@ static int intel_pmu_hw_config(struct perf_event *event)
 	if (ret)
 		return ret;
 
+	if (event->attr.precise_ip &&
+	    (event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
+		/*
+		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
+		 * (0x003c) so that we can use it with PEBS.
+		 *
+		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
+		 * PEBS capable. However we can use INST_RETIRED.ANY_P
+		 * (0x00c0), which is a PEBS capable event, to get the same
+		 * count.
+		 *
+		 * INST_RETIRED.ANY_P counts the number of cycles that retires
+		 * CNTMASK instructions. By setting CNTMASK to a value (16)
+		 * larger than the maximum number of instructions that can be
+		 * retired per cycle (4) and then inverting the condition, we
+		 * count all cycles that retire 16 or less instructions, which
+		 * is every cycle.
+		 *
+		 * Thereby we gain a PEBS capable cycle counter.
+		 */
+		u64 alt_config = 0x108000c0; /* INST_RETIRED.TOTAL_CYCLES */
+
+		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
+		event->hw.config = alt_config;
+	}
+
 	if (event->attr.type != PERF_TYPE_RAW)
 		return 0;
 
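
The magic constant 0x108000c0 in the hunk above packs the alternative event into the architectural PERFEVTSEL layout: event select 0xC0 (INST_RETIRED.ANY_P), unit mask 0, the invert bit, and a counter mask of 16, i.e. "count cycles retiring fewer instructions than the cmask", which with at most 4 retired per cycle is every cycle. A worked breakdown of the encoding follows; the field positions are the standard architectural ones, and the small userspace helper is illustrative only, not kernel code.

/* Illustrative breakdown of the INST_RETIRED.TOTAL_CYCLES alias. */
#include <stdint.h>
#include <stdio.h>

#define EVTSEL_EVENT(e)  ((uint64_t)(e) & 0xffULL)           /* bits  7:0  */
#define EVTSEL_UMASK(u)  (((uint64_t)(u) & 0xffULL) << 8)    /* bits 15:8  */
#define EVTSEL_INV       (1ULL << 23)                        /* invert cmask */
#define EVTSEL_CMASK(c)  (((uint64_t)(c) & 0xffULL) << 24)   /* bits 31:24 */

int main(void)
{
	/* INST_RETIRED.ANY_P (0xC0), cmask = 16, inverted. */
	uint64_t alt = EVTSEL_EVENT(0xc0) | EVTSEL_UMASK(0x00) |
		       EVTSEL_INV | EVTSEL_CMASK(16);

	printf("0x%llx\n", (unsigned long long)alt);	/* prints 0x108000c0 */
	return 0;
}
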
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c index 81400b93e694..e56b9bfbabd1 100644 --- a/arch/x86/kernel/cpu/perf_event_p4.c +++ b/arch/x86/kernel/cpu/perf_event_p4.c | |||
@@ -753,19 +753,21 @@ out: | |||
753 | 753 | ||
754 | static inline int p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc) | 754 | static inline int p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc) |
755 | { | 755 | { |
756 | int overflow = 0; | 756 | u64 v; |
757 | u32 low, high; | ||
758 | 757 | ||
759 | rdmsr(hwc->config_base + hwc->idx, low, high); | 758 | /* an official way for overflow indication */ |
760 | 759 | rdmsrl(hwc->config_base + hwc->idx, v); | |
761 | /* we need to check high bit for unflagged overflows */ | 760 | if (v & P4_CCCR_OVF) { |
762 | if ((low & P4_CCCR_OVF) || !(high & (1 << 31))) { | 761 | wrmsrl(hwc->config_base + hwc->idx, v & ~P4_CCCR_OVF); |
763 | overflow = 1; | 762 | return 1; |
764 | (void)checking_wrmsrl(hwc->config_base + hwc->idx, | ||
765 | ((u64)low) & ~P4_CCCR_OVF); | ||
766 | } | 763 | } |
767 | 764 | ||
768 | return overflow; | 765 | /* it might be an unflagged overflow */ |
766 | rdmsrl(hwc->event_base + hwc->idx, v); | ||
767 | if (!(v & ARCH_P4_CNTRVAL_MASK)) | ||
768 | return 1; | ||
769 | |||
770 | return 0; | ||
769 | } | 771 | } |
770 | 772 | ||
771 | static void p4_pmu_disable_pebs(void) | 773 | static void p4_pmu_disable_pebs(void) |
@@ -1152,9 +1154,9 @@ static __initconst const struct x86_pmu p4_pmu = { | |||
1152 | */ | 1154 | */ |
1153 | .num_counters = ARCH_P4_MAX_CCCR, | 1155 | .num_counters = ARCH_P4_MAX_CCCR, |
1154 | .apic = 1, | 1156 | .apic = 1, |
1155 | .cntval_bits = 40, | 1157 | .cntval_bits = ARCH_P4_CNTRVAL_BITS, |
1156 | .cntval_mask = (1ULL << 40) - 1, | 1158 | .cntval_mask = ARCH_P4_CNTRVAL_MASK, |
1157 | .max_period = (1ULL << 39) - 1, | 1159 | .max_period = (1ULL << (ARCH_P4_CNTRVAL_BITS - 1)) - 1, |
1158 | .hw_config = p4_hw_config, | 1160 | .hw_config = p4_hw_config, |
1159 | .schedule_events = p4_pmu_schedule_events, | 1161 | .schedule_events = p4_pmu_schedule_events, |
1160 | /* | 1162 | /* |
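
The second hunk above only replaces hard-coded literals with the ARCH_P4_CNTRVAL_* macros. A small sketch of the arithmetic, assuming ARCH_P4_CNTRVAL_BITS is 40 as the old literals imply; the macro definitions are restated locally for the example.

/* Sketch: the replaced literals follow directly from a 40-bit
 * counter width.
 */
#include <stdio.h>
#include <stdint.h>

#define ARCH_P4_CNTRVAL_BITS	40
#define ARCH_P4_CNTRVAL_MASK	((1ULL << ARCH_P4_CNTRVAL_BITS) - 1)

int main(void)
{
	uint64_t cntval_mask = ARCH_P4_CNTRVAL_MASK;
	uint64_t max_period  = (1ULL << (ARCH_P4_CNTRVAL_BITS - 1)) - 1;

	/* matches the old literals: (1ULL << 40) - 1 and (1ULL << 39) - 1 */
	printf("mask=%#llx max_period=%#llx\n",
	       (unsigned long long)cntval_mask,
	       (unsigned long long)max_period);
	return 0;
}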
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c index d9f4ff8fcd69..d5a236615501 100644 --- a/arch/x86/kernel/cpu/perfctr-watchdog.c +++ b/arch/x86/kernel/cpu/perfctr-watchdog.c | |||
@@ -16,32 +16,12 @@ | |||
16 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
17 | #include <linux/bitops.h> | 17 | #include <linux/bitops.h> |
18 | #include <linux/smp.h> | 18 | #include <linux/smp.h> |
19 | #include <linux/nmi.h> | 19 | #include <asm/nmi.h> |
20 | #include <linux/kprobes.h> | 20 | #include <linux/kprobes.h> |
21 | 21 | ||
22 | #include <asm/apic.h> | 22 | #include <asm/apic.h> |
23 | #include <asm/perf_event.h> | 23 | #include <asm/perf_event.h> |
24 | 24 | ||
25 | struct nmi_watchdog_ctlblk { | ||
26 | unsigned int cccr_msr; | ||
27 | unsigned int perfctr_msr; /* the MSR to reset in NMI handler */ | ||
28 | unsigned int evntsel_msr; /* the MSR to select the events to handle */ | ||
29 | }; | ||
30 | |||
31 | /* Interface defining a CPU specific perfctr watchdog */ | ||
32 | struct wd_ops { | ||
33 | int (*reserve)(void); | ||
34 | void (*unreserve)(void); | ||
35 | int (*setup)(unsigned nmi_hz); | ||
36 | void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz); | ||
37 | void (*stop)(void); | ||
38 | unsigned perfctr; | ||
39 | unsigned evntsel; | ||
40 | u64 checkbit; | ||
41 | }; | ||
42 | |||
43 | static const struct wd_ops *wd_ops; | ||
44 | |||
45 | /* | 25 | /* |
46 | * this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's | 26 | * this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's |
47 | * offset from MSR_P4_BSU_ESCR0. | 27 | * offset from MSR_P4_BSU_ESCR0. |
@@ -60,8 +40,6 @@ static const struct wd_ops *wd_ops; | |||
60 | static DECLARE_BITMAP(perfctr_nmi_owner, NMI_MAX_COUNTER_BITS); | 40 | static DECLARE_BITMAP(perfctr_nmi_owner, NMI_MAX_COUNTER_BITS); |
61 | static DECLARE_BITMAP(evntsel_nmi_owner, NMI_MAX_COUNTER_BITS); | 41 | static DECLARE_BITMAP(evntsel_nmi_owner, NMI_MAX_COUNTER_BITS); |
62 | 42 | ||
63 | static DEFINE_PER_CPU(struct nmi_watchdog_ctlblk, nmi_watchdog_ctlblk); | ||
64 | |||
65 | /* converts an msr to an appropriate reservation bit */ | 43 | /* converts an msr to an appropriate reservation bit */ |
66 | static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr) | 44 | static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr) |
67 | { | 45 | { |
@@ -172,623 +150,3 @@ void release_evntsel_nmi(unsigned int msr) | |||
172 | clear_bit(counter, evntsel_nmi_owner); | 150 | clear_bit(counter, evntsel_nmi_owner); |
173 | } | 151 | } |
174 | EXPORT_SYMBOL(release_evntsel_nmi); | 152 | EXPORT_SYMBOL(release_evntsel_nmi); |
175 | |||
176 | void disable_lapic_nmi_watchdog(void) | ||
177 | { | ||
178 | BUG_ON(nmi_watchdog != NMI_LOCAL_APIC); | ||
179 | |||
180 | if (atomic_read(&nmi_active) <= 0) | ||
181 | return; | ||
182 | |||
183 | on_each_cpu(stop_apic_nmi_watchdog, NULL, 1); | ||
184 | |||
185 | if (wd_ops) | ||
186 | wd_ops->unreserve(); | ||
187 | |||
188 | BUG_ON(atomic_read(&nmi_active) != 0); | ||
189 | } | ||
190 | |||
191 | void enable_lapic_nmi_watchdog(void) | ||
192 | { | ||
193 | BUG_ON(nmi_watchdog != NMI_LOCAL_APIC); | ||
194 | |||
195 | /* are we already enabled */ | ||
196 | if (atomic_read(&nmi_active) != 0) | ||
197 | return; | ||
198 | |||
199 | /* are we lapic aware */ | ||
200 | if (!wd_ops) | ||
201 | return; | ||
202 | if (!wd_ops->reserve()) { | ||
203 | printk(KERN_ERR "NMI watchdog: cannot reserve perfctrs\n"); | ||
204 | return; | ||
205 | } | ||
206 | |||
207 | on_each_cpu(setup_apic_nmi_watchdog, NULL, 1); | ||
208 | touch_nmi_watchdog(); | ||
209 | } | ||
210 | |||
211 | /* | ||
212 | * Activate the NMI watchdog via the local APIC. | ||
213 | */ | ||
214 | |||
215 | static unsigned int adjust_for_32bit_ctr(unsigned int hz) | ||
216 | { | ||
217 | u64 counter_val; | ||
218 | unsigned int retval = hz; | ||
219 | |||
220 | /* | ||
221 | * On Intel CPUs with P6/ARCH_PERFMON only 32 bits of the counter | ||
222 | * are writable; the higher bits are sign-extended from bit 31. | ||
223 | * So we can only program the counter with 31-bit values, and | ||
224 | * bit 31 must be set so that bits 32 and up are 1 as well. | ||
225 | * Find an nmi_hz for which the count fits in 31 bits. | ||
226 | */ | ||
227 | counter_val = (u64)cpu_khz * 1000; | ||
228 | do_div(counter_val, retval); | ||
229 | if (counter_val > 0x7fffffffULL) { | ||
230 | u64 count = (u64)cpu_khz * 1000; | ||
231 | do_div(count, 0x7fffffffUL); | ||
232 | retval = count + 1; | ||
233 | } | ||
234 | return retval; | ||
235 | } | ||
236 | |||
237 | static void write_watchdog_counter(unsigned int perfctr_msr, | ||
238 | const char *descr, unsigned nmi_hz) | ||
239 | { | ||
240 | u64 count = (u64)cpu_khz * 1000; | ||
241 | |||
242 | do_div(count, nmi_hz); | ||
243 | if (descr) | ||
244 | pr_debug("setting %s to -0x%08Lx\n", descr, count); | ||
245 | wrmsrl(perfctr_msr, 0 - count); | ||
246 | } | ||
247 | |||
248 | static void write_watchdog_counter32(unsigned int perfctr_msr, | ||
249 | const char *descr, unsigned nmi_hz) | ||
250 | { | ||
251 | u64 count = (u64)cpu_khz * 1000; | ||
252 | |||
253 | do_div(count, nmi_hz); | ||
254 | if (descr) | ||
255 | pr_debug("setting %s to -0x%08Lx\n", descr, count); | ||
256 | wrmsr(perfctr_msr, (u32)(-count), 0); | ||
257 | } | ||
258 | |||
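
The two removed helpers above preload the perfctr with the negative of cpu_khz * 1000 / nmi_hz, and adjust_for_32bit_ctr() raises nmi_hz when that preload would not fit in the 31 programmable bits of a 32-bit counter. A minimal userspace sketch of that arithmetic, with an assumed cpu_khz value (the kernel derives cpu_khz from calibration):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t cpu_khz = 2400000;	/* assumed: 2.4 GHz CPU */
	unsigned nmi_hz = 1;

	uint64_t count = cpu_khz * 1000 / nmi_hz;

	/* 32-bit counters: keep the (negative) preload within 31 bits */
	if (count > 0x7fffffffULL)
		nmi_hz = (unsigned)(cpu_khz * 1000 / 0x7fffffffULL) + 1;

	count = cpu_khz * 1000 / nmi_hz;
	printf("nmi_hz=%u preload=-%#llx\n",
	       nmi_hz, (unsigned long long)count);
	return 0;
}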
259 | /* | ||
260 | * AMD K7/K8/Family10h/Family11h support. | ||
261 | * AMD keeps this interface nicely stable so there is not much variety | ||
262 | */ | ||
263 | #define K7_EVNTSEL_ENABLE (1 << 22) | ||
264 | #define K7_EVNTSEL_INT (1 << 20) | ||
265 | #define K7_EVNTSEL_OS (1 << 17) | ||
266 | #define K7_EVNTSEL_USR (1 << 16) | ||
267 | #define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING 0x76 | ||
268 | #define K7_NMI_EVENT K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING | ||
269 | |||
270 | static int setup_k7_watchdog(unsigned nmi_hz) | ||
271 | { | ||
272 | unsigned int perfctr_msr, evntsel_msr; | ||
273 | unsigned int evntsel; | ||
274 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); | ||
275 | |||
276 | perfctr_msr = wd_ops->perfctr; | ||
277 | evntsel_msr = wd_ops->evntsel; | ||
278 | |||
279 | wrmsrl(perfctr_msr, 0UL); | ||
280 | |||
281 | evntsel = K7_EVNTSEL_INT | ||
282 | | K7_EVNTSEL_OS | ||
283 | | K7_EVNTSEL_USR | ||
284 | | K7_NMI_EVENT; | ||
285 | |||
286 | /* setup the timer */ | ||
287 | wrmsr(evntsel_msr, evntsel, 0); | ||
288 | write_watchdog_counter(perfctr_msr, "K7_PERFCTR0", nmi_hz); | ||
289 | |||
290 | /* initialize the wd struct before enabling */ | ||
291 | wd->perfctr_msr = perfctr_msr; | ||
292 | wd->evntsel_msr = evntsel_msr; | ||
293 | wd->cccr_msr = 0; /* unused */ | ||
294 | |||
295 | /* ok, everything is initialized, announce that we're set */ | ||
296 | cpu_nmi_set_wd_enabled(); | ||
297 | |||
298 | apic_write(APIC_LVTPC, APIC_DM_NMI); | ||
299 | evntsel |= K7_EVNTSEL_ENABLE; | ||
300 | wrmsr(evntsel_msr, evntsel, 0); | ||
301 | |||
302 | return 1; | ||
303 | } | ||
304 | |||
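
For reference, a small sketch (not part of the patch) of the event-select word that setup_k7_watchdog() programs, first armed and then with the enable bit set; the bit definitions are restated from the removed code above.

#include <stdio.h>

#define K7_EVNTSEL_ENABLE	(1 << 22)
#define K7_EVNTSEL_INT		(1 << 20)
#define K7_EVNTSEL_OS		(1 << 17)
#define K7_EVNTSEL_USR		(1 << 16)
#define K7_NMI_EVENT		0x76	/* CYCLES_PROCESSOR_IS_RUNNING */

int main(void)
{
	unsigned evntsel = K7_EVNTSEL_INT | K7_EVNTSEL_OS |
			   K7_EVNTSEL_USR | K7_NMI_EVENT;

	printf("armed evntsel=%#x\n", evntsel);
	printf("enabled evntsel=%#x\n", evntsel | K7_EVNTSEL_ENABLE);
	return 0;
}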
305 | static void single_msr_stop_watchdog(void) | ||
306 | { | ||
307 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); | ||
308 | |||
309 | wrmsr(wd->evntsel_msr, 0, 0); | ||
310 | } | ||
311 | |||
312 | static int single_msr_reserve(void) | ||
313 | { | ||
314 | if (!reserve_perfctr_nmi(wd_ops->perfctr)) | ||
315 | return 0; | ||
316 | |||
317 | if (!reserve_evntsel_nmi(wd_ops->evntsel)) { | ||
318 | release_perfctr_nmi(wd_ops->perfctr); | ||
319 | return 0; | ||
320 | } | ||
321 | return 1; | ||
322 | } | ||
323 | |||
324 | static void single_msr_unreserve(void) | ||
325 | { | ||
326 | release_evntsel_nmi(wd_ops->evntsel); | ||
327 | release_perfctr_nmi(wd_ops->perfctr); | ||
328 | } | ||
329 | |||
330 | static void __kprobes | ||
331 | single_msr_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz) | ||
332 | { | ||
333 | /* start the cycle over again */ | ||
334 | write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz); | ||
335 | } | ||
336 | |||
337 | static const struct wd_ops k7_wd_ops = { | ||
338 | .reserve = single_msr_reserve, | ||
339 | .unreserve = single_msr_unreserve, | ||
340 | .setup = setup_k7_watchdog, | ||
341 | .rearm = single_msr_rearm, | ||
342 | .stop = single_msr_stop_watchdog, | ||
343 | .perfctr = MSR_K7_PERFCTR0, | ||
344 | .evntsel = MSR_K7_EVNTSEL0, | ||
345 | .checkbit = 1ULL << 47, | ||
346 | }; | ||
347 | |||
348 | /* | ||
349 | * Intel Model 6 (PPro+,P2,P3,P-M,Core1) | ||
350 | */ | ||
351 | #define P6_EVNTSEL0_ENABLE (1 << 22) | ||
352 | #define P6_EVNTSEL_INT (1 << 20) | ||
353 | #define P6_EVNTSEL_OS (1 << 17) | ||
354 | #define P6_EVNTSEL_USR (1 << 16) | ||
355 | #define P6_EVENT_CPU_CLOCKS_NOT_HALTED 0x79 | ||
356 | #define P6_NMI_EVENT P6_EVENT_CPU_CLOCKS_NOT_HALTED | ||
357 | |||
358 | static int setup_p6_watchdog(unsigned nmi_hz) | ||
359 | { | ||
360 | unsigned int perfctr_msr, evntsel_msr; | ||
361 | unsigned int evntsel; | ||
362 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); | ||
363 | |||
364 | perfctr_msr = wd_ops->perfctr; | ||
365 | evntsel_msr = wd_ops->evntsel; | ||
366 | |||
367 | /* KVM doesn't implement this MSR */ | ||
368 | if (wrmsr_safe(perfctr_msr, 0, 0) < 0) | ||
369 | return 0; | ||
370 | |||
371 | evntsel = P6_EVNTSEL_INT | ||
372 | | P6_EVNTSEL_OS | ||
373 | | P6_EVNTSEL_USR | ||
374 | | P6_NMI_EVENT; | ||
375 | |||
376 | /* setup the timer */ | ||
377 | wrmsr(evntsel_msr, evntsel, 0); | ||
378 | nmi_hz = adjust_for_32bit_ctr(nmi_hz); | ||
379 | write_watchdog_counter32(perfctr_msr, "P6_PERFCTR0", nmi_hz); | ||
380 | |||
381 | /* initialize the wd struct before enabling */ | ||
382 | wd->perfctr_msr = perfctr_msr; | ||
383 | wd->evntsel_msr = evntsel_msr; | ||
384 | wd->cccr_msr = 0; /* unused */ | ||
385 | |||
386 | /* ok, everything is initialized, announce that we're set */ | ||
387 | cpu_nmi_set_wd_enabled(); | ||
388 | |||
389 | apic_write(APIC_LVTPC, APIC_DM_NMI); | ||
390 | evntsel |= P6_EVNTSEL0_ENABLE; | ||
391 | wrmsr(evntsel_msr, evntsel, 0); | ||
392 | |||
393 | return 1; | ||
394 | } | ||
395 | |||
396 | static void __kprobes p6_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz) | ||
397 | { | ||
398 | /* | ||
399 | * P6-based Pentium M needs to re-unmask | ||
400 | * the APIC vector, but doing so doesn't | ||
401 | * hurt other P6 variants. | ||
402 | * ArchPerfmon/Core Duo also needs this. | ||
403 | */ | ||
404 | apic_write(APIC_LVTPC, APIC_DM_NMI); | ||
405 | |||
406 | /* P6/ARCH_PERFMON has 32 bit counter write */ | ||
407 | write_watchdog_counter32(wd->perfctr_msr, NULL, nmi_hz); | ||
408 | } | ||
409 | |||
410 | static const struct wd_ops p6_wd_ops = { | ||
411 | .reserve = single_msr_reserve, | ||
412 | .unreserve = single_msr_unreserve, | ||
413 | .setup = setup_p6_watchdog, | ||
414 | .rearm = p6_rearm, | ||
415 | .stop = single_msr_stop_watchdog, | ||
416 | .perfctr = MSR_P6_PERFCTR0, | ||
417 | .evntsel = MSR_P6_EVNTSEL0, | ||
418 | .checkbit = 1ULL << 39, | ||
419 | }; | ||
420 | |||
421 | /* | ||
422 | * Intel P4 performance counters. | ||
423 | * By far the most complicated of all. | ||
424 | */ | ||
425 | #define MSR_P4_MISC_ENABLE_PERF_AVAIL (1 << 7) | ||
426 | #define P4_ESCR_EVENT_SELECT(N) ((N) << 25) | ||
427 | #define P4_ESCR_OS (1 << 3) | ||
428 | #define P4_ESCR_USR (1 << 2) | ||
429 | #define P4_CCCR_OVF_PMI0 (1 << 26) | ||
430 | #define P4_CCCR_OVF_PMI1 (1 << 27) | ||
431 | #define P4_CCCR_THRESHOLD(N) ((N) << 20) | ||
432 | #define P4_CCCR_COMPLEMENT (1 << 19) | ||
433 | #define P4_CCCR_COMPARE (1 << 18) | ||
434 | #define P4_CCCR_REQUIRED (3 << 16) | ||
435 | #define P4_CCCR_ESCR_SELECT(N) ((N) << 13) | ||
436 | #define P4_CCCR_ENABLE (1 << 12) | ||
437 | #define P4_CCCR_OVF (1 << 31) | ||
438 | |||
439 | #define P4_CONTROLS 18 | ||
440 | static unsigned int p4_controls[18] = { | ||
441 | MSR_P4_BPU_CCCR0, | ||
442 | MSR_P4_BPU_CCCR1, | ||
443 | MSR_P4_BPU_CCCR2, | ||
444 | MSR_P4_BPU_CCCR3, | ||
445 | MSR_P4_MS_CCCR0, | ||
446 | MSR_P4_MS_CCCR1, | ||
447 | MSR_P4_MS_CCCR2, | ||
448 | MSR_P4_MS_CCCR3, | ||
449 | MSR_P4_FLAME_CCCR0, | ||
450 | MSR_P4_FLAME_CCCR1, | ||
451 | MSR_P4_FLAME_CCCR2, | ||
452 | MSR_P4_FLAME_CCCR3, | ||
453 | MSR_P4_IQ_CCCR0, | ||
454 | MSR_P4_IQ_CCCR1, | ||
455 | MSR_P4_IQ_CCCR2, | ||
456 | MSR_P4_IQ_CCCR3, | ||
457 | MSR_P4_IQ_CCCR4, | ||
458 | MSR_P4_IQ_CCCR5, | ||
459 | }; | ||
460 | /* | ||
461 | * Set up IQ_COUNTER0 to behave like a clock, by having IQ_CCCR0 filter | ||
462 | * CRU_ESCR0 (with any non-null event selector) through a complemented | ||
463 | * max threshold. [IA32-Vol3, Section 14.9.9] | ||
464 | */ | ||
465 | static int setup_p4_watchdog(unsigned nmi_hz) | ||
466 | { | ||
467 | unsigned int perfctr_msr, evntsel_msr, cccr_msr; | ||
468 | unsigned int evntsel, cccr_val; | ||
469 | unsigned int misc_enable, dummy; | ||
470 | unsigned int ht_num; | ||
471 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); | ||
472 | |||
473 | rdmsr(MSR_IA32_MISC_ENABLE, misc_enable, dummy); | ||
474 | if (!(misc_enable & MSR_P4_MISC_ENABLE_PERF_AVAIL)) | ||
475 | return 0; | ||
476 | |||
477 | #ifdef CONFIG_SMP | ||
478 | /* detect which hyperthread we are on */ | ||
479 | if (smp_num_siblings == 2) { | ||
480 | unsigned int ebx, apicid; | ||
481 | |||
482 | ebx = cpuid_ebx(1); | ||
483 | apicid = (ebx >> 24) & 0xff; | ||
484 | ht_num = apicid & 1; | ||
485 | } else | ||
486 | #endif | ||
487 | ht_num = 0; | ||
488 | |||
489 | /* | ||
490 | * Performance counters are shared resources, so | ||
491 | * assign each hyperthread its own set. | ||
492 | * (Re-using the ESCR0 register seems safe | ||
493 | * and keeps the cccr_val the same.) | ||
494 | */ | ||
495 | if (!ht_num) { | ||
496 | /* logical cpu 0 */ | ||
497 | perfctr_msr = MSR_P4_IQ_PERFCTR0; | ||
498 | evntsel_msr = MSR_P4_CRU_ESCR0; | ||
499 | cccr_msr = MSR_P4_IQ_CCCR0; | ||
500 | cccr_val = P4_CCCR_OVF_PMI0 | P4_CCCR_ESCR_SELECT(4); | ||
501 | |||
502 | /* | ||
503 | * On the kdump kernel, or in similar situations, other | ||
504 | * performance counter registers may still be set up to | ||
505 | * interrupt, and they'll keep interrupting forever because | ||
506 | * of the P4_CCCR_OVF quirk. So ACK all the pending | ||
507 | * interrupts and disable all the registers here, before | ||
508 | * re-enabling NMI delivery. See p4_rearm() for details on | ||
509 | * the P4_CCCR_OVF quirk. | ||
510 | */ | ||
511 | if (reset_devices) { | ||
512 | unsigned int low, high; | ||
513 | int i; | ||
514 | |||
515 | for (i = 0; i < P4_CONTROLS; i++) { | ||
516 | rdmsr(p4_controls[i], low, high); | ||
517 | low &= ~(P4_CCCR_ENABLE | P4_CCCR_OVF); | ||
518 | wrmsr(p4_controls[i], low, high); | ||
519 | } | ||
520 | } | ||
521 | } else { | ||
522 | /* logical cpu 1 */ | ||
523 | perfctr_msr = MSR_P4_IQ_PERFCTR1; | ||
524 | evntsel_msr = MSR_P4_CRU_ESCR0; | ||
525 | cccr_msr = MSR_P4_IQ_CCCR1; | ||
526 | |||
527 | /* Pentium 4 D processors don't support P4_CCCR_OVF_PMI1 */ | ||
528 | if (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_mask == 4) | ||
529 | cccr_val = P4_CCCR_OVF_PMI0; | ||
530 | else | ||
531 | cccr_val = P4_CCCR_OVF_PMI1; | ||
532 | cccr_val |= P4_CCCR_ESCR_SELECT(4); | ||
533 | } | ||
534 | |||
535 | evntsel = P4_ESCR_EVENT_SELECT(0x3F) | ||
536 | | P4_ESCR_OS | ||
537 | | P4_ESCR_USR; | ||
538 | |||
539 | cccr_val |= P4_CCCR_THRESHOLD(15) | ||
540 | | P4_CCCR_COMPLEMENT | ||
541 | | P4_CCCR_COMPARE | ||
542 | | P4_CCCR_REQUIRED; | ||
543 | |||
544 | wrmsr(evntsel_msr, evntsel, 0); | ||
545 | wrmsr(cccr_msr, cccr_val, 0); | ||
546 | write_watchdog_counter(perfctr_msr, "P4_IQ_COUNTER0", nmi_hz); | ||
547 | |||
548 | wd->perfctr_msr = perfctr_msr; | ||
549 | wd->evntsel_msr = evntsel_msr; | ||
550 | wd->cccr_msr = cccr_msr; | ||
551 | |||
552 | /* ok, everything is initialized, announce that we're set */ | ||
553 | cpu_nmi_set_wd_enabled(); | ||
554 | |||
555 | apic_write(APIC_LVTPC, APIC_DM_NMI); | ||
556 | cccr_val |= P4_CCCR_ENABLE; | ||
557 | wrmsr(cccr_msr, cccr_val, 0); | ||
558 | return 1; | ||
559 | } | ||
560 | |||
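
A small sketch (not part of the patch) of the ESCR/CCCR values that setup_p4_watchdog() composes for logical CPU 0; the macro definitions are restated from the removed code above.

#include <stdio.h>

#define P4_ESCR_EVENT_SELECT(N)	((N) << 25)
#define P4_ESCR_OS		(1 << 3)
#define P4_ESCR_USR		(1 << 2)
#define P4_CCCR_OVF_PMI0	(1 << 26)
#define P4_CCCR_THRESHOLD(N)	((N) << 20)
#define P4_CCCR_COMPLEMENT	(1 << 19)
#define P4_CCCR_COMPARE		(1 << 18)
#define P4_CCCR_REQUIRED	(3 << 16)
#define P4_CCCR_ESCR_SELECT(N)	((N) << 13)
#define P4_CCCR_ENABLE		(1 << 12)

int main(void)
{
	unsigned evntsel = P4_ESCR_EVENT_SELECT(0x3F) | P4_ESCR_OS | P4_ESCR_USR;
	unsigned cccr = P4_CCCR_OVF_PMI0 | P4_CCCR_ESCR_SELECT(4) |
			P4_CCCR_THRESHOLD(15) | P4_CCCR_COMPLEMENT |
			P4_CCCR_COMPARE | P4_CCCR_REQUIRED;

	printf("CRU_ESCR0=%#x IQ_CCCR0=%#x (enabled: %#x)\n",
	       evntsel, cccr, cccr | P4_CCCR_ENABLE);
	return 0;
}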
561 | static void stop_p4_watchdog(void) | ||
562 | { | ||
563 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); | ||
564 | wrmsr(wd->cccr_msr, 0, 0); | ||
565 | wrmsr(wd->evntsel_msr, 0, 0); | ||
566 | } | ||
567 | |||
568 | static int p4_reserve(void) | ||
569 | { | ||
570 | if (!reserve_perfctr_nmi(MSR_P4_IQ_PERFCTR0)) | ||
571 | return 0; | ||
572 | #ifdef CONFIG_SMP | ||
573 | if (smp_num_siblings > 1 && !reserve_perfctr_nmi(MSR_P4_IQ_PERFCTR1)) | ||
574 | goto fail1; | ||
575 | #endif | ||
576 | if (!reserve_evntsel_nmi(MSR_P4_CRU_ESCR0)) | ||
577 | goto fail2; | ||
578 | /* RED-PEN why is ESCR1 not reserved here? */ | ||
579 | return 1; | ||
580 | fail2: | ||
581 | #ifdef CONFIG_SMP | ||
582 | if (smp_num_siblings > 1) | ||
583 | release_perfctr_nmi(MSR_P4_IQ_PERFCTR1); | ||
584 | fail1: | ||
585 | #endif | ||
586 | release_perfctr_nmi(MSR_P4_IQ_PERFCTR0); | ||
587 | return 0; | ||
588 | } | ||
589 | |||
590 | static void p4_unreserve(void) | ||
591 | { | ||
592 | #ifdef CONFIG_SMP | ||
593 | if (smp_num_siblings > 1) | ||
594 | release_perfctr_nmi(MSR_P4_IQ_PERFCTR1); | ||
595 | #endif | ||
596 | release_evntsel_nmi(MSR_P4_CRU_ESCR0); | ||
597 | release_perfctr_nmi(MSR_P4_IQ_PERFCTR0); | ||
598 | } | ||
599 | |||
600 | static void __kprobes p4_rearm(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz) | ||
601 | { | ||
602 | unsigned dummy; | ||
603 | /* | ||
604 | * P4 quirks: | ||
605 | * - An overflown perfctr will assert its interrupt | ||
606 | * until the OVF flag in its CCCR is cleared. | ||
607 | * - LVTPC is masked on interrupt and must be | ||
608 | * unmasked by the LVTPC handler. | ||
609 | */ | ||
610 | rdmsrl(wd->cccr_msr, dummy); | ||
611 | dummy &= ~P4_CCCR_OVF; | ||
612 | wrmsrl(wd->cccr_msr, dummy); | ||
613 | apic_write(APIC_LVTPC, APIC_DM_NMI); | ||
614 | /* start the cycle over again */ | ||
615 | write_watchdog_counter(wd->perfctr_msr, NULL, nmi_hz); | ||
616 | } | ||
617 | |||
618 | static const struct wd_ops p4_wd_ops = { | ||
619 | .reserve = p4_reserve, | ||
620 | .unreserve = p4_unreserve, | ||
621 | .setup = setup_p4_watchdog, | ||
622 | .rearm = p4_rearm, | ||
623 | .stop = stop_p4_watchdog, | ||
624 | /* RED-PEN this is wrong for the other sibling */ | ||
625 | .perfctr = MSR_P4_BPU_PERFCTR0, | ||
626 | .evntsel = MSR_P4_BSU_ESCR0, | ||
627 | .checkbit = 1ULL << 39, | ||
628 | }; | ||
629 | |||
630 | /* | ||
631 | * Watchdog using the Intel architected PerfMon. | ||
632 | * Used for Core2 and hopefully all future Intel CPUs. | ||
633 | */ | ||
634 | #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL | ||
635 | #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK | ||
636 | |||
637 | static struct wd_ops intel_arch_wd_ops; | ||
638 | |||
639 | static int setup_intel_arch_watchdog(unsigned nmi_hz) | ||
640 | { | ||
641 | unsigned int ebx; | ||
642 | union cpuid10_eax eax; | ||
643 | unsigned int unused; | ||
644 | unsigned int perfctr_msr, evntsel_msr; | ||
645 | unsigned int evntsel; | ||
646 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); | ||
647 | |||
648 | /* | ||
649 | * Check whether the Architectural PerfMon supports the | ||
650 | * Unhalted Core Cycles event. | ||
651 | * NOTE: a cleared corresponding bit in ebx indicates the event is present. | ||
652 | */ | ||
653 | cpuid(10, &(eax.full), &ebx, &unused, &unused); | ||
654 | if ((eax.split.mask_length < | ||
655 | (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX+1)) || | ||
656 | (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT)) | ||
657 | return 0; | ||
658 | |||
659 | perfctr_msr = wd_ops->perfctr; | ||
660 | evntsel_msr = wd_ops->evntsel; | ||
661 | |||
662 | wrmsrl(perfctr_msr, 0UL); | ||
663 | |||
664 | evntsel = ARCH_PERFMON_EVENTSEL_INT | ||
665 | | ARCH_PERFMON_EVENTSEL_OS | ||
666 | | ARCH_PERFMON_EVENTSEL_USR | ||
667 | | ARCH_PERFMON_NMI_EVENT_SEL | ||
668 | | ARCH_PERFMON_NMI_EVENT_UMASK; | ||
669 | |||
670 | /* setup the timer */ | ||
671 | wrmsr(evntsel_msr, evntsel, 0); | ||
672 | nmi_hz = adjust_for_32bit_ctr(nmi_hz); | ||
673 | write_watchdog_counter32(perfctr_msr, "INTEL_ARCH_PERFCTR0", nmi_hz); | ||
674 | |||
675 | wd->perfctr_msr = perfctr_msr; | ||
676 | wd->evntsel_msr = evntsel_msr; | ||
677 | wd->cccr_msr = 0; /* unused */ | ||
678 | |||
679 | /* ok, everything is initialized, announce that we're set */ | ||
680 | cpu_nmi_set_wd_enabled(); | ||
681 | |||
682 | apic_write(APIC_LVTPC, APIC_DM_NMI); | ||
683 | evntsel |= ARCH_PERFMON_EVENTSEL_ENABLE; | ||
684 | wrmsr(evntsel_msr, evntsel, 0); | ||
685 | intel_arch_wd_ops.checkbit = 1ULL << (eax.split.bit_width - 1); | ||
686 | return 1; | ||
687 | } | ||
688 | |||
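
The CPUID check at the top of setup_intel_arch_watchdog() can be mirrored from userspace. Below is a minimal sketch using GCC's <cpuid.h> (an assumption of the example; the kernel uses its own cpuid() helper): leaf 0xA reports the architectural PerfMon version in EAX[7:0], the number of valid event-availability bits in EAX[31:24], and a cleared bit 0 of EBX means the Unhalted Core Cycles event is present.

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0xA, &eax, &ebx, &ecx, &edx))
		return 1;	/* leaf not supported */

	unsigned int mask_length = (eax >> 24) & 0xff; /* valid EBX bits */
	int cycles_present = mask_length >= 1 && !(ebx & 1);

	printf("arch perfmon v%u, unhalted core cycles %s\n",
	       eax & 0xff, cycles_present ? "present" : "absent");
	return 0;
}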
689 | static struct wd_ops intel_arch_wd_ops __read_mostly = { | ||
690 | .reserve = single_msr_reserve, | ||
691 | .unreserve = single_msr_unreserve, | ||
692 | .setup = setup_intel_arch_watchdog, | ||
693 | .rearm = p6_rearm, | ||
694 | .stop = single_msr_stop_watchdog, | ||
695 | .perfctr = MSR_ARCH_PERFMON_PERFCTR1, | ||
696 | .evntsel = MSR_ARCH_PERFMON_EVENTSEL1, | ||
697 | }; | ||
698 | |||
699 | static void probe_nmi_watchdog(void) | ||
700 | { | ||
701 | switch (boot_cpu_data.x86_vendor) { | ||
702 | case X86_VENDOR_AMD: | ||
703 | if (boot_cpu_data.x86 == 6 || | ||
704 | (boot_cpu_data.x86 >= 0xf && boot_cpu_data.x86 <= 0x15)) | ||
705 | wd_ops = &k7_wd_ops; | ||
706 | return; | ||
707 | case X86_VENDOR_INTEL: | ||
708 | /* Work around CPUs where perfctr1 doesn't have a working enable | ||
709 | * bit as described in the following errata: | ||
710 | * AE49 Core Duo and Intel Core Solo 65 nm | ||
711 | * AN49 Intel Pentium Dual-Core | ||
712 | * AF49 Dual-Core Intel Xeon Processor LV | ||
713 | */ | ||
714 | if ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 14) || | ||
715 | ((boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 15 && | ||
716 | boot_cpu_data.x86_mask == 4))) { | ||
717 | intel_arch_wd_ops.perfctr = MSR_ARCH_PERFMON_PERFCTR0; | ||
718 | intel_arch_wd_ops.evntsel = MSR_ARCH_PERFMON_EVENTSEL0; | ||
719 | } | ||
720 | if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) { | ||
721 | wd_ops = &intel_arch_wd_ops; | ||
722 | break; | ||
723 | } | ||
724 | switch (boot_cpu_data.x86) { | ||
725 | case 6: | ||
726 | if (boot_cpu_data.x86_model > 13) | ||
727 | return; | ||
728 | |||
729 | wd_ops = &p6_wd_ops; | ||
730 | break; | ||
731 | case 15: | ||
732 | wd_ops = &p4_wd_ops; | ||
733 | break; | ||
734 | default: | ||
735 | return; | ||
736 | } | ||
737 | break; | ||
738 | } | ||
739 | } | ||
740 | |||
741 | /* Interface to nmi.c */ | ||
742 | |||
743 | int lapic_watchdog_init(unsigned nmi_hz) | ||
744 | { | ||
745 | if (!wd_ops) { | ||
746 | probe_nmi_watchdog(); | ||
747 | if (!wd_ops) { | ||
748 | printk(KERN_INFO "NMI watchdog: CPU not supported\n"); | ||
749 | return -1; | ||
750 | } | ||
751 | |||
752 | if (!wd_ops->reserve()) { | ||
753 | printk(KERN_ERR | ||
754 | "NMI watchdog: cannot reserve perfctrs\n"); | ||
755 | return -1; | ||
756 | } | ||
757 | } | ||
758 | |||
759 | if (!(wd_ops->setup(nmi_hz))) { | ||
760 | printk(KERN_ERR "Cannot setup NMI watchdog on CPU %d\n", | ||
761 | raw_smp_processor_id()); | ||
762 | return -1; | ||
763 | } | ||
764 | |||
765 | return 0; | ||
766 | } | ||
767 | |||
768 | void lapic_watchdog_stop(void) | ||
769 | { | ||
770 | if (wd_ops) | ||
771 | wd_ops->stop(); | ||
772 | } | ||
773 | |||
774 | unsigned lapic_adjust_nmi_hz(unsigned hz) | ||
775 | { | ||
776 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); | ||
777 | if (wd->perfctr_msr == MSR_P6_PERFCTR0 || | ||
778 | wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR1) | ||
779 | hz = adjust_for_32bit_ctr(hz); | ||
780 | return hz; | ||
781 | } | ||
782 | |||
783 | int __kprobes lapic_wd_event(unsigned nmi_hz) | ||
784 | { | ||
785 | struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk); | ||
786 | u64 ctr; | ||
787 | |||
788 | rdmsrl(wd->perfctr_msr, ctr); | ||
789 | if (ctr & wd_ops->checkbit) /* perfctr still running? */ | ||
790 | return 0; | ||
791 | |||
792 | wd_ops->rearm(wd, nmi_hz); | ||
793 | return 1; | ||
794 | } | ||
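
lapic_wd_event() decides whether the NMI was the watchdog's by looking at wd_ops->checkbit: the counter is preloaded with a negative value, so its top implemented bit stays set until it wraps, at which point the handler rearms it. A minimal sketch of that test, with the bit positions restated from the wd_ops tables above (47 for K7, 39 for P6/P4/arch perfmon):

#include <stdio.h>
#include <stdint.h>

static int counter_overflowed(uint64_t ctr, unsigned checkbit_pos)
{
	/* high bit cleared => the (negative) preload has wrapped */
	return !(ctr & (1ULL << checkbit_pos));
}

int main(void)
{
	uint64_t preload = (1ULL << 48) - 1000;	/* K7: -1000 in a 48-bit counter */

	printf("just armed: overflowed=%d\n", counter_overflowed(preload, 47));
	printf("after wrap: overflowed=%d\n", counter_overflowed(5, 47));
	return 0;
}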