Diffstat (limited to 'arch/x86/kernel/cpu/perf_event_intel.c')
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c | 14 ++++++++------
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 166546ec6aef..9e3f5d6e3d20 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -5,6 +5,8 @@
  * among events on a single PMU.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/stddef.h>
 #include <linux/types.h>
 #include <linux/init.h>
@@ -1000,7 +1002,7 @@ static void intel_pmu_reset(void)
 
 	local_irq_save(flags);
 
-	printk("clearing PMU state on CPU#%d\n", smp_processor_id());
+	pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 		checking_wrmsrl(x86_pmu_config_addr(idx), 0ull);
@@ -1638,14 +1640,14 @@ static __init void intel_clovertown_quirk(void)
	 * But taken together it might just make sense to not enable PEBS on
	 * these chips.
	 */
-	printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
+	pr_warn("PEBS disabled due to CPU errata\n");
 	x86_pmu.pebs = 0;
 	x86_pmu.pebs_constraints = NULL;
 }
 
 static __init void intel_sandybridge_quirk(void)
 {
-	printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
+	pr_warn("PEBS disabled due to CPU errata\n");
 	x86_pmu.pebs = 0;
 	x86_pmu.pebs_constraints = NULL;
 }
@@ -1667,8 +1669,8 @@ static __init void intel_arch_events_quirk(void)
 	/* disable event that reported as not presend by cpuid */
 	for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
 		intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
-		printk(KERN_WARNING "CPUID marked event: \'%s\' unavailable\n",
-		       intel_arch_events_map[bit].name);
+		pr_warn("CPUID marked event: \'%s\' unavailable\n",
+			intel_arch_events_map[bit].name);
 	}
 }
 
@@ -1687,7 +1689,7 @@ static __init void intel_nehalem_quirk(void)
 		intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
 		ebx.split.no_branch_misses_retired = 0;
 		x86_pmu.events_maskl = ebx.full;
-		printk(KERN_INFO "CPU erratum AAJ80 worked around\n");
+		pr_info("CPU erratum AAJ80 worked around\n");
 	}
 }
 
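
What the conversion above buys: the kernel's pr_info()/pr_warn() helpers expand their format string through pr_fmt(), and pr_fmt() only falls back to a plain "fmt" default if the file has not defined it before <linux/printk.h> is pulled in. Placing the KBUILD_MODNAME define ahead of the #include block therefore prefixes every message from this file with the module name and attaches an explicit log level, which the bare printk() calls being removed did not. The snippet below is a minimal userspace sketch of that macro mechanism, not kernel code: KBUILD_MODNAME is hard-coded and pr_info()/pr_warn() are stubbed with stdio purely for illustration.

/*
 * Userspace sketch of the pr_fmt() mechanism used in the patch above.
 * NOT kernel code: KBUILD_MODNAME is hard-coded and pr_info()/pr_warn()
 * are simplified stand-ins, only to show how a single pr_fmt() define
 * prefixes every message emitted from this file.
 */
#include <stdio.h>

#define KBUILD_MODNAME "perf_event_intel"    /* provided by kbuild in the kernel */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt  /* same shape as the define added by the patch */

/* simplified stand-ins for the kernel's pr_info()/pr_warn() */
#define pr_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)
#define pr_warn(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	/* prints: perf_event_intel: clearing PMU state on CPU#0 */
	pr_info("clearing PMU state on CPU#%d\n", 0);

	/* prints: perf_event_intel: PEBS disabled due to CPU errata */
	pr_warn("PEBS disabled due to CPU errata\n");

	return 0;
}

Built with gcc or clang (the ##__VA_ARGS__ extension matches kernel usage), both messages come out prefixed with "perf_event_intel: ", mirroring the prefix the pr_fmt() define adds to these lines in the kernel log.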