author		Jiri Olsa <jolsa@redhat.com>	2015-04-21 11:26:23 -0400
committer	Ingo Molnar <mingo@kernel.org>	2015-04-22 02:24:33 -0400
commit		3b6e042188994466ec257b71296b5f85b894dcd9 (patch)
tree		58a3bafd4bfe1a26345afbb4f98ab1c8b617694c /arch/x86/kernel
parent		0c99241c93b8060441f3c8434848e54b5338f922 (diff)
perf/x86/intel: Add cpu_(prepare|starting|dying) for core_pmu
The core_pmu does not define the cpu_* callbacks, which handle
allocation of the 'struct cpu_hw_events::shared_regs' data,
initialization of the debug store, and the PMU_FL_EXCL_CNTRS counters.
While this probably won't happen on bare metal, a virtual CPU can
define x86_pmu.extra_regs together with PMU version 1 and thus
end up using core_pmu -> using shared_regs data without it being
allocated. That could lead to the following panic:
BUG: unable to handle kernel NULL pointer dereference at (null)
IP: [<ffffffff8152cd4f>] _spin_lock_irqsave+0x1f/0x40
SNIP
[<ffffffff81024bd9>] __intel_shared_reg_get_constraints+0x69/0x1e0
[<ffffffff81024deb>] intel_get_event_constraints+0x9b/0x180
[<ffffffff8101e815>] x86_schedule_events+0x75/0x1d0
[<ffffffff810586dc>] ? check_preempt_curr+0x7c/0x90
[<ffffffff810649fe>] ? try_to_wake_up+0x24e/0x3e0
[<ffffffff81064ba2>] ? default_wake_function+0x12/0x20
[<ffffffff8109eb16>] ? autoremove_wake_function+0x16/0x40
[<ffffffff810577e9>] ? __wake_up_common+0x59/0x90
[<ffffffff811a9517>] ? __d_lookup+0xa7/0x150
[<ffffffff8119db5f>] ? do_lookup+0x9f/0x230
[<ffffffff811a993a>] ? dput+0x9a/0x150
[<ffffffff8119c8f5>] ? path_to_nameidata+0x25/0x60
[<ffffffff8119e90a>] ? __link_path_walk+0x7da/0x1000
[<ffffffff8101d8f9>] ? x86_pmu_add+0xb9/0x170
[<ffffffff8101d7a7>] x86_pmu_commit_txn+0x67/0xc0
[<ffffffff811b07b0>] ? mntput_no_expire+0x30/0x110
[<ffffffff8119c731>] ? path_put+0x31/0x40
[<ffffffff8107c297>] ? current_fs_time+0x27/0x30
[<ffffffff8117d170>] ? mem_cgroup_get_reclaim_stat_from_page+0x20/0x70
[<ffffffff8111b7aa>] group_sched_in+0x13a/0x170
[<ffffffff81014a29>] ? sched_clock+0x9/0x10
[<ffffffff8111bac8>] ctx_sched_in+0x2e8/0x330
[<ffffffff8111bb7b>] perf_event_sched_in+0x6b/0xb0
[<ffffffff8111bc36>] perf_event_context_sched_in+0x76/0xc0
[<ffffffff8111eb3b>] perf_event_comm+0x1bb/0x2e0
[<ffffffff81195ee9>] set_task_comm+0x69/0x80
[<ffffffff81195fe1>] setup_new_exec+0xe1/0x2e0
[<ffffffff811ea68e>] load_elf_binary+0x3ce/0x1ab0
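The (null) address in the trace follows directly from the missing
allocation: the constraint code dereferences a member of a NULL
'shared_regs' pointer, so the very first lock acquisition touches an
address just past zero. A minimal userspace sketch of that failure
mode (not the kernel source; the struct and function names here only
loosely mirror the kernel's):

#include <stddef.h>
#include <pthread.h>

struct er_account {
	pthread_mutex_t	lock;	/* stands in for the kernel's raw spinlock */
	int		ref;
};

struct intel_shared_regs {
	struct er_account regs[4];
};

struct cpu_hw_events {
	struct intel_shared_regs *shared_regs;	/* never allocated for core_pmu */
};

/* Loosely mirrors the shape of __intel_shared_reg_get_constraints(). */
static void shared_reg_get_constraints(struct cpu_hw_events *cpuc, int idx)
{
	/* With shared_regs == NULL, era is a small offset from address 0. */
	struct er_account *era = &cpuc->shared_regs->regs[idx];

	pthread_mutex_lock(&era->lock);	/* faults here, like _spin_lock_irqsave */
	era->ref++;
	pthread_mutex_unlock(&era->lock);
}

int main(void)
{
	struct cpu_hw_events cpuc = { .shared_regs = NULL };

	shared_reg_get_constraints(&cpuc, 0);	/* SIGSEGV near (null) */
	return 0;
}

Built with 'gcc -pthread demo.c', this segfaults at a near-NULL
address, matching the _spin_lock_irqsave fault at (null) above.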
Add cpu_(prepare|starting|dying) callbacks for core_pmu so that
shared_regs data gets allocated for core_pmu as well. AFAICS there's
no harm in initializing the debug store and PMU_FL_EXCL_CNTRS for
core_pmu either.
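For context, intel_pmu_cpu_prepare() only allocates shared_regs when
the PMU actually declares extra registers, which is why reusing it for
core_pmu costs nothing on PMUs that don't need it. A rough paraphrase
of the callback's shape around this kernel version (not the exact
source):

static int intel_pmu_cpu_prepare(int cpu)
{
	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);

	/* Only PMUs that declare extra/LBR-select registers pay the cost. */
	if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
		cpuc->shared_regs = allocate_shared_regs(cpu);
		if (!cpuc->shared_regs)
			return NOTIFY_BAD;
	}

	return NOTIFY_OK;
}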
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/r/20150421152623.GC13169@krava.redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--	arch/x86/kernel/cpu/perf_event_intel.c	66
1 file changed, 38 insertions(+), 28 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 219d3fb423a1..960e85de13fb 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2533,34 +2533,6 @@ ssize_t intel_event_sysfs_show(char *page, u64 config)
 	return x86_event_sysfs_show(page, config, event);
 }
 
-static __initconst const struct x86_pmu core_pmu = {
-	.name			= "core",
-	.handle_irq		= x86_pmu_handle_irq,
-	.disable_all		= x86_pmu_disable_all,
-	.enable_all		= core_pmu_enable_all,
-	.enable			= core_pmu_enable_event,
-	.disable		= x86_pmu_disable_event,
-	.hw_config		= x86_pmu_hw_config,
-	.schedule_events	= x86_schedule_events,
-	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
-	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
-	.event_map		= intel_pmu_event_map,
-	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
-	.apic			= 1,
-	/*
-	 * Intel PMCs cannot be accessed sanely above 32 bit width,
-	 * so we install an artificial 1<<31 period regardless of
-	 * the generic event period:
-	 */
-	.max_period		= (1ULL << 31) - 1,
-	.get_event_constraints	= intel_get_event_constraints,
-	.put_event_constraints	= intel_put_event_constraints,
-	.event_constraints	= intel_core_event_constraints,
-	.guest_get_msrs		= core_guest_get_msrs,
-	.format_attrs		= intel_arch_formats_attr,
-	.events_sysfs_show	= intel_event_sysfs_show,
-};
-
 struct intel_shared_regs *allocate_shared_regs(int cpu)
 {
 	struct intel_shared_regs *regs;
@@ -2743,6 +2715,44 @@ static struct attribute *intel_arch3_formats_attr[] = {
 	NULL,
 };
 
+static __initconst const struct x86_pmu core_pmu = {
+	.name			= "core",
+	.handle_irq		= x86_pmu_handle_irq,
+	.disable_all		= x86_pmu_disable_all,
+	.enable_all		= core_pmu_enable_all,
+	.enable			= core_pmu_enable_event,
+	.disable		= x86_pmu_disable_event,
+	.hw_config		= x86_pmu_hw_config,
+	.schedule_events	= x86_schedule_events,
+	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
+	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
+	.event_map		= intel_pmu_event_map,
+	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
+	.apic			= 1,
+	/*
+	 * Intel PMCs cannot be accessed sanely above 32 bit width,
+	 * so we install an artificial 1<<31 period regardless of
+	 * the generic event period:
+	 */
+	.max_period		= (1ULL << 31) - 1,
+	.get_event_constraints	= intel_get_event_constraints,
+	.put_event_constraints	= intel_put_event_constraints,
+	.event_constraints	= intel_core_event_constraints,
+	.guest_get_msrs		= core_guest_get_msrs,
+	.format_attrs		= intel_arch_formats_attr,
+	.events_sysfs_show	= intel_event_sysfs_show,
+
+	/*
+	 * Virtual (or funny metal) CPU can define x86_pmu.extra_regs
+	 * together with PMU version 1 and thus be using core_pmu with
+	 * shared_regs. We need following callbacks here to allocate
+	 * it properly.
+	 */
+	.cpu_prepare		= intel_pmu_cpu_prepare,
+	.cpu_starting		= intel_pmu_cpu_starting,
+	.cpu_dying		= intel_pmu_cpu_dying,
+};
+
 static __initconst const struct x86_pmu intel_pmu = {
 	.name			= "Intel",
 	.handle_irq		= intel_pmu_handle_irq,