Diffstat (limited to 'arch/x86/kernel/cpu/perf_counter.c')

 arch/x86/kernel/cpu/perf_counter.c | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 0a7f3bea2dc6..30e7ebf78275 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -56,9 +56,10 @@ const int max_intel_perfmon_events = ARRAY_SIZE(intel_perfmon_event_map);
 /*
  * Setup the hardware configuration for a given hw_event_type
  */
-int hw_perf_counter_init(struct perf_counter *counter, s32 hw_event_type)
+int hw_perf_counter_init(struct perf_counter *counter)
 {
         struct hw_perf_counter *hwc = &counter->hw;
+        u32 hw_event_type = counter->event.hw_event_type;
 
         if (unlikely(!perf_counters_initialized))
                 return -EINVAL;
@@ -83,7 +84,7 @@ int hw_perf_counter_init(struct perf_counter *counter, s32 hw_event_type)
         hwc->config_base = MSR_ARCH_PERFMON_EVENTSEL0;
         hwc->counter_base = MSR_ARCH_PERFMON_PERFCTR0;
 
-        hwc->irq_period = counter->__irq_period;
+        hwc->irq_period = counter->event.hw_event_period;
         /*
          * Intel PMCs cannot be accessed sanely above 32 bit width,
          * so we install an artificial 1<<31 period regardless of
@@ -95,21 +96,19 @@ int hw_perf_counter_init(struct perf_counter *counter, s32 hw_event_type)
         hwc->next_count = -((s32) hwc->irq_period);
 
         /*
-         * Negative event types mean raw encoded event+umask values:
+         * Raw event type provide the config in the event structure
          */
-        if (hw_event_type < 0) {
-                counter->hw_event_type = -hw_event_type;
-                counter->hw_event_type &= ~PERF_COUNT_NMI;
+        hw_event_type &= ~PERF_COUNT_NMI;
+        if (hw_event_type == PERF_COUNT_RAW) {
+                hwc->config |= counter->event.hw_raw_ctrl;
         } else {
-                hw_event_type &= ~PERF_COUNT_NMI;
                 if (hw_event_type >= max_intel_perfmon_events)
                         return -EINVAL;
                 /*
                  * The generic map:
                  */
-                counter->hw_event_type = intel_perfmon_event_map[hw_event_type];
+                hwc->config |= intel_perfmon_event_map[hw_event_type];
         }
-        hwc->config |= counter->hw_event_type;
         counter->wakeup_pending = 0;
 
         return 0;
@@ -373,7 +372,7 @@ perf_handle_group(struct perf_counter *leader, u64 *status, u64 *overflown)
                                 perf_save_and_restart(counter);
                         }
                 }
-                perf_store_irq_data(leader, counter->hw_event_type);
+                perf_store_irq_data(leader, counter->event.hw_event_type);
                 perf_store_irq_data(leader, atomic64_counter_read(counter));
         }
 }
@@ -418,7 +417,8 @@ again:
                 perf_store_irq_data(counter, instruction_pointer(regs));
                 break;
         case PERF_RECORD_GROUP:
-                perf_store_irq_data(counter, counter->hw_event_type);
+                perf_store_irq_data(counter,
+                                    counter->event.hw_event_type);
                 perf_store_irq_data(counter,
                                     atomic64_counter_read(counter));
                 perf_handle_group(counter, &status, &ack);
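
Taken together, the hunks move the whole event description into counter->event: the type, the sampling period (hw_event_period) and the raw control word (hw_raw_ctrl) now travel in one structure, so hw_perf_counter_init() no longer needs its extra s32 argument. The functional core is the branch that assembles hwc->config. Below is a minimal standalone C sketch of that branch; the PERF_COUNT_* values and the two event-map entries are placeholders for illustration, not this tree's real definitions.

/*
 * Standalone sketch (not kernel code) of the raw-vs-generic branch
 * that the third hunk introduces in hw_perf_counter_init().
 */
#include <stdint.h>
#include <stdio.h>

#define PERF_COUNT_NMI  (1U << 30)      /* placeholder: "use NMI" flag */
#define PERF_COUNT_RAW  (1U << 29)      /* placeholder: raw-encoded type */

static const uint64_t intel_perfmon_event_map[] = {
        0x003c,         /* placeholder: cycles */
        0x00c0,         /* placeholder: instructions */
};
static const uint32_t max_intel_perfmon_events =
        sizeof(intel_perfmon_event_map) / sizeof(intel_perfmon_event_map[0]);

/* Mirrors the new logic: strip the NMI flag once, then pick raw or map. */
static int assemble_config(uint32_t hw_event_type, uint64_t hw_raw_ctrl,
                           uint64_t *config)
{
        hw_event_type &= ~PERF_COUNT_NMI;

        if (hw_event_type == PERF_COUNT_RAW) {
                /* Raw events carry the full event+umask encoding: */
                *config |= hw_raw_ctrl;
        } else {
                if (hw_event_type >= max_intel_perfmon_events)
                        return -1;      /* -EINVAL in the kernel */
                /* Generic events go through the CPU-specific map: */
                *config |= intel_perfmon_event_map[hw_event_type];
        }
        return 0;
}

int main(void)
{
        uint64_t config = 0;

        /* A generic event with the NMI flag set still maps normally: */
        if (assemble_config(1 | PERF_COUNT_NMI, 0, &config) == 0)
                printf("config: %#llx\n", (unsigned long long)config);
        return 0;
}

Note the ordering change the sketch preserves: the old code stripped PERF_COUNT_NMI in two places (once for negated raw types, once for generic ones), while the new code strips it once up front, so an NMI-flagged raw event and a plain raw event take the same path.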