Diffstat (limited to 'arch/x86/kernel/cpu/perf_counter.c')
-rw-r--r--   arch/x86/kernel/cpu/perf_counter.c | 29 ++++++++++++++---------------
1 file changed, 14 insertions(+), 15 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 30e7ebf78275..ef1936a871aa 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -58,8 +58,8 @@ const int max_intel_perfmon_events = ARRAY_SIZE(intel_perfmon_event_map);
  */
 int hw_perf_counter_init(struct perf_counter *counter)
 {
+	struct perf_counter_hw_event *hw_event = &counter->hw_event;
 	struct hw_perf_counter *hwc = &counter->hw;
-	u32 hw_event_type = counter->event.hw_event_type;
 
 	if (unlikely(!perf_counters_initialized))
 		return -EINVAL;
@@ -77,14 +77,14 @@ int hw_perf_counter_init(struct perf_counter *counter)
 	hwc->nmi = 0;
 	if (capable(CAP_SYS_ADMIN)) {
 		hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
-		if (hw_event_type & PERF_COUNT_NMI)
+		if (hw_event->nmi)
 			hwc->nmi = 1;
 	}
 
 	hwc->config_base = MSR_ARCH_PERFMON_EVENTSEL0;
 	hwc->counter_base = MSR_ARCH_PERFMON_PERFCTR0;
 
-	hwc->irq_period = counter->event.hw_event_period;
+	hwc->irq_period = hw_event->irq_period;
 	/*
 	 * Intel PMCs cannot be accessed sanely above 32 bit width,
 	 * so we install an artificial 1<<31 period regardless of
@@ -93,21 +93,20 @@ int hw_perf_counter_init(struct perf_counter *counter)
 	if (!hwc->irq_period)
 		hwc->irq_period = 0x7FFFFFFF;
 
-	hwc->next_count = -((s32) hwc->irq_period);
+	hwc->next_count = -(s32)hwc->irq_period;
 
 	/*
 	 * Raw event type provide the config in the event structure
 	 */
-	hw_event_type &= ~PERF_COUNT_NMI;
-	if (hw_event_type == PERF_COUNT_RAW) {
-		hwc->config |= counter->event.hw_raw_ctrl;
+	if (hw_event->raw) {
+		hwc->config |= hw_event->type;
 	} else {
-		if (hw_event_type >= max_intel_perfmon_events)
+		if (hw_event->type >= max_intel_perfmon_events)
 			return -EINVAL;
 		/*
 		 * The generic map:
 		 */
-		hwc->config |= intel_perfmon_event_map[hw_event_type];
+		hwc->config |= intel_perfmon_event_map[hw_event->type];
 	}
 	counter->wakeup_pending = 0;
 
@@ -354,7 +353,7 @@ perf_handle_group(struct perf_counter *leader, u64 *status, u64 *overflown)
 	int bit;
 
 	list_for_each_entry(counter, &ctx->counters, list) {
-		if (counter->record_type != PERF_RECORD_SIMPLE ||
+		if (counter->hw_event.record_type != PERF_RECORD_SIMPLE ||
 		    counter == leader)
 			continue;
 
@@ -372,7 +371,7 @@ perf_handle_group(struct perf_counter *leader, u64 *status, u64 *overflown)
 				perf_save_and_restart(counter);
 			}
 		}
-		perf_store_irq_data(leader, counter->event.hw_event_type);
+		perf_store_irq_data(leader, counter->hw_event.type);
 		perf_store_irq_data(leader, atomic64_counter_read(counter));
 	}
 }
@@ -410,7 +409,7 @@ again:
 
 		perf_save_and_restart(counter);
 
-		switch (counter->record_type) {
+		switch (counter->hw_event.record_type) {
 		case PERF_RECORD_SIMPLE:
 			continue;
 		case PERF_RECORD_IRQ:
@@ -418,7 +417,7 @@ again:
 			break;
 		case PERF_RECORD_GROUP:
 			perf_store_irq_data(counter,
-					counter->event.hw_event_type);
+					counter->hw_event.type);
 			perf_store_irq_data(counter,
 					atomic64_counter_read(counter));
 			perf_handle_group(counter, &status, &ack);