author    Ingo Molnar <mingo@elte.hu>    2008-12-03 04:39:53 -0500
committer Ingo Molnar <mingo@elte.hu>    2008-12-08 09:47:15 -0500
commit    241771ef016b5c0c83cd7a4372a74321c973c1e6 (patch)
tree      5893d72f1721af34daee82f27449bd35c9f65363 /arch/x86/kernel/cpu/perf_counter.c
parent    e7bc62b6b3aeaa8849f8383e0cfb7ca6c003adc6 (diff)
performance counters: x86 support

Implement performance counters for x86 Intel CPUs. It's simplified right
now: the PERFMON CPU feature is assumed, which is available in Core2 and
later Intel CPUs. The design is flexible to be extended to more CPU types
as well.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/cpu/perf_counter.c')
-rw-r--r--  arch/x86/kernel/cpu/perf_counter.c  571

1 file changed, 571 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
new file mode 100644
index 000000000000..82440cbed0e6
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -0,0 +1,571 @@
/*
 * Performance counter x86 architecture code
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008 Red Hat, Inc., Ingo Molnar
 *
 *  For licensing details see kernel-base/COPYING
 */

#include <linux/perf_counter.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/sched.h>

#include <asm/intel_arch_perfmon.h>
#include <asm/apic.h>

static bool perf_counters_initialized __read_mostly;

/*
 * Number of (generic) HW counters:
 */
static int nr_hw_counters __read_mostly;
static u32 perf_counter_mask __read_mostly;

/* No support for fixed function counters yet */

#define MAX_HW_COUNTERS 8

struct cpu_hw_counters {
        struct perf_counter     *counters[MAX_HW_COUNTERS];
        unsigned long           used[BITS_TO_LONGS(MAX_HW_COUNTERS)];
        int                     enable_all;
};

/*
 * Intel PerfMon v3. Used on Core2 and later.
 */
static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters);

const int intel_perfmon_event_map[] =
{
        [PERF_COUNT_CYCLES]                     = 0x003c,
        [PERF_COUNT_INSTRUCTIONS]               = 0x00c0,
        [PERF_COUNT_CACHE_REFERENCES]           = 0x4f2e,
        [PERF_COUNT_CACHE_MISSES]               = 0x412e,
        [PERF_COUNT_BRANCH_INSTRUCTIONS]        = 0x00c4,
        [PERF_COUNT_BRANCH_MISSES]              = 0x00c5,
};
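
/*
 * The map entries use the architectural EVENTSEL encoding: the low byte
 * is the event select code and bits 8-15 are the unit mask. 0x4f2e and
 * 0x412e are both event 0x2e (last-level cache references/misses), with
 * umasks 0x4f and 0x41. A raw encoding can also be requested directly,
 * by passing its negated value as the event type - see
 * hw_perf_counter_init() below:
 */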

const int max_intel_perfmon_events = ARRAY_SIZE(intel_perfmon_event_map);

/*
 * Set up the hardware configuration for a given hw_event_type
 */
int hw_perf_counter_init(struct perf_counter *counter, s32 hw_event_type)
{
        struct hw_perf_counter *hwc = &counter->hw;

        if (unlikely(!perf_counters_initialized))
                return -EINVAL;

        /*
         * Count user events, and generate PMC IRQs:
         * (keep 'enabled' bit clear for now)
         */
        hwc->config = ARCH_PERFMON_EVENTSEL_USR | ARCH_PERFMON_EVENTSEL_INT;

        /*
         * If privileged enough, count OS events too, and allow
         * NMI events as well:
         */
        hwc->nmi = 0;
        if (capable(CAP_SYS_ADMIN)) {
                hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
                if (hw_event_type & PERF_COUNT_NMI)
                        hwc->nmi = 1;
        }

        hwc->config_base  = MSR_ARCH_PERFMON_EVENTSEL0;
        hwc->counter_base = MSR_ARCH_PERFMON_PERFCTR0;

        hwc->irq_period   = counter->__irq_period;
        /*
         * Intel PMCs cannot be accessed sanely above 32-bit width,
         * so we install an artificial period just below 2^31,
         * regardless of the generic counter period:
         */
        if (!hwc->irq_period)
                hwc->irq_period = 0x7FFFFFFF;

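        /*
         * Start the counter at -irq_period: it counts upward and raises
         * its overflow interrupt when it crosses zero, i.e. after
         * irq_period events:
         */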
        hwc->next_count = -((s32) hwc->irq_period);

        /*
         * Negative event types mean raw encoded event+umask values:
         */
        if (hw_event_type < 0) {
                counter->hw_event_type = -hw_event_type;
                counter->hw_event_type &= ~PERF_COUNT_NMI;
        } else {
                hw_event_type &= ~PERF_COUNT_NMI;
                if (hw_event_type >= max_intel_perfmon_events)
                        return -EINVAL;
                /*
                 * The generic map:
                 */
                counter->hw_event_type = intel_perfmon_event_map[hw_event_type];
        }
        hwc->config |= counter->hw_event_type;
        counter->wakeup_pending = 0;

        return 0;
}

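/*
 * Each bit in MSR_CORE_PERF_GLOBAL_CTRL enables the generic counter with
 * the same index, so writing perf_counter_mask enables all of them:
 */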
static void __hw_perf_enable_all(void)
{
        wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, perf_counter_mask, 0);
}

void hw_perf_enable_all(void)
{
        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);

        cpuc->enable_all = 1;
        __hw_perf_enable_all();
}

void hw_perf_disable_all(void)
{
        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);

        cpuc->enable_all = 0;
        wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0);
}

static DEFINE_PER_CPU(u64, prev_next_count[MAX_HW_COUNTERS]);

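/*
 * Program one counter slot: write the start value to the count MSR first,
 * then the event configuration (which may include the enable bit) to the
 * matching EVENTSEL MSR:
 */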
static void __hw_perf_counter_enable(struct hw_perf_counter *hwc, int idx)
{
        per_cpu(prev_next_count[idx], smp_processor_id()) = hwc->next_count;

        wrmsr(hwc->counter_base + idx, hwc->next_count, 0);
        wrmsr(hwc->config_base + idx, hwc->config, 0);
}

void hw_perf_counter_enable(struct perf_counter *counter)
{
        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
        struct hw_perf_counter *hwc = &counter->hw;
        int idx = hwc->idx;

        /*
         * Try to reclaim the counter slot this counter used last time;
         * if it is already taken, allocate a free one:
         */
        if (test_and_set_bit(idx, cpuc->used)) {
                idx = find_first_zero_bit(cpuc->used, nr_hw_counters);
                set_bit(idx, cpuc->used);
                hwc->idx = idx;
        }

        perf_counters_lapic_init(hwc->nmi);

        wrmsr(hwc->config_base + idx,
              hwc->config & ~ARCH_PERFMON_EVENTSEL0_ENABLE, 0);

        cpuc->counters[idx] = counter;
        counter->hw.config |= ARCH_PERFMON_EVENTSEL0_ENABLE;
        __hw_perf_counter_enable(hwc, idx);
}

#ifdef CONFIG_X86_64
static inline void atomic64_counter_set(struct perf_counter *counter, u64 val)
{
        atomic64_set(&counter->count, val);
}

static inline u64 atomic64_counter_read(struct perf_counter *counter)
{
        return atomic64_read(&counter->count);
}
#else
/*
 * Todo: add proper atomic64_t support to 32-bit x86:
 */
static inline void atomic64_counter_set(struct perf_counter *counter, u64 val64)
{
        u32 *val32 = (void *)&val64;

        atomic_set(counter->count32 + 0, *(val32 + 0));
        atomic_set(counter->count32 + 1, *(val32 + 1));
}

static inline u64 atomic64_counter_read(struct perf_counter *counter)
{
        return atomic_read(counter->count32 + 0) |
                (u64) atomic_read(counter->count32 + 1) << 32;
}
#endif

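/*
 * Fold the current raw hardware count into the generic counter value and
 * compute the (negative) restart value for the next activation:
 */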
static void __hw_perf_save_counter(struct perf_counter *counter,
                                   struct hw_perf_counter *hwc, int idx)
{
        s64 raw = -1;
        s64 delta;
        int err;

        /*
         * Get the raw hw counter value:
         */
        err = rdmsrl_safe(hwc->counter_base + idx, &raw);
        WARN_ON_ONCE(err);

        /*
         * Rebase it to zero (it started counting at -irq_period),
         * to see the delta since ->prev_count:
         */
        delta = (s64)hwc->irq_period + (s64)(s32)raw;

        atomic64_counter_set(counter, hwc->prev_count + delta);

        /*
         * Adjust the ->prev_count offset - if we went beyond
         * irq_period of units, then we got an IRQ and the counter
         * was set back to -irq_period:
         */
        while (delta >= (s64)hwc->irq_period) {
                hwc->prev_count += hwc->irq_period;
                delta -= (s64)hwc->irq_period;
        }

        /*
         * Calculate the next raw counter value we'll write into
         * the counter at the next sched-in time:
         */
        delta -= (s64)hwc->irq_period;

        hwc->next_count = (s32)delta;
}

void perf_counter_print_debug(void)
{
        u64 ctrl, status, overflow, pmc_ctrl, pmc_count, next_count;
        int cpu, err, idx;

        local_irq_disable();

        cpu = smp_processor_id();

        err = rdmsrl_safe(MSR_CORE_PERF_GLOBAL_CTRL, &ctrl);
        WARN_ON_ONCE(err);

        err = rdmsrl_safe(MSR_CORE_PERF_GLOBAL_STATUS, &status);
        WARN_ON_ONCE(err);

        err = rdmsrl_safe(MSR_CORE_PERF_GLOBAL_OVF_CTRL, &overflow);
        WARN_ON_ONCE(err);

        printk(KERN_INFO "\n");
        printk(KERN_INFO "CPU#%d: ctrl:     %016llx\n", cpu, ctrl);
        printk(KERN_INFO "CPU#%d: status:   %016llx\n", cpu, status);
        printk(KERN_INFO "CPU#%d: overflow: %016llx\n", cpu, overflow);

        for (idx = 0; idx < nr_hw_counters; idx++) {
                err = rdmsrl_safe(MSR_ARCH_PERFMON_EVENTSEL0 + idx, &pmc_ctrl);
                WARN_ON_ONCE(err);

                err = rdmsrl_safe(MSR_ARCH_PERFMON_PERFCTR0 + idx, &pmc_count);
                WARN_ON_ONCE(err);

                next_count = per_cpu(prev_next_count[idx], cpu);

                printk(KERN_INFO "CPU#%d: PMC%d ctrl:  %016llx\n",
                        cpu, idx, pmc_ctrl);
                printk(KERN_INFO "CPU#%d: PMC%d count: %016llx\n",
                        cpu, idx, pmc_count);
                printk(KERN_INFO "CPU#%d: PMC%d next:  %016llx\n",
                        cpu, idx, next_count);
        }
        local_irq_enable();
}

void hw_perf_counter_disable(struct perf_counter *counter)
{
        struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
        struct hw_perf_counter *hwc = &counter->hw;
        unsigned int idx = hwc->idx;

        counter->hw.config &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
        wrmsr(hwc->config_base + idx, hwc->config, 0);

        clear_bit(idx, cpuc->used);
        cpuc->counters[idx] = NULL;
        __hw_perf_save_counter(counter, hwc, idx);
}

void hw_perf_counter_read(struct perf_counter *counter)
{
        struct hw_perf_counter *hwc = &counter->hw;
        unsigned long addr = hwc->counter_base + hwc->idx;
        s64 offs, val = -1LL;
        s32 val32;
        int err;

        /*
         * Careful: an NMI can modify the counter offset underneath us,
         * so re-read until prev_count is stable:
         */
        do {
                offs = hwc->prev_count;
                err = rdmsrl_safe(addr, &val);
                WARN_ON_ONCE(err);
        } while (offs != hwc->prev_count);

        val32 = (s32) val;
        val = (s64)hwc->irq_period + (s64)val32;
        atomic64_counter_set(counter, hwc->prev_count + val);
}

static void perf_store_irq_data(struct perf_counter *counter, u64 data)
{
        struct perf_data *irqdata = counter->irqdata;

        if (irqdata->len > PERF_DATA_BUFLEN - sizeof(u64)) {
                irqdata->overrun++;
        } else {
                u64 *p = (u64 *) &irqdata->data[irqdata->len];

                *p = data;
                irqdata->len += sizeof(u64);
        }
}

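/*
 * Stop a counter, fold its count into the generic state and, if it was
 * actually enabled, restart it with a fresh period:
 */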
static void perf_save_and_restart(struct perf_counter *counter)
{
        struct hw_perf_counter *hwc = &counter->hw;
        int idx = hwc->idx;

        wrmsr(hwc->config_base + idx,
              hwc->config & ~ARCH_PERFMON_EVENTSEL0_ENABLE, 0);

        if (hwc->config & ARCH_PERFMON_EVENTSEL0_ENABLE) {
                __hw_perf_save_counter(counter, hwc, idx);
                __hw_perf_counter_enable(hwc, idx);
        }
}

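/*
 * Record the type and value of every simple-record sibling of a group
 * leader into the leader's IRQ data, re-reading any sibling whose
 * overflow was not handled yet:
 */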
static void
perf_handle_group(struct perf_counter *leader, u64 *status, u64 *overflown)
{
        struct perf_counter_context *ctx = leader->ctx;
        struct perf_counter *counter;
        int bit;

        list_for_each_entry(counter, &ctx->counters, list) {
                if (counter->record_type != PERF_RECORD_SIMPLE ||
                    counter == leader)
                        continue;

                if (counter->active) {
                        /*
                         * Re-read the counter from hardware if it was not
                         * part of the handled overflow set, or if its
                         * overflow is still pending in the status mask
                         * (in which case we also clear the pending bit):
                         */
                        bit = counter->hw.idx;
                        if (!test_bit(bit, (unsigned long *) overflown) ||
                            test_bit(bit, (unsigned long *) status)) {
                                clear_bit(bit, (unsigned long *) status);
                                perf_save_and_restart(counter);
                        }
                }
                perf_store_irq_data(leader, counter->hw_event_type);
                perf_store_irq_data(leader, atomic64_counter_read(counter));
        }
}

/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
{
        int bit, cpu = smp_processor_id();
        struct cpu_hw_counters *cpuc;
        u64 ack, status;

        rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
        if (!status) {
                ack_APIC_irq();
                return;
        }

        /* Disable counters globally */
        wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0);
        ack_APIC_irq();

        cpuc = &per_cpu(cpu_hw_counters, cpu);

again:
        ack = status;
        for_each_bit(bit, (unsigned long *) &status, nr_hw_counters) {
                struct perf_counter *counter = cpuc->counters[bit];

                clear_bit(bit, (unsigned long *) &status);
                if (!counter)
                        continue;

                perf_save_and_restart(counter);

                switch (counter->record_type) {
                case PERF_RECORD_SIMPLE:
                        continue;
                case PERF_RECORD_IRQ:
                        perf_store_irq_data(counter, instruction_pointer(regs));
                        break;
                case PERF_RECORD_GROUP:
                        perf_store_irq_data(counter, counter->hw_event_type);
                        perf_store_irq_data(counter,
                                            atomic64_counter_read(counter));
                        perf_handle_group(counter, &status, &ack);
                        break;
                }
                /*
                 * From NMI context we cannot call into the scheduler to
                 * do a task wakeup - but we mark these counters as
                 * wakeup_pending and initiate a wakeup callback:
                 */
                if (nmi) {
                        counter->wakeup_pending = 1;
                        set_tsk_thread_flag(current, TIF_PERF_COUNTERS);
                } else {
                        wake_up(&counter->waitq);
                }
        }

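        /*
         * Acknowledge the overflows we handled: writing a set bit to
         * GLOBAL_OVF_CTRL clears the corresponding GLOBAL_STATUS bit:
         */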
        wrmsr(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack, 0);

        /*
         * Repeat if there is more work to be done:
         */
        rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
        if (status)
                goto again;

        /*
         * Do not reenable when global enable is off:
         */
        if (cpuc->enable_all)
                __hw_perf_enable_all();
}

void smp_perf_counter_interrupt(struct pt_regs *regs)
{
        irq_enter();
#ifdef CONFIG_X86_64
        add_pda(apic_perf_irqs, 1);
#else
        per_cpu(irq_stat, smp_processor_id()).apic_perf_irqs++;
#endif
        apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
        __smp_perf_counter_interrupt(regs, 0);

        irq_exit();
}

/*
 * Process counter wakeups that were flagged from NMI context, where
 * wake_up() could not be called directly:
 */
void perf_counter_notify(struct pt_regs *regs)
{
        struct cpu_hw_counters *cpuc;
        unsigned long flags;
        int bit, cpu;

        local_irq_save(flags);
        cpu = smp_processor_id();
        cpuc = &per_cpu(cpu_hw_counters, cpu);

        for_each_bit(bit, cpuc->used, nr_hw_counters) {
                struct perf_counter *counter = cpuc->counters[bit];

                if (!counter)
                        continue;

                if (counter->wakeup_pending) {
                        counter->wakeup_pending = 0;
                        wake_up(&counter->waitq);
                }
        }

        local_irq_restore(flags);
}

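/*
 * Point the local APIC's performance counter LVT entry at either the
 * regular PMI vector or NMI delivery; the error LVT entry is masked
 * while the switch happens:
 */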
void __cpuinit perf_counters_lapic_init(int nmi)
{
        u32 apic_val;

        if (!perf_counters_initialized)
                return;
        /*
         * Enable the performance counter vector in the APIC LVT:
         */
        apic_val = apic_read(APIC_LVTERR);

        apic_write(APIC_LVTERR, apic_val | APIC_LVT_MASKED);
        if (nmi)
                apic_write(APIC_LVTPC, APIC_DM_NMI);
        else
                apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
        apic_write(APIC_LVTERR, apic_val);
}

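/*
 * NMI die-notifier: claim perf NMIs, re-arm the LVT entry (the APIC
 * masks LVTPC when delivering a PMI) and return NOTIFY_STOP so no
 * further NMI handlers run for this event:
 */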
static int __kprobes
perf_counter_nmi_handler(struct notifier_block *self,
                         unsigned long cmd, void *__args)
{
        struct die_args *args = __args;
        struct pt_regs *regs;

        if (likely(cmd != DIE_NMI_IPI))
                return NOTIFY_DONE;

        regs = args->regs;

        apic_write(APIC_LVTPC, APIC_DM_NMI);
        __smp_perf_counter_interrupt(regs, 1);

        return NOTIFY_STOP;
}

static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
        .notifier_call          = perf_counter_nmi_handler
};

void __init init_hw_perf_counters(void)
{
        union cpuid10_eax eax;
        unsigned int unused;
        unsigned int ebx;

        if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
                return;

        /*
         * Check whether the Architectural PerfMon supports
         * the Branch Misses Retired event:
         */
        cpuid(10, &(eax.full), &ebx, &unused, &unused);
        if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
                return;

        printk(KERN_INFO "Intel Performance Monitoring support detected.\n");

        printk(KERN_INFO "... version:      %d\n", eax.split.version_id);
        printk(KERN_INFO "... num_counters: %d\n", eax.split.num_counters);
        nr_hw_counters = eax.split.num_counters;
        if (nr_hw_counters > MAX_HW_COUNTERS) {
                WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
                     nr_hw_counters, MAX_HW_COUNTERS);
                nr_hw_counters = MAX_HW_COUNTERS;
        }
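        /*
         * One enable bit per generic counter; this mask is also the value
         * written to MSR_CORE_PERF_GLOBAL_CTRL to enable them all:
         */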
        perf_counter_mask = (1 << nr_hw_counters) - 1;
        perf_max_counters = nr_hw_counters;

        printk(KERN_INFO "... bit_width:    %d\n", eax.split.bit_width);
        printk(KERN_INFO "... mask_length:  %d\n", eax.split.mask_length);

        perf_counters_lapic_init(0);
        register_die_notifier(&perf_counter_nmi_notifier);

        perf_counters_initialized = true;
}