| author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-06-11 17:01:07 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-06-11 17:01:07 -0400 |
| commit | 8a1ca8cedd108c8e76a6ab34079d0bbb4f244799 (patch) | |
| tree | 636c715524f1718599209cc289908ea44b6cb859 /arch/x86 | |
| parent | b640f042faa2a2fad6464f259a8afec06e2f6386 (diff) | |
| parent | 940010c5a314a7bd9b498593bc6ba1718ac5aec5 (diff) | |
Merge branch 'perfcounters-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perfcounters-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (574 commits)
perf_counter: Turn off by default
perf_counter: Add counter->id to the throttle event
perf_counter: Better align code
perf_counter: Rename L2 to LL cache
perf_counter: Standardize event names
perf_counter: Rename enums
perf_counter tools: Clean up u64 usage
perf_counter: Rename perf_counter_limit sysctl
perf_counter: More paranoia settings
perf_counter: powerpc: Implement generalized cache events for POWER processors
perf_counters: powerpc: Add support for POWER7 processors
perf_counter: Accurate period data
perf_counter: Introduce struct for sample data
perf_counter tools: Normalize data using per sample period data
perf_counter: Annotate exit ctx recursion
perf_counter tools: Propagate signals properly
perf_counter tools: Small frequency related fixes
perf_counter: More aggressive frequency adjustment
perf_counter/x86: Fix the model number of Intel Core2 processors
perf_counter, x86: Correct some event and umask values for Intel processors
...
Diffstat (limited to 'arch/x86')
27 files changed, 2130 insertions, 70 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index aafae3b140de..68f5578fe38e 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -739,6 +739,7 @@ config X86_UP_IOAPIC
739 | config X86_LOCAL_APIC | 739 | config X86_LOCAL_APIC |
740 | def_bool y | 740 | def_bool y |
741 | depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC | 741 | depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC |
742 | select HAVE_PERF_COUNTERS if (!M386 && !M486) | ||
742 | 743 | ||
743 | config X86_IO_APIC | 744 | config X86_IO_APIC |
744 | def_bool y | 745 | def_bool y |
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index dcef387ddc36..e590261ba059 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -825,10 +825,11 @@ ia32_sys_call_table:
825 | .quad compat_sys_signalfd4 | 825 | .quad compat_sys_signalfd4 |
826 | .quad sys_eventfd2 | 826 | .quad sys_eventfd2 |
827 | .quad sys_epoll_create1 | 827 | .quad sys_epoll_create1 |
828 | .quad sys_dup3 /* 330 */ | 828 | .quad sys_dup3 /* 330 */ |
829 | .quad sys_pipe2 | 829 | .quad sys_pipe2 |
830 | .quad sys_inotify_init1 | 830 | .quad sys_inotify_init1 |
831 | .quad compat_sys_preadv | 831 | .quad compat_sys_preadv |
832 | .quad compat_sys_pwritev | 832 | .quad compat_sys_pwritev |
833 | .quad compat_sys_rt_tgsigqueueinfo /* 335 */ | 833 | .quad compat_sys_rt_tgsigqueueinfo /* 335 */ |
834 | .quad sys_perf_counter_open | ||
834 | ia32_syscall_end: | 835 | ia32_syscall_end: |
diff --git a/arch/x86/include/asm/atomic_32.h b/arch/x86/include/asm/atomic_32.h
index 85b46fba4229..aff9f1fcdcd7 100644
--- a/arch/x86/include/asm/atomic_32.h
+++ b/arch/x86/include/asm/atomic_32.h
@@ -247,5 +247,241 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
247 | #define smp_mb__before_atomic_inc() barrier() | 247 | #define smp_mb__before_atomic_inc() barrier() |
248 | #define smp_mb__after_atomic_inc() barrier() | 248 | #define smp_mb__after_atomic_inc() barrier() |
249 | 249 | ||
250 | /* A 64-bit atomic type */ | ||
251 | |||
252 | typedef struct { | ||
253 | unsigned long long counter; | ||
254 | } atomic64_t; | ||
255 | |||
256 | #define ATOMIC64_INIT(val) { (val) } | ||
257 | |||
258 | /** | ||
259 | * atomic64_read - read atomic64 variable | ||
260 | * @v: pointer of type atomic64_t | ||
261 | * | ||
262 | * Atomically reads the value of @v. | ||
263 | * Doesn't imply a read memory barrier. | ||
264 | */ | ||
265 | #define __atomic64_read(ptr) ((ptr)->counter) | ||
266 | |||
267 | static inline unsigned long long | ||
268 | cmpxchg8b(unsigned long long *ptr, unsigned long long old, unsigned long long new) | ||
269 | { | ||
270 | asm volatile( | ||
271 | |||
272 | LOCK_PREFIX "cmpxchg8b (%[ptr])\n" | ||
273 | |||
274 | : "=A" (old) | ||
275 | |||
276 | : [ptr] "D" (ptr), | ||
277 | "A" (old), | ||
278 | "b" (ll_low(new)), | ||
279 | "c" (ll_high(new)) | ||
280 | |||
281 | : "memory"); | ||
282 | |||
283 | return old; | ||
284 | } | ||
285 | |||
286 | static inline unsigned long long | ||
287 | atomic64_cmpxchg(atomic64_t *ptr, unsigned long long old_val, | ||
288 | unsigned long long new_val) | ||
289 | { | ||
290 | return cmpxchg8b(&ptr->counter, old_val, new_val); | ||
291 | } | ||
292 | |||
293 | /** | ||
294 | * atomic64_xchg - xchg atomic64 variable | ||
295 | * @ptr: pointer to type atomic64_t | ||
296 | * @new_val: value to assign | ||
297 | * @old_val: old value that was there | ||
298 | * | ||
299 | * Atomically xchgs the value of @ptr to @new_val and returns | ||
300 | * the old value. | ||
301 | */ | ||
302 | |||
303 | static inline unsigned long long | ||
304 | atomic64_xchg(atomic64_t *ptr, unsigned long long new_val) | ||
305 | { | ||
306 | unsigned long long old_val; | ||
307 | |||
308 | do { | ||
309 | old_val = atomic_read(ptr); | ||
310 | } while (atomic64_cmpxchg(ptr, old_val, new_val) != old_val); | ||
311 | |||
312 | return old_val; | ||
313 | } | ||
314 | |||
315 | /** | ||
316 | * atomic64_set - set atomic64 variable | ||
317 | * @ptr: pointer to type atomic64_t | ||
318 | * @new_val: value to assign | ||
319 | * | ||
320 | * Atomically sets the value of @ptr to @new_val. | ||
321 | */ | ||
322 | static inline void atomic64_set(atomic64_t *ptr, unsigned long long new_val) | ||
323 | { | ||
324 | atomic64_xchg(ptr, new_val); | ||
325 | } | ||
326 | |||
327 | /** | ||
328 | * atomic64_read - read atomic64 variable | ||
329 | * @ptr: pointer to type atomic64_t | ||
330 | * | ||
331 | * Atomically reads the value of @ptr and returns it. | ||
332 | */ | ||
333 | static inline unsigned long long atomic64_read(atomic64_t *ptr) | ||
334 | { | ||
335 | unsigned long long curr_val; | ||
336 | |||
337 | do { | ||
338 | curr_val = __atomic64_read(ptr); | ||
339 | } while (atomic64_cmpxchg(ptr, curr_val, curr_val) != curr_val); | ||
340 | |||
341 | return curr_val; | ||
342 | } | ||
343 | |||
344 | /** | ||
345 | * atomic64_add_return - add and return | ||
346 | * @delta: integer value to add | ||
347 | * @ptr: pointer to type atomic64_t | ||
348 | * | ||
349 | * Atomically adds @delta to @ptr and returns @delta + *@ptr | ||
350 | */ | ||
351 | static inline unsigned long long | ||
352 | atomic64_add_return(unsigned long long delta, atomic64_t *ptr) | ||
353 | { | ||
354 | unsigned long long old_val, new_val; | ||
355 | |||
356 | do { | ||
357 | old_val = atomic_read(ptr); | ||
358 | new_val = old_val + delta; | ||
359 | |||
360 | } while (atomic64_cmpxchg(ptr, old_val, new_val) != old_val); | ||
361 | |||
362 | return new_val; | ||
363 | } | ||
364 | |||
365 | static inline long atomic64_sub_return(unsigned long long delta, atomic64_t *ptr) | ||
366 | { | ||
367 | return atomic64_add_return(-delta, ptr); | ||
368 | } | ||
369 | |||
370 | static inline long atomic64_inc_return(atomic64_t *ptr) | ||
371 | { | ||
372 | return atomic64_add_return(1, ptr); | ||
373 | } | ||
374 | |||
375 | static inline long atomic64_dec_return(atomic64_t *ptr) | ||
376 | { | ||
377 | return atomic64_sub_return(1, ptr); | ||
378 | } | ||
379 | |||
380 | /** | ||
381 | * atomic64_add - add integer to atomic64 variable | ||
382 | * @delta: integer value to add | ||
383 | * @ptr: pointer to type atomic64_t | ||
384 | * | ||
385 | * Atomically adds @delta to @ptr. | ||
386 | */ | ||
387 | static inline void atomic64_add(unsigned long long delta, atomic64_t *ptr) | ||
388 | { | ||
389 | atomic64_add_return(delta, ptr); | ||
390 | } | ||
391 | |||
392 | /** | ||
393 | * atomic64_sub - subtract the atomic64 variable | ||
394 | * @delta: integer value to subtract | ||
395 | * @ptr: pointer to type atomic64_t | ||
396 | * | ||
397 | * Atomically subtracts @delta from @ptr. | ||
398 | */ | ||
399 | static inline void atomic64_sub(unsigned long long delta, atomic64_t *ptr) | ||
400 | { | ||
401 | atomic64_add(-delta, ptr); | ||
402 | } | ||
403 | |||
404 | /** | ||
405 | * atomic64_sub_and_test - subtract value from variable and test result | ||
406 | * @delta: integer value to subtract | ||
407 | * @ptr: pointer to type atomic64_t | ||
408 | * | ||
409 | * Atomically subtracts @delta from @ptr and returns | ||
410 | * true if the result is zero, or false for all | ||
411 | * other cases. | ||
412 | */ | ||
413 | static inline int | ||
414 | atomic64_sub_and_test(unsigned long long delta, atomic64_t *ptr) | ||
415 | { | ||
416 | unsigned long long old_val = atomic64_sub_return(delta, ptr); | ||
417 | |||
418 | return old_val == 0; | ||
419 | } | ||
420 | |||
421 | /** | ||
422 | * atomic64_inc - increment atomic64 variable | ||
423 | * @ptr: pointer to type atomic64_t | ||
424 | * | ||
425 | * Atomically increments @ptr by 1. | ||
426 | */ | ||
427 | static inline void atomic64_inc(atomic64_t *ptr) | ||
428 | { | ||
429 | atomic64_add(1, ptr); | ||
430 | } | ||
431 | |||
432 | /** | ||
433 | * atomic64_dec - decrement atomic64 variable | ||
434 | * @ptr: pointer to type atomic64_t | ||
435 | * | ||
436 | * Atomically decrements @ptr by 1. | ||
437 | */ | ||
438 | static inline void atomic64_dec(atomic64_t *ptr) | ||
439 | { | ||
440 | atomic64_sub(1, ptr); | ||
441 | } | ||
442 | |||
443 | /** | ||
444 | * atomic64_dec_and_test - decrement and test | ||
445 | * @ptr: pointer to type atomic64_t | ||
446 | * | ||
447 | * Atomically decrements @ptr by 1 and | ||
448 | * returns true if the result is 0, or false for all other | ||
449 | * cases. | ||
450 | */ | ||
451 | static inline int atomic64_dec_and_test(atomic64_t *ptr) | ||
452 | { | ||
453 | return atomic64_sub_and_test(1, ptr); | ||
454 | } | ||
455 | |||
456 | /** | ||
457 | * atomic64_inc_and_test - increment and test | ||
458 | * @ptr: pointer to type atomic64_t | ||
459 | * | ||
460 | * Atomically increments @ptr by 1 | ||
461 | * and returns true if the result is zero, or false for all | ||
462 | * other cases. | ||
463 | */ | ||
464 | static inline int atomic64_inc_and_test(atomic64_t *ptr) | ||
465 | { | ||
466 | return atomic64_sub_and_test(-1, ptr); | ||
467 | } | ||
468 | |||
469 | /** | ||
470 | * atomic64_add_negative - add and test if negative | ||
471 | * @delta: integer value to add | ||
472 | * @ptr: pointer to type atomic64_t | ||
473 | * | ||
474 | * Atomically adds @delta to @ptr and returns true | ||
475 | * if the result is negative, or false when | ||
476 | * result is greater than or equal to zero. | ||
477 | */ | ||
478 | static inline int | ||
479 | atomic64_add_negative(unsigned long long delta, atomic64_t *ptr) | ||
480 | { | ||
481 | long long old_val = atomic64_add_return(delta, ptr); | ||
482 | |||
483 | return old_val < 0; | ||
484 | } | ||
485 | |||
250 | #include <asm-generic/atomic.h> | 486 | #include <asm-generic/atomic.h> |
251 | #endif /* _ASM_X86_ATOMIC_32_H */ | 487 | #endif /* _ASM_X86_ATOMIC_32_H */ |
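The atomic_32.h additions above build every 64-bit operation out of a cmpxchg8b retry loop: read the current value, compute the new one, and retry if another CPU raced in between. Below is a minimal user-space sketch of that same loop, using GCC's __atomic builtins as a stand-in for cmpxchg8b; the my_atomic64_* names and the demo harness are illustrative, not part of the patch.

```c
#include <stdio.h>

/* User-space stand-in for atomic64_t; the kernel version wraps cmpxchg8b. */
typedef struct { unsigned long long counter; } my_atomic64_t;

/* Retry loop mirroring atomic64_add_return(): read, compute, cmpxchg, retry on race. */
static unsigned long long
my_atomic64_add_return(unsigned long long delta, my_atomic64_t *ptr)
{
	unsigned long long old_val, new_val;

	do {
		old_val = __atomic_load_n(&ptr->counter, __ATOMIC_RELAXED);
		new_val = old_val + delta;
	} while (!__atomic_compare_exchange_n(&ptr->counter, &old_val, new_val,
					      0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST));

	return new_val;
}

int main(void)
{
	my_atomic64_t v = { 40 };

	printf("%llu\n", my_atomic64_add_return(2, &v));	/* prints 42 */
	return 0;
}
```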
diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h
index c2e6bedaf258..d750a10ccad6 100644
--- a/arch/x86/include/asm/entry_arch.h
+++ b/arch/x86/include/asm/entry_arch.h
@@ -49,7 +49,7 @@ BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
49 | BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR) | 49 | BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR) |
50 | 50 | ||
51 | #ifdef CONFIG_PERF_COUNTERS | 51 | #ifdef CONFIG_PERF_COUNTERS |
52 | BUILD_INTERRUPT(perf_counter_interrupt, LOCAL_PERF_VECTOR) | 52 | BUILD_INTERRUPT(perf_pending_interrupt, LOCAL_PENDING_VECTOR) |
53 | #endif | 53 | #endif |
54 | 54 | ||
55 | #ifdef CONFIG_X86_MCE_P4THERMAL | 55 | #ifdef CONFIG_X86_MCE_P4THERMAL |
diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
index 37555e52f980..9ebc5c255032 100644
--- a/arch/x86/include/asm/hardirq.h
+++ b/arch/x86/include/asm/hardirq.h
@@ -13,6 +13,8 @@ typedef struct {
13 | unsigned int irq_spurious_count; | 13 | unsigned int irq_spurious_count; |
14 | #endif | 14 | #endif |
15 | unsigned int generic_irqs; /* arch dependent */ | 15 | unsigned int generic_irqs; /* arch dependent */ |
16 | unsigned int apic_perf_irqs; | ||
17 | unsigned int apic_pending_irqs; | ||
16 | #ifdef CONFIG_SMP | 18 | #ifdef CONFIG_SMP |
17 | unsigned int irq_resched_count; | 19 | unsigned int irq_resched_count; |
18 | unsigned int irq_call_count; | 20 | unsigned int irq_call_count; |
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index 3bd1777a4c8b..6df45f639666 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -29,6 +29,8 @@
29 | extern void apic_timer_interrupt(void); | 29 | extern void apic_timer_interrupt(void); |
30 | extern void generic_interrupt(void); | 30 | extern void generic_interrupt(void); |
31 | extern void error_interrupt(void); | 31 | extern void error_interrupt(void); |
32 | extern void perf_pending_interrupt(void); | ||
33 | |||
32 | extern void spurious_interrupt(void); | 34 | extern void spurious_interrupt(void); |
33 | extern void thermal_interrupt(void); | 35 | extern void thermal_interrupt(void); |
34 | extern void reschedule_interrupt(void); | 36 | extern void reschedule_interrupt(void); |
diff --git a/arch/x86/include/asm/intel_arch_perfmon.h b/arch/x86/include/asm/intel_arch_perfmon.h
deleted file mode 100644
index fa0fd068bc2e..000000000000
--- a/arch/x86/include/asm/intel_arch_perfmon.h
+++ /dev/null
@@ -1,31 +0,0 @@
1 | #ifndef _ASM_X86_INTEL_ARCH_PERFMON_H | ||
2 | #define _ASM_X86_INTEL_ARCH_PERFMON_H | ||
3 | |||
4 | #define MSR_ARCH_PERFMON_PERFCTR0 0xc1 | ||
5 | #define MSR_ARCH_PERFMON_PERFCTR1 0xc2 | ||
6 | |||
7 | #define MSR_ARCH_PERFMON_EVENTSEL0 0x186 | ||
8 | #define MSR_ARCH_PERFMON_EVENTSEL1 0x187 | ||
9 | |||
10 | #define ARCH_PERFMON_EVENTSEL0_ENABLE (1 << 22) | ||
11 | #define ARCH_PERFMON_EVENTSEL_INT (1 << 20) | ||
12 | #define ARCH_PERFMON_EVENTSEL_OS (1 << 17) | ||
13 | #define ARCH_PERFMON_EVENTSEL_USR (1 << 16) | ||
14 | |||
15 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL (0x3c) | ||
16 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8) | ||
17 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX (0) | ||
18 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \ | ||
19 | (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX)) | ||
20 | |||
21 | union cpuid10_eax { | ||
22 | struct { | ||
23 | unsigned int version_id:8; | ||
24 | unsigned int num_counters:8; | ||
25 | unsigned int bit_width:8; | ||
26 | unsigned int mask_length:8; | ||
27 | } split; | ||
28 | unsigned int full; | ||
29 | }; | ||
30 | |||
31 | #endif /* _ASM_X86_INTEL_ARCH_PERFMON_H */ | ||
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 910b5a3d6751..e997be98c9b9 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -108,14 +108,14 @@
108 | #define LOCAL_TIMER_VECTOR 0xef | 108 | #define LOCAL_TIMER_VECTOR 0xef |
109 | 109 | ||
110 | /* | 110 | /* |
111 | * Performance monitoring interrupt vector: | 111 | * Generic system vector for platform specific use |
112 | */ | 112 | */ |
113 | #define LOCAL_PERF_VECTOR 0xee | 113 | #define GENERIC_INTERRUPT_VECTOR 0xed |
114 | 114 | ||
115 | /* | 115 | /* |
116 | * Generic system vector for platform specific use | 116 | * Performance monitoring pending work vector: |
117 | */ | 117 | */ |
118 | #define GENERIC_INTERRUPT_VECTOR 0xed | 118 | #define LOCAL_PENDING_VECTOR 0xec |
119 | 119 | ||
120 | /* | 120 | /* |
121 | * First APIC vector available to drivers: (vectors 0x30-0xee) we | 121 | * First APIC vector available to drivers: (vectors 0x30-0xee) we |
diff --git a/arch/x86/include/asm/perf_counter.h b/arch/x86/include/asm/perf_counter.h
new file mode 100644
index 000000000000..876ed97147b3
--- /dev/null
+++ b/arch/x86/include/asm/perf_counter.h
@@ -0,0 +1,100 @@
1 | #ifndef _ASM_X86_PERF_COUNTER_H | ||
2 | #define _ASM_X86_PERF_COUNTER_H | ||
3 | |||
4 | /* | ||
5 | * Performance counter hw details: | ||
6 | */ | ||
7 | |||
8 | #define X86_PMC_MAX_GENERIC 8 | ||
9 | #define X86_PMC_MAX_FIXED 3 | ||
10 | |||
11 | #define X86_PMC_IDX_GENERIC 0 | ||
12 | #define X86_PMC_IDX_FIXED 32 | ||
13 | #define X86_PMC_IDX_MAX 64 | ||
14 | |||
15 | #define MSR_ARCH_PERFMON_PERFCTR0 0xc1 | ||
16 | #define MSR_ARCH_PERFMON_PERFCTR1 0xc2 | ||
17 | |||
18 | #define MSR_ARCH_PERFMON_EVENTSEL0 0x186 | ||
19 | #define MSR_ARCH_PERFMON_EVENTSEL1 0x187 | ||
20 | |||
21 | #define ARCH_PERFMON_EVENTSEL0_ENABLE (1 << 22) | ||
22 | #define ARCH_PERFMON_EVENTSEL_INT (1 << 20) | ||
23 | #define ARCH_PERFMON_EVENTSEL_OS (1 << 17) | ||
24 | #define ARCH_PERFMON_EVENTSEL_USR (1 << 16) | ||
25 | |||
26 | /* | ||
27 | * Includes eventsel and unit mask as well: | ||
28 | */ | ||
29 | #define ARCH_PERFMON_EVENT_MASK 0xffff | ||
30 | |||
31 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL 0x3c | ||
32 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8) | ||
33 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX 0 | ||
34 | #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \ | ||
35 | (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX)) | ||
36 | |||
37 | #define ARCH_PERFMON_BRANCH_MISSES_RETIRED 6 | ||
38 | |||
39 | /* | ||
40 | * Intel "Architectural Performance Monitoring" CPUID | ||
41 | * detection/enumeration details: | ||
42 | */ | ||
43 | union cpuid10_eax { | ||
44 | struct { | ||
45 | unsigned int version_id:8; | ||
46 | unsigned int num_counters:8; | ||
47 | unsigned int bit_width:8; | ||
48 | unsigned int mask_length:8; | ||
49 | } split; | ||
50 | unsigned int full; | ||
51 | }; | ||
52 | |||
53 | union cpuid10_edx { | ||
54 | struct { | ||
55 | unsigned int num_counters_fixed:4; | ||
56 | unsigned int reserved:28; | ||
57 | } split; | ||
58 | unsigned int full; | ||
59 | }; | ||
60 | |||
61 | |||
62 | /* | ||
63 | * Fixed-purpose performance counters: | ||
64 | */ | ||
65 | |||
66 | /* | ||
67 | * All 3 fixed-mode PMCs are configured via this single MSR: | ||
68 | */ | ||
69 | #define MSR_ARCH_PERFMON_FIXED_CTR_CTRL 0x38d | ||
70 | |||
71 | /* | ||
72 | * The counts are available in three separate MSRs: | ||
73 | */ | ||
74 | |||
75 | /* Instr_Retired.Any: */ | ||
76 | #define MSR_ARCH_PERFMON_FIXED_CTR0 0x309 | ||
77 | #define X86_PMC_IDX_FIXED_INSTRUCTIONS (X86_PMC_IDX_FIXED + 0) | ||
78 | |||
79 | /* CPU_CLK_Unhalted.Core: */ | ||
80 | #define MSR_ARCH_PERFMON_FIXED_CTR1 0x30a | ||
81 | #define X86_PMC_IDX_FIXED_CPU_CYCLES (X86_PMC_IDX_FIXED + 1) | ||
82 | |||
83 | /* CPU_CLK_Unhalted.Ref: */ | ||
84 | #define MSR_ARCH_PERFMON_FIXED_CTR2 0x30b | ||
85 | #define X86_PMC_IDX_FIXED_BUS_CYCLES (X86_PMC_IDX_FIXED + 2) | ||
86 | |||
87 | extern void set_perf_counter_pending(void); | ||
88 | |||
89 | #define clear_perf_counter_pending() do { } while (0) | ||
90 | #define test_perf_counter_pending() (0) | ||
91 | |||
92 | #ifdef CONFIG_PERF_COUNTERS | ||
93 | extern void init_hw_perf_counters(void); | ||
94 | extern void perf_counters_lapic_init(void); | ||
95 | #else | ||
96 | static inline void init_hw_perf_counters(void) { } | ||
97 | static inline void perf_counters_lapic_init(void) { } | ||
98 | #endif | ||
99 | |||
100 | #endif /* _ASM_X86_PERF_COUNTER_H */ | ||
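The cpuid10_eax/cpuid10_edx unions in the new header decode CPUID leaf 0xA, which the PMU init code uses to discover how many generic and fixed counters the CPU has and how wide they are. A user-space probe along the same lines is sketched below; the unions are copied from the header above, while __get_cpuid() is GCC's <cpuid.h> helper and not part of this patch.

```c
#include <stdio.h>
#include <cpuid.h>

/* Mirrors union cpuid10_eax / cpuid10_edx from asm/perf_counter.h. */
union cpuid10_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_counters:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
};

union cpuid10_edx {
	struct {
		unsigned int num_counters_fixed:4;
		unsigned int reserved:28;
	} split;
	unsigned int full;
};

int main(void)
{
	unsigned int eax, ebx, ecx, edx;
	union cpuid10_eax ax;
	union cpuid10_edx dx;

	/* CPUID leaf 0xA: architectural performance monitoring (Intel). */
	if (!__get_cpuid(0x0a, &eax, &ebx, &ecx, &edx)) {
		fprintf(stderr, "CPUID leaf 0xA not supported\n");
		return 1;
	}

	ax.full = eax;
	dx.full = edx;

	printf("PMU version:      %u\n", ax.split.version_id);
	printf("generic counters: %u (%u bits wide)\n",
	       ax.split.num_counters, ax.split.bit_width);
	printf("fixed counters:   %u\n", dx.split.num_counters_fixed);
	return 0;
}
```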
diff --git a/arch/x86/include/asm/unistd_32.h b/arch/x86/include/asm/unistd_32.h
index 708dae61262d..732a30706153 100644
--- a/arch/x86/include/asm/unistd_32.h
+++ b/arch/x86/include/asm/unistd_32.h
@@ -341,6 +341,7 @@
341 | #define __NR_preadv 333 | 341 | #define __NR_preadv 333 |
342 | #define __NR_pwritev 334 | 342 | #define __NR_pwritev 334 |
343 | #define __NR_rt_tgsigqueueinfo 335 | 343 | #define __NR_rt_tgsigqueueinfo 335 |
344 | #define __NR_perf_counter_open 336 | ||
344 | 345 | ||
345 | #ifdef __KERNEL__ | 346 | #ifdef __KERNEL__ |
346 | 347 | ||
diff --git a/arch/x86/include/asm/unistd_64.h b/arch/x86/include/asm/unistd_64.h
index 4e2b05404400..900e1617e672 100644
--- a/arch/x86/include/asm/unistd_64.h
+++ b/arch/x86/include/asm/unistd_64.h
@@ -659,7 +659,8 @@ __SYSCALL(__NR_preadv, sys_preadv)
659 | __SYSCALL(__NR_pwritev, sys_pwritev) | 659 | __SYSCALL(__NR_pwritev, sys_pwritev) |
660 | #define __NR_rt_tgsigqueueinfo 297 | 660 | #define __NR_rt_tgsigqueueinfo 297 |
661 | __SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo) | 661 | __SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo) |
662 | 662 | #define __NR_perf_counter_open 298 | |
663 | __SYSCALL(__NR_perf_counter_open, sys_perf_counter_open) | ||
663 | 664 | ||
664 | #ifndef __NO_STUBS | 665 | #ifndef __NO_STUBS |
665 | #define __ARCH_WANT_OLD_READDIR | 666 | #define __ARCH_WANT_OLD_READDIR |
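With __NR_perf_counter_open wired up as 336 on 32-bit and 298 on 64-bit, user space can reach the new counter machinery directly via syscall(2). The sketch below is hypothetical: it assumes the struct perf_counter_attr definition from <linux/perf_counter.h> (which is not part of this arch/x86 diff) and the five-argument calling convention used by the perf_counter tools of this era.

```c
/* Hypothetical sketch: count CPU cycles for this task via the new syscall.
 * Assumes <linux/perf_counter.h> from a kernel of this vintage; field and
 * enum names follow the definitions referenced by this patch. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/perf_counter.h>

#ifndef __NR_perf_counter_open
#define __NR_perf_counter_open 298	/* x86_64 number added above */
#endif

static int perf_counter_open(struct perf_counter_attr *attr,
			     pid_t pid, int cpu, int group_fd,
			     unsigned long flags)
{
	return syscall(__NR_perf_counter_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_counter_attr attr;
	unsigned long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size   = sizeof(attr);
	attr.type   = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;

	fd = perf_counter_open(&attr, 0 /* this task */, -1 /* any cpu */, -1, 0);
	if (fd < 0) {
		perror("perf_counter_open");
		return 1;
	}

	/* ... run the code to be measured ... */

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("cycles: %llu\n", count);
	close(fd);
	return 0;
}
```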
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index a4c9cf0bf70b..076d3881f3da 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -14,6 +14,7 @@
14 | * Mikael Pettersson : PM converted to driver model. | 14 | * Mikael Pettersson : PM converted to driver model. |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <linux/perf_counter.h> | ||
17 | #include <linux/kernel_stat.h> | 18 | #include <linux/kernel_stat.h> |
18 | #include <linux/mc146818rtc.h> | 19 | #include <linux/mc146818rtc.h> |
19 | #include <linux/acpi_pmtmr.h> | 20 | #include <linux/acpi_pmtmr.h> |
@@ -34,6 +35,7 @@
34 | #include <linux/smp.h> | 35 | #include <linux/smp.h> |
35 | #include <linux/mm.h> | 36 | #include <linux/mm.h> |
36 | 37 | ||
38 | #include <asm/perf_counter.h> | ||
37 | #include <asm/pgalloc.h> | 39 | #include <asm/pgalloc.h> |
38 | #include <asm/atomic.h> | 40 | #include <asm/atomic.h> |
39 | #include <asm/mpspec.h> | 41 | #include <asm/mpspec.h> |
@@ -1187,6 +1189,7 @@ void __cpuinit setup_local_APIC(void)
1187 | apic_write(APIC_ESR, 0); | 1189 | apic_write(APIC_ESR, 0); |
1188 | } | 1190 | } |
1189 | #endif | 1191 | #endif |
1192 | perf_counters_lapic_init(); | ||
1190 | 1193 | ||
1191 | preempt_disable(); | 1194 | preempt_disable(); |
1192 | 1195 | ||
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 4e242f9a06e4..3efcb2b96a15 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -1,5 +1,5 @@
1 | # | 1 | # |
2 | # Makefile for x86-compatible CPU details and quirks | 2 | # Makefile for x86-compatible CPU details, features and quirks |
3 | # | 3 | # |
4 | 4 | ||
5 | # Don't trace early stages of a secondary CPU boot | 5 | # Don't trace early stages of a secondary CPU boot |
@@ -23,11 +23,13 @@ obj-$(CONFIG_CPU_SUP_CENTAUR) += centaur.o
23 | obj-$(CONFIG_CPU_SUP_TRANSMETA_32) += transmeta.o | 23 | obj-$(CONFIG_CPU_SUP_TRANSMETA_32) += transmeta.o |
24 | obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o | 24 | obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o |
25 | 25 | ||
26 | obj-$(CONFIG_X86_MCE) += mcheck/ | 26 | obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o |
27 | obj-$(CONFIG_MTRR) += mtrr/ | ||
28 | obj-$(CONFIG_CPU_FREQ) += cpufreq/ | ||
29 | 27 | ||
30 | obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o | 28 | obj-$(CONFIG_X86_MCE) += mcheck/ |
29 | obj-$(CONFIG_MTRR) += mtrr/ | ||
30 | obj-$(CONFIG_CPU_FREQ) += cpufreq/ | ||
31 | |||
32 | obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o | ||
31 | 33 | ||
32 | quiet_cmd_mkcapflags = MKCAP $@ | 34 | quiet_cmd_mkcapflags = MKCAP $@ |
33 | cmd_mkcapflags = $(PERL) $(srctree)/$(src)/mkcapflags.pl $< $@ | 35 | cmd_mkcapflags = $(PERL) $(srctree)/$(src)/mkcapflags.pl $< $@ |
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index b0517aa2bd3b..3ffdcfa9abdf 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -13,6 +13,7 @@
13 | #include <linux/io.h> | 13 | #include <linux/io.h> |
14 | 14 | ||
15 | #include <asm/stackprotector.h> | 15 | #include <asm/stackprotector.h> |
16 | #include <asm/perf_counter.h> | ||
16 | #include <asm/mmu_context.h> | 17 | #include <asm/mmu_context.h> |
17 | #include <asm/hypervisor.h> | 18 | #include <asm/hypervisor.h> |
18 | #include <asm/processor.h> | 19 | #include <asm/processor.h> |
@@ -874,6 +875,7 @@ void __init identify_boot_cpu(void)
874 | #else | 875 | #else |
875 | vgetcpu_set_mode(); | 876 | vgetcpu_set_mode(); |
876 | #endif | 877 | #endif |
878 | init_hw_perf_counters(); | ||
877 | } | 879 | } |
878 | 880 | ||
879 | void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c) | 881 | void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c) |
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
new file mode 100644
index 000000000000..895c82e78455
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -0,0 +1,1704 @@
1 | /* | ||
2 | * Performance counter x86 architecture code | ||
3 | * | ||
4 | * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de> | ||
5 | * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar | ||
6 | * Copyright (C) 2009 Jaswinder Singh Rajput | ||
7 | * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter | ||
8 | * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> | ||
9 | * | ||
10 | * For licencing details see kernel-base/COPYING | ||
11 | */ | ||
12 | |||
13 | #include <linux/perf_counter.h> | ||
14 | #include <linux/capability.h> | ||
15 | #include <linux/notifier.h> | ||
16 | #include <linux/hardirq.h> | ||
17 | #include <linux/kprobes.h> | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/kdebug.h> | ||
20 | #include <linux/sched.h> | ||
21 | #include <linux/uaccess.h> | ||
22 | |||
23 | #include <asm/apic.h> | ||
24 | #include <asm/stacktrace.h> | ||
25 | #include <asm/nmi.h> | ||
26 | |||
27 | static u64 perf_counter_mask __read_mostly; | ||
28 | |||
29 | struct cpu_hw_counters { | ||
30 | struct perf_counter *counters[X86_PMC_IDX_MAX]; | ||
31 | unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; | ||
32 | unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)]; | ||
33 | unsigned long interrupts; | ||
34 | int enabled; | ||
35 | }; | ||
36 | |||
37 | /* | ||
38 | * struct x86_pmu - generic x86 pmu | ||
39 | */ | ||
40 | struct x86_pmu { | ||
41 | const char *name; | ||
42 | int version; | ||
43 | int (*handle_irq)(struct pt_regs *); | ||
44 | void (*disable_all)(void); | ||
45 | void (*enable_all)(void); | ||
46 | void (*enable)(struct hw_perf_counter *, int); | ||
47 | void (*disable)(struct hw_perf_counter *, int); | ||
48 | unsigned eventsel; | ||
49 | unsigned perfctr; | ||
50 | u64 (*event_map)(int); | ||
51 | u64 (*raw_event)(u64); | ||
52 | int max_events; | ||
53 | int num_counters; | ||
54 | int num_counters_fixed; | ||
55 | int counter_bits; | ||
56 | u64 counter_mask; | ||
57 | u64 max_period; | ||
58 | u64 intel_ctrl; | ||
59 | }; | ||
60 | |||
61 | static struct x86_pmu x86_pmu __read_mostly; | ||
62 | |||
63 | static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = { | ||
64 | .enabled = 1, | ||
65 | }; | ||
66 | |||
67 | /* | ||
68 | * Intel PerfMon v3. Used on Core2 and later. | ||
69 | */ | ||
70 | static const u64 intel_perfmon_event_map[] = | ||
71 | { | ||
72 | [PERF_COUNT_HW_CPU_CYCLES] = 0x003c, | ||
73 | [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, | ||
74 | [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e, | ||
75 | [PERF_COUNT_HW_CACHE_MISSES] = 0x412e, | ||
76 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4, | ||
77 | [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5, | ||
78 | [PERF_COUNT_HW_BUS_CYCLES] = 0x013c, | ||
79 | }; | ||
80 | |||
81 | static u64 intel_pmu_event_map(int event) | ||
82 | { | ||
83 | return intel_perfmon_event_map[event]; | ||
84 | } | ||
85 | |||
86 | /* | ||
87 | * Generalized hw caching related event table, filled | ||
88 | * in on a per model basis. A value of 0 means | ||
89 | * 'not supported', -1 means 'event makes no sense on | ||
90 | * this CPU', any other value means the raw event | ||
91 | * ID. | ||
92 | */ | ||
93 | |||
94 | #define C(x) PERF_COUNT_HW_CACHE_##x | ||
95 | |||
96 | static u64 __read_mostly hw_cache_event_ids | ||
97 | [PERF_COUNT_HW_CACHE_MAX] | ||
98 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
99 | [PERF_COUNT_HW_CACHE_RESULT_MAX]; | ||
100 | |||
101 | static const u64 nehalem_hw_cache_event_ids | ||
102 | [PERF_COUNT_HW_CACHE_MAX] | ||
103 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
104 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = | ||
105 | { | ||
106 | [ C(L1D) ] = { | ||
107 | [ C(OP_READ) ] = { | ||
108 | [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */ | ||
109 | [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */ | ||
110 | }, | ||
111 | [ C(OP_WRITE) ] = { | ||
112 | [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */ | ||
113 | [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */ | ||
114 | }, | ||
115 | [ C(OP_PREFETCH) ] = { | ||
116 | [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */ | ||
117 | [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */ | ||
118 | }, | ||
119 | }, | ||
120 | [ C(L1I ) ] = { | ||
121 | [ C(OP_READ) ] = { | ||
122 | [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */ | ||
123 | [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */ | ||
124 | }, | ||
125 | [ C(OP_WRITE) ] = { | ||
126 | [ C(RESULT_ACCESS) ] = -1, | ||
127 | [ C(RESULT_MISS) ] = -1, | ||
128 | }, | ||
129 | [ C(OP_PREFETCH) ] = { | ||
130 | [ C(RESULT_ACCESS) ] = 0x0, | ||
131 | [ C(RESULT_MISS) ] = 0x0, | ||
132 | }, | ||
133 | }, | ||
134 | [ C(LL ) ] = { | ||
135 | [ C(OP_READ) ] = { | ||
136 | [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */ | ||
137 | [ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */ | ||
138 | }, | ||
139 | [ C(OP_WRITE) ] = { | ||
140 | [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */ | ||
141 | [ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */ | ||
142 | }, | ||
143 | [ C(OP_PREFETCH) ] = { | ||
144 | [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */ | ||
145 | [ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */ | ||
146 | }, | ||
147 | }, | ||
148 | [ C(DTLB) ] = { | ||
149 | [ C(OP_READ) ] = { | ||
150 | [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */ | ||
151 | [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */ | ||
152 | }, | ||
153 | [ C(OP_WRITE) ] = { | ||
154 | [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */ | ||
155 | [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */ | ||
156 | }, | ||
157 | [ C(OP_PREFETCH) ] = { | ||
158 | [ C(RESULT_ACCESS) ] = 0x0, | ||
159 | [ C(RESULT_MISS) ] = 0x0, | ||
160 | }, | ||
161 | }, | ||
162 | [ C(ITLB) ] = { | ||
163 | [ C(OP_READ) ] = { | ||
164 | [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */ | ||
165 | [ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */ | ||
166 | }, | ||
167 | [ C(OP_WRITE) ] = { | ||
168 | [ C(RESULT_ACCESS) ] = -1, | ||
169 | [ C(RESULT_MISS) ] = -1, | ||
170 | }, | ||
171 | [ C(OP_PREFETCH) ] = { | ||
172 | [ C(RESULT_ACCESS) ] = -1, | ||
173 | [ C(RESULT_MISS) ] = -1, | ||
174 | }, | ||
175 | }, | ||
176 | [ C(BPU ) ] = { | ||
177 | [ C(OP_READ) ] = { | ||
178 | [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */ | ||
179 | [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */ | ||
180 | }, | ||
181 | [ C(OP_WRITE) ] = { | ||
182 | [ C(RESULT_ACCESS) ] = -1, | ||
183 | [ C(RESULT_MISS) ] = -1, | ||
184 | }, | ||
185 | [ C(OP_PREFETCH) ] = { | ||
186 | [ C(RESULT_ACCESS) ] = -1, | ||
187 | [ C(RESULT_MISS) ] = -1, | ||
188 | }, | ||
189 | }, | ||
190 | }; | ||
191 | |||
192 | static const u64 core2_hw_cache_event_ids | ||
193 | [PERF_COUNT_HW_CACHE_MAX] | ||
194 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
195 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = | ||
196 | { | ||
197 | [ C(L1D) ] = { | ||
198 | [ C(OP_READ) ] = { | ||
199 | [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */ | ||
200 | [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */ | ||
201 | }, | ||
202 | [ C(OP_WRITE) ] = { | ||
203 | [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */ | ||
204 | [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */ | ||
205 | }, | ||
206 | [ C(OP_PREFETCH) ] = { | ||
207 | [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */ | ||
208 | [ C(RESULT_MISS) ] = 0, | ||
209 | }, | ||
210 | }, | ||
211 | [ C(L1I ) ] = { | ||
212 | [ C(OP_READ) ] = { | ||
213 | [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */ | ||
214 | [ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */ | ||
215 | }, | ||
216 | [ C(OP_WRITE) ] = { | ||
217 | [ C(RESULT_ACCESS) ] = -1, | ||
218 | [ C(RESULT_MISS) ] = -1, | ||
219 | }, | ||
220 | [ C(OP_PREFETCH) ] = { | ||
221 | [ C(RESULT_ACCESS) ] = 0, | ||
222 | [ C(RESULT_MISS) ] = 0, | ||
223 | }, | ||
224 | }, | ||
225 | [ C(LL ) ] = { | ||
226 | [ C(OP_READ) ] = { | ||
227 | [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */ | ||
228 | [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */ | ||
229 | }, | ||
230 | [ C(OP_WRITE) ] = { | ||
231 | [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */ | ||
232 | [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */ | ||
233 | }, | ||
234 | [ C(OP_PREFETCH) ] = { | ||
235 | [ C(RESULT_ACCESS) ] = 0, | ||
236 | [ C(RESULT_MISS) ] = 0, | ||
237 | }, | ||
238 | }, | ||
239 | [ C(DTLB) ] = { | ||
240 | [ C(OP_READ) ] = { | ||
241 | [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */ | ||
242 | [ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */ | ||
243 | }, | ||
244 | [ C(OP_WRITE) ] = { | ||
245 | [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */ | ||
246 | [ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */ | ||
247 | }, | ||
248 | [ C(OP_PREFETCH) ] = { | ||
249 | [ C(RESULT_ACCESS) ] = 0, | ||
250 | [ C(RESULT_MISS) ] = 0, | ||
251 | }, | ||
252 | }, | ||
253 | [ C(ITLB) ] = { | ||
254 | [ C(OP_READ) ] = { | ||
255 | [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */ | ||
256 | [ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */ | ||
257 | }, | ||
258 | [ C(OP_WRITE) ] = { | ||
259 | [ C(RESULT_ACCESS) ] = -1, | ||
260 | [ C(RESULT_MISS) ] = -1, | ||
261 | }, | ||
262 | [ C(OP_PREFETCH) ] = { | ||
263 | [ C(RESULT_ACCESS) ] = -1, | ||
264 | [ C(RESULT_MISS) ] = -1, | ||
265 | }, | ||
266 | }, | ||
267 | [ C(BPU ) ] = { | ||
268 | [ C(OP_READ) ] = { | ||
269 | [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */ | ||
270 | [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */ | ||
271 | }, | ||
272 | [ C(OP_WRITE) ] = { | ||
273 | [ C(RESULT_ACCESS) ] = -1, | ||
274 | [ C(RESULT_MISS) ] = -1, | ||
275 | }, | ||
276 | [ C(OP_PREFETCH) ] = { | ||
277 | [ C(RESULT_ACCESS) ] = -1, | ||
278 | [ C(RESULT_MISS) ] = -1, | ||
279 | }, | ||
280 | }, | ||
281 | }; | ||
282 | |||
283 | static const u64 atom_hw_cache_event_ids | ||
284 | [PERF_COUNT_HW_CACHE_MAX] | ||
285 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
286 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = | ||
287 | { | ||
288 | [ C(L1D) ] = { | ||
289 | [ C(OP_READ) ] = { | ||
290 | [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */ | ||
291 | [ C(RESULT_MISS) ] = 0, | ||
292 | }, | ||
293 | [ C(OP_WRITE) ] = { | ||
294 | [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */ | ||
295 | [ C(RESULT_MISS) ] = 0, | ||
296 | }, | ||
297 | [ C(OP_PREFETCH) ] = { | ||
298 | [ C(RESULT_ACCESS) ] = 0x0, | ||
299 | [ C(RESULT_MISS) ] = 0, | ||
300 | }, | ||
301 | }, | ||
302 | [ C(L1I ) ] = { | ||
303 | [ C(OP_READ) ] = { | ||
304 | [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */ | ||
305 | [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */ | ||
306 | }, | ||
307 | [ C(OP_WRITE) ] = { | ||
308 | [ C(RESULT_ACCESS) ] = -1, | ||
309 | [ C(RESULT_MISS) ] = -1, | ||
310 | }, | ||
311 | [ C(OP_PREFETCH) ] = { | ||
312 | [ C(RESULT_ACCESS) ] = 0, | ||
313 | [ C(RESULT_MISS) ] = 0, | ||
314 | }, | ||
315 | }, | ||
316 | [ C(LL ) ] = { | ||
317 | [ C(OP_READ) ] = { | ||
318 | [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */ | ||
319 | [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */ | ||
320 | }, | ||
321 | [ C(OP_WRITE) ] = { | ||
322 | [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */ | ||
323 | [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */ | ||
324 | }, | ||
325 | [ C(OP_PREFETCH) ] = { | ||
326 | [ C(RESULT_ACCESS) ] = 0, | ||
327 | [ C(RESULT_MISS) ] = 0, | ||
328 | }, | ||
329 | }, | ||
330 | [ C(DTLB) ] = { | ||
331 | [ C(OP_READ) ] = { | ||
332 | [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */ | ||
333 | [ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */ | ||
334 | }, | ||
335 | [ C(OP_WRITE) ] = { | ||
336 | [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */ | ||
337 | [ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */ | ||
338 | }, | ||
339 | [ C(OP_PREFETCH) ] = { | ||
340 | [ C(RESULT_ACCESS) ] = 0, | ||
341 | [ C(RESULT_MISS) ] = 0, | ||
342 | }, | ||
343 | }, | ||
344 | [ C(ITLB) ] = { | ||
345 | [ C(OP_READ) ] = { | ||
346 | [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */ | ||
347 | [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */ | ||
348 | }, | ||
349 | [ C(OP_WRITE) ] = { | ||
350 | [ C(RESULT_ACCESS) ] = -1, | ||
351 | [ C(RESULT_MISS) ] = -1, | ||
352 | }, | ||
353 | [ C(OP_PREFETCH) ] = { | ||
354 | [ C(RESULT_ACCESS) ] = -1, | ||
355 | [ C(RESULT_MISS) ] = -1, | ||
356 | }, | ||
357 | }, | ||
358 | [ C(BPU ) ] = { | ||
359 | [ C(OP_READ) ] = { | ||
360 | [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */ | ||
361 | [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */ | ||
362 | }, | ||
363 | [ C(OP_WRITE) ] = { | ||
364 | [ C(RESULT_ACCESS) ] = -1, | ||
365 | [ C(RESULT_MISS) ] = -1, | ||
366 | }, | ||
367 | [ C(OP_PREFETCH) ] = { | ||
368 | [ C(RESULT_ACCESS) ] = -1, | ||
369 | [ C(RESULT_MISS) ] = -1, | ||
370 | }, | ||
371 | }, | ||
372 | }; | ||
373 | |||
374 | static u64 intel_pmu_raw_event(u64 event) | ||
375 | { | ||
376 | #define CORE_EVNTSEL_EVENT_MASK 0x000000FFULL | ||
377 | #define CORE_EVNTSEL_UNIT_MASK 0x0000FF00ULL | ||
378 | #define CORE_EVNTSEL_EDGE_MASK 0x00040000ULL | ||
379 | #define CORE_EVNTSEL_INV_MASK 0x00800000ULL | ||
380 | #define CORE_EVNTSEL_COUNTER_MASK 0xFF000000ULL | ||
381 | |||
382 | #define CORE_EVNTSEL_MASK \ | ||
383 | (CORE_EVNTSEL_EVENT_MASK | \ | ||
384 | CORE_EVNTSEL_UNIT_MASK | \ | ||
385 | CORE_EVNTSEL_EDGE_MASK | \ | ||
386 | CORE_EVNTSEL_INV_MASK | \ | ||
387 | CORE_EVNTSEL_COUNTER_MASK) | ||
388 | |||
389 | return event & CORE_EVNTSEL_MASK; | ||
390 | } | ||
391 | |||
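intel_pmu_raw_event() passes through the event-select byte (bits 0-7), the unit mask (bits 8-15) and the edge/invert/counter-mask bits, so a raw config is just those fields packed together. A small illustrative helper (the names are ours, not the kernel's) showing the packing for the LLC-references encoding 0x4f2e used in intel_perfmon_event_map[]:

```c
#include <stdio.h>
#include <stdint.h>

/* Pack an event-select byte and unit mask the way the raw_event()
 * callbacks above expect them: event in bits 0-7, umask in bits 8-15. */
static uint64_t raw_event(uint8_t event_select, uint8_t unit_mask)
{
	return (uint64_t)event_select | ((uint64_t)unit_mask << 8);
}

int main(void)
{
	/* 0x4f2e from intel_perfmon_event_map[]: LLC references
	 * (event 0x2e, umask 0x4f). */
	uint64_t cfg = raw_event(0x2e, 0x4f);

	printf("raw config: 0x%llx\n", (unsigned long long)cfg);	/* 0x4f2e */
	return 0;
}
```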
392 | static const u64 amd_0f_hw_cache_event_ids | ||
393 | [PERF_COUNT_HW_CACHE_MAX] | ||
394 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
395 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = | ||
396 | { | ||
397 | [ C(L1D) ] = { | ||
398 | [ C(OP_READ) ] = { | ||
399 | [ C(RESULT_ACCESS) ] = 0, | ||
400 | [ C(RESULT_MISS) ] = 0, | ||
401 | }, | ||
402 | [ C(OP_WRITE) ] = { | ||
403 | [ C(RESULT_ACCESS) ] = 0, | ||
404 | [ C(RESULT_MISS) ] = 0, | ||
405 | }, | ||
406 | [ C(OP_PREFETCH) ] = { | ||
407 | [ C(RESULT_ACCESS) ] = 0, | ||
408 | [ C(RESULT_MISS) ] = 0, | ||
409 | }, | ||
410 | }, | ||
411 | [ C(L1I ) ] = { | ||
412 | [ C(OP_READ) ] = { | ||
413 | [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches */ | ||
414 | [ C(RESULT_MISS) ] = 0x0081, /* Instruction cache misses */ | ||
415 | }, | ||
416 | [ C(OP_WRITE) ] = { | ||
417 | [ C(RESULT_ACCESS) ] = -1, | ||
418 | [ C(RESULT_MISS) ] = -1, | ||
419 | }, | ||
420 | [ C(OP_PREFETCH) ] = { | ||
421 | [ C(RESULT_ACCESS) ] = 0, | ||
422 | [ C(RESULT_MISS) ] = 0, | ||
423 | }, | ||
424 | }, | ||
425 | [ C(LL ) ] = { | ||
426 | [ C(OP_READ) ] = { | ||
427 | [ C(RESULT_ACCESS) ] = 0, | ||
428 | [ C(RESULT_MISS) ] = 0, | ||
429 | }, | ||
430 | [ C(OP_WRITE) ] = { | ||
431 | [ C(RESULT_ACCESS) ] = 0, | ||
432 | [ C(RESULT_MISS) ] = 0, | ||
433 | }, | ||
434 | [ C(OP_PREFETCH) ] = { | ||
435 | [ C(RESULT_ACCESS) ] = 0, | ||
436 | [ C(RESULT_MISS) ] = 0, | ||
437 | }, | ||
438 | }, | ||
439 | [ C(DTLB) ] = { | ||
440 | [ C(OP_READ) ] = { | ||
441 | [ C(RESULT_ACCESS) ] = 0, | ||
442 | [ C(RESULT_MISS) ] = 0, | ||
443 | }, | ||
444 | [ C(OP_WRITE) ] = { | ||
445 | [ C(RESULT_ACCESS) ] = 0, | ||
446 | [ C(RESULT_MISS) ] = 0, | ||
447 | }, | ||
448 | [ C(OP_PREFETCH) ] = { | ||
449 | [ C(RESULT_ACCESS) ] = 0, | ||
450 | [ C(RESULT_MISS) ] = 0, | ||
451 | }, | ||
452 | }, | ||
453 | [ C(ITLB) ] = { | ||
454 | [ C(OP_READ) ] = { | ||
455 | [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches */ | ||
456 | [ C(RESULT_MISS) ] = 0x0085, /* Instr. fetch ITLB misses */ | ||
457 | }, | ||
458 | [ C(OP_WRITE) ] = { | ||
459 | [ C(RESULT_ACCESS) ] = -1, | ||
460 | [ C(RESULT_MISS) ] = -1, | ||
461 | }, | ||
462 | [ C(OP_PREFETCH) ] = { | ||
463 | [ C(RESULT_ACCESS) ] = -1, | ||
464 | [ C(RESULT_MISS) ] = -1, | ||
465 | }, | ||
466 | }, | ||
467 | [ C(BPU ) ] = { | ||
468 | [ C(OP_READ) ] = { | ||
469 | [ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr. */ | ||
470 | [ C(RESULT_MISS) ] = 0x00c3, /* Retired Mispredicted BI */ | ||
471 | }, | ||
472 | [ C(OP_WRITE) ] = { | ||
473 | [ C(RESULT_ACCESS) ] = -1, | ||
474 | [ C(RESULT_MISS) ] = -1, | ||
475 | }, | ||
476 | [ C(OP_PREFETCH) ] = { | ||
477 | [ C(RESULT_ACCESS) ] = -1, | ||
478 | [ C(RESULT_MISS) ] = -1, | ||
479 | }, | ||
480 | }, | ||
481 | }; | ||
482 | |||
483 | /* | ||
484 | * AMD Performance Monitor K7 and later. | ||
485 | */ | ||
486 | static const u64 amd_perfmon_event_map[] = | ||
487 | { | ||
488 | [PERF_COUNT_HW_CPU_CYCLES] = 0x0076, | ||
489 | [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, | ||
490 | [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0080, | ||
491 | [PERF_COUNT_HW_CACHE_MISSES] = 0x0081, | ||
492 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4, | ||
493 | [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5, | ||
494 | }; | ||
495 | |||
496 | static u64 amd_pmu_event_map(int event) | ||
497 | { | ||
498 | return amd_perfmon_event_map[event]; | ||
499 | } | ||
500 | |||
501 | static u64 amd_pmu_raw_event(u64 event) | ||
502 | { | ||
503 | #define K7_EVNTSEL_EVENT_MASK 0x7000000FFULL | ||
504 | #define K7_EVNTSEL_UNIT_MASK 0x00000FF00ULL | ||
505 | #define K7_EVNTSEL_EDGE_MASK 0x000040000ULL | ||
506 | #define K7_EVNTSEL_INV_MASK 0x000800000ULL | ||
507 | #define K7_EVNTSEL_COUNTER_MASK 0x0FF000000ULL | ||
508 | |||
509 | #define K7_EVNTSEL_MASK \ | ||
510 | (K7_EVNTSEL_EVENT_MASK | \ | ||
511 | K7_EVNTSEL_UNIT_MASK | \ | ||
512 | K7_EVNTSEL_EDGE_MASK | \ | ||
513 | K7_EVNTSEL_INV_MASK | \ | ||
514 | K7_EVNTSEL_COUNTER_MASK) | ||
515 | |||
516 | return event & K7_EVNTSEL_MASK; | ||
517 | } | ||
518 | |||
519 | /* | ||
520 | * Propagate counter elapsed time into the generic counter. | ||
521 | * Can only be executed on the CPU where the counter is active. | ||
522 | * Returns the delta events processed. | ||
523 | */ | ||
524 | static u64 | ||
525 | x86_perf_counter_update(struct perf_counter *counter, | ||
526 | struct hw_perf_counter *hwc, int idx) | ||
527 | { | ||
528 | int shift = 64 - x86_pmu.counter_bits; | ||
529 | u64 prev_raw_count, new_raw_count; | ||
530 | s64 delta; | ||
531 | |||
532 | /* | ||
533 | * Careful: an NMI might modify the previous counter value. | ||
534 | * | ||
535 | * Our tactic to handle this is to first atomically read and | ||
536 | * exchange a new raw count - then add that new-prev delta | ||
537 | * count to the generic counter atomically: | ||
538 | */ | ||
539 | again: | ||
540 | prev_raw_count = atomic64_read(&hwc->prev_count); | ||
541 | rdmsrl(hwc->counter_base + idx, new_raw_count); | ||
542 | |||
543 | if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count, | ||
544 | new_raw_count) != prev_raw_count) | ||
545 | goto again; | ||
546 | |||
547 | /* | ||
548 | * Now we have the new raw value and have updated the prev | ||
549 | * timestamp already. We can now calculate the elapsed delta | ||
550 | * (counter-)time and add that to the generic counter. | ||
551 | * | ||
552 | * Careful, not all hw sign-extends above the physical width | ||
553 | * of the count. | ||
554 | */ | ||
555 | delta = (new_raw_count << shift) - (prev_raw_count << shift); | ||
556 | delta >>= shift; | ||
557 | |||
558 | atomic64_add(delta, &counter->count); | ||
559 | atomic64_sub(delta, &hwc->period_left); | ||
560 | |||
561 | return new_raw_count; | ||
562 | } | ||
563 | |||
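The delta computation in x86_perf_counter_update() relies on shifting both raw counter values up to bit 63 and back down, so that values read from a PMC narrower than 64 bits are sign-extended consistently and the subtraction stays correct across a counter wrap. A standalone sketch of just that arithmetic, with a hypothetical 40-bit counter width:

```c
#include <stdio.h>
#include <stdint.h>

/* Reproduce the delta computation from x86_perf_counter_update() for a
 * counter that is only counter_bits wide: shifting both raw values up to
 * bit 63 and back sign-extends them consistently, so the subtraction is
 * correct even when the hardware counter wrapped. */
static int64_t counter_delta(uint64_t prev_raw, uint64_t new_raw, int counter_bits)
{
	int shift = 64 - counter_bits;
	int64_t delta;

	delta = (new_raw << shift) - (prev_raw << shift);
	delta >>= shift;

	return delta;
}

int main(void)
{
	/* Hypothetical 40-bit counter wrapping from near its top back to 5:
	 * the true delta is 10, not a huge negative number. */
	uint64_t prev = (1ULL << 40) - 5;

	printf("delta = %lld\n", (long long)counter_delta(prev, 5, 40));
	return 0;
}
```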
564 | static atomic_t active_counters; | ||
565 | static DEFINE_MUTEX(pmc_reserve_mutex); | ||
566 | |||
567 | static bool reserve_pmc_hardware(void) | ||
568 | { | ||
569 | int i; | ||
570 | |||
571 | if (nmi_watchdog == NMI_LOCAL_APIC) | ||
572 | disable_lapic_nmi_watchdog(); | ||
573 | |||
574 | for (i = 0; i < x86_pmu.num_counters; i++) { | ||
575 | if (!reserve_perfctr_nmi(x86_pmu.perfctr + i)) | ||
576 | goto perfctr_fail; | ||
577 | } | ||
578 | |||
579 | for (i = 0; i < x86_pmu.num_counters; i++) { | ||
580 | if (!reserve_evntsel_nmi(x86_pmu.eventsel + i)) | ||
581 | goto eventsel_fail; | ||
582 | } | ||
583 | |||
584 | return true; | ||
585 | |||
586 | eventsel_fail: | ||
587 | for (i--; i >= 0; i--) | ||
588 | release_evntsel_nmi(x86_pmu.eventsel + i); | ||
589 | |||
590 | i = x86_pmu.num_counters; | ||
591 | |||
592 | perfctr_fail: | ||
593 | for (i--; i >= 0; i--) | ||
594 | release_perfctr_nmi(x86_pmu.perfctr + i); | ||
595 | |||
596 | if (nmi_watchdog == NMI_LOCAL_APIC) | ||
597 | enable_lapic_nmi_watchdog(); | ||
598 | |||
599 | return false; | ||
600 | } | ||
601 | |||
602 | static void release_pmc_hardware(void) | ||
603 | { | ||
604 | int i; | ||
605 | |||
606 | for (i = 0; i < x86_pmu.num_counters; i++) { | ||
607 | release_perfctr_nmi(x86_pmu.perfctr + i); | ||
608 | release_evntsel_nmi(x86_pmu.eventsel + i); | ||
609 | } | ||
610 | |||
611 | if (nmi_watchdog == NMI_LOCAL_APIC) | ||
612 | enable_lapic_nmi_watchdog(); | ||
613 | } | ||
614 | |||
615 | static void hw_perf_counter_destroy(struct perf_counter *counter) | ||
616 | { | ||
617 | if (atomic_dec_and_mutex_lock(&active_counters, &pmc_reserve_mutex)) { | ||
618 | release_pmc_hardware(); | ||
619 | mutex_unlock(&pmc_reserve_mutex); | ||
620 | } | ||
621 | } | ||
622 | |||
623 | static inline int x86_pmu_initialized(void) | ||
624 | { | ||
625 | return x86_pmu.handle_irq != NULL; | ||
626 | } | ||
627 | |||
628 | static inline int | ||
629 | set_ext_hw_attr(struct hw_perf_counter *hwc, struct perf_counter_attr *attr) | ||
630 | { | ||
631 | unsigned int cache_type, cache_op, cache_result; | ||
632 | u64 config, val; | ||
633 | |||
634 | config = attr->config; | ||
635 | |||
636 | cache_type = (config >> 0) & 0xff; | ||
637 | if (cache_type >= PERF_COUNT_HW_CACHE_MAX) | ||
638 | return -EINVAL; | ||
639 | |||
640 | cache_op = (config >> 8) & 0xff; | ||
641 | if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) | ||
642 | return -EINVAL; | ||
643 | |||
644 | cache_result = (config >> 16) & 0xff; | ||
645 | if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) | ||
646 | return -EINVAL; | ||
647 | |||
648 | val = hw_cache_event_ids[cache_type][cache_op][cache_result]; | ||
649 | |||
650 | if (val == 0) | ||
651 | return -ENOENT; | ||
652 | |||
653 | if (val == -1) | ||
654 | return -EINVAL; | ||
655 | |||
656 | hwc->config |= val; | ||
657 | |||
658 | return 0; | ||
659 | } | ||
660 | |||
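set_ext_hw_attr() unpacks attr->config as cache type in bits 0-7, operation in bits 8-15 and result in bits 16-23, then looks the triple up in hw_cache_event_ids[][][]. Below is a small sketch of the packing side; the local enums mirror the ordering used by the tables above and are illustrative rather than copied from the uapi header.

```c
#include <stdio.h>
#include <stdint.h>

/* Cache events are requested as a packed attr->config, decoded above in
 * set_ext_hw_attr(): cache type in bits 0-7, operation in bits 8-15,
 * result in bits 16-23. Values here mirror the enum ordering used by
 * the hw_cache_event_ids tables (L1D = 0, OP_READ = 0, RESULT_MISS = 1, ...). */
enum { CACHE_L1D, CACHE_L1I, CACHE_LL, CACHE_DTLB, CACHE_ITLB, CACHE_BPU };
enum { CACHE_OP_READ, CACHE_OP_WRITE, CACHE_OP_PREFETCH };
enum { CACHE_RESULT_ACCESS, CACHE_RESULT_MISS };

static uint64_t cache_config(unsigned type, unsigned op, unsigned result)
{
	return (uint64_t)type | ((uint64_t)op << 8) | ((uint64_t)result << 16);
}

int main(void)
{
	/* "L1 data cache read misses" as a single config value. */
	uint64_t cfg = cache_config(CACHE_L1D, CACHE_OP_READ, CACHE_RESULT_MISS);

	printf("config = 0x%llx -> type %llu, op %llu, result %llu\n",
	       (unsigned long long)cfg,
	       (unsigned long long)(cfg & 0xff),
	       (unsigned long long)((cfg >> 8) & 0xff),
	       (unsigned long long)((cfg >> 16) & 0xff));
	return 0;
}
```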
661 | /* | ||
662 | * Setup the hardware configuration for a given attr_type | ||
663 | */ | ||
664 | static int __hw_perf_counter_init(struct perf_counter *counter) | ||
665 | { | ||
666 | struct perf_counter_attr *attr = &counter->attr; | ||
667 | struct hw_perf_counter *hwc = &counter->hw; | ||
668 | int err; | ||
669 | |||
670 | if (!x86_pmu_initialized()) | ||
671 | return -ENODEV; | ||
672 | |||
673 | err = 0; | ||
674 | if (!atomic_inc_not_zero(&active_counters)) { | ||
675 | mutex_lock(&pmc_reserve_mutex); | ||
676 | if (atomic_read(&active_counters) == 0 && !reserve_pmc_hardware()) | ||
677 | err = -EBUSY; | ||
678 | else | ||
679 | atomic_inc(&active_counters); | ||
680 | mutex_unlock(&pmc_reserve_mutex); | ||
681 | } | ||
682 | if (err) | ||
683 | return err; | ||
684 | |||
685 | /* | ||
686 | * Generate PMC IRQs: | ||
687 | * (keep 'enabled' bit clear for now) | ||
688 | */ | ||
689 | hwc->config = ARCH_PERFMON_EVENTSEL_INT; | ||
690 | |||
691 | /* | ||
692 | * Count user and OS events unless requested not to. | ||
693 | */ | ||
694 | if (!attr->exclude_user) | ||
695 | hwc->config |= ARCH_PERFMON_EVENTSEL_USR; | ||
696 | if (!attr->exclude_kernel) | ||
697 | hwc->config |= ARCH_PERFMON_EVENTSEL_OS; | ||
698 | |||
699 | if (!hwc->sample_period) { | ||
700 | hwc->sample_period = x86_pmu.max_period; | ||
701 | hwc->last_period = hwc->sample_period; | ||
702 | atomic64_set(&hwc->period_left, hwc->sample_period); | ||
703 | } | ||
704 | |||
705 | counter->destroy = hw_perf_counter_destroy; | ||
706 | |||
707 | /* | ||
708 | * Raw event types provide the config in the event structure | ||
709 | */ | ||
710 | if (attr->type == PERF_TYPE_RAW) { | ||
711 | hwc->config |= x86_pmu.raw_event(attr->config); | ||
712 | return 0; | ||
713 | } | ||
714 | |||
715 | if (attr->type == PERF_TYPE_HW_CACHE) | ||
716 | return set_ext_hw_attr(hwc, attr); | ||
717 | |||
718 | if (attr->config >= x86_pmu.max_events) | ||
719 | return -EINVAL; | ||
720 | /* | ||
721 | * The generic map: | ||
722 | */ | ||
723 | hwc->config |= x86_pmu.event_map(attr->config); | ||
724 | |||
725 | return 0; | ||
726 | } | ||
727 | |||
728 | static void intel_pmu_disable_all(void) | ||
729 | { | ||
730 | wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); | ||
731 | } | ||
732 | |||
733 | static void amd_pmu_disable_all(void) | ||
734 | { | ||
735 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); | ||
736 | int idx; | ||
737 | |||
738 | if (!cpuc->enabled) | ||
739 | return; | ||
740 | |||
741 | cpuc->enabled = 0; | ||
742 | /* | ||
743 | * ensure we write the disable before we start disabling the | ||
744 | * counters proper, so that amd_pmu_enable_counter() does the | ||
745 | * right thing. | ||
746 | */ | ||
747 | barrier(); | ||
748 | |||
749 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { | ||
750 | u64 val; | ||
751 | |||
752 | if (!test_bit(idx, cpuc->active_mask)) | ||
753 | continue; | ||
754 | rdmsrl(MSR_K7_EVNTSEL0 + idx, val); | ||
755 | if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE)) | ||
756 | continue; | ||
757 | val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE; | ||
758 | wrmsrl(MSR_K7_EVNTSEL0 + idx, val); | ||
759 | } | ||
760 | } | ||
761 | |||
762 | void hw_perf_disable(void) | ||
763 | { | ||
764 | if (!x86_pmu_initialized()) | ||
765 | return; | ||
766 | return x86_pmu.disable_all(); | ||
767 | } | ||
768 | |||
769 | static void intel_pmu_enable_all(void) | ||
770 | { | ||
771 | wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl); | ||
772 | } | ||
773 | |||
774 | static void amd_pmu_enable_all(void) | ||
775 | { | ||
776 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); | ||
777 | int idx; | ||
778 | |||
779 | if (cpuc->enabled) | ||
780 | return; | ||
781 | |||
782 | cpuc->enabled = 1; | ||
783 | barrier(); | ||
784 | |||
785 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { | ||
786 | u64 val; | ||
787 | |||
788 | if (!test_bit(idx, cpuc->active_mask)) | ||
789 | continue; | ||
790 | rdmsrl(MSR_K7_EVNTSEL0 + idx, val); | ||
791 | if (val & ARCH_PERFMON_EVENTSEL0_ENABLE) | ||
792 | continue; | ||
793 | val |= ARCH_PERFMON_EVENTSEL0_ENABLE; | ||
794 | wrmsrl(MSR_K7_EVNTSEL0 + idx, val); | ||
795 | } | ||
796 | } | ||
797 | |||
798 | void hw_perf_enable(void) | ||
799 | { | ||
800 | if (!x86_pmu_initialized()) | ||
801 | return; | ||
802 | x86_pmu.enable_all(); | ||
803 | } | ||
804 | |||
805 | static inline u64 intel_pmu_get_status(void) | ||
806 | { | ||
807 | u64 status; | ||
808 | |||
809 | rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); | ||
810 | |||
811 | return status; | ||
812 | } | ||
813 | |||
814 | static inline void intel_pmu_ack_status(u64 ack) | ||
815 | { | ||
816 | wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack); | ||
817 | } | ||
818 | |||
819 | static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) | ||
820 | { | ||
821 | int err; | ||
822 | err = checking_wrmsrl(hwc->config_base + idx, | ||
823 | hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE); | ||
824 | } | ||
825 | |||
826 | static inline void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) | ||
827 | { | ||
828 | int err; | ||
829 | err = checking_wrmsrl(hwc->config_base + idx, | ||
830 | hwc->config); | ||
831 | } | ||
832 | |||
833 | static inline void | ||
834 | intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx) | ||
835 | { | ||
836 | int idx = __idx - X86_PMC_IDX_FIXED; | ||
837 | u64 ctrl_val, mask; | ||
838 | int err; | ||
839 | |||
840 | mask = 0xfULL << (idx * 4); | ||
841 | |||
842 | rdmsrl(hwc->config_base, ctrl_val); | ||
843 | ctrl_val &= ~mask; | ||
844 | err = checking_wrmsrl(hwc->config_base, ctrl_val); | ||
845 | } | ||
846 | |||
847 | static inline void | ||
848 | intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) | ||
849 | { | ||
850 | if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { | ||
851 | intel_pmu_disable_fixed(hwc, idx); | ||
852 | return; | ||
853 | } | ||
854 | |||
855 | x86_pmu_disable_counter(hwc, idx); | ||
856 | } | ||
857 | |||
858 | static inline void | ||
859 | amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx) | ||
860 | { | ||
861 | x86_pmu_disable_counter(hwc, idx); | ||
862 | } | ||
863 | |||
864 | static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]); | ||
865 | |||
866 | /* | ||
867 | * Set the next IRQ period, based on the hwc->period_left value. | ||
868 | * To be called with the counter disabled in hw: | ||
869 | */ | ||
870 | static int | ||
871 | x86_perf_counter_set_period(struct perf_counter *counter, | ||
872 | struct hw_perf_counter *hwc, int idx) | ||
873 | { | ||
874 | s64 left = atomic64_read(&hwc->period_left); | ||
875 | s64 period = hwc->sample_period; | ||
876 | int err, ret = 0; | ||
877 | |||
878 | /* | ||
879 | * If we are way outside a reasonable range then just skip forward: | ||
880 | */ | ||
881 | if (unlikely(left <= -period)) { | ||
882 | left = period; | ||
883 | atomic64_set(&hwc->period_left, left); | ||
884 | hwc->last_period = period; | ||
885 | ret = 1; | ||
886 | } | ||
887 | |||
888 | if (unlikely(left <= 0)) { | ||
889 | left += period; | ||
890 | atomic64_set(&hwc->period_left, left); | ||
891 | hwc->last_period = period; | ||
892 | ret = 1; | ||
893 | } | ||
894 | /* | ||
895 | * Quirk: certain CPUs don't like it if just 1 event is left: | ||
896 | */ | ||
897 | if (unlikely(left < 2)) | ||
898 | left = 2; | ||
899 | |||
900 | if (left > x86_pmu.max_period) | ||
901 | left = x86_pmu.max_period; | ||
902 | |||
903 | per_cpu(prev_left[idx], smp_processor_id()) = left; | ||
904 | |||
905 | /* | ||
906 | * The hw counter starts counting from this counter offset, | ||
907 | * mark it to be able to extract future deltas: | ||
908 | */ | ||
909 | atomic64_set(&hwc->prev_count, (u64)-left); | ||
910 | |||
911 | err = checking_wrmsrl(hwc->counter_base + idx, | ||
912 | (u64)(-left) & x86_pmu.counter_mask); | ||
913 | |||
914 | return ret; | ||
915 | } | ||
916 | |||
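x86_perf_counter_set_period() programs the PMC with -left (masked to the counter width), so the hardware counts up, wraps, and raises the overflow interrupt after exactly 'left' further events. A tiny arithmetic check of that convention, using an assumed 40-bit counter width:

```c
#include <stdio.h>
#include <stdint.h>

/* Mirror the period logic above: the MSR is programmed with -left (masked
 * to the counter width) so that the PMC overflows and raises the PMI after
 * exactly 'left' further events. The counter width is a stand-in value. */
int main(void)
{
	uint64_t counter_mask = (1ULL << 40) - 1;	/* hypothetical 40-bit PMC */
	int64_t  left = 100000;				/* events until next sample */
	uint64_t programmed = (uint64_t)(-left) & counter_mask;

	/* Overflow happens when the counter wraps past counter_mask. */
	printf("programmed = 0x%llx, events to overflow = %llu\n",
	       (unsigned long long)programmed,
	       (unsigned long long)(counter_mask - programmed + 1));
	return 0;
}
```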
917 | static inline void | ||
918 | intel_pmu_enable_fixed(struct hw_perf_counter *hwc, int __idx) | ||
919 | { | ||
920 | int idx = __idx - X86_PMC_IDX_FIXED; | ||
921 | u64 ctrl_val, bits, mask; | ||
922 | int err; | ||
923 | |||
924 | /* | ||
925 | * Enable IRQ generation (0x8), | ||
926 | * and enable ring-3 counting (0x2) and ring-0 counting (0x1) | ||
927 | * if requested: | ||
928 | */ | ||
929 | bits = 0x8ULL; | ||
930 | if (hwc->config & ARCH_PERFMON_EVENTSEL_USR) | ||
931 | bits |= 0x2; | ||
932 | if (hwc->config & ARCH_PERFMON_EVENTSEL_OS) | ||
933 | bits |= 0x1; | ||
934 | bits <<= (idx * 4); | ||
935 | mask = 0xfULL << (idx * 4); | ||
936 | |||
937 | rdmsrl(hwc->config_base, ctrl_val); | ||
938 | ctrl_val &= ~mask; | ||
939 | ctrl_val |= bits; | ||
940 | err = checking_wrmsrl(hwc->config_base, ctrl_val); | ||
941 | } | ||
942 | |||
943 | static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) | ||
944 | { | ||
945 | if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { | ||
946 | intel_pmu_enable_fixed(hwc, idx); | ||
947 | return; | ||
948 | } | ||
949 | |||
950 | x86_pmu_enable_counter(hwc, idx); | ||
951 | } | ||
952 | |||
953 | static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx) | ||
954 | { | ||
955 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); | ||
956 | |||
957 | if (cpuc->enabled) | ||
958 | x86_pmu_enable_counter(hwc, idx); | ||
959 | else | ||
960 | x86_pmu_disable_counter(hwc, idx); | ||
961 | } | ||
962 | |||
963 | static int | ||
964 | fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc) | ||
965 | { | ||
966 | unsigned int event; | ||
967 | |||
968 | if (!x86_pmu.num_counters_fixed) | ||
969 | return -1; | ||
970 | |||
971 | event = hwc->config & ARCH_PERFMON_EVENT_MASK; | ||
972 | |||
973 | if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS))) | ||
974 | return X86_PMC_IDX_FIXED_INSTRUCTIONS; | ||
975 | if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES))) | ||
976 | return X86_PMC_IDX_FIXED_CPU_CYCLES; | ||
977 | if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_BUS_CYCLES))) | ||
978 | return X86_PMC_IDX_FIXED_BUS_CYCLES; | ||
979 | |||
980 | return -1; | ||
981 | } | ||
982 | |||
983 | /* | ||
984 | * Find a PMC slot for the freshly enabled / scheduled in counter: | ||
985 | */ | ||
986 | static int x86_pmu_enable(struct perf_counter *counter) | ||
987 | { | ||
988 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); | ||
989 | struct hw_perf_counter *hwc = &counter->hw; | ||
990 | int idx; | ||
991 | |||
992 | idx = fixed_mode_idx(counter, hwc); | ||
993 | if (idx >= 0) { | ||
994 | /* | ||
995 | * Try to get the fixed counter; if that is already taken, | ||
996 | * then try to get a generic counter: | ||
997 | */ | ||
998 | if (test_and_set_bit(idx, cpuc->used_mask)) | ||
999 | goto try_generic; | ||
1000 | |||
1001 | hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL; | ||
1002 | /* | ||
1003 | * We set it so that counter_base + idx in wrmsr/rdmsr maps to | ||
1004 | * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2: | ||
1005 | */ | ||
1006 | hwc->counter_base = | ||
1007 | MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED; | ||
1008 | hwc->idx = idx; | ||
1009 | } else { | ||
1010 | idx = hwc->idx; | ||
1011 | /* Try to get the previous generic counter again */ | ||
1012 | if (test_and_set_bit(idx, cpuc->used_mask)) { | ||
1013 | try_generic: | ||
1014 | idx = find_first_zero_bit(cpuc->used_mask, | ||
1015 | x86_pmu.num_counters); | ||
1016 | if (idx == x86_pmu.num_counters) | ||
1017 | return -EAGAIN; | ||
1018 | |||
1019 | set_bit(idx, cpuc->used_mask); | ||
1020 | hwc->idx = idx; | ||
1021 | } | ||
1022 | hwc->config_base = x86_pmu.eventsel; | ||
1023 | hwc->counter_base = x86_pmu.perfctr; | ||
1024 | } | ||
1025 | |||
1026 | perf_counters_lapic_init(); | ||
1027 | |||
1028 | x86_pmu.disable(hwc, idx); | ||
1029 | |||
1030 | cpuc->counters[idx] = counter; | ||
1031 | set_bit(idx, cpuc->active_mask); | ||
1032 | |||
1033 | x86_perf_counter_set_period(counter, hwc, idx); | ||
1034 | x86_pmu.enable(hwc, idx); | ||
1035 | |||
1036 | return 0; | ||
1037 | } | ||
1038 | |||
1039 | static void x86_pmu_unthrottle(struct perf_counter *counter) | ||
1040 | { | ||
1041 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); | ||
1042 | struct hw_perf_counter *hwc = &counter->hw; | ||
1043 | |||
1044 | if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX || | ||
1045 | cpuc->counters[hwc->idx] != counter)) | ||
1046 | return; | ||
1047 | |||
1048 | x86_pmu.enable(hwc, hwc->idx); | ||
1049 | } | ||
1050 | |||
1051 | void perf_counter_print_debug(void) | ||
1052 | { | ||
1053 | u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed; | ||
1054 | struct cpu_hw_counters *cpuc; | ||
1055 | unsigned long flags; | ||
1056 | int cpu, idx; | ||
1057 | |||
1058 | if (!x86_pmu.num_counters) | ||
1059 | return; | ||
1060 | |||
1061 | local_irq_save(flags); | ||
1062 | |||
1063 | cpu = smp_processor_id(); | ||
1064 | cpuc = &per_cpu(cpu_hw_counters, cpu); | ||
1065 | |||
1066 | if (x86_pmu.version >= 2) { | ||
1067 | rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl); | ||
1068 | rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status); | ||
1069 | rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow); | ||
1070 | rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed); | ||
1071 | |||
1072 | pr_info("\n"); | ||
1073 | pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl); | ||
1074 | pr_info("CPU#%d: status: %016llx\n", cpu, status); | ||
1075 | pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow); | ||
1076 | pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed); | ||
1077 | } | ||
1078 | pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used_mask); | ||
1079 | |||
1080 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { | ||
1081 | rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl); | ||
1082 | rdmsrl(x86_pmu.perfctr + idx, pmc_count); | ||
1083 | |||
1084 | prev_left = per_cpu(prev_left[idx], cpu); | ||
1085 | |||
1086 | pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n", | ||
1087 | cpu, idx, pmc_ctrl); | ||
1088 | pr_info("CPU#%d: gen-PMC%d count: %016llx\n", | ||
1089 | cpu, idx, pmc_count); | ||
1090 | pr_info("CPU#%d: gen-PMC%d left: %016llx\n", | ||
1091 | cpu, idx, prev_left); | ||
1092 | } | ||
1093 | for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) { | ||
1094 | rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count); | ||
1095 | |||
1096 | pr_info("CPU#%d: fixed-PMC%d count: %016llx\n", | ||
1097 | cpu, idx, pmc_count); | ||
1098 | } | ||
1099 | local_irq_restore(flags); | ||
1100 | } | ||
1101 | |||
1102 | static void x86_pmu_disable(struct perf_counter *counter) | ||
1103 | { | ||
1104 | struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters); | ||
1105 | struct hw_perf_counter *hwc = &counter->hw; | ||
1106 | int idx = hwc->idx; | ||
1107 | |||
1108 | /* | ||
1109 | * Must be done before we disable; otherwise the NMI handler | ||
1110 | * could re-enable it: | ||
1111 | */ | ||
1112 | clear_bit(idx, cpuc->active_mask); | ||
1113 | x86_pmu.disable(hwc, idx); | ||
1114 | |||
1115 | /* | ||
1116 | * Make sure the cleared pointer becomes visible before we | ||
1117 | * (potentially) free the counter: | ||
1118 | */ | ||
1119 | barrier(); | ||
1120 | |||
1121 | /* | ||
1122 | * Drain the remaining delta count out of a counter | ||
1123 | * that we are disabling: | ||
1124 | */ | ||
1125 | x86_perf_counter_update(counter, hwc, idx); | ||
1126 | cpuc->counters[idx] = NULL; | ||
1127 | clear_bit(idx, cpuc->used_mask); | ||
1128 | } | ||
1129 | |||
1130 | /* | ||
1131 | * Save and restart an expired counter. Called by NMI contexts, | ||
1132 | * so it has to be careful about preempting normal counter ops: | ||
1133 | */ | ||
1134 | static int intel_pmu_save_and_restart(struct perf_counter *counter) | ||
1135 | { | ||
1136 | struct hw_perf_counter *hwc = &counter->hw; | ||
1137 | int idx = hwc->idx; | ||
1138 | int ret; | ||
1139 | |||
1140 | x86_perf_counter_update(counter, hwc, idx); | ||
1141 | ret = x86_perf_counter_set_period(counter, hwc, idx); | ||
1142 | |||
1143 | if (counter->state == PERF_COUNTER_STATE_ACTIVE) | ||
1144 | intel_pmu_enable_counter(hwc, idx); | ||
1145 | |||
1146 | return ret; | ||
1147 | } | ||
1148 | |||
1149 | static void intel_pmu_reset(void) | ||
1150 | { | ||
1151 | unsigned long flags; | ||
1152 | int idx; | ||
1153 | |||
1154 | if (!x86_pmu.num_counters) | ||
1155 | return; | ||
1156 | |||
1157 | local_irq_save(flags); | ||
1158 | |||
1159 | printk("clearing PMU state on CPU#%d\n", smp_processor_id()); | ||
1160 | |||
1161 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { | ||
1162 | checking_wrmsrl(x86_pmu.eventsel + idx, 0ull); | ||
1163 | checking_wrmsrl(x86_pmu.perfctr + idx, 0ull); | ||
1164 | } | ||
1165 | for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) { | ||
1166 | checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull); | ||
1167 | } | ||
1168 | |||
1169 | local_irq_restore(flags); | ||
1170 | } | ||
1171 | |||
1172 | |||
1173 | /* | ||
1174 | * This handler is triggered by the local APIC, so the APIC IRQ handling | ||
1175 | * rules apply: | ||
1176 | */ | ||
1177 | static int intel_pmu_handle_irq(struct pt_regs *regs) | ||
1178 | { | ||
1179 | struct perf_sample_data data; | ||
1180 | struct cpu_hw_counters *cpuc; | ||
1181 | int bit, cpu, loops; | ||
1182 | u64 ack, status; | ||
1183 | |||
1184 | data.regs = regs; | ||
1185 | data.addr = 0; | ||
1186 | |||
1187 | cpu = smp_processor_id(); | ||
1188 | cpuc = &per_cpu(cpu_hw_counters, cpu); | ||
1189 | |||
1190 | perf_disable(); | ||
1191 | status = intel_pmu_get_status(); | ||
1192 | if (!status) { | ||
1193 | perf_enable(); | ||
1194 | return 0; | ||
1195 | } | ||
1196 | |||
1197 | loops = 0; | ||
1198 | again: | ||
1199 | if (++loops > 100) { | ||
1200 | WARN_ONCE(1, "perfcounters: irq loop stuck!\n"); | ||
1201 | perf_counter_print_debug(); | ||
1202 | intel_pmu_reset(); | ||
1203 | perf_enable(); | ||
1204 | return 1; | ||
1205 | } | ||
1206 | |||
1207 | inc_irq_stat(apic_perf_irqs); | ||
1208 | ack = status; | ||
1209 | for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) { | ||
1210 | struct perf_counter *counter = cpuc->counters[bit]; | ||
1211 | |||
1212 | clear_bit(bit, (unsigned long *) &status); | ||
1213 | if (!test_bit(bit, cpuc->active_mask)) | ||
1214 | continue; | ||
1215 | |||
1216 | if (!intel_pmu_save_and_restart(counter)) | ||
1217 | continue; | ||
1218 | |||
1219 | if (perf_counter_overflow(counter, 1, &data)) | ||
1220 | intel_pmu_disable_counter(&counter->hw, bit); | ||
1221 | } | ||
1222 | |||
1223 | intel_pmu_ack_status(ack); | ||
1224 | |||
1225 | /* | ||
1226 | * Repeat if there is more work to be done: | ||
1227 | */ | ||
1228 | status = intel_pmu_get_status(); | ||
1229 | if (status) | ||
1230 | goto again; | ||
1231 | |||
1232 | perf_enable(); | ||
1233 | |||
1234 | return 1; | ||
1235 | } | ||
1236 | |||
1237 | static int amd_pmu_handle_irq(struct pt_regs *regs) | ||
1238 | { | ||
1239 | struct perf_sample_data data; | ||
1240 | struct cpu_hw_counters *cpuc; | ||
1241 | struct perf_counter *counter; | ||
1242 | struct hw_perf_counter *hwc; | ||
1243 | int cpu, idx, handled = 0; | ||
1244 | u64 val; | ||
1245 | |||
1246 | data.regs = regs; | ||
1247 | data.addr = 0; | ||
1248 | |||
1249 | cpu = smp_processor_id(); | ||
1250 | cpuc = &per_cpu(cpu_hw_counters, cpu); | ||
1251 | |||
1252 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { | ||
1253 | if (!test_bit(idx, cpuc->active_mask)) | ||
1254 | continue; | ||
1255 | |||
1256 | counter = cpuc->counters[idx]; | ||
1257 | hwc = &counter->hw; | ||
1258 | |||
1259 | val = x86_perf_counter_update(counter, hwc, idx); | ||
1260 | if (val & (1ULL << (x86_pmu.counter_bits - 1))) | ||
1261 | continue; | ||
1262 | |||
1263 | /* | ||
1264 | * counter overflow | ||
1265 | */ | ||
1266 | handled = 1; | ||
1267 | data.period = counter->hw.last_period; | ||
1268 | |||
1269 | if (!x86_perf_counter_set_period(counter, hwc, idx)) | ||
1270 | continue; | ||
1271 | |||
1272 | if (perf_counter_overflow(counter, 1, &data)) | ||
1273 | amd_pmu_disable_counter(hwc, idx); | ||
1274 | } | ||
1275 | |||
1276 | if (handled) | ||
1277 | inc_irq_stat(apic_perf_irqs); | ||
1278 | |||
1279 | return handled; | ||
1280 | } | ||
1281 | |||
1282 | void smp_perf_pending_interrupt(struct pt_regs *regs) | ||
1283 | { | ||
1284 | irq_enter(); | ||
1285 | ack_APIC_irq(); | ||
1286 | inc_irq_stat(apic_pending_irqs); | ||
1287 | perf_counter_do_pending(); | ||
1288 | irq_exit(); | ||
1289 | } | ||
1290 | |||
1291 | void set_perf_counter_pending(void) | ||
1292 | { | ||
1293 | apic->send_IPI_self(LOCAL_PENDING_VECTOR); | ||
1294 | } | ||
1295 | |||
1296 | void perf_counters_lapic_init(void) | ||
1297 | { | ||
1298 | if (!x86_pmu_initialized()) | ||
1299 | return; | ||
1300 | |||
1301 | /* | ||
1302 | * Always use NMI for PMU | ||
1303 | */ | ||
1304 | apic_write(APIC_LVTPC, APIC_DM_NMI); | ||
1305 | } | ||
1306 | |||
1307 | static int __kprobes | ||
1308 | perf_counter_nmi_handler(struct notifier_block *self, | ||
1309 | unsigned long cmd, void *__args) | ||
1310 | { | ||
1311 | struct die_args *args = __args; | ||
1312 | struct pt_regs *regs; | ||
1313 | |||
1314 | if (!atomic_read(&active_counters)) | ||
1315 | return NOTIFY_DONE; | ||
1316 | |||
1317 | switch (cmd) { | ||
1318 | case DIE_NMI: | ||
1319 | case DIE_NMI_IPI: | ||
1320 | break; | ||
1321 | |||
1322 | default: | ||
1323 | return NOTIFY_DONE; | ||
1324 | } | ||
1325 | |||
1326 | regs = args->regs; | ||
1327 | |||
1328 | apic_write(APIC_LVTPC, APIC_DM_NMI); | ||
1329 | /* | ||
1330 | * Can't rely on the handled return value to say it was our NMI: two | ||
1331 | * counters could trigger 'simultaneously', raising two back-to-back NMIs. | ||
1332 | * | ||
1333 | * If the first NMI handles both, the latter will be empty and daze | ||
1334 | * the CPU. | ||
1335 | */ | ||
1336 | x86_pmu.handle_irq(regs); | ||
1337 | |||
1338 | return NOTIFY_STOP; | ||
1339 | } | ||
1340 | |||
1341 | static __read_mostly struct notifier_block perf_counter_nmi_notifier = { | ||
1342 | .notifier_call = perf_counter_nmi_handler, | ||
1343 | .next = NULL, | ||
1344 | .priority = 1 | ||
1345 | }; | ||
1346 | |||
1347 | static struct x86_pmu intel_pmu = { | ||
1348 | .name = "Intel", | ||
1349 | .handle_irq = intel_pmu_handle_irq, | ||
1350 | .disable_all = intel_pmu_disable_all, | ||
1351 | .enable_all = intel_pmu_enable_all, | ||
1352 | .enable = intel_pmu_enable_counter, | ||
1353 | .disable = intel_pmu_disable_counter, | ||
1354 | .eventsel = MSR_ARCH_PERFMON_EVENTSEL0, | ||
1355 | .perfctr = MSR_ARCH_PERFMON_PERFCTR0, | ||
1356 | .event_map = intel_pmu_event_map, | ||
1357 | .raw_event = intel_pmu_raw_event, | ||
1358 | .max_events = ARRAY_SIZE(intel_perfmon_event_map), | ||
1359 | /* | ||
1360 | * Intel PMCs cannot be accessed sanely above 32-bit width, | ||
1361 | * so we install an artificial 1<<31 period regardless of | ||
1362 | * the generic counter period: | ||
1363 | */ | ||
1364 | .max_period = (1ULL << 31) - 1, | ||
1365 | }; | ||
1366 | |||
1367 | static struct x86_pmu amd_pmu = { | ||
1368 | .name = "AMD", | ||
1369 | .handle_irq = amd_pmu_handle_irq, | ||
1370 | .disable_all = amd_pmu_disable_all, | ||
1371 | .enable_all = amd_pmu_enable_all, | ||
1372 | .enable = amd_pmu_enable_counter, | ||
1373 | .disable = amd_pmu_disable_counter, | ||
1374 | .eventsel = MSR_K7_EVNTSEL0, | ||
1375 | .perfctr = MSR_K7_PERFCTR0, | ||
1376 | .event_map = amd_pmu_event_map, | ||
1377 | .raw_event = amd_pmu_raw_event, | ||
1378 | .max_events = ARRAY_SIZE(amd_perfmon_event_map), | ||
1379 | .num_counters = 4, | ||
1380 | .counter_bits = 48, | ||
1381 | .counter_mask = (1ULL << 48) - 1, | ||
1382 | /* use highest bit to detect overflow */ | ||
1383 | .max_period = (1ULL << 47) - 1, | ||
1384 | }; | ||
1385 | |||
1386 | static int intel_pmu_init(void) | ||
1387 | { | ||
1388 | union cpuid10_edx edx; | ||
1389 | union cpuid10_eax eax; | ||
1390 | unsigned int unused; | ||
1391 | unsigned int ebx; | ||
1392 | int version; | ||
1393 | |||
1394 | if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) | ||
1395 | return -ENODEV; | ||
1396 | |||
1397 | /* | ||
1398 | * Check whether the Architectural PerfMon supports | ||
1399 | * the Branch Misses Retired event or not. | ||
1400 | */ | ||
1401 | cpuid(10, &eax.full, &ebx, &unused, &edx.full); | ||
1402 | if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED) | ||
1403 | return -ENODEV; | ||
1404 | |||
1405 | version = eax.split.version_id; | ||
1406 | if (version < 2) | ||
1407 | return -ENODEV; | ||
1408 | |||
1409 | x86_pmu = intel_pmu; | ||
1410 | x86_pmu.version = version; | ||
1411 | x86_pmu.num_counters = eax.split.num_counters; | ||
1412 | x86_pmu.counter_bits = eax.split.bit_width; | ||
1413 | x86_pmu.counter_mask = (1ULL << eax.split.bit_width) - 1; | ||
1414 | |||
1415 | /* | ||
1416 | * Quirk: v2 perfmon does not report fixed-purpose counters, so | ||
1417 | * assume at least 3 counters: | ||
1418 | */ | ||
1419 | x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3); | ||
1420 | |||
1421 | rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl); | ||
1422 | |||
1423 | /* | ||
1424 | * Install the hw-cache-events table: | ||
1425 | */ | ||
1426 | switch (boot_cpu_data.x86_model) { | ||
1427 | case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */ | ||
1428 | case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */ | ||
1429 | case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */ | ||
1430 | case 29: /* six-core 45 nm xeon "Dunnington" */ | ||
1431 | memcpy(hw_cache_event_ids, core2_hw_cache_event_ids, | ||
1432 | sizeof(hw_cache_event_ids)); | ||
1433 | |||
1434 | pr_cont("Core2 events, "); | ||
1435 | break; | ||
1436 | default: | ||
1437 | case 26: | ||
1438 | memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids, | ||
1439 | sizeof(hw_cache_event_ids)); | ||
1440 | |||
1441 | pr_cont("Nehalem/Corei7 events, "); | ||
1442 | break; | ||
1443 | case 28: | ||
1444 | memcpy(hw_cache_event_ids, atom_hw_cache_event_ids, | ||
1445 | sizeof(hw_cache_event_ids)); | ||
1446 | |||
1447 | pr_cont("Atom events, "); | ||
1448 | break; | ||
1449 | } | ||
1450 | return 0; | ||
1451 | } | ||
1452 | |||
1453 | static int amd_pmu_init(void) | ||
1454 | { | ||
1455 | x86_pmu = amd_pmu; | ||
1456 | |||
1457 | switch (boot_cpu_data.x86) { | ||
1458 | case 0x0f: | ||
1459 | case 0x10: | ||
1460 | case 0x11: | ||
1461 | memcpy(hw_cache_event_ids, amd_0f_hw_cache_event_ids, | ||
1462 | sizeof(hw_cache_event_ids)); | ||
1463 | |||
1464 | pr_cont("AMD Family 0f/10/11 events, "); | ||
1465 | break; | ||
1466 | } | ||
1467 | return 0; | ||
1468 | } | ||
1469 | |||
1470 | void __init init_hw_perf_counters(void) | ||
1471 | { | ||
1472 | int err; | ||
1473 | |||
1474 | pr_info("Performance Counters: "); | ||
1475 | |||
1476 | switch (boot_cpu_data.x86_vendor) { | ||
1477 | case X86_VENDOR_INTEL: | ||
1478 | err = intel_pmu_init(); | ||
1479 | break; | ||
1480 | case X86_VENDOR_AMD: | ||
1481 | err = amd_pmu_init(); | ||
1482 | break; | ||
1483 | default: | ||
1484 | return; | ||
1485 | } | ||
1486 | if (err != 0) { | ||
1487 | pr_cont("no PMU driver, software counters only.\n"); | ||
1488 | return; | ||
1489 | } | ||
1490 | |||
1491 | pr_cont("%s PMU driver.\n", x86_pmu.name); | ||
1492 | |||
1493 | if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) { | ||
1494 | x86_pmu.num_counters = X86_PMC_MAX_GENERIC; | ||
1495 | WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!", | ||
1496 | x86_pmu.num_counters, X86_PMC_MAX_GENERIC); | ||
1497 | } | ||
1498 | perf_counter_mask = (1 << x86_pmu.num_counters) - 1; | ||
1499 | perf_max_counters = x86_pmu.num_counters; | ||
1500 | |||
1501 | if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) { | ||
1502 | x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED; | ||
1503 | WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!", | ||
1504 | x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED); | ||
1505 | } | ||
1506 | |||
1507 | perf_counter_mask |= | ||
1508 | ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED; | ||
1509 | |||
1510 | perf_counters_lapic_init(); | ||
1511 | register_die_notifier(&perf_counter_nmi_notifier); | ||
1512 | |||
1513 | pr_info("... version: %d\n", x86_pmu.version); | ||
1514 | pr_info("... bit width: %d\n", x86_pmu.counter_bits); | ||
1515 | pr_info("... generic counters: %d\n", x86_pmu.num_counters); | ||
1516 | pr_info("... value mask: %016Lx\n", x86_pmu.counter_mask); | ||
1517 | pr_info("... max period: %016Lx\n", x86_pmu.max_period); | ||
1518 | pr_info("... fixed-purpose counters: %d\n", x86_pmu.num_counters_fixed); | ||
1519 | pr_info("... counter mask: %016Lx\n", perf_counter_mask); | ||
1520 | } | ||
1521 | |||
1522 | static inline void x86_pmu_read(struct perf_counter *counter) | ||
1523 | { | ||
1524 | x86_perf_counter_update(counter, &counter->hw, counter->hw.idx); | ||
1525 | } | ||
1526 | |||
1527 | static const struct pmu pmu = { | ||
1528 | .enable = x86_pmu_enable, | ||
1529 | .disable = x86_pmu_disable, | ||
1530 | .read = x86_pmu_read, | ||
1531 | .unthrottle = x86_pmu_unthrottle, | ||
1532 | }; | ||
1533 | |||
1534 | const struct pmu *hw_perf_counter_init(struct perf_counter *counter) | ||
1535 | { | ||
1536 | int err; | ||
1537 | |||
1538 | err = __hw_perf_counter_init(counter); | ||
1539 | if (err) | ||
1540 | return ERR_PTR(err); | ||
1541 | |||
1542 | return &pmu; | ||
1543 | } | ||
1544 | |||
1545 | /* | ||
1546 | * callchain support | ||
1547 | */ | ||
1548 | |||
1549 | static inline | ||
1550 | void callchain_store(struct perf_callchain_entry *entry, unsigned long ip) | ||
1551 | { | ||
1552 | if (entry->nr < MAX_STACK_DEPTH) | ||
1553 | entry->ip[entry->nr++] = ip; | ||
1554 | } | ||
1555 | |||
1556 | static DEFINE_PER_CPU(struct perf_callchain_entry, irq_entry); | ||
1557 | static DEFINE_PER_CPU(struct perf_callchain_entry, nmi_entry); | ||
1558 | |||
1559 | |||
1560 | static void | ||
1561 | backtrace_warning_symbol(void *data, char *msg, unsigned long symbol) | ||
1562 | { | ||
1563 | /* Ignore warnings */ | ||
1564 | } | ||
1565 | |||
1566 | static void backtrace_warning(void *data, char *msg) | ||
1567 | { | ||
1568 | /* Ignore warnings */ | ||
1569 | } | ||
1570 | |||
1571 | static int backtrace_stack(void *data, char *name) | ||
1572 | { | ||
1573 | /* Don't bother with IRQ stacks for now */ | ||
1574 | return -1; | ||
1575 | } | ||
1576 | |||
1577 | static void backtrace_address(void *data, unsigned long addr, int reliable) | ||
1578 | { | ||
1579 | struct perf_callchain_entry *entry = data; | ||
1580 | |||
1581 | if (reliable) | ||
1582 | callchain_store(entry, addr); | ||
1583 | } | ||
1584 | |||
1585 | static const struct stacktrace_ops backtrace_ops = { | ||
1586 | .warning = backtrace_warning, | ||
1587 | .warning_symbol = backtrace_warning_symbol, | ||
1588 | .stack = backtrace_stack, | ||
1589 | .address = backtrace_address, | ||
1590 | }; | ||
1591 | |||
1592 | static void | ||
1593 | perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry) | ||
1594 | { | ||
1595 | unsigned long bp; | ||
1596 | char *stack; | ||
1597 | int nr = entry->nr; | ||
1598 | |||
1599 | callchain_store(entry, instruction_pointer(regs)); | ||
1600 | |||
1601 | stack = ((char *)regs + sizeof(struct pt_regs)); | ||
1602 | #ifdef CONFIG_FRAME_POINTER | ||
1603 | bp = frame_pointer(regs); | ||
1604 | #else | ||
1605 | bp = 0; | ||
1606 | #endif | ||
1607 | |||
1608 | dump_trace(NULL, regs, (void *)stack, bp, &backtrace_ops, entry); | ||
1609 | |||
1610 | entry->kernel = entry->nr - nr; | ||
1611 | } | ||
1612 | |||
1613 | |||
1614 | struct stack_frame { | ||
1615 | const void __user *next_fp; | ||
1616 | unsigned long return_address; | ||
1617 | }; | ||
1618 | |||
1619 | static int copy_stack_frame(const void __user *fp, struct stack_frame *frame) | ||
1620 | { | ||
1621 | int ret; | ||
1622 | |||
1623 | if (!access_ok(VERIFY_READ, fp, sizeof(*frame))) | ||
1624 | return 0; | ||
1625 | |||
1626 | ret = 1; | ||
1627 | pagefault_disable(); | ||
1628 | if (__copy_from_user_inatomic(frame, fp, sizeof(*frame))) | ||
1629 | ret = 0; | ||
1630 | pagefault_enable(); | ||
1631 | |||
1632 | return ret; | ||
1633 | } | ||
1634 | |||
1635 | static void | ||
1636 | perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry) | ||
1637 | { | ||
1638 | struct stack_frame frame; | ||
1639 | const void __user *fp; | ||
1640 | int nr = entry->nr; | ||
1641 | |||
1642 | regs = (struct pt_regs *)current->thread.sp0 - 1; | ||
1643 | fp = (void __user *)regs->bp; | ||
1644 | |||
1645 | callchain_store(entry, regs->ip); | ||
1646 | |||
1647 | while (entry->nr < MAX_STACK_DEPTH) { | ||
1648 | frame.next_fp = NULL; | ||
1649 | frame.return_address = 0; | ||
1650 | |||
1651 | if (!copy_stack_frame(fp, &frame)) | ||
1652 | break; | ||
1653 | |||
1654 | if ((unsigned long)fp < user_stack_pointer(regs)) | ||
1655 | break; | ||
1656 | |||
1657 | callchain_store(entry, frame.return_address); | ||
1658 | fp = frame.next_fp; | ||
1659 | } | ||
1660 | |||
1661 | entry->user = entry->nr - nr; | ||
1662 | } | ||
1663 | |||
1664 | static void | ||
1665 | perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry) | ||
1666 | { | ||
1667 | int is_user; | ||
1668 | |||
1669 | if (!regs) | ||
1670 | return; | ||
1671 | |||
1672 | is_user = user_mode(regs); | ||
1673 | |||
1674 | if (!current || current->pid == 0) | ||
1675 | return; | ||
1676 | |||
1677 | if (is_user && current->state != TASK_RUNNING) | ||
1678 | return; | ||
1679 | |||
1680 | if (!is_user) | ||
1681 | perf_callchain_kernel(regs, entry); | ||
1682 | |||
1683 | if (current->mm) | ||
1684 | perf_callchain_user(regs, entry); | ||
1685 | } | ||
1686 | |||
1687 | struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) | ||
1688 | { | ||
1689 | struct perf_callchain_entry *entry; | ||
1690 | |||
1691 | if (in_nmi()) | ||
1692 | entry = &__get_cpu_var(nmi_entry); | ||
1693 | else | ||
1694 | entry = &__get_cpu_var(irq_entry); | ||
1695 | |||
1696 | entry->nr = 0; | ||
1697 | entry->hv = 0; | ||
1698 | entry->kernel = 0; | ||
1699 | entry->user = 0; | ||
1700 | |||
1701 | perf_do_callchain(regs, entry); | ||
1702 | |||
1703 | return entry; | ||
1704 | } | ||
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c index f6c70a164e32..d6f5b9fbde32 100644 --- a/arch/x86/kernel/cpu/perfctr-watchdog.c +++ b/arch/x86/kernel/cpu/perfctr-watchdog.c | |||
@@ -19,8 +19,8 @@ | |||
19 | #include <linux/nmi.h> | 19 | #include <linux/nmi.h> |
20 | #include <linux/kprobes.h> | 20 | #include <linux/kprobes.h> |
21 | 21 | ||
22 | #include <asm/genapic.h> | 22 | #include <asm/apic.h> |
23 | #include <asm/intel_arch_perfmon.h> | 23 | #include <asm/perf_counter.h> |
24 | 24 | ||
25 | struct nmi_watchdog_ctlblk { | 25 | struct nmi_watchdog_ctlblk { |
26 | unsigned int cccr_msr; | 26 | unsigned int cccr_msr; |
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 1c17d7c751a4..a4742a340d8d 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S | |||
@@ -1012,6 +1012,11 @@ apicinterrupt ERROR_APIC_VECTOR \ | |||
1012 | apicinterrupt SPURIOUS_APIC_VECTOR \ | 1012 | apicinterrupt SPURIOUS_APIC_VECTOR \ |
1013 | spurious_interrupt smp_spurious_interrupt | 1013 | spurious_interrupt smp_spurious_interrupt |
1014 | 1014 | ||
1015 | #ifdef CONFIG_PERF_COUNTERS | ||
1016 | apicinterrupt LOCAL_PENDING_VECTOR \ | ||
1017 | perf_pending_interrupt smp_perf_pending_interrupt | ||
1018 | #endif | ||
1019 | |||
1015 | /* | 1020 | /* |
1016 | * Exception entry points. | 1021 | * Exception entry points. |
1017 | */ | 1022 | */ |
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 9a391bbb8ba8..38287b5f116e 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c | |||
@@ -62,6 +62,14 @@ static int show_other_interrupts(struct seq_file *p, int prec) | |||
62 | for_each_online_cpu(j) | 62 | for_each_online_cpu(j) |
63 | seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count); | 63 | seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count); |
64 | seq_printf(p, " Spurious interrupts\n"); | 64 | seq_printf(p, " Spurious interrupts\n"); |
65 | seq_printf(p, "%*s: ", prec, "CNT"); | ||
66 | for_each_online_cpu(j) | ||
67 | seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs); | ||
68 | seq_printf(p, " Performance counter interrupts\n"); | ||
69 | seq_printf(p, "%*s: ", prec, "PND"); | ||
70 | for_each_online_cpu(j) | ||
71 | seq_printf(p, "%10u ", irq_stats(j)->apic_pending_irqs); | ||
72 | seq_printf(p, " Performance pending work\n"); | ||
65 | #endif | 73 | #endif |
66 | if (generic_interrupt_extension) { | 74 | if (generic_interrupt_extension) { |
67 | seq_printf(p, "%*s: ", prec, "PLT"); | 75 | seq_printf(p, "%*s: ", prec, "PLT"); |
@@ -165,6 +173,8 @@ u64 arch_irq_stat_cpu(unsigned int cpu) | |||
165 | #ifdef CONFIG_X86_LOCAL_APIC | 173 | #ifdef CONFIG_X86_LOCAL_APIC |
166 | sum += irq_stats(cpu)->apic_timer_irqs; | 174 | sum += irq_stats(cpu)->apic_timer_irqs; |
167 | sum += irq_stats(cpu)->irq_spurious_count; | 175 | sum += irq_stats(cpu)->irq_spurious_count; |
176 | sum += irq_stats(cpu)->apic_perf_irqs; | ||
177 | sum += irq_stats(cpu)->apic_pending_irqs; | ||
168 | #endif | 178 | #endif |
169 | if (generic_interrupt_extension) | 179 | if (generic_interrupt_extension) |
170 | sum += irq_stats(cpu)->generic_irqs; | 180 | sum += irq_stats(cpu)->generic_irqs; |
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c index 2e08b10ad51a..267c6624c77f 100644 --- a/arch/x86/kernel/irqinit.c +++ b/arch/x86/kernel/irqinit.c | |||
@@ -181,10 +181,15 @@ static void __init apic_intr_init(void) | |||
181 | { | 181 | { |
182 | smp_intr_init(); | 182 | smp_intr_init(); |
183 | 183 | ||
184 | #ifdef CONFIG_X86_64 | 184 | #ifdef CONFIG_X86_THERMAL_VECTOR |
185 | alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt); | 185 | alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt); |
186 | #endif | ||
187 | #ifdef CONFIG_X86_THRESHOLD | ||
186 | alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt); | 188 | alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt); |
187 | #endif | 189 | #endif |
190 | #if defined(CONFIG_X86_NEW_MCE) && defined(CONFIG_X86_LOCAL_APIC) | ||
191 | alloc_intr_gate(MCE_SELF_VECTOR, mce_self_interrupt); | ||
192 | #endif | ||
188 | 193 | ||
189 | #if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC) | 194 | #if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC) |
190 | /* self generated IPI for local APIC timer */ | 195 | /* self generated IPI for local APIC timer */ |
@@ -199,18 +204,10 @@ static void __init apic_intr_init(void) | |||
199 | 204 | ||
200 | /* Performance monitoring interrupts: */ | 205 | /* Performance monitoring interrupts: */ |
201 | # ifdef CONFIG_PERF_COUNTERS | 206 | # ifdef CONFIG_PERF_COUNTERS |
202 | alloc_intr_gate(LOCAL_PERF_VECTOR, perf_counter_interrupt); | ||
203 | alloc_intr_gate(LOCAL_PENDING_VECTOR, perf_pending_interrupt); | 207 | alloc_intr_gate(LOCAL_PENDING_VECTOR, perf_pending_interrupt); |
204 | # endif | 208 | # endif |
205 | 209 | ||
206 | #endif | 210 | #endif |
207 | |||
208 | #ifdef CONFIG_X86_32 | ||
209 | #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_MCE_P4THERMAL) | ||
210 | /* thermal monitor LVT interrupt */ | ||
211 | alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt); | ||
212 | #endif | ||
213 | #endif | ||
214 | } | 211 | } |
215 | 212 | ||
216 | /** | 213 | /** |
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index 14425166b8e3..0a813b17b172 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c | |||
@@ -6,7 +6,6 @@ | |||
6 | * 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes | 6 | * 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes |
7 | * 2000-2002 x86-64 support by Andi Kleen | 7 | * 2000-2002 x86-64 support by Andi Kleen |
8 | */ | 8 | */ |
9 | |||
10 | #include <linux/sched.h> | 9 | #include <linux/sched.h> |
11 | #include <linux/mm.h> | 10 | #include <linux/mm.h> |
12 | #include <linux/smp.h> | 11 | #include <linux/smp.h> |
diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S index 734f92c02dde..d51321ddafda 100644 --- a/arch/x86/kernel/syscall_table_32.S +++ b/arch/x86/kernel/syscall_table_32.S | |||
@@ -335,3 +335,4 @@ ENTRY(sys_call_table) | |||
335 | .long sys_preadv | 335 | .long sys_preadv |
336 | .long sys_pwritev | 336 | .long sys_pwritev |
337 | .long sys_rt_tgsigqueueinfo /* 335 */ | 337 | .long sys_rt_tgsigqueueinfo /* 335 */ |
338 | .long sys_perf_counter_open | ||
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index ede024531f8f..07d60c870ce2 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c | |||
@@ -942,8 +942,13 @@ void __init trap_init(void) | |||
942 | #endif | 942 | #endif |
943 | set_intr_gate(19, &simd_coprocessor_error); | 943 | set_intr_gate(19, &simd_coprocessor_error); |
944 | 944 | ||
945 | /* Reserve all the builtin and the syscall vector: */ | ||
946 | for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++) | ||
947 | set_bit(i, used_vectors); | ||
948 | |||
945 | #ifdef CONFIG_IA32_EMULATION | 949 | #ifdef CONFIG_IA32_EMULATION |
946 | set_system_intr_gate(IA32_SYSCALL_VECTOR, ia32_syscall); | 950 | set_system_intr_gate(IA32_SYSCALL_VECTOR, ia32_syscall); |
951 | set_bit(IA32_SYSCALL_VECTOR, used_vectors); | ||
947 | #endif | 952 | #endif |
948 | 953 | ||
949 | #ifdef CONFIG_X86_32 | 954 | #ifdef CONFIG_X86_32 |
@@ -960,14 +965,9 @@ void __init trap_init(void) | |||
960 | } | 965 | } |
961 | 966 | ||
962 | set_system_trap_gate(SYSCALL_VECTOR, &system_call); | 967 | set_system_trap_gate(SYSCALL_VECTOR, &system_call); |
968 | set_bit(SYSCALL_VECTOR, used_vectors); | ||
963 | #endif | 969 | #endif |
964 | 970 | ||
965 | /* Reserve all the builtin and the syscall vector: */ | ||
966 | for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++) | ||
967 | set_bit(i, used_vectors); | ||
968 | |||
969 | set_bit(IA32_SYSCALL_VECTOR, used_vectors); | ||
970 | |||
971 | /* | 971 | /* |
972 | * Should be a barrier for any external CPU state: | 972 | * Should be a barrier for any external CPU state: |
973 | */ | 973 | */ |
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 5ec7ae366615..c6acc6326374 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/bootmem.h> /* max_low_pfn */ | 10 | #include <linux/bootmem.h> /* max_low_pfn */ |
11 | #include <linux/kprobes.h> /* __kprobes, ... */ | 11 | #include <linux/kprobes.h> /* __kprobes, ... */ |
12 | #include <linux/mmiotrace.h> /* kmmio_handler, ... */ | 12 | #include <linux/mmiotrace.h> /* kmmio_handler, ... */ |
13 | #include <linux/perf_counter.h> /* perf_swcounter_event */ | ||
13 | 14 | ||
14 | #include <asm/traps.h> /* dotraplinkage, ... */ | 15 | #include <asm/traps.h> /* dotraplinkage, ... */ |
15 | #include <asm/pgalloc.h> /* pgd_*(), ... */ | 16 | #include <asm/pgalloc.h> /* pgd_*(), ... */ |
@@ -1013,6 +1014,8 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code) | |||
1013 | if (unlikely(error_code & PF_RSVD)) | 1014 | if (unlikely(error_code & PF_RSVD)) |
1014 | pgtable_bad(regs, error_code, address); | 1015 | pgtable_bad(regs, error_code, address); |
1015 | 1016 | ||
1017 | perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); | ||
1018 | |||
1016 | /* | 1019 | /* |
1017 | * If we're in an interrupt, have no user context or are running | 1020 | * If we're in an interrupt, have no user context or are running |
1018 | * in an atomic region then we must not take the fault: | 1021 | * in an atomic region then we must not take the fault: |
@@ -1106,10 +1109,15 @@ good_area: | |||
1106 | return; | 1109 | return; |
1107 | } | 1110 | } |
1108 | 1111 | ||
1109 | if (fault & VM_FAULT_MAJOR) | 1112 | if (fault & VM_FAULT_MAJOR) { |
1110 | tsk->maj_flt++; | 1113 | tsk->maj_flt++; |
1111 | else | 1114 | perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, |
1115 | regs, address); | ||
1116 | } else { | ||
1112 | tsk->min_flt++; | 1117 | tsk->min_flt++; |
1118 | perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, | ||
1119 | regs, address); | ||
1120 | } | ||
1113 | 1121 | ||
1114 | check_v8086_mode(regs, address, tsk); | 1122 | check_v8086_mode(regs, address, tsk); |
1115 | 1123 | ||
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index 3b285e656e27..b07dd8d0b321 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c | |||
@@ -40,8 +40,9 @@ static int profile_exceptions_notify(struct notifier_block *self, | |||
40 | 40 | ||
41 | switch (val) { | 41 | switch (val) { |
42 | case DIE_NMI: | 42 | case DIE_NMI: |
43 | if (model->check_ctrs(args->regs, &per_cpu(cpu_msrs, cpu))) | 43 | case DIE_NMI_IPI: |
44 | ret = NOTIFY_STOP; | 44 | model->check_ctrs(args->regs, &per_cpu(cpu_msrs, cpu)); |
45 | ret = NOTIFY_STOP; | ||
45 | break; | 46 | break; |
46 | default: | 47 | default: |
47 | break; | 48 | break; |
@@ -134,7 +135,7 @@ static void nmi_cpu_setup(void *dummy) | |||
134 | static struct notifier_block profile_exceptions_nb = { | 135 | static struct notifier_block profile_exceptions_nb = { |
135 | .notifier_call = profile_exceptions_notify, | 136 | .notifier_call = profile_exceptions_notify, |
136 | .next = NULL, | 137 | .next = NULL, |
137 | .priority = 0 | 138 | .priority = 2 |
138 | }; | 139 | }; |
139 | 140 | ||
140 | static int nmi_setup(void) | 141 | static int nmi_setup(void) |
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c index 10131fbdaada..4da7230b3d17 100644 --- a/arch/x86/oprofile/op_model_ppro.c +++ b/arch/x86/oprofile/op_model_ppro.c | |||
@@ -18,7 +18,7 @@ | |||
18 | #include <asm/msr.h> | 18 | #include <asm/msr.h> |
19 | #include <asm/apic.h> | 19 | #include <asm/apic.h> |
20 | #include <asm/nmi.h> | 20 | #include <asm/nmi.h> |
21 | #include <asm/intel_arch_perfmon.h> | 21 | #include <asm/perf_counter.h> |
22 | 22 | ||
23 | #include "op_x86_model.h" | 23 | #include "op_x86_model.h" |
24 | #include "op_counter.h" | 24 | #include "op_counter.h" |
@@ -136,6 +136,13 @@ static int ppro_check_ctrs(struct pt_regs * const regs, | |||
136 | u64 val; | 136 | u64 val; |
137 | int i; | 137 | int i; |
138 | 138 | ||
139 | /* | ||
140 | * This can happen if perf counters are in use when | ||
141 | * we steal the die notifier NMI. | ||
142 | */ | ||
143 | if (unlikely(!reset_value)) | ||
144 | goto out; | ||
145 | |||
139 | for (i = 0 ; i < num_counters; ++i) { | 146 | for (i = 0 ; i < num_counters; ++i) { |
140 | if (!reset_value[i]) | 147 | if (!reset_value[i]) |
141 | continue; | 148 | continue; |
@@ -146,6 +153,7 @@ static int ppro_check_ctrs(struct pt_regs * const regs, | |||
146 | } | 153 | } |
147 | } | 154 | } |
148 | 155 | ||
156 | out: | ||
149 | /* Only P6 based Pentium M need to re-unmask the apic vector but it | 157 | /* Only P6 based Pentium M need to re-unmask the apic vector but it |
150 | * doesn't hurt other P6 variant */ | 158 | * doesn't hurt other P6 variant */ |
151 | apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED); | 159 | apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED); |
diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c index 1241f118ab56..58bc00f68b12 100644 --- a/arch/x86/vdso/vdso32-setup.c +++ b/arch/x86/vdso/vdso32-setup.c | |||
@@ -338,6 +338,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) | |||
338 | } | 338 | } |
339 | } | 339 | } |
340 | 340 | ||
341 | current->mm->context.vdso = (void *)addr; | ||
342 | |||
341 | if (compat_uses_vma || !compat) { | 343 | if (compat_uses_vma || !compat) { |
342 | /* | 344 | /* |
343 | * MAYWRITE to allow gdb to COW and set breakpoints | 345 | * MAYWRITE to allow gdb to COW and set breakpoints |
@@ -358,11 +360,13 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) | |||
358 | goto up_fail; | 360 | goto up_fail; |
359 | } | 361 | } |
360 | 362 | ||
361 | current->mm->context.vdso = (void *)addr; | ||
362 | current_thread_info()->sysenter_return = | 363 | current_thread_info()->sysenter_return = |
363 | VDSO32_SYMBOL(addr, SYSENTER_RETURN); | 364 | VDSO32_SYMBOL(addr, SYSENTER_RETURN); |
364 | 365 | ||
365 | up_fail: | 366 | up_fail: |
367 | if (ret) | ||
368 | current->mm->context.vdso = NULL; | ||
369 | |||
366 | up_write(&mm->mmap_sem); | 370 | up_write(&mm->mmap_sem); |
367 | 371 | ||
368 | return ret; | 372 | return ret; |
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c index cac083386e03..21e1aeb9f3ea 100644 --- a/arch/x86/vdso/vma.c +++ b/arch/x86/vdso/vma.c | |||
@@ -116,15 +116,18 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) | |||
116 | goto up_fail; | 116 | goto up_fail; |
117 | } | 117 | } |
118 | 118 | ||
119 | current->mm->context.vdso = (void *)addr; | ||
120 | |||
119 | ret = install_special_mapping(mm, addr, vdso_size, | 121 | ret = install_special_mapping(mm, addr, vdso_size, |
120 | VM_READ|VM_EXEC| | 122 | VM_READ|VM_EXEC| |
121 | VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC| | 123 | VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC| |
122 | VM_ALWAYSDUMP, | 124 | VM_ALWAYSDUMP, |
123 | vdso_pages); | 125 | vdso_pages); |
124 | if (ret) | 126 | if (ret) { |
127 | current->mm->context.vdso = NULL; | ||
125 | goto up_fail; | 128 | goto up_fail; |
129 | } | ||
126 | 130 | ||
127 | current->mm->context.vdso = (void *)addr; | ||
128 | up_fail: | 131 | up_fail: |
129 | up_write(&mm->mmap_sem); | 132 | up_write(&mm->mmap_sem); |
130 | return ret; | 133 | return ret; |