| author | Mike Galbraith <efault@gmx.de> | 2009-01-23 04:13:01 -0500 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2009-01-23 05:33:18 -0500 |
| commit | 1b023a96d9b44f50f4d8ff28c15f5b80e354760f (patch) | |
| tree | 8b6a5956c4461c13c2e2b3769096afac5b767524 /arch/x86/kernel/cpu/perf_counter.c | |
| parent | 05e3423c8577126800841bc55de8a509f2433dca (diff) | |
perfcounters: throttle on too high IRQ rates
Starting kerneltop with only -c 100 is a bad idea: it can
easily lock up the system due to perfcounter IRQ overload.
So add throttling: if a new IRQ arrives less than
PERFMON_MIN_PERIOD_NS after the previous one, turn off perfcounters
and unthrottle them from the next timer tick.
Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
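Condensed into a stand-alone, user-space C sketch, the throttle/unthrottle cycle this patch introduces works as follows. Field and function names mirror the patch, but the 10,000 ns threshold is only an assumed example (this diff does not show the PERFMON_MIN_PERIOD_NS definition), and the real handler additionally saves and restores MSR_CORE_PERF_GLOBAL_CTRL, as the diff below shows.

```c
#include <stdio.h>

#define PERFMON_MIN_PERIOD_NS 10000ULL		/* assumed example threshold */

struct cpu_hw_counters {
	unsigned long long last_interrupt;	/* timestamp of the previous PMU IRQ, in ns */
	int throttled;				/* set when IRQs arrive too fast */
};

/* Models the check the patch adds to __smp_perf_counter_interrupt(). */
static void perf_irq(struct cpu_hw_counters *cpuc, unsigned long long now)
{
	if (now - cpuc->last_interrupt < PERFMON_MIN_PERIOD_NS)
		cpuc->throttled = 1;		/* too fast: skip re-enabling the counters */
	cpuc->last_interrupt = now;
}

/* Models perf_counter_unthrottle(), called from the next timer tick. */
static void unthrottle(struct cpu_hw_counters *cpuc)
{
	if (cpuc->throttled) {
		printf("PERFMON: max event frequency exceeded!\n");
		cpuc->throttled = 0;		/* counters would be re-enabled here */
	}
}

int main(void)
{
	struct cpu_hw_counters cpuc = { 0, 0 };

	perf_irq(&cpuc, 1000000);	/* first IRQ: well spaced, no throttling */
	perf_irq(&cpuc, 1005000);	/* 5,000 ns later: below the threshold */
	printf("throttled = %d\n", cpuc.throttled);	/* prints 1 */

	unthrottle(&cpuc);		/* the next tick restores the counters */
	printf("throttled = %d\n", cpuc.throttled);	/* prints 0 */
	return 0;
}
```

Deferring the re-enable to the tick keeps the interrupt path cheap: the handler only sets a flag and leaves the counters disabled, while the ratelimited warning and MSR restore happen later, at most once per tick.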
Diffstat (limited to 'arch/x86/kernel/cpu/perf_counter.c')
-rw-r--r-- | arch/x86/kernel/cpu/perf_counter.c | 38
1 file changed, 32 insertions(+), 6 deletions(-)
```diff
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 9376771f757b..1a040b179b53 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -33,6 +33,9 @@ static int nr_counters_fixed __read_mostly;
 struct cpu_hw_counters {
 	struct perf_counter	*counters[X86_PMC_IDX_MAX];
 	unsigned long		used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+	u64			last_interrupt;
+	u64			global_enable;
+	int			throttled;
 };
 
 /*
@@ -474,16 +477,19 @@ perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown)
 static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
 {
 	int bit, cpu = smp_processor_id();
-	u64 ack, status, saved_global;
-	struct cpu_hw_counters *cpuc;
+	u64 ack, status, now;
+	struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu);
 
-	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, saved_global);
+	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable);
 
 	/* Disable counters globally */
 	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
 	ack_APIC_irq();
 
-	cpuc = &per_cpu(cpu_hw_counters, cpu);
+	now = sched_clock();
+	if (now - cpuc->last_interrupt < PERFMON_MIN_PERIOD_NS)
+		cpuc->throttled = 1;
+	cpuc->last_interrupt = now;
 
 	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
 	if (!status)
@@ -533,9 +539,29 @@ again:
 		goto again;
 out:
 	/*
-	 * Restore - do not reenable when global enable is off:
+	 * Restore - do not reenable when global enable is off or throttled:
 	 */
-	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, saved_global);
+	if (!cpuc->throttled)
+		wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable);
+}
+
+void perf_counter_unthrottle(void)
+{
+	struct cpu_hw_counters *cpuc;
+
+	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
+		return;
+
+	if (unlikely(!perf_counters_initialized))
+		return;
+
+	cpuc = &per_cpu(cpu_hw_counters, smp_processor_id());
+	if (cpuc->throttled) {
+		if (printk_ratelimit())
+			printk(KERN_WARNING "PERFMON: max event frequency exceeded!\n");
+		wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable);
+		cpuc->throttled = 0;
+	}
 }
 
 void smp_perf_counter_interrupt(struct pt_regs *regs)
```
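The tick-side caller of perf_counter_unthrottle() is not part of this diff (the diffstat above is limited to perf_counter.c), so the following is purely an illustration of the commit message's "unthrottle them from the next timer tick"; the surrounding function name is an assumption, not something shown in this commit.

```c
/*
 * Hypothetical tick-side hook -- NOT part of this diff.  Per the commit
 * message, each timer tick gives a throttled CPU the chance to re-enable
 * its counters via the new helper.
 */
static void per_cpu_timer_tick(void)	/* assumed name, for illustration only */
{
	/* ...regular per-tick accounting... */
	perf_counter_unthrottle();	/* restores MSR_CORE_PERF_GLOBAL_CTRL and clears ->throttled */
}
```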