author	Mike Galbraith <efault@gmx.de>	2009-01-23 08:36:16 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-01-23 08:50:02 -0500
commit	4b39fd96855254a244f71245b41a91cdecb87d63 (patch)
tree	28f5e9d87bea0fccda0202e106759db3de41d33d
parent	1b023a96d9b44f50f4d8ff28c15f5b80e354760f (diff)
perfcounters: ratelimit performance counter interrupts
Ratelimit performance counter interrupts to 100KHz per CPU.

This replaces the irq-delta-time based method.

Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
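For readers skimming the patch, here is a minimal, self-contained sketch (plain C, outside the kernel) of the counting-based throttle this change introduces: each PMU interrupt increments a per-CPU count, the hardware is only re-enabled while the count stays under the per-tick budget of 100000/HZ, and the per-tick unthrottle hook restores the PMU and resets the count. The HZ value, the pmu_enabled flag and the simulation in main() are illustrative stand-ins, not the kernel code itself.

/*
 * Hedged sketch of the per-tick interrupt budget. Names other than
 * PERFMON_MAX_INTERRUPTS and perf_counter_unthrottle() are hypothetical.
 */
#include <stdio.h>

#define HZ                     1000            /* assumed tick rate */
#define PERFMON_MAX_INTERRUPTS (100000 / HZ)   /* 100KHz per CPU budget */

static unsigned long interrupts;   /* interrupts seen in the current tick */
static int pmu_enabled = 1;        /* stands in for MSR_CORE_PERF_GLOBAL_CTRL */

/* Called from the (simulated) counter overflow interrupt. */
static void perf_counter_interrupt(void)
{
	pmu_enabled = 0;                       /* hardware disabled on entry */
	/* ... handle overflown counters here ... */
	if (++interrupts < PERFMON_MAX_INTERRUPTS)
		pmu_enabled = 1;               /* under budget: re-enable */
}

/* Called once per timer tick, like perf_counter_unthrottle(). */
static void perf_counter_unthrottle(void)
{
	if (interrupts >= PERFMON_MAX_INTERRUPTS) {
		printf("PERFMON: max interrupts exceeded!\n");
		pmu_enabled = 1;               /* restore the PMU */
	}
	interrupts = 0;                        /* fresh budget for next tick */
}

int main(void)
{
	int i;

	/* Simulate 150 overflow interrupts within one tick, then the tick. */
	for (i = 0; i < 150; i++)
		perf_counter_interrupt();
	printf("enabled before tick: %d\n", pmu_enabled);  /* 0: throttled */
	perf_counter_unthrottle();
	printf("enabled after tick:  %d\n", pmu_enabled);   /* 1: restored */
	return 0;
}

With HZ = 1000 the budget is 100 interrupts per tick, so the 150 simulated overflows leave the PMU disabled until the next tick, which mirrors what perf_counter_unthrottle() does in the patch below.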
-rw-r--r--	arch/x86/kernel/cpu/perf_counter.c	27
-rw-r--r--	include/linux/perf_counter.h	2
2 files changed, 15 insertions(+), 14 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 1a040b179b53..a56d4cf92f30 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -33,9 +33,8 @@ static int nr_counters_fixed __read_mostly;
 struct cpu_hw_counters {
 	struct perf_counter	*counters[X86_PMC_IDX_MAX];
 	unsigned long		used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
-	u64			last_interrupt;
+	unsigned long		interrupts;
 	u64			global_enable;
-	int			throttled;
 };
 
 /*
@@ -471,13 +470,18 @@ perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown)
 }
 
 /*
+ * Maximum interrupt frequency of 100KHz per CPU
+ */
+#define PERFMON_MAX_INTERRUPTS 100000/HZ
+
+/*
  * This handler is triggered by the local APIC, so the APIC IRQ handling
  * rules apply:
  */
 static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
 {
 	int bit, cpu = smp_processor_id();
-	u64 ack, status, now;
+	u64 ack, status;
 	struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu);
 
 	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable);
@@ -486,11 +490,6 @@ static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
 	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
 	ack_APIC_irq();
 
-	now = sched_clock();
-	if (now - cpuc->last_interrupt < PERFMON_MIN_PERIOD_NS)
-		cpuc->throttled = 1;
-	cpuc->last_interrupt = now;
-
 	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
 	if (!status)
 		goto out;
@@ -541,13 +540,14 @@ out:
 	/*
 	 * Restore - do not reenable when global enable is off or throttled:
 	 */
-	if (!cpuc->throttled)
+	if (++cpuc->interrupts < PERFMON_MAX_INTERRUPTS)
 		wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable);
 }
 
 void perf_counter_unthrottle(void)
 {
 	struct cpu_hw_counters *cpuc;
+	u64 global_enable;
 
 	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
 		return;
@@ -556,12 +556,15 @@ void perf_counter_unthrottle(void)
 		return;
 
 	cpuc = &per_cpu(cpu_hw_counters, smp_processor_id());
-	if (cpuc->throttled) {
+	if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS) {
 		if (printk_ratelimit())
-			printk(KERN_WARNING "PERFMON: max event frequency exceeded!\n");
+			printk(KERN_WARNING "PERFMON: max interrupts exceeded!\n");
 		wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable);
-		cpuc->throttled = 0;
 	}
+	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, global_enable);
+	if (unlikely(cpuc->global_enable && !global_enable))
+		wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, cpuc->global_enable);
+	cpuc->interrupts = 0;
 }
 
 void smp_perf_counter_interrupt(struct pt_regs *regs)
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index 91f1ca4c01c0..f55381fbcac9 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -271,8 +271,6 @@ static inline int is_software_counter(struct perf_counter *counter)
 	return !counter->hw_event.raw && counter->hw_event.type < 0;
 }
 
-#define PERFMON_MIN_PERIOD_NS	10000
-
 #else
 static inline void
 perf_counter_task_sched_in(struct task_struct *task, int cpu)		{ }