Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r--  kernel/perf_counter.c | 59
1 files changed, 55 insertions, 4 deletions
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 14b1fe984832..ec9c4007a7f9 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -46,6 +46,7 @@ static atomic_t nr_comm_tracking __read_mostly;
 
 int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */
 int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */
+int sysctl_perf_counter_limit __read_mostly = 100000; /* max NMIs per second */
 
 /*
  * Lock for (sysadmin-configurable) counter reservations:
@@ -1066,12 +1067,15 @@ static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
 	__perf_counter_sched_in(ctx, cpuctx, cpu);
 }
 
+#define MAX_INTERRUPTS (~0ULL)
+
+static void perf_log_throttle(struct perf_counter *counter, int enable);
 static void perf_log_period(struct perf_counter *counter, u64 period);
 
 static void perf_adjust_freq(struct perf_counter_context *ctx)
 {
 	struct perf_counter *counter;
-	u64 irq_period;
+	u64 interrupts, irq_period;
 	u64 events, period;
 	s64 delta;
 
@@ -1080,10 +1084,19 @@ static void perf_adjust_freq(struct perf_counter_context *ctx)
 		if (counter->state != PERF_COUNTER_STATE_ACTIVE)
 			continue;
 
+		interrupts = counter->hw.interrupts;
+		counter->hw.interrupts = 0;
+
+		if (interrupts == MAX_INTERRUPTS) {
+			perf_log_throttle(counter, 1);
+			counter->pmu->unthrottle(counter);
+			interrupts = 2*sysctl_perf_counter_limit/HZ;
+		}
+
 		if (!counter->hw_event.freq || !counter->hw_event.irq_freq)
 			continue;
 
-		events = HZ * counter->hw.interrupts * counter->hw.irq_period;
+		events = HZ * interrupts * counter->hw.irq_period;
 		period = div64_u64(events, counter->hw_event.irq_freq);
 
 		delta = (s64)(1 + period - counter->hw.irq_period);
@@ -1097,7 +1110,6 @@ static void perf_adjust_freq(struct perf_counter_context *ctx)
 		perf_log_period(counter, irq_period);
 
 		counter->hw.irq_period = irq_period;
-		counter->hw.interrupts = 0;
 	}
 	spin_unlock(&ctx->lock);
 }
@@ -2544,6 +2556,35 @@ static void perf_log_period(struct perf_counter *counter, u64 period)
 }
 
 /*
+ * IRQ throttle logging
+ */
+
+static void perf_log_throttle(struct perf_counter *counter, int enable)
+{
+	struct perf_output_handle handle;
+	int ret;
+
+	struct {
+		struct perf_event_header header;
+		u64 time;
+	} throttle_event = {
+		.header = {
+			.type = PERF_EVENT_THROTTLE + 1,
+			.misc = 0,
+			.size = sizeof(throttle_event),
+		},
+		.time = sched_clock(),
+	};
+
+	ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 0, 0);
+	if (ret)
+		return;
+
+	perf_output_put(&handle, throttle_event);
+	perf_output_end(&handle);
+}
+
+/*
  * Generic counter overflow handling.
  */
 
@@ -2551,9 +2592,19 @@ int perf_counter_overflow(struct perf_counter *counter,
 			  int nmi, struct pt_regs *regs, u64 addr)
 {
 	int events = atomic_read(&counter->event_limit);
+	int throttle = counter->pmu->unthrottle != NULL;
 	int ret = 0;
 
-	counter->hw.interrupts++;
+	if (!throttle) {
+		counter->hw.interrupts++;
+	} else if (counter->hw.interrupts != MAX_INTERRUPTS) {
+		counter->hw.interrupts++;
+		if (HZ*counter->hw.interrupts > (u64)sysctl_perf_counter_limit) {
+			counter->hw.interrupts = MAX_INTERRUPTS;
+			perf_log_throttle(counter, 0);
+			ret = 1;
+		}
+	}
 
 	/*
 	 * XXX event_limit might not quite work as expected on inherited
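
Not part of the patch: below is a minimal, self-contained userspace C model of the throttle state machine the hunks above introduce, sketched to make the interplay between the overflow path (perf_counter_overflow) and the per-tick frequency adjustment (perf_adjust_freq) easier to follow. The type names, the fixed HZ value and the main() driver are simplified stand-ins for illustration, not kernel code.

/* Illustrative model only -- simplified from the patch above. */
#include <stdio.h>
#include <stdint.h>

#define HZ		1000		/* assumed tick rate for this model */
#define MAX_INTERRUPTS	(~0ULL)		/* sentinel: counter is throttled */

static uint64_t sysctl_perf_counter_limit = 100000;	/* max NMIs per second */

struct hw_perf_counter {
	uint64_t interrupts;		/* overflows seen since the last tick */
};

/*
 * Overflow path (cf. perf_counter_overflow): count the interrupt and, once
 * the projected per-second rate exceeds the sysctl limit, mark the counter
 * throttled and return 1 so the caller can stop the PMU.
 */
static int counter_overflow(struct hw_perf_counter *hw)
{
	int ret = 0;

	if (hw->interrupts != MAX_INTERRUPTS) {
		hw->interrupts++;
		if (HZ * hw->interrupts > sysctl_perf_counter_limit) {
			hw->interrupts = MAX_INTERRUPTS;
			ret = 1;	/* would also emit a throttle event */
		}
	}
	return ret;
}

/*
 * Per-tick path (cf. perf_adjust_freq): reset the per-tick count and, if the
 * counter was throttled, unthrottle it and carry on with an estimated
 * interrupt count for the period recalculation.
 */
static void adjust_freq_tick(struct hw_perf_counter *hw)
{
	uint64_t interrupts = hw->interrupts;

	hw->interrupts = 0;
	if (interrupts == MAX_INTERRUPTS)
		interrupts = 2 * sysctl_perf_counter_limit / HZ;
	/* 'interrupts' would now feed the irq_period recalculation */
	(void)interrupts;
}

int main(void)
{
	struct hw_perf_counter hw = { 0 };
	uint64_t i;
	int stopped = 0;

	/* Simulate one tick's worth of overflows at twice the allowed rate. */
	for (i = 0; i < 2 * sysctl_perf_counter_limit / HZ && !stopped; i++)
		stopped = counter_overflow(&hw);

	printf("throttled after %llu overflows: %d\n",
	       (unsigned long long)i, stopped);

	adjust_freq_tick(&hw);		/* the next timer tick unthrottles */
	printf("per-tick count after adjust: %llu\n",
	       (unsigned long long)hw.interrupts);
	return 0;
}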