| author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2009-09-17 12:47:11 -0400 |
| --- | --- | --- |
| committer | Ingo Molnar <mingo@elte.hu> | 2009-09-17 16:08:25 -0400 |
| commit | 850bc73ffcc99cddfb52bc23217c60810c508853 (patch) | |
| tree | 787a863ccb2aaa1de48a6690b33026beadecce20 /kernel | |
| parent | 0ec04e16d08b69d8da46abbcfa3e3f2cd9738852 (diff) | |
perf_counter: Do not throttle single swcounter events
We can have swcounter events that contribute more than a single count per
event; when used with a non-zero period, those can generate multiple events,
which is when we need throttling. However, swcounters that contribute only a
single count per event can only come in as fast as we can run code, hence
don't throttle them.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/perf_counter.c | 17 |
1 file changed, 13 insertions, 4 deletions
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 667ab25ad3d5..fe0d1adde804 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -3494,14 +3494,15 @@ static void perf_log_throttle(struct perf_counter *counter, int enable)
  * Generic counter overflow handling, sampling.
  */
 
-int perf_counter_overflow(struct perf_counter *counter, int nmi,
-			  struct perf_sample_data *data)
+static int __perf_counter_overflow(struct perf_counter *counter, int nmi,
+				   int throttle, struct perf_sample_data *data)
 {
 	int events = atomic_read(&counter->event_limit);
-	int throttle = counter->pmu->unthrottle != NULL;
 	struct hw_perf_counter *hwc = &counter->hw;
 	int ret = 0;
 
+	throttle = (throttle && counter->pmu->unthrottle != NULL);
+
 	if (!throttle) {
 		hwc->interrupts++;
 	} else {
@@ -3554,6 +3555,12 @@ int perf_counter_overflow(struct perf_counter *counter, int nmi,
 	return ret;
 }
 
+int perf_counter_overflow(struct perf_counter *counter, int nmi,
+			  struct perf_sample_data *data)
+{
+	return __perf_counter_overflow(counter, nmi, 1, data);
+}
+
 /*
  * Generic software counter infrastructure
  */
@@ -3592,6 +3599,7 @@ static void perf_swcounter_overflow(struct perf_counter *counter,
 				    int nmi, struct perf_sample_data *data)
 {
 	struct hw_perf_counter *hwc = &counter->hw;
+	int throttle = 0;
 	u64 overflow;
 
 	data->period = counter->hw.last_period;
@@ -3601,13 +3609,14 @@ static void perf_swcounter_overflow(struct perf_counter *counter,
 		return;
 
 	for (; overflow; overflow--) {
-		if (perf_counter_overflow(counter, nmi, data)) {
+		if (__perf_counter_overflow(counter, nmi, throttle, data)) {
 			/*
 			 * We inhibit the overflow from happening when
 			 * hwc->interrupts == MAX_INTERRUPTS.
 			 */
 			break;
 		}
+		throttle = 1;
 	}
 }
 
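For readers who want to see the resulting behaviour outside the kernel, below is a minimal user-space sketch of the pattern this patch introduces: the first overflow generated by a software counter event is delivered unthrottled, and only the extra overflows produced within the same call are subject to the throttle. The names `emit_overflow`, `handle_swcounter_event` and `MAX_EVENTS_PER_TICK` are invented for this illustration; this is not the kernel code and it omits the real PMU, NMI and sampling machinery.

```c
/*
 * Illustration only: a user-space simulation of the throttling pattern
 * from the patch above. None of these names are kernel APIs.
 */
#include <stdio.h>
#include <stdint.h>

#define MAX_EVENTS_PER_TICK 4	/* stand-in for the real throttle limit */

static unsigned int interrupts;	/* plays the role of hwc->interrupts */

/*
 * Plays the role of __perf_counter_overflow(): only calls made with
 * throttle != 0 are checked against the limit; unthrottled calls always
 * get through. Returns non-zero to ask the caller to stop.
 */
static int emit_overflow(int throttle)
{
	if (!throttle) {
		interrupts++;		/* unthrottled path: just account */
		return 0;
	}
	if (interrupts != MAX_EVENTS_PER_TICK) {
		interrupts++;
		return 0;
	}
	return 1;			/* limit reached: throttle */
}

/*
 * Plays the role of perf_swcounter_overflow(): the first overflow of a
 * call is never throttled, later ones in the same call may be.
 */
static void handle_swcounter_event(uint64_t overflow)
{
	int throttle = 0;

	for (; overflow; overflow--) {
		if (emit_overflow(throttle))
			break;		/* inhibited, stop delivering */
		throttle = 1;		/* subsequent overflows may throttle */
	}
}

int main(void)
{
	handle_swcounter_event(1);	/* single-count event: never throttled */
	handle_swcounter_event(10);	/* multi-count event: may hit the limit */
	printf("overflows delivered: %u\n", interrupts);
	return 0;
}
```

In this sketch, as in the diff, a single-count event always reaches `emit_overflow()` with `throttle == 0`, so it can never be inhibited, while an event carrying many counts starts hitting the throttle check from its second overflow onwards.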