author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2011-02-16 05:22:34 -0500
committer	Ingo Molnar <mingo@elte.hu>			2011-02-16 07:30:55 -0500
commit		163ec4354a5135c6c38c3f4a9b46a31900ebdf48 (patch)
tree		59063e726453ace397c66d95cab09ac43265be41 /kernel/perf_event.c
parent		4979d2729af22f6ce8faa325fc60a85a2c2daa02 (diff)
perf: Optimize throttling code
By pre-computing the maximum number of samples per tick we can avoid a
multiplication and a conditional since MAX_INTERRUPTS >
max_samples_per_tick.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
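
To make the win concrete, here is a small standalone userspace sketch of the idea (illustration only, not kernel code: HZ, MAX_INTERRUPTS, DIV_ROUND_UP and the function names below are local stand-ins for the kernel symbols, and the kernel also resets the interrupt count every timer tick, which the loop here omits):

#include <stdio.h>

#define HZ			1000		/* assumed tick rate for this example */
#define MAX_INTERRUPTS		(~0U)		/* sentinel, larger than any per-tick budget */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned int sample_rate = 100000;	/* samples per second */
static unsigned int max_samples_per_tick;	/* cached budget, recomputed on writes */

/* Analogue of perf_proc_update_handler(): recompute the cached budget
 * once, when the rate is changed, instead of on every overflow. */
static void update_sample_rate(unsigned int new_rate)
{
	sample_rate = new_rate;
	max_samples_per_tick = DIV_ROUND_UP(sample_rate, HZ);
}

/* Hot path: a single compare, no multiplication.  Because MAX_INTERRUPTS
 * exceeds any real budget, an already-throttled counter keeps failing the
 * check and stays throttled, matching the behaviour the old code spelled
 * out in its "keep re-disabling" comment. */
static int overflow(unsigned int *interrupts)
{
	if (*interrupts >= max_samples_per_tick) {
		*interrupts = MAX_INTERRUPTS;	/* mark event as throttled */
		return 1;			/* tell the caller to stop the event */
	}
	(*interrupts)++;
	return 0;
}

int main(void)
{
	unsigned int interrupts = 0, throttled = 0, i;

	update_sample_rate(100000);
	for (i = 0; i < 200; i++)
		throttled += overflow(&interrupts);

	printf("budget per tick: %u, throttled overflows: %u\n",
	       max_samples_per_tick, throttled);
	return 0;
}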
Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r--	kernel/perf_event.c	43
1 files changed, 24 insertions, 19 deletions
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 65dcdc76d709..e03be08d0ddf 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -150,7 +150,24 @@ int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */
 /*
  * max perf event sample rate
  */
-int sysctl_perf_event_sample_rate __read_mostly = 100000;
+#define DEFAULT_MAX_SAMPLE_RATE 100000
+int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;
+static int max_samples_per_tick __read_mostly =
+	DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
+
+int perf_proc_update_handler(struct ctl_table *table, int write,
+		void __user *buffer, size_t *lenp,
+		loff_t *ppos)
+{
+	int ret = proc_dointvec(table, write, buffer, lenp, ppos);
+
+	if (ret || !write)
+		return ret;
+
+	max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
+
+	return 0;
+}
 
 static atomic64_t perf_event_id;
 
@@ -4941,26 +4958,14 @@ static int __perf_event_overflow(struct perf_event *event, int nmi,
 	if (unlikely(!is_sampling_event(event)))
 		return 0;
 
-	if (!throttle) {
-		hwc->interrupts++;
-	} else {
-		if (hwc->interrupts != MAX_INTERRUPTS) {
-			hwc->interrupts++;
-			if (HZ * hwc->interrupts >
-					(u64)sysctl_perf_event_sample_rate) {
-				hwc->interrupts = MAX_INTERRUPTS;
-				perf_log_throttle(event, 0);
-				ret = 1;
-			}
-		} else {
-			/*
-			 * Keep re-disabling events even though on the previous
-			 * pass we disabled it - just in case we raced with a
-			 * sched-in and the event got enabled again:
-			 */
-			ret = 1;
-		}
-	}
+	if (unlikely(hwc->interrupts >= max_samples_per_tick)) {
+		if (throttle) {
+			hwc->interrupts = MAX_INTERRUPTS;
+			perf_log_throttle(event, 0);
+			ret = 1;
+		}
+	} else
+		hwc->interrupts++;
 
 	if (event->attr.freq) {
 		u64 now = perf_clock();
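
The rest of the change (hooking perf_proc_update_handler into the perf_event_max_sample_rate sysctl) lives outside kernel/perf_event.c and so is not visible in this diffstat-limited view. As a rough sketch of what such a ctl_table entry conventionally looks like, illustrative only and not quoted from this commit:

	/* sketch of a kernel/sysctl.c entry wiring the new proc handler */
	{
		.procname	= "perf_event_max_sample_rate",
		.data		= &sysctl_perf_event_sample_rate,
		.maxlen		= sizeof(sysctl_perf_event_sample_rate),
		.mode		= 0644,
		.proc_handler	= perf_proc_update_handler,	/* recomputes max_samples_per_tick */
	},

With an entry along those lines in place, writing a new rate to /proc/sys/kernel/perf_event_max_sample_rate goes through proc_dointvec() and then refreshes max_samples_per_tick, so the overflow path itself never has to multiply or divide.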