author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-09-18 14:14:01 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-09-18 14:47:30 -0400
commit	def0a9b2573e00ab0b486cb5382625203ab4c4a6 (patch)
tree	1e3086fc320c244297b5b63cce47065bcfb71e8c	/kernel/perf_counter.c
parent	cf450a7355a116af793998c118a6bcf7f5a8367e (diff)
sched_clock: Make it NMI safe
Arjan complained about the suckyness of TSC on modern machines, and
asked if we could do something about that for PERF_SAMPLE_TIME.

Make cpu_clock() NMI safe by removing the spinlock and using
cmpxchg. This also makes it smaller and more robust.

Affects architectures that use HAVE_UNSTABLE_SCHED_CLOCK, i.e. IA64
and x86.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
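[Editor's note: the cmpxchg trick described above lives in kernel/sched_clock.c and is not part of the hunks below. As a minimal userspace model of the pattern, assuming illustrative names (clock_ns, read_raw_ns, sample_clock) that are not the kernel's: the clock value is published with a compare-and-exchange retry loop instead of under a spinlock, so an NMI arriving mid-update can still safely take a timestamp.]

/*
 * Simplified userspace sketch of a lockless, NMI-safe-in-spirit clock:
 * no spinlock is taken, only a cmpxchg retry loop, and the published
 * value is clamped so it never goes backwards even if the raw counter
 * (e.g. an unsynchronized TSC) does.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <time.h>

static _Atomic uint64_t clock_ns;	/* last published clock value */

/* Stand-in for the raw, possibly unstable hardware counter. */
static uint64_t read_raw_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC_RAW, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static uint64_t sample_clock(void)
{
	uint64_t old, new;

	do {
		old = atomic_load_explicit(&clock_ns, memory_order_relaxed);
		new = read_raw_ns();
		if (new < old)		/* clamp: never go backwards */
			new = old;
		/* Publish; retry if another context won the race. */
	} while (!atomic_compare_exchange_weak_explicit(&clock_ns, &old, new,
							memory_order_relaxed,
							memory_order_relaxed));
	return new;
}

[A weak compare-exchange may fail spuriously, which is harmless here: the loop simply re-reads and retries, and every retry observes the most recently published value.]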
Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r--	kernel/perf_counter.c	9
1 file changed, 3 insertions(+), 6 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 6944bd55ec4e..06d233a06da5 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -2955,10 +2955,7 @@ void perf_prepare_sample(struct perf_event_header *header,
 	}
 
 	if (sample_type & PERF_SAMPLE_TIME) {
-		/*
-		 * Maybe do better on x86 and provide cpu_clock_nmi()
-		 */
-		data->time = sched_clock();
+		data->time = perf_clock();
 
 		header->size += sizeof(data->time);
 	}
@@ -3488,7 +3485,7 @@ static void perf_log_throttle(struct perf_counter *counter, int enable)
 			.misc = 0,
 			.size = sizeof(throttle_event),
 		},
-		.time = sched_clock(),
+		.time = perf_clock(),
 		.id = primary_counter_id(counter),
 		.stream_id = counter->id,
 	};
@@ -3540,7 +3537,7 @@ static int __perf_counter_overflow(struct perf_counter *counter, int nmi,
 	}
 
 	if (counter->attr.freq) {
-		u64 now = sched_clock();
+		u64 now = perf_clock();
 		s64 delta = now - hwc->freq_stamp;
 
 		hwc->freq_stamp = now;
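[Editor's note: for context on what the hunks switch to, perf_clock() in this era of the tree reads as a thin static inline in kernel/perf_counter.c that forwards to the per-CPU clock; treat the exact definition below as an assumption rather than part of this patch, which does not add it in this file.]

/* Sketch of the helper the hunks above switch to (assumed definition). */
static inline u64 perf_clock(void)
{
	return cpu_clock(raw_smp_processor_id());
}

[Previously these paths called sched_clock() directly because cpu_clock() took a spinlock and was therefore unusable from NMI context; with the lock replaced by cmpxchg, perf can route all timestamps through cpu_clock() and get the filtered, per-CPU-monotonic value even when sampling from an NMI.]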