author     Steven Rostedt <rostedt@goodmis.org>    2008-07-09 00:15:32 -0400
committer  Ingo Molnar <mingo@elte.hu>             2008-07-11 09:53:27 -0400
commit     a83bc47c33ab182f1e48977fd5a04024d713c75e (patch)
tree       759eb1afc83271ee2815dbd1214635ec7b506b5c /kernel/sched_clock.c
parent     c0c87734f125d2fa8ebc70310f3257fa6209f2b6 (diff)
sched_clock: record TSC after gtod
To read the gtod we need to grab the xtime lock for read. Reading the TSC
before the gtod can cause a bigger gap if the xtime lock is contended.
This patch simply reverses the order so that the TSC is read after the gtod.
The locking in the reading of the gtod handles any barriers one might
think are needed.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Cc: Steven Rostedt <srostedt@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: john stultz <johnstul@us.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_clock.c')
-rw-r--r--   kernel/sched_clock.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index ee7cce5029ce..28ff6bf5e02b 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -237,8 +237,8 @@ void sched_clock_tick(void)
 
 	WARN_ON_ONCE(!irqs_disabled());
 
-	now = sched_clock();
 	now_gtod = ktime_to_ns(ktime_get());
+	now = sched_clock();
 
 	__raw_spin_lock(&scd->lock);
 	__update_sched_clock(scd, now, NULL);
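
For readers without the full file at hand, a minimal sketch of what sched_clock_tick() looks like after this patch: only the two clock reads and the locked update come from the hunk above; the declarations, the this_scd() lookup, and the unlock are assumed scaffolding, not shown in the diff.

void sched_clock_tick(void)
{
	/* assumed scaffolding: per-CPU clock data lookup */
	struct sched_clock_data *scd = this_scd();
	u64 now, now_gtod;

	WARN_ON_ONCE(!irqs_disabled());

	/*
	 * ktime_get() takes the xtime lock for reading and may spin while a
	 * writer holds it.  Sampling the gtod first means any such delay
	 * happens before the TSC read, so the two timestamps stay close.
	 */
	now_gtod = ktime_to_ns(ktime_get());
	now = sched_clock();		/* raw TSC-based clock, sampled last */

	__raw_spin_lock(&scd->lock);
	__update_sched_clock(scd, now, NULL);
	/* ... gtod-based resync of the per-CPU clock omitted ... */
	__raw_spin_unlock(&scd->lock);
}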