aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/sched_clock.c
diff options
context:
space:
mode:
authorSteven Rostedt <rostedt@goodmis.org>2008-07-09 00:15:31 -0400
committerIngo Molnar <mingo@elte.hu>2008-07-11 09:53:27 -0400
commitc0c87734f125d2fa8ebc70310f3257fa6209f2b6 (patch)
treec94f9f6c1e8dcc68c2a30caaff64db7e81a65510 /kernel/sched_clock.c
parent2b8a0cf4890d7537a77b51caa8f508e4a05a0e67 (diff)
sched_clock: only update deltas with local reads.
Reading the CPU clock should try to stay accurate within the CPU. Reading the CPU clock from another CPU and updating the deltas there can cause unneeded jumps when subsequently reading from the local CPU. This patch changes the code to update the last read TSC only when read from the local CPU. Signed-off-by: Steven Rostedt <srostedt@redhat.com> Cc: Steven Rostedt <srostedt@redhat.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: john stultz <johnstul@us.ibm.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_clock.c')
-rw-r--r--kernel/sched_clock.c25
1 files changed, 16 insertions, 9 deletions
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index 55fca1e9e12a..ee7cce5029ce 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -124,7 +124,7 @@ static int check_max(struct sched_clock_data *scd)
124 * - filter out backward motion 124 * - filter out backward motion
125 * - use jiffies to generate a min,max window to clip the raw values 125 * - use jiffies to generate a min,max window to clip the raw values
126 */ 126 */
127static void __update_sched_clock(struct sched_clock_data *scd, u64 now) 127static void __update_sched_clock(struct sched_clock_data *scd, u64 now, u64 *time)
128{ 128{
129 unsigned long now_jiffies = jiffies; 129 unsigned long now_jiffies = jiffies;
130 long delta_jiffies = now_jiffies - scd->tick_jiffies; 130 long delta_jiffies = now_jiffies - scd->tick_jiffies;
@@ -162,8 +162,12 @@ static void __update_sched_clock(struct sched_clock_data *scd, u64 now)
162 if (unlikely(clock < min_clock)) 162 if (unlikely(clock < min_clock))
163 clock = min_clock; 163 clock = min_clock;
164 164
165 scd->prev_raw = now; 165 if (time)
166 scd->clock = clock; 166 *time = clock;
167 else {
168 scd->prev_raw = now;
169 scd->clock = clock;
170 }
167} 171}
168 172
169static void lock_double_clock(struct sched_clock_data *data1, 173static void lock_double_clock(struct sched_clock_data *data1,
@@ -207,15 +211,18 @@ u64 sched_clock_cpu(int cpu)
207 now -= scd->tick_gtod; 211 now -= scd->tick_gtod;
208 212
209 __raw_spin_unlock(&my_scd->lock); 213 __raw_spin_unlock(&my_scd->lock);
214
215 __update_sched_clock(scd, now, &clock);
216
217 __raw_spin_unlock(&scd->lock);
218
210 } else { 219 } else {
211 __raw_spin_lock(&scd->lock); 220 __raw_spin_lock(&scd->lock);
221 __update_sched_clock(scd, now, NULL);
222 clock = scd->clock;
223 __raw_spin_unlock(&scd->lock);
212 } 224 }
213 225
214 __update_sched_clock(scd, now);
215 clock = scd->clock;
216
217 __raw_spin_unlock(&scd->lock);
218
219 return clock; 226 return clock;
220} 227}
221 228
@@ -234,7 +241,7 @@ void sched_clock_tick(void)
234 now_gtod = ktime_to_ns(ktime_get()); 241 now_gtod = ktime_to_ns(ktime_get());
235 242
236 __raw_spin_lock(&scd->lock); 243 __raw_spin_lock(&scd->lock);
237 __update_sched_clock(scd, now); 244 __update_sched_clock(scd, now, NULL);
238 /* 245 /*
239 * update tick_gtod after __update_sched_clock() because that will 246 * update tick_gtod after __update_sched_clock() because that will
240 * already observe 1 new jiffy; adding a new tick_gtod to that would 247 * already observe 1 new jiffy; adding a new tick_gtod to that would