aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/sched_clock.c
diff options
context:
space:
mode:
authorSteven Rostedt <rostedt@goodmis.org>2008-07-07 14:16:50 -0400
committerIngo Molnar <mingo@elte.hu>2008-07-11 09:53:25 -0400
commit62c43dd9864dbd52ff158922d1d08c75f20335af (patch)
tree9b8ce91f634b56e1420759f6872c6bfc21deec97 /kernel/sched_clock.c
parent70ff05554f91a1edda1f11684da1dbde09e2feea (diff)
sched_clock: record from last tick
The sched_clock code tries to keep within the gtod time by one tick (jiffy). The current code mistakenly keeps track of the delta jiffies between updates of the clock, where the delta is used to compare with the number of jiffies that have passed since an update of the gtod. The gtod is updated at each schedule tick not each sched_clock update. After one jiffy passes the clock is updated fine. But the delta is taken from the last update so if the next update happens before the next tick the delta jiffies used will be incorrect. This patch changes the code to check the delta of jiffies between ticks and not updates to match the comparison of the updates with the gtod. Signed-off-by: Steven Rostedt <srostedt@redhat.com> Cc: Steven Rostedt <srostedt@redhat.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched_clock.c')
-rw-r--r--kernel/sched_clock.c9
1 files changed, 5 insertions, 4 deletions
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index ce05271219ab..e383bc7df6dd 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -40,7 +40,7 @@ struct sched_clock_data {
40 */ 40 */
41 raw_spinlock_t lock; 41 raw_spinlock_t lock;
42 42
43 unsigned long prev_jiffies; 43 unsigned long tick_jiffies;
44 u64 prev_raw; 44 u64 prev_raw;
45 u64 tick_raw; 45 u64 tick_raw;
46 u64 tick_gtod; 46 u64 tick_gtod;
@@ -71,7 +71,7 @@ void sched_clock_init(void)
71 struct sched_clock_data *scd = cpu_sdc(cpu); 71 struct sched_clock_data *scd = cpu_sdc(cpu);
72 72
73 scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; 73 scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
74 scd->prev_jiffies = now_jiffies; 74 scd->tick_jiffies = now_jiffies;
75 scd->prev_raw = 0; 75 scd->prev_raw = 0;
76 scd->tick_raw = 0; 76 scd->tick_raw = 0;
77 scd->tick_gtod = ktime_now; 77 scd->tick_gtod = ktime_now;
@@ -90,7 +90,7 @@ void sched_clock_init(void)
90static void __update_sched_clock(struct sched_clock_data *scd, u64 now) 90static void __update_sched_clock(struct sched_clock_data *scd, u64 now)
91{ 91{
92 unsigned long now_jiffies = jiffies; 92 unsigned long now_jiffies = jiffies;
93 long delta_jiffies = now_jiffies - scd->prev_jiffies; 93 long delta_jiffies = now_jiffies - scd->tick_jiffies;
94 u64 clock = scd->clock; 94 u64 clock = scd->clock;
95 u64 min_clock, max_clock; 95 u64 min_clock, max_clock;
96 s64 delta = now - scd->prev_raw; 96 s64 delta = now - scd->prev_raw;
@@ -119,7 +119,6 @@ static void __update_sched_clock(struct sched_clock_data *scd, u64 now)
119 clock = min_clock; 119 clock = min_clock;
120 120
121 scd->prev_raw = now; 121 scd->prev_raw = now;
122 scd->prev_jiffies = now_jiffies;
123 scd->clock = clock; 122 scd->clock = clock;
124} 123}
125 124
@@ -179,6 +178,7 @@ u64 sched_clock_cpu(int cpu)
179void sched_clock_tick(void) 178void sched_clock_tick(void)
180{ 179{
181 struct sched_clock_data *scd = this_scd(); 180 struct sched_clock_data *scd = this_scd();
181 unsigned long now_jiffies = jiffies;
182 u64 now, now_gtod; 182 u64 now, now_gtod;
183 183
184 if (unlikely(!sched_clock_running)) 184 if (unlikely(!sched_clock_running))
@@ -196,6 +196,7 @@ void sched_clock_tick(void)
196 * already observe 1 new jiffy; adding a new tick_gtod to that would 196 * already observe 1 new jiffy; adding a new tick_gtod to that would
197 * increase the clock 2 jiffies. 197 * increase the clock 2 jiffies.
198 */ 198 */
199 scd->tick_jiffies = now_jiffies;
199 scd->tick_raw = now; 200 scd->tick_raw = now;
200 scd->tick_gtod = now_gtod; 201 scd->tick_gtod = now_gtod;
201 __raw_spin_unlock(&scd->lock); 202 __raw_spin_unlock(&scd->lock);