author		Ingo Molnar <mingo@elte.hu>	2008-07-30 04:13:35 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-07-31 11:20:49 -0400
commit		18e4e36c66d6edbdefc639692206cdf01e468713 (patch)
tree		271a70892fa5808459a1cfc30aa3b5631cd6647d /kernel
parent		50526968e99afbca34924abcb04658b6dd5c5ea5 (diff)
sched: eliminate scd->prev_raw
eliminate prev_raw and use tick_raw instead. It's enough to base the current time on the scheduler tick timestamp alone - the monotonicity and maximum checks will prevent any damage.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Mike Galbraith <efault@gmx.de>
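The "monotonicity and maximum checks" the message relies on sit in the part of __update_sched_clock() that the hunks below elide. The following is a paraphrased sketch of that clamp with the post-patch delta base, reconstructed from the surrounding context rather than copied from the tree, so the exact branch structure may differ:

static void __update_sched_clock(struct sched_clock_data *scd, u64 now)
{
	unsigned long now_jiffies = jiffies;
	long delta_jiffies = now_jiffies - scd->tick_jiffies;
	u64 clock = scd->clock;
	u64 min_clock, max_clock;
	s64 delta = now - scd->tick_raw;	/* was: now - scd->prev_raw */

	/* floor: GTOD time of the last tick plus the jiffies seen since */
	min_clock = scd->tick_gtod + delta_jiffies * TICK_NSEC;
	/* ceiling: at most one tick beyond the floor */
	max_clock = min_clock + TICK_NSEC;

	if (delta > 0 && clock + delta <= max_clock)
		clock += delta;		/* normal case: advance by the raw delta */
	else if (clock < max_clock)
		clock = max_clock;	/* raw delta bogus or too large: clamp to ceiling */

	if (unlikely(clock < min_clock))
		clock = min_clock;	/* never fall behind the floor */

	scd->tick_jiffies = now_jiffies;
	scd->clock = clock;
}

Because delta is now measured from the last tick's raw timestamp, any drift of the underlying sched_clock() since that tick is capped to about one tick by max_clock, which is why separate prev_raw bookkeeping is unnecessary.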
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched_clock.c	6
1 file changed, 1 insertion(+), 5 deletions(-)
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index b96559cb96a5..4b8474c966dc 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -53,7 +53,6 @@ struct sched_clock_data {
 	raw_spinlock_t		lock;
 
 	unsigned long		tick_jiffies;
-	u64			prev_raw;
 	u64			tick_raw;
 	u64			tick_gtod;
 	u64			clock;
@@ -84,7 +83,6 @@ void sched_clock_init(void)
 
 		scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 		scd->tick_jiffies = now_jiffies;
-		scd->prev_raw = 0;
 		scd->tick_raw = 0;
 		scd->tick_gtod = ktime_now;
 		scd->clock = ktime_now;
@@ -105,7 +103,7 @@ static void __update_sched_clock(struct sched_clock_data *scd, u64 now)
 	long delta_jiffies = now_jiffies - scd->tick_jiffies;
 	u64 clock = scd->clock;
 	u64 min_clock, max_clock;
-	s64 delta = now - scd->prev_raw;
+	s64 delta = now - scd->tick_raw;
 
 	WARN_ON_ONCE(!irqs_disabled());
 	min_clock = scd->tick_gtod + delta_jiffies * TICK_NSEC;
@@ -130,7 +128,6 @@ static void __update_sched_clock(struct sched_clock_data *scd, u64 now)
 	if (unlikely(clock < min_clock))
 		clock = min_clock;
 
-	scd->prev_raw = now;
 	scd->tick_jiffies = now_jiffies;
 	scd->clock = clock;
 }
@@ -234,7 +231,6 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
 	 * rq clock:
 	 */
 	__raw_spin_lock(&scd->lock);
-	scd->prev_raw = now;
 	scd->clock += delta_ns;
 	__raw_spin_unlock(&scd->lock);
 
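For context, scd->tick_raw is resampled on every scheduler tick, so the delta computed in __update_sched_clock() normally spans at most one tick window. Below is a paraphrased sketch of sched_clock_tick() from the same file; it is not part of this diff and is reconstructed from memory, so details such as the early-exit and warning checks are omitted and may differ from the tree:

void sched_clock_tick(void)
{
	struct sched_clock_data *scd = this_scd();
	u64 now_gtod = ktime_to_ns(ktime_get());
	u64 now = sched_clock();

	__raw_spin_lock(&scd->lock);
	__update_sched_clock(scd, now);
	/* re-anchor the raw and GTOD timestamps for the next tick window */
	scd->tick_raw = now;
	scd->tick_gtod = now_gtod;
	__raw_spin_unlock(&scd->lock);
}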