```
author     Ingo Molnar <mingo@elte.hu>   2008-08-11 18:07:02 -0400
committer  Ingo Molnar <mingo@elte.hu>   2008-08-11 18:07:02 -0400
commit     e26b33e9552c29c1d3fe67dc602c6264c29f5dc7
tree       98f99cedc86db77665d1a3b582444b9049aacc58
parent     279ef6bbb8308488398c8f33b04c760148428378
parent     cf206bffbb7542df54043fad9898113172af99d8
```
Merge branch 'sched/clock' into sched/urgent
```
-rw-r--r--  include/linux/sched.h     |  31
-rw-r--r--  kernel/sched_clock.c      | 163
-rw-r--r--  kernel/time/tick-sched.c  |   2

3 files changed, 53 insertions(+), 143 deletions(-)
```
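The substance of the merge is in kernel/sched_clock.c below: the MULTI_SHIFT multiplier estimation is dropped, and __update_sched_clock() instead clips each raw sched_clock() delta against a window anchored at gtod plus the elapsed jiffies. A standalone sketch of that clamping rule, assuming HZ=1000 for the TICK_NSEC value (illustrative userspace C, not the kernel code itself; the final `clock + 1` fallback follows the resulting kernel function, which the hunks below only show in part):

```c
#include <stdint.h>

#define TICK_NSEC 1000000ULL    /* assumes HZ=1000; the kernel derives this */

/*
 * Mirror of the post-merge window clamp in __update_sched_clock():
 * the clock may never fall below gtod plus the elapsed jiffies, and
 * may run at most one extra tick ahead of that.
 */
static uint64_t clamp_sched_clock(uint64_t clock, int64_t delta,
                                  uint64_t tick_gtod, long delta_jiffies)
{
        uint64_t min_clock = tick_gtod + delta_jiffies * TICK_NSEC;
        uint64_t max_clock = min_clock + TICK_NSEC;

        if (delta < 0) {
                clock++;        /* raw counter went backwards: only nudge forward */
        } else if (clock + delta > max_clock) {
                /* too fast: pin to the window edge, or inch forward */
                clock = (clock < max_clock) ? max_clock : clock + 1;
        } else {
                clock += delta;
        }

        if (clock < min_clock) /* too slow: pull up to the gtod-based floor */
                clock = min_clock;

        return clock;
}
```

The effect is that the filtered clock may drift within one tick of gtod but can neither jump far ahead of it nor fall behind it, which is what made the per-delta multiplier (and its NO_HZ check_max escape hatch) unnecessary.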
```diff
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5270d449ff9d..5850bfb968a8 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1551,16 +1551,10 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 
 extern unsigned long long sched_clock(void);
 
-#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
-static inline void sched_clock_init(void)
-{
-}
-
-static inline u64 sched_clock_cpu(int cpu)
-{
-        return sched_clock();
-}
+extern void sched_clock_init(void);
+extern u64 sched_clock_cpu(int cpu);
 
+#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 static inline void sched_clock_tick(void)
 {
 }
@@ -1572,28 +1566,11 @@ static inline void sched_clock_idle_sleep_event(void)
 static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
 {
 }
-
-#ifdef CONFIG_NO_HZ
-static inline void sched_clock_tick_stop(int cpu)
-{
-}
-
-static inline void sched_clock_tick_start(int cpu)
-{
-}
-#endif
-
-#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
-extern void sched_clock_init(void);
-extern u64 sched_clock_cpu(int cpu);
+#else
 extern void sched_clock_tick(void);
 extern void sched_clock_idle_sleep_event(void);
 extern void sched_clock_idle_wakeup_event(u64 delta_ns);
-#ifdef CONFIG_NO_HZ
-extern void sched_clock_tick_stop(int cpu);
-extern void sched_clock_tick_start(int cpu);
 #endif
-#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
 
 /*
  * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
```
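The header change makes sched_clock_init() and sched_clock_cpu() unconditional externs; their stub implementations move into kernel/sched_clock.c behind the new #else branch (next file). A hypothetical caller, purely for illustration — sample_cpu_clock() is not part of this patch:

```c
/*
 * Hypothetical example, not in this patch: with the declarations now
 * unconditional, generic code no longer needs its own
 * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK branches.
 */
static u64 sample_cpu_clock(int cpu)
{
        return sched_clock_cpu(cpu); /* stable or unstable flavor, per config */
}
```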
```diff
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index 5a2dc7d8fd98..204991a0bfa7 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -42,12 +42,9 @@ unsigned long long __attribute__((weak)) sched_clock(void)
         return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
 }
 
-#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+static __read_mostly int sched_clock_running;
 
-#define MULTI_SHIFT 15
-/* Max is double, Min is 1/2 */
-#define MAX_MULTI (2LL << MULTI_SHIFT)
-#define MIN_MULTI (1LL << (MULTI_SHIFT-1))
+#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 
 struct sched_clock_data {
         /*
@@ -58,14 +55,9 @@ struct sched_clock_data {
         raw_spinlock_t lock;
 
         unsigned long tick_jiffies;
-        u64 prev_raw;
         u64 tick_raw;
         u64 tick_gtod;
         u64 clock;
-        s64 multi;
-#ifdef CONFIG_NO_HZ
-        int check_max;
-#endif
 };
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);
@@ -80,8 +72,6 @@ static inline struct sched_clock_data *cpu_sdc(int cpu)
         return &per_cpu(sched_clock_data, cpu);
 }
 
-static __read_mostly int sched_clock_running;
-
 void sched_clock_init(void)
 {
         u64 ktime_now = ktime_to_ns(ktime_get());
@@ -93,90 +83,39 @@ void sched_clock_init(void)
 
                 scd->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
                 scd->tick_jiffies = now_jiffies;
-                scd->prev_raw = 0;
                 scd->tick_raw = 0;
                 scd->tick_gtod = ktime_now;
                 scd->clock = ktime_now;
-                scd->multi = 1 << MULTI_SHIFT;
-#ifdef CONFIG_NO_HZ
-                scd->check_max = 1;
-#endif
         }
 
         sched_clock_running = 1;
 }
 
-#ifdef CONFIG_NO_HZ
-/*
- * The dynamic ticks makes the delta jiffies inaccurate. This
- * prevents us from checking the maximum time update.
- * Disable the maximum check during stopped ticks.
- */
-void sched_clock_tick_stop(int cpu)
-{
-        struct sched_clock_data *scd = cpu_sdc(cpu);
-
-        scd->check_max = 0;
-}
-
-void sched_clock_tick_start(int cpu)
-{
-        struct sched_clock_data *scd = cpu_sdc(cpu);
-
-        scd->check_max = 1;
-}
-
-static int check_max(struct sched_clock_data *scd)
-{
-        return scd->check_max;
-}
-#else
-static int check_max(struct sched_clock_data *scd)
-{
-        return 1;
-}
-#endif /* CONFIG_NO_HZ */
-
 /*
  * update the percpu scd from the raw @now value
  *
  *  - filter out backward motion
  *  - use jiffies to generate a min,max window to clip the raw values
  */
-static void __update_sched_clock(struct sched_clock_data *scd, u64 now, u64 *time)
+static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
 {
         unsigned long now_jiffies = jiffies;
         long delta_jiffies = now_jiffies - scd->tick_jiffies;
         u64 clock = scd->clock;
         u64 min_clock, max_clock;
-        s64 delta = now - scd->prev_raw;
+        s64 delta = now - scd->tick_raw;
 
         WARN_ON_ONCE(!irqs_disabled());
-
-        /*
-         * At schedule tick the clock can be just under the gtod. We don't
-         * want to push it too prematurely.
-         */
-        min_clock = scd->tick_gtod + (delta_jiffies * TICK_NSEC);
-        if (min_clock > TICK_NSEC)
-                min_clock -= TICK_NSEC / 2;
+        min_clock = scd->tick_gtod + delta_jiffies * TICK_NSEC;
 
         if (unlikely(delta < 0)) {
                 clock++;
                 goto out;
         }
 
-        /*
-         * The clock must stay within a jiffie of the gtod.
-         * But since we may be at the start of a jiffy or the end of one
-         * we add another jiffy buffer.
-         */
-        max_clock = scd->tick_gtod + (2 + delta_jiffies) * TICK_NSEC;
-
-        delta *= scd->multi;
-        delta >>= MULTI_SHIFT;
+        max_clock = min_clock + TICK_NSEC;
 
-        if (unlikely(clock + delta > max_clock) && check_max(scd)) {
+        if (unlikely(clock + delta > max_clock)) {
                 if (clock < max_clock)
                         clock = max_clock;
                 else
@@ -189,12 +128,10 @@ static void __update_sched_clock(struct sched_clock_data *scd, u64 now, u64 *time)
         if (unlikely(clock < min_clock))
                 clock = min_clock;
 
-        if (time)
-                *time = clock;
-        else {
-                scd->prev_raw = now;
-                scd->clock = clock;
-        }
+        scd->tick_jiffies = now_jiffies;
+        scd->clock = clock;
+
+        return clock;
 }
 
 static void lock_double_clock(struct sched_clock_data *data1,
@@ -212,7 +149,7 @@ static void lock_double_clock(struct sched_clock_data *data1,
 u64 sched_clock_cpu(int cpu)
 {
         struct sched_clock_data *scd = cpu_sdc(cpu);
-        u64 now, clock;
+        u64 now, clock, this_clock, remote_clock;
 
         if (unlikely(!sched_clock_running))
                 return 0ull;
@@ -221,43 +158,44 @@ u64 sched_clock_cpu(int cpu)
         now = sched_clock();
 
         if (cpu != raw_smp_processor_id()) {
-                /*
-                 * in order to update a remote cpu's clock based on our
-                 * unstable raw time rebase it against:
-                 *   tick_raw           (offset between raw counters)
-                 *   tick_gotd          (tick offset between cpus)
-                 */
                 struct sched_clock_data *my_scd = this_scd();
 
                 lock_double_clock(scd, my_scd);
 
-                now -= my_scd->tick_raw;
-                now += scd->tick_raw;
+                this_clock = __update_sched_clock(my_scd, now);
+                remote_clock = scd->clock;
 
-                now += my_scd->tick_gtod;
-                now -= scd->tick_gtod;
+                /*
+                 * Use the opportunity that we have both locks
+                 * taken to couple the two clocks: we take the
+                 * larger time as the latest time for both
+                 * runqueues. (this creates monotonic movement)
+                 */
+                if (likely(remote_clock < this_clock)) {
+                        clock = this_clock;
+                        scd->clock = clock;
+                } else {
+                        /*
+                         * Should be rare, but possible:
+                         */
+                        clock = remote_clock;
+                        my_scd->clock = remote_clock;
+                }
 
                 __raw_spin_unlock(&my_scd->lock);
-
-                __update_sched_clock(scd, now, &clock);
-
-                __raw_spin_unlock(&scd->lock);
-
         } else {
                 __raw_spin_lock(&scd->lock);
-                __update_sched_clock(scd, now, NULL);
-                clock = scd->clock;
-                __raw_spin_unlock(&scd->lock);
+                clock = __update_sched_clock(scd, now);
         }
 
+        __raw_spin_unlock(&scd->lock);
+
         return clock;
 }
 
 void sched_clock_tick(void)
 {
         struct sched_clock_data *scd = this_scd();
-        unsigned long now_jiffies = jiffies;
-        s64 mult, delta_gtod, delta_raw;
         u64 now, now_gtod;
 
         if (unlikely(!sched_clock_running))
@@ -269,29 +207,14 @@ void sched_clock_tick(void)
         now = sched_clock();
 
         __raw_spin_lock(&scd->lock);
-        __update_sched_clock(scd, now, NULL);
+        __update_sched_clock(scd, now);
         /*
          * update tick_gtod after __update_sched_clock() because that will
          * already observe 1 new jiffy; adding a new tick_gtod to that would
          * increase the clock 2 jiffies.
          */
-        delta_gtod = now_gtod - scd->tick_gtod;
-        delta_raw = now - scd->tick_raw;
-
-        if ((long)delta_raw > 0) {
-                mult = delta_gtod << MULTI_SHIFT;
-                do_div(mult, delta_raw);
-                scd->multi = mult;
-                if (scd->multi > MAX_MULTI)
-                        scd->multi = MAX_MULTI;
-                else if (scd->multi < MIN_MULTI)
-                        scd->multi = MIN_MULTI;
-        } else
-                scd->multi = 1 << MULTI_SHIFT;
-
         scd->tick_raw = now;
         scd->tick_gtod = now_gtod;
-        scd->tick_jiffies = now_jiffies;
         __raw_spin_unlock(&scd->lock);
 }
 
@@ -310,7 +233,6 @@ EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);
 void sched_clock_idle_wakeup_event(u64 delta_ns)
 {
         struct sched_clock_data *scd = this_scd();
-        u64 now = sched_clock();
 
         /*
          * Override the previous timestamp and ignore all
@@ -319,15 +241,28 @@ void sched_clock_idle_wakeup_event(u64 delta_ns)
          * rq clock:
          */
         __raw_spin_lock(&scd->lock);
-        scd->prev_raw = now;
         scd->clock += delta_ns;
-        scd->multi = 1 << MULTI_SHIFT;
         __raw_spin_unlock(&scd->lock);
 
         touch_softlockup_watchdog();
 }
 EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
 
+#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
+
+void sched_clock_init(void)
+{
+        sched_clock_running = 1;
+}
+
+u64 sched_clock_cpu(int cpu)
+{
+        if (unlikely(!sched_clock_running))
+                return 0;
+
+        return sched_clock();
+}
+
 #endif
 
 unsigned long long cpu_clock(int cpu)
```
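The notable new logic above is the coupling step in sched_clock_cpu(): while both per-CPU clocks are held locked via lock_double_clock(), the reader and the target CPU converge on the larger of the two clock values, which keeps remote readings monotonic with respect to local ones. A reduced model of that rule (illustrative only; the kernel writes back only the side that lags, under the per-CPU spinlocks):

```c
#include <stdint.h>

/*
 * Reduced model of the coupling in sched_clock_cpu(): both per-cpu
 * clocks converge on the maximum of the two, which is also the value
 * returned to the caller.
 */
static uint64_t couple_clocks(uint64_t *this_clock, uint64_t *remote_clock)
{
        uint64_t clock = (*remote_clock < *this_clock) ? *this_clock
                                                       : *remote_clock;

        *this_clock = clock;   /* no-op for whichever side already leads */
        *remote_clock = clock;
        return clock;
}
```

This also explains why the old rebase-against-tick-offsets arithmetic (and its comment) could be deleted: monotonicity is now enforced directly on the filtered clocks rather than reconstructed from raw counter offsets.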
```diff
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 825b4c00fe44..f5da526424a9 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -289,7 +289,6 @@ void tick_nohz_stop_sched_tick(int inidle)
                 ts->tick_stopped = 1;
                 ts->idle_jiffies = last_jiffies;
                 rcu_enter_nohz();
-                sched_clock_tick_stop(cpu);
         }
 
         /*
@@ -392,7 +391,6 @@ void tick_nohz_restart_sched_tick(void)
         select_nohz_load_balancer(0);
         now = ktime_get();
         tick_do_update_jiffies64(now);
-        sched_clock_tick_start(cpu);
         cpu_clear(cpu, nohz_cpu_mask);
 
         /*
```