author		Ingo Molnar <mingo@elte.hu>	2007-07-09 12:51:58 -0400
committer	Ingo Molnar <mingo@elte.hu>	2007-07-09 12:51:58 -0400
commit		20d315d42aed95423a7203e1d7e84086004b5a00 (patch)
tree		3649d00c3ed1053783727333de1291a71bdb3ca4 /kernel/sched.c
parent		6aa645ea5f7a246702e07f29edc7075d487ae4a3 (diff)
sched: add rq_clock()/__rq_clock()
add rq_clock()/__rq_clock(), a robust wrapper around sched_clock(),
used by CFS. It protects against common types of sched_clock() problems
(caused by hardware): time warping forwards and backwards.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
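
To make the filtering concrete, here is a minimal user-space sketch of the same clamping idea. Only the delta checks mirror the patch; the names (fake raw samples, TICK_NSEC_SKETCH, struct clk_state, filtered_clock) are made up for illustration and are not kernel API, and the max-delta statistic is omitted for brevity:

#include <stdio.h>
#include <stdint.h>

#define TICK_NSEC_SKETCH 1000000ULL	/* pretend one tick is 1ms */

struct clk_state {
	uint64_t prev_raw;	/* last raw clock reading */
	uint64_t clock;		/* monotonic, filtered clock */
	uint64_t warps;		/* raw clock went backwards */
	uint64_t overflows;	/* raw clock jumped too far forwards */
};

/* Feed one raw reading through the filter, as __rq_clock() does. */
static uint64_t filtered_clock(struct clk_state *s, uint64_t raw)
{
	int64_t delta = (int64_t)(raw - s->prev_raw);

	if (delta < 0) {
		/* backward warp: advance minimally rather than go back */
		s->clock++;
		s->warps++;
	} else if (delta > 2 * TICK_NSEC_SKETCH) {
		/* forward jump of more than two ticks: treat as bogus */
		s->clock++;
		s->overflows++;
	} else {
		s->clock += delta;
	}
	s->prev_raw = raw;
	return s->clock;
}

int main(void)
{
	struct clk_state s = { 0 };
	/* raw readings: normal, normal, backward warp, huge jump, normal */
	uint64_t samples[] = { 1000, 2000, 1500, 900000000, 900001000 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("raw=%llu clock=%llu\n",
		       (unsigned long long)samples[i],
		       (unsigned long long)filtered_clock(&s, samples[i]));
	return 0;
}

The output stays monotonic throughout: the backward warp and the oversized forward jump each advance the filtered clock by just 1, while sane deltas pass through unchanged.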
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	46
1 file changed, 46 insertions(+), 0 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 085418bedccd..29eb227e33f7 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -389,6 +389,52 @@ static inline int cpu_of(struct rq *rq)
 }
 
 /*
+ * Per-runqueue clock, as finegrained as the platform can give us:
+ */
+static unsigned long long __rq_clock(struct rq *rq)
+{
+	u64 prev_raw = rq->prev_clock_raw;
+	u64 now = sched_clock();
+	s64 delta = now - prev_raw;
+	u64 clock = rq->clock;
+
+	/*
+	 * Protect against sched_clock() occasionally going backwards:
+	 */
+	if (unlikely(delta < 0)) {
+		clock++;
+		rq->clock_warps++;
+	} else {
+		/*
+		 * Catch too large forward jumps too:
+		 */
+		if (unlikely(delta > 2*TICK_NSEC)) {
+			clock++;
+			rq->clock_overflows++;
+		} else {
+			if (unlikely(delta > rq->clock_max_delta))
+				rq->clock_max_delta = delta;
+			clock += delta;
+		}
+	}
+
+	rq->prev_clock_raw = now;
+	rq->clock = clock;
+
+	return clock;
+}
+
+static inline unsigned long long rq_clock(struct rq *rq)
+{
+	int this_cpu = smp_processor_id();
+
+	if (this_cpu == cpu_of(rq))
+		return __rq_clock(rq);
+
+	return rq->clock;
+}
+
+/*
  * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
  * See detach_destroy_domains: synchronize_sched for details.
  *
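
A note on the design of the rq_clock() wrapper above: it only advances the filtered clock when called on the runqueue's own CPU; a caller on any other CPU simply gets the last cached rq->clock value. This sidesteps cross-CPU use of sched_clock(), whose readings are not guaranteed to be synchronized between CPUs.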