author		Ingo Molnar <mingo@elte.hu>	2007-08-09 05:16:46 -0400
committer	Ingo Molnar <mingo@elte.hu>	2007-08-09 05:16:46 -0400
commit		b04a0f4c1651a553ee1a03dc70297d66ec74db5c
tree		25a0721d639f244f61bba84edf296b8442e373a4 /kernel
parent		a4ac01c36e286dd1b9a1d5cd7422c5af51dc55f8
sched: add [__]update_rq_clock(rq)
add the [__]update_rq_clock(rq) functions. (No change in functionality,
just reorganization to prepare for elimination of the heavy 64-bit
timestamp-passing in the scheduler.)

Signed-off-by: Ingo Molnar <mingo@elte.hu>
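For context, a minimal sketch of the calling convention this reorganization prepares for (illustrative only; example_enqueue() is a hypothetical callee, not part of this patch): instead of computing a 64-bit timestamp once and threading it through the call chain, callers refresh the per-runqueue clock and let callees read the cached rq->clock field.

	/* Before: a 64-bit 'now' value passed down the call chain. */
	u64 now = rq_clock(rq);
	example_enqueue(rq, p, now);	/* hypothetical callee */

	/* After: update once; callees read rq->clock themselves. */
	update_rq_clock(rq);
	example_enqueue(rq, p);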
Diffstat (limited to 'kernel')
 kernel/sched.c | 30 ++++++++++++++++++++++--------
 1 file changed, 22 insertions(+), 8 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 1fa07c14624e..d613723f324f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -318,15 +318,19 @@ static inline int cpu_of(struct rq *rq)
 }
 
 /*
- * Per-runqueue clock, as finegrained as the platform can give us:
+ * Update the per-runqueue clock, as finegrained as the platform can give
+ * us, but without assuming monotonicity, etc.:
  */
-static unsigned long long __rq_clock(struct rq *rq)
+static void __update_rq_clock(struct rq *rq)
 {
 	u64 prev_raw = rq->prev_clock_raw;
 	u64 now = sched_clock();
 	s64 delta = now - prev_raw;
 	u64 clock = rq->clock;
 
+#ifdef CONFIG_SCHED_DEBUG
+	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
+#endif
 	/*
 	 * Protect against sched_clock() occasionally going backwards:
 	 */
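To make the "going backwards" protection concrete, here is a small standalone userspace model of the update step (an illustrative sketch, not the kernel code: fake_sched_clock() and the simple "ignore negative deltas" policy are assumptions standing in for the clamping done in context lines this hunk does not show):

	#include <stdio.h>
	#include <stdint.h>

	struct rq_model {
		uint64_t prev_clock_raw;	/* last raw clock sample */
		uint64_t clock;			/* monotonic per-rq clock */
	};

	/* Hypothetical raw clock that occasionally jumps backwards. */
	static uint64_t raw_samples[] = { 100, 250, 240, 400 };
	static int sample_idx = 1;	/* sample 0 is the baseline */

	static uint64_t fake_sched_clock(void)
	{
		return raw_samples[sample_idx++];
	}

	static void model_update_rq_clock(struct rq_model *rq)
	{
		uint64_t now = fake_sched_clock();
		int64_t delta = (int64_t)(now - rq->prev_clock_raw);

		/* Protect against the raw clock going backwards: */
		if (delta > 0)
			rq->clock += (uint64_t)delta;

		rq->prev_clock_raw = now;
	}

	int main(void)
	{
		struct rq_model rq = { .prev_clock_raw = 100, .clock = 0 };
		int i;

		for (i = 1; i < 4; i++) {
			model_update_rq_clock(&rq);
			printf("raw=%llu clock=%llu\n",
			       (unsigned long long)rq.prev_clock_raw,
			       (unsigned long long)rq.clock);
		}
		return 0;
	}

The backward sample (250 -> 240) leaves the modeled rq->clock unchanged, so the cached clock never runs backwards even when the raw clock does.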
@@ -349,17 +353,24 @@ static unsigned long long __rq_clock(struct rq *rq)
 
 	rq->prev_clock_raw = now;
 	rq->clock = clock;
+}
 
-	return clock;
+static void update_rq_clock(struct rq *rq)
+{
+	if (likely(smp_processor_id() == cpu_of(rq)))
+		__update_rq_clock(rq);
 }
 
-static unsigned long long rq_clock(struct rq *rq)
+static u64 __rq_clock(struct rq *rq)
 {
-	int this_cpu = smp_processor_id();
+	__update_rq_clock(rq);
 
-	if (this_cpu == cpu_of(rq))
-		return __rq_clock(rq);
+	return rq->clock;
+}
 
+static u64 rq_clock(struct rq *rq)
+{
+	update_rq_clock(rq);
 	return rq->clock;
 }
 
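The split above yields two entry points with different contracts: __update_rq_clock() assumes it runs on the runqueue's own CPU (the CONFIG_SCHED_DEBUG warning in the previous hunk checks exactly that), while update_rq_clock() is safe to call anywhere and quietly does nothing for a remote runqueue. A hedged usage sketch, with an invented caller:

	/* Illustration only -- example_tick() is not part of this patch. */
	static void example_tick(struct rq *rq)
	{
		update_rq_clock(rq);	/* refresh once; no-op if rq is remote */
		u64 now = rq->clock;	/* then read the cached value */
		(void)now;
	}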
@@ -386,9 +397,12 @@ unsigned long long cpu_clock(int cpu)
 {
 	unsigned long long now;
 	unsigned long flags;
+	struct rq *rq;
 
 	local_irq_save(flags);
-	now = rq_clock(cpu_rq(cpu));
+	rq = cpu_rq(cpu);
+	update_rq_clock(rq);
+	now = rq->clock;
 	local_irq_restore(flags);
 
 	return now;
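For completeness, a hypothetical caller of the rewritten cpu_clock() (example_stamp() is a made-up name): the IRQ-save/restore pair inside cpu_clock() keeps the update-then-read sequence from being interleaved with a local interrupt.

	/* Illustration only; not part of this patch. */
	static void example_stamp(int cpu)
	{
		unsigned long long t = cpu_clock(cpu);
		printk(KERN_DEBUG "cpu%d clock: %llu\n", cpu, t);
	}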