-rw-r--r--	include/linux/sched.h	 7
-rw-r--r--	kernel/sched.c		17
2 files changed, 24 insertions(+), 0 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 94f624aef017..33b9b4841ee7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1348,6 +1348,13 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
 #endif
 
 extern unsigned long long sched_clock(void);
+
+/*
+ * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
+ * clock constructed from sched_clock():
+ */
+extern unsigned long long cpu_clock(int cpu);
+
 extern unsigned long long
 task_sched_runtime(struct task_struct *task);
 
diff --git a/kernel/sched.c b/kernel/sched.c
index a35a92ff38fd..93cf241cfbe9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -379,6 +379,23 @@ static inline unsigned long long rq_clock(struct rq *rq)
 #define task_rq(p)		cpu_rq(task_cpu(p))
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
 
+/*
+ * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
+ * clock constructed from sched_clock():
+ */
+unsigned long long cpu_clock(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long long now;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rq->lock, flags);
+	now = rq_clock(rq);
+	spin_unlock_irqrestore(&rq->lock, flags);
+
+	return now;
+}
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 /* Change a task's ->cfs_rq if it moves across CPUs */
 static inline void set_task_cfs_rq(struct task_struct *p)
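
A minimal usage sketch (not part of this commit; the caller below is hypothetical): read the per-cpu clock twice on the same CPU to timestamp a section. Both readings should come from one CPU, since sched_clock() is not guaranteed to stay synchronized across CPUs, and the result is in nanoseconds because rq_clock() is derived from sched_clock().

	#include <linux/sched.h>
	#include <linux/smp.h>
	#include <linux/kernel.h>

	/* Hypothetical kernel-internal caller timing a code section. */
	static void cpu_clock_example(void)
	{
		unsigned long long t0, t1;
		int cpu;

		/* get_cpu() disables preemption so both reads hit one CPU. */
		cpu = get_cpu();
		t0 = cpu_clock(cpu);
		/* ... section being timed ... */
		t1 = cpu_clock(cpu);
		put_cpu();

		printk(KERN_DEBUG "section took %llu ns on cpu %d\n",
		       t1 - t0, cpu);
	}

Note that cpu_clock() takes the target runqueue's lock with interrupts disabled, so a caller must not already hold that rq->lock.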