author		Ingo Molnar <mingo@elte.hu>	2008-12-17 08:10:57 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-12-23 06:45:14 -0500
commit		aa9c4c0f967fdb482ea95e8473ec3d201e6e0781 (patch)
tree		8223d34630b7d3130825e8a2197e9bb51c34b7fa /kernel/sched.c
parent		7671581f1666ef4b54a1c1e598c51ac44c060a9b (diff)
perfcounters: fix task clock counter
Impact: fix per task clock counter precision
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	| 49 ++++++++++++---
1 file changed, 46 insertions(+), 3 deletions(-)
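In outline, the fix makes every perfcounter scheduler hook run with the runqueue lock held, so the per task clock is sampled consistently: update_rq_clock() loses its static qualifier, curr_rq_lock_irq_save()/curr_rq_unlock_irq_restore() are added for locking the current CPU's runqueue with interrupts disabled, __task_delta_exec() exposes a task's not-yet-banked runtime to callers that already hold the runqueue lock, and the perf_counter_task_sched_out()/perf_counter_task_tick() calls are moved inside rq->lock critical sections.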
diff --git a/kernel/sched.c b/kernel/sched.c
index 382cfdb5e38d..4d84ff4c8774 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -638,7 +638,7 @@ static inline int cpu_of(struct rq *rq)
 #define task_rq(p)		cpu_rq(task_cpu(p))
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
 
-static inline void update_rq_clock(struct rq *rq)
+inline void update_rq_clock(struct rq *rq)
 {
 	rq->clock = sched_clock_cpu(cpu_of(rq));
 }
@@ -969,6 +969,26 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
 	}
 }
 
+void curr_rq_lock_irq_save(unsigned long *flags)
+	__acquires(rq->lock)
+{
+	struct rq *rq;
+
+	local_irq_save(*flags);
+	rq = cpu_rq(smp_processor_id());
+	spin_lock(&rq->lock);
+}
+
+void curr_rq_unlock_irq_restore(unsigned long *flags)
+	__releases(rq->lock)
+{
+	struct rq *rq;
+
+	rq = cpu_rq(smp_processor_id());
+	spin_unlock(&rq->lock);
+	local_irq_restore(*flags);
+}
+
 void task_rq_unlock_wait(struct task_struct *p)
 {
 	struct rq *rq = task_rq(p);
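These helpers pin the caller to its own runqueue: local_irq_save() comes first, so the task cannot migrate between reading smp_processor_id() and taking the lock, and the unlock side can safely recompute cpu_rq(smp_processor_id()) because interrupts stay off for the whole critical section. A minimal usage sketch (the critical-section body is hypothetical; per the commit subject, the perfcounter code is the intended user):

	unsigned long flags;

	curr_rq_lock_irq_save(&flags);
	/*
	 * current cannot migrate here: its runqueue is locked and
	 * interrupts are off, so per task clock state can be
	 * sampled atomically.
	 */
	curr_rq_unlock_irq_restore(&flags);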
@@ -2558,7 +2578,6 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev,
 		    struct task_struct *next)
 {
 	fire_sched_out_preempt_notifiers(prev, next);
-	perf_counter_task_sched_out(prev, cpu_of(rq));
 	prepare_lock_switch(rq, next);
 	prepare_arch_switch(next);
 }
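The sched-out hook is not dropped for good: the final hunk below re-adds perf_counter_task_sched_out() inside schedule() itself, next to sched_info_switch() and still under the runqueue lock.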
@@ -4093,6 +4112,29 @@ EXPORT_PER_CPU_SYMBOL(kstat);
  * Return any ns on the sched_clock that have not yet been banked in
  * @p in case that task is currently running.
  */
+unsigned long long __task_delta_exec(struct task_struct *p, int update)
+{
+	s64 delta_exec;
+	struct rq *rq;
+
+	rq = task_rq(p);
+	WARN_ON_ONCE(!runqueue_is_locked());
+	WARN_ON_ONCE(!task_current(rq, p));
+
+	if (update)
+		update_rq_clock(rq);
+
+	delta_exec = rq->clock - p->se.exec_start;
+
+	WARN_ON_ONCE(delta_exec < 0);
+
+	return delta_exec;
+}
+
+/*
+ * Return any ns on the sched_clock that have not yet been banked in
+ * @p in case that task is currently running.
+ */
 unsigned long long task_delta_exec(struct task_struct *p)
 {
 	unsigned long flags;
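__task_delta_exec() is the lock-already-held counterpart of task_delta_exec(): the WARN_ON_ONCE() checks document that the caller must hold the current runqueue's lock and that @p must be running on it, and update=1 refreshes rq->clock first for full precision. A sketch of how a precise task clock could be composed from it; read_task_clock_ns() is hypothetical, but se.sum_exec_runtime is where already-banked runtime lives:

static u64 read_task_clock_ns(struct task_struct *p)
{
	/* assumes: p's runqueue lock is held and p is running there */
	return p->se.sum_exec_runtime +	/* ns already banked */
	       __task_delta_exec(p, 1);	/* unbanked ns, clock refreshed */
}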
@@ -4316,13 +4358,13 @@ void scheduler_tick(void)
 	update_rq_clock(rq);
 	update_cpu_load(rq);
 	curr->sched_class->task_tick(rq, curr, 0);
+	perf_counter_task_tick(curr, cpu);
 	spin_unlock(&rq->lock);
 
 #ifdef CONFIG_SMP
 	rq->idle_at_tick = idle_cpu(cpu);
 	trigger_load_balance(rq, cpu);
 #endif
-	perf_counter_task_tick(curr, cpu);
 }
 
 #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
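With perf_counter_task_tick() now called before spin_unlock(&rq->lock), the tick hook runs with the runqueue locked, which in particular allows it to read not-yet-banked runtime through __task_delta_exec() above without tripping the runqueue_is_locked() warning.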
@@ -4512,6 +4554,7 @@ need_resched_nonpreemptible:
 
 	if (likely(prev != next)) {
 		sched_info_switch(prev, next);
+		perf_counter_task_sched_out(prev, cpu);
 
 		rq->nr_switches++;
 		rq->curr = next;