Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	36
1 file changed, 19 insertions(+), 17 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 6f230596bd0c..d906f72b42d2 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -71,6 +71,7 @@
 #include <linux/debugfs.h>
 #include <linux/ctype.h>
 #include <linux/ftrace.h>
+#include <trace/sched.h>
 
 #include <asm/tlb.h>
 #include <asm/irq_regs.h>
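The new <trace/sched.h> header declares the typed scheduler tracepoints used throughout this patch, replacing the free-form trace_mark() strings below. As a rough sketch of what one such declaration looked like under the early tracepoint API (the exact macro spellings, DEFINE_TRACE/TPPROTO/TPARGS versus later DECLARE_TRACE/TP_PROTO/TP_ARGS, shifted across releases, so treat this as an assumption rather than the header's literal contents):

	/* Sketch: declares trace_sched_wait_task(rq, p) plus its
	 * register/unregister helpers; the probe prototype is fixed
	 * at compile time instead of living in a format string. */
	DEFINE_TRACE(sched_wait_task,
		TPPROTO(struct rq *rq, struct task_struct *p),
			TPARGS(rq, p));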
@@ -1936,6 +1937,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
 	 * just go back and repeat.
 	 */
 	rq = task_rq_lock(p, &flags);
+	trace_sched_wait_task(rq, p);
 	running = task_running(rq, p);
 	on_rq = p->se.on_rq;
 	ncsw = 0;
@@ -2297,9 +2299,7 @@ out_activate:
 	success = 1;
 
 out_running:
-	trace_mark(kernel_sched_wakeup,
-		"pid %d state %ld ## rq %p task %p rq->curr %p",
-		p->pid, p->state, rq, p, rq->curr);
+	trace_sched_wakeup(rq, p);
 	check_preempt_curr(rq, p, sync);
 
 	p->state = TASK_RUNNING;
@@ -2432,9 +2432,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 		p->sched_class->task_new(rq, p);
 		inc_nr_running(rq);
 	}
-	trace_mark(kernel_sched_wakeup_new,
-		"pid %d state %ld ## rq %p task %p rq->curr %p",
-		p->pid, p->state, rq, p, rq->curr);
+	trace_sched_wakeup_new(rq, p);
 	check_preempt_curr(rq, p, 0);
 #ifdef CONFIG_SMP
 	if (p->sched_class->task_wake_up)
@@ -2607,11 +2605,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
 	struct mm_struct *mm, *oldmm;
 
 	prepare_task_switch(rq, prev, next);
-	trace_mark(kernel_sched_schedule,
-		"prev_pid %d next_pid %d prev_state %ld "
-		"## rq %p prev %p next %p",
-		prev->pid, next->pid, prev->state,
-		rq, prev, next);
+	trace_sched_switch(rq, prev, next);
 	mm = next->mm;
 	oldmm = prev->active_mm;
 	/*
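Each multi-line trace_mark() call collapses into one typed tracepoint, and consumers now attach a C probe whose prototype is checked at build time instead of parsing a format string. A minimal sketch of a consumer, assuming the generated register/unregister helpers for a tracepoint named sched_switch and a hypothetical probe function:

	/* Hypothetical probe; its signature must match the tracepoint proto. */
	static void probe_sched_switch(struct rq *rq, struct task_struct *prev,
				       struct task_struct *next)
	{
		/* e.g. record prev->pid and next->pid somewhere cheap */
	}

	/* module init: */  register_trace_sched_switch(probe_sched_switch);
	/* module exit: */  unregister_trace_sched_switch(probe_sched_switch);

With no probe registered the tracepoint is close to a no-op, which is what lets these calls sit on hot paths like context_switch().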
@@ -2851,6 +2845,7 @@ static void sched_migrate_task(struct task_struct *p, int dest_cpu)
 	    || unlikely(!cpu_active(dest_cpu)))
 		goto out;
 
+	trace_sched_migrate_task(rq, p, dest_cpu);
 	/* force the process onto the specified CPU */
 	if (migrate_task(p, dest_cpu, &req)) {
 		/* Need to wait for migration thread (might exit: take ref). */
@@ -4052,23 +4047,26 @@ DEFINE_PER_CPU(struct kernel_stat, kstat);
 EXPORT_PER_CPU_SYMBOL(kstat);
 
 /*
- * Return p->sum_exec_runtime plus any more ns on the sched_clock
- * that have not yet been banked in case the task is currently running.
+ * Return any ns on the sched_clock that have not yet been banked in
+ * @p in case that task is currently running.
  */
-unsigned long long task_sched_runtime(struct task_struct *p)
+unsigned long long task_delta_exec(struct task_struct *p)
 {
 	unsigned long flags;
-	u64 ns, delta_exec;
 	struct rq *rq;
+	u64 ns = 0;
 
 	rq = task_rq_lock(p, &flags);
-	ns = p->se.sum_exec_runtime;
+
 	if (task_current(rq, p)) {
+		u64 delta_exec;
+
 		update_rq_clock(rq);
 		delta_exec = rq->clock - p->se.exec_start;
 		if ((s64)delta_exec > 0)
-			ns += delta_exec;
+			ns = delta_exec;
 	}
+
 	task_rq_unlock(rq, &flags);
 
 	return ns;
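Note that this hunk changes semantics along with the name: task_sched_runtime() returned p->se.sum_exec_runtime plus any unbanked delta, whereas task_delta_exec() returns only the delta that has not yet been banked, i.e. 0 unless the task is on a CPU at the moment of the call. For a caller the difference looks like:

	u64 total = task_sched_runtime(p);	/* before: sum_exec_runtime + unbanked */
	u64 delta = task_delta_exec(p);		/* after: unbanked part only, often 0 */

A caller that still wants the running total must now add p->se.sum_exec_runtime itself.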
@@ -4085,6 +4083,7 @@ void account_user_time(struct task_struct *p, cputime_t cputime)
 	cputime64_t tmp;
 
 	p->utime = cputime_add(p->utime, cputime);
+	account_group_user_time(p, cputime);
 
 	/* Add user time to cpustat. */
 	tmp = cputime_to_cputime64(cputime);
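This and the three hunks below follow one pattern: wherever a per-task field (utime/stime) is bumped, the same cputime is now also fed into a thread-group total via account_group_user_time()/account_group_system_time(). Those helpers are defined elsewhere in this series; purely to illustrate the shape, a sketch under loud assumptions (the group_utime field below is invented for the example, not a real signal_struct member):

	/* Illustrative only: accumulate group-wide user time.
	 * group_utime is a placeholder field name. */
	static inline void account_group_user_time(struct task_struct *tsk,
						   cputime_t cputime)
	{
		struct signal_struct *sig = tsk->signal;

		if (sig)	/* signal_struct can be gone late in exit */
			sig->group_utime = cputime_add(sig->group_utime, cputime);
	}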
@@ -4109,6 +4108,7 @@ static void account_guest_time(struct task_struct *p, cputime_t cputime)
 	tmp = cputime_to_cputime64(cputime);
 
 	p->utime = cputime_add(p->utime, cputime);
+	account_group_user_time(p, cputime);
 	p->gtime = cputime_add(p->gtime, cputime);
 
 	cpustat->user = cputime64_add(cpustat->user, tmp);
@@ -4144,6 +4144,7 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
 	}
 
 	p->stime = cputime_add(p->stime, cputime);
+	account_group_system_time(p, cputime);
 
 	/* Add system time to cpustat. */
 	tmp = cputime_to_cputime64(cputime);
@@ -4185,6 +4186,7 @@ void account_steal_time(struct task_struct *p, cputime_t steal)
 
 	if (p == rq->idle) {
 		p->stime = cputime_add(p->stime, steal);
+		account_group_system_time(p, steal);
 		if (atomic_read(&rq->nr_iowait) > 0)
 			cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
 		else