about summary refs log tree commit diff stats
path: root/kernel/sched.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- kernel/sched.c | 36
1 file changed, 19 insertions(+), 17 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index eb3c72953615..bfa87918380f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -71,6 +71,7 @@
71#include <linux/debugfs.h> 71#include <linux/debugfs.h>
72#include <linux/ctype.h> 72#include <linux/ctype.h>
73#include <linux/ftrace.h> 73#include <linux/ftrace.h>
74#include <trace/sched.h>
74 75
75#include <asm/tlb.h> 76#include <asm/tlb.h>
76#include <asm/irq_regs.h> 77#include <asm/irq_regs.h>
@@ -1935,6 +1936,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
1935 * just go back and repeat. 1936 * just go back and repeat.
1936 */ 1937 */
1937 rq = task_rq_lock(p, &flags); 1938 rq = task_rq_lock(p, &flags);
1939 trace_sched_wait_task(rq, p);
1938 running = task_running(rq, p); 1940 running = task_running(rq, p);
1939 on_rq = p->se.on_rq; 1941 on_rq = p->se.on_rq;
1940 ncsw = 0; 1942 ncsw = 0;
@@ -2296,9 +2298,7 @@ out_activate:
2296 success = 1; 2298 success = 1;
2297 2299
2298out_running: 2300out_running:
2299 trace_mark(kernel_sched_wakeup, 2301 trace_sched_wakeup(rq, p);
2300 "pid %d state %ld ## rq %p task %p rq->curr %p",
2301 p->pid, p->state, rq, p, rq->curr);
2302 check_preempt_curr(rq, p, sync); 2302 check_preempt_curr(rq, p, sync);
2303 2303
2304 p->state = TASK_RUNNING; 2304 p->state = TASK_RUNNING;
@@ -2431,9 +2431,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
2431 p->sched_class->task_new(rq, p); 2431 p->sched_class->task_new(rq, p);
2432 inc_nr_running(rq); 2432 inc_nr_running(rq);
2433 } 2433 }
2434 trace_mark(kernel_sched_wakeup_new, 2434 trace_sched_wakeup_new(rq, p);
2435 "pid %d state %ld ## rq %p task %p rq->curr %p",
2436 p->pid, p->state, rq, p, rq->curr);
2437 check_preempt_curr(rq, p, 0); 2435 check_preempt_curr(rq, p, 0);
2438#ifdef CONFIG_SMP 2436#ifdef CONFIG_SMP
2439 if (p->sched_class->task_wake_up) 2437 if (p->sched_class->task_wake_up)
@@ -2606,11 +2604,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
2606 struct mm_struct *mm, *oldmm; 2604 struct mm_struct *mm, *oldmm;
2607 2605
2608 prepare_task_switch(rq, prev, next); 2606 prepare_task_switch(rq, prev, next);
2609 trace_mark(kernel_sched_schedule, 2607 trace_sched_switch(rq, prev, next);
2610 "prev_pid %d next_pid %d prev_state %ld "
2611 "## rq %p prev %p next %p",
2612 prev->pid, next->pid, prev->state,
2613 rq, prev, next);
2614 mm = next->mm; 2608 mm = next->mm;
2615 oldmm = prev->active_mm; 2609 oldmm = prev->active_mm;
2616 /* 2610 /*
@@ -2850,6 +2844,7 @@ static void sched_migrate_task(struct task_struct *p, int dest_cpu)
2850 || unlikely(!cpu_active(dest_cpu))) 2844 || unlikely(!cpu_active(dest_cpu)))
2851 goto out; 2845 goto out;
2852 2846
2847 trace_sched_migrate_task(rq, p, dest_cpu);
2853 /* force the process onto the specified CPU */ 2848 /* force the process onto the specified CPU */
2854 if (migrate_task(p, dest_cpu, &req)) { 2849 if (migrate_task(p, dest_cpu, &req)) {
2855 /* Need to wait for migration thread (might exit: take ref). */ 2850 /* Need to wait for migration thread (might exit: take ref). */
@@ -4051,23 +4046,26 @@ DEFINE_PER_CPU(struct kernel_stat, kstat);
4051EXPORT_PER_CPU_SYMBOL(kstat); 4046EXPORT_PER_CPU_SYMBOL(kstat);
4052 4047
4053/* 4048/*
4054 * Return p->sum_exec_runtime plus any more ns on the sched_clock 4049 * Return any ns on the sched_clock that have not yet been banked in
4055 * that have not yet been banked in case the task is currently running. 4050 * @p in case that task is currently running.
4056 */ 4051 */
4057unsigned long long task_sched_runtime(struct task_struct *p) 4052unsigned long long task_delta_exec(struct task_struct *p)
4058{ 4053{
4059 unsigned long flags; 4054 unsigned long flags;
4060 u64 ns, delta_exec;
4061 struct rq *rq; 4055 struct rq *rq;
4056 u64 ns = 0;
4062 4057
4063 rq = task_rq_lock(p, &flags); 4058 rq = task_rq_lock(p, &flags);
4064 ns = p->se.sum_exec_runtime; 4059
4065 if (task_current(rq, p)) { 4060 if (task_current(rq, p)) {
4061 u64 delta_exec;
4062
4066 update_rq_clock(rq); 4063 update_rq_clock(rq);
4067 delta_exec = rq->clock - p->se.exec_start; 4064 delta_exec = rq->clock - p->se.exec_start;
4068 if ((s64)delta_exec > 0) 4065 if ((s64)delta_exec > 0)
4069 ns += delta_exec; 4066 ns = delta_exec;
4070 } 4067 }
4068
4071 task_rq_unlock(rq, &flags); 4069 task_rq_unlock(rq, &flags);
4072 4070
4073 return ns; 4071 return ns;
@@ -4084,6 +4082,7 @@ void account_user_time(struct task_struct *p, cputime_t cputime)
4084 cputime64_t tmp; 4082 cputime64_t tmp;
4085 4083
4086 p->utime = cputime_add(p->utime, cputime); 4084 p->utime = cputime_add(p->utime, cputime);
4085 account_group_user_time(p, cputime);
4087 4086
4088 /* Add user time to cpustat. */ 4087 /* Add user time to cpustat. */
4089 tmp = cputime_to_cputime64(cputime); 4088 tmp = cputime_to_cputime64(cputime);
@@ -4108,6 +4107,7 @@ static void account_guest_time(struct task_struct *p, cputime_t cputime)
4108 tmp = cputime_to_cputime64(cputime); 4107 tmp = cputime_to_cputime64(cputime);
4109 4108
4110 p->utime = cputime_add(p->utime, cputime); 4109 p->utime = cputime_add(p->utime, cputime);
4110 account_group_user_time(p, cputime);
4111 p->gtime = cputime_add(p->gtime, cputime); 4111 p->gtime = cputime_add(p->gtime, cputime);
4112 4112
4113 cpustat->user = cputime64_add(cpustat->user, tmp); 4113 cpustat->user = cputime64_add(cpustat->user, tmp);
@@ -4143,6 +4143,7 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
4143 } 4143 }
4144 4144
4145 p->stime = cputime_add(p->stime, cputime); 4145 p->stime = cputime_add(p->stime, cputime);
4146 account_group_system_time(p, cputime);
4146 4147
4147 /* Add system time to cpustat. */ 4148 /* Add system time to cpustat. */
4148 tmp = cputime_to_cputime64(cputime); 4149 tmp = cputime_to_cputime64(cputime);
@@ -4184,6 +4185,7 @@ void account_steal_time(struct task_struct *p, cputime_t steal)
4184 4185
4185 if (p == rq->idle) { 4186 if (p == rq->idle) {
4186 p->stime = cputime_add(p->stime, steal); 4187 p->stime = cputime_add(p->stime, steal);
4188 account_group_system_time(p, steal);
4187 if (atomic_read(&rq->nr_iowait) > 0) 4189 if (atomic_read(&rq->nr_iowait) > 0)
4188 cpustat->iowait = cputime64_add(cpustat->iowait, tmp); 4190 cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
4189 else 4191 else