Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	44
1 file changed, 23 insertions(+), 21 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 11ca39017835..6625c3c4b10d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -55,6 +55,7 @@
 #include <linux/cpuset.h>
 #include <linux/percpu.h>
 #include <linux/kthread.h>
+#include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/sysctl.h>
 #include <linux/syscalls.h>
@@ -71,6 +72,7 @@
 #include <linux/debugfs.h>
 #include <linux/ctype.h>
 #include <linux/ftrace.h>
+#include <trace/sched.h>
 
 #include <asm/tlb.h>
 #include <asm/irq_regs.h>
@@ -226,9 +228,8 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
 
 	now = hrtimer_cb_get_time(&rt_b->rt_period_timer);
 	hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period);
-	hrtimer_start(&rt_b->rt_period_timer,
-		      rt_b->rt_period_timer.expires,
-		      HRTIMER_MODE_ABS);
+	hrtimer_start_expires(&rt_b->rt_period_timer,
+				HRTIMER_MODE_ABS);
 	}
 	spin_unlock(&rt_b->rt_runtime_lock);
 }
@@ -1070,7 +1071,7 @@ static void hrtick_start(struct rq *rq, u64 delay)
 	struct hrtimer *timer = &rq->hrtick_timer;
 	ktime_t time = ktime_add_ns(timer->base->get_time(), delay);
 
-	timer->expires = time;
+	hrtimer_set_expires(timer, time);
 
 	if (rq == this_rq()) {
 		hrtimer_restart(timer);
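The two hunks above move the scheduler off direct writes to the hrtimer expiry field and onto accessor helpers. A minimal sketch of what those helpers amount to, assuming the hrtimer API of this era (the bodies below are an approximation for illustration, not the in-tree definitions, which hide the field's representation behind the accessors):

	/* Sketch only: approximate shape of the accessors used above. */
	static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
	{
		timer->expires = time;	/* record the expiry without arming the timer */
	}

	static inline int hrtimer_start_expires(struct hrtimer *timer,
						enum hrtimer_mode mode)
	{
		/* arm the timer at whatever expiry was recorded beforehand */
		return hrtimer_start(timer, timer->expires, mode);
	}

The point of the conversion is that callers no longer need to know how the expiry is stored, which lets the hrtimer core change that representation without touching call sites like start_rt_bandwidth() and hrtick_start().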
@@ -1941,6 +1942,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
 	 * just go back and repeat.
 	 */
 	rq = task_rq_lock(p, &flags);
+	trace_sched_wait_task(rq, p);
 	running = task_running(rq, p);
 	on_rq = p->se.on_rq;
 	ncsw = 0;
@@ -2302,9 +2304,7 @@ out_activate:
 	success = 1;
 
 out_running:
-	trace_mark(kernel_sched_wakeup,
-		"pid %d state %ld ## rq %p task %p rq->curr %p",
-		p->pid, p->state, rq, p, rq->curr);
+	trace_sched_wakeup(rq, p);
 	check_preempt_curr(rq, p, sync);
 
 	p->state = TASK_RUNNING;
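The trace_mark() call sites with their format strings are being replaced by static tracepoints declared in <trace/sched.h> (hence the new include earlier in the diff). As a rough illustration of the mechanism, a declaration and probe might look like the following; the exact macro spellings are an assumption in the style of the tracepoint API of this period, not a quote of the real header:

	/* Illustrative sketch of a tracepoint declaration and probe. */
	DECLARE_TRACE(sched_wakeup,
		TPPROTO(struct rq *rq, struct task_struct *p),
		TPARGS(rq, p));

	/* A tracer attaches a probe to consume the event: */
	static void probe_sched_wakeup(struct rq *rq, struct task_struct *p)
	{
		/* record p->pid, p->state, etc. into the tracer's buffer */
	}

	/* registration (function generated by the declaration macro):
	 * register_trace_sched_wakeup(probe_sched_wakeup);
	 */

When no probe is registered, trace_sched_wakeup(rq, p) reduces to a single predicted-not-taken branch, so the scheduler fast paths no longer pay for formatting the verbose trace_mark() strings.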
@@ -2437,9 +2437,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 		p->sched_class->task_new(rq, p);
 		inc_nr_running(rq);
 	}
-	trace_mark(kernel_sched_wakeup_new,
-		"pid %d state %ld ## rq %p task %p rq->curr %p",
-		p->pid, p->state, rq, p, rq->curr);
+	trace_sched_wakeup_new(rq, p);
 	check_preempt_curr(rq, p, 0);
 #ifdef CONFIG_SMP
 	if (p->sched_class->task_wake_up)
@@ -2612,11 +2610,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
 	struct mm_struct *mm, *oldmm;
 
 	prepare_task_switch(rq, prev, next);
-	trace_mark(kernel_sched_schedule,
-		"prev_pid %d next_pid %d prev_state %ld "
-		"## rq %p prev %p next %p",
-		prev->pid, next->pid, prev->state,
-		rq, prev, next);
+	trace_sched_switch(rq, prev, next);
 	mm = next->mm;
 	oldmm = prev->active_mm;
 	/*
@@ -2856,6 +2850,7 @@ static void sched_migrate_task(struct task_struct *p, int dest_cpu)
 	    || unlikely(!cpu_active(dest_cpu)))
 		goto out;
 
+	trace_sched_migrate_task(rq, p, dest_cpu);
 	/* force the process onto the specified CPU */
 	if (migrate_task(p, dest_cpu, &req)) {
 		/* Need to wait for migration thread (might exit: take ref). */
@@ -4057,23 +4052,26 @@ DEFINE_PER_CPU(struct kernel_stat, kstat);
 EXPORT_PER_CPU_SYMBOL(kstat);
 
 /*
- * Return p->sum_exec_runtime plus any more ns on the sched_clock
- * that have not yet been banked in case the task is currently running.
+ * Return any ns on the sched_clock that have not yet been banked in
+ * @p in case that task is currently running.
  */
-unsigned long long task_sched_runtime(struct task_struct *p)
+unsigned long long task_delta_exec(struct task_struct *p)
 {
 	unsigned long flags;
-	u64 ns, delta_exec;
 	struct rq *rq;
+	u64 ns = 0;
 
 	rq = task_rq_lock(p, &flags);
-	ns = p->se.sum_exec_runtime;
+
 	if (task_current(rq, p)) {
+		u64 delta_exec;
+
 		update_rq_clock(rq);
 		delta_exec = rq->clock - p->se.exec_start;
 		if ((s64)delta_exec > 0)
-			ns += delta_exec;
+			ns = delta_exec;
 	}
+
 	task_rq_unlock(rq, &flags);
 
 	return ns;
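The rename above narrows the function's contract: task_sched_runtime() returned the banked sum_exec_runtime plus the running delta, while task_delta_exec() returns only the delta that has not yet been banked. A hedged sketch of how a caller could reconstruct the old value (the helper name total_runtime is hypothetical, for illustration only):

	/* Hypothetical caller: banked total plus the unbanked delta.
	 * Reading sum_exec_runtime without the rq lock is only approximate;
	 * the sketch ignores that. */
	static u64 total_runtime(struct task_struct *p)
	{
		return p->se.sum_exec_runtime + task_delta_exec(p);
	}

Splitting the delta out lets callers that already track the banked total avoid taking it twice, at the cost of doing the addition themselves.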
@@ -4090,6 +4088,7 @@ void account_user_time(struct task_struct *p, cputime_t cputime)
 	cputime64_t tmp;
 
 	p->utime = cputime_add(p->utime, cputime);
+	account_group_user_time(p, cputime);
 
 	/* Add user time to cpustat. */
 	tmp = cputime_to_cputime64(cputime);
@@ -4114,6 +4113,7 @@ static void account_guest_time(struct task_struct *p, cputime_t cputime)
 	tmp = cputime_to_cputime64(cputime);
 
 	p->utime = cputime_add(p->utime, cputime);
+	account_group_user_time(p, cputime);
 	p->gtime = cputime_add(p->gtime, cputime);
 
 	cpustat->user = cputime64_add(cpustat->user, tmp);
@@ -4149,6 +4149,7 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
 	}
 
 	p->stime = cputime_add(p->stime, cputime);
+	account_group_system_time(p, cputime);
 
 	/* Add system time to cpustat. */
 	tmp = cputime_to_cputime64(cputime);
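The account_group_user_time()/account_group_system_time() calls added in these accounting paths mirror each per-task charge into a total shared by the whole thread group, so process-wide CPU clocks can be read without iterating over every thread. A simplified sketch of the idea, assuming an accumulator reachable through the signal struct; the field name and body are illustrative only, not the actual helpers (which live in kernel/sched_stats.h and use per-CPU buffers):

	/* Illustrative only: charge the same cputime to a thread-group-wide
	 * accumulator.  The field group_utime is hypothetical. */
	static inline void account_group_user_time(struct task_struct *p,
						   cputime_t cputime)
	{
		struct signal_struct *sig = p->signal;

		if (unlikely(!sig))	/* task may be exiting */
			return;
		sig->group_utime = cputime_add(sig->group_utime, cputime);
	}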
@@ -4190,6 +4191,7 @@ void account_steal_time(struct task_struct *p, cputime_t steal)
 
 	if (p == rq->idle) {
 		p->stime = cputime_add(p->stime, steal);
+		account_group_system_time(p, steal);
 		if (atomic_read(&rq->nr_iowait) > 0)
 			cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
 		else