aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/sched_idletask.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/sched_idletask.c')
-rw-r--r--kernel/sched_idletask.c31
1 file changed, 31 insertions, 0 deletions
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index ca5374860aef..ef7a2661fa10 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -69,6 +69,33 @@ static void set_curr_task_idle(struct rq *rq)
69{ 69{
70} 70}
71 71
72static void switched_to_idle(struct rq *rq, struct task_struct *p,
73 int running)
74{
75 /* Can this actually happen?? */
76 if (running)
77 resched_task(rq->curr);
78 else
79 check_preempt_curr(rq, p);
80}
81
82static void prio_changed_idle(struct rq *rq, struct task_struct *p,
83 int oldprio, int running)
84{
85 /* This can happen for hot plug CPUS */
86
87 /*
88 * Reschedule if we are currently running on this runqueue and
89 * our priority decreased, or if we are not currently running on
90 * this runqueue and our priority is higher than the current's
91 */
92 if (running) {
93 if (p->prio > oldprio)
94 resched_task(rq->curr);
95 } else
96 check_preempt_curr(rq, p);
97}
98
72/* 99/*
73 * Simple, special scheduling class for the per-CPU idle tasks: 100 * Simple, special scheduling class for the per-CPU idle tasks:
74 */ 101 */
@@ -94,5 +121,9 @@ const struct sched_class idle_sched_class = {
94 121
95 .set_curr_task = set_curr_task_idle, 122 .set_curr_task = set_curr_task_idle,
96 .task_tick = task_tick_idle, 123 .task_tick = task_tick_idle,
124
125 .prio_changed = prio_changed_idle,
126 .switched_to = switched_to_idle,
127
97 /* no .task_new for idle tasks */ 128 /* no .task_new for idle tasks */
98}; 129};