aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--	include/linux/sched.h	1
-rw-r--r--	kernel/sched.c	8
-rw-r--r--	kernel/sched_rt.c	24
3 files changed, 22 insertions(+), 11 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e5f928a079e8..836a86c32a65 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1012,6 +1012,7 @@ struct sched_class {
 			struct rq *busiest, struct sched_domain *sd,
 			enum cpu_idle_type idle);
 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
+	int (*needs_post_schedule) (struct rq *this_rq);
 	void (*post_schedule) (struct rq *this_rq);
 	void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
 
diff --git a/kernel/sched.c b/kernel/sched.c
index 8fca364f3593..3acbad8991a2 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2621,6 +2621,12 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 {
 	struct mm_struct *mm = rq->prev_mm;
 	long prev_state;
+#ifdef CONFIG_SMP
+	int post_schedule = 0;
+
+	if (current->sched_class->needs_post_schedule)
+		post_schedule = current->sched_class->needs_post_schedule(rq);
+#endif
 
 	rq->prev_mm = NULL;
 
@@ -2639,7 +2645,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	finish_arch_switch(prev);
 	finish_lock_switch(rq, prev);
 #ifdef CONFIG_SMP
-	if (current->sched_class->post_schedule)
+	if (post_schedule)
 		current->sched_class->post_schedule(rq);
 #endif
 
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 8d33843cb2c4..b0b6ea4ed674 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1290,20 +1290,23 @@ static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
 	pull_rt_task(rq);
 }
 
+/*
+ * assumes rq->lock is held
+ */
+static int needs_post_schedule_rt(struct rq *rq)
+{
+	return rq->rt.overloaded ? 1 : 0;
+}
+
 static void post_schedule_rt(struct rq *rq)
 {
 	/*
-	 * If we have more than one rt_task queued, then
-	 * see if we can push the other rt_tasks off to other CPUS.
-	 * Note we may release the rq lock, and since
-	 * the lock was owned by prev, we need to release it
-	 * first via finish_lock_switch and then reaquire it here.
+	 * This is only called if needs_post_schedule_rt() indicates that
+	 * we need to push tasks away
 	 */
-	if (unlikely(rq->rt.overloaded)) {
-		spin_lock_irq(&rq->lock);
-		push_rt_tasks(rq);
-		spin_unlock_irq(&rq->lock);
-	}
+	spin_lock_irq(&rq->lock);
+	push_rt_tasks(rq);
+	spin_unlock_irq(&rq->lock);
 }
 
 /*
@@ -1557,6 +1560,7 @@ static const struct sched_class rt_sched_class = {
 	.rq_online		= rq_online_rt,
 	.rq_offline		= rq_offline_rt,
 	.pre_schedule		= pre_schedule_rt,
+	.needs_post_schedule	= needs_post_schedule_rt,
 	.post_schedule		= post_schedule_rt,
 	.task_wake_up		= task_wake_up_rt,
 	.switched_from		= switched_from_rt,