Diffstat (limited to 'kernel/sched_rt.c')
 kernel/sched_rt.c | 50 ++++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 45 insertions(+), 5 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index c492fd2b2eec..ae4995c09aac 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -33,6 +33,14 @@ static inline void rt_clear_overload(struct rq *rq)
 	atomic_dec(&rto_count);
 	cpu_clear(rq->cpu, rt_overload_mask);
 }
+
+static void update_rt_migration(struct rq *rq)
+{
+	if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1))
+		rt_set_overload(rq);
+	else
+		rt_clear_overload(rq);
+}
 #endif /* CONFIG_SMP */
 
 /*
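Note: the overload predicate now lives in one place. A runqueue counts as RT-overloaded only when it has more than one runnable RT task and at least one of them may migrate. For orientation, a minimal sketch of rt_set_overload(), reconstructed by symmetry with the rt_clear_overload() context shown above (the real definition is elsewhere in this file and is not part of this hunk):

	/* Sketch only, by symmetry with rt_clear_overload() above. */
	static inline void rt_set_overload(struct rq *rq)
	{
		cpu_set(rq->cpu, rt_overload_mask);	/* publish this CPU in the overload mask */
		atomic_inc(&rto_count);			/* cheap "any overload at all?" counter */
	}
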
@@ -65,8 +73,10 @@ static inline void inc_rt_tasks(struct task_struct *p, struct rq *rq)
 #ifdef CONFIG_SMP
 	if (p->prio < rq->rt.highest_prio)
 		rq->rt.highest_prio = p->prio;
-	if (rq->rt.rt_nr_running > 1)
-		rt_set_overload(rq);
+	if (p->nr_cpus_allowed > 1)
+		rq->rt.rt_nr_migratory++;
+
+	update_rt_migration(rq);
 #endif /* CONFIG_SMP */
 }
 
@@ -88,8 +98,10 @@ static inline void dec_rt_tasks(struct task_struct *p, struct rq *rq)
 		} /* otherwise leave rq->highest prio alone */
 	} else
 		rq->rt.highest_prio = MAX_RT_PRIO;
-	if (rq->rt.rt_nr_running < 2)
-		rt_clear_overload(rq);
+	if (p->nr_cpus_allowed > 1)
+		rq->rt.rt_nr_migratory--;
+
+	update_rt_migration(rq);
 #endif /* CONFIG_SMP */
 }
 
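Note: taken together, inc_rt_tasks()/dec_rt_tasks() keep rt.rt_nr_migratory equal to the number of queued RT tasks allowed on more than one CPU, replacing the old heuristic that flagged overload from rt_nr_running alone. A worked trace under that invariant (hypothetical tasks, one runqueue):

	/*
	 * enqueue A (nr_cpus_allowed = 4): rt_nr_running = 1, rt_nr_migratory = 1
	 *                                  -> not overloaded (only one task)
	 * enqueue B (nr_cpus_allowed = 1): rt_nr_running = 2, rt_nr_migratory = 1
	 *                                  -> overloaded (two tasks, one can move)
	 * dequeue A:                       rt_nr_running = 1, rt_nr_migratory = 0
	 *                                  -> update_rt_migration() clears overload
	 *
	 * Had both tasks been pinned (nr_cpus_allowed == 1), the queue would
	 * never be flagged, even at rt_nr_running == 2: pinned tasks cannot
	 * be pulled anyway.
	 */
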
@@ -182,7 +194,8 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
 {
 	if (!task_running(rq, p) &&
-	    (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)))
+	    (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) &&
+	    (p->nr_cpus_allowed > 1))
 		return 1;
 	return 0;
 }
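Note: the extra clause keeps affinity-pinned tasks out of the push/pull candidate set, in comment form (illustrative, not part of the patch):

	/*
	 * A task bound to a single CPU, e.g. via sched_setaffinity() with a
	 * one-bit mask, has nr_cpus_allowed == 1, so pick_rt_task() now
	 * returns 0 for it and the balancer never tries to move it off its
	 * only legal CPU.
	 */
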
@@ -584,6 +597,32 @@ move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
 	/* don't touch RT tasks */
 	return 0;
 }
+static void set_cpus_allowed_rt(struct task_struct *p, cpumask_t *new_mask)
+{
+	int weight = cpus_weight(*new_mask);
+
+	BUG_ON(!rt_task(p));
+
+	/*
+	 * Update the migration status of the RQ if we have an RT task
+	 * which is running AND changing its weight value.
+	 */
+	if (p->se.on_rq && (weight != p->nr_cpus_allowed)) {
+		struct rq *rq = task_rq(p);
+
+		if ((p->nr_cpus_allowed <= 1) && (weight > 1))
+			rq->rt.rt_nr_migratory++;
+		else if ((p->nr_cpus_allowed > 1) && (weight <= 1)) {
+			BUG_ON(!rq->rt.rt_nr_migratory);
+			rq->rt.rt_nr_migratory--;
+		}
+
+		update_rt_migration(rq);
+	}
+
+	p->cpus_allowed    = *new_mask;
+	p->nr_cpus_allowed = weight;
+}
 #else /* CONFIG_SMP */
 # define schedule_tail_balance_rt(rq)	do { } while (0)
 # define schedule_balance_rt(rq, prev)	do { } while (0)
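Note: set_cpus_allowed_rt() only touches the counter on the two transitions that change whether a queued task is migratory (one CPU -> many, and many -> one); intermediate weight changes leave it alone. A hypothetical caller sketch, assuming dispatch through the .set_cpus_allowed class hook registered at the bottom of this patch:

	cpumask_t mask = CPU_MASK_NONE;		/* hypothetical caller sketch */

	cpu_set(1, mask);			/* pin p to CPU 1: weight becomes 1 */
	if (p->sched_class->set_cpus_allowed)
		p->sched_class->set_cpus_allowed(p, &mask);
	/* if p was queued with nr_cpus_allowed > 1: rt_nr_migratory--,
	 * then update_rt_migration() re-evaluates the overload flag */
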
@@ -637,6 +676,7 @@ const struct sched_class rt_sched_class = {
 #ifdef CONFIG_SMP
 	.load_balance		= load_balance_rt,
 	.move_one_task		= move_one_task_rt,
+	.set_cpus_allowed	= set_cpus_allowed_rt,
 #endif
 
 	.set_curr_task		= set_curr_task_rt,
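Note: with the method wired into rt_sched_class, the core set_cpus_allowed() path can defer affinity bookkeeping to the scheduling class. A sketch of the assumed core-side dispatch in sched.c, which this diff does not show (classes without the hook fall back to the generic update):

	/* Assumed shape of the sched.c side; not shown in this diff. */
	if (p->sched_class->set_cpus_allowed)
		p->sched_class->set_cpus_allowed(p, &new_mask);
	else {
		p->cpus_allowed = new_mask;
		p->nr_cpus_allowed = cpus_weight(new_mask);
	}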