Diffstat (limited to 'kernel/sched/rt.c')
-rw-r--r--	kernel/sched/rt.c	50
1 files changed, 40 insertions, 10 deletions
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 3640ebbb466b..44af55e6d5d0 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -778,12 +778,9 @@ static inline int balance_runtime(struct rt_rq *rt_rq)
 
 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 {
-	int i, idle = 1;
+	int i, idle = 1, throttled = 0;
 	const struct cpumask *span;
 
-	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
-		return 1;
-
 	span = sched_rt_period_mask();
 	for_each_cpu(i, span) {
 		int enqueue = 0;
@@ -818,12 +815,17 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 			if (!rt_rq_throttled(rt_rq))
 				enqueue = 1;
 		}
+		if (rt_rq->rt_throttled)
+			throttled = 1;
 
 		if (enqueue)
 			sched_rt_rq_enqueue(rt_rq);
 		raw_spin_unlock(&rq->lock);
 	}
 
+	if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
+		return 1;
+
 	return idle;
 }
 
@@ -855,8 +857,30 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
 		return 0;
 
 	if (rt_rq->rt_time > runtime) {
-		rt_rq->rt_throttled = 1;
-		printk_once(KERN_WARNING "sched: RT throttling activated\n");
+		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
+
+		/*
+		 * Don't actually throttle groups that have no runtime assigned
+		 * but accrue some time due to boosting.
+		 */
+		if (likely(rt_b->rt_runtime)) {
+			static bool once = false;
+
+			rt_rq->rt_throttled = 1;
+
+			if (!once) {
+				once = true;
+				printk_sched("sched: RT throttling activated\n");
+			}
+		} else {
+			/*
+			 * In case we did anyway, make it go away,
+			 * replenishment is a joke, since it will replenish us
+			 * with exactly 0 ns.
+			 */
+			rt_rq->rt_time = 0;
+		}
+
 		if (rt_rq_throttled(rt_rq)) {
 			sched_rt_rq_dequeue(rt_rq);
 			return 1;
@@ -884,7 +908,8 @@ static void update_curr_rt(struct rq *rq)
 	if (unlikely((s64)delta_exec < 0))
 		delta_exec = 0;
 
-	schedstat_set(curr->se.statistics.exec_max, max(curr->se.statistics.exec_max, delta_exec));
+	schedstat_set(curr->se.statistics.exec_max,
+		      max(curr->se.statistics.exec_max, delta_exec));
 
 	curr->se.sum_exec_runtime += delta_exec;
 	account_group_exec_runtime(curr, delta_exec);
@@ -1403,7 +1428,7 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
 next_idx:
 		if (idx >= MAX_RT_PRIO)
 			continue;
-		if (next && next->prio < idx)
+		if (next && next->prio <= idx)
 			continue;
 		list_for_each_entry(rt_se, array->queue + idx, run_list) {
 			struct task_struct *p;
@@ -1587,6 +1612,11 @@ static int push_rt_task(struct rq *rq)
 	if (!next_task)
 		return 0;
 
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+	if (unlikely(task_running(rq, next_task)))
+		return 0;
+#endif
+
 retry:
 	if (unlikely(next_task == rq->curr)) {
 		WARN_ON(1);
@@ -1967,7 +1997,7 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
 	if (--p->rt.time_slice)
 		return;
 
-	p->rt.time_slice = DEF_TIMESLICE;
+	p->rt.time_slice = RR_TIMESLICE;
 
 	/*
 	 * Requeue to the end of queue if we are not the only element
@@ -1995,7 +2025,7 @@ static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
 	 * Time slice is 0 for SCHED_FIFO tasks
 	 */
 	if (task->policy == SCHED_RR)
-		return DEF_TIMESLICE;
+		return RR_TIMESLICE;
 	else
 		return 0;
 }