-rw-r--r--  kernel/sched/cpufreq_schedutil.c |  2
-rw-r--r--  kernel/sched/rt.c                | 16
-rw-r--r--  kernel/sched/sched.h             |  5
3 files changed, 16 insertions(+), 7 deletions(-)
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 3cde46483f0a..c907fde01eaa 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -192,7 +192,7 @@ static unsigned long sugov_aggregate_util(struct sugov_cpu *sg_cpu)
 {
 	struct rq *rq = cpu_rq(sg_cpu->cpu);
 
-	if (rq->rt.rt_nr_running)
+	if (rt_rq_is_runnable(&rq->rt))
 		return sg_cpu->max;
 
 	/*
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 47556b0c9a95..572567078b60 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -508,8 +508,11 @@ static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 
 	rt_se = rt_rq->tg->rt_se[cpu];
 
-	if (!rt_se)
+	if (!rt_se) {
 		dequeue_top_rt_rq(rt_rq);
+		/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
+		cpufreq_update_util(rq_of_rt_rq(rt_rq), 0);
+	}
 	else if (on_rt_rq(rt_se))
 		dequeue_rt_entity(rt_se, 0);
 }
@@ -1001,8 +1004,6 @@ dequeue_top_rt_rq(struct rt_rq *rt_rq)
 	sub_nr_running(rq, rt_rq->rt_nr_running);
 	rt_rq->rt_queued = 0;
 
-	/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
-	cpufreq_update_util(rq, 0);
 }
 
 static void
@@ -1014,11 +1015,14 @@ enqueue_top_rt_rq(struct rt_rq *rt_rq)
 
 	if (rt_rq->rt_queued)
 		return;
-	if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
+
+	if (rt_rq_throttled(rt_rq))
 		return;
 
-	add_nr_running(rq, rt_rq->rt_nr_running);
-	rt_rq->rt_queued = 1;
+	if (rt_rq->rt_nr_running) {
+		add_nr_running(rq, rt_rq->rt_nr_running);
+		rt_rq->rt_queued = 1;
+	}
 
 	/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
 	cpufreq_update_util(rq, 0);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 6601baf2361c..27ddec334601 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -609,6 +609,11 @@ struct rt_rq {
 #endif
 };
 
+static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq)
+{
+	return rt_rq->rt_queued && rt_rq->rt_nr_running;
+}
+
 /* Deadline class' related fields in a runqueue */
 struct dl_rq {
 	/* runqueue is an rbtree, ordered by deadline */
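
The net effect of the hunks above: schedutil previously forced the max frequency whenever rq->rt.rt_nr_running was non-zero, while the new rt_rq_is_runnable() helper also requires rt_queued, and the cpufreq kick moves from dequeue_top_rt_rq() into sched_rt_rq_dequeue(). The sketch below is a standalone userspace model of the predicate change only, not kernel code; the rt_rq_model struct and the "dequeued top" scenario are assumptions made for the example.

/*
 * Illustrative userspace model of the predicate change; not kernel code.
 * The struct and scenario are assumptions made for this example only.
 */
#include <stdbool.h>
#include <stdio.h>

struct rt_rq_model {
	unsigned int rt_queued;     /* top-level rt_rq currently queued */
	unsigned int rt_nr_running; /* RT tasks accounted on this runqueue */
};

/* Old schedutil check: any accounted RT task forces the max frequency. */
static bool old_rt_check(const struct rt_rq_model *rt)
{
	return rt->rt_nr_running != 0;
}

/* New check, mirroring rt_rq_is_runnable(): rt_queued must be set too. */
static bool new_rt_check(const struct rt_rq_model *rt)
{
	return rt->rt_queued && rt->rt_nr_running;
}

int main(void)
{
	/*
	 * Tasks still accounted, but the top-level rt_rq is not queued,
	 * e.g. after dequeue_top_rt_rq() cleared rt_queued.
	 */
	struct rt_rq_model dequeued_top = { .rt_queued = 0, .rt_nr_running = 2 };

	/* Prints "old check: 1, new check: 0". */
	printf("old check: %d, new check: %d\n",
	       old_rt_check(&dequeued_top), new_rt_check(&dequeued_top));
	return 0;
}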