Diffstat (limited to 'kernel/sched_rt.c')
 kernel/sched_rt.c | 23 +++++++++++++----------
 1 file changed, 13 insertions(+), 10 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 908c04f9dad0..552310798dad 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -199,6 +199,8 @@ static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
 
 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 {
+	if (rt_rq->rt_nr_running)
+		resched_task(rq_of_rt_rq(rt_rq)->curr);
 }
 
 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
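
With this hunk applied, what appears to be the non-group (!CONFIG_RT_GROUP_SCHED) variant of sched_rt_rq_enqueue() stops being a no-op: unthrottling a runqueue that still has runnable RT tasks now kicks whatever is currently running on that rq. Reconstructed from the hunk above (not taken from a separate source), the resulting function reads:

	static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
	{
		/* If RT tasks are runnable again, preempt the current task. */
		if (rt_rq->rt_nr_running)
			resched_task(rq_of_rt_rq(rt_rq)->curr);
	}
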
@@ -298,7 +300,7 @@ static void __disable_runtime(struct rq *rq)
 			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
 			s64 diff;
 
-			if (iter == rt_rq)
+			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
 				continue;
 
 			spin_lock(&iter->rt_runtime_lock);
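
The extra condition keeps __disable_runtime() from trying to reclaim runtime out of a sibling rt_rq whose budget is unlimited: RUNTIME_INF is a sentinel value, so doing arithmetic on it would silently turn an infinite budget into a bogus finite one. A hypothetical sketch of the reclaim step the loop performs on the siblings it does not skip (the variable "want" and the exact bookkeeping are assumptions; only iter, diff and the locking appear in the context above):

	spin_lock(&iter->rt_runtime_lock);
	/* Take as much of the outstanding deficit as this sibling can give. */
	diff = min_t(s64, iter->rt_runtime, want);
	iter->rt_runtime -= diff;
	want -= diff;
	spin_unlock(&iter->rt_runtime_lock);
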
@@ -438,9 +440,6 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
 {
 	u64 runtime = sched_rt_runtime(rt_rq);
 
-	if (runtime == RUNTIME_INF)
-		return 0;
-
 	if (rt_rq->rt_throttled)
 		return rt_rq_throttled(rt_rq);
 
@@ -491,9 +490,11 @@ static void update_curr_rt(struct rq *rq)
 		rt_rq = rt_rq_of_se(rt_se);
 
 		spin_lock(&rt_rq->rt_runtime_lock);
-		rt_rq->rt_time += delta_exec;
-		if (sched_rt_runtime_exceeded(rt_rq))
-			resched_task(curr);
+		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
+			rt_rq->rt_time += delta_exec;
+			if (sched_rt_runtime_exceeded(rt_rq))
+				resched_task(curr);
+		}
 		spin_unlock(&rt_rq->rt_runtime_lock);
 	}
 }
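
This pairs with the -438,9 hunk above: the RUNTIME_INF early-return moves out of sched_rt_runtime_exceeded() and into the caller shown here, so a group with unlimited runtime no longer accumulates rt_time at all, rather than accumulating it and then ignoring it. Reconstructed from the hunk (the for_each_sched_rt_entity() wrapper comes from the update_curr_rt() loop this hunk patches), the accounting ends up as:

	for_each_sched_rt_entity(rt_se) {
		rt_rq = rt_rq_of_se(rt_se);

		spin_lock(&rt_rq->rt_runtime_lock);
		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
			/* Only finite budgets are charged and can throttle. */
			rt_rq->rt_time += delta_exec;
			if (sched_rt_runtime_exceeded(rt_rq))
				resched_task(curr);
		}
		spin_unlock(&rt_rq->rt_runtime_lock);
	}
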
@@ -861,6 +862,8 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 #define RT_MAX_TRIES 3
 
 static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
+static void double_unlock_balance(struct rq *this_rq, struct rq *busiest);
+
 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
 
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
@@ -1022,7 +1025,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 			break;
 
 		/* try again */
-		spin_unlock(&lowest_rq->lock);
+		double_unlock_balance(rq, lowest_rq);
 		lowest_rq = NULL;
 	}
 
@@ -1091,7 +1094,7 @@ static int push_rt_task(struct rq *rq)
 
 	resched_task(lowest_rq->curr);
 
-	spin_unlock(&lowest_rq->lock);
+	double_unlock_balance(rq, lowest_rq);
 
 	ret = 1;
 out:
@@ -1197,7 +1200,7 @@ static int pull_rt_task(struct rq *this_rq)
 
 		}
  skip:
-		spin_unlock(&src_rq->lock);
+		double_unlock_balance(this_rq, src_rq);
 	}
 
 	return ret;
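
The last three hunks replace a bare spin_unlock() of the remote runqueue with double_unlock_balance(), the counterpart to double_lock_balance() declared in the +865 hunk above. Only the declaration is visible in this file; a minimal sketch of what such a helper presumably does, assuming double_lock_balance() changes the lockdep subclass of this_rq's lock and the unlock side must restore it (the body below is an assumption, not taken from this diff):

	static void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
		__releases(busiest->lock)
	{
		spin_unlock(&busiest->lock);
		/* Undo the lockdep subclass change made by double_lock_balance(). */
		lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
	}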