Diffstat (limited to 'kernel/sched_rt.c')
 kernel/sched_rt.c | 16 +++++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)
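Read together, the five hunks below appear to tighten two things in the RT bandwidth code: how the RUNTIME_INF sentinel (no bandwidth limit) is handled, and how a throttled runqueue recovers. Accounting is skipped entirely for unlimited runqueues, re-enabling runtime also clears the throttled flag, and enqueueing a populated rt_rq now forces a resched. Short annotations follow each hunk.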
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 6163e4cf885b..1113157b2058 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -199,6 +199,8 @@ static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
 
 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 {
+	if (rt_rq->rt_nr_running)
+		resched_task(rq_of_rt_rq(rt_rq)->curr);
 }
 
 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
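This first hunk fills in what was an empty stub; judging from the inline helpers around it, this is the !CONFIG_RT_GROUP_SCHED variant of sched_rt_rq_enqueue(). After the patch, enqueueing an rt_rq that has runnable tasks pokes the CPU's current task, so the scheduler gets a chance to switch to the waiting real-time work (for instance right after the rt_rq is unthrottled). The function as it reads with the patch applied:

	static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
	{
		/* Something became runnable here: ask the CPU's current
		 * task to reschedule so an RT task can be picked. */
		if (rt_rq->rt_nr_running)
			resched_task(rq_of_rt_rq(rt_rq)->curr);
	}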
@@ -298,7 +300,7 @@ static void __disable_runtime(struct rq *rq)
 		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
 		s64 diff;
 
-		if (iter == rt_rq)
+		if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
 			continue;
 
 		spin_lock(&iter->rt_runtime_lock);
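__disable_runtime() scans the sibling rt_rqs on the other CPUs to balance runtime back when one runqueue's bandwidth is being torn down. The new condition skips any sibling whose rt_runtime is RUNTIME_INF: an unlimited runqueue has no meaningful budget to reclaim from, and feeding the sentinel into the s64 diff arithmetic a few lines below would produce nonsense. (That RUNTIME_INF is an all-ones u64 sentinel is an assumption about this era of the scheduler code, not visible in the hunk itself.)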
@@ -348,6 +350,7 @@ static void __enable_runtime(struct rq *rq)
 		spin_lock(&rt_rq->rt_runtime_lock);
 		rt_rq->rt_runtime = rt_b->rt_runtime;
 		rt_rq->rt_time = 0;
+		rt_rq->rt_throttled = 0;
 		spin_unlock(&rt_rq->rt_runtime_lock);
 		spin_unlock(&rt_b->rt_runtime_lock);
 	}
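When runtime is re-enabled, __enable_runtime() resets each rt_rq to its defaults. Previously this restored the budget and zeroed the consumed time but left the throttled flag set, so a runqueue throttled before the disable/enable cycle could stay stalled. The reset block with the patch applied:

	spin_lock(&rt_rq->rt_runtime_lock);
	rt_rq->rt_runtime = rt_b->rt_runtime;	/* restore the full per-period budget */
	rt_rq->rt_time = 0;			/* discard consumed time */
	rt_rq->rt_throttled = 0;		/* and unthrottle, so tasks may run again */
	spin_unlock(&rt_rq->rt_runtime_lock);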
@@ -438,9 +441,6 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
 {
 	u64 runtime = sched_rt_runtime(rt_rq);
 
-	if (runtime == RUNTIME_INF)
-		return 0;
-
 	if (rt_rq->rt_throttled)
 		return rt_rq_throttled(rt_rq);
 
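The RUNTIME_INF early-out is not simply dropped; it migrates to the caller (see the next hunk). With the check inside sched_rt_runtime_exceeded(), an unlimited rt_rq still accumulated rt_time and only ignored it here; with the check hoisted into update_curr_rt(), no time is charged in the first place. The runtime local presumably remains in use by the budget comparison further down in this function, outside the visible context.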
@@ -491,9 +491,11 @@ static void update_curr_rt(struct rq *rq)
 		rt_rq = rt_rq_of_se(rt_se);
 
 		spin_lock(&rt_rq->rt_runtime_lock);
-		rt_rq->rt_time += delta_exec;
-		if (sched_rt_runtime_exceeded(rt_rq))
-			resched_task(curr);
+		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
+			rt_rq->rt_time += delta_exec;
+			if (sched_rt_runtime_exceeded(rt_rq))
+				resched_task(curr);
+		}
 		spin_unlock(&rt_rq->rt_runtime_lock);
 	}
 }
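The accounting path in update_curr_rt() now tests for unlimited runtime up front, under the same rt_runtime_lock, and charges delta_exec only when a real budget exists. A simplified view of the resulting critical section:

	spin_lock(&rt_rq->rt_runtime_lock);
	if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
		rt_rq->rt_time += delta_exec;		/* charge the time just run */
		if (sched_rt_runtime_exceeded(rt_rq))	/* over budget this period? */
			resched_task(curr);		/* throttle: force a resched */
	}
	spin_unlock(&rt_rq->rt_runtime_lock);

Doing the RUNTIME_INF test before charging pairs with the removal in the previous hunk: an unlimited runqueue never builds up rt_time, so no stale accounting is left behind if a bandwidth limit is set later.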
