author		Ingo Molnar <mingo@elte.hu>	2008-09-24 04:31:34 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-09-24 04:31:34 -0400
commit		e6aa0f07cb5e81a7cbeaf3be6e2101234c2f0d30 (patch)
tree		77926550ac0c31b1423bcf193a4ed0ecb7fda2c1 /kernel/sched_rt.c
parent		d4738792fb86600b6cb7220459d9c47e819b3580 (diff)
parent		72d31053f62c4bc464c2783974926969614a8649 (diff)
Merge commit 'v2.6.27-rc7' into x86/microcode
Diffstat (limited to 'kernel/sched_rt.c')
-rw-r--r--	kernel/sched_rt.c	24
1 file changed, 14 insertions(+), 10 deletions(-)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 908c04f9dad0..1113157b2058 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -199,6 +199,8 @@ static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
 
 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 {
+	if (rt_rq->rt_nr_running)
+		resched_task(rq_of_rt_rq(rt_rq)->curr);
 }
 
 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
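The hunk above makes sched_rt_rq_enqueue() kick the runqueue's current task as soon as the rt_rq has runnable tasks, rather than waiting for the next scheduling event. A minimal user-space sketch of that behavior follows; struct rq, struct rt_rq and the two helpers are hypothetical stand-ins (the kernel's resched_task() takes a task_struct, for instance), kept only detailed enough to show the idea.

/*
 * Hypothetical stand-ins, *not* the kernel's definitions: just enough
 * structure to show the reschedule-on-enqueue behavior.
 */
#include <stdio.h>

struct rq {
	int need_resched;		/* stand-in for the resched flag */
};

struct rt_rq {
	struct rq *rq;			/* owning runqueue */
	unsigned int rt_nr_running;	/* runnable RT tasks on this rt_rq */
};

static struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return rt_rq->rq;
}

static void resched_task(struct rq *rq)
{
	rq->need_resched = 1;		/* the kernel would poke the CPU */
}

/* mirrors the patched sched_rt_rq_enqueue(): only reschedule when the
 * rt_rq actually has runnable tasks */
static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	if (rt_rq->rt_nr_running)
		resched_task(rq_of_rt_rq(rt_rq));
}

int main(void)
{
	struct rq rq = { 0 };
	struct rt_rq rt_rq = { &rq, 1 };

	sched_rt_rq_enqueue(&rt_rq);
	printf("need_resched = %d\n", rq.need_resched);	/* prints 1 */
	return 0;
}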
@@ -298,7 +300,7 @@ static void __disable_runtime(struct rq *rq)
 		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
 		s64 diff;
 
-		if (iter == rt_rq)
+		if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
 			continue;
 
 		spin_lock(&iter->rt_runtime_lock);
@@ -348,6 +350,7 @@ static void __enable_runtime(struct rq *rq)
 		spin_lock(&rt_rq->rt_runtime_lock);
 		rt_rq->rt_runtime = rt_b->rt_runtime;
 		rt_rq->rt_time = 0;
+		rt_rq->rt_throttled = 0;
 		spin_unlock(&rt_rq->rt_runtime_lock);
 		spin_unlock(&rt_b->rt_runtime_lock);
 	}
@@ -438,9 +441,6 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
 {
 	u64 runtime = sched_rt_runtime(rt_rq);
 
-	if (runtime == RUNTIME_INF)
-		return 0;
-
 	if (rt_rq->rt_throttled)
 		return rt_rq_throttled(rt_rq);
 
@@ -491,9 +491,11 @@ static void update_curr_rt(struct rq *rq)
 		rt_rq = rt_rq_of_se(rt_se);
 
 		spin_lock(&rt_rq->rt_runtime_lock);
-		rt_rq->rt_time += delta_exec;
-		if (sched_rt_runtime_exceeded(rt_rq))
-			resched_task(curr);
+		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
+			rt_rq->rt_time += delta_exec;
+			if (sched_rt_runtime_exceeded(rt_rq))
+				resched_task(curr);
+		}
 		spin_unlock(&rt_rq->rt_runtime_lock);
 	}
 }
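This hunk moves the RUNTIME_INF check out of sched_rt_runtime_exceeded() (removed two hunks above) and into the caller, so groups with unlimited runtime no longer accumulate rt_time at all. A condensed user-space sketch of the new control flow, with simplified stand-in types and helpers that are not the kernel's:

/*
 * Simplified stand-ins; not the kernel's types. Shows the new flow in
 * update_curr_rt(): unlimited-runtime groups skip accounting entirely.
 */
#include <stdint.h>
#include <stdio.h>

#define RUNTIME_INF ((uint64_t)~0ULL)

struct rt_rq {
	uint64_t rt_runtime;	/* budget per period, or RUNTIME_INF */
	uint64_t rt_time;	/* time consumed in the current period */
	int rt_throttled;
};

static uint64_t sched_rt_runtime(struct rt_rq *rt_rq)
{
	return rt_rq->rt_runtime;
}

/* analogue of sched_rt_runtime_exceeded(): the RUNTIME_INF early-out
 * is gone, because the caller now filters that case */
static int rt_runtime_exceeded(struct rt_rq *rt_rq)
{
	if (rt_rq->rt_time > sched_rt_runtime(rt_rq)) {
		rt_rq->rt_throttled = 1;
		return 1;
	}
	return 0;
}

/* analogue of the accounting block in update_curr_rt() */
static void account(struct rt_rq *rt_rq, uint64_t delta_exec)
{
	if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
		rt_rq->rt_time += delta_exec;
		if (rt_runtime_exceeded(rt_rq))
			printf("would resched_task(curr)\n");
	}
}

int main(void)
{
	struct rt_rq limited = { 1000, 0, 0 };
	struct rt_rq unlimited = { RUNTIME_INF, 0, 0 };

	account(&limited, 1500);	/* 1500 > 1000: throttles */
	account(&unlimited, 1500);	/* no accounting at all */
	printf("limited: throttled=%d  unlimited: rt_time=%llu\n",
	       limited.rt_throttled,
	       (unsigned long long)unlimited.rt_time);
	return 0;
}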
@@ -861,6 +863,8 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 #define RT_MAX_TRIES 3
 
 static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
+static void double_unlock_balance(struct rq *this_rq, struct rq *busiest);
+
 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
 
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
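The three hunks that follow pair every double_lock_balance() with the newly declared double_unlock_balance() instead of a bare spin_unlock(&...->lock), keeping the lock/unlock discipline symmetric. A rough user-space illustration of that pairing: pthread mutexes stand in for rq->lock, and double_lock_balance() is reduced to void, whereas the kernel version reports whether it had to drop this_rq->lock to respect lock ordering.

/*
 * Rough illustration only; pthread mutexes are stand-ins for rq->lock
 * and both functions are simplified relative to the kernel's.
 */
#include <pthread.h>

struct rq {
	pthread_mutex_t lock;
};

/* caller already holds this_rq->lock; take busiest->lock too, ending
 * up with both locks held, acquired in address order to avoid deadlock */
static void double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
	if (busiest < this_rq) {
		/* wrong order: drop ours, then retake both in order */
		pthread_mutex_unlock(&this_rq->lock);
		pthread_mutex_lock(&busiest->lock);
		pthread_mutex_lock(&this_rq->lock);
	} else {
		pthread_mutex_lock(&busiest->lock);
	}
}

/* the counterpart this diff switches to: release only busiest->lock,
 * leaving this_rq->lock held, so every lock has a matching unlock */
static void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
{
	(void)this_rq;			/* kept for signature symmetry */
	pthread_mutex_unlock(&busiest->lock);
}

int main(void)
{
	struct rq a = { PTHREAD_MUTEX_INITIALIZER };
	struct rq b = { PTHREAD_MUTEX_INITIALIZER };

	pthread_mutex_lock(&a.lock);		/* this_rq->lock on entry */
	double_lock_balance(&a, &b);
	/* ... migrate tasks while holding both locks ... */
	double_unlock_balance(&a, &b);		/* b released, a still held */
	pthread_mutex_unlock(&a.lock);
	return 0;
}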
@@ -1022,7 +1026,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 			break;
 
 		/* try again */
-		spin_unlock(&lowest_rq->lock);
+		double_unlock_balance(rq, lowest_rq);
 		lowest_rq = NULL;
 	}
 
@@ -1091,7 +1095,7 @@ static int push_rt_task(struct rq *rq)
 
 	resched_task(lowest_rq->curr);
 
-	spin_unlock(&lowest_rq->lock);
+	double_unlock_balance(rq, lowest_rq);
 
 	ret = 1;
 out:
@@ -1197,7 +1201,7 @@ static int pull_rt_task(struct rq *this_rq)
 
 	}
  skip:
-		spin_unlock(&src_rq->lock);
+		double_unlock_balance(this_rq, src_rq);
 	}
 
 	return ret;