Diffstat (limited to 'kernel/sched_rt.c')

-rw-r--r--	kernel/sched_rt.c	62

1 file changed, 27 insertions, 35 deletions
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 3918e01994e0..2eb4bd6a526c 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -3,15 +3,18 @@
  * policies)
  */
 
+#ifdef CONFIG_RT_GROUP_SCHED
+
+#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
+
 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
 {
+#ifdef CONFIG_SCHED_DEBUG
+	WARN_ON_ONCE(!rt_entity_is_task(rt_se));
+#endif
 	return container_of(rt_se, struct task_struct, rt);
 }
 
-#ifdef CONFIG_RT_GROUP_SCHED
-
-#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
-
 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
 {
 	return rt_rq->rq;
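Note: under CONFIG_RT_GROUP_SCHED a sched_rt_entity can represent either a task or a group's rt_rq; only group entities have my_q set, which is what rt_entity_is_task() tests. Hoisting the macro above rt_task_of() lets debug builds (CONFIG_SCHED_DEBUG) warn when container_of() is about to be applied to a group entity, which would otherwise silently yield a bogus task_struct pointer.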
@@ -26,6 +29,11 @@ static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
 
 #define rt_entity_is_task(rt_se) (1)
 
+static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
+{
+	return container_of(rt_se, struct task_struct, rt);
+}
+
 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
 {
 	return container_of(rt_rq, struct rq, rt);
@@ -128,6 +136,11 @@ static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
 	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
 }
 
+static inline int has_pushable_tasks(struct rq *rq)
+{
+	return !plist_head_empty(&rq->rt.pushable_tasks);
+}
+
 #else
 
 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
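Note: has_pushable_tasks() is not new; it is moved up from its old spot next to find_lock_lowest_rq() (removed in a later hunk) so that pick_next_task_rt(), defined in between, can use it to latch the post-schedule decision shown further down.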
@@ -602,6 +615,8 @@ static void update_curr_rt(struct rq *rq)
 	curr->se.exec_start = rq->clock;
 	cpuacct_charge(curr, delta_exec);
 
+	sched_rt_avg_update(rq, delta_exec);
+
 	if (!rt_bandwidth_enabled())
 		return;
 
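Note: sched_rt_avg_update() lives in the scheduler core, not in this file. Roughly (reproduced from memory, so the exact kernel/sched.c code of this era may differ), it accumulates the time spent running RT tasks into a periodically decayed per-runqueue average, which the fair class can later use to scale down that CPU's effective power during load balancing:

/* Sketch of the core-side helper (kernel/sched.c), not part of this patch. */
static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
{
	rq->rt_avg += rt_delta;		/* time just spent in the RT class */
	sched_avg_update(rq);		/* decay rq->rt_avg as periods elapse */
}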
@@ -874,8 +889,6 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
 
 	if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
 		enqueue_pushable_task(rq, p);
-
-	inc_cpu_load(rq, p->se.load.weight);
 }
 
 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
@@ -886,8 +899,6 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
 	dequeue_rt_entity(rt_se);
 
 	dequeue_pushable_task(rq, p);
-
-	dec_cpu_load(rq, p->se.load.weight);
 }
 
 /*
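Note: with inc_cpu_load()/dec_cpu_load() dropped from enqueue_task_rt()/dequeue_task_rt(), RT tasks no longer add their load weight to the per-CPU load that fair-class load balancing examines. Instead, the CPU time they actually consume is captured by the sched_rt_avg_update() call added in update_curr_rt() above, and presumably feeds into reducing the cpu_power the fair class sees for this CPU.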
@@ -1064,6 +1075,14 @@ static struct task_struct *pick_next_task_rt(struct rq *rq)
 	if (p)
 		dequeue_pushable_task(rq, p);
 
+#ifdef CONFIG_SMP
+	/*
+	 * We detect this state here so that we can avoid taking the RQ
+	 * lock again later if there is no need to push
+	 */
+	rq->post_schedule = has_pushable_tasks(rq);
+#endif
+
 	return p;
 }
 
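Note: rq->post_schedule replaces the needs_post_schedule hook removed further down. The "will we need to push?" decision is latched here, while rq->lock is already held, so the post-switch path only retakes the lock when there is real work to do. The core-side consumer presumably looks roughly like this (a sketch of the matching kernel/sched.c change, reproduced from memory and possibly inexact):

/* Sketch only: how the scheduler core might consume rq->post_schedule. */
static inline void post_schedule(struct rq *rq)
{
	if (rq->post_schedule) {
		unsigned long flags;

		spin_lock_irqsave(&rq->lock, flags);
		if (rq->curr->sched_class->post_schedule)
			rq->curr->sched_class->post_schedule(rq);
		spin_unlock_irqrestore(&rq->lock, flags);

		rq->post_schedule = 0;
	}
}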
@@ -1162,13 +1181,6 @@ static int find_lowest_rq(struct task_struct *task)
 		return -1; /* No targets found */
 
 	/*
-	 * Only consider CPUs that are usable for migration.
-	 * I guess we might want to change cpupri_find() to ignore those
-	 * in the first place.
-	 */
-	cpumask_and(lowest_mask, lowest_mask, cpu_active_mask);
-
-	/*
 	 * At this point we have built a mask of cpus representing the
 	 * lowest priority tasks in the system. Now we want to elect
 	 * the best one based on our affinity and topology.
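Note: the explicit cpumask_and() against cpu_active_mask appears to be redundant because CPUs whose runqueue goes offline are dropped from the cpupri bookkeeping (rq_offline_rt(), listed in the sched_class table below, marks them CPUPRI_INVALID), so cpupri_find() should never report an inactive CPU in lowest_mask in the first place.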
@@ -1262,11 +1274,6 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 	return lowest_rq;
 }
 
-static inline int has_pushable_tasks(struct rq *rq)
-{
-	return !plist_head_empty(&rq->rt.pushable_tasks);
-}
-
 static struct task_struct *pick_next_pushable_task(struct rq *rq)
 {
 	struct task_struct *p;
@@ -1466,23 +1473,9 @@ static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
 		pull_rt_task(rq);
 }
 
-/*
- * assumes rq->lock is held
- */
-static int needs_post_schedule_rt(struct rq *rq)
-{
-	return has_pushable_tasks(rq);
-}
-
 static void post_schedule_rt(struct rq *rq)
 {
-	/*
-	 * This is only called if needs_post_schedule_rt() indicates that
-	 * we need to push tasks away
-	 */
-	spin_lock_irq(&rq->lock);
 	push_rt_tasks(rq);
-	spin_unlock_irq(&rq->lock);
 }
 
 /*
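Note: post_schedule_rt() no longer takes rq->lock itself; with the post_schedule flag latched in pick_next_task_rt(), the core is expected to acquire the lock once, call the class hook, and clear the flag (see the sketch above), which also makes the separate needs_post_schedule_rt() predicate unnecessary.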
@@ -1758,7 +1751,6 @@ static const struct sched_class rt_sched_class = {
 	.rq_online = rq_online_rt,
 	.rq_offline = rq_offline_rt,
 	.pre_schedule = pre_schedule_rt,
-	.needs_post_schedule = needs_post_schedule_rt,
 	.post_schedule = post_schedule_rt,
 	.task_wake_up = task_wake_up_rt,
 	.switched_from = switched_from_rt,
