Diffstat (limited to 'kernel/sched_rt.c')

 kernel/sched_rt.c | 82 ++++++++++++++++++++++++-------------------
 1 file changed, 45 insertions(+), 37 deletions(-)

diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 3918e01994e0..a4d790cddb19 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -3,15 +3,18 @@
  * policies)
  */
 
+#ifdef CONFIG_RT_GROUP_SCHED
+
+#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
+
 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
 {
+#ifdef CONFIG_SCHED_DEBUG
+	WARN_ON_ONCE(!rt_entity_is_task(rt_se));
+#endif
 	return container_of(rt_se, struct task_struct, rt);
 }
 
-#ifdef CONFIG_RT_GROUP_SCHED
-
-#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
-
 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
 {
 	return rt_rq->rq;
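The rt_entity_is_task() test reads as "an entity is a task iff it owns no group runqueue": under CONFIG_RT_GROUP_SCHED a sched_rt_entity can represent either a task or a whole task group, and only the latter has a my_q. For context, a trimmed sketch of the entity as declared in include/linux/sched.h around this time (paraphrased; exact layout may differ):

    struct sched_rt_entity {
    	struct list_head	run_list;
    	unsigned long		timeout;
    	unsigned int		time_slice;
    	int			nr_cpus_allowed;

    	struct sched_rt_entity	*back;
    #ifdef CONFIG_RT_GROUP_SCHED
    	struct sched_rt_entity	*parent;
    	/* rq on which this entity is (to be) queued: */
    	struct rt_rq		*rt_rq;
    	/* rq "owned" by this entity/group: */
    	struct rt_rq		*my_q;	/* NULL for plain tasks */
    #endif
    };

Moving the macro above rt_task_of() lets the WARN_ON_ONCE catch any caller that hands a group entity to container_of(), which would otherwise silently return a bogus task pointer.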
@@ -26,6 +29,11 @@ static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
 
 #define rt_entity_is_task(rt_se) (1)
 
+static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
+{
+	return container_of(rt_se, struct task_struct, rt);
+}
+
 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
 {
 	return container_of(rt_rq, struct rq, rt);
@@ -128,6 +136,11 @@ static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
 	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
 }
 
+static inline int has_pushable_tasks(struct rq *rq)
+{
+	return !plist_head_empty(&rq->rt.pushable_tasks);
+}
+
 #else
 
 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
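has_pushable_tasks() is pure code motion (it is deleted from its old location further down); hoisting it next to the plist enqueue/dequeue helpers is what allows pick_next_task_rt() to use it in a later hunk. The plist predicate it relies on is a one-liner; paraphrased from include/linux/plist.h:

    /* A priority-sorted list is empty iff its node list is empty. */
    static inline int plist_head_empty(const struct plist_head *head)
    {
    	return list_empty(&head->node_list);
    }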
@@ -602,6 +615,8 @@ static void update_curr_rt(struct rq *rq)
 	curr->se.exec_start = rq->clock;
 	cpuacct_charge(curr, delta_exec);
 
+	sched_rt_avg_update(rq, delta_exec);
+
 	if (!rt_bandwidth_enabled())
 		return;
 
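sched_rt_avg_update() folds each slice of RT runtime into a per-rq decaying average. A sketch of the helper as it appears in kernel/sched.c in this series (paraphrased):

    static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
    {
    	rq->rt_avg += rt_delta;
    	sched_avg_update(rq);	/* periodically ages/decays rq->rt_avg */
    }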
@@ -874,8 +889,6 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
 
 	if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
 		enqueue_pushable_task(rq, p);
-
-	inc_cpu_load(rq, p->se.load.weight);
 }
 
 static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
@@ -886,8 +899,6 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
 	dequeue_rt_entity(rt_se);
 
 	dequeue_pushable_task(rq, p);
-
-	dec_cpu_load(rq, p->se.load.weight);
 }
 
 /*
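The two inc_cpu_load()/dec_cpu_load() deletions pair with the sched_rt_avg_update() hook added above: RT tasks stop contributing their load.weight to rq->load, and the fair-class load balancer instead sees their CPU consumption through rq->rt_avg, which scales down that CPU's apparent power. Roughly, per the companion kernel/sched.c change (a paraphrase; details may differ):

    /* Capacity left after RT tasks, in SCHED_LOAD_SCALE units. */
    unsigned long scale_rt_power(int cpu)
    {
    	struct rq *rq = cpu_rq(cpu);
    	u64 total, available;

    	sched_avg_update(rq);
    	total = sched_avg_period() + (rq->clock - rq->age_stamp);
    	available = total - rq->rt_avg;

    	if (unlikely((s64)total < SCHED_LOAD_SCALE))
    		total = SCHED_LOAD_SCALE;
    	total >>= SCHED_LOAD_SHIFT;

    	return div_u64(available * SCHED_LOAD_SCALE, total);
    }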
@@ -927,10 +938,13 @@ static void yield_task_rt(struct rq *rq)
 #ifdef CONFIG_SMP
 static int find_lowest_rq(struct task_struct *task);
 
-static int select_task_rq_rt(struct task_struct *p, int sync)
+static int select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
 {
 	struct rq *rq = task_rq(p);
 
+	if (sd_flag != SD_BALANCE_WAKE)
+		return smp_processor_id();
+
 	/*
 	 * If the current task is an RT task, then
 	 * try to see if we can wake this RT task up on another
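The new sd_flag argument tells the class why placement is being requested; the push/pull heuristics below only make sense for true wakeups, so fork- and exec-time balancing fall back to the current CPU. The three callers in kernel/sched.c pass, roughly (paraphrased):

    /* try_to_wake_up():   */ cpu = p->sched_class->select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
    /* wake_up_new_task(): */ cpu = p->sched_class->select_task_rq(p, SD_BALANCE_FORK, 0);
    /* sched_exec():       */ cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0);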
@@ -988,7 +1002,7 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
 /*
  * Preempt the current task with a newly woken task if needed:
  */
-static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int sync)
+static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
 {
 	if (p->prio < rq->curr->prio) {
 		resched_task(rq->curr);
@@ -1064,6 +1078,14 @@ static struct task_struct *pick_next_task_rt(struct rq *rq)
 	if (p)
 		dequeue_pushable_task(rq, p);
 
+#ifdef CONFIG_SMP
+	/*
+	 * We detect this state here so that we can avoid taking the RQ
+	 * lock again later if there is no need to push
+	 */
+	rq->post_schedule = has_pushable_tasks(rq);
+#endif
+
 	return p;
 }
 
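Setting rq->post_schedule while rq->lock is still held is what lets the needs_post_schedule hook, and post_schedule_rt()'s own locking, disappear in the hunks below: the core scheduler consumes the flag and retakes the lock only when a push is actually pending. Paraphrased from the companion kernel/sched.c change:

    static inline void post_schedule(struct rq *rq)
    {
    	if (rq->post_schedule) {
    		unsigned long flags;

    		spin_lock_irqsave(&rq->lock, flags);
    		if (rq->curr->sched_class->post_schedule)
    			rq->curr->sched_class->post_schedule(rq);
    		spin_unlock_irqrestore(&rq->lock, flags);

    		rq->post_schedule = 0;
    	}
    }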
@@ -1162,13 +1184,6 @@ static int find_lowest_rq(struct task_struct *task)
 		return -1; /* No targets found */
 
 	/*
-	 * Only consider CPUs that are usable for migration.
-	 * I guess we might want to change cpupri_find() to ignore those
-	 * in the first place.
-	 */
-	cpumask_and(lowest_mask, lowest_mask, cpu_active_mask);
-
-	/*
 	 * At this point we have built a mask of cpus representing the
 	 * lowest priority tasks in the system. Now we want to elect
 	 * the best one based on our affinity and topology.
@@ -1262,11 +1277,6 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 	return lowest_rq;
 }
 
-static inline int has_pushable_tasks(struct rq *rq)
-{
-	return !plist_head_empty(&rq->rt.pushable_tasks);
-}
-
 static struct task_struct *pick_next_pushable_task(struct rq *rq)
 {
 	struct task_struct *p;
@@ -1466,23 +1476,9 @@ static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
 		pull_rt_task(rq);
 }
 
-/*
- * assumes rq->lock is held
- */
-static int needs_post_schedule_rt(struct rq *rq)
-{
-	return has_pushable_tasks(rq);
-}
-
 static void post_schedule_rt(struct rq *rq)
 {
-	/*
-	 * This is only called if needs_post_schedule_rt() indicates that
-	 * we need to push tasks away
-	 */
-	spin_lock_irq(&rq->lock);
 	push_rt_tasks(rq);
-	spin_unlock_irq(&rq->lock);
 }
 
 /*
@@ -1738,6 +1734,17 @@ static void set_curr_task_rt(struct rq *rq)
 	dequeue_pushable_task(rq, p);
 }
 
+unsigned int get_rr_interval_rt(struct task_struct *task)
+{
+	/*
+	 * Time slice is 0 for SCHED_FIFO tasks
+	 */
+	if (task->policy == SCHED_RR)
+		return DEF_TIMESLICE;
+	else
+		return 0;
+}
+
 static const struct sched_class rt_sched_class = {
 	.next = &fair_sched_class,
 	.enqueue_task = enqueue_task_rt,
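DEF_TIMESLICE is the scheduler's default round-robin quantum in jiffies, defined in kernel/sched.c as (100 * HZ / 1000), i.e. 100 ms. sys_sched_rr_get_interval() then does, in effect (paraphrased):

    jiffies_to_timespec(p->sched_class->get_rr_interval(p), &t);

SCHED_FIFO tasks run until they block, yield, or are preempted, so their reported quantum is 0.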
@@ -1758,7 +1765,6 @@ static const struct sched_class rt_sched_class = {
 	.rq_online = rq_online_rt,
 	.rq_offline = rq_offline_rt,
 	.pre_schedule = pre_schedule_rt,
-	.needs_post_schedule = needs_post_schedule_rt,
 	.post_schedule = post_schedule_rt,
 	.task_wake_up = task_wake_up_rt,
 	.switched_from = switched_from_rt,
@@ -1767,6 +1773,8 @@ static const struct sched_class rt_sched_class = {
 	.set_curr_task = set_curr_task_rt,
 	.task_tick = task_tick_rt,
 
+	.get_rr_interval = get_rr_interval_rt,
+
 	.prio_changed = prio_changed_rt,
 	.switched_to = switched_to_rt,
 };
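With .get_rr_interval wired into rt_sched_class, sched_rr_get_interval(2) reports a real quantum for SCHED_RR tasks and zero for SCHED_FIFO ones. A minimal userspace check (illustrative; run it from a task under the policy you want to inspect):

    #include <sched.h>
    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
    	struct timespec ts;

    	/* pid 0 means "the calling process". */
    	if (sched_rr_get_interval(0, &ts) != 0) {
    		perror("sched_rr_get_interval");
    		return 1;
    	}
    	printf("RR interval: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
    	return 0;
    }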
