Diffstat (limited to 'kernel/sched_rt.c')
 -rw-r--r--  kernel/sched_rt.c | 106
 1 file changed, 65 insertions(+), 41 deletions(-)
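[Note: every hunk below applies the same conversion, moving kernel/sched_rt.c from the old fixed-size cpumask API (a cpumask_t on the stack, cpu_set()/cpus_and()/cpus_weight()/for_each_cpu_mask()) to the pointer-based API (const struct cpumask *, cpumask_var_t, cpumask_set_cpu()/cpumask_and()/cpumask_weight()/for_each_cpu()). A minimal sketch of that pattern follows; the helper name is hypothetical and not part of this patch.]

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Hypothetical helper, not from the patch: count the CPUs present in both masks. */
static int count_common_cpus(const struct cpumask *a, const struct cpumask *b)
{
        cpumask_var_t tmp;
        int weight;

        /* With CONFIG_CPUMASK_OFFSTACK=y this allocates from the heap and can fail. */
        if (!alloc_cpumask_var(&tmp, GFP_ATOMIC))
                return -ENOMEM;

        cpumask_and(tmp, a, b);         /* was: cpus_and(t, *a, *b) */
        weight = cpumask_weight(tmp);   /* was: cpus_weight(t)      */

        free_cpumask_var(tmp);
        return weight;
}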
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 51d2af3e6191..da932f4c8524 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -15,7 +15,7 @@ static inline void rt_set_overload(struct rq *rq)
 	if (!rq->online)
 		return;
 
-	cpu_set(rq->cpu, rq->rd->rto_mask);
+	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
 	/*
 	 * Make sure the mask is visible before we set
 	 * the overload count. That is checked to determine
@@ -34,7 +34,7 @@ static inline void rt_clear_overload(struct rq *rq)
 
 	/* the order here really doesn't matter */
 	atomic_dec(&rq->rd->rto_count);
-	cpu_clear(rq->cpu, rq->rd->rto_mask);
+	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
 }
 
 static void update_rt_migration(struct rq *rq)
@@ -139,14 +139,14 @@ static int rt_se_boosted(struct sched_rt_entity *rt_se)
 }
 
 #ifdef CONFIG_SMP
-static inline cpumask_t sched_rt_period_mask(void)
+static inline const struct cpumask *sched_rt_period_mask(void)
 {
 	return cpu_rq(smp_processor_id())->rd->span;
 }
 #else
-static inline cpumask_t sched_rt_period_mask(void)
+static inline const struct cpumask *sched_rt_period_mask(void)
 {
-	return cpu_online_map;
+	return cpu_online_mask;
 }
 #endif
 
@@ -212,9 +212,9 @@ static inline int rt_rq_throttled(struct rt_rq *rt_rq)
 	return rt_rq->rt_throttled;
 }
 
-static inline cpumask_t sched_rt_period_mask(void)
+static inline const struct cpumask *sched_rt_period_mask(void)
 {
-	return cpu_online_map;
+	return cpu_online_mask;
 }
 
 static inline
@@ -241,11 +241,11 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
 	int i, weight, more = 0;
 	u64 rt_period;
 
-	weight = cpus_weight(rd->span);
+	weight = cpumask_weight(rd->span);
 
 	spin_lock(&rt_b->rt_runtime_lock);
 	rt_period = ktime_to_ns(rt_b->rt_period);
-	for_each_cpu_mask_nr(i, rd->span) {
+	for_each_cpu(i, rd->span) {
 		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
 		s64 diff;
 
@@ -324,7 +324,7 @@ static void __disable_runtime(struct rq *rq)
 		/*
 		 * Greedy reclaim, take back as much as we can.
 		 */
-		for_each_cpu_mask(i, rd->span) {
+		for_each_cpu(i, rd->span) {
 			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
 			s64 diff;
 
@@ -429,13 +429,13 @@ static inline int balance_runtime(struct rt_rq *rt_rq)
 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 {
 	int i, idle = 1;
-	cpumask_t span;
+	const struct cpumask *span;
 
 	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
 		return 1;
 
 	span = sched_rt_period_mask();
-	for_each_cpu_mask(i, span) {
+	for_each_cpu(i, span) {
 		int enqueue = 0;
 		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
 		struct rq *rq = rq_of_rt_rq(rt_rq);
@@ -805,17 +805,20 @@ static int select_task_rq_rt(struct task_struct *p, int sync)
 
 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
 {
-	cpumask_t mask;
+	cpumask_var_t mask;
 
 	if (rq->curr->rt.nr_cpus_allowed == 1)
 		return;
 
-	if (p->rt.nr_cpus_allowed != 1
-	    && cpupri_find(&rq->rd->cpupri, p, &mask))
+	if (!alloc_cpumask_var(&mask, GFP_ATOMIC))
 		return;
 
-	if (!cpupri_find(&rq->rd->cpupri, rq->curr, &mask))
-		return;
+	if (p->rt.nr_cpus_allowed != 1
+	    && cpupri_find(&rq->rd->cpupri, p, mask))
+		goto free;
+
+	if (!cpupri_find(&rq->rd->cpupri, rq->curr, mask))
+		goto free;
 
 	/*
 	 * There appears to be other cpus that can accept
@@ -824,6 +827,8 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
 	 */
 	requeue_task_rt(rq, p, 1);
 	resched_task(rq->curr);
+free:
+	free_cpumask_var(mask);
 }
 
 #endif /* CONFIG_SMP */
@@ -914,7 +919,7 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
 {
 	if (!task_running(rq, p) &&
-	    (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) &&
+	    (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) &&
 	    (p->rt.nr_cpus_allowed > 1))
 		return 1;
 	return 0;
@@ -953,18 +958,19 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
 	return next;
 }
 
-static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);
+static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
 
-static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
+static inline int pick_optimal_cpu(int this_cpu,
+				   const struct cpumask *mask)
 {
 	int first;
 
 	/* "this_cpu" is cheaper to preempt than a remote processor */
-	if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
+	if ((this_cpu != -1) && cpumask_test_cpu(this_cpu, mask))
 		return this_cpu;
 
-	first = first_cpu(*mask);
-	if (first != NR_CPUS)
+	first = cpumask_first(mask);
+	if (first < nr_cpu_ids)
 		return first;
 
 	return -1;
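[Note: besides the type change, first_cpu()/NR_CPUS becomes cpumask_first()/nr_cpu_ids here; the new API reports "no CPU found" as a value greater than or equal to nr_cpu_ids rather than exactly NR_CPUS. A hedged sketch of a caller, with a hypothetical helper name:]

#include <linux/cpumask.h>

/* Hypothetical helper: first CPU set in @mask, or -1 if the mask is empty. */
static int first_cpu_or_none(const struct cpumask *mask)
{
        int cpu = cpumask_first(mask);

        return cpu < nr_cpu_ids ? cpu : -1;
}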
@@ -973,9 +979,10 @@ static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
 static int find_lowest_rq(struct task_struct *task)
 {
 	struct sched_domain *sd;
-	cpumask_t *lowest_mask = &__get_cpu_var(local_cpu_mask);
+	struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
 	int this_cpu = smp_processor_id();
 	int cpu = task_cpu(task);
+	cpumask_var_t domain_mask;
 
 	if (task->rt.nr_cpus_allowed == 1)
 		return -1; /* No other targets possible */
@@ -988,7 +995,7 @@ static int find_lowest_rq(struct task_struct *task)
 	 * I guess we might want to change cpupri_find() to ignore those
 	 * in the first place.
 	 */
-	cpus_and(*lowest_mask, *lowest_mask, cpu_active_map);
+	cpumask_and(lowest_mask, lowest_mask, cpu_active_mask);
 
 	/*
 	 * At this point we have built a mask of cpus representing the
@@ -998,7 +1005,7 @@ static int find_lowest_rq(struct task_struct *task)
 	 * We prioritize the last cpu that the task executed on since
 	 * it is most likely cache-hot in that location.
 	 */
-	if (cpu_isset(cpu, *lowest_mask))
+	if (cpumask_test_cpu(cpu, lowest_mask))
 		return cpu;
 
 	/*
@@ -1008,18 +1015,25 @@ static int find_lowest_rq(struct task_struct *task)
 	if (this_cpu == cpu)
 		this_cpu = -1; /* Skip this_cpu opt if the same */
 
-	for_each_domain(cpu, sd) {
-		if (sd->flags & SD_WAKE_AFFINE) {
-			cpumask_t domain_mask;
-			int best_cpu;
+	if (alloc_cpumask_var(&domain_mask, GFP_ATOMIC)) {
+		for_each_domain(cpu, sd) {
+			if (sd->flags & SD_WAKE_AFFINE) {
+				int best_cpu;
+
+				cpumask_and(domain_mask,
+					    sched_domain_span(sd),
+					    lowest_mask);
 
-			cpus_and(domain_mask, sd->span, *lowest_mask);
+				best_cpu = pick_optimal_cpu(this_cpu,
+							    domain_mask);
 
-			best_cpu = pick_optimal_cpu(this_cpu,
-						    &domain_mask);
-			if (best_cpu != -1)
-				return best_cpu;
+				if (best_cpu != -1) {
+					free_cpumask_var(domain_mask);
+					return best_cpu;
+				}
+			}
 		}
+		free_cpumask_var(domain_mask);
 	}
 
 	/*
@@ -1054,8 +1068,8 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 		 * Also make sure that it wasn't scheduled on its rq.
 		 */
 		if (unlikely(task_rq(task) != rq ||
-			     !cpu_isset(lowest_rq->cpu,
-					task->cpus_allowed) ||
+			     !cpumask_test_cpu(lowest_rq->cpu,
+					       &task->cpus_allowed) ||
 			     task_running(rq, task) ||
 			     !task->se.on_rq)) {
 
@@ -1176,7 +1190,7 @@ static int pull_rt_task(struct rq *this_rq)
 
 	next = pick_next_task_rt(this_rq);
 
-	for_each_cpu_mask_nr(cpu, this_rq->rd->rto_mask) {
+	for_each_cpu(cpu, this_rq->rd->rto_mask) {
 		if (this_cpu == cpu)
 			continue;
 
@@ -1305,9 +1319,9 @@ move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
 }
 
 static void set_cpus_allowed_rt(struct task_struct *p,
-				const cpumask_t *new_mask)
+				const struct cpumask *new_mask)
 {
-	int weight = cpus_weight(*new_mask);
+	int weight = cpumask_weight(new_mask);
 
 	BUG_ON(!rt_task(p));
 
@@ -1328,7 +1342,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
 		update_rt_migration(rq);
 	}
 
-	p->cpus_allowed = *new_mask;
+	cpumask_copy(&p->cpus_allowed, new_mask);
 	p->rt.nr_cpus_allowed = weight;
 }
 
@@ -1371,6 +1385,15 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p,
 	if (!rq->rt.rt_nr_running)
 		pull_rt_task(rq);
 }
+
+static inline void init_sched_rt_class(void)
+{
+	unsigned int i;
+
+	for_each_possible_cpu(i)
+		alloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
+					GFP_KERNEL, cpu_to_node(i));
+}
 #endif /* CONFIG_SMP */
 
 /*
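[Note: local_cpu_mask is now a per-cpu cpumask_var_t, which under CONFIG_CPUMASK_OFFSTACK is only a pointer, so init_sched_rt_class() has to allocate each instance before first use; alloc_cpumask_var_node() places the mask on the owning CPU's NUMA node. A sketch of the same pattern with an explicit failure check, using hypothetical names rather than the scheduler's:]

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/topology.h>

static DEFINE_PER_CPU(cpumask_var_t, example_mask);	/* hypothetical per-cpu mask */

static void example_init_masks(void)
{
        unsigned int i;

        for_each_possible_cpu(i)
                if (!alloc_cpumask_var_node(&per_cpu(example_mask, i),
                                            GFP_KERNEL, cpu_to_node(i)))
                        printk(KERN_WARNING "cpu %u: cpumask allocation failed\n", i);
}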
@@ -1541,3 +1564,4 @@ static void print_rt_stats(struct seq_file *m, int cpu)
 	rcu_read_unlock();
 }
 #endif /* CONFIG_SCHED_DEBUG */
+