Diffstat (limited to 'kernel/sched_rt.c')

 -rw-r--r--  kernel/sched_rt.c | 80
 1 file changed, 46 insertions, 34 deletions
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index d9ba9d5f99d6..1bbd99014011 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -15,7 +15,7 @@ static inline void rt_set_overload(struct rq *rq)
 	if (!rq->online)
 		return;
 
-	cpu_set(rq->cpu, rq->rd->rto_mask);
+	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
 	/*
 	 * Make sure the mask is visible before we set
 	 * the overload count. That is checked to determine
@@ -34,7 +34,7 @@ static inline void rt_clear_overload(struct rq *rq)
 
 	/* the order here really doesn't matter */
 	atomic_dec(&rq->rd->rto_count);
-	cpu_clear(rq->cpu, rq->rd->rto_mask);
+	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
 }
 
 static void update_rt_migration(struct rq *rq)
@@ -139,14 +139,14 @@ static int rt_se_boosted(struct sched_rt_entity *rt_se)
 }
 
 #ifdef CONFIG_SMP
-static inline cpumask_t sched_rt_period_mask(void)
+static inline const struct cpumask *sched_rt_period_mask(void)
 {
 	return cpu_rq(smp_processor_id())->rd->span;
 }
 #else
-static inline cpumask_t sched_rt_period_mask(void)
+static inline const struct cpumask *sched_rt_period_mask(void)
 {
-	return cpu_online_map;
+	return cpu_online_mask;
 }
 #endif
 
@@ -212,9 +212,9 @@ static inline int rt_rq_throttled(struct rt_rq *rt_rq)
 	return rt_rq->rt_throttled;
 }
 
-static inline cpumask_t sched_rt_period_mask(void)
+static inline const struct cpumask *sched_rt_period_mask(void)
 {
-	return cpu_online_map;
+	return cpu_online_mask;
 }
 
 static inline
@@ -241,11 +241,11 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
 	int i, weight, more = 0;
 	u64 rt_period;
 
-	weight = cpus_weight(rd->span);
+	weight = cpumask_weight(rd->span);
 
 	spin_lock(&rt_b->rt_runtime_lock);
 	rt_period = ktime_to_ns(rt_b->rt_period);
-	for_each_cpu_mask_nr(i, rd->span) {
+	for_each_cpu(i, rd->span) {
 		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
 		s64 diff;
 
@@ -324,7 +324,7 @@ static void __disable_runtime(struct rq *rq)
 		/*
 		 * Greedy reclaim, take back as much as we can.
 		 */
-		for_each_cpu_mask(i, rd->span) {
+		for_each_cpu(i, rd->span) {
 			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
 			s64 diff;
 
@@ -429,13 +429,13 @@ static inline int balance_runtime(struct rt_rq *rt_rq)
 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 {
 	int i, idle = 1;
-	cpumask_t span;
+	const struct cpumask *span;
 
 	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
 		return 1;
 
 	span = sched_rt_period_mask();
-	for_each_cpu_mask(i, span) {
+	for_each_cpu(i, span) {
 		int enqueue = 0;
 		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
 		struct rq *rq = rq_of_rt_rq(rt_rq);
@@ -537,13 +537,13 @@ static void update_curr_rt(struct rq *rq)
 	for_each_sched_rt_entity(rt_se) {
 		rt_rq = rt_rq_of_se(rt_se);
 
-		spin_lock(&rt_rq->rt_runtime_lock);
 		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
+			spin_lock(&rt_rq->rt_runtime_lock);
 			rt_rq->rt_time += delta_exec;
 			if (sched_rt_runtime_exceeded(rt_rq))
 				resched_task(curr);
+			spin_unlock(&rt_rq->rt_runtime_lock);
 		}
-		spin_unlock(&rt_rq->rt_runtime_lock);
 	}
 }
 
@@ -805,17 +805,20 @@ static int select_task_rq_rt(struct task_struct *p, int sync)
 
 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
 {
-	cpumask_t mask;
+	cpumask_var_t mask;
 
 	if (rq->curr->rt.nr_cpus_allowed == 1)
 		return;
 
-	if (p->rt.nr_cpus_allowed != 1
-	    && cpupri_find(&rq->rd->cpupri, p, &mask))
+	if (!alloc_cpumask_var(&mask, GFP_ATOMIC))
 		return;
 
-	if (!cpupri_find(&rq->rd->cpupri, rq->curr, &mask))
-		return;
+	if (p->rt.nr_cpus_allowed != 1
+	    && cpupri_find(&rq->rd->cpupri, p, mask))
+		goto free;
+
+	if (!cpupri_find(&rq->rd->cpupri, rq->curr, mask))
+		goto free;
 
 	/*
 	 * There appears to be other cpus that can accept
@@ -824,6 +827,8 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
 	 */
 	requeue_task_rt(rq, p, 1);
 	resched_task(rq->curr);
+free:
+	free_cpumask_var(mask);
 }
 
 #endif /* CONFIG_SMP */
@@ -909,15 +914,12 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 /* Only try algorithms three times */
 #define RT_MAX_TRIES 3
 
-static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
-static void double_unlock_balance(struct rq *this_rq, struct rq *busiest);
-
 static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
 
 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
 {
 	if (!task_running(rq, p) &&
-	    (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) &&
+	    (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) &&
 	    (p->rt.nr_cpus_allowed > 1))
 		return 1;
 	return 0;
@@ -956,7 +958,7 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
 	return next;
 }
 
-static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);
+static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
 
 static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
 {
@@ -976,7 +978,7 @@ static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
 static int find_lowest_rq(struct task_struct *task)
 {
 	struct sched_domain *sd;
-	cpumask_t *lowest_mask = &__get_cpu_var(local_cpu_mask);
+	struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
 	int this_cpu = smp_processor_id();
 	int cpu = task_cpu(task);
 
@@ -991,7 +993,7 @@ static int find_lowest_rq(struct task_struct *task)
 	 * I guess we might want to change cpupri_find() to ignore those
 	 * in the first place.
 	 */
-	cpus_and(*lowest_mask, *lowest_mask, cpu_active_map);
+	cpumask_and(lowest_mask, lowest_mask, cpu_active_mask);
 
 	/*
 	 * At this point we have built a mask of cpus representing the
@@ -1001,7 +1003,7 @@ static int find_lowest_rq(struct task_struct *task)
 	 * We prioritize the last cpu that the task executed on since
 	 * it is most likely cache-hot in that location.
 	 */
-	if (cpu_isset(cpu, *lowest_mask))
+	if (cpumask_test_cpu(cpu, lowest_mask))
 		return cpu;
 
 	/*
@@ -1016,7 +1018,8 @@ static int find_lowest_rq(struct task_struct *task)
 			cpumask_t domain_mask;
 			int best_cpu;
 
-			cpus_and(domain_mask, sd->span, *lowest_mask);
+			cpumask_and(&domain_mask, sched_domain_span(sd),
+				    lowest_mask);
 
 			best_cpu = pick_optimal_cpu(this_cpu,
 						    &domain_mask);
@@ -1057,8 +1060,8 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 		 * Also make sure that it wasn't scheduled on its rq.
 		 */
 		if (unlikely(task_rq(task) != rq ||
-			     !cpu_isset(lowest_rq->cpu,
-					task->cpus_allowed) ||
+			     !cpumask_test_cpu(lowest_rq->cpu,
+					       &task->cpus_allowed) ||
 			     task_running(rq, task) ||
 			     !task->se.on_rq)) {
 
@@ -1179,7 +1182,7 @@ static int pull_rt_task(struct rq *this_rq)
 
 	next = pick_next_task_rt(this_rq);
 
-	for_each_cpu_mask_nr(cpu, this_rq->rd->rto_mask) {
+	for_each_cpu(cpu, this_rq->rd->rto_mask) {
 		if (this_cpu == cpu)
 			continue;
 
@@ -1308,9 +1311,9 @@ move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
 }
 
 static void set_cpus_allowed_rt(struct task_struct *p,
-				const cpumask_t *new_mask)
+				const struct cpumask *new_mask)
 {
-	int weight = cpus_weight(*new_mask);
+	int weight = cpumask_weight(new_mask);
 
 	BUG_ON(!rt_task(p));
 
@@ -1331,7 +1334,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
 		update_rt_migration(rq);
 	}
 
-	p->cpus_allowed = *new_mask;
+	cpumask_copy(&p->cpus_allowed, new_mask);
 	p->rt.nr_cpus_allowed = weight;
 }
 
@@ -1374,6 +1377,14 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p,
 	if (!rq->rt.rt_nr_running)
 		pull_rt_task(rq);
 }
+
+static inline void init_sched_rt_class(void)
+{
+	unsigned int i;
+
+	for_each_possible_cpu(i)
+		alloc_cpumask_var(&per_cpu(local_cpu_mask, i), GFP_KERNEL);
+}
 #endif /* CONFIG_SMP */
 
 /*
@@ -1544,3 +1555,4 @@ static void print_rt_stats(struct seq_file *m, int cpu)
 	rcu_read_unlock();
 }
 #endif /* CONFIG_SCHED_DEBUG */
+
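
For reference, a minimal sketch (not part of the patch) of the cpumask_var_t pattern this conversion adopts: allocate the mask, check the allocation, work on it through the cpumask_*() accessors, then free it. The function name is an illustrative assumption; alloc_cpumask_var(), free_cpumask_var(), cpumask_and(), cpumask_weight() and cpu_online_mask are the real kernel APIs used in the hunks above.

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/*
 * Illustrative sketch only: with CONFIG_CPUMASK_OFFSTACK=y a cpumask_var_t
 * is heap-allocated, so the allocation must be checked and the mask freed;
 * otherwise it degenerates to an on-stack mask and alloc/free are no-ops.
 */
static int example_online_weight(const struct cpumask *allowed)
{
	cpumask_var_t tmp;
	int weight;

	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
		return -ENOMEM;

	/* Work on a private copy instead of a large on-stack cpumask_t. */
	cpumask_and(tmp, allowed, cpu_online_mask);
	weight = cpumask_weight(tmp);

	free_cpumask_var(tmp);
	return weight;
}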