Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 1106
1 file changed, 628 insertions(+), 478 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 748ff924a290..c5019a5dcaa4 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -209,7 +209,6 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime) | |||
209 | hrtimer_init(&rt_b->rt_period_timer, | 209 | hrtimer_init(&rt_b->rt_period_timer, |
210 | CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 210 | CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
211 | rt_b->rt_period_timer.function = sched_rt_period_timer; | 211 | rt_b->rt_period_timer.function = sched_rt_period_timer; |
212 | rt_b->rt_period_timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED; | ||
213 | } | 212 | } |
214 | 213 | ||
215 | static inline int rt_bandwidth_enabled(void) | 214 | static inline int rt_bandwidth_enabled(void) |
@@ -499,18 +498,26 @@ struct rt_rq { | |||
499 | */ | 498 | */ |
500 | struct root_domain { | 499 | struct root_domain { |
501 | atomic_t refcount; | 500 | atomic_t refcount; |
502 | cpumask_t span; | 501 | cpumask_var_t span; |
503 | cpumask_t online; | 502 | cpumask_var_t online; |
504 | 503 | ||
505 | /* | 504 | /* |
506 | * The "RT overload" flag: it gets set if a CPU has more than | 505 | * The "RT overload" flag: it gets set if a CPU has more than |
507 | * one runnable RT task. | 506 | * one runnable RT task. |
508 | */ | 507 | */ |
509 | cpumask_t rto_mask; | 508 | cpumask_var_t rto_mask; |
510 | atomic_t rto_count; | 509 | atomic_t rto_count; |
511 | #ifdef CONFIG_SMP | 510 | #ifdef CONFIG_SMP |
512 | struct cpupri cpupri; | 511 | struct cpupri cpupri; |
513 | #endif | 512 | #endif |
513 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) | ||
514 | /* | ||
515 | * Preferred wake up cpu nominated by sched_mc balance that will be | ||
516 | * used when most cpus are idle in the system indicating overall very | ||
517 | * low system utilisation. Triggered at POWERSAVINGS_BALANCE_WAKEUP(2) | ||
518 | */ | ||
519 | unsigned int sched_mc_preferred_wakeup_cpu; | ||
520 | #endif | ||
514 | }; | 521 | }; |
515 | 522 | ||
516 | /* | 523 | /* |
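
The hunk above converts the root_domain masks (span, online, rto_mask) from fixed-size cpumask_t fields to cpumask_var_t, and adds sched_mc_preferred_wakeup_cpu for the power-savings wakeup biasing described in the new comment. A cpumask_var_t is only a pointer when CONFIG_CPUMASK_OFFSTACK=y, so such fields must be allocated and freed explicitly by whoever creates the structure. A minimal sketch of that pattern, with illustrative names (this is not the patch's own init_rootdomain()):

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/errno.h>

/* Illustrative structure with off-stack cpumask members. */
struct foo_domain {
	cpumask_var_t span;
	cpumask_var_t online;
};

static int foo_domain_init(struct foo_domain *fd)
{
	if (!alloc_cpumask_var(&fd->span, GFP_KERNEL))
		return -ENOMEM;
	if (!alloc_cpumask_var(&fd->online, GFP_KERNEL)) {
		free_cpumask_var(fd->span);
		return -ENOMEM;
	}
	cpumask_clear(fd->span);	/* alloc_cpumask_var() does not zero */
	cpumask_clear(fd->online);
	return 0;
}

static void foo_domain_free(struct foo_domain *fd)
{
	free_cpumask_var(fd->online);
	free_cpumask_var(fd->span);
}
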
@@ -1139,7 +1146,6 @@ static void init_rq_hrtick(struct rq *rq) | |||
1139 | 1146 | ||
1140 | hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 1147 | hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
1141 | rq->hrtick_timer.function = hrtick; | 1148 | rq->hrtick_timer.function = hrtick; |
1142 | rq->hrtick_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU; | ||
1143 | } | 1149 | } |
1144 | #else /* CONFIG_SCHED_HRTICK */ | 1150 | #else /* CONFIG_SCHED_HRTICK */ |
1145 | static inline void hrtick_clear(struct rq *rq) | 1151 | static inline void hrtick_clear(struct rq *rq) |
@@ -1516,7 +1522,7 @@ static int tg_shares_up(struct task_group *tg, void *data) | |||
1516 | struct sched_domain *sd = data; | 1522 | struct sched_domain *sd = data; |
1517 | int i; | 1523 | int i; |
1518 | 1524 | ||
1519 | for_each_cpu_mask(i, sd->span) { | 1525 | for_each_cpu(i, sched_domain_span(sd)) { |
1520 | /* | 1526 | /* |
1521 | * If there are currently no tasks on the cpu pretend there | 1527 | * If there are currently no tasks on the cpu pretend there |
1522 | * is one of average load so that when a new task gets to | 1528 | * is one of average load so that when a new task gets to |
@@ -1537,7 +1543,7 @@ static int tg_shares_up(struct task_group *tg, void *data) | |||
1537 | if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE)) | 1543 | if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE)) |
1538 | shares = tg->shares; | 1544 | shares = tg->shares; |
1539 | 1545 | ||
1540 | for_each_cpu_mask(i, sd->span) | 1546 | for_each_cpu(i, sched_domain_span(sd)) |
1541 | update_group_shares_cpu(tg, i, shares, rq_weight); | 1547 | update_group_shares_cpu(tg, i, shares, rq_weight); |
1542 | 1548 | ||
1543 | return 0; | 1549 | return 0; |
@@ -2103,15 +2109,17 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu) | |||
2103 | int i; | 2109 | int i; |
2104 | 2110 | ||
2105 | /* Skip over this group if it has no CPUs allowed */ | 2111 | /* Skip over this group if it has no CPUs allowed */ |
2106 | if (!cpus_intersects(group->cpumask, p->cpus_allowed)) | 2112 | if (!cpumask_intersects(sched_group_cpus(group), |
2113 | &p->cpus_allowed)) | ||
2107 | continue; | 2114 | continue; |
2108 | 2115 | ||
2109 | local_group = cpu_isset(this_cpu, group->cpumask); | 2116 | local_group = cpumask_test_cpu(this_cpu, |
2117 | sched_group_cpus(group)); | ||
2110 | 2118 | ||
2111 | /* Tally up the load of all CPUs in the group */ | 2119 | /* Tally up the load of all CPUs in the group */ |
2112 | avg_load = 0; | 2120 | avg_load = 0; |
2113 | 2121 | ||
2114 | for_each_cpu_mask_nr(i, group->cpumask) { | 2122 | for_each_cpu(i, sched_group_cpus(group)) { |
2115 | /* Bias balancing toward cpus of our domain */ | 2123 | /* Bias balancing toward cpus of our domain */ |
2116 | if (local_group) | 2124 | if (local_group) |
2117 | load = source_load(i, load_idx); | 2125 | load = source_load(i, load_idx); |
@@ -2143,17 +2151,14 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu) | |||
2143 | * find_idlest_cpu - find the idlest cpu among the cpus in group. | 2151 | * find_idlest_cpu - find the idlest cpu among the cpus in group. |
2144 | */ | 2152 | */ |
2145 | static int | 2153 | static int |
2146 | find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu, | 2154 | find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) |
2147 | cpumask_t *tmp) | ||
2148 | { | 2155 | { |
2149 | unsigned long load, min_load = ULONG_MAX; | 2156 | unsigned long load, min_load = ULONG_MAX; |
2150 | int idlest = -1; | 2157 | int idlest = -1; |
2151 | int i; | 2158 | int i; |
2152 | 2159 | ||
2153 | /* Traverse only the allowed CPUs */ | 2160 | /* Traverse only the allowed CPUs */ |
2154 | cpus_and(*tmp, group->cpumask, p->cpus_allowed); | 2161 | for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) { |
2155 | |||
2156 | for_each_cpu_mask_nr(i, *tmp) { | ||
2157 | load = weighted_cpuload(i); | 2162 | load = weighted_cpuload(i); |
2158 | 2163 | ||
2159 | if (load < min_load || (load == min_load && i == this_cpu)) { | 2164 | if (load < min_load || (load == min_load && i == this_cpu)) { |
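
Several hunks in this range (find_idlest_cpu() above, find_busiest_group() below) drop temporary cpumask copies by iterating with for_each_cpu_and(), which walks the intersection of two masks one bit at a time instead of materialising it first. A self-contained sketch of the idiom (function name is illustrative):

#include <linux/cpumask.h>

/* Count the CPUs present in both masks, without a scratch cpumask. */
static int count_common_cpus(const struct cpumask *a, const struct cpumask *b)
{
	int cpu, n = 0;

	/*
	 * The old style needed "cpumask_t tmp; cpus_and(tmp, *a, *b);" plus
	 * a walk over tmp; for_each_cpu_and() does the AND per iteration.
	 */
	for_each_cpu_and(cpu, a, b)
		n++;

	return n;
}
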
@@ -2195,7 +2200,6 @@ static int sched_balance_self(int cpu, int flag) | |||
2195 | update_shares(sd); | 2200 | update_shares(sd); |
2196 | 2201 | ||
2197 | while (sd) { | 2202 | while (sd) { |
2198 | cpumask_t span, tmpmask; | ||
2199 | struct sched_group *group; | 2203 | struct sched_group *group; |
2200 | int new_cpu, weight; | 2204 | int new_cpu, weight; |
2201 | 2205 | ||
@@ -2204,14 +2208,13 @@ static int sched_balance_self(int cpu, int flag) | |||
2204 | continue; | 2208 | continue; |
2205 | } | 2209 | } |
2206 | 2210 | ||
2207 | span = sd->span; | ||
2208 | group = find_idlest_group(sd, t, cpu); | 2211 | group = find_idlest_group(sd, t, cpu); |
2209 | if (!group) { | 2212 | if (!group) { |
2210 | sd = sd->child; | 2213 | sd = sd->child; |
2211 | continue; | 2214 | continue; |
2212 | } | 2215 | } |
2213 | 2216 | ||
2214 | new_cpu = find_idlest_cpu(group, t, cpu, &tmpmask); | 2217 | new_cpu = find_idlest_cpu(group, t, cpu); |
2215 | if (new_cpu == -1 || new_cpu == cpu) { | 2218 | if (new_cpu == -1 || new_cpu == cpu) { |
2216 | /* Now try balancing at a lower domain level of cpu */ | 2219 | /* Now try balancing at a lower domain level of cpu */ |
2217 | sd = sd->child; | 2220 | sd = sd->child; |
@@ -2220,10 +2223,10 @@ static int sched_balance_self(int cpu, int flag) | |||
2220 | 2223 | ||
2221 | /* Now try balancing at a lower domain level of new_cpu */ | 2224 | /* Now try balancing at a lower domain level of new_cpu */ |
2222 | cpu = new_cpu; | 2225 | cpu = new_cpu; |
2226 | weight = cpumask_weight(sched_domain_span(sd)); | ||
2223 | sd = NULL; | 2227 | sd = NULL; |
2224 | weight = cpus_weight(span); | ||
2225 | for_each_domain(cpu, tmp) { | 2228 | for_each_domain(cpu, tmp) { |
2226 | if (weight <= cpus_weight(tmp->span)) | 2229 | if (weight <= cpumask_weight(sched_domain_span(tmp))) |
2227 | break; | 2230 | break; |
2228 | if (tmp->flags & flag) | 2231 | if (tmp->flags & flag) |
2229 | sd = tmp; | 2232 | sd = tmp; |
@@ -2268,7 +2271,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync) | |||
2268 | cpu = task_cpu(p); | 2271 | cpu = task_cpu(p); |
2269 | 2272 | ||
2270 | for_each_domain(this_cpu, sd) { | 2273 | for_each_domain(this_cpu, sd) { |
2271 | if (cpu_isset(cpu, sd->span)) { | 2274 | if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { |
2272 | update_shares(sd); | 2275 | update_shares(sd); |
2273 | break; | 2276 | break; |
2274 | } | 2277 | } |
@@ -2317,7 +2320,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync) | |||
2317 | else { | 2320 | else { |
2318 | struct sched_domain *sd; | 2321 | struct sched_domain *sd; |
2319 | for_each_domain(this_cpu, sd) { | 2322 | for_each_domain(this_cpu, sd) { |
2320 | if (cpu_isset(cpu, sd->span)) { | 2323 | if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { |
2321 | schedstat_inc(sd, ttwu_wake_remote); | 2324 | schedstat_inc(sd, ttwu_wake_remote); |
2322 | break; | 2325 | break; |
2323 | } | 2326 | } |
@@ -2848,7 +2851,7 @@ static void sched_migrate_task(struct task_struct *p, int dest_cpu) | |||
2848 | struct rq *rq; | 2851 | struct rq *rq; |
2849 | 2852 | ||
2850 | rq = task_rq_lock(p, &flags); | 2853 | rq = task_rq_lock(p, &flags); |
2851 | if (!cpu_isset(dest_cpu, p->cpus_allowed) | 2854 | if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed) |
2852 | || unlikely(!cpu_active(dest_cpu))) | 2855 | || unlikely(!cpu_active(dest_cpu))) |
2853 | goto out; | 2856 | goto out; |
2854 | 2857 | ||
@@ -2913,7 +2916,7 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu, | |||
2913 | * 2) cannot be migrated to this CPU due to cpus_allowed, or | 2916 | * 2) cannot be migrated to this CPU due to cpus_allowed, or |
2914 | * 3) are cache-hot on their current CPU. | 2917 | * 3) are cache-hot on their current CPU. |
2915 | */ | 2918 | */ |
2916 | if (!cpu_isset(this_cpu, p->cpus_allowed)) { | 2919 | if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed)) { |
2917 | schedstat_inc(p, se.nr_failed_migrations_affine); | 2920 | schedstat_inc(p, se.nr_failed_migrations_affine); |
2918 | return 0; | 2921 | return 0; |
2919 | } | 2922 | } |
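
The affinity checks above are mechanical conversions: cpu_isset(cpu, mask) becomes cpumask_test_cpu(cpu, &mask), i.e. the new accessors take a struct cpumask pointer rather than a cpumask_t by value. As a tiny illustration (helper name is made up):

#include <linux/sched.h>

/* May task 'p' run on 'cpu' according to its affinity mask? */
static inline bool task_allowed_on(struct task_struct *p, int cpu)
{
	return cpumask_test_cpu(cpu, &p->cpus_allowed);
}
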
@@ -3088,7 +3091,7 @@ static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest, | |||
3088 | static struct sched_group * | 3091 | static struct sched_group * |
3089 | find_busiest_group(struct sched_domain *sd, int this_cpu, | 3092 | find_busiest_group(struct sched_domain *sd, int this_cpu, |
3090 | unsigned long *imbalance, enum cpu_idle_type idle, | 3093 | unsigned long *imbalance, enum cpu_idle_type idle, |
3091 | int *sd_idle, const cpumask_t *cpus, int *balance) | 3094 | int *sd_idle, const struct cpumask *cpus, int *balance) |
3092 | { | 3095 | { |
3093 | struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups; | 3096 | struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups; |
3094 | unsigned long max_load, avg_load, total_load, this_load, total_pwr; | 3097 | unsigned long max_load, avg_load, total_load, this_load, total_pwr; |
@@ -3124,10 +3127,11 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
3124 | unsigned long sum_avg_load_per_task; | 3127 | unsigned long sum_avg_load_per_task; |
3125 | unsigned long avg_load_per_task; | 3128 | unsigned long avg_load_per_task; |
3126 | 3129 | ||
3127 | local_group = cpu_isset(this_cpu, group->cpumask); | 3130 | local_group = cpumask_test_cpu(this_cpu, |
3131 | sched_group_cpus(group)); | ||
3128 | 3132 | ||
3129 | if (local_group) | 3133 | if (local_group) |
3130 | balance_cpu = first_cpu(group->cpumask); | 3134 | balance_cpu = cpumask_first(sched_group_cpus(group)); |
3131 | 3135 | ||
3132 | /* Tally up the load of all CPUs in the group */ | 3136 | /* Tally up the load of all CPUs in the group */ |
3133 | sum_weighted_load = sum_nr_running = avg_load = 0; | 3137 | sum_weighted_load = sum_nr_running = avg_load = 0; |
@@ -3136,13 +3140,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
3136 | max_cpu_load = 0; | 3140 | max_cpu_load = 0; |
3137 | min_cpu_load = ~0UL; | 3141 | min_cpu_load = ~0UL; |
3138 | 3142 | ||
3139 | for_each_cpu_mask_nr(i, group->cpumask) { | 3143 | for_each_cpu_and(i, sched_group_cpus(group), cpus) { |
3140 | struct rq *rq; | 3144 | struct rq *rq = cpu_rq(i); |
3141 | |||
3142 | if (!cpu_isset(i, *cpus)) | ||
3143 | continue; | ||
3144 | |||
3145 | rq = cpu_rq(i); | ||
3146 | 3145 | ||
3147 | if (*sd_idle && rq->nr_running) | 3146 | if (*sd_idle && rq->nr_running) |
3148 | *sd_idle = 0; | 3147 | *sd_idle = 0; |
@@ -3253,8 +3252,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
3253 | */ | 3252 | */ |
3254 | if ((sum_nr_running < min_nr_running) || | 3253 | if ((sum_nr_running < min_nr_running) || |
3255 | (sum_nr_running == min_nr_running && | 3254 | (sum_nr_running == min_nr_running && |
3256 | first_cpu(group->cpumask) < | 3255 | cpumask_first(sched_group_cpus(group)) > |
3257 | first_cpu(group_min->cpumask))) { | 3256 | cpumask_first(sched_group_cpus(group_min)))) { |
3258 | group_min = group; | 3257 | group_min = group; |
3259 | min_nr_running = sum_nr_running; | 3258 | min_nr_running = sum_nr_running; |
3260 | min_load_per_task = sum_weighted_load / | 3259 | min_load_per_task = sum_weighted_load / |
@@ -3269,8 +3268,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
3269 | if (sum_nr_running <= group_capacity - 1) { | 3268 | if (sum_nr_running <= group_capacity - 1) { |
3270 | if (sum_nr_running > leader_nr_running || | 3269 | if (sum_nr_running > leader_nr_running || |
3271 | (sum_nr_running == leader_nr_running && | 3270 | (sum_nr_running == leader_nr_running && |
3272 | first_cpu(group->cpumask) > | 3271 | cpumask_first(sched_group_cpus(group)) < |
3273 | first_cpu(group_leader->cpumask))) { | 3272 | cpumask_first(sched_group_cpus(group_leader)))) { |
3274 | group_leader = group; | 3273 | group_leader = group; |
3275 | leader_nr_running = sum_nr_running; | 3274 | leader_nr_running = sum_nr_running; |
3276 | } | 3275 | } |
@@ -3396,6 +3395,10 @@ out_balanced: | |||
3396 | 3395 | ||
3397 | if (this == group_leader && group_leader != group_min) { | 3396 | if (this == group_leader && group_leader != group_min) { |
3398 | *imbalance = min_load_per_task; | 3397 | *imbalance = min_load_per_task; |
3398 | if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP) { | ||
3399 | cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu = | ||
3400 | cpumask_first(sched_group_cpus(group_leader)); | ||
3401 | } | ||
3399 | return group_min; | 3402 | return group_min; |
3400 | } | 3403 | } |
3401 | #endif | 3404 | #endif |
@@ -3409,16 +3412,16 @@ ret: | |||
3409 | */ | 3412 | */ |
3410 | static struct rq * | 3413 | static struct rq * |
3411 | find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle, | 3414 | find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle, |
3412 | unsigned long imbalance, const cpumask_t *cpus) | 3415 | unsigned long imbalance, const struct cpumask *cpus) |
3413 | { | 3416 | { |
3414 | struct rq *busiest = NULL, *rq; | 3417 | struct rq *busiest = NULL, *rq; |
3415 | unsigned long max_load = 0; | 3418 | unsigned long max_load = 0; |
3416 | int i; | 3419 | int i; |
3417 | 3420 | ||
3418 | for_each_cpu_mask_nr(i, group->cpumask) { | 3421 | for_each_cpu(i, sched_group_cpus(group)) { |
3419 | unsigned long wl; | 3422 | unsigned long wl; |
3420 | 3423 | ||
3421 | if (!cpu_isset(i, *cpus)) | 3424 | if (!cpumask_test_cpu(i, cpus)) |
3422 | continue; | 3425 | continue; |
3423 | 3426 | ||
3424 | rq = cpu_rq(i); | 3427 | rq = cpu_rq(i); |
@@ -3448,7 +3451,7 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle, | |||
3448 | */ | 3451 | */ |
3449 | static int load_balance(int this_cpu, struct rq *this_rq, | 3452 | static int load_balance(int this_cpu, struct rq *this_rq, |
3450 | struct sched_domain *sd, enum cpu_idle_type idle, | 3453 | struct sched_domain *sd, enum cpu_idle_type idle, |
3451 | int *balance, cpumask_t *cpus) | 3454 | int *balance, struct cpumask *cpus) |
3452 | { | 3455 | { |
3453 | int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0; | 3456 | int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0; |
3454 | struct sched_group *group; | 3457 | struct sched_group *group; |
@@ -3456,7 +3459,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, | |||
3456 | struct rq *busiest; | 3459 | struct rq *busiest; |
3457 | unsigned long flags; | 3460 | unsigned long flags; |
3458 | 3461 | ||
3459 | cpus_setall(*cpus); | 3462 | cpumask_setall(cpus); |
3460 | 3463 | ||
3461 | /* | 3464 | /* |
3462 | * When power savings policy is enabled for the parent domain, idle | 3465 | * When power savings policy is enabled for the parent domain, idle |
@@ -3516,8 +3519,8 @@ redo: | |||
3516 | 3519 | ||
3517 | /* All tasks on this runqueue were pinned by CPU affinity */ | 3520 | /* All tasks on this runqueue were pinned by CPU affinity */ |
3518 | if (unlikely(all_pinned)) { | 3521 | if (unlikely(all_pinned)) { |
3519 | cpu_clear(cpu_of(busiest), *cpus); | 3522 | cpumask_clear_cpu(cpu_of(busiest), cpus); |
3520 | if (!cpus_empty(*cpus)) | 3523 | if (!cpumask_empty(cpus)) |
3521 | goto redo; | 3524 | goto redo; |
3522 | goto out_balanced; | 3525 | goto out_balanced; |
3523 | } | 3526 | } |
@@ -3534,7 +3537,8 @@ redo: | |||
3534 | /* don't kick the migration_thread, if the curr | 3537 | /* don't kick the migration_thread, if the curr |
3535 | * task on busiest cpu can't be moved to this_cpu | 3538 | * task on busiest cpu can't be moved to this_cpu |
3536 | */ | 3539 | */ |
3537 | if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) { | 3540 | if (!cpumask_test_cpu(this_cpu, |
3541 | &busiest->curr->cpus_allowed)) { | ||
3538 | spin_unlock_irqrestore(&busiest->lock, flags); | 3542 | spin_unlock_irqrestore(&busiest->lock, flags); |
3539 | all_pinned = 1; | 3543 | all_pinned = 1; |
3540 | goto out_one_pinned; | 3544 | goto out_one_pinned; |
@@ -3609,7 +3613,7 @@ out: | |||
3609 | */ | 3613 | */ |
3610 | static int | 3614 | static int |
3611 | load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd, | 3615 | load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd, |
3612 | cpumask_t *cpus) | 3616 | struct cpumask *cpus) |
3613 | { | 3617 | { |
3614 | struct sched_group *group; | 3618 | struct sched_group *group; |
3615 | struct rq *busiest = NULL; | 3619 | struct rq *busiest = NULL; |
@@ -3618,7 +3622,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd, | |||
3618 | int sd_idle = 0; | 3622 | int sd_idle = 0; |
3619 | int all_pinned = 0; | 3623 | int all_pinned = 0; |
3620 | 3624 | ||
3621 | cpus_setall(*cpus); | 3625 | cpumask_setall(cpus); |
3622 | 3626 | ||
3623 | /* | 3627 | /* |
3624 | * When power savings policy is enabled for the parent domain, idle | 3628 | * When power savings policy is enabled for the parent domain, idle |
@@ -3662,17 +3666,71 @@ redo: | |||
3662 | double_unlock_balance(this_rq, busiest); | 3666 | double_unlock_balance(this_rq, busiest); |
3663 | 3667 | ||
3664 | if (unlikely(all_pinned)) { | 3668 | if (unlikely(all_pinned)) { |
3665 | cpu_clear(cpu_of(busiest), *cpus); | 3669 | cpumask_clear_cpu(cpu_of(busiest), cpus); |
3666 | if (!cpus_empty(*cpus)) | 3670 | if (!cpumask_empty(cpus)) |
3667 | goto redo; | 3671 | goto redo; |
3668 | } | 3672 | } |
3669 | } | 3673 | } |
3670 | 3674 | ||
3671 | if (!ld_moved) { | 3675 | if (!ld_moved) { |
3676 | int active_balance = 0; | ||
3677 | |||
3672 | schedstat_inc(sd, lb_failed[CPU_NEWLY_IDLE]); | 3678 | schedstat_inc(sd, lb_failed[CPU_NEWLY_IDLE]); |
3673 | if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER && | 3679 | if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER && |
3674 | !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE)) | 3680 | !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE)) |
3675 | return -1; | 3681 | return -1; |
3682 | |||
3683 | if (sched_mc_power_savings < POWERSAVINGS_BALANCE_WAKEUP) | ||
3684 | return -1; | ||
3685 | |||
3686 | if (sd->nr_balance_failed++ < 2) | ||
3687 | return -1; | ||
3688 | |||
3689 | /* | ||
3690 | * The only task running in a non-idle cpu can be moved to this | ||
3691 | * cpu in an attempt to completely freeup the other CPU | ||
3692 | * package. The same method used to move task in load_balance() | ||
3693 | * have been extended for load_balance_newidle() to speedup | ||
3694 | * consolidation at sched_mc=POWERSAVINGS_BALANCE_WAKEUP (2) | ||
3695 | * | ||
3696 | * The package power saving logic comes from | ||
3697 | * find_busiest_group(). If there are no imbalance, then | ||
3698 | * f_b_g() will return NULL. However when sched_mc={1,2} then | ||
3699 | * f_b_g() will select a group from which a running task may be | ||
3700 | * pulled to this cpu in order to make the other package idle. | ||
3701 | * If there is no opportunity to make a package idle and if | ||
3702 | * there are no imbalance, then f_b_g() will return NULL and no | ||
3703 | * action will be taken in load_balance_newidle(). | ||
3704 | * | ||
3705 | * Under normal task pull operation due to imbalance, there | ||
3706 | * will be more than one task in the source run queue and | ||
3707 | * move_tasks() will succeed. ld_moved will be true and this | ||
3708 | * active balance code will not be triggered. | ||
3709 | */ | ||
3710 | |||
3711 | /* Lock busiest in correct order while this_rq is held */ | ||
3712 | double_lock_balance(this_rq, busiest); | ||
3713 | |||
3714 | /* | ||
3715 | * don't kick the migration_thread, if the curr | ||
3716 | * task on busiest cpu can't be moved to this_cpu | ||
3717 | */ | ||
3718 | if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) { | ||
3719 | double_unlock_balance(this_rq, busiest); | ||
3720 | all_pinned = 1; | ||
3721 | return ld_moved; | ||
3722 | } | ||
3723 | |||
3724 | if (!busiest->active_balance) { | ||
3725 | busiest->active_balance = 1; | ||
3726 | busiest->push_cpu = this_cpu; | ||
3727 | active_balance = 1; | ||
3728 | } | ||
3729 | |||
3730 | double_unlock_balance(this_rq, busiest); | ||
3731 | if (active_balance) | ||
3732 | wake_up_process(busiest->migration_thread); | ||
3733 | |||
3676 | } else | 3734 | } else |
3677 | sd->nr_balance_failed = 0; | 3735 | sd->nr_balance_failed = 0; |
3678 | 3736 | ||
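
The block added above gives load_balance_newidle() the same last-resort "active balance" that load_balance() already has: when nothing could be pulled, sched_mc is at POWERSAVINGS_BALANCE_WAKEUP and a couple of attempts have failed, the busiest runqueue's migration thread is asked to push its lone running task towards this CPU so the other package can go fully idle. Condensed into one helper for readability (a sketch of the hunk's control flow, not the patch verbatim):

/* Sketch: ask 'busiest' to push its current task to 'this_cpu'. */
static int kick_active_balance(struct rq *this_rq, struct rq *busiest,
			       int this_cpu)
{
	int kick = 0;

	double_lock_balance(this_rq, busiest);

	/* The running task must be allowed on the destination CPU. */
	if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) {
		double_unlock_balance(this_rq, busiest);
		return 0;
	}

	if (!busiest->active_balance) {
		busiest->active_balance = 1;	/* request one push */
		busiest->push_cpu = this_cpu;	/* ... towards us */
		kick = 1;
	}

	double_unlock_balance(this_rq, busiest);

	if (kick)
		wake_up_process(busiest->migration_thread);

	return kick;
}
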
@@ -3698,7 +3756,10 @@ static void idle_balance(int this_cpu, struct rq *this_rq) | |||
3698 | struct sched_domain *sd; | 3756 | struct sched_domain *sd; |
3699 | int pulled_task = 0; | 3757 | int pulled_task = 0; |
3700 | unsigned long next_balance = jiffies + HZ; | 3758 | unsigned long next_balance = jiffies + HZ; |
3701 | cpumask_t tmpmask; | 3759 | cpumask_var_t tmpmask; |
3760 | |||
3761 | if (!alloc_cpumask_var(&tmpmask, GFP_ATOMIC)) | ||
3762 | return; | ||
3702 | 3763 | ||
3703 | for_each_domain(this_cpu, sd) { | 3764 | for_each_domain(this_cpu, sd) { |
3704 | unsigned long interval; | 3765 | unsigned long interval; |
@@ -3709,7 +3770,7 @@ static void idle_balance(int this_cpu, struct rq *this_rq) | |||
3709 | if (sd->flags & SD_BALANCE_NEWIDLE) | 3770 | if (sd->flags & SD_BALANCE_NEWIDLE) |
3710 | /* If we've pulled tasks over stop searching: */ | 3771 | /* If we've pulled tasks over stop searching: */ |
3711 | pulled_task = load_balance_newidle(this_cpu, this_rq, | 3772 | pulled_task = load_balance_newidle(this_cpu, this_rq, |
3712 | sd, &tmpmask); | 3773 | sd, tmpmask); |
3713 | 3774 | ||
3714 | interval = msecs_to_jiffies(sd->balance_interval); | 3775 | interval = msecs_to_jiffies(sd->balance_interval); |
3715 | if (time_after(next_balance, sd->last_balance + interval)) | 3776 | if (time_after(next_balance, sd->last_balance + interval)) |
@@ -3724,6 +3785,7 @@ static void idle_balance(int this_cpu, struct rq *this_rq) | |||
3724 | */ | 3785 | */ |
3725 | this_rq->next_balance = next_balance; | 3786 | this_rq->next_balance = next_balance; |
3726 | } | 3787 | } |
3788 | free_cpumask_var(tmpmask); | ||
3727 | } | 3789 | } |
3728 | 3790 | ||
3729 | /* | 3791 | /* |
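
idle_balance() runs in the schedule() path, so its scratch mask is now allocated with GFP_ATOMIC and the function simply returns if the allocation fails: newly-idle balancing is best effort. When CONFIG_CPUMASK_OFFSTACK is not set, alloc_cpumask_var() uses on-stack storage and cannot fail, so the common configuration behaves as before. The shape of the pattern, with illustrative names:

#include <linux/cpumask.h>
#include <linux/gfp.h>

/* Best-effort work that needs a scratch cpumask in atomic context. */
static void scan_cpus_best_effort(void)
{
	cpumask_var_t scratch;
	int cpu;

	if (!alloc_cpumask_var(&scratch, GFP_ATOMIC))
		return;			/* skip the scan, no harm done */

	cpumask_copy(scratch, cpu_online_mask);
	for_each_cpu(cpu, scratch) {
		/* ... per-cpu work that may clear bits in 'scratch' ... */
	}

	free_cpumask_var(scratch);
}
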
@@ -3761,7 +3823,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu) | |||
3761 | /* Search for an sd spanning us and the target CPU. */ | 3823 | /* Search for an sd spanning us and the target CPU. */ |
3762 | for_each_domain(target_cpu, sd) { | 3824 | for_each_domain(target_cpu, sd) { |
3763 | if ((sd->flags & SD_LOAD_BALANCE) && | 3825 | if ((sd->flags & SD_LOAD_BALANCE) && |
3764 | cpu_isset(busiest_cpu, sd->span)) | 3826 | cpumask_test_cpu(busiest_cpu, sched_domain_span(sd))) |
3765 | break; | 3827 | break; |
3766 | } | 3828 | } |
3767 | 3829 | ||
@@ -3780,10 +3842,9 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu) | |||
3780 | #ifdef CONFIG_NO_HZ | 3842 | #ifdef CONFIG_NO_HZ |
3781 | static struct { | 3843 | static struct { |
3782 | atomic_t load_balancer; | 3844 | atomic_t load_balancer; |
3783 | cpumask_t cpu_mask; | 3845 | cpumask_var_t cpu_mask; |
3784 | } nohz ____cacheline_aligned = { | 3846 | } nohz ____cacheline_aligned = { |
3785 | .load_balancer = ATOMIC_INIT(-1), | 3847 | .load_balancer = ATOMIC_INIT(-1), |
3786 | .cpu_mask = CPU_MASK_NONE, | ||
3787 | }; | 3848 | }; |
3788 | 3849 | ||
3789 | /* | 3850 | /* |
@@ -3811,7 +3872,7 @@ int select_nohz_load_balancer(int stop_tick) | |||
3811 | int cpu = smp_processor_id(); | 3872 | int cpu = smp_processor_id(); |
3812 | 3873 | ||
3813 | if (stop_tick) { | 3874 | if (stop_tick) { |
3814 | cpu_set(cpu, nohz.cpu_mask); | 3875 | cpumask_set_cpu(cpu, nohz.cpu_mask); |
3815 | cpu_rq(cpu)->in_nohz_recently = 1; | 3876 | cpu_rq(cpu)->in_nohz_recently = 1; |
3816 | 3877 | ||
3817 | /* | 3878 | /* |
@@ -3825,7 +3886,7 @@ int select_nohz_load_balancer(int stop_tick) | |||
3825 | } | 3886 | } |
3826 | 3887 | ||
3827 | /* time for ilb owner also to sleep */ | 3888 | /* time for ilb owner also to sleep */ |
3828 | if (cpus_weight(nohz.cpu_mask) == num_online_cpus()) { | 3889 | if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) { |
3829 | if (atomic_read(&nohz.load_balancer) == cpu) | 3890 | if (atomic_read(&nohz.load_balancer) == cpu) |
3830 | atomic_set(&nohz.load_balancer, -1); | 3891 | atomic_set(&nohz.load_balancer, -1); |
3831 | return 0; | 3892 | return 0; |
@@ -3838,10 +3899,10 @@ int select_nohz_load_balancer(int stop_tick) | |||
3838 | } else if (atomic_read(&nohz.load_balancer) == cpu) | 3899 | } else if (atomic_read(&nohz.load_balancer) == cpu) |
3839 | return 1; | 3900 | return 1; |
3840 | } else { | 3901 | } else { |
3841 | if (!cpu_isset(cpu, nohz.cpu_mask)) | 3902 | if (!cpumask_test_cpu(cpu, nohz.cpu_mask)) |
3842 | return 0; | 3903 | return 0; |
3843 | 3904 | ||
3844 | cpu_clear(cpu, nohz.cpu_mask); | 3905 | cpumask_clear_cpu(cpu, nohz.cpu_mask); |
3845 | 3906 | ||
3846 | if (atomic_read(&nohz.load_balancer) == cpu) | 3907 | if (atomic_read(&nohz.load_balancer) == cpu) |
3847 | if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu) | 3908 | if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu) |
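
With nohz.cpu_mask now a cpumask_var_t, the static CPU_MASK_NONE initializer disappears, which means the mask has to be allocated once during scheduler bring-up before any CPU can enter nohz idle; in this series that happens from sched_init() with an early allocator along the lines of the snippet below (the exact call site is outside this hunk, so treat it as illustrative):

/* Illustrative early-boot allocation of the global nohz mask. */
#ifdef CONFIG_NO_HZ
	alloc_bootmem_cpumask_var(&nohz.cpu_mask);
#endif
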
@@ -3869,7 +3930,11 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle) | |||
3869 | unsigned long next_balance = jiffies + 60*HZ; | 3930 | unsigned long next_balance = jiffies + 60*HZ; |
3870 | int update_next_balance = 0; | 3931 | int update_next_balance = 0; |
3871 | int need_serialize; | 3932 | int need_serialize; |
3872 | cpumask_t tmp; | 3933 | cpumask_var_t tmp; |
3934 | |||
3935 | /* Fails alloc? Rebalancing probably not a priority right now. */ | ||
3936 | if (!alloc_cpumask_var(&tmp, GFP_ATOMIC)) | ||
3937 | return; | ||
3873 | 3938 | ||
3874 | for_each_domain(cpu, sd) { | 3939 | for_each_domain(cpu, sd) { |
3875 | if (!(sd->flags & SD_LOAD_BALANCE)) | 3940 | if (!(sd->flags & SD_LOAD_BALANCE)) |
@@ -3894,7 +3959,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle) | |||
3894 | } | 3959 | } |
3895 | 3960 | ||
3896 | if (time_after_eq(jiffies, sd->last_balance + interval)) { | 3961 | if (time_after_eq(jiffies, sd->last_balance + interval)) { |
3897 | if (load_balance(cpu, rq, sd, idle, &balance, &tmp)) { | 3962 | if (load_balance(cpu, rq, sd, idle, &balance, tmp)) { |
3898 | /* | 3963 | /* |
3899 | * We've pulled tasks over so either we're no | 3964 | * We've pulled tasks over so either we're no |
3900 | * longer idle, or one of our SMT siblings is | 3965 | * longer idle, or one of our SMT siblings is |
@@ -3928,6 +3993,8 @@ out: | |||
3928 | */ | 3993 | */ |
3929 | if (likely(update_next_balance)) | 3994 | if (likely(update_next_balance)) |
3930 | rq->next_balance = next_balance; | 3995 | rq->next_balance = next_balance; |
3996 | |||
3997 | free_cpumask_var(tmp); | ||
3931 | } | 3998 | } |
3932 | 3999 | ||
3933 | /* | 4000 | /* |
@@ -3952,12 +4019,13 @@ static void run_rebalance_domains(struct softirq_action *h) | |||
3952 | */ | 4019 | */ |
3953 | if (this_rq->idle_at_tick && | 4020 | if (this_rq->idle_at_tick && |
3954 | atomic_read(&nohz.load_balancer) == this_cpu) { | 4021 | atomic_read(&nohz.load_balancer) == this_cpu) { |
3955 | cpumask_t cpus = nohz.cpu_mask; | ||
3956 | struct rq *rq; | 4022 | struct rq *rq; |
3957 | int balance_cpu; | 4023 | int balance_cpu; |
3958 | 4024 | ||
3959 | cpu_clear(this_cpu, cpus); | 4025 | for_each_cpu(balance_cpu, nohz.cpu_mask) { |
3960 | for_each_cpu_mask_nr(balance_cpu, cpus) { | 4026 | if (balance_cpu == this_cpu) |
4027 | continue; | ||
4028 | |||
3961 | /* | 4029 | /* |
3962 | * If this cpu gets work to do, stop the load balancing | 4030 | * If this cpu gets work to do, stop the load balancing |
3963 | * work being done for other cpus. Next load | 4031 | * work being done for other cpus. Next load |
@@ -3995,7 +4063,7 @@ static inline void trigger_load_balance(struct rq *rq, int cpu) | |||
3995 | rq->in_nohz_recently = 0; | 4063 | rq->in_nohz_recently = 0; |
3996 | 4064 | ||
3997 | if (atomic_read(&nohz.load_balancer) == cpu) { | 4065 | if (atomic_read(&nohz.load_balancer) == cpu) { |
3998 | cpu_clear(cpu, nohz.cpu_mask); | 4066 | cpumask_clear_cpu(cpu, nohz.cpu_mask); |
3999 | atomic_set(&nohz.load_balancer, -1); | 4067 | atomic_set(&nohz.load_balancer, -1); |
4000 | } | 4068 | } |
4001 | 4069 | ||
@@ -4008,7 +4076,7 @@ static inline void trigger_load_balance(struct rq *rq, int cpu) | |||
4008 | * TBD: Traverse the sched domains and nominate | 4076 | * TBD: Traverse the sched domains and nominate |
4009 | * the nearest cpu in the nohz.cpu_mask. | 4077 | * the nearest cpu in the nohz.cpu_mask. |
4010 | */ | 4078 | */ |
4011 | int ilb = first_cpu(nohz.cpu_mask); | 4079 | int ilb = cpumask_first(nohz.cpu_mask); |
4012 | 4080 | ||
4013 | if (ilb < nr_cpu_ids) | 4081 | if (ilb < nr_cpu_ids) |
4014 | resched_cpu(ilb); | 4082 | resched_cpu(ilb); |
@@ -4020,7 +4088,7 @@ static inline void trigger_load_balance(struct rq *rq, int cpu) | |||
4020 | * cpus with ticks stopped, is it time for that to stop? | 4088 | * cpus with ticks stopped, is it time for that to stop? |
4021 | */ | 4089 | */ |
4022 | if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) == cpu && | 4090 | if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) == cpu && |
4023 | cpus_weight(nohz.cpu_mask) == num_online_cpus()) { | 4091 | cpumask_weight(nohz.cpu_mask) == num_online_cpus()) { |
4024 | resched_cpu(cpu); | 4092 | resched_cpu(cpu); |
4025 | return; | 4093 | return; |
4026 | } | 4094 | } |
@@ -4030,7 +4098,7 @@ static inline void trigger_load_balance(struct rq *rq, int cpu) | |||
4030 | * someone else, then no need raise the SCHED_SOFTIRQ | 4098 | * someone else, then no need raise the SCHED_SOFTIRQ |
4031 | */ | 4099 | */ |
4032 | if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) != cpu && | 4100 | if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) != cpu && |
4033 | cpu_isset(cpu, nohz.cpu_mask)) | 4101 | cpumask_test_cpu(cpu, nohz.cpu_mask)) |
4034 | return; | 4102 | return; |
4035 | #endif | 4103 | #endif |
4036 | if (time_after_eq(jiffies, rq->next_balance)) | 4104 | if (time_after_eq(jiffies, rq->next_balance)) |
@@ -4082,13 +4150,17 @@ unsigned long long task_delta_exec(struct task_struct *p) | |||
4082 | * Account user cpu time to a process. | 4150 | * Account user cpu time to a process. |
4083 | * @p: the process that the cpu time gets accounted to | 4151 | * @p: the process that the cpu time gets accounted to |
4084 | * @cputime: the cpu time spent in user space since the last update | 4152 | * @cputime: the cpu time spent in user space since the last update |
4153 | * @cputime_scaled: cputime scaled by cpu frequency | ||
4085 | */ | 4154 | */ |
4086 | void account_user_time(struct task_struct *p, cputime_t cputime) | 4155 | void account_user_time(struct task_struct *p, cputime_t cputime, |
4156 | cputime_t cputime_scaled) | ||
4087 | { | 4157 | { |
4088 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; | 4158 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; |
4089 | cputime64_t tmp; | 4159 | cputime64_t tmp; |
4090 | 4160 | ||
4161 | /* Add user time to process. */ | ||
4091 | p->utime = cputime_add(p->utime, cputime); | 4162 | p->utime = cputime_add(p->utime, cputime); |
4163 | p->utimescaled = cputime_add(p->utimescaled, cputime_scaled); | ||
4092 | account_group_user_time(p, cputime); | 4164 | account_group_user_time(p, cputime); |
4093 | 4165 | ||
4094 | /* Add user time to cpustat. */ | 4166 | /* Add user time to cpustat. */ |
@@ -4105,51 +4177,48 @@ void account_user_time(struct task_struct *p, cputime_t cputime) | |||
4105 | * Account guest cpu time to a process. | 4177 | * Account guest cpu time to a process. |
4106 | * @p: the process that the cpu time gets accounted to | 4178 | * @p: the process that the cpu time gets accounted to |
4107 | * @cputime: the cpu time spent in virtual machine since the last update | 4179 | * @cputime: the cpu time spent in virtual machine since the last update |
4180 | * @cputime_scaled: cputime scaled by cpu frequency | ||
4108 | */ | 4181 | */ |
4109 | static void account_guest_time(struct task_struct *p, cputime_t cputime) | 4182 | static void account_guest_time(struct task_struct *p, cputime_t cputime, |
4183 | cputime_t cputime_scaled) | ||
4110 | { | 4184 | { |
4111 | cputime64_t tmp; | 4185 | cputime64_t tmp; |
4112 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; | 4186 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; |
4113 | 4187 | ||
4114 | tmp = cputime_to_cputime64(cputime); | 4188 | tmp = cputime_to_cputime64(cputime); |
4115 | 4189 | ||
4190 | /* Add guest time to process. */ | ||
4116 | p->utime = cputime_add(p->utime, cputime); | 4191 | p->utime = cputime_add(p->utime, cputime); |
4192 | p->utimescaled = cputime_add(p->utimescaled, cputime_scaled); | ||
4117 | account_group_user_time(p, cputime); | 4193 | account_group_user_time(p, cputime); |
4118 | p->gtime = cputime_add(p->gtime, cputime); | 4194 | p->gtime = cputime_add(p->gtime, cputime); |
4119 | 4195 | ||
4196 | /* Add guest time to cpustat. */ | ||
4120 | cpustat->user = cputime64_add(cpustat->user, tmp); | 4197 | cpustat->user = cputime64_add(cpustat->user, tmp); |
4121 | cpustat->guest = cputime64_add(cpustat->guest, tmp); | 4198 | cpustat->guest = cputime64_add(cpustat->guest, tmp); |
4122 | } | 4199 | } |
4123 | 4200 | ||
4124 | /* | 4201 | /* |
4125 | * Account scaled user cpu time to a process. | ||
4126 | * @p: the process that the cpu time gets accounted to | ||
4127 | * @cputime: the cpu time spent in user space since the last update | ||
4128 | */ | ||
4129 | void account_user_time_scaled(struct task_struct *p, cputime_t cputime) | ||
4130 | { | ||
4131 | p->utimescaled = cputime_add(p->utimescaled, cputime); | ||
4132 | } | ||
4133 | |||
4134 | /* | ||
4135 | * Account system cpu time to a process. | 4202 | * Account system cpu time to a process. |
4136 | * @p: the process that the cpu time gets accounted to | 4203 | * @p: the process that the cpu time gets accounted to |
4137 | * @hardirq_offset: the offset to subtract from hardirq_count() | 4204 | * @hardirq_offset: the offset to subtract from hardirq_count() |
4138 | * @cputime: the cpu time spent in kernel space since the last update | 4205 | * @cputime: the cpu time spent in kernel space since the last update |
4206 | * @cputime_scaled: cputime scaled by cpu frequency | ||
4139 | */ | 4207 | */ |
4140 | void account_system_time(struct task_struct *p, int hardirq_offset, | 4208 | void account_system_time(struct task_struct *p, int hardirq_offset, |
4141 | cputime_t cputime) | 4209 | cputime_t cputime, cputime_t cputime_scaled) |
4142 | { | 4210 | { |
4143 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; | 4211 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; |
4144 | struct rq *rq = this_rq(); | ||
4145 | cputime64_t tmp; | 4212 | cputime64_t tmp; |
4146 | 4213 | ||
4147 | if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) { | 4214 | if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) { |
4148 | account_guest_time(p, cputime); | 4215 | account_guest_time(p, cputime, cputime_scaled); |
4149 | return; | 4216 | return; |
4150 | } | 4217 | } |
4151 | 4218 | ||
4219 | /* Add system time to process. */ | ||
4152 | p->stime = cputime_add(p->stime, cputime); | 4220 | p->stime = cputime_add(p->stime, cputime); |
4221 | p->stimescaled = cputime_add(p->stimescaled, cputime_scaled); | ||
4153 | account_group_system_time(p, cputime); | 4222 | account_group_system_time(p, cputime); |
4154 | 4223 | ||
4155 | /* Add system time to cpustat. */ | 4224 | /* Add system time to cpustat. */ |
@@ -4158,50 +4227,85 @@ void account_system_time(struct task_struct *p, int hardirq_offset, | |||
4158 | cpustat->irq = cputime64_add(cpustat->irq, tmp); | 4227 | cpustat->irq = cputime64_add(cpustat->irq, tmp); |
4159 | else if (softirq_count()) | 4228 | else if (softirq_count()) |
4160 | cpustat->softirq = cputime64_add(cpustat->softirq, tmp); | 4229 | cpustat->softirq = cputime64_add(cpustat->softirq, tmp); |
4161 | else if (p != rq->idle) | ||
4162 | cpustat->system = cputime64_add(cpustat->system, tmp); | ||
4163 | else if (atomic_read(&rq->nr_iowait) > 0) | ||
4164 | cpustat->iowait = cputime64_add(cpustat->iowait, tmp); | ||
4165 | else | 4230 | else |
4166 | cpustat->idle = cputime64_add(cpustat->idle, tmp); | 4231 | cpustat->system = cputime64_add(cpustat->system, tmp); |
4232 | |||
4167 | /* Account for system time used */ | 4233 | /* Account for system time used */ |
4168 | acct_update_integrals(p); | 4234 | acct_update_integrals(p); |
4169 | } | 4235 | } |
4170 | 4236 | ||
4171 | /* | 4237 | /* |
4172 | * Account scaled system cpu time to a process. | 4238 | * Account for involuntary wait time. |
4173 | * @p: the process that the cpu time gets accounted to | 4239 | * @steal: the cpu time spent in involuntary wait |
4174 | * @hardirq_offset: the offset to subtract from hardirq_count() | ||
4175 | * @cputime: the cpu time spent in kernel space since the last update | ||
4176 | */ | 4240 | */ |
4177 | void account_system_time_scaled(struct task_struct *p, cputime_t cputime) | 4241 | void account_steal_time(cputime_t cputime) |
4178 | { | 4242 | { |
4179 | p->stimescaled = cputime_add(p->stimescaled, cputime); | 4243 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; |
4244 | cputime64_t cputime64 = cputime_to_cputime64(cputime); | ||
4245 | |||
4246 | cpustat->steal = cputime64_add(cpustat->steal, cputime64); | ||
4180 | } | 4247 | } |
4181 | 4248 | ||
4182 | /* | 4249 | /* |
4183 | * Account for involuntary wait time. | 4250 | * Account for idle time. |
4184 | * @p: the process from which the cpu time has been stolen | 4251 | * @cputime: the cpu time spent in idle wait |
4185 | * @steal: the cpu time spent in involuntary wait | ||
4186 | */ | 4252 | */ |
4187 | void account_steal_time(struct task_struct *p, cputime_t steal) | 4253 | void account_idle_time(cputime_t cputime) |
4188 | { | 4254 | { |
4189 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; | 4255 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; |
4190 | cputime64_t tmp = cputime_to_cputime64(steal); | 4256 | cputime64_t cputime64 = cputime_to_cputime64(cputime); |
4191 | struct rq *rq = this_rq(); | 4257 | struct rq *rq = this_rq(); |
4192 | 4258 | ||
4193 | if (p == rq->idle) { | 4259 | if (atomic_read(&rq->nr_iowait) > 0) |
4194 | p->stime = cputime_add(p->stime, steal); | 4260 | cpustat->iowait = cputime64_add(cpustat->iowait, cputime64); |
4195 | account_group_system_time(p, steal); | 4261 | else |
4196 | if (atomic_read(&rq->nr_iowait) > 0) | 4262 | cpustat->idle = cputime64_add(cpustat->idle, cputime64); |
4197 | cpustat->iowait = cputime64_add(cpustat->iowait, tmp); | 4263 | } |
4198 | else | 4264 | |
4199 | cpustat->idle = cputime64_add(cpustat->idle, tmp); | 4265 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING |
4200 | } else | 4266 | |
4201 | cpustat->steal = cputime64_add(cpustat->steal, tmp); | 4267 | /* |
4268 | * Account a single tick of cpu time. | ||
4269 | * @p: the process that the cpu time gets accounted to | ||
4270 | * @user_tick: indicates if the tick is a user or a system tick | ||
4271 | */ | ||
4272 | void account_process_tick(struct task_struct *p, int user_tick) | ||
4273 | { | ||
4274 | cputime_t one_jiffy = jiffies_to_cputime(1); | ||
4275 | cputime_t one_jiffy_scaled = cputime_to_scaled(one_jiffy); | ||
4276 | struct rq *rq = this_rq(); | ||
4277 | |||
4278 | if (user_tick) | ||
4279 | account_user_time(p, one_jiffy, one_jiffy_scaled); | ||
4280 | else if (p != rq->idle) | ||
4281 | account_system_time(p, HARDIRQ_OFFSET, one_jiffy, | ||
4282 | one_jiffy_scaled); | ||
4283 | else | ||
4284 | account_idle_time(one_jiffy); | ||
4285 | } | ||
4286 | |||
4287 | /* | ||
4288 | * Account multiple ticks of steal time. | ||
4289 | * @p: the process from which the cpu time has been stolen | ||
4290 | * @ticks: number of stolen ticks | ||
4291 | */ | ||
4292 | void account_steal_ticks(unsigned long ticks) | ||
4293 | { | ||
4294 | account_steal_time(jiffies_to_cputime(ticks)); | ||
4202 | } | 4295 | } |
4203 | 4296 | ||
4204 | /* | 4297 | /* |
4298 | * Account multiple ticks of idle time. | ||
4299 | * @ticks: number of stolen ticks | ||
4300 | */ | ||
4301 | void account_idle_ticks(unsigned long ticks) | ||
4302 | { | ||
4303 | account_idle_time(jiffies_to_cputime(ticks)); | ||
4304 | } | ||
4305 | |||
4306 | #endif | ||
4307 | |||
4308 | /* | ||
4205 | * Use precise platform statistics if available: | 4309 | * Use precise platform statistics if available: |
4206 | */ | 4310 | */ |
4207 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 4311 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
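
The accounting rework above folds the old *_scaled helpers into account_user_time()/account_system_time() (the scaled value becomes an extra argument), splits idle and steal time into their own helpers that no longer take a task, and introduces account_process_tick()/account_steal_ticks()/account_idle_ticks() as the entry points used when CONFIG_VIRT_CPU_ACCOUNTING is not set. A short usage sketch of those entry points (the caller is illustrative, not from this patch):

#include <linux/kernel_stat.h>
#include <linux/sched.h>

/* Illustrative callers of the reworked accounting entry points. */
static void accounting_examples(struct task_struct *p)
{
	/* charge one timer tick as user time (or system/idle as appropriate) */
	account_process_tick(p, /* user_tick = */ 1);

	/* bulk-charge jiffies spent idle, e.g. after a long nohz sleep */
	account_idle_ticks(16);

	/* time stolen by a hypervisor, now accounted without a task */
	account_steal_time(jiffies_to_cputime(4));
}
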
@@ -4328,7 +4432,7 @@ void __kprobes sub_preempt_count(int val) | |||
4328 | /* | 4432 | /* |
4329 | * Underflow? | 4433 | * Underflow? |
4330 | */ | 4434 | */ |
4331 | if (DEBUG_LOCKS_WARN_ON(val > preempt_count())) | 4435 | if (DEBUG_LOCKS_WARN_ON(val > preempt_count() - (!!kernel_locked()))) |
4332 | return; | 4436 | return; |
4333 | /* | 4437 | /* |
4334 | * Is the spinlock portion underflowing? | 4438 | * Is the spinlock portion underflowing? |
@@ -5404,10 +5508,9 @@ out_unlock: | |||
5404 | return retval; | 5508 | return retval; |
5405 | } | 5509 | } |
5406 | 5510 | ||
5407 | long sched_setaffinity(pid_t pid, const cpumask_t *in_mask) | 5511 | long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) |
5408 | { | 5512 | { |
5409 | cpumask_t cpus_allowed; | 5513 | cpumask_var_t cpus_allowed, new_mask; |
5410 | cpumask_t new_mask = *in_mask; | ||
5411 | struct task_struct *p; | 5514 | struct task_struct *p; |
5412 | int retval; | 5515 | int retval; |
5413 | 5516 | ||
@@ -5429,6 +5532,14 @@ long sched_setaffinity(pid_t pid, const cpumask_t *in_mask) | |||
5429 | get_task_struct(p); | 5532 | get_task_struct(p); |
5430 | read_unlock(&tasklist_lock); | 5533 | read_unlock(&tasklist_lock); |
5431 | 5534 | ||
5535 | if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { | ||
5536 | retval = -ENOMEM; | ||
5537 | goto out_put_task; | ||
5538 | } | ||
5539 | if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) { | ||
5540 | retval = -ENOMEM; | ||
5541 | goto out_free_cpus_allowed; | ||
5542 | } | ||
5432 | retval = -EPERM; | 5543 | retval = -EPERM; |
5433 | if (!check_same_owner(p) && !capable(CAP_SYS_NICE)) | 5544 | if (!check_same_owner(p) && !capable(CAP_SYS_NICE)) |
5434 | goto out_unlock; | 5545 | goto out_unlock; |
@@ -5437,37 +5548,41 @@ long sched_setaffinity(pid_t pid, const cpumask_t *in_mask) | |||
5437 | if (retval) | 5548 | if (retval) |
5438 | goto out_unlock; | 5549 | goto out_unlock; |
5439 | 5550 | ||
5440 | cpuset_cpus_allowed(p, &cpus_allowed); | 5551 | cpuset_cpus_allowed(p, cpus_allowed); |
5441 | cpus_and(new_mask, new_mask, cpus_allowed); | 5552 | cpumask_and(new_mask, in_mask, cpus_allowed); |
5442 | again: | 5553 | again: |
5443 | retval = set_cpus_allowed_ptr(p, &new_mask); | 5554 | retval = set_cpus_allowed_ptr(p, new_mask); |
5444 | 5555 | ||
5445 | if (!retval) { | 5556 | if (!retval) { |
5446 | cpuset_cpus_allowed(p, &cpus_allowed); | 5557 | cpuset_cpus_allowed(p, cpus_allowed); |
5447 | if (!cpus_subset(new_mask, cpus_allowed)) { | 5558 | if (!cpumask_subset(new_mask, cpus_allowed)) { |
5448 | /* | 5559 | /* |
5449 | * We must have raced with a concurrent cpuset | 5560 | * We must have raced with a concurrent cpuset |
5450 | * update. Just reset the cpus_allowed to the | 5561 | * update. Just reset the cpus_allowed to the |
5451 | * cpuset's cpus_allowed | 5562 | * cpuset's cpus_allowed |
5452 | */ | 5563 | */ |
5453 | new_mask = cpus_allowed; | 5564 | cpumask_copy(new_mask, cpus_allowed); |
5454 | goto again; | 5565 | goto again; |
5455 | } | 5566 | } |
5456 | } | 5567 | } |
5457 | out_unlock: | 5568 | out_unlock: |
5569 | free_cpumask_var(new_mask); | ||
5570 | out_free_cpus_allowed: | ||
5571 | free_cpumask_var(cpus_allowed); | ||
5572 | out_put_task: | ||
5458 | put_task_struct(p); | 5573 | put_task_struct(p); |
5459 | put_online_cpus(); | 5574 | put_online_cpus(); |
5460 | return retval; | 5575 | return retval; |
5461 | } | 5576 | } |
5462 | 5577 | ||
5463 | static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, | 5578 | static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, |
5464 | cpumask_t *new_mask) | 5579 | struct cpumask *new_mask) |
5465 | { | 5580 | { |
5466 | if (len < sizeof(cpumask_t)) { | 5581 | if (len < cpumask_size()) |
5467 | memset(new_mask, 0, sizeof(cpumask_t)); | 5582 | cpumask_clear(new_mask); |
5468 | } else if (len > sizeof(cpumask_t)) { | 5583 | else if (len > cpumask_size()) |
5469 | len = sizeof(cpumask_t); | 5584 | len = cpumask_size(); |
5470 | } | 5585 | |
5471 | return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0; | 5586 | return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0; |
5472 | } | 5587 | } |
5473 | 5588 | ||
@@ -5480,17 +5595,20 @@ static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, | |||
5480 | asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len, | 5595 | asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len, |
5481 | unsigned long __user *user_mask_ptr) | 5596 | unsigned long __user *user_mask_ptr) |
5482 | { | 5597 | { |
5483 | cpumask_t new_mask; | 5598 | cpumask_var_t new_mask; |
5484 | int retval; | 5599 | int retval; |
5485 | 5600 | ||
5486 | retval = get_user_cpu_mask(user_mask_ptr, len, &new_mask); | 5601 | if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) |
5487 | if (retval) | 5602 | return -ENOMEM; |
5488 | return retval; | ||
5489 | 5603 | ||
5490 | return sched_setaffinity(pid, &new_mask); | 5604 | retval = get_user_cpu_mask(user_mask_ptr, len, new_mask); |
5605 | if (retval == 0) | ||
5606 | retval = sched_setaffinity(pid, new_mask); | ||
5607 | free_cpumask_var(new_mask); | ||
5608 | return retval; | ||
5491 | } | 5609 | } |
5492 | 5610 | ||
5493 | long sched_getaffinity(pid_t pid, cpumask_t *mask) | 5611 | long sched_getaffinity(pid_t pid, struct cpumask *mask) |
5494 | { | 5612 | { |
5495 | struct task_struct *p; | 5613 | struct task_struct *p; |
5496 | int retval; | 5614 | int retval; |
@@ -5507,7 +5625,7 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask) | |||
5507 | if (retval) | 5625 | if (retval) |
5508 | goto out_unlock; | 5626 | goto out_unlock; |
5509 | 5627 | ||
5510 | cpus_and(*mask, p->cpus_allowed, cpu_online_map); | 5628 | cpumask_and(mask, &p->cpus_allowed, cpu_online_mask); |
5511 | 5629 | ||
5512 | out_unlock: | 5630 | out_unlock: |
5513 | read_unlock(&tasklist_lock); | 5631 | read_unlock(&tasklist_lock); |
@@ -5526,19 +5644,24 @@ asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len, | |||
5526 | unsigned long __user *user_mask_ptr) | 5644 | unsigned long __user *user_mask_ptr) |
5527 | { | 5645 | { |
5528 | int ret; | 5646 | int ret; |
5529 | cpumask_t mask; | 5647 | cpumask_var_t mask; |
5530 | 5648 | ||
5531 | if (len < sizeof(cpumask_t)) | 5649 | if (len < cpumask_size()) |
5532 | return -EINVAL; | 5650 | return -EINVAL; |
5533 | 5651 | ||
5534 | ret = sched_getaffinity(pid, &mask); | 5652 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) |
5535 | if (ret < 0) | 5653 | return -ENOMEM; |
5536 | return ret; | ||
5537 | 5654 | ||
5538 | if (copy_to_user(user_mask_ptr, &mask, sizeof(cpumask_t))) | 5655 | ret = sched_getaffinity(pid, mask); |
5539 | return -EFAULT; | 5656 | if (ret == 0) { |
5657 | if (copy_to_user(user_mask_ptr, mask, cpumask_size())) | ||
5658 | ret = -EFAULT; | ||
5659 | else | ||
5660 | ret = cpumask_size(); | ||
5661 | } | ||
5662 | free_cpumask_var(mask); | ||
5540 | 5663 | ||
5541 | return sizeof(cpumask_t); | 5664 | return ret; |
5542 | } | 5665 | } |
5543 | 5666 | ||
5544 | /** | 5667 | /** |
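
None of this changes the user-visible contract of the affinity syscalls: a caller still passes a byte length and a bitmask, and on success sys_sched_getaffinity now reports cpumask_size() (the kernel's mask size in bytes) through the new API instead of sizeof(cpumask_t). For reference, ordinary application code keeps using the glibc wrappers, e.g.:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(0, &set);			/* pin ourselves to CPU 0 */
	if (sched_setaffinity(0, sizeof(set), &set) != 0)
		perror("sched_setaffinity");

	if (sched_getaffinity(0, sizeof(set), &set) == 0)
		printf("allowed CPUs: %d\n", CPU_COUNT(&set));

	return 0;
}
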
@@ -5880,7 +6003,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) | |||
5880 | idle->se.exec_start = sched_clock(); | 6003 | idle->se.exec_start = sched_clock(); |
5881 | 6004 | ||
5882 | idle->prio = idle->normal_prio = MAX_PRIO; | 6005 | idle->prio = idle->normal_prio = MAX_PRIO; |
5883 | idle->cpus_allowed = cpumask_of_cpu(cpu); | 6006 | cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu)); |
5884 | __set_task_cpu(idle, cpu); | 6007 | __set_task_cpu(idle, cpu); |
5885 | 6008 | ||
5886 | rq->curr = rq->idle = idle; | 6009 | rq->curr = rq->idle = idle; |
@@ -5907,9 +6030,9 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) | |||
5907 | * indicates which cpus entered this state. This is used | 6030 | * indicates which cpus entered this state. This is used |
5908 | * in the rcu update to wait only for active cpus. For system | 6031 | * in the rcu update to wait only for active cpus. For system |
5909 | * which do not switch off the HZ timer nohz_cpu_mask should | 6032 | * which do not switch off the HZ timer nohz_cpu_mask should |
5910 | * always be CPU_MASK_NONE. | 6033 | * always be CPU_BITS_NONE. |
5911 | */ | 6034 | */ |
5912 | cpumask_t nohz_cpu_mask = CPU_MASK_NONE; | 6035 | cpumask_var_t nohz_cpu_mask; |
5913 | 6036 | ||
5914 | /* | 6037 | /* |
5915 | * Increase the granularity value when there are more CPUs, | 6038 | * Increase the granularity value when there are more CPUs, |
@@ -5964,7 +6087,7 @@ static inline void sched_init_granularity(void) | |||
5964 | * task must not exit() & deallocate itself prematurely. The | 6087 | * task must not exit() & deallocate itself prematurely. The |
5965 | * call is not atomic; no spinlocks may be held. | 6088 | * call is not atomic; no spinlocks may be held. |
5966 | */ | 6089 | */ |
5967 | int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask) | 6090 | int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) |
5968 | { | 6091 | { |
5969 | struct migration_req req; | 6092 | struct migration_req req; |
5970 | unsigned long flags; | 6093 | unsigned long flags; |
@@ -5972,13 +6095,13 @@ int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask) | |||
5972 | int ret = 0; | 6095 | int ret = 0; |
5973 | 6096 | ||
5974 | rq = task_rq_lock(p, &flags); | 6097 | rq = task_rq_lock(p, &flags); |
5975 | if (!cpus_intersects(*new_mask, cpu_online_map)) { | 6098 | if (!cpumask_intersects(new_mask, cpu_online_mask)) { |
5976 | ret = -EINVAL; | 6099 | ret = -EINVAL; |
5977 | goto out; | 6100 | goto out; |
5978 | } | 6101 | } |
5979 | 6102 | ||
5980 | if (unlikely((p->flags & PF_THREAD_BOUND) && p != current && | 6103 | if (unlikely((p->flags & PF_THREAD_BOUND) && p != current && |
5981 | !cpus_equal(p->cpus_allowed, *new_mask))) { | 6104 | !cpumask_equal(&p->cpus_allowed, new_mask))) { |
5982 | ret = -EINVAL; | 6105 | ret = -EINVAL; |
5983 | goto out; | 6106 | goto out; |
5984 | } | 6107 | } |
@@ -5986,15 +6109,15 @@ int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask) | |||
5986 | if (p->sched_class->set_cpus_allowed) | 6109 | if (p->sched_class->set_cpus_allowed) |
5987 | p->sched_class->set_cpus_allowed(p, new_mask); | 6110 | p->sched_class->set_cpus_allowed(p, new_mask); |
5988 | else { | 6111 | else { |
5989 | p->cpus_allowed = *new_mask; | 6112 | cpumask_copy(&p->cpus_allowed, new_mask); |
5990 | p->rt.nr_cpus_allowed = cpus_weight(*new_mask); | 6113 | p->rt.nr_cpus_allowed = cpumask_weight(new_mask); |
5991 | } | 6114 | } |
5992 | 6115 | ||
5993 | /* Can the task run on the task's current CPU? If so, we're done */ | 6116 | /* Can the task run on the task's current CPU? If so, we're done */ |
5994 | if (cpu_isset(task_cpu(p), *new_mask)) | 6117 | if (cpumask_test_cpu(task_cpu(p), new_mask)) |
5995 | goto out; | 6118 | goto out; |
5996 | 6119 | ||
5997 | if (migrate_task(p, any_online_cpu(*new_mask), &req)) { | 6120 | if (migrate_task(p, cpumask_any_and(cpu_online_mask, new_mask), &req)) { |
5998 | /* Need help from migration thread: drop lock and wait. */ | 6121 | /* Need help from migration thread: drop lock and wait. */ |
5999 | task_rq_unlock(rq, &flags); | 6122 | task_rq_unlock(rq, &flags); |
6000 | wake_up_process(rq->migration_thread); | 6123 | wake_up_process(rq->migration_thread); |
@@ -6036,7 +6159,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) | |||
6036 | if (task_cpu(p) != src_cpu) | 6159 | if (task_cpu(p) != src_cpu) |
6037 | goto done; | 6160 | goto done; |
6038 | /* Affinity changed (again). */ | 6161 | /* Affinity changed (again). */ |
6039 | if (!cpu_isset(dest_cpu, p->cpus_allowed)) | 6162 | if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) |
6040 | goto fail; | 6163 | goto fail; |
6041 | 6164 | ||
6042 | on_rq = p->se.on_rq; | 6165 | on_rq = p->se.on_rq; |
@@ -6133,50 +6256,41 @@ static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu) | |||
6133 | */ | 6256 | */ |
6134 | static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) | 6257 | static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) |
6135 | { | 6258 | { |
6136 | unsigned long flags; | ||
6137 | cpumask_t mask; | ||
6138 | struct rq *rq; | ||
6139 | int dest_cpu; | 6259 | int dest_cpu; |
6260 | const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(dead_cpu)); | ||
6140 | 6261 | ||
6141 | do { | 6262 | again: |
6142 | /* On same node? */ | 6263 | /* Look for allowed, online CPU in same node. */ |
6143 | mask = node_to_cpumask(cpu_to_node(dead_cpu)); | 6264 | for_each_cpu_and(dest_cpu, nodemask, cpu_online_mask) |
6144 | cpus_and(mask, mask, p->cpus_allowed); | 6265 | if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) |
6145 | dest_cpu = any_online_cpu(mask); | 6266 | goto move; |
6146 | 6267 | ||
6147 | /* On any allowed CPU? */ | 6268 | /* Any allowed, online CPU? */ |
6148 | if (dest_cpu >= nr_cpu_ids) | 6269 | dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_online_mask); |
6149 | dest_cpu = any_online_cpu(p->cpus_allowed); | 6270 | if (dest_cpu < nr_cpu_ids) |
6271 | goto move; | ||
6150 | 6272 | ||
6151 | /* No more Mr. Nice Guy. */ | 6273 | /* No more Mr. Nice Guy. */ |
6152 | if (dest_cpu >= nr_cpu_ids) { | 6274 | if (dest_cpu >= nr_cpu_ids) { |
6153 | cpumask_t cpus_allowed; | 6275 | cpuset_cpus_allowed_locked(p, &p->cpus_allowed); |
6276 | dest_cpu = cpumask_any_and(cpu_online_mask, &p->cpus_allowed); | ||
6154 | 6277 | ||
6155 | cpuset_cpus_allowed_locked(p, &cpus_allowed); | 6278 | /* |
6156 | /* | 6279 | * Don't tell them about moving exiting tasks or |
6157 | * Try to stay on the same cpuset, where the | 6280 | * kernel threads (both mm NULL), since they never |
6158 | * current cpuset may be a subset of all cpus. | 6281 | * leave kernel. |
6159 | * The cpuset_cpus_allowed_locked() variant of | 6282 | */ |
6160 | * cpuset_cpus_allowed() will not block. It must be | 6283 | if (p->mm && printk_ratelimit()) { |
6161 | * called within calls to cpuset_lock/cpuset_unlock. | 6284 | printk(KERN_INFO "process %d (%s) no " |
6162 | */ | 6285 | "longer affine to cpu%d\n", |
6163 | rq = task_rq_lock(p, &flags); | 6286 | task_pid_nr(p), p->comm, dead_cpu); |
6164 | p->cpus_allowed = cpus_allowed; | ||
6165 | dest_cpu = any_online_cpu(p->cpus_allowed); | ||
6166 | task_rq_unlock(rq, &flags); | ||
6167 | |||
6168 | /* | ||
6169 | * Don't tell them about moving exiting tasks or | ||
6170 | * kernel threads (both mm NULL), since they never | ||
6171 | * leave kernel. | ||
6172 | */ | ||
6173 | if (p->mm && printk_ratelimit()) { | ||
6174 | printk(KERN_INFO "process %d (%s) no " | ||
6175 | "longer affine to cpu%d\n", | ||
6176 | task_pid_nr(p), p->comm, dead_cpu); | ||
6177 | } | ||
6178 | } | 6287 | } |
6179 | } while (!__migrate_task_irq(p, dead_cpu, dest_cpu)); | 6288 | } |
6289 | |||
6290 | move: | ||
6291 | /* It can have affinity changed while we were choosing. */ | ||
6292 | if (unlikely(!__migrate_task_irq(p, dead_cpu, dest_cpu))) | ||
6293 | goto again; | ||
6180 | } | 6294 | } |
6181 | 6295 | ||
6182 | /* | 6296 | /* |
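Note on the hunk above: the rewritten move_task_off_dead_cpu() replaces the do/while loop with a goto-based retry. It prefers an allowed online CPU on the dead CPU's node, then any allowed online CPU, and only as a last resort widens p->cpus_allowed via the cpuset; if __migrate_task_irq() loses a race with a concurrent affinity change, it starts over. A minimal sketch of just the selection order, using the cpumask helpers visible in the hunk (pick_dest_cpu is an illustrative name, not part of the patch):

	/* Sketch only: same fallback order as the hunk above. */
	static int pick_dest_cpu(struct task_struct *p, int dead_cpu)
	{
		const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(dead_cpu));
		int cpu;

		/* 1) allowed, online CPU on the dead CPU's node */
		for_each_cpu_and(cpu, nodemask, cpu_online_mask)
			if (cpumask_test_cpu(cpu, &p->cpus_allowed))
				return cpu;

		/* 2) any allowed, online CPU */
		cpu = cpumask_any_and(&p->cpus_allowed, cpu_online_mask);
		if (cpu < nr_cpu_ids)
			return cpu;

		/* 3) widen the affinity via the cpuset, then take anything online */
		cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
		return cpumask_any_and(cpu_online_mask, &p->cpus_allowed);
	}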
@@ -6188,7 +6302,7 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) | |||
6188 | */ | 6302 | */ |
6189 | static void migrate_nr_uninterruptible(struct rq *rq_src) | 6303 | static void migrate_nr_uninterruptible(struct rq *rq_src) |
6190 | { | 6304 | { |
6191 | struct rq *rq_dest = cpu_rq(any_online_cpu(*CPU_MASK_ALL_PTR)); | 6305 | struct rq *rq_dest = cpu_rq(cpumask_any(cpu_online_mask)); |
6192 | unsigned long flags; | 6306 | unsigned long flags; |
6193 | 6307 | ||
6194 | local_irq_save(flags); | 6308 | local_irq_save(flags); |
@@ -6478,7 +6592,7 @@ static void set_rq_online(struct rq *rq) | |||
6478 | if (!rq->online) { | 6592 | if (!rq->online) { |
6479 | const struct sched_class *class; | 6593 | const struct sched_class *class; |
6480 | 6594 | ||
6481 | cpu_set(rq->cpu, rq->rd->online); | 6595 | cpumask_set_cpu(rq->cpu, rq->rd->online); |
6482 | rq->online = 1; | 6596 | rq->online = 1; |
6483 | 6597 | ||
6484 | for_each_class(class) { | 6598 | for_each_class(class) { |
@@ -6498,7 +6612,7 @@ static void set_rq_offline(struct rq *rq) | |||
6498 | class->rq_offline(rq); | 6612 | class->rq_offline(rq); |
6499 | } | 6613 | } |
6500 | 6614 | ||
6501 | cpu_clear(rq->cpu, rq->rd->online); | 6615 | cpumask_clear_cpu(rq->cpu, rq->rd->online); |
6502 | rq->online = 0; | 6616 | rq->online = 0; |
6503 | } | 6617 | } |
6504 | } | 6618 | } |
@@ -6539,7 +6653,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
6539 | rq = cpu_rq(cpu); | 6653 | rq = cpu_rq(cpu); |
6540 | spin_lock_irqsave(&rq->lock, flags); | 6654 | spin_lock_irqsave(&rq->lock, flags); |
6541 | if (rq->rd) { | 6655 | if (rq->rd) { |
6542 | BUG_ON(!cpu_isset(cpu, rq->rd->span)); | 6656 | BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); |
6543 | 6657 | ||
6544 | set_rq_online(rq); | 6658 | set_rq_online(rq); |
6545 | } | 6659 | } |
@@ -6553,7 +6667,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
6553 | break; | 6667 | break; |
6554 | /* Unbind it from offline cpu so it can run. Fall thru. */ | 6668 | /* Unbind it from offline cpu so it can run. Fall thru. */ |
6555 | kthread_bind(cpu_rq(cpu)->migration_thread, | 6669 | kthread_bind(cpu_rq(cpu)->migration_thread, |
6556 | any_online_cpu(cpu_online_map)); | 6670 | cpumask_any(cpu_online_mask)); |
6557 | kthread_stop(cpu_rq(cpu)->migration_thread); | 6671 | kthread_stop(cpu_rq(cpu)->migration_thread); |
6558 | cpu_rq(cpu)->migration_thread = NULL; | 6672 | cpu_rq(cpu)->migration_thread = NULL; |
6559 | break; | 6673 | break; |
@@ -6603,7 +6717,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
6603 | rq = cpu_rq(cpu); | 6717 | rq = cpu_rq(cpu); |
6604 | spin_lock_irqsave(&rq->lock, flags); | 6718 | spin_lock_irqsave(&rq->lock, flags); |
6605 | if (rq->rd) { | 6719 | if (rq->rd) { |
6606 | BUG_ON(!cpu_isset(cpu, rq->rd->span)); | 6720 | BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); |
6607 | set_rq_offline(rq); | 6721 | set_rq_offline(rq); |
6608 | } | 6722 | } |
6609 | spin_unlock_irqrestore(&rq->lock, flags); | 6723 | spin_unlock_irqrestore(&rq->lock, flags); |
@@ -6642,13 +6756,13 @@ early_initcall(migration_init); | |||
6642 | #ifdef CONFIG_SCHED_DEBUG | 6756 | #ifdef CONFIG_SCHED_DEBUG |
6643 | 6757 | ||
6644 | static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, | 6758 | static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, |
6645 | cpumask_t *groupmask) | 6759 | struct cpumask *groupmask) |
6646 | { | 6760 | { |
6647 | struct sched_group *group = sd->groups; | 6761 | struct sched_group *group = sd->groups; |
6648 | char str[256]; | 6762 | char str[256]; |
6649 | 6763 | ||
6650 | cpulist_scnprintf(str, sizeof(str), sd->span); | 6764 | cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd)); |
6651 | cpus_clear(*groupmask); | 6765 | cpumask_clear(groupmask); |
6652 | 6766 | ||
6653 | printk(KERN_DEBUG "%*s domain %d: ", level, "", level); | 6767 | printk(KERN_DEBUG "%*s domain %d: ", level, "", level); |
6654 | 6768 | ||
@@ -6662,11 +6776,11 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, | |||
6662 | 6776 | ||
6663 | printk(KERN_CONT "span %s level %s\n", str, sd->name); | 6777 | printk(KERN_CONT "span %s level %s\n", str, sd->name); |
6664 | 6778 | ||
6665 | if (!cpu_isset(cpu, sd->span)) { | 6779 | if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) { |
6666 | printk(KERN_ERR "ERROR: domain->span does not contain " | 6780 | printk(KERN_ERR "ERROR: domain->span does not contain " |
6667 | "CPU%d\n", cpu); | 6781 | "CPU%d\n", cpu); |
6668 | } | 6782 | } |
6669 | if (!cpu_isset(cpu, group->cpumask)) { | 6783 | if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) { |
6670 | printk(KERN_ERR "ERROR: domain->groups does not contain" | 6784 | printk(KERN_ERR "ERROR: domain->groups does not contain" |
6671 | " CPU%d\n", cpu); | 6785 | " CPU%d\n", cpu); |
6672 | } | 6786 | } |
@@ -6686,31 +6800,32 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, | |||
6686 | break; | 6800 | break; |
6687 | } | 6801 | } |
6688 | 6802 | ||
6689 | if (!cpus_weight(group->cpumask)) { | 6803 | if (!cpumask_weight(sched_group_cpus(group))) { |
6690 | printk(KERN_CONT "\n"); | 6804 | printk(KERN_CONT "\n"); |
6691 | printk(KERN_ERR "ERROR: empty group\n"); | 6805 | printk(KERN_ERR "ERROR: empty group\n"); |
6692 | break; | 6806 | break; |
6693 | } | 6807 | } |
6694 | 6808 | ||
6695 | if (cpus_intersects(*groupmask, group->cpumask)) { | 6809 | if (cpumask_intersects(groupmask, sched_group_cpus(group))) { |
6696 | printk(KERN_CONT "\n"); | 6810 | printk(KERN_CONT "\n"); |
6697 | printk(KERN_ERR "ERROR: repeated CPUs\n"); | 6811 | printk(KERN_ERR "ERROR: repeated CPUs\n"); |
6698 | break; | 6812 | break; |
6699 | } | 6813 | } |
6700 | 6814 | ||
6701 | cpus_or(*groupmask, *groupmask, group->cpumask); | 6815 | cpumask_or(groupmask, groupmask, sched_group_cpus(group)); |
6702 | 6816 | ||
6703 | cpulist_scnprintf(str, sizeof(str), group->cpumask); | 6817 | cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group)); |
6704 | printk(KERN_CONT " %s", str); | 6818 | printk(KERN_CONT " %s", str); |
6705 | 6819 | ||
6706 | group = group->next; | 6820 | group = group->next; |
6707 | } while (group != sd->groups); | 6821 | } while (group != sd->groups); |
6708 | printk(KERN_CONT "\n"); | 6822 | printk(KERN_CONT "\n"); |
6709 | 6823 | ||
6710 | if (!cpus_equal(sd->span, *groupmask)) | 6824 | if (!cpumask_equal(sched_domain_span(sd), groupmask)) |
6711 | printk(KERN_ERR "ERROR: groups don't span domain->span\n"); | 6825 | printk(KERN_ERR "ERROR: groups don't span domain->span\n"); |
6712 | 6826 | ||
6713 | if (sd->parent && !cpus_subset(*groupmask, sd->parent->span)) | 6827 | if (sd->parent && |
6828 | !cpumask_subset(groupmask, sched_domain_span(sd->parent))) | ||
6714 | printk(KERN_ERR "ERROR: parent span is not a superset " | 6829 | printk(KERN_ERR "ERROR: parent span is not a superset " |
6715 | "of domain->span\n"); | 6830 | "of domain->span\n"); |
6716 | return 0; | 6831 | return 0; |
@@ -6718,7 +6833,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, | |||
6718 | 6833 | ||
6719 | static void sched_domain_debug(struct sched_domain *sd, int cpu) | 6834 | static void sched_domain_debug(struct sched_domain *sd, int cpu) |
6720 | { | 6835 | { |
6721 | cpumask_t *groupmask; | 6836 | cpumask_var_t groupmask; |
6722 | int level = 0; | 6837 | int level = 0; |
6723 | 6838 | ||
6724 | if (!sd) { | 6839 | if (!sd) { |
@@ -6728,8 +6843,7 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu) | |||
6728 | 6843 | ||
6729 | printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); | 6844 | printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); |
6730 | 6845 | ||
6731 | groupmask = kmalloc(sizeof(cpumask_t), GFP_KERNEL); | 6846 | if (!alloc_cpumask_var(&groupmask, GFP_KERNEL)) { |
6732 | if (!groupmask) { | ||
6733 | printk(KERN_DEBUG "Cannot load-balance (out of memory)\n"); | 6847 | printk(KERN_DEBUG "Cannot load-balance (out of memory)\n"); |
6734 | return; | 6848 | return; |
6735 | } | 6849 | } |
@@ -6742,7 +6856,7 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu) | |||
6742 | if (!sd) | 6856 | if (!sd) |
6743 | break; | 6857 | break; |
6744 | } | 6858 | } |
6745 | kfree(groupmask); | 6859 | free_cpumask_var(groupmask); |
6746 | } | 6860 | } |
6747 | #else /* !CONFIG_SCHED_DEBUG */ | 6861 | #else /* !CONFIG_SCHED_DEBUG */ |
6748 | # define sched_domain_debug(sd, cpu) do { } while (0) | 6862 | # define sched_domain_debug(sd, cpu) do { } while (0) |
@@ -6750,7 +6864,7 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu) | |||
6750 | 6864 | ||
6751 | static int sd_degenerate(struct sched_domain *sd) | 6865 | static int sd_degenerate(struct sched_domain *sd) |
6752 | { | 6866 | { |
6753 | if (cpus_weight(sd->span) == 1) | 6867 | if (cpumask_weight(sched_domain_span(sd)) == 1) |
6754 | return 1; | 6868 | return 1; |
6755 | 6869 | ||
6756 | /* Following flags need at least 2 groups */ | 6870 | /* Following flags need at least 2 groups */ |
@@ -6781,7 +6895,7 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) | |||
6781 | if (sd_degenerate(parent)) | 6895 | if (sd_degenerate(parent)) |
6782 | return 1; | 6896 | return 1; |
6783 | 6897 | ||
6784 | if (!cpus_equal(sd->span, parent->span)) | 6898 | if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent))) |
6785 | return 0; | 6899 | return 0; |
6786 | 6900 | ||
6787 | /* Does parent contain flags not in child? */ | 6901 | /* Does parent contain flags not in child? */ |
@@ -6805,6 +6919,16 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) | |||
6805 | return 1; | 6919 | return 1; |
6806 | } | 6920 | } |
6807 | 6921 | ||
6922 | static void free_rootdomain(struct root_domain *rd) | ||
6923 | { | ||
6924 | cpupri_cleanup(&rd->cpupri); | ||
6925 | |||
6926 | free_cpumask_var(rd->rto_mask); | ||
6927 | free_cpumask_var(rd->online); | ||
6928 | free_cpumask_var(rd->span); | ||
6929 | kfree(rd); | ||
6930 | } | ||
6931 | |||
6808 | static void rq_attach_root(struct rq *rq, struct root_domain *rd) | 6932 | static void rq_attach_root(struct rq *rq, struct root_domain *rd) |
6809 | { | 6933 | { |
6810 | unsigned long flags; | 6934 | unsigned long flags; |
@@ -6814,38 +6938,63 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd) | |||
6814 | if (rq->rd) { | 6938 | if (rq->rd) { |
6815 | struct root_domain *old_rd = rq->rd; | 6939 | struct root_domain *old_rd = rq->rd; |
6816 | 6940 | ||
6817 | if (cpu_isset(rq->cpu, old_rd->online)) | 6941 | if (cpumask_test_cpu(rq->cpu, old_rd->online)) |
6818 | set_rq_offline(rq); | 6942 | set_rq_offline(rq); |
6819 | 6943 | ||
6820 | cpu_clear(rq->cpu, old_rd->span); | 6944 | cpumask_clear_cpu(rq->cpu, old_rd->span); |
6821 | 6945 | ||
6822 | if (atomic_dec_and_test(&old_rd->refcount)) | 6946 | if (atomic_dec_and_test(&old_rd->refcount)) |
6823 | kfree(old_rd); | 6947 | free_rootdomain(old_rd); |
6824 | } | 6948 | } |
6825 | 6949 | ||
6826 | atomic_inc(&rd->refcount); | 6950 | atomic_inc(&rd->refcount); |
6827 | rq->rd = rd; | 6951 | rq->rd = rd; |
6828 | 6952 | ||
6829 | cpu_set(rq->cpu, rd->span); | 6953 | cpumask_set_cpu(rq->cpu, rd->span); |
6830 | if (cpu_isset(rq->cpu, cpu_online_map)) | 6954 | if (cpumask_test_cpu(rq->cpu, cpu_online_mask)) |
6831 | set_rq_online(rq); | 6955 | set_rq_online(rq); |
6832 | 6956 | ||
6833 | spin_unlock_irqrestore(&rq->lock, flags); | 6957 | spin_unlock_irqrestore(&rq->lock, flags); |
6834 | } | 6958 | } |
6835 | 6959 | ||
6836 | static void init_rootdomain(struct root_domain *rd) | 6960 | static int init_rootdomain(struct root_domain *rd, bool bootmem) |
6837 | { | 6961 | { |
6838 | memset(rd, 0, sizeof(*rd)); | 6962 | memset(rd, 0, sizeof(*rd)); |
6839 | 6963 | ||
6840 | cpus_clear(rd->span); | 6964 | if (bootmem) { |
6841 | cpus_clear(rd->online); | 6965 | alloc_bootmem_cpumask_var(&def_root_domain.span); |
6966 | alloc_bootmem_cpumask_var(&def_root_domain.online); | ||
6967 | alloc_bootmem_cpumask_var(&def_root_domain.rto_mask); | ||
6968 | cpupri_init(&rd->cpupri, true); | ||
6969 | return 0; | ||
6970 | } | ||
6971 | |||
6972 | if (!alloc_cpumask_var(&rd->span, GFP_KERNEL)) | ||
6973 | goto free_rd; | ||
6974 | if (!alloc_cpumask_var(&rd->online, GFP_KERNEL)) | ||
6975 | goto free_span; | ||
6976 | if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) | ||
6977 | goto free_online; | ||
6978 | |||
6979 | if (cpupri_init(&rd->cpupri, false) != 0) | ||
6980 | goto free_rto_mask; | ||
6981 | return 0; | ||
6842 | 6982 | ||
6843 | cpupri_init(&rd->cpupri); | 6983 | free_rto_mask: |
6984 | free_cpumask_var(rd->rto_mask); | ||
6985 | free_online: | ||
6986 | free_cpumask_var(rd->online); | ||
6987 | free_span: | ||
6988 | free_cpumask_var(rd->span); | ||
6989 | free_rd: | ||
6990 | kfree(rd); | ||
6991 | return -ENOMEM; | ||
6844 | } | 6992 | } |
6845 | 6993 | ||
6846 | static void init_defrootdomain(void) | 6994 | static void init_defrootdomain(void) |
6847 | { | 6995 | { |
6848 | init_rootdomain(&def_root_domain); | 6996 | init_rootdomain(&def_root_domain, true); |
6997 | |||
6849 | atomic_set(&def_root_domain.refcount, 1); | 6998 | atomic_set(&def_root_domain.refcount, 1); |
6850 | } | 6999 | } |
6851 | 7000 | ||
@@ -6857,7 +7006,10 @@ static struct root_domain *alloc_rootdomain(void) | |||
6857 | if (!rd) | 7006 | if (!rd) |
6858 | return NULL; | 7007 | return NULL; |
6859 | 7008 | ||
6860 | init_rootdomain(rd); | 7009 | if (init_rootdomain(rd, false) != 0) { |
7010 | kfree(rd); | ||
7011 | return NULL; | ||
7012 | } | ||
6861 | 7013 | ||
6862 | return rd; | 7014 | return rd; |
6863 | } | 7015 | } |
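The init_rootdomain()/free_rootdomain()/alloc_rootdomain() changes above follow the standard cpumask_var_t pattern: each mask is allocated with alloc_cpumask_var() (a no-op returning true when CONFIG_CPUMASK_OFFSTACK is not set), failures unwind the already-allocated masks in reverse order, and the boot-time default uses alloc_bootmem_cpumask_var(). A stripped-down sketch of the same idiom for a made-up structure (struct foo and its fields are illustrative only):

	struct foo {
		cpumask_var_t a;
		cpumask_var_t b;
	};

	static int init_foo(struct foo *f)
	{
		if (!alloc_cpumask_var(&f->a, GFP_KERNEL))
			return -ENOMEM;
		if (!alloc_cpumask_var(&f->b, GFP_KERNEL))
			goto free_a;
		return 0;

	free_a:
		free_cpumask_var(f->a);
		return -ENOMEM;
	}

	static void free_foo(struct foo *f)
	{
		/* free in reverse order of allocation */
		free_cpumask_var(f->b);
		free_cpumask_var(f->a);
	}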
@@ -6899,19 +7051,12 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) | |||
6899 | } | 7051 | } |
6900 | 7052 | ||
6901 | /* cpus with isolated domains */ | 7053 | /* cpus with isolated domains */ |
6902 | static cpumask_t cpu_isolated_map = CPU_MASK_NONE; | 7054 | static cpumask_var_t cpu_isolated_map; |
6903 | 7055 | ||
6904 | /* Setup the mask of cpus configured for isolated domains */ | 7056 | /* Setup the mask of cpus configured for isolated domains */ |
6905 | static int __init isolated_cpu_setup(char *str) | 7057 | static int __init isolated_cpu_setup(char *str) |
6906 | { | 7058 | { |
6907 | static int __initdata ints[NR_CPUS]; | 7059 | cpulist_parse(str, cpu_isolated_map); |
6908 | int i; | ||
6909 | |||
6910 | str = get_options(str, ARRAY_SIZE(ints), ints); | ||
6911 | cpus_clear(cpu_isolated_map); | ||
6912 | for (i = 1; i <= ints[0]; i++) | ||
6913 | if (ints[i] < NR_CPUS) | ||
6914 | cpu_set(ints[i], cpu_isolated_map); | ||
6915 | return 1; | 7060 | return 1; |
6916 | } | 7061 | } |
6917 | 7062 | ||
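The removed open-coded isolcpus= parser only accepted a plain comma-separated list of CPU numbers; cpulist_parse() also understands ranges. For example (values illustrative):

	/*
	 * "isolcpus=0"        -> CPU 0
	 * "isolcpus=1,3-5"    -> CPUs 1, 3, 4 and 5
	 * "isolcpus=0-2,8-10" -> CPUs 0, 1, 2, 8, 9 and 10
	 *
	 * The parsed CPUs end up in cpu_isolated_map and are later removed
	 * from doms_cur with cpumask_andnot(), so they never land in a
	 * sched domain.
	 */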
@@ -6920,42 +7065,43 @@ __setup("isolcpus=", isolated_cpu_setup); | |||
6920 | /* | 7065 | /* |
6921 | * init_sched_build_groups takes the cpumask we wish to span, and a pointer | 7066 | * init_sched_build_groups takes the cpumask we wish to span, and a pointer |
6922 | * to a function which identifies what group (along with sched group) a CPU | 7067 | * to a function which identifies what group (along with sched group) a CPU |
6923 | * belongs to. The return value of group_fn must be >= 0 and < NR_CPUS | 7068 | * belongs to. The return value of group_fn must be >= 0 and < nr_cpu_ids |

6924 | * (due to the fact that we keep track of groups covered with a cpumask_t). | 7069 | * (due to the fact that we keep track of groups covered with a struct cpumask). |
6925 | * | 7070 | * |
6926 | * init_sched_build_groups will build a circular linked list of the groups | 7071 | * init_sched_build_groups will build a circular linked list of the groups |
6927 | * covered by the given span, and will set each group's ->cpumask correctly, | 7072 | * covered by the given span, and will set each group's ->cpumask correctly, |
6928 | * and ->cpu_power to 0. | 7073 | * and ->cpu_power to 0. |
6929 | */ | 7074 | */ |
6930 | static void | 7075 | static void |
6931 | init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map, | 7076 | init_sched_build_groups(const struct cpumask *span, |
6932 | int (*group_fn)(int cpu, const cpumask_t *cpu_map, | 7077 | const struct cpumask *cpu_map, |
7078 | int (*group_fn)(int cpu, const struct cpumask *cpu_map, | ||
6933 | struct sched_group **sg, | 7079 | struct sched_group **sg, |
6934 | cpumask_t *tmpmask), | 7080 | struct cpumask *tmpmask), |
6935 | cpumask_t *covered, cpumask_t *tmpmask) | 7081 | struct cpumask *covered, struct cpumask *tmpmask) |
6936 | { | 7082 | { |
6937 | struct sched_group *first = NULL, *last = NULL; | 7083 | struct sched_group *first = NULL, *last = NULL; |
6938 | int i; | 7084 | int i; |
6939 | 7085 | ||
6940 | cpus_clear(*covered); | 7086 | cpumask_clear(covered); |
6941 | 7087 | ||
6942 | for_each_cpu_mask_nr(i, *span) { | 7088 | for_each_cpu(i, span) { |
6943 | struct sched_group *sg; | 7089 | struct sched_group *sg; |
6944 | int group = group_fn(i, cpu_map, &sg, tmpmask); | 7090 | int group = group_fn(i, cpu_map, &sg, tmpmask); |
6945 | int j; | 7091 | int j; |
6946 | 7092 | ||
6947 | if (cpu_isset(i, *covered)) | 7093 | if (cpumask_test_cpu(i, covered)) |
6948 | continue; | 7094 | continue; |
6949 | 7095 | ||
6950 | cpus_clear(sg->cpumask); | 7096 | cpumask_clear(sched_group_cpus(sg)); |
6951 | sg->__cpu_power = 0; | 7097 | sg->__cpu_power = 0; |
6952 | 7098 | ||
6953 | for_each_cpu_mask_nr(j, *span) { | 7099 | for_each_cpu(j, span) { |
6954 | if (group_fn(j, cpu_map, NULL, tmpmask) != group) | 7100 | if (group_fn(j, cpu_map, NULL, tmpmask) != group) |
6955 | continue; | 7101 | continue; |
6956 | 7102 | ||
6957 | cpu_set(j, *covered); | 7103 | cpumask_set_cpu(j, covered); |
6958 | cpu_set(j, sg->cpumask); | 7104 | cpumask_set_cpu(j, sched_group_cpus(sg)); |
6959 | } | 7105 | } |
6960 | if (!first) | 7106 | if (!first) |
6961 | first = sg; | 7107 | first = sg; |
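As the comment above says, init_sched_build_groups() links the groups covering the span into a circular list rooted at the first group built. Consumers walk that list with the do/while pattern already used by sched_domain_debug_one() earlier in this patch (sketch only):

	struct sched_group *sg = sd->groups;

	do {
		/* each group in the domain is visited exactly once */
		/* ... inspect sched_group_cpus(sg), sg->__cpu_power ... */
		sg = sg->next;
	} while (sg != sd->groups);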
@@ -7019,23 +7165,21 @@ static int find_next_best_node(int node, nodemask_t *used_nodes) | |||
7019 | * should be one that prevents unnecessary balancing, but also spreads tasks | 7165 | * should be one that prevents unnecessary balancing, but also spreads tasks |
7020 | * out optimally. | 7166 | * out optimally. |
7021 | */ | 7167 | */ |
7022 | static void sched_domain_node_span(int node, cpumask_t *span) | 7168 | static void sched_domain_node_span(int node, struct cpumask *span) |
7023 | { | 7169 | { |
7024 | nodemask_t used_nodes; | 7170 | nodemask_t used_nodes; |
7025 | node_to_cpumask_ptr(nodemask, node); | ||
7026 | int i; | 7171 | int i; |
7027 | 7172 | ||
7028 | cpus_clear(*span); | 7173 | cpumask_clear(span); |
7029 | nodes_clear(used_nodes); | 7174 | nodes_clear(used_nodes); |
7030 | 7175 | ||
7031 | cpus_or(*span, *span, *nodemask); | 7176 | cpumask_or(span, span, cpumask_of_node(node)); |
7032 | node_set(node, used_nodes); | 7177 | node_set(node, used_nodes); |
7033 | 7178 | ||
7034 | for (i = 1; i < SD_NODES_PER_DOMAIN; i++) { | 7179 | for (i = 1; i < SD_NODES_PER_DOMAIN; i++) { |
7035 | int next_node = find_next_best_node(node, &used_nodes); | 7180 | int next_node = find_next_best_node(node, &used_nodes); |
7036 | 7181 | ||
7037 | node_to_cpumask_ptr_next(nodemask, next_node); | 7182 | cpumask_or(span, span, cpumask_of_node(next_node)); |
7038 | cpus_or(*span, *span, *nodemask); | ||
7039 | } | 7183 | } |
7040 | } | 7184 | } |
7041 | #endif /* CONFIG_NUMA */ | 7185 | #endif /* CONFIG_NUMA */ |
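sched_domain_node_span() now builds the NUMA span by OR-ing whole-node cpumasks instead of going through node_to_cpumask_ptr. Illustrative expansion for node 0, assuming find_next_best_node() hands back nodes 1 and 2 on the first two iterations:

	/*
	 * span = cpumask_of_node(0)
	 *      | cpumask_of_node(1)
	 *      | cpumask_of_node(2)
	 *      | ...                 (up to SD_NODES_PER_DOMAIN nodes in total)
	 */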
@@ -7043,18 +7187,33 @@ static void sched_domain_node_span(int node, cpumask_t *span) | |||
7043 | int sched_smt_power_savings = 0, sched_mc_power_savings = 0; | 7187 | int sched_smt_power_savings = 0, sched_mc_power_savings = 0; |
7044 | 7188 | ||
7045 | /* | 7189 | /* |
7190 | * The cpus mask in sched_group and sched_domain hangs off the end. | ||
7191 | * FIXME: use cpumask_var_t or dynamic percpu alloc to avoid wasting space | ||
7192 | * for nr_cpu_ids < CONFIG_NR_CPUS. | ||
7193 | */ | ||
7194 | struct static_sched_group { | ||
7195 | struct sched_group sg; | ||
7196 | DECLARE_BITMAP(cpus, CONFIG_NR_CPUS); | ||
7197 | }; | ||
7198 | |||
7199 | struct static_sched_domain { | ||
7200 | struct sched_domain sd; | ||
7201 | DECLARE_BITMAP(span, CONFIG_NR_CPUS); | ||
7202 | }; | ||
7203 | |||
7204 | /* | ||
7046 | * SMT sched-domains: | 7205 | * SMT sched-domains: |
7047 | */ | 7206 | */ |
7048 | #ifdef CONFIG_SCHED_SMT | 7207 | #ifdef CONFIG_SCHED_SMT |
7049 | static DEFINE_PER_CPU(struct sched_domain, cpu_domains); | 7208 | static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains); |
7050 | static DEFINE_PER_CPU(struct sched_group, sched_group_cpus); | 7209 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_cpus); |
7051 | 7210 | ||
7052 | static int | 7211 | static int |
7053 | cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, | 7212 | cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map, |
7054 | cpumask_t *unused) | 7213 | struct sched_group **sg, struct cpumask *unused) |
7055 | { | 7214 | { |
7056 | if (sg) | 7215 | if (sg) |
7057 | *sg = &per_cpu(sched_group_cpus, cpu); | 7216 | *sg = &per_cpu(sched_group_cpus, cpu).sg; |
7058 | return cpu; | 7217 | return cpu; |
7059 | } | 7218 | } |
7060 | #endif /* CONFIG_SCHED_SMT */ | 7219 | #endif /* CONFIG_SCHED_SMT */ |
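The static_sched_group/static_sched_domain wrappers exist because, per the FIXME above, the cpus mask of a sched_group or sched_domain now hangs off the end of the structure as a variable-length bitmap. Placing a DECLARE_BITMAP(..., CONFIG_NR_CPUS) directly behind the struct in the per-cpu wrapper gives that trailing mask its storage, so the accessors keep working on statically allocated instances. Roughly (layout sketch, not the exact definitions):

	/*
	 *   struct static_sched_group {
	 *           struct sched_group sg;                 <- ends in a trailing cpumask
	 *           DECLARE_BITMAP(cpus, CONFIG_NR_CPUS);  <- storage for that cpumask
	 *   };
	 *
	 * sched_group_cpus(&per_cpu(sched_group_cpus, cpu).sg) then points at
	 * the bitmap above, which is why the hunks below append ".sg"/".sd"
	 * to the per-cpu lookups.
	 */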
@@ -7063,56 +7222,53 @@ cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, | |||
7063 | * multi-core sched-domains: | 7222 | * multi-core sched-domains: |
7064 | */ | 7223 | */ |
7065 | #ifdef CONFIG_SCHED_MC | 7224 | #ifdef CONFIG_SCHED_MC |
7066 | static DEFINE_PER_CPU(struct sched_domain, core_domains); | 7225 | static DEFINE_PER_CPU(struct static_sched_domain, core_domains); |
7067 | static DEFINE_PER_CPU(struct sched_group, sched_group_core); | 7226 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_core); |
7068 | #endif /* CONFIG_SCHED_MC */ | 7227 | #endif /* CONFIG_SCHED_MC */ |
7069 | 7228 | ||
7070 | #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT) | 7229 | #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT) |
7071 | static int | 7230 | static int |
7072 | cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, | 7231 | cpu_to_core_group(int cpu, const struct cpumask *cpu_map, |
7073 | cpumask_t *mask) | 7232 | struct sched_group **sg, struct cpumask *mask) |
7074 | { | 7233 | { |
7075 | int group; | 7234 | int group; |
7076 | 7235 | ||
7077 | *mask = per_cpu(cpu_sibling_map, cpu); | 7236 | cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map); |
7078 | cpus_and(*mask, *mask, *cpu_map); | 7237 | group = cpumask_first(mask); |
7079 | group = first_cpu(*mask); | ||
7080 | if (sg) | 7238 | if (sg) |
7081 | *sg = &per_cpu(sched_group_core, group); | 7239 | *sg = &per_cpu(sched_group_core, group).sg; |
7082 | return group; | 7240 | return group; |
7083 | } | 7241 | } |
7084 | #elif defined(CONFIG_SCHED_MC) | 7242 | #elif defined(CONFIG_SCHED_MC) |
7085 | static int | 7243 | static int |
7086 | cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, | 7244 | cpu_to_core_group(int cpu, const struct cpumask *cpu_map, |
7087 | cpumask_t *unused) | 7245 | struct sched_group **sg, struct cpumask *unused) |
7088 | { | 7246 | { |
7089 | if (sg) | 7247 | if (sg) |
7090 | *sg = &per_cpu(sched_group_core, cpu); | 7248 | *sg = &per_cpu(sched_group_core, cpu).sg; |
7091 | return cpu; | 7249 | return cpu; |
7092 | } | 7250 | } |
7093 | #endif | 7251 | #endif |
7094 | 7252 | ||
7095 | static DEFINE_PER_CPU(struct sched_domain, phys_domains); | 7253 | static DEFINE_PER_CPU(struct static_sched_domain, phys_domains); |
7096 | static DEFINE_PER_CPU(struct sched_group, sched_group_phys); | 7254 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_phys); |
7097 | 7255 | ||
7098 | static int | 7256 | static int |
7099 | cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, | 7257 | cpu_to_phys_group(int cpu, const struct cpumask *cpu_map, |
7100 | cpumask_t *mask) | 7258 | struct sched_group **sg, struct cpumask *mask) |
7101 | { | 7259 | { |
7102 | int group; | 7260 | int group; |
7103 | #ifdef CONFIG_SCHED_MC | 7261 | #ifdef CONFIG_SCHED_MC |
7104 | *mask = cpu_coregroup_map(cpu); | 7262 | cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map); |
7105 | cpus_and(*mask, *mask, *cpu_map); | 7263 | group = cpumask_first(mask); |
7106 | group = first_cpu(*mask); | ||
7107 | #elif defined(CONFIG_SCHED_SMT) | 7264 | #elif defined(CONFIG_SCHED_SMT) |
7108 | *mask = per_cpu(cpu_sibling_map, cpu); | 7265 | cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map); |
7109 | cpus_and(*mask, *mask, *cpu_map); | 7266 | group = cpumask_first(mask); |
7110 | group = first_cpu(*mask); | ||
7111 | #else | 7267 | #else |
7112 | group = cpu; | 7268 | group = cpu; |
7113 | #endif | 7269 | #endif |
7114 | if (sg) | 7270 | if (sg) |
7115 | *sg = &per_cpu(sched_group_phys, group); | 7271 | *sg = &per_cpu(sched_group_phys, group).sg; |
7116 | return group; | 7272 | return group; |
7117 | } | 7273 | } |
7118 | 7274 | ||
@@ -7126,19 +7282,19 @@ static DEFINE_PER_CPU(struct sched_domain, node_domains); | |||
7126 | static struct sched_group ***sched_group_nodes_bycpu; | 7282 | static struct sched_group ***sched_group_nodes_bycpu; |
7127 | 7283 | ||
7128 | static DEFINE_PER_CPU(struct sched_domain, allnodes_domains); | 7284 | static DEFINE_PER_CPU(struct sched_domain, allnodes_domains); |
7129 | static DEFINE_PER_CPU(struct sched_group, sched_group_allnodes); | 7285 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes); |
7130 | 7286 | ||
7131 | static int cpu_to_allnodes_group(int cpu, const cpumask_t *cpu_map, | 7287 | static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map, |
7132 | struct sched_group **sg, cpumask_t *nodemask) | 7288 | struct sched_group **sg, |
7289 | struct cpumask *nodemask) | ||
7133 | { | 7290 | { |
7134 | int group; | 7291 | int group; |
7135 | 7292 | ||
7136 | *nodemask = node_to_cpumask(cpu_to_node(cpu)); | 7293 | cpumask_and(nodemask, cpumask_of_node(cpu_to_node(cpu)), cpu_map); |
7137 | cpus_and(*nodemask, *nodemask, *cpu_map); | 7294 | group = cpumask_first(nodemask); |
7138 | group = first_cpu(*nodemask); | ||
7139 | 7295 | ||
7140 | if (sg) | 7296 | if (sg) |
7141 | *sg = &per_cpu(sched_group_allnodes, group); | 7297 | *sg = &per_cpu(sched_group_allnodes, group).sg; |
7142 | return group; | 7298 | return group; |
7143 | } | 7299 | } |
7144 | 7300 | ||
@@ -7150,11 +7306,11 @@ static void init_numa_sched_groups_power(struct sched_group *group_head) | |||
7150 | if (!sg) | 7306 | if (!sg) |
7151 | return; | 7307 | return; |
7152 | do { | 7308 | do { |
7153 | for_each_cpu_mask_nr(j, sg->cpumask) { | 7309 | for_each_cpu(j, sched_group_cpus(sg)) { |
7154 | struct sched_domain *sd; | 7310 | struct sched_domain *sd; |
7155 | 7311 | ||
7156 | sd = &per_cpu(phys_domains, j); | 7312 | sd = &per_cpu(phys_domains, j).sd; |
7157 | if (j != first_cpu(sd->groups->cpumask)) { | 7313 | if (j != cpumask_first(sched_group_cpus(sd->groups))) { |
7158 | /* | 7314 | /* |
7159 | * Only add "power" once for each | 7315 | * Only add "power" once for each |
7160 | * physical package. | 7316 | * physical package. |
@@ -7171,11 +7327,12 @@ static void init_numa_sched_groups_power(struct sched_group *group_head) | |||
7171 | 7327 | ||
7172 | #ifdef CONFIG_NUMA | 7328 | #ifdef CONFIG_NUMA |
7173 | /* Free memory allocated for various sched_group structures */ | 7329 | /* Free memory allocated for various sched_group structures */ |
7174 | static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask) | 7330 | static void free_sched_groups(const struct cpumask *cpu_map, |
7331 | struct cpumask *nodemask) | ||
7175 | { | 7332 | { |
7176 | int cpu, i; | 7333 | int cpu, i; |
7177 | 7334 | ||
7178 | for_each_cpu_mask_nr(cpu, *cpu_map) { | 7335 | for_each_cpu(cpu, cpu_map) { |
7179 | struct sched_group **sched_group_nodes | 7336 | struct sched_group **sched_group_nodes |
7180 | = sched_group_nodes_bycpu[cpu]; | 7337 | = sched_group_nodes_bycpu[cpu]; |
7181 | 7338 | ||
@@ -7185,9 +7342,8 @@ static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask) | |||
7185 | for (i = 0; i < nr_node_ids; i++) { | 7342 | for (i = 0; i < nr_node_ids; i++) { |
7186 | struct sched_group *oldsg, *sg = sched_group_nodes[i]; | 7343 | struct sched_group *oldsg, *sg = sched_group_nodes[i]; |
7187 | 7344 | ||
7188 | *nodemask = node_to_cpumask(i); | 7345 | cpumask_and(nodemask, cpumask_of_node(i), cpu_map); |
7189 | cpus_and(*nodemask, *nodemask, *cpu_map); | 7346 | if (cpumask_empty(nodemask)) |
7190 | if (cpus_empty(*nodemask)) | ||
7191 | continue; | 7347 | continue; |
7192 | 7348 | ||
7193 | if (sg == NULL) | 7349 | if (sg == NULL) |
@@ -7205,7 +7361,8 @@ next_sg: | |||
7205 | } | 7361 | } |
7206 | } | 7362 | } |
7207 | #else /* !CONFIG_NUMA */ | 7363 | #else /* !CONFIG_NUMA */ |
7208 | static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask) | 7364 | static void free_sched_groups(const struct cpumask *cpu_map, |
7365 | struct cpumask *nodemask) | ||
7209 | { | 7366 | { |
7210 | } | 7367 | } |
7211 | #endif /* CONFIG_NUMA */ | 7368 | #endif /* CONFIG_NUMA */ |
@@ -7231,7 +7388,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd) | |||
7231 | 7388 | ||
7232 | WARN_ON(!sd || !sd->groups); | 7389 | WARN_ON(!sd || !sd->groups); |
7233 | 7390 | ||
7234 | if (cpu != first_cpu(sd->groups->cpumask)) | 7391 | if (cpu != cpumask_first(sched_group_cpus(sd->groups))) |
7235 | return; | 7392 | return; |
7236 | 7393 | ||
7237 | child = sd->child; | 7394 | child = sd->child; |
@@ -7296,48 +7453,6 @@ SD_INIT_FUNC(CPU) | |||
7296 | SD_INIT_FUNC(MC) | 7453 | SD_INIT_FUNC(MC) |
7297 | #endif | 7454 | #endif |
7298 | 7455 | ||
7299 | /* | ||
7300 | * To minimize stack usage kmalloc room for cpumasks and share the | ||
7301 | * space as the usage in build_sched_domains() dictates. Used only | ||
7302 | * if the amount of space is significant. | ||
7303 | */ | ||
7304 | struct allmasks { | ||
7305 | cpumask_t tmpmask; /* make this one first */ | ||
7306 | union { | ||
7307 | cpumask_t nodemask; | ||
7308 | cpumask_t this_sibling_map; | ||
7309 | cpumask_t this_core_map; | ||
7310 | }; | ||
7311 | cpumask_t send_covered; | ||
7312 | |||
7313 | #ifdef CONFIG_NUMA | ||
7314 | cpumask_t domainspan; | ||
7315 | cpumask_t covered; | ||
7316 | cpumask_t notcovered; | ||
7317 | #endif | ||
7318 | }; | ||
7319 | |||
7320 | #if NR_CPUS > 128 | ||
7321 | #define SCHED_CPUMASK_DECLARE(v) struct allmasks *v | ||
7322 | static inline void sched_cpumask_alloc(struct allmasks **masks) | ||
7323 | { | ||
7324 | *masks = kmalloc(sizeof(**masks), GFP_KERNEL); | ||
7325 | } | ||
7326 | static inline void sched_cpumask_free(struct allmasks *masks) | ||
7327 | { | ||
7328 | kfree(masks); | ||
7329 | } | ||
7330 | #else | ||
7331 | #define SCHED_CPUMASK_DECLARE(v) struct allmasks _v, *v = &_v | ||
7332 | static inline void sched_cpumask_alloc(struct allmasks **masks) | ||
7333 | { } | ||
7334 | static inline void sched_cpumask_free(struct allmasks *masks) | ||
7335 | { } | ||
7336 | #endif | ||
7337 | |||
7338 | #define SCHED_CPUMASK_VAR(v, a) cpumask_t *v = (cpumask_t *) \ | ||
7339 | ((unsigned long)(a) + offsetof(struct allmasks, v)) | ||
7340 | |||
7341 | static int default_relax_domain_level = -1; | 7456 | static int default_relax_domain_level = -1; |
7342 | 7457 | ||
7343 | static int __init setup_relax_domain_level(char *str) | 7458 | static int __init setup_relax_domain_level(char *str) |
@@ -7377,17 +7492,38 @@ static void set_domain_attribute(struct sched_domain *sd, | |||
7377 | * Build sched domains for a given set of cpus and attach the sched domains | 7492 | * Build sched domains for a given set of cpus and attach the sched domains |
7378 | * to the individual cpus | 7493 | * to the individual cpus |
7379 | */ | 7494 | */ |
7380 | static int __build_sched_domains(const cpumask_t *cpu_map, | 7495 | static int __build_sched_domains(const struct cpumask *cpu_map, |
7381 | struct sched_domain_attr *attr) | 7496 | struct sched_domain_attr *attr) |
7382 | { | 7497 | { |
7383 | int i; | 7498 | int i, err = -ENOMEM; |
7384 | struct root_domain *rd; | 7499 | struct root_domain *rd; |
7385 | SCHED_CPUMASK_DECLARE(allmasks); | 7500 | cpumask_var_t nodemask, this_sibling_map, this_core_map, send_covered, |
7386 | cpumask_t *tmpmask; | 7501 | tmpmask; |
7387 | #ifdef CONFIG_NUMA | 7502 | #ifdef CONFIG_NUMA |
7503 | cpumask_var_t domainspan, covered, notcovered; | ||
7388 | struct sched_group **sched_group_nodes = NULL; | 7504 | struct sched_group **sched_group_nodes = NULL; |
7389 | int sd_allnodes = 0; | 7505 | int sd_allnodes = 0; |
7390 | 7506 | ||
7507 | if (!alloc_cpumask_var(&domainspan, GFP_KERNEL)) | ||
7508 | goto out; | ||
7509 | if (!alloc_cpumask_var(&covered, GFP_KERNEL)) | ||
7510 | goto free_domainspan; | ||
7511 | if (!alloc_cpumask_var(¬covered, GFP_KERNEL)) | ||
7512 | goto free_covered; | ||
7513 | #endif | ||
7514 | |||
7515 | if (!alloc_cpumask_var(&nodemask, GFP_KERNEL)) | ||
7516 | goto free_notcovered; | ||
7517 | if (!alloc_cpumask_var(&this_sibling_map, GFP_KERNEL)) | ||
7518 | goto free_nodemask; | ||
7519 | if (!alloc_cpumask_var(&this_core_map, GFP_KERNEL)) | ||
7520 | goto free_this_sibling_map; | ||
7521 | if (!alloc_cpumask_var(&send_covered, GFP_KERNEL)) | ||
7522 | goto free_this_core_map; | ||
7523 | if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL)) | ||
7524 | goto free_send_covered; | ||
7525 | |||
7526 | #ifdef CONFIG_NUMA | ||
7391 | /* | 7527 | /* |
7392 | * Allocate the per-node list of sched groups | 7528 | * Allocate the per-node list of sched groups |
7393 | */ | 7529 | */ |
@@ -7395,54 +7531,35 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7395 | GFP_KERNEL); | 7531 | GFP_KERNEL); |
7396 | if (!sched_group_nodes) { | 7532 | if (!sched_group_nodes) { |
7397 | printk(KERN_WARNING "Can not alloc sched group node list\n"); | 7533 | printk(KERN_WARNING "Can not alloc sched group node list\n"); |
7398 | return -ENOMEM; | 7534 | goto free_tmpmask; |
7399 | } | 7535 | } |
7400 | #endif | 7536 | #endif |
7401 | 7537 | ||
7402 | rd = alloc_rootdomain(); | 7538 | rd = alloc_rootdomain(); |
7403 | if (!rd) { | 7539 | if (!rd) { |
7404 | printk(KERN_WARNING "Cannot alloc root domain\n"); | 7540 | printk(KERN_WARNING "Cannot alloc root domain\n"); |
7405 | #ifdef CONFIG_NUMA | 7541 | goto free_sched_groups; |
7406 | kfree(sched_group_nodes); | ||
7407 | #endif | ||
7408 | return -ENOMEM; | ||
7409 | } | 7542 | } |
7410 | 7543 | ||
7411 | /* get space for all scratch cpumask variables */ | ||
7412 | sched_cpumask_alloc(&allmasks); | ||
7413 | if (!allmasks) { | ||
7414 | printk(KERN_WARNING "Cannot alloc cpumask array\n"); | ||
7415 | kfree(rd); | ||
7416 | #ifdef CONFIG_NUMA | 7544 | #ifdef CONFIG_NUMA |
7417 | kfree(sched_group_nodes); | 7545 | sched_group_nodes_bycpu[cpumask_first(cpu_map)] = sched_group_nodes; |
7418 | #endif | ||
7419 | return -ENOMEM; | ||
7420 | } | ||
7421 | |||
7422 | tmpmask = (cpumask_t *)allmasks; | ||
7423 | |||
7424 | |||
7425 | #ifdef CONFIG_NUMA | ||
7426 | sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes; | ||
7427 | #endif | 7546 | #endif |
7428 | 7547 | ||
7429 | /* | 7548 | /* |
7430 | * Set up domains for cpus specified by the cpu_map. | 7549 | * Set up domains for cpus specified by the cpu_map. |
7431 | */ | 7550 | */ |
7432 | for_each_cpu_mask_nr(i, *cpu_map) { | 7551 | for_each_cpu(i, cpu_map) { |
7433 | struct sched_domain *sd = NULL, *p; | 7552 | struct sched_domain *sd = NULL, *p; |
7434 | SCHED_CPUMASK_VAR(nodemask, allmasks); | ||
7435 | 7553 | ||
7436 | *nodemask = node_to_cpumask(cpu_to_node(i)); | 7554 | cpumask_and(nodemask, cpumask_of_node(cpu_to_node(i)), cpu_map); |
7437 | cpus_and(*nodemask, *nodemask, *cpu_map); | ||
7438 | 7555 | ||
7439 | #ifdef CONFIG_NUMA | 7556 | #ifdef CONFIG_NUMA |
7440 | if (cpus_weight(*cpu_map) > | 7557 | if (cpumask_weight(cpu_map) > |
7441 | SD_NODES_PER_DOMAIN*cpus_weight(*nodemask)) { | 7558 | SD_NODES_PER_DOMAIN*cpumask_weight(nodemask)) { |
7442 | sd = &per_cpu(allnodes_domains, i); | 7559 | sd = &per_cpu(allnodes_domains, i); |
7443 | SD_INIT(sd, ALLNODES); | 7560 | SD_INIT(sd, ALLNODES); |
7444 | set_domain_attribute(sd, attr); | 7561 | set_domain_attribute(sd, attr); |
7445 | sd->span = *cpu_map; | 7562 | cpumask_copy(sched_domain_span(sd), cpu_map); |
7446 | cpu_to_allnodes_group(i, cpu_map, &sd->groups, tmpmask); | 7563 | cpu_to_allnodes_group(i, cpu_map, &sd->groups, tmpmask); |
7447 | p = sd; | 7564 | p = sd; |
7448 | sd_allnodes = 1; | 7565 | sd_allnodes = 1; |
@@ -7452,18 +7569,19 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7452 | sd = &per_cpu(node_domains, i); | 7569 | sd = &per_cpu(node_domains, i); |
7453 | SD_INIT(sd, NODE); | 7570 | SD_INIT(sd, NODE); |
7454 | set_domain_attribute(sd, attr); | 7571 | set_domain_attribute(sd, attr); |
7455 | sched_domain_node_span(cpu_to_node(i), &sd->span); | 7572 | sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd)); |
7456 | sd->parent = p; | 7573 | sd->parent = p; |
7457 | if (p) | 7574 | if (p) |
7458 | p->child = sd; | 7575 | p->child = sd; |
7459 | cpus_and(sd->span, sd->span, *cpu_map); | 7576 | cpumask_and(sched_domain_span(sd), |
7577 | sched_domain_span(sd), cpu_map); | ||
7460 | #endif | 7578 | #endif |
7461 | 7579 | ||
7462 | p = sd; | 7580 | p = sd; |
7463 | sd = &per_cpu(phys_domains, i); | 7581 | sd = &per_cpu(phys_domains, i).sd; |
7464 | SD_INIT(sd, CPU); | 7582 | SD_INIT(sd, CPU); |
7465 | set_domain_attribute(sd, attr); | 7583 | set_domain_attribute(sd, attr); |
7466 | sd->span = *nodemask; | 7584 | cpumask_copy(sched_domain_span(sd), nodemask); |
7467 | sd->parent = p; | 7585 | sd->parent = p; |
7468 | if (p) | 7586 | if (p) |
7469 | p->child = sd; | 7587 | p->child = sd; |
@@ -7471,11 +7589,11 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7471 | 7589 | ||
7472 | #ifdef CONFIG_SCHED_MC | 7590 | #ifdef CONFIG_SCHED_MC |
7473 | p = sd; | 7591 | p = sd; |
7474 | sd = &per_cpu(core_domains, i); | 7592 | sd = &per_cpu(core_domains, i).sd; |
7475 | SD_INIT(sd, MC); | 7593 | SD_INIT(sd, MC); |
7476 | set_domain_attribute(sd, attr); | 7594 | set_domain_attribute(sd, attr); |
7477 | sd->span = cpu_coregroup_map(i); | 7595 | cpumask_and(sched_domain_span(sd), cpu_map, |
7478 | cpus_and(sd->span, sd->span, *cpu_map); | 7596 | cpu_coregroup_mask(i)); |
7479 | sd->parent = p; | 7597 | sd->parent = p; |
7480 | p->child = sd; | 7598 | p->child = sd; |
7481 | cpu_to_core_group(i, cpu_map, &sd->groups, tmpmask); | 7599 | cpu_to_core_group(i, cpu_map, &sd->groups, tmpmask); |
@@ -7483,11 +7601,11 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7483 | 7601 | ||
7484 | #ifdef CONFIG_SCHED_SMT | 7602 | #ifdef CONFIG_SCHED_SMT |
7485 | p = sd; | 7603 | p = sd; |
7486 | sd = &per_cpu(cpu_domains, i); | 7604 | sd = &per_cpu(cpu_domains, i).sd; |
7487 | SD_INIT(sd, SIBLING); | 7605 | SD_INIT(sd, SIBLING); |
7488 | set_domain_attribute(sd, attr); | 7606 | set_domain_attribute(sd, attr); |
7489 | sd->span = per_cpu(cpu_sibling_map, i); | 7607 | cpumask_and(sched_domain_span(sd), |
7490 | cpus_and(sd->span, sd->span, *cpu_map); | 7608 | &per_cpu(cpu_sibling_map, i), cpu_map); |
7491 | sd->parent = p; | 7609 | sd->parent = p; |
7492 | p->child = sd; | 7610 | p->child = sd; |
7493 | cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask); | 7611 | cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask); |
@@ -7496,13 +7614,10 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7496 | 7614 | ||
7497 | #ifdef CONFIG_SCHED_SMT | 7615 | #ifdef CONFIG_SCHED_SMT |
7498 | /* Set up CPU (sibling) groups */ | 7616 | /* Set up CPU (sibling) groups */ |
7499 | for_each_cpu_mask_nr(i, *cpu_map) { | 7617 | for_each_cpu(i, cpu_map) { |
7500 | SCHED_CPUMASK_VAR(this_sibling_map, allmasks); | 7618 | cpumask_and(this_sibling_map, |
7501 | SCHED_CPUMASK_VAR(send_covered, allmasks); | 7619 | &per_cpu(cpu_sibling_map, i), cpu_map); |
7502 | 7620 | if (i != cpumask_first(this_sibling_map)) | |
7503 | *this_sibling_map = per_cpu(cpu_sibling_map, i); | ||
7504 | cpus_and(*this_sibling_map, *this_sibling_map, *cpu_map); | ||
7505 | if (i != first_cpu(*this_sibling_map)) | ||
7506 | continue; | 7621 | continue; |
7507 | 7622 | ||
7508 | init_sched_build_groups(this_sibling_map, cpu_map, | 7623 | init_sched_build_groups(this_sibling_map, cpu_map, |
@@ -7513,13 +7628,9 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7513 | 7628 | ||
7514 | #ifdef CONFIG_SCHED_MC | 7629 | #ifdef CONFIG_SCHED_MC |
7515 | /* Set up multi-core groups */ | 7630 | /* Set up multi-core groups */ |
7516 | for_each_cpu_mask_nr(i, *cpu_map) { | 7631 | for_each_cpu(i, cpu_map) { |
7517 | SCHED_CPUMASK_VAR(this_core_map, allmasks); | 7632 | cpumask_and(this_core_map, cpu_coregroup_mask(i), cpu_map); |
7518 | SCHED_CPUMASK_VAR(send_covered, allmasks); | 7633 | if (i != cpumask_first(this_core_map)) |
7519 | |||
7520 | *this_core_map = cpu_coregroup_map(i); | ||
7521 | cpus_and(*this_core_map, *this_core_map, *cpu_map); | ||
7522 | if (i != first_cpu(*this_core_map)) | ||
7523 | continue; | 7634 | continue; |
7524 | 7635 | ||
7525 | init_sched_build_groups(this_core_map, cpu_map, | 7636 | init_sched_build_groups(this_core_map, cpu_map, |
@@ -7530,12 +7641,8 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7530 | 7641 | ||
7531 | /* Set up physical groups */ | 7642 | /* Set up physical groups */ |
7532 | for (i = 0; i < nr_node_ids; i++) { | 7643 | for (i = 0; i < nr_node_ids; i++) { |
7533 | SCHED_CPUMASK_VAR(nodemask, allmasks); | 7644 | cpumask_and(nodemask, cpumask_of_node(i), cpu_map); |
7534 | SCHED_CPUMASK_VAR(send_covered, allmasks); | 7645 | if (cpumask_empty(nodemask)) |
7535 | |||
7536 | *nodemask = node_to_cpumask(i); | ||
7537 | cpus_and(*nodemask, *nodemask, *cpu_map); | ||
7538 | if (cpus_empty(*nodemask)) | ||
7539 | continue; | 7646 | continue; |
7540 | 7647 | ||
7541 | init_sched_build_groups(nodemask, cpu_map, | 7648 | init_sched_build_groups(nodemask, cpu_map, |
@@ -7546,8 +7653,6 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7546 | #ifdef CONFIG_NUMA | 7653 | #ifdef CONFIG_NUMA |
7547 | /* Set up node groups */ | 7654 | /* Set up node groups */ |
7548 | if (sd_allnodes) { | 7655 | if (sd_allnodes) { |
7549 | SCHED_CPUMASK_VAR(send_covered, allmasks); | ||
7550 | |||
7551 | init_sched_build_groups(cpu_map, cpu_map, | 7656 | init_sched_build_groups(cpu_map, cpu_map, |
7552 | &cpu_to_allnodes_group, | 7657 | &cpu_to_allnodes_group, |
7553 | send_covered, tmpmask); | 7658 | send_covered, tmpmask); |
@@ -7556,58 +7661,53 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7556 | for (i = 0; i < nr_node_ids; i++) { | 7661 | for (i = 0; i < nr_node_ids; i++) { |
7557 | /* Set up node groups */ | 7662 | /* Set up node groups */ |
7558 | struct sched_group *sg, *prev; | 7663 | struct sched_group *sg, *prev; |
7559 | SCHED_CPUMASK_VAR(nodemask, allmasks); | ||
7560 | SCHED_CPUMASK_VAR(domainspan, allmasks); | ||
7561 | SCHED_CPUMASK_VAR(covered, allmasks); | ||
7562 | int j; | 7664 | int j; |
7563 | 7665 | ||
7564 | *nodemask = node_to_cpumask(i); | 7666 | cpumask_clear(covered); |
7565 | cpus_clear(*covered); | 7667 | cpumask_and(nodemask, cpumask_of_node(i), cpu_map); |
7566 | 7668 | if (cpumask_empty(nodemask)) { | |
7567 | cpus_and(*nodemask, *nodemask, *cpu_map); | ||
7568 | if (cpus_empty(*nodemask)) { | ||
7569 | sched_group_nodes[i] = NULL; | 7669 | sched_group_nodes[i] = NULL; |
7570 | continue; | 7670 | continue; |
7571 | } | 7671 | } |
7572 | 7672 | ||
7573 | sched_domain_node_span(i, domainspan); | 7673 | sched_domain_node_span(i, domainspan); |
7574 | cpus_and(*domainspan, *domainspan, *cpu_map); | 7674 | cpumask_and(domainspan, domainspan, cpu_map); |
7575 | 7675 | ||
7576 | sg = kmalloc_node(sizeof(struct sched_group), GFP_KERNEL, i); | 7676 | sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(), |
7677 | GFP_KERNEL, i); | ||
7577 | if (!sg) { | 7678 | if (!sg) { |
7578 | printk(KERN_WARNING "Can not alloc domain group for " | 7679 | printk(KERN_WARNING "Can not alloc domain group for " |
7579 | "node %d\n", i); | 7680 | "node %d\n", i); |
7580 | goto error; | 7681 | goto error; |
7581 | } | 7682 | } |
7582 | sched_group_nodes[i] = sg; | 7683 | sched_group_nodes[i] = sg; |
7583 | for_each_cpu_mask_nr(j, *nodemask) { | 7684 | for_each_cpu(j, nodemask) { |
7584 | struct sched_domain *sd; | 7685 | struct sched_domain *sd; |
7585 | 7686 | ||
7586 | sd = &per_cpu(node_domains, j); | 7687 | sd = &per_cpu(node_domains, j); |
7587 | sd->groups = sg; | 7688 | sd->groups = sg; |
7588 | } | 7689 | } |
7589 | sg->__cpu_power = 0; | 7690 | sg->__cpu_power = 0; |
7590 | sg->cpumask = *nodemask; | 7691 | cpumask_copy(sched_group_cpus(sg), nodemask); |
7591 | sg->next = sg; | 7692 | sg->next = sg; |
7592 | cpus_or(*covered, *covered, *nodemask); | 7693 | cpumask_or(covered, covered, nodemask); |
7593 | prev = sg; | 7694 | prev = sg; |
7594 | 7695 | ||
7595 | for (j = 0; j < nr_node_ids; j++) { | 7696 | for (j = 0; j < nr_node_ids; j++) { |
7596 | SCHED_CPUMASK_VAR(notcovered, allmasks); | ||
7597 | int n = (i + j) % nr_node_ids; | 7697 | int n = (i + j) % nr_node_ids; |
7598 | node_to_cpumask_ptr(pnodemask, n); | ||
7599 | 7698 | ||
7600 | cpus_complement(*notcovered, *covered); | 7699 | cpumask_complement(notcovered, covered); |
7601 | cpus_and(*tmpmask, *notcovered, *cpu_map); | 7700 | cpumask_and(tmpmask, notcovered, cpu_map); |
7602 | cpus_and(*tmpmask, *tmpmask, *domainspan); | 7701 | cpumask_and(tmpmask, tmpmask, domainspan); |
7603 | if (cpus_empty(*tmpmask)) | 7702 | if (cpumask_empty(tmpmask)) |
7604 | break; | 7703 | break; |
7605 | 7704 | ||
7606 | cpus_and(*tmpmask, *tmpmask, *pnodemask); | 7705 | cpumask_and(tmpmask, tmpmask, cpumask_of_node(n)); |
7607 | if (cpus_empty(*tmpmask)) | 7706 | if (cpumask_empty(tmpmask)) |
7608 | continue; | 7707 | continue; |
7609 | 7708 | ||
7610 | sg = kmalloc_node(sizeof(struct sched_group), | 7709 | sg = kmalloc_node(sizeof(struct sched_group) + |
7710 | cpumask_size(), | ||
7611 | GFP_KERNEL, i); | 7711 | GFP_KERNEL, i); |
7612 | if (!sg) { | 7712 | if (!sg) { |
7613 | printk(KERN_WARNING | 7713 | printk(KERN_WARNING |
@@ -7615,9 +7715,9 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7615 | goto error; | 7715 | goto error; |
7616 | } | 7716 | } |
7617 | sg->__cpu_power = 0; | 7717 | sg->__cpu_power = 0; |
7618 | sg->cpumask = *tmpmask; | 7718 | cpumask_copy(sched_group_cpus(sg), tmpmask); |
7619 | sg->next = prev->next; | 7719 | sg->next = prev->next; |
7620 | cpus_or(*covered, *covered, *tmpmask); | 7720 | cpumask_or(covered, covered, tmpmask); |
7621 | prev->next = sg; | 7721 | prev->next = sg; |
7622 | prev = sg; | 7722 | prev = sg; |
7623 | } | 7723 | } |
@@ -7626,22 +7726,22 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7626 | 7726 | ||
7627 | /* Calculate CPU power for physical packages and nodes */ | 7727 | /* Calculate CPU power for physical packages and nodes */ |
7628 | #ifdef CONFIG_SCHED_SMT | 7728 | #ifdef CONFIG_SCHED_SMT |
7629 | for_each_cpu_mask_nr(i, *cpu_map) { | 7729 | for_each_cpu(i, cpu_map) { |
7630 | struct sched_domain *sd = &per_cpu(cpu_domains, i); | 7730 | struct sched_domain *sd = &per_cpu(cpu_domains, i).sd; |
7631 | 7731 | ||
7632 | init_sched_groups_power(i, sd); | 7732 | init_sched_groups_power(i, sd); |
7633 | } | 7733 | } |
7634 | #endif | 7734 | #endif |
7635 | #ifdef CONFIG_SCHED_MC | 7735 | #ifdef CONFIG_SCHED_MC |
7636 | for_each_cpu_mask_nr(i, *cpu_map) { | 7736 | for_each_cpu(i, cpu_map) { |
7637 | struct sched_domain *sd = &per_cpu(core_domains, i); | 7737 | struct sched_domain *sd = &per_cpu(core_domains, i).sd; |
7638 | 7738 | ||
7639 | init_sched_groups_power(i, sd); | 7739 | init_sched_groups_power(i, sd); |
7640 | } | 7740 | } |
7641 | #endif | 7741 | #endif |
7642 | 7742 | ||
7643 | for_each_cpu_mask_nr(i, *cpu_map) { | 7743 | for_each_cpu(i, cpu_map) { |
7644 | struct sched_domain *sd = &per_cpu(phys_domains, i); | 7744 | struct sched_domain *sd = &per_cpu(phys_domains, i).sd; |
7645 | 7745 | ||
7646 | init_sched_groups_power(i, sd); | 7746 | init_sched_groups_power(i, sd); |
7647 | } | 7747 | } |
@@ -7653,53 +7753,78 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7653 | if (sd_allnodes) { | 7753 | if (sd_allnodes) { |
7654 | struct sched_group *sg; | 7754 | struct sched_group *sg; |
7655 | 7755 | ||
7656 | cpu_to_allnodes_group(first_cpu(*cpu_map), cpu_map, &sg, | 7756 | cpu_to_allnodes_group(cpumask_first(cpu_map), cpu_map, &sg, |
7657 | tmpmask); | 7757 | tmpmask); |
7658 | init_numa_sched_groups_power(sg); | 7758 | init_numa_sched_groups_power(sg); |
7659 | } | 7759 | } |
7660 | #endif | 7760 | #endif |
7661 | 7761 | ||
7662 | /* Attach the domains */ | 7762 | /* Attach the domains */ |
7663 | for_each_cpu_mask_nr(i, *cpu_map) { | 7763 | for_each_cpu(i, cpu_map) { |
7664 | struct sched_domain *sd; | 7764 | struct sched_domain *sd; |
7665 | #ifdef CONFIG_SCHED_SMT | 7765 | #ifdef CONFIG_SCHED_SMT |
7666 | sd = &per_cpu(cpu_domains, i); | 7766 | sd = &per_cpu(cpu_domains, i).sd; |
7667 | #elif defined(CONFIG_SCHED_MC) | 7767 | #elif defined(CONFIG_SCHED_MC) |
7668 | sd = &per_cpu(core_domains, i); | 7768 | sd = &per_cpu(core_domains, i).sd; |
7669 | #else | 7769 | #else |
7670 | sd = &per_cpu(phys_domains, i); | 7770 | sd = &per_cpu(phys_domains, i).sd; |
7671 | #endif | 7771 | #endif |
7672 | cpu_attach_domain(sd, rd, i); | 7772 | cpu_attach_domain(sd, rd, i); |
7673 | } | 7773 | } |
7674 | 7774 | ||
7675 | sched_cpumask_free(allmasks); | 7775 | err = 0; |
7676 | return 0; | 7776 | |
7777 | free_tmpmask: | ||
7778 | free_cpumask_var(tmpmask); | ||
7779 | free_send_covered: | ||
7780 | free_cpumask_var(send_covered); | ||
7781 | free_this_core_map: | ||
7782 | free_cpumask_var(this_core_map); | ||
7783 | free_this_sibling_map: | ||
7784 | free_cpumask_var(this_sibling_map); | ||
7785 | free_nodemask: | ||
7786 | free_cpumask_var(nodemask); | ||
7787 | free_notcovered: | ||
7788 | #ifdef CONFIG_NUMA | ||
7789 | free_cpumask_var(notcovered); | ||
7790 | free_covered: | ||
7791 | free_cpumask_var(covered); | ||
7792 | free_domainspan: | ||
7793 | free_cpumask_var(domainspan); | ||
7794 | out: | ||
7795 | #endif | ||
7796 | return err; | ||
7797 | |||
7798 | free_sched_groups: | ||
7799 | #ifdef CONFIG_NUMA | ||
7800 | kfree(sched_group_nodes); | ||
7801 | #endif | ||
7802 | goto free_tmpmask; | ||
7677 | 7803 | ||
7678 | #ifdef CONFIG_NUMA | 7804 | #ifdef CONFIG_NUMA |
7679 | error: | 7805 | error: |
7680 | free_sched_groups(cpu_map, tmpmask); | 7806 | free_sched_groups(cpu_map, tmpmask); |
7681 | sched_cpumask_free(allmasks); | 7807 | free_rootdomain(rd); |
7682 | kfree(rd); | 7808 | goto free_tmpmask; |
7683 | return -ENOMEM; | ||
7684 | #endif | 7809 | #endif |
7685 | } | 7810 | } |
7686 | 7811 | ||
7687 | static int build_sched_domains(const cpumask_t *cpu_map) | 7812 | static int build_sched_domains(const struct cpumask *cpu_map) |
7688 | { | 7813 | { |
7689 | return __build_sched_domains(cpu_map, NULL); | 7814 | return __build_sched_domains(cpu_map, NULL); |
7690 | } | 7815 | } |
7691 | 7816 | ||
7692 | static cpumask_t *doms_cur; /* current sched domains */ | 7817 | static struct cpumask *doms_cur; /* current sched domains */ |
7693 | static int ndoms_cur; /* number of sched domains in 'doms_cur' */ | 7818 | static int ndoms_cur; /* number of sched domains in 'doms_cur' */ |
7694 | static struct sched_domain_attr *dattr_cur; | 7819 | static struct sched_domain_attr *dattr_cur; |
7695 | /* attributes of custom domains in 'doms_cur' */ | 7820 | /* attributes of custom domains in 'doms_cur' */ |
7696 | 7821 | ||
7697 | /* | 7822 | /* |
7698 | * Special case: If a kmalloc of a doms_cur partition (array of | 7823 | * Special case: If a kmalloc of a doms_cur partition (array of |
7699 | * cpumask_t) fails, then fallback to a single sched domain, | 7824 | * cpumask) fails, then fallback to a single sched domain, |
7700 | * as determined by the single cpumask_t fallback_doms. | 7825 | * as determined by the single cpumask fallback_doms. |
7701 | */ | 7826 | */ |
7702 | static cpumask_t fallback_doms; | 7827 | static cpumask_var_t fallback_doms; |
7703 | 7828 | ||
7704 | /* | 7829 | /* |
7705 | * arch_update_cpu_topology lets virtualized architectures update the | 7830 | * arch_update_cpu_topology lets virtualized architectures update the |
@@ -7716,16 +7841,16 @@ int __attribute__((weak)) arch_update_cpu_topology(void) | |||
7716 | * For now this just excludes isolated cpus, but could be used to | 7841 | * For now this just excludes isolated cpus, but could be used to |
7717 | * exclude other special cases in the future. | 7842 | * exclude other special cases in the future. |
7718 | */ | 7843 | */ |
7719 | static int arch_init_sched_domains(const cpumask_t *cpu_map) | 7844 | static int arch_init_sched_domains(const struct cpumask *cpu_map) |
7720 | { | 7845 | { |
7721 | int err; | 7846 | int err; |
7722 | 7847 | ||
7723 | arch_update_cpu_topology(); | 7848 | arch_update_cpu_topology(); |
7724 | ndoms_cur = 1; | 7849 | ndoms_cur = 1; |
7725 | doms_cur = kmalloc(sizeof(cpumask_t), GFP_KERNEL); | 7850 | doms_cur = kmalloc(cpumask_size(), GFP_KERNEL); |
7726 | if (!doms_cur) | 7851 | if (!doms_cur) |
7727 | doms_cur = &fallback_doms; | 7852 | doms_cur = fallback_doms; |
7728 | cpus_andnot(*doms_cur, *cpu_map, cpu_isolated_map); | 7853 | cpumask_andnot(doms_cur, cpu_map, cpu_isolated_map); |
7729 | dattr_cur = NULL; | 7854 | dattr_cur = NULL; |
7730 | err = build_sched_domains(doms_cur); | 7855 | err = build_sched_domains(doms_cur); |
7731 | register_sched_domain_sysctl(); | 7856 | register_sched_domain_sysctl(); |
@@ -7733,8 +7858,8 @@ static int arch_init_sched_domains(const cpumask_t *cpu_map) | |||
7733 | return err; | 7858 | return err; |
7734 | } | 7859 | } |
7735 | 7860 | ||
7736 | static void arch_destroy_sched_domains(const cpumask_t *cpu_map, | 7861 | static void arch_destroy_sched_domains(const struct cpumask *cpu_map, |
7737 | cpumask_t *tmpmask) | 7862 | struct cpumask *tmpmask) |
7738 | { | 7863 | { |
7739 | free_sched_groups(cpu_map, tmpmask); | 7864 | free_sched_groups(cpu_map, tmpmask); |
7740 | } | 7865 | } |
@@ -7743,15 +7868,16 @@ static void arch_destroy_sched_domains(const cpumask_t *cpu_map, | |||
7743 | * Detach sched domains from a group of cpus specified in cpu_map | 7868 | * Detach sched domains from a group of cpus specified in cpu_map |
7744 | * These cpus will now be attached to the NULL domain | 7869 | * These cpus will now be attached to the NULL domain |
7745 | */ | 7870 | */ |
7746 | static void detach_destroy_domains(const cpumask_t *cpu_map) | 7871 | static void detach_destroy_domains(const struct cpumask *cpu_map) |
7747 | { | 7872 | { |
7748 | cpumask_t tmpmask; | 7873 | /* Save because hotplug lock held. */ |
7874 | static DECLARE_BITMAP(tmpmask, CONFIG_NR_CPUS); | ||
7749 | int i; | 7875 | int i; |
7750 | 7876 | ||
7751 | for_each_cpu_mask_nr(i, *cpu_map) | 7877 | for_each_cpu(i, cpu_map) |
7752 | cpu_attach_domain(NULL, &def_root_domain, i); | 7878 | cpu_attach_domain(NULL, &def_root_domain, i); |
7753 | synchronize_sched(); | 7879 | synchronize_sched(); |
7754 | arch_destroy_sched_domains(cpu_map, &tmpmask); | 7880 | arch_destroy_sched_domains(cpu_map, to_cpumask(tmpmask)); |
7755 | } | 7881 | } |
7756 | 7882 | ||
7757 | /* handle null as "default" */ | 7883 | /* handle null as "default" */ |
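detach_destroy_domains() above avoids both an on-stack cpumask_t (too large for big NR_CPUS builds) and a runtime allocation by using a static bitmap, which is only safe because every caller holds the CPU hotplug lock, so the function cannot run concurrently. The same shape in isolation (names invented):

    #include <linux/cpumask.h>

    /* One static scratch mask; callers must already be serialized
     * (in sched.c: by the CPU hotplug lock). */
    static DECLARE_BITMAP(scratch_bits, CONFIG_NR_CPUS);

    static void scratch_example(const struct cpumask *cpu_map)
    {
            struct cpumask *scratch = to_cpumask(scratch_bits);

            cpumask_copy(scratch, cpu_map);
            /* ... hand 'scratch' to helpers that need a writable mask ... */
    }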
@@ -7776,7 +7902,7 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, | |||
7776 | * doms_new[] to the current sched domain partitioning, doms_cur[]. | 7902 | * doms_new[] to the current sched domain partitioning, doms_cur[]. |
7777 | * It destroys each deleted domain and builds each new domain. | 7903 | * It destroys each deleted domain and builds each new domain. |
7778 | * | 7904 | * |
7779 | * 'doms_new' is an array of cpumask_t's of length 'ndoms_new'. | 7905 | * 'doms_new' is an array of cpumasks of length 'ndoms_new'. |
7780 | * The masks don't intersect (don't overlap). We should set up one | 7906 | * The masks don't intersect (don't overlap). We should set up one |
7781 | * sched domain for each mask. CPUs not in any of the cpumasks will | 7907 | * sched domain for each mask. CPUs not in any of the cpumasks will |
7782 | * not be load balanced. If the same cpumask appears both in the | 7908 | * not be load balanced. If the same cpumask appears both in the |
@@ -7790,13 +7916,14 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, | |||
7790 | * the single partition 'fallback_doms', it also forces the domains | 7916 | * the single partition 'fallback_doms', it also forces the domains |
7791 | * to be rebuilt. | 7917 | * to be rebuilt. |
7792 | * | 7918 | * |
7793 | * If doms_new == NULL it will be replaced with cpu_online_map. | 7919 | * If doms_new == NULL it will be replaced with cpu_online_mask. |
7794 | * ndoms_new == 0 is a special case for destroying existing domains, | 7920 | * ndoms_new == 0 is a special case for destroying existing domains, |
7795 | * and it will not create the default domain. | 7921 | * and it will not create the default domain. |
7796 | * | 7922 | * |
7797 | * Call with hotplug lock held | 7923 | * Call with hotplug lock held |
7798 | */ | 7924 | */ |
7799 | void partition_sched_domains(int ndoms_new, cpumask_t *doms_new, | 7925 | /* FIXME: Change to struct cpumask *doms_new[] */ |
7926 | void partition_sched_domains(int ndoms_new, struct cpumask *doms_new, | ||
7800 | struct sched_domain_attr *dattr_new) | 7927 | struct sched_domain_attr *dattr_new) |
7801 | { | 7928 | { |
7802 | int i, j, n; | 7929 | int i, j, n; |
@@ -7815,7 +7942,7 @@ void partition_sched_domains(int ndoms_new, cpumask_t *doms_new, | |||
7815 | /* Destroy deleted domains */ | 7942 | /* Destroy deleted domains */ |
7816 | for (i = 0; i < ndoms_cur; i++) { | 7943 | for (i = 0; i < ndoms_cur; i++) { |
7817 | for (j = 0; j < n && !new_topology; j++) { | 7944 | for (j = 0; j < n && !new_topology; j++) { |
7818 | if (cpus_equal(doms_cur[i], doms_new[j]) | 7945 | if (cpumask_equal(&doms_cur[i], &doms_new[j]) |
7819 | && dattrs_equal(dattr_cur, i, dattr_new, j)) | 7946 | && dattrs_equal(dattr_cur, i, dattr_new, j)) |
7820 | goto match1; | 7947 | goto match1; |
7821 | } | 7948 | } |
@@ -7827,15 +7954,15 @@ match1: | |||
7827 | 7954 | ||
7828 | if (doms_new == NULL) { | 7955 | if (doms_new == NULL) { |
7829 | ndoms_cur = 0; | 7956 | ndoms_cur = 0; |
7830 | doms_new = &fallback_doms; | 7957 | doms_new = fallback_doms; |
7831 | cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map); | 7958 | cpumask_andnot(&doms_new[0], cpu_online_mask, cpu_isolated_map); |
7832 | WARN_ON_ONCE(dattr_new); | 7959 | WARN_ON_ONCE(dattr_new); |
7833 | } | 7960 | } |
7834 | 7961 | ||
7835 | /* Build new domains */ | 7962 | /* Build new domains */ |
7836 | for (i = 0; i < ndoms_new; i++) { | 7963 | for (i = 0; i < ndoms_new; i++) { |
7837 | for (j = 0; j < ndoms_cur && !new_topology; j++) { | 7964 | for (j = 0; j < ndoms_cur && !new_topology; j++) { |
7838 | if (cpus_equal(doms_new[i], doms_cur[j]) | 7965 | if (cpumask_equal(&doms_new[i], &doms_cur[j]) |
7839 | && dattrs_equal(dattr_new, i, dattr_cur, j)) | 7966 | && dattrs_equal(dattr_new, i, dattr_cur, j)) |
7840 | goto match2; | 7967 | goto match2; |
7841 | } | 7968 | } |
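Both match loops apply the same rule: an existing partition survives the repartitioning untouched only if an identical mask with identical attributes appears on the other side; otherwise it is torn down (match1) or newly built (match2). Condensed into one hypothetical helper:

    /* Illustrative helper: does partition i of (masks_a, dattr_a) have an
     * exact counterpart anywhere in the nb partitions of (masks_b, dattr_b)? */
    static int partition_unchanged(struct cpumask *masks_a,
                                   struct sched_domain_attr *dattr_a, int i,
                                   struct cpumask *masks_b,
                                   struct sched_domain_attr *dattr_b, int nb)
    {
            int j;

            for (j = 0; j < nb; j++)
                    if (cpumask_equal(&masks_a[i], &masks_b[j]) &&
                        dattrs_equal(dattr_a, i, dattr_b, j))
                            return 1;
            return 0;
    }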
@@ -7847,7 +7974,7 @@ match2: | |||
7847 | } | 7974 | } |
7848 | 7975 | ||
7849 | /* Remember the new sched domains */ | 7976 | /* Remember the new sched domains */ |
7850 | if (doms_cur != &fallback_doms) | 7977 | if (doms_cur != fallback_doms) |
7851 | kfree(doms_cur); | 7978 | kfree(doms_cur); |
7852 | kfree(dattr_cur); /* kfree(NULL) is safe */ | 7979 | kfree(dattr_cur); /* kfree(NULL) is safe */ |
7853 | doms_cur = doms_new; | 7980 | doms_cur = doms_new; |
@@ -7860,7 +7987,7 @@ match2: | |||
7860 | } | 7987 | } |
7861 | 7988 | ||
7862 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) | 7989 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) |
7863 | int arch_reinit_sched_domains(void) | 7990 | static void arch_reinit_sched_domains(void) |
7864 | { | 7991 | { |
7865 | get_online_cpus(); | 7992 | get_online_cpus(); |
7866 | 7993 | ||
@@ -7869,25 +7996,33 @@ int arch_reinit_sched_domains(void) | |||
7869 | 7996 | ||
7870 | rebuild_sched_domains(); | 7997 | rebuild_sched_domains(); |
7871 | put_online_cpus(); | 7998 | put_online_cpus(); |
7872 | |||
7873 | return 0; | ||
7874 | } | 7999 | } |
7875 | 8000 | ||
7876 | static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt) | 8001 | static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt) |
7877 | { | 8002 | { |
7878 | int ret; | 8003 | unsigned int level = 0; |
8004 | |||
8005 | if (sscanf(buf, "%u", &level) != 1) | ||
8006 | return -EINVAL; | ||
8007 | |||
8008 | /* | ||
8009 | * level is always positive, so don't check for | ||
8010 | * level < POWERSAVINGS_BALANCE_NONE (which is 0). | ||
8011 | * What happens on a 0- or 1-byte write? Do we | ||
8012 | * need to check count as well? | ||
8013 | */ | ||
7879 | 8014 | ||
7880 | if (buf[0] != '0' && buf[0] != '1') | 8015 | if (level >= MAX_POWERSAVINGS_BALANCE_LEVELS) |
7881 | return -EINVAL; | 8016 | return -EINVAL; |
7882 | 8017 | ||
7883 | if (smt) | 8018 | if (smt) |
7884 | sched_smt_power_savings = (buf[0] == '1'); | 8019 | sched_smt_power_savings = level; |
7885 | else | 8020 | else |
7886 | sched_mc_power_savings = (buf[0] == '1'); | 8021 | sched_mc_power_savings = level; |
7887 | 8022 | ||
7888 | ret = arch_reinit_sched_domains(); | 8023 | arch_reinit_sched_domains(); |
7889 | 8024 | ||
7890 | return ret ? ret : count; | 8025 | return count; |
7891 | } | 8026 | } |
7892 | 8027 | ||
7893 | #ifdef CONFIG_SCHED_MC | 8028 | #ifdef CONFIG_SCHED_MC |
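sched_power_savings_store() above now parses a full power-savings level instead of accepting only '0' or '1'; scanning with %u already rejects negative input, so only the upper bound needs checking. The parse-and-validate step on its own (helper name invented):

    /* Valid levels are 0..MAX_POWERSAVINGS_BALANCE_LEVELS-1,
     * i.e. NONE, BASIC and WAKEUP. */
    static int parse_power_savings_level(const char *buf, unsigned int *level)
    {
            if (sscanf(buf, "%u", level) != 1)
                    return -EINVAL;

            if (*level >= MAX_POWERSAVINGS_BALANCE_LEVELS)
                    return -EINVAL;

            return 0;
    }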
@@ -7922,7 +8057,7 @@ static SYSDEV_CLASS_ATTR(sched_smt_power_savings, 0644, | |||
7922 | sched_smt_power_savings_store); | 8057 | sched_smt_power_savings_store); |
7923 | #endif | 8058 | #endif |
7924 | 8059 | ||
7925 | int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls) | 8060 | int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls) |
7926 | { | 8061 | { |
7927 | int err = 0; | 8062 | int err = 0; |
7928 | 8063 | ||
@@ -7987,7 +8122,9 @@ static int update_runtime(struct notifier_block *nfb, | |||
7987 | 8122 | ||
7988 | void __init sched_init_smp(void) | 8123 | void __init sched_init_smp(void) |
7989 | { | 8124 | { |
7990 | cpumask_t non_isolated_cpus; | 8125 | cpumask_var_t non_isolated_cpus; |
8126 | |||
8127 | alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL); | ||
7991 | 8128 | ||
7992 | #if defined(CONFIG_NUMA) | 8129 | #if defined(CONFIG_NUMA) |
7993 | sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **), | 8130 | sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **), |
@@ -7996,10 +8133,10 @@ void __init sched_init_smp(void) | |||
7996 | #endif | 8133 | #endif |
7997 | get_online_cpus(); | 8134 | get_online_cpus(); |
7998 | mutex_lock(&sched_domains_mutex); | 8135 | mutex_lock(&sched_domains_mutex); |
7999 | arch_init_sched_domains(&cpu_online_map); | 8136 | arch_init_sched_domains(cpu_online_mask); |
8000 | cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map); | 8137 | cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map); |
8001 | if (cpus_empty(non_isolated_cpus)) | 8138 | if (cpumask_empty(non_isolated_cpus)) |
8002 | cpu_set(smp_processor_id(), non_isolated_cpus); | 8139 | cpumask_set_cpu(smp_processor_id(), non_isolated_cpus); |
8003 | mutex_unlock(&sched_domains_mutex); | 8140 | mutex_unlock(&sched_domains_mutex); |
8004 | put_online_cpus(); | 8141 | put_online_cpus(); |
8005 | 8142 | ||
@@ -8014,9 +8151,13 @@ void __init sched_init_smp(void) | |||
8014 | init_hrtick(); | 8151 | init_hrtick(); |
8015 | 8152 | ||
8016 | /* Move init over to a non-isolated CPU */ | 8153 | /* Move init over to a non-isolated CPU */ |
8017 | if (set_cpus_allowed_ptr(current, &non_isolated_cpus) < 0) | 8154 | if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0) |
8018 | BUG(); | 8155 | BUG(); |
8019 | sched_init_granularity(); | 8156 | sched_init_granularity(); |
8157 | free_cpumask_var(non_isolated_cpus); | ||
8158 | |||
8159 | alloc_cpumask_var(&fallback_doms, GFP_KERNEL); | ||
8160 | init_sched_rt_class(); | ||
8020 | } | 8161 | } |
8021 | #else | 8162 | #else |
8022 | void __init sched_init_smp(void) | 8163 | void __init sched_init_smp(void) |
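The non_isolated_cpus allocation in the SMP sched_init_smp() above is not checked; at boot that is normally tolerated, but the usual pattern elsewhere verifies the result. A hedged sketch of the checked form (helper name and panic message invented; assumes sched.c's file-scope cpu_isolated_map):

    static void __init pick_non_isolated(cpumask_var_t *mask)
    {
            if (!alloc_cpumask_var(mask, GFP_KERNEL))
                    panic("sched_init_smp: cannot allocate cpumask");

            cpumask_andnot(*mask, cpu_possible_mask, cpu_isolated_map);
            if (cpumask_empty(*mask))
                    cpumask_set_cpu(smp_processor_id(), *mask);
    }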
@@ -8331,6 +8472,15 @@ void __init sched_init(void) | |||
8331 | */ | 8472 | */ |
8332 | current->sched_class = &fair_sched_class; | 8473 | current->sched_class = &fair_sched_class; |
8333 | 8474 | ||
8475 | /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */ | ||
8476 | alloc_bootmem_cpumask_var(&nohz_cpu_mask); | ||
8477 | #ifdef CONFIG_SMP | ||
8478 | #ifdef CONFIG_NO_HZ | ||
8479 | alloc_bootmem_cpumask_var(&nohz.cpu_mask); | ||
8480 | #endif | ||
8481 | alloc_bootmem_cpumask_var(&cpu_isolated_map); | ||
8482 | #endif /* SMP */ | ||
8483 | |||
8334 | scheduler_running = 1; | 8484 | scheduler_running = 1; |
8335 | } | 8485 | } |
8336 | 8486 | ||
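The masks allocated at the end of sched_init() are needed before the slab allocator is available, hence the bootmem variant, which again only does real work when cpumasks are off-stack. The same pattern for an invented early-boot mask:

    #include <linux/cpumask.h>

    static cpumask_var_t early_mask;    /* illustrative; cf. nohz_cpu_mask above */

    void __init early_mask_setup(void)
    {
            /* Backed by bootmem when CONFIG_CPUMASK_OFFSTACK=y; otherwise the
             * storage is part of the variable and this call is a no-op. */
            alloc_bootmem_cpumask_var(&early_mask);
    }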