author     Ingo Molnar <mingo@elte.hu>  2009-01-18 12:37:14 -0500
committer  Ingo Molnar <mingo@elte.hu>  2009-01-18 12:37:14 -0500
commit     b2b062b8163391c42b3219d466ca1ac9742b9c7b
tree       f3f920c09b8de694b1bc1d4b878cfd2b0b98c913  /kernel/sched.c
parent     a9de18eb761f7c1c860964b2e5addc1a35c7e861
parent     99937d6455cea95405ac681c86a857d0fcd530bd
Merge branch 'core/percpu' into stackprotector
Conflicts:
arch/x86/include/asm/pda.h
arch/x86/include/asm/system.h
Also, moved include/asm-x86/stackprotector.h to arch/x86/include/asm.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  1118
1 file changed, 639 insertions(+), 479 deletions(-)
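The bulk of this diff is a mechanical conversion of kernel/sched.c from the old fixed-size cpumask_t operations (cpu_isset(), cpus_and(), first_cpu(), cpus_weight(), ...) to the struct cpumask API (cpumask_test_cpu(), cpumask_and(), cpumask_first(), cpumask_weight(), ...), with on-stack and embedded masks becoming cpumask_var_t so they can live off-stack when CONFIG_CPUMASK_OFFSTACK is enabled. The snippet below is an illustrative sketch of that mapping, not code from the commit; pick_cpu() is a made-up example function.

#include <linux/cpumask.h>

/* Sketch only: old operation in the comment, replacement in the code. */
static int pick_cpu(const struct cpumask *candidates, int this_cpu)
{
	if (cpumask_empty(candidates))			/* was cpus_empty()  */
		return nr_cpu_ids;
	if (cpumask_test_cpu(this_cpu, candidates))	/* was cpu_isset()   */
		return this_cpu;
	return cpumask_first(candidates);		/* was first_cpu()   */
}

The cpumask_var_t side of the conversion, alloc_cpumask_var()/free_cpumask_var(), shows up repeatedly below, for example in idle_balance() and sched_setaffinity().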
diff --git a/kernel/sched.c b/kernel/sched.c
index c731dd820d1a..1d2909067040 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -125,6 +125,9 @@ DEFINE_TRACE(sched_switch); | |||
125 | DEFINE_TRACE(sched_migrate_task); | 125 | DEFINE_TRACE(sched_migrate_task); |
126 | 126 | ||
127 | #ifdef CONFIG_SMP | 127 | #ifdef CONFIG_SMP |
128 | |||
129 | static void double_rq_lock(struct rq *rq1, struct rq *rq2); | ||
130 | |||
128 | /* | 131 | /* |
129 | * Divide a load by a sched group cpu_power : (load / sg->__cpu_power) | 132 | * Divide a load by a sched group cpu_power : (load / sg->__cpu_power) |
130 | * Since cpu_power is a 'constant', we can use a reciprocal divide. | 133 | * Since cpu_power is a 'constant', we can use a reciprocal divide. |
@@ -498,18 +501,26 @@ struct rt_rq { | |||
498 | */ | 501 | */ |
499 | struct root_domain { | 502 | struct root_domain { |
500 | atomic_t refcount; | 503 | atomic_t refcount; |
501 | cpumask_t span; | 504 | cpumask_var_t span; |
502 | cpumask_t online; | 505 | cpumask_var_t online; |
503 | 506 | ||
504 | /* | 507 | /* |
505 | * The "RT overload" flag: it gets set if a CPU has more than | 508 | * The "RT overload" flag: it gets set if a CPU has more than |
506 | * one runnable RT task. | 509 | * one runnable RT task. |
507 | */ | 510 | */ |
508 | cpumask_t rto_mask; | 511 | cpumask_var_t rto_mask; |
509 | atomic_t rto_count; | 512 | atomic_t rto_count; |
510 | #ifdef CONFIG_SMP | 513 | #ifdef CONFIG_SMP |
511 | struct cpupri cpupri; | 514 | struct cpupri cpupri; |
512 | #endif | 515 | #endif |
516 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) | ||
517 | /* | ||
518 | * Preferred wake up cpu nominated by sched_mc balance that will be | ||
519 | * used when most cpus are idle in the system indicating overall very | ||
520 | * low system utilisation. Triggered at POWERSAVINGS_BALANCE_WAKEUP(2) | ||
521 | */ | ||
522 | unsigned int sched_mc_preferred_wakeup_cpu; | ||
523 | #endif | ||
513 | }; | 524 | }; |
514 | 525 | ||
515 | /* | 526 | /* |
@@ -1514,7 +1525,7 @@ static int tg_shares_up(struct task_group *tg, void *data) | |||
1514 | struct sched_domain *sd = data; | 1525 | struct sched_domain *sd = data; |
1515 | int i; | 1526 | int i; |
1516 | 1527 | ||
1517 | for_each_cpu_mask(i, sd->span) { | 1528 | for_each_cpu(i, sched_domain_span(sd)) { |
1518 | /* | 1529 | /* |
1519 | * If there are currently no tasks on the cpu pretend there | 1530 | * If there are currently no tasks on the cpu pretend there |
1520 | * is one of average load so that when a new task gets to | 1531 | * is one of average load so that when a new task gets to |
@@ -1535,7 +1546,7 @@ static int tg_shares_up(struct task_group *tg, void *data) | |||
1535 | if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE)) | 1546 | if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE)) |
1536 | shares = tg->shares; | 1547 | shares = tg->shares; |
1537 | 1548 | ||
1538 | for_each_cpu_mask(i, sd->span) | 1549 | for_each_cpu(i, sched_domain_span(sd)) |
1539 | update_group_shares_cpu(tg, i, shares, rq_weight); | 1550 | update_group_shares_cpu(tg, i, shares, rq_weight); |
1540 | 1551 | ||
1541 | return 0; | 1552 | return 0; |
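The tg_shares_up() hunks above also switch from reading sd->span directly to the sched_domain_span() accessor and the for_each_cpu() iterator. A minimal sketch of that access pattern follows; walk_domain() and visit_cpu() are invented for illustration.

/* Sketch only: iterate a scheduling domain's CPUs via the accessor. */
static void walk_domain(struct sched_domain *sd, void (*visit_cpu)(int cpu))
{
	int i;

	for_each_cpu(i, sched_domain_span(sd))	/* was for_each_cpu_mask(i, sd->span) */
		visit_cpu(i);
}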
@@ -2101,15 +2112,17 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu) | |||
2101 | int i; | 2112 | int i; |
2102 | 2113 | ||
2103 | /* Skip over this group if it has no CPUs allowed */ | 2114 | /* Skip over this group if it has no CPUs allowed */ |
2104 | if (!cpus_intersects(group->cpumask, p->cpus_allowed)) | 2115 | if (!cpumask_intersects(sched_group_cpus(group), |
2116 | &p->cpus_allowed)) | ||
2105 | continue; | 2117 | continue; |
2106 | 2118 | ||
2107 | local_group = cpu_isset(this_cpu, group->cpumask); | 2119 | local_group = cpumask_test_cpu(this_cpu, |
2120 | sched_group_cpus(group)); | ||
2108 | 2121 | ||
2109 | /* Tally up the load of all CPUs in the group */ | 2122 | /* Tally up the load of all CPUs in the group */ |
2110 | avg_load = 0; | 2123 | avg_load = 0; |
2111 | 2124 | ||
2112 | for_each_cpu_mask_nr(i, group->cpumask) { | 2125 | for_each_cpu(i, sched_group_cpus(group)) { |
2113 | /* Bias balancing toward cpus of our domain */ | 2126 | /* Bias balancing toward cpus of our domain */ |
2114 | if (local_group) | 2127 | if (local_group) |
2115 | load = source_load(i, load_idx); | 2128 | load = source_load(i, load_idx); |
@@ -2141,17 +2154,14 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu) | |||
2141 | * find_idlest_cpu - find the idlest cpu among the cpus in group. | 2154 | * find_idlest_cpu - find the idlest cpu among the cpus in group. |
2142 | */ | 2155 | */ |
2143 | static int | 2156 | static int |
2144 | find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu, | 2157 | find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) |
2145 | cpumask_t *tmp) | ||
2146 | { | 2158 | { |
2147 | unsigned long load, min_load = ULONG_MAX; | 2159 | unsigned long load, min_load = ULONG_MAX; |
2148 | int idlest = -1; | 2160 | int idlest = -1; |
2149 | int i; | 2161 | int i; |
2150 | 2162 | ||
2151 | /* Traverse only the allowed CPUs */ | 2163 | /* Traverse only the allowed CPUs */ |
2152 | cpus_and(*tmp, group->cpumask, p->cpus_allowed); | 2164 | for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) { |
2153 | |||
2154 | for_each_cpu_mask_nr(i, *tmp) { | ||
2155 | load = weighted_cpuload(i); | 2165 | load = weighted_cpuload(i); |
2156 | 2166 | ||
2157 | if (load < min_load || (load == min_load && i == this_cpu)) { | 2167 | if (load < min_load || (load == min_load && i == this_cpu)) { |
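The find_idlest_cpu() change above removes the caller-supplied scratch mask entirely: rather than materialising the intersection with cpus_and() and then iterating it, the new code walks the intersection in place with for_each_cpu_and(). Below is a hedged before/after sketch; sum_load_old() and sum_load_new() are invented names, and weighted_cpuload() is the helper used in the surrounding file.

/* Sketch only: the old idiom needs a temporary mask ... */
static unsigned long sum_load_old(struct sched_group *group,
				  struct task_struct *p, cpumask_t *tmp)
{
	unsigned long sum = 0;
	int i;

	cpus_and(*tmp, group->cpumask, p->cpus_allowed);
	for_each_cpu_mask_nr(i, *tmp)
		sum += weighted_cpuload(i);
	return sum;
}

/* ... while the new idiom iterates the intersection directly. */
static unsigned long sum_load_new(struct sched_group *group,
				  struct task_struct *p)
{
	unsigned long sum = 0;
	int i;

	for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed)
		sum += weighted_cpuload(i);
	return sum;
}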
@@ -2193,7 +2203,6 @@ static int sched_balance_self(int cpu, int flag) | |||
2193 | update_shares(sd); | 2203 | update_shares(sd); |
2194 | 2204 | ||
2195 | while (sd) { | 2205 | while (sd) { |
2196 | cpumask_t span, tmpmask; | ||
2197 | struct sched_group *group; | 2206 | struct sched_group *group; |
2198 | int new_cpu, weight; | 2207 | int new_cpu, weight; |
2199 | 2208 | ||
@@ -2202,14 +2211,13 @@ static int sched_balance_self(int cpu, int flag) | |||
2202 | continue; | 2211 | continue; |
2203 | } | 2212 | } |
2204 | 2213 | ||
2205 | span = sd->span; | ||
2206 | group = find_idlest_group(sd, t, cpu); | 2214 | group = find_idlest_group(sd, t, cpu); |
2207 | if (!group) { | 2215 | if (!group) { |
2208 | sd = sd->child; | 2216 | sd = sd->child; |
2209 | continue; | 2217 | continue; |
2210 | } | 2218 | } |
2211 | 2219 | ||
2212 | new_cpu = find_idlest_cpu(group, t, cpu, &tmpmask); | 2220 | new_cpu = find_idlest_cpu(group, t, cpu); |
2213 | if (new_cpu == -1 || new_cpu == cpu) { | 2221 | if (new_cpu == -1 || new_cpu == cpu) { |
2214 | /* Now try balancing at a lower domain level of cpu */ | 2222 | /* Now try balancing at a lower domain level of cpu */ |
2215 | sd = sd->child; | 2223 | sd = sd->child; |
@@ -2218,10 +2226,10 @@ static int sched_balance_self(int cpu, int flag) | |||
2218 | 2226 | ||
2219 | /* Now try balancing at a lower domain level of new_cpu */ | 2227 | /* Now try balancing at a lower domain level of new_cpu */ |
2220 | cpu = new_cpu; | 2228 | cpu = new_cpu; |
2229 | weight = cpumask_weight(sched_domain_span(sd)); | ||
2221 | sd = NULL; | 2230 | sd = NULL; |
2222 | weight = cpus_weight(span); | ||
2223 | for_each_domain(cpu, tmp) { | 2231 | for_each_domain(cpu, tmp) { |
2224 | if (weight <= cpus_weight(tmp->span)) | 2232 | if (weight <= cpumask_weight(sched_domain_span(tmp))) |
2225 | break; | 2233 | break; |
2226 | if (tmp->flags & flag) | 2234 | if (tmp->flags & flag) |
2227 | sd = tmp; | 2235 | sd = tmp; |
@@ -2266,7 +2274,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync) | |||
2266 | cpu = task_cpu(p); | 2274 | cpu = task_cpu(p); |
2267 | 2275 | ||
2268 | for_each_domain(this_cpu, sd) { | 2276 | for_each_domain(this_cpu, sd) { |
2269 | if (cpu_isset(cpu, sd->span)) { | 2277 | if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { |
2270 | update_shares(sd); | 2278 | update_shares(sd); |
2271 | break; | 2279 | break; |
2272 | } | 2280 | } |
@@ -2315,7 +2323,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync) | |||
2315 | else { | 2323 | else { |
2316 | struct sched_domain *sd; | 2324 | struct sched_domain *sd; |
2317 | for_each_domain(this_cpu, sd) { | 2325 | for_each_domain(this_cpu, sd) { |
2318 | if (cpu_isset(cpu, sd->span)) { | 2326 | if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { |
2319 | schedstat_inc(sd, ttwu_wake_remote); | 2327 | schedstat_inc(sd, ttwu_wake_remote); |
2320 | break; | 2328 | break; |
2321 | } | 2329 | } |
@@ -2846,7 +2854,7 @@ static void sched_migrate_task(struct task_struct *p, int dest_cpu) | |||
2846 | struct rq *rq; | 2854 | struct rq *rq; |
2847 | 2855 | ||
2848 | rq = task_rq_lock(p, &flags); | 2856 | rq = task_rq_lock(p, &flags); |
2849 | if (!cpu_isset(dest_cpu, p->cpus_allowed) | 2857 | if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed) |
2850 | || unlikely(!cpu_active(dest_cpu))) | 2858 | || unlikely(!cpu_active(dest_cpu))) |
2851 | goto out; | 2859 | goto out; |
2852 | 2860 | ||
@@ -2911,7 +2919,7 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu, | |||
2911 | * 2) cannot be migrated to this CPU due to cpus_allowed, or | 2919 | * 2) cannot be migrated to this CPU due to cpus_allowed, or |
2912 | * 3) are cache-hot on their current CPU. | 2920 | * 3) are cache-hot on their current CPU. |
2913 | */ | 2921 | */ |
2914 | if (!cpu_isset(this_cpu, p->cpus_allowed)) { | 2922 | if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed)) { |
2915 | schedstat_inc(p, se.nr_failed_migrations_affine); | 2923 | schedstat_inc(p, se.nr_failed_migrations_affine); |
2916 | return 0; | 2924 | return 0; |
2917 | } | 2925 | } |
@@ -3086,7 +3094,7 @@ static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest, | |||
3086 | static struct sched_group * | 3094 | static struct sched_group * |
3087 | find_busiest_group(struct sched_domain *sd, int this_cpu, | 3095 | find_busiest_group(struct sched_domain *sd, int this_cpu, |
3088 | unsigned long *imbalance, enum cpu_idle_type idle, | 3096 | unsigned long *imbalance, enum cpu_idle_type idle, |
3089 | int *sd_idle, const cpumask_t *cpus, int *balance) | 3097 | int *sd_idle, const struct cpumask *cpus, int *balance) |
3090 | { | 3098 | { |
3091 | struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups; | 3099 | struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups; |
3092 | unsigned long max_load, avg_load, total_load, this_load, total_pwr; | 3100 | unsigned long max_load, avg_load, total_load, this_load, total_pwr; |
@@ -3122,10 +3130,11 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
3122 | unsigned long sum_avg_load_per_task; | 3130 | unsigned long sum_avg_load_per_task; |
3123 | unsigned long avg_load_per_task; | 3131 | unsigned long avg_load_per_task; |
3124 | 3132 | ||
3125 | local_group = cpu_isset(this_cpu, group->cpumask); | 3133 | local_group = cpumask_test_cpu(this_cpu, |
3134 | sched_group_cpus(group)); | ||
3126 | 3135 | ||
3127 | if (local_group) | 3136 | if (local_group) |
3128 | balance_cpu = first_cpu(group->cpumask); | 3137 | balance_cpu = cpumask_first(sched_group_cpus(group)); |
3129 | 3138 | ||
3130 | /* Tally up the load of all CPUs in the group */ | 3139 | /* Tally up the load of all CPUs in the group */ |
3131 | sum_weighted_load = sum_nr_running = avg_load = 0; | 3140 | sum_weighted_load = sum_nr_running = avg_load = 0; |
@@ -3134,13 +3143,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
3134 | max_cpu_load = 0; | 3143 | max_cpu_load = 0; |
3135 | min_cpu_load = ~0UL; | 3144 | min_cpu_load = ~0UL; |
3136 | 3145 | ||
3137 | for_each_cpu_mask_nr(i, group->cpumask) { | 3146 | for_each_cpu_and(i, sched_group_cpus(group), cpus) { |
3138 | struct rq *rq; | 3147 | struct rq *rq = cpu_rq(i); |
3139 | |||
3140 | if (!cpu_isset(i, *cpus)) | ||
3141 | continue; | ||
3142 | |||
3143 | rq = cpu_rq(i); | ||
3144 | 3148 | ||
3145 | if (*sd_idle && rq->nr_running) | 3149 | if (*sd_idle && rq->nr_running) |
3146 | *sd_idle = 0; | 3150 | *sd_idle = 0; |
@@ -3251,8 +3255,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
3251 | */ | 3255 | */ |
3252 | if ((sum_nr_running < min_nr_running) || | 3256 | if ((sum_nr_running < min_nr_running) || |
3253 | (sum_nr_running == min_nr_running && | 3257 | (sum_nr_running == min_nr_running && |
3254 | first_cpu(group->cpumask) < | 3258 | cpumask_first(sched_group_cpus(group)) > |
3255 | first_cpu(group_min->cpumask))) { | 3259 | cpumask_first(sched_group_cpus(group_min)))) { |
3256 | group_min = group; | 3260 | group_min = group; |
3257 | min_nr_running = sum_nr_running; | 3261 | min_nr_running = sum_nr_running; |
3258 | min_load_per_task = sum_weighted_load / | 3262 | min_load_per_task = sum_weighted_load / |
@@ -3267,8 +3271,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu, | |||
3267 | if (sum_nr_running <= group_capacity - 1) { | 3271 | if (sum_nr_running <= group_capacity - 1) { |
3268 | if (sum_nr_running > leader_nr_running || | 3272 | if (sum_nr_running > leader_nr_running || |
3269 | (sum_nr_running == leader_nr_running && | 3273 | (sum_nr_running == leader_nr_running && |
3270 | first_cpu(group->cpumask) > | 3274 | cpumask_first(sched_group_cpus(group)) < |
3271 | first_cpu(group_leader->cpumask))) { | 3275 | cpumask_first(sched_group_cpus(group_leader)))) { |
3272 | group_leader = group; | 3276 | group_leader = group; |
3273 | leader_nr_running = sum_nr_running; | 3277 | leader_nr_running = sum_nr_running; |
3274 | } | 3278 | } |
@@ -3394,6 +3398,10 @@ out_balanced: | |||
3394 | 3398 | ||
3395 | if (this == group_leader && group_leader != group_min) { | 3399 | if (this == group_leader && group_leader != group_min) { |
3396 | *imbalance = min_load_per_task; | 3400 | *imbalance = min_load_per_task; |
3401 | if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP) { | ||
3402 | cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu = | ||
3403 | cpumask_first(sched_group_cpus(group_leader)); | ||
3404 | } | ||
3397 | return group_min; | 3405 | return group_min; |
3398 | } | 3406 | } |
3399 | #endif | 3407 | #endif |
@@ -3407,16 +3415,16 @@ ret: | |||
3407 | */ | 3415 | */ |
3408 | static struct rq * | 3416 | static struct rq * |
3409 | find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle, | 3417 | find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle, |
3410 | unsigned long imbalance, const cpumask_t *cpus) | 3418 | unsigned long imbalance, const struct cpumask *cpus) |
3411 | { | 3419 | { |
3412 | struct rq *busiest = NULL, *rq; | 3420 | struct rq *busiest = NULL, *rq; |
3413 | unsigned long max_load = 0; | 3421 | unsigned long max_load = 0; |
3414 | int i; | 3422 | int i; |
3415 | 3423 | ||
3416 | for_each_cpu_mask_nr(i, group->cpumask) { | 3424 | for_each_cpu(i, sched_group_cpus(group)) { |
3417 | unsigned long wl; | 3425 | unsigned long wl; |
3418 | 3426 | ||
3419 | if (!cpu_isset(i, *cpus)) | 3427 | if (!cpumask_test_cpu(i, cpus)) |
3420 | continue; | 3428 | continue; |
3421 | 3429 | ||
3422 | rq = cpu_rq(i); | 3430 | rq = cpu_rq(i); |
@@ -3446,7 +3454,7 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle, | |||
3446 | */ | 3454 | */ |
3447 | static int load_balance(int this_cpu, struct rq *this_rq, | 3455 | static int load_balance(int this_cpu, struct rq *this_rq, |
3448 | struct sched_domain *sd, enum cpu_idle_type idle, | 3456 | struct sched_domain *sd, enum cpu_idle_type idle, |
3449 | int *balance, cpumask_t *cpus) | 3457 | int *balance, struct cpumask *cpus) |
3450 | { | 3458 | { |
3451 | int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0; | 3459 | int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0; |
3452 | struct sched_group *group; | 3460 | struct sched_group *group; |
@@ -3454,7 +3462,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, | |||
3454 | struct rq *busiest; | 3462 | struct rq *busiest; |
3455 | unsigned long flags; | 3463 | unsigned long flags; |
3456 | 3464 | ||
3457 | cpus_setall(*cpus); | 3465 | cpumask_setall(cpus); |
3458 | 3466 | ||
3459 | /* | 3467 | /* |
3460 | * When power savings policy is enabled for the parent domain, idle | 3468 | * When power savings policy is enabled for the parent domain, idle |
@@ -3514,8 +3522,8 @@ redo: | |||
3514 | 3522 | ||
3515 | /* All tasks on this runqueue were pinned by CPU affinity */ | 3523 | /* All tasks on this runqueue were pinned by CPU affinity */ |
3516 | if (unlikely(all_pinned)) { | 3524 | if (unlikely(all_pinned)) { |
3517 | cpu_clear(cpu_of(busiest), *cpus); | 3525 | cpumask_clear_cpu(cpu_of(busiest), cpus); |
3518 | if (!cpus_empty(*cpus)) | 3526 | if (!cpumask_empty(cpus)) |
3519 | goto redo; | 3527 | goto redo; |
3520 | goto out_balanced; | 3528 | goto out_balanced; |
3521 | } | 3529 | } |
@@ -3532,7 +3540,8 @@ redo: | |||
3532 | /* don't kick the migration_thread, if the curr | 3540 | /* don't kick the migration_thread, if the curr |
3533 | * task on busiest cpu can't be moved to this_cpu | 3541 | * task on busiest cpu can't be moved to this_cpu |
3534 | */ | 3542 | */ |
3535 | if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) { | 3543 | if (!cpumask_test_cpu(this_cpu, |
3544 | &busiest->curr->cpus_allowed)) { | ||
3536 | spin_unlock_irqrestore(&busiest->lock, flags); | 3545 | spin_unlock_irqrestore(&busiest->lock, flags); |
3537 | all_pinned = 1; | 3546 | all_pinned = 1; |
3538 | goto out_one_pinned; | 3547 | goto out_one_pinned; |
@@ -3607,7 +3616,7 @@ out: | |||
3607 | */ | 3616 | */ |
3608 | static int | 3617 | static int |
3609 | load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd, | 3618 | load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd, |
3610 | cpumask_t *cpus) | 3619 | struct cpumask *cpus) |
3611 | { | 3620 | { |
3612 | struct sched_group *group; | 3621 | struct sched_group *group; |
3613 | struct rq *busiest = NULL; | 3622 | struct rq *busiest = NULL; |
@@ -3616,7 +3625,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd, | |||
3616 | int sd_idle = 0; | 3625 | int sd_idle = 0; |
3617 | int all_pinned = 0; | 3626 | int all_pinned = 0; |
3618 | 3627 | ||
3619 | cpus_setall(*cpus); | 3628 | cpumask_setall(cpus); |
3620 | 3629 | ||
3621 | /* | 3630 | /* |
3622 | * When power savings policy is enabled for the parent domain, idle | 3631 | * When power savings policy is enabled for the parent domain, idle |
@@ -3660,17 +3669,76 @@ redo: | |||
3660 | double_unlock_balance(this_rq, busiest); | 3669 | double_unlock_balance(this_rq, busiest); |
3661 | 3670 | ||
3662 | if (unlikely(all_pinned)) { | 3671 | if (unlikely(all_pinned)) { |
3663 | cpu_clear(cpu_of(busiest), *cpus); | 3672 | cpumask_clear_cpu(cpu_of(busiest), cpus); |
3664 | if (!cpus_empty(*cpus)) | 3673 | if (!cpumask_empty(cpus)) |
3665 | goto redo; | 3674 | goto redo; |
3666 | } | 3675 | } |
3667 | } | 3676 | } |
3668 | 3677 | ||
3669 | if (!ld_moved) { | 3678 | if (!ld_moved) { |
3679 | int active_balance = 0; | ||
3680 | |||
3670 | schedstat_inc(sd, lb_failed[CPU_NEWLY_IDLE]); | 3681 | schedstat_inc(sd, lb_failed[CPU_NEWLY_IDLE]); |
3671 | if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER && | 3682 | if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER && |
3672 | !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE)) | 3683 | !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE)) |
3673 | return -1; | 3684 | return -1; |
3685 | |||
3686 | if (sched_mc_power_savings < POWERSAVINGS_BALANCE_WAKEUP) | ||
3687 | return -1; | ||
3688 | |||
3689 | if (sd->nr_balance_failed++ < 2) | ||
3690 | return -1; | ||
3691 | |||
3692 | /* | ||
3693 | * The only task running in a non-idle cpu can be moved to this | ||
3694 | * cpu in an attempt to completely freeup the other CPU | ||
3695 | * package. The same method used to move task in load_balance() | ||
3696 | * have been extended for load_balance_newidle() to speedup | ||
3697 | * consolidation at sched_mc=POWERSAVINGS_BALANCE_WAKEUP (2) | ||
3698 | * | ||
3699 | * The package power saving logic comes from | ||
3700 | * find_busiest_group(). If there are no imbalance, then | ||
3701 | * f_b_g() will return NULL. However when sched_mc={1,2} then | ||
3702 | * f_b_g() will select a group from which a running task may be | ||
3703 | * pulled to this cpu in order to make the other package idle. | ||
3704 | * If there is no opportunity to make a package idle and if | ||
3705 | * there are no imbalance, then f_b_g() will return NULL and no | ||
3706 | * action will be taken in load_balance_newidle(). | ||
3707 | * | ||
3708 | * Under normal task pull operation due to imbalance, there | ||
3709 | * will be more than one task in the source run queue and | ||
3710 | * move_tasks() will succeed. ld_moved will be true and this | ||
3711 | * active balance code will not be triggered. | ||
3712 | */ | ||
3713 | |||
3714 | /* Lock busiest in correct order while this_rq is held */ | ||
3715 | double_lock_balance(this_rq, busiest); | ||
3716 | |||
3717 | /* | ||
3718 | * don't kick the migration_thread, if the curr | ||
3719 | * task on busiest cpu can't be moved to this_cpu | ||
3720 | */ | ||
3721 | if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) { | ||
3722 | double_unlock_balance(this_rq, busiest); | ||
3723 | all_pinned = 1; | ||
3724 | return ld_moved; | ||
3725 | } | ||
3726 | |||
3727 | if (!busiest->active_balance) { | ||
3728 | busiest->active_balance = 1; | ||
3729 | busiest->push_cpu = this_cpu; | ||
3730 | active_balance = 1; | ||
3731 | } | ||
3732 | |||
3733 | double_unlock_balance(this_rq, busiest); | ||
3734 | /* | ||
3735 | * Should not call ttwu while holding a rq->lock | ||
3736 | */ | ||
3737 | spin_unlock(&this_rq->lock); | ||
3738 | if (active_balance) | ||
3739 | wake_up_process(busiest->migration_thread); | ||
3740 | spin_lock(&this_rq->lock); | ||
3741 | |||
3674 | } else | 3742 | } else |
3675 | sd->nr_balance_failed = 0; | 3743 | sd->nr_balance_failed = 0; |
3676 | 3744 | ||
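The active-balance block added to load_balance_newidle() above is careful about lock ordering: the decision to kick the busiest CPU's migration thread is made while the runqueue locks are held, but the wakeup itself happens only after this_rq->lock is dropped, because the wakeup path can take runqueue locks of its own. A generic sketch of that pattern, using the same helpers as the surrounding code; maybe_kick_migration() is a made-up name.

/* Sketch only: decide under the lock, wake up outside of it. */
static void maybe_kick_migration(struct rq *this_rq, struct rq *busiest,
				 int this_cpu)
{
	int kick = 0;

	double_lock_balance(this_rq, busiest);
	if (!busiest->active_balance) {
		busiest->active_balance = 1;
		busiest->push_cpu = this_cpu;
		kick = 1;
	}
	double_unlock_balance(this_rq, busiest);

	spin_unlock(&this_rq->lock);	/* the wakeup path may take rq locks */
	if (kick)
		wake_up_process(busiest->migration_thread);
	spin_lock(&this_rq->lock);
}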
@@ -3696,7 +3764,10 @@ static void idle_balance(int this_cpu, struct rq *this_rq) | |||
3696 | struct sched_domain *sd; | 3764 | struct sched_domain *sd; |
3697 | int pulled_task = 0; | 3765 | int pulled_task = 0; |
3698 | unsigned long next_balance = jiffies + HZ; | 3766 | unsigned long next_balance = jiffies + HZ; |
3699 | cpumask_t tmpmask; | 3767 | cpumask_var_t tmpmask; |
3768 | |||
3769 | if (!alloc_cpumask_var(&tmpmask, GFP_ATOMIC)) | ||
3770 | return; | ||
3700 | 3771 | ||
3701 | for_each_domain(this_cpu, sd) { | 3772 | for_each_domain(this_cpu, sd) { |
3702 | unsigned long interval; | 3773 | unsigned long interval; |
@@ -3707,7 +3778,7 @@ static void idle_balance(int this_cpu, struct rq *this_rq) | |||
3707 | if (sd->flags & SD_BALANCE_NEWIDLE) | 3778 | if (sd->flags & SD_BALANCE_NEWIDLE) |
3708 | /* If we've pulled tasks over stop searching: */ | 3779 | /* If we've pulled tasks over stop searching: */ |
3709 | pulled_task = load_balance_newidle(this_cpu, this_rq, | 3780 | pulled_task = load_balance_newidle(this_cpu, this_rq, |
3710 | sd, &tmpmask); | 3781 | sd, tmpmask); |
3711 | 3782 | ||
3712 | interval = msecs_to_jiffies(sd->balance_interval); | 3783 | interval = msecs_to_jiffies(sd->balance_interval); |
3713 | if (time_after(next_balance, sd->last_balance + interval)) | 3784 | if (time_after(next_balance, sd->last_balance + interval)) |
@@ -3722,6 +3793,7 @@ static void idle_balance(int this_cpu, struct rq *this_rq) | |||
3722 | */ | 3793 | */ |
3723 | this_rq->next_balance = next_balance; | 3794 | this_rq->next_balance = next_balance; |
3724 | } | 3795 | } |
3796 | free_cpumask_var(tmpmask); | ||
3725 | } | 3797 | } |
3726 | 3798 | ||
3727 | /* | 3799 | /* |
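idle_balance() now needs a scratch cpumask and allocates it with GFP_ATOMIC, since it runs deep in the scheduling path; as the comment in the later rebalance_domains() hunk puts it, a failed allocation just means skipping the balancing attempt. A minimal sketch of that best-effort pattern; the helper name is invented.

/* Sketch only: optional work degrades gracefully if the allocation fails. */
static void balance_with_scratch_mask(void)
{
	cpumask_var_t tmpmask;

	if (!alloc_cpumask_var(&tmpmask, GFP_ATOMIC))
		return;				/* skip balancing this round */

	/* ... pass tmpmask to load_balance_newidle()/load_balance() ... */

	free_cpumask_var(tmpmask);
}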
@@ -3759,7 +3831,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu) | |||
3759 | /* Search for an sd spanning us and the target CPU. */ | 3831 | /* Search for an sd spanning us and the target CPU. */ |
3760 | for_each_domain(target_cpu, sd) { | 3832 | for_each_domain(target_cpu, sd) { |
3761 | if ((sd->flags & SD_LOAD_BALANCE) && | 3833 | if ((sd->flags & SD_LOAD_BALANCE) && |
3762 | cpu_isset(busiest_cpu, sd->span)) | 3834 | cpumask_test_cpu(busiest_cpu, sched_domain_span(sd))) |
3763 | break; | 3835 | break; |
3764 | } | 3836 | } |
3765 | 3837 | ||
@@ -3778,10 +3850,9 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu) | |||
3778 | #ifdef CONFIG_NO_HZ | 3850 | #ifdef CONFIG_NO_HZ |
3779 | static struct { | 3851 | static struct { |
3780 | atomic_t load_balancer; | 3852 | atomic_t load_balancer; |
3781 | cpumask_t cpu_mask; | 3853 | cpumask_var_t cpu_mask; |
3782 | } nohz ____cacheline_aligned = { | 3854 | } nohz ____cacheline_aligned = { |
3783 | .load_balancer = ATOMIC_INIT(-1), | 3855 | .load_balancer = ATOMIC_INIT(-1), |
3784 | .cpu_mask = CPU_MASK_NONE, | ||
3785 | }; | 3856 | }; |
3786 | 3857 | ||
3787 | /* | 3858 | /* |
@@ -3809,7 +3880,7 @@ int select_nohz_load_balancer(int stop_tick) | |||
3809 | int cpu = smp_processor_id(); | 3880 | int cpu = smp_processor_id(); |
3810 | 3881 | ||
3811 | if (stop_tick) { | 3882 | if (stop_tick) { |
3812 | cpu_set(cpu, nohz.cpu_mask); | 3883 | cpumask_set_cpu(cpu, nohz.cpu_mask); |
3813 | cpu_rq(cpu)->in_nohz_recently = 1; | 3884 | cpu_rq(cpu)->in_nohz_recently = 1; |
3814 | 3885 | ||
3815 | /* | 3886 | /* |
@@ -3823,7 +3894,7 @@ int select_nohz_load_balancer(int stop_tick) | |||
3823 | } | 3894 | } |
3824 | 3895 | ||
3825 | /* time for ilb owner also to sleep */ | 3896 | /* time for ilb owner also to sleep */ |
3826 | if (cpus_weight(nohz.cpu_mask) == num_online_cpus()) { | 3897 | if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) { |
3827 | if (atomic_read(&nohz.load_balancer) == cpu) | 3898 | if (atomic_read(&nohz.load_balancer) == cpu) |
3828 | atomic_set(&nohz.load_balancer, -1); | 3899 | atomic_set(&nohz.load_balancer, -1); |
3829 | return 0; | 3900 | return 0; |
@@ -3836,10 +3907,10 @@ int select_nohz_load_balancer(int stop_tick) | |||
3836 | } else if (atomic_read(&nohz.load_balancer) == cpu) | 3907 | } else if (atomic_read(&nohz.load_balancer) == cpu) |
3837 | return 1; | 3908 | return 1; |
3838 | } else { | 3909 | } else { |
3839 | if (!cpu_isset(cpu, nohz.cpu_mask)) | 3910 | if (!cpumask_test_cpu(cpu, nohz.cpu_mask)) |
3840 | return 0; | 3911 | return 0; |
3841 | 3912 | ||
3842 | cpu_clear(cpu, nohz.cpu_mask); | 3913 | cpumask_clear_cpu(cpu, nohz.cpu_mask); |
3843 | 3914 | ||
3844 | if (atomic_read(&nohz.load_balancer) == cpu) | 3915 | if (atomic_read(&nohz.load_balancer) == cpu) |
3845 | if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu) | 3916 | if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu) |
@@ -3867,7 +3938,11 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle) | |||
3867 | unsigned long next_balance = jiffies + 60*HZ; | 3938 | unsigned long next_balance = jiffies + 60*HZ; |
3868 | int update_next_balance = 0; | 3939 | int update_next_balance = 0; |
3869 | int need_serialize; | 3940 | int need_serialize; |
3870 | cpumask_t tmp; | 3941 | cpumask_var_t tmp; |
3942 | |||
3943 | /* Fails alloc? Rebalancing probably not a priority right now. */ | ||
3944 | if (!alloc_cpumask_var(&tmp, GFP_ATOMIC)) | ||
3945 | return; | ||
3871 | 3946 | ||
3872 | for_each_domain(cpu, sd) { | 3947 | for_each_domain(cpu, sd) { |
3873 | if (!(sd->flags & SD_LOAD_BALANCE)) | 3948 | if (!(sd->flags & SD_LOAD_BALANCE)) |
@@ -3892,7 +3967,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle) | |||
3892 | } | 3967 | } |
3893 | 3968 | ||
3894 | if (time_after_eq(jiffies, sd->last_balance + interval)) { | 3969 | if (time_after_eq(jiffies, sd->last_balance + interval)) { |
3895 | if (load_balance(cpu, rq, sd, idle, &balance, &tmp)) { | 3970 | if (load_balance(cpu, rq, sd, idle, &balance, tmp)) { |
3896 | /* | 3971 | /* |
3897 | * We've pulled tasks over so either we're no | 3972 | * We've pulled tasks over so either we're no |
3898 | * longer idle, or one of our SMT siblings is | 3973 | * longer idle, or one of our SMT siblings is |
@@ -3926,6 +4001,8 @@ out: | |||
3926 | */ | 4001 | */ |
3927 | if (likely(update_next_balance)) | 4002 | if (likely(update_next_balance)) |
3928 | rq->next_balance = next_balance; | 4003 | rq->next_balance = next_balance; |
4004 | |||
4005 | free_cpumask_var(tmp); | ||
3929 | } | 4006 | } |
3930 | 4007 | ||
3931 | /* | 4008 | /* |
@@ -3950,12 +4027,13 @@ static void run_rebalance_domains(struct softirq_action *h) | |||
3950 | */ | 4027 | */ |
3951 | if (this_rq->idle_at_tick && | 4028 | if (this_rq->idle_at_tick && |
3952 | atomic_read(&nohz.load_balancer) == this_cpu) { | 4029 | atomic_read(&nohz.load_balancer) == this_cpu) { |
3953 | cpumask_t cpus = nohz.cpu_mask; | ||
3954 | struct rq *rq; | 4030 | struct rq *rq; |
3955 | int balance_cpu; | 4031 | int balance_cpu; |
3956 | 4032 | ||
3957 | cpu_clear(this_cpu, cpus); | 4033 | for_each_cpu(balance_cpu, nohz.cpu_mask) { |
3958 | for_each_cpu_mask_nr(balance_cpu, cpus) { | 4034 | if (balance_cpu == this_cpu) |
4035 | continue; | ||
4036 | |||
3959 | /* | 4037 | /* |
3960 | * If this cpu gets work to do, stop the load balancing | 4038 | * If this cpu gets work to do, stop the load balancing |
3961 | * work being done for other cpus. Next load | 4039 | * work being done for other cpus. Next load |
@@ -3993,7 +4071,7 @@ static inline void trigger_load_balance(struct rq *rq, int cpu) | |||
3993 | rq->in_nohz_recently = 0; | 4071 | rq->in_nohz_recently = 0; |
3994 | 4072 | ||
3995 | if (atomic_read(&nohz.load_balancer) == cpu) { | 4073 | if (atomic_read(&nohz.load_balancer) == cpu) { |
3996 | cpu_clear(cpu, nohz.cpu_mask); | 4074 | cpumask_clear_cpu(cpu, nohz.cpu_mask); |
3997 | atomic_set(&nohz.load_balancer, -1); | 4075 | atomic_set(&nohz.load_balancer, -1); |
3998 | } | 4076 | } |
3999 | 4077 | ||
@@ -4006,7 +4084,7 @@ static inline void trigger_load_balance(struct rq *rq, int cpu) | |||
4006 | * TBD: Traverse the sched domains and nominate | 4084 | * TBD: Traverse the sched domains and nominate |
4007 | * the nearest cpu in the nohz.cpu_mask. | 4085 | * the nearest cpu in the nohz.cpu_mask. |
4008 | */ | 4086 | */ |
4009 | int ilb = first_cpu(nohz.cpu_mask); | 4087 | int ilb = cpumask_first(nohz.cpu_mask); |
4010 | 4088 | ||
4011 | if (ilb < nr_cpu_ids) | 4089 | if (ilb < nr_cpu_ids) |
4012 | resched_cpu(ilb); | 4090 | resched_cpu(ilb); |
@@ -4018,7 +4096,7 @@ static inline void trigger_load_balance(struct rq *rq, int cpu) | |||
4018 | * cpus with ticks stopped, is it time for that to stop? | 4096 | * cpus with ticks stopped, is it time for that to stop? |
4019 | */ | 4097 | */ |
4020 | if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) == cpu && | 4098 | if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) == cpu && |
4021 | cpus_weight(nohz.cpu_mask) == num_online_cpus()) { | 4099 | cpumask_weight(nohz.cpu_mask) == num_online_cpus()) { |
4022 | resched_cpu(cpu); | 4100 | resched_cpu(cpu); |
4023 | return; | 4101 | return; |
4024 | } | 4102 | } |
@@ -4028,7 +4106,7 @@ static inline void trigger_load_balance(struct rq *rq, int cpu) | |||
4028 | * someone else, then no need raise the SCHED_SOFTIRQ | 4106 | * someone else, then no need raise the SCHED_SOFTIRQ |
4029 | */ | 4107 | */ |
4030 | if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) != cpu && | 4108 | if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) != cpu && |
4031 | cpu_isset(cpu, nohz.cpu_mask)) | 4109 | cpumask_test_cpu(cpu, nohz.cpu_mask)) |
4032 | return; | 4110 | return; |
4033 | #endif | 4111 | #endif |
4034 | if (time_after_eq(jiffies, rq->next_balance)) | 4112 | if (time_after_eq(jiffies, rq->next_balance)) |
@@ -4080,13 +4158,17 @@ unsigned long long task_delta_exec(struct task_struct *p) | |||
4080 | * Account user cpu time to a process. | 4158 | * Account user cpu time to a process. |
4081 | * @p: the process that the cpu time gets accounted to | 4159 | * @p: the process that the cpu time gets accounted to |
4082 | * @cputime: the cpu time spent in user space since the last update | 4160 | * @cputime: the cpu time spent in user space since the last update |
4161 | * @cputime_scaled: cputime scaled by cpu frequency | ||
4083 | */ | 4162 | */ |
4084 | void account_user_time(struct task_struct *p, cputime_t cputime) | 4163 | void account_user_time(struct task_struct *p, cputime_t cputime, |
4164 | cputime_t cputime_scaled) | ||
4085 | { | 4165 | { |
4086 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; | 4166 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; |
4087 | cputime64_t tmp; | 4167 | cputime64_t tmp; |
4088 | 4168 | ||
4169 | /* Add user time to process. */ | ||
4089 | p->utime = cputime_add(p->utime, cputime); | 4170 | p->utime = cputime_add(p->utime, cputime); |
4171 | p->utimescaled = cputime_add(p->utimescaled, cputime_scaled); | ||
4090 | account_group_user_time(p, cputime); | 4172 | account_group_user_time(p, cputime); |
4091 | 4173 | ||
4092 | /* Add user time to cpustat. */ | 4174 | /* Add user time to cpustat. */ |
@@ -4103,51 +4185,48 @@ void account_user_time(struct task_struct *p, cputime_t cputime) | |||
4103 | * Account guest cpu time to a process. | 4185 | * Account guest cpu time to a process. |
4104 | * @p: the process that the cpu time gets accounted to | 4186 | * @p: the process that the cpu time gets accounted to |
4105 | * @cputime: the cpu time spent in virtual machine since the last update | 4187 | * @cputime: the cpu time spent in virtual machine since the last update |
4188 | * @cputime_scaled: cputime scaled by cpu frequency | ||
4106 | */ | 4189 | */ |
4107 | static void account_guest_time(struct task_struct *p, cputime_t cputime) | 4190 | static void account_guest_time(struct task_struct *p, cputime_t cputime, |
4191 | cputime_t cputime_scaled) | ||
4108 | { | 4192 | { |
4109 | cputime64_t tmp; | 4193 | cputime64_t tmp; |
4110 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; | 4194 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; |
4111 | 4195 | ||
4112 | tmp = cputime_to_cputime64(cputime); | 4196 | tmp = cputime_to_cputime64(cputime); |
4113 | 4197 | ||
4198 | /* Add guest time to process. */ | ||
4114 | p->utime = cputime_add(p->utime, cputime); | 4199 | p->utime = cputime_add(p->utime, cputime); |
4200 | p->utimescaled = cputime_add(p->utimescaled, cputime_scaled); | ||
4115 | account_group_user_time(p, cputime); | 4201 | account_group_user_time(p, cputime); |
4116 | p->gtime = cputime_add(p->gtime, cputime); | 4202 | p->gtime = cputime_add(p->gtime, cputime); |
4117 | 4203 | ||
4204 | /* Add guest time to cpustat. */ | ||
4118 | cpustat->user = cputime64_add(cpustat->user, tmp); | 4205 | cpustat->user = cputime64_add(cpustat->user, tmp); |
4119 | cpustat->guest = cputime64_add(cpustat->guest, tmp); | 4206 | cpustat->guest = cputime64_add(cpustat->guest, tmp); |
4120 | } | 4207 | } |
4121 | 4208 | ||
4122 | /* | 4209 | /* |
4123 | * Account scaled user cpu time to a process. | ||
4124 | * @p: the process that the cpu time gets accounted to | ||
4125 | * @cputime: the cpu time spent in user space since the last update | ||
4126 | */ | ||
4127 | void account_user_time_scaled(struct task_struct *p, cputime_t cputime) | ||
4128 | { | ||
4129 | p->utimescaled = cputime_add(p->utimescaled, cputime); | ||
4130 | } | ||
4131 | |||
4132 | /* | ||
4133 | * Account system cpu time to a process. | 4210 | * Account system cpu time to a process. |
4134 | * @p: the process that the cpu time gets accounted to | 4211 | * @p: the process that the cpu time gets accounted to |
4135 | * @hardirq_offset: the offset to subtract from hardirq_count() | 4212 | * @hardirq_offset: the offset to subtract from hardirq_count() |
4136 | * @cputime: the cpu time spent in kernel space since the last update | 4213 | * @cputime: the cpu time spent in kernel space since the last update |
4214 | * @cputime_scaled: cputime scaled by cpu frequency | ||
4137 | */ | 4215 | */ |
4138 | void account_system_time(struct task_struct *p, int hardirq_offset, | 4216 | void account_system_time(struct task_struct *p, int hardirq_offset, |
4139 | cputime_t cputime) | 4217 | cputime_t cputime, cputime_t cputime_scaled) |
4140 | { | 4218 | { |
4141 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; | 4219 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; |
4142 | struct rq *rq = this_rq(); | ||
4143 | cputime64_t tmp; | 4220 | cputime64_t tmp; |
4144 | 4221 | ||
4145 | if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) { | 4222 | if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) { |
4146 | account_guest_time(p, cputime); | 4223 | account_guest_time(p, cputime, cputime_scaled); |
4147 | return; | 4224 | return; |
4148 | } | 4225 | } |
4149 | 4226 | ||
4227 | /* Add system time to process. */ | ||
4150 | p->stime = cputime_add(p->stime, cputime); | 4228 | p->stime = cputime_add(p->stime, cputime); |
4229 | p->stimescaled = cputime_add(p->stimescaled, cputime_scaled); | ||
4151 | account_group_system_time(p, cputime); | 4230 | account_group_system_time(p, cputime); |
4152 | 4231 | ||
4153 | /* Add system time to cpustat. */ | 4232 | /* Add system time to cpustat. */ |
@@ -4156,49 +4235,85 @@ void account_system_time(struct task_struct *p, int hardirq_offset, | |||
4156 | cpustat->irq = cputime64_add(cpustat->irq, tmp); | 4235 | cpustat->irq = cputime64_add(cpustat->irq, tmp); |
4157 | else if (softirq_count()) | 4236 | else if (softirq_count()) |
4158 | cpustat->softirq = cputime64_add(cpustat->softirq, tmp); | 4237 | cpustat->softirq = cputime64_add(cpustat->softirq, tmp); |
4159 | else if (p != rq->idle) | ||
4160 | cpustat->system = cputime64_add(cpustat->system, tmp); | ||
4161 | else if (atomic_read(&rq->nr_iowait) > 0) | ||
4162 | cpustat->iowait = cputime64_add(cpustat->iowait, tmp); | ||
4163 | else | 4238 | else |
4164 | cpustat->idle = cputime64_add(cpustat->idle, tmp); | 4239 | cpustat->system = cputime64_add(cpustat->system, tmp); |
4240 | |||
4165 | /* Account for system time used */ | 4241 | /* Account for system time used */ |
4166 | acct_update_integrals(p); | 4242 | acct_update_integrals(p); |
4167 | } | 4243 | } |
4168 | 4244 | ||
4169 | /* | 4245 | /* |
4170 | * Account scaled system cpu time to a process. | 4246 | * Account for involuntary wait time. |
4171 | * @p: the process that the cpu time gets accounted to | 4247 | * @steal: the cpu time spent in involuntary wait |
4172 | * @hardirq_offset: the offset to subtract from hardirq_count() | ||
4173 | * @cputime: the cpu time spent in kernel space since the last update | ||
4174 | */ | 4248 | */ |
4175 | void account_system_time_scaled(struct task_struct *p, cputime_t cputime) | 4249 | void account_steal_time(cputime_t cputime) |
4176 | { | 4250 | { |
4177 | p->stimescaled = cputime_add(p->stimescaled, cputime); | 4251 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; |
4252 | cputime64_t cputime64 = cputime_to_cputime64(cputime); | ||
4253 | |||
4254 | cpustat->steal = cputime64_add(cpustat->steal, cputime64); | ||
4178 | } | 4255 | } |
4179 | 4256 | ||
4180 | /* | 4257 | /* |
4181 | * Account for involuntary wait time. | 4258 | * Account for idle time. |
4182 | * @p: the process from which the cpu time has been stolen | 4259 | * @cputime: the cpu time spent in idle wait |
4183 | * @steal: the cpu time spent in involuntary wait | ||
4184 | */ | 4260 | */ |
4185 | void account_steal_time(struct task_struct *p, cputime_t steal) | 4261 | void account_idle_time(cputime_t cputime) |
4186 | { | 4262 | { |
4187 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; | 4263 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; |
4188 | cputime64_t tmp = cputime_to_cputime64(steal); | 4264 | cputime64_t cputime64 = cputime_to_cputime64(cputime); |
4189 | struct rq *rq = this_rq(); | 4265 | struct rq *rq = this_rq(); |
4190 | 4266 | ||
4191 | if (p == rq->idle) { | 4267 | if (atomic_read(&rq->nr_iowait) > 0) |
4192 | p->stime = cputime_add(p->stime, steal); | 4268 | cpustat->iowait = cputime64_add(cpustat->iowait, cputime64); |
4193 | if (atomic_read(&rq->nr_iowait) > 0) | 4269 | else |
4194 | cpustat->iowait = cputime64_add(cpustat->iowait, tmp); | 4270 | cpustat->idle = cputime64_add(cpustat->idle, cputime64); |
4195 | else | 4271 | } |
4196 | cpustat->idle = cputime64_add(cpustat->idle, tmp); | 4272 | |
4197 | } else | 4273 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING |
4198 | cpustat->steal = cputime64_add(cpustat->steal, tmp); | 4274 | |
4275 | /* | ||
4276 | * Account a single tick of cpu time. | ||
4277 | * @p: the process that the cpu time gets accounted to | ||
4278 | * @user_tick: indicates if the tick is a user or a system tick | ||
4279 | */ | ||
4280 | void account_process_tick(struct task_struct *p, int user_tick) | ||
4281 | { | ||
4282 | cputime_t one_jiffy = jiffies_to_cputime(1); | ||
4283 | cputime_t one_jiffy_scaled = cputime_to_scaled(one_jiffy); | ||
4284 | struct rq *rq = this_rq(); | ||
4285 | |||
4286 | if (user_tick) | ||
4287 | account_user_time(p, one_jiffy, one_jiffy_scaled); | ||
4288 | else if (p != rq->idle) | ||
4289 | account_system_time(p, HARDIRQ_OFFSET, one_jiffy, | ||
4290 | one_jiffy_scaled); | ||
4291 | else | ||
4292 | account_idle_time(one_jiffy); | ||
4199 | } | 4293 | } |
4200 | 4294 | ||
4201 | /* | 4295 | /* |
4296 | * Account multiple ticks of steal time. | ||
4297 | * @p: the process from which the cpu time has been stolen | ||
4298 | * @ticks: number of stolen ticks | ||
4299 | */ | ||
4300 | void account_steal_ticks(unsigned long ticks) | ||
4301 | { | ||
4302 | account_steal_time(jiffies_to_cputime(ticks)); | ||
4303 | } | ||
4304 | |||
4305 | /* | ||
4306 | * Account multiple ticks of idle time. | ||
4307 | * @ticks: number of stolen ticks | ||
4308 | */ | ||
4309 | void account_idle_ticks(unsigned long ticks) | ||
4310 | { | ||
4311 | account_idle_time(jiffies_to_cputime(ticks)); | ||
4312 | } | ||
4313 | |||
4314 | #endif | ||
4315 | |||
4316 | /* | ||
4202 | * Use precise platform statistics if available: | 4317 | * Use precise platform statistics if available: |
4203 | */ | 4318 | */ |
4204 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 4319 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
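The accounting rework above folds the old account_user_time_scaled()/account_system_time_scaled() helpers into the main accounting functions, splits steal and idle accounting into account_steal_time()/account_idle_time(), and adds account_process_tick() as the single per-tick entry point, plus account_steal_ticks()/account_idle_ticks() for batched ticks. The sketch below shows how callers are expected to feed it; tick_account_example() and report_stolen() are hypothetical wrappers, though account_process_tick() is normally driven from the generic timer-tick code.

/* Sketch only: one jiffy is charged to user, system or idle per tick. */
static void tick_account_example(int user_tick)
{
	account_process_tick(current, user_tick);
}

/* Sketch only: a hypervisor reporting lost ticks can batch them. */
static void report_stolen(unsigned long lost_ticks)
{
	account_steal_ticks(lost_ticks);
}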
@@ -5401,10 +5516,9 @@ out_unlock: | |||
5401 | return retval; | 5516 | return retval; |
5402 | } | 5517 | } |
5403 | 5518 | ||
5404 | long sched_setaffinity(pid_t pid, const cpumask_t *in_mask) | 5519 | long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) |
5405 | { | 5520 | { |
5406 | cpumask_t cpus_allowed; | 5521 | cpumask_var_t cpus_allowed, new_mask; |
5407 | cpumask_t new_mask = *in_mask; | ||
5408 | struct task_struct *p; | 5522 | struct task_struct *p; |
5409 | int retval; | 5523 | int retval; |
5410 | 5524 | ||
@@ -5426,6 +5540,14 @@ long sched_setaffinity(pid_t pid, const cpumask_t *in_mask) | |||
5426 | get_task_struct(p); | 5540 | get_task_struct(p); |
5427 | read_unlock(&tasklist_lock); | 5541 | read_unlock(&tasklist_lock); |
5428 | 5542 | ||
5543 | if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { | ||
5544 | retval = -ENOMEM; | ||
5545 | goto out_put_task; | ||
5546 | } | ||
5547 | if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) { | ||
5548 | retval = -ENOMEM; | ||
5549 | goto out_free_cpus_allowed; | ||
5550 | } | ||
5429 | retval = -EPERM; | 5551 | retval = -EPERM; |
5430 | if (!check_same_owner(p) && !capable(CAP_SYS_NICE)) | 5552 | if (!check_same_owner(p) && !capable(CAP_SYS_NICE)) |
5431 | goto out_unlock; | 5553 | goto out_unlock; |
@@ -5434,37 +5556,41 @@ long sched_setaffinity(pid_t pid, const cpumask_t *in_mask) | |||
5434 | if (retval) | 5556 | if (retval) |
5435 | goto out_unlock; | 5557 | goto out_unlock; |
5436 | 5558 | ||
5437 | cpuset_cpus_allowed(p, &cpus_allowed); | 5559 | cpuset_cpus_allowed(p, cpus_allowed); |
5438 | cpus_and(new_mask, new_mask, cpus_allowed); | 5560 | cpumask_and(new_mask, in_mask, cpus_allowed); |
5439 | again: | 5561 | again: |
5440 | retval = set_cpus_allowed_ptr(p, &new_mask); | 5562 | retval = set_cpus_allowed_ptr(p, new_mask); |
5441 | 5563 | ||
5442 | if (!retval) { | 5564 | if (!retval) { |
5443 | cpuset_cpus_allowed(p, &cpus_allowed); | 5565 | cpuset_cpus_allowed(p, cpus_allowed); |
5444 | if (!cpus_subset(new_mask, cpus_allowed)) { | 5566 | if (!cpumask_subset(new_mask, cpus_allowed)) { |
5445 | /* | 5567 | /* |
5446 | * We must have raced with a concurrent cpuset | 5568 | * We must have raced with a concurrent cpuset |
5447 | * update. Just reset the cpus_allowed to the | 5569 | * update. Just reset the cpus_allowed to the |
5448 | * cpuset's cpus_allowed | 5570 | * cpuset's cpus_allowed |
5449 | */ | 5571 | */ |
5450 | new_mask = cpus_allowed; | 5572 | cpumask_copy(new_mask, cpus_allowed); |
5451 | goto again; | 5573 | goto again; |
5452 | } | 5574 | } |
5453 | } | 5575 | } |
5454 | out_unlock: | 5576 | out_unlock: |
5577 | free_cpumask_var(new_mask); | ||
5578 | out_free_cpus_allowed: | ||
5579 | free_cpumask_var(cpus_allowed); | ||
5580 | out_put_task: | ||
5455 | put_task_struct(p); | 5581 | put_task_struct(p); |
5456 | put_online_cpus(); | 5582 | put_online_cpus(); |
5457 | return retval; | 5583 | return retval; |
5458 | } | 5584 | } |
5459 | 5585 | ||
5460 | static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, | 5586 | static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, |
5461 | cpumask_t *new_mask) | 5587 | struct cpumask *new_mask) |
5462 | { | 5588 | { |
5463 | if (len < sizeof(cpumask_t)) { | 5589 | if (len < cpumask_size()) |
5464 | memset(new_mask, 0, sizeof(cpumask_t)); | 5590 | cpumask_clear(new_mask); |
5465 | } else if (len > sizeof(cpumask_t)) { | 5591 | else if (len > cpumask_size()) |
5466 | len = sizeof(cpumask_t); | 5592 | len = cpumask_size(); |
5467 | } | 5593 | |
5468 | return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0; | 5594 | return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0; |
5469 | } | 5595 | } |
5470 | 5596 | ||
@@ -5477,17 +5603,20 @@ static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, | |||
5477 | asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len, | 5603 | asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len, |
5478 | unsigned long __user *user_mask_ptr) | 5604 | unsigned long __user *user_mask_ptr) |
5479 | { | 5605 | { |
5480 | cpumask_t new_mask; | 5606 | cpumask_var_t new_mask; |
5481 | int retval; | 5607 | int retval; |
5482 | 5608 | ||
5483 | retval = get_user_cpu_mask(user_mask_ptr, len, &new_mask); | 5609 | if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) |
5484 | if (retval) | 5610 | return -ENOMEM; |
5485 | return retval; | ||
5486 | 5611 | ||
5487 | return sched_setaffinity(pid, &new_mask); | 5612 | retval = get_user_cpu_mask(user_mask_ptr, len, new_mask); |
5613 | if (retval == 0) | ||
5614 | retval = sched_setaffinity(pid, new_mask); | ||
5615 | free_cpumask_var(new_mask); | ||
5616 | return retval; | ||
5488 | } | 5617 | } |
5489 | 5618 | ||
5490 | long sched_getaffinity(pid_t pid, cpumask_t *mask) | 5619 | long sched_getaffinity(pid_t pid, struct cpumask *mask) |
5491 | { | 5620 | { |
5492 | struct task_struct *p; | 5621 | struct task_struct *p; |
5493 | int retval; | 5622 | int retval; |
@@ -5504,7 +5633,7 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask) | |||
5504 | if (retval) | 5633 | if (retval) |
5505 | goto out_unlock; | 5634 | goto out_unlock; |
5506 | 5635 | ||
5507 | cpus_and(*mask, p->cpus_allowed, cpu_online_map); | 5636 | cpumask_and(mask, &p->cpus_allowed, cpu_online_mask); |
5508 | 5637 | ||
5509 | out_unlock: | 5638 | out_unlock: |
5510 | read_unlock(&tasklist_lock); | 5639 | read_unlock(&tasklist_lock); |
@@ -5523,19 +5652,24 @@ asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len, | |||
5523 | unsigned long __user *user_mask_ptr) | 5652 | unsigned long __user *user_mask_ptr) |
5524 | { | 5653 | { |
5525 | int ret; | 5654 | int ret; |
5526 | cpumask_t mask; | 5655 | cpumask_var_t mask; |
5527 | 5656 | ||
5528 | if (len < sizeof(cpumask_t)) | 5657 | if (len < cpumask_size()) |
5529 | return -EINVAL; | 5658 | return -EINVAL; |
5530 | 5659 | ||
5531 | ret = sched_getaffinity(pid, &mask); | 5660 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) |
5532 | if (ret < 0) | 5661 | return -ENOMEM; |
5533 | return ret; | ||
5534 | 5662 | ||
5535 | if (copy_to_user(user_mask_ptr, &mask, sizeof(cpumask_t))) | 5663 | ret = sched_getaffinity(pid, mask); |
5536 | return -EFAULT; | 5664 | if (ret == 0) { |
5665 | if (copy_to_user(user_mask_ptr, mask, cpumask_size())) | ||
5666 | ret = -EFAULT; | ||
5667 | else | ||
5668 | ret = cpumask_size(); | ||
5669 | } | ||
5670 | free_cpumask_var(mask); | ||
5537 | 5671 | ||
5538 | return sizeof(cpumask_t); | 5672 | return ret; |
5539 | } | 5673 | } |
5540 | 5674 | ||
5541 | /** | 5675 | /** |
@@ -5872,7 +6006,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) | |||
5872 | idle->se.exec_start = sched_clock(); | 6006 | idle->se.exec_start = sched_clock(); |
5873 | 6007 | ||
5874 | idle->prio = idle->normal_prio = MAX_PRIO; | 6008 | idle->prio = idle->normal_prio = MAX_PRIO; |
5875 | idle->cpus_allowed = cpumask_of_cpu(cpu); | 6009 | cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu)); |
5876 | __set_task_cpu(idle, cpu); | 6010 | __set_task_cpu(idle, cpu); |
5877 | 6011 | ||
5878 | rq->curr = rq->idle = idle; | 6012 | rq->curr = rq->idle = idle; |
@@ -5899,9 +6033,9 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) | |||
5899 | * indicates which cpus entered this state. This is used | 6033 | * indicates which cpus entered this state. This is used |
5900 | * in the rcu update to wait only for active cpus. For system | 6034 | * in the rcu update to wait only for active cpus. For system |
5901 | * which do not switch off the HZ timer nohz_cpu_mask should | 6035 | * which do not switch off the HZ timer nohz_cpu_mask should |
5902 | * always be CPU_MASK_NONE. | 6036 | * always be CPU_BITS_NONE. |
5903 | */ | 6037 | */ |
5904 | cpumask_t nohz_cpu_mask = CPU_MASK_NONE; | 6038 | cpumask_var_t nohz_cpu_mask; |
5905 | 6039 | ||
5906 | /* | 6040 | /* |
5907 | * Increase the granularity value when there are more CPUs, | 6041 | * Increase the granularity value when there are more CPUs, |
@@ -5956,7 +6090,7 @@ static inline void sched_init_granularity(void) | |||
5956 | * task must not exit() & deallocate itself prematurely. The | 6090 | * task must not exit() & deallocate itself prematurely. The |
5957 | * call is not atomic; no spinlocks may be held. | 6091 | * call is not atomic; no spinlocks may be held. |
5958 | */ | 6092 | */ |
5959 | int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask) | 6093 | int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) |
5960 | { | 6094 | { |
5961 | struct migration_req req; | 6095 | struct migration_req req; |
5962 | unsigned long flags; | 6096 | unsigned long flags; |
@@ -5964,13 +6098,13 @@ int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask) | |||
5964 | int ret = 0; | 6098 | int ret = 0; |
5965 | 6099 | ||
5966 | rq = task_rq_lock(p, &flags); | 6100 | rq = task_rq_lock(p, &flags); |
5967 | if (!cpus_intersects(*new_mask, cpu_online_map)) { | 6101 | if (!cpumask_intersects(new_mask, cpu_online_mask)) { |
5968 | ret = -EINVAL; | 6102 | ret = -EINVAL; |
5969 | goto out; | 6103 | goto out; |
5970 | } | 6104 | } |
5971 | 6105 | ||
5972 | if (unlikely((p->flags & PF_THREAD_BOUND) && p != current && | 6106 | if (unlikely((p->flags & PF_THREAD_BOUND) && p != current && |
5973 | !cpus_equal(p->cpus_allowed, *new_mask))) { | 6107 | !cpumask_equal(&p->cpus_allowed, new_mask))) { |
5974 | ret = -EINVAL; | 6108 | ret = -EINVAL; |
5975 | goto out; | 6109 | goto out; |
5976 | } | 6110 | } |
@@ -5978,15 +6112,15 @@ int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask) | |||
5978 | if (p->sched_class->set_cpus_allowed) | 6112 | if (p->sched_class->set_cpus_allowed) |
5979 | p->sched_class->set_cpus_allowed(p, new_mask); | 6113 | p->sched_class->set_cpus_allowed(p, new_mask); |
5980 | else { | 6114 | else { |
5981 | p->cpus_allowed = *new_mask; | 6115 | cpumask_copy(&p->cpus_allowed, new_mask); |
5982 | p->rt.nr_cpus_allowed = cpus_weight(*new_mask); | 6116 | p->rt.nr_cpus_allowed = cpumask_weight(new_mask); |
5983 | } | 6117 | } |
5984 | 6118 | ||
5985 | /* Can the task run on the task's current CPU? If so, we're done */ | 6119 | /* Can the task run on the task's current CPU? If so, we're done */ |
5986 | if (cpu_isset(task_cpu(p), *new_mask)) | 6120 | if (cpumask_test_cpu(task_cpu(p), new_mask)) |
5987 | goto out; | 6121 | goto out; |
5988 | 6122 | ||
5989 | if (migrate_task(p, any_online_cpu(*new_mask), &req)) { | 6123 | if (migrate_task(p, cpumask_any_and(cpu_online_mask, new_mask), &req)) { |
5990 | /* Need help from migration thread: drop lock and wait. */ | 6124 | /* Need help from migration thread: drop lock and wait. */ |
5991 | task_rq_unlock(rq, &flags); | 6125 | task_rq_unlock(rq, &flags); |
5992 | wake_up_process(rq->migration_thread); | 6126 | wake_up_process(rq->migration_thread); |
@@ -6028,7 +6162,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) | |||
6028 | if (task_cpu(p) != src_cpu) | 6162 | if (task_cpu(p) != src_cpu) |
6029 | goto done; | 6163 | goto done; |
6030 | /* Affinity changed (again). */ | 6164 | /* Affinity changed (again). */ |
6031 | if (!cpu_isset(dest_cpu, p->cpus_allowed)) | 6165 | if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) |
6032 | goto fail; | 6166 | goto fail; |
6033 | 6167 | ||
6034 | on_rq = p->se.on_rq; | 6168 | on_rq = p->se.on_rq; |
@@ -6125,50 +6259,41 @@ static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu) | |||
6125 | */ | 6259 | */ |
6126 | static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) | 6260 | static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) |
6127 | { | 6261 | { |
6128 | unsigned long flags; | ||
6129 | cpumask_t mask; | ||
6130 | struct rq *rq; | ||
6131 | int dest_cpu; | 6262 | int dest_cpu; |
6263 | const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(dead_cpu)); | ||
6132 | 6264 | ||
6133 | do { | 6265 | again: |
6134 | /* On same node? */ | 6266 | /* Look for allowed, online CPU in same node. */ |
6135 | mask = node_to_cpumask(cpu_to_node(dead_cpu)); | 6267 | for_each_cpu_and(dest_cpu, nodemask, cpu_online_mask) |
6136 | cpus_and(mask, mask, p->cpus_allowed); | 6268 | if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) |
6137 | dest_cpu = any_online_cpu(mask); | 6269 | goto move; |
6138 | 6270 | ||
6139 | /* On any allowed CPU? */ | 6271 | /* Any allowed, online CPU? */ |
6140 | if (dest_cpu >= nr_cpu_ids) | 6272 | dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_online_mask); |
6141 | dest_cpu = any_online_cpu(p->cpus_allowed); | 6273 | if (dest_cpu < nr_cpu_ids) |
6274 | goto move; | ||
6142 | 6275 | ||
6143 | /* No more Mr. Nice Guy. */ | 6276 | /* No more Mr. Nice Guy. */ |
6144 | if (dest_cpu >= nr_cpu_ids) { | 6277 | if (dest_cpu >= nr_cpu_ids) { |
6145 | cpumask_t cpus_allowed; | 6278 | cpuset_cpus_allowed_locked(p, &p->cpus_allowed); |
6146 | 6279 | dest_cpu = cpumask_any_and(cpu_online_mask, &p->cpus_allowed); | |
6147 | cpuset_cpus_allowed_locked(p, &cpus_allowed); | ||
6148 | /* | ||
6149 | * Try to stay on the same cpuset, where the | ||
6150 | * current cpuset may be a subset of all cpus. | ||
6151 | * The cpuset_cpus_allowed_locked() variant of | ||
6152 | * cpuset_cpus_allowed() will not block. It must be | ||
6153 | * called within calls to cpuset_lock/cpuset_unlock. | ||
6154 | */ | ||
6155 | rq = task_rq_lock(p, &flags); | ||
6156 | p->cpus_allowed = cpus_allowed; | ||
6157 | dest_cpu = any_online_cpu(p->cpus_allowed); | ||
6158 | task_rq_unlock(rq, &flags); | ||
6159 | 6280 | ||
6160 | /* | 6281 | /* |
6161 | * Don't tell them about moving exiting tasks or | 6282 | * Don't tell them about moving exiting tasks or |
6162 | * kernel threads (both mm NULL), since they never | 6283 | * kernel threads (both mm NULL), since they never |
6163 | * leave kernel. | 6284 | * leave kernel. |
6164 | */ | 6285 | */ |
6165 | if (p->mm && printk_ratelimit()) { | 6286 | if (p->mm && printk_ratelimit()) { |
6166 | printk(KERN_INFO "process %d (%s) no " | 6287 | printk(KERN_INFO "process %d (%s) no " |
6167 | "longer affine to cpu%d\n", | 6288 | "longer affine to cpu%d\n", |
6168 | task_pid_nr(p), p->comm, dead_cpu); | 6289 | task_pid_nr(p), p->comm, dead_cpu); |
6169 | } | ||
6170 | } | 6290 | } |
6171 | } while (!__migrate_task_irq(p, dead_cpu, dest_cpu)); | 6291 | } |
6292 | |||
6293 | move: | ||
6294 | /* It can have affinity changed while we were choosing. */ | ||
6295 | if (unlikely(!__migrate_task_irq(p, dead_cpu, dest_cpu))) | ||
6296 | goto again; | ||
6172 | } | 6297 | } |
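The rewritten move_task_off_dead_cpu() keeps the old selection order but uses the new iterators: an allowed, online CPU on the dead CPU's node via for_each_cpu_and(), then any allowed, online CPU via cpumask_any_and(), and only then the cpuset fallback. A condensed sketch of the first two tiers (hypothetical helper; the cpuset step and the __migrate_task_irq() retry are omitted):

    /* Illustrative sketch, not the patch's function. */
    #include <linux/sched.h>
    #include <linux/topology.h>

    static int pick_fallback_cpu(struct task_struct *p, int dead_cpu)
    {
            const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(dead_cpu));
            int cpu;

            /* Tier 1: allowed and online on the dead CPU's node. */
            for_each_cpu_and(cpu, nodemask, cpu_online_mask)
                    if (cpumask_test_cpu(cpu, &p->cpus_allowed))
                            return cpu;

            /* Tier 2: any allowed, online CPU (>= nr_cpu_ids if none). */
            return cpumask_any_and(&p->cpus_allowed, cpu_online_mask);
    }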
6173 | 6298 | ||
6174 | /* | 6299 | /* |
@@ -6180,7 +6305,7 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) | |||
6180 | */ | 6305 | */ |
6181 | static void migrate_nr_uninterruptible(struct rq *rq_src) | 6306 | static void migrate_nr_uninterruptible(struct rq *rq_src) |
6182 | { | 6307 | { |
6183 | struct rq *rq_dest = cpu_rq(any_online_cpu(*CPU_MASK_ALL_PTR)); | 6308 | struct rq *rq_dest = cpu_rq(cpumask_any(cpu_online_mask)); |
6184 | unsigned long flags; | 6309 | unsigned long flags; |
6185 | 6310 | ||
6186 | local_irq_save(flags); | 6311 | local_irq_save(flags); |
@@ -6470,7 +6595,7 @@ static void set_rq_online(struct rq *rq) | |||
6470 | if (!rq->online) { | 6595 | if (!rq->online) { |
6471 | const struct sched_class *class; | 6596 | const struct sched_class *class; |
6472 | 6597 | ||
6473 | cpu_set(rq->cpu, rq->rd->online); | 6598 | cpumask_set_cpu(rq->cpu, rq->rd->online); |
6474 | rq->online = 1; | 6599 | rq->online = 1; |
6475 | 6600 | ||
6476 | for_each_class(class) { | 6601 | for_each_class(class) { |
@@ -6490,7 +6615,7 @@ static void set_rq_offline(struct rq *rq) | |||
6490 | class->rq_offline(rq); | 6615 | class->rq_offline(rq); |
6491 | } | 6616 | } |
6492 | 6617 | ||
6493 | cpu_clear(rq->cpu, rq->rd->online); | 6618 | cpumask_clear_cpu(rq->cpu, rq->rd->online); |
6494 | rq->online = 0; | 6619 | rq->online = 0; |
6495 | } | 6620 | } |
6496 | } | 6621 | } |
@@ -6531,7 +6656,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
6531 | rq = cpu_rq(cpu); | 6656 | rq = cpu_rq(cpu); |
6532 | spin_lock_irqsave(&rq->lock, flags); | 6657 | spin_lock_irqsave(&rq->lock, flags); |
6533 | if (rq->rd) { | 6658 | if (rq->rd) { |
6534 | BUG_ON(!cpu_isset(cpu, rq->rd->span)); | 6659 | BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); |
6535 | 6660 | ||
6536 | set_rq_online(rq); | 6661 | set_rq_online(rq); |
6537 | } | 6662 | } |
@@ -6545,7 +6670,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
6545 | break; | 6670 | break; |
6546 | /* Unbind it from offline cpu so it can run. Fall thru. */ | 6671 | /* Unbind it from offline cpu so it can run. Fall thru. */ |
6547 | kthread_bind(cpu_rq(cpu)->migration_thread, | 6672 | kthread_bind(cpu_rq(cpu)->migration_thread, |
6548 | any_online_cpu(cpu_online_map)); | 6673 | cpumask_any(cpu_online_mask)); |
6549 | kthread_stop(cpu_rq(cpu)->migration_thread); | 6674 | kthread_stop(cpu_rq(cpu)->migration_thread); |
6550 | cpu_rq(cpu)->migration_thread = NULL; | 6675 | cpu_rq(cpu)->migration_thread = NULL; |
6551 | break; | 6676 | break; |
@@ -6595,7 +6720,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
6595 | rq = cpu_rq(cpu); | 6720 | rq = cpu_rq(cpu); |
6596 | spin_lock_irqsave(&rq->lock, flags); | 6721 | spin_lock_irqsave(&rq->lock, flags); |
6597 | if (rq->rd) { | 6722 | if (rq->rd) { |
6598 | BUG_ON(!cpu_isset(cpu, rq->rd->span)); | 6723 | BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); |
6599 | set_rq_offline(rq); | 6724 | set_rq_offline(rq); |
6600 | } | 6725 | } |
6601 | spin_unlock_irqrestore(&rq->lock, flags); | 6726 | spin_unlock_irqrestore(&rq->lock, flags); |
@@ -6634,13 +6759,13 @@ early_initcall(migration_init); | |||
6634 | #ifdef CONFIG_SCHED_DEBUG | 6759 | #ifdef CONFIG_SCHED_DEBUG |
6635 | 6760 | ||
6636 | static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, | 6761 | static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, |
6637 | cpumask_t *groupmask) | 6762 | struct cpumask *groupmask) |
6638 | { | 6763 | { |
6639 | struct sched_group *group = sd->groups; | 6764 | struct sched_group *group = sd->groups; |
6640 | char str[256]; | 6765 | char str[256]; |
6641 | 6766 | ||
6642 | cpulist_scnprintf(str, sizeof(str), sd->span); | 6767 | cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd)); |
6643 | cpus_clear(*groupmask); | 6768 | cpumask_clear(groupmask); |
6644 | 6769 | ||
6645 | printk(KERN_DEBUG "%*s domain %d: ", level, "", level); | 6770 | printk(KERN_DEBUG "%*s domain %d: ", level, "", level); |
6646 | 6771 | ||
@@ -6654,11 +6779,11 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, | |||
6654 | 6779 | ||
6655 | printk(KERN_CONT "span %s level %s\n", str, sd->name); | 6780 | printk(KERN_CONT "span %s level %s\n", str, sd->name); |
6656 | 6781 | ||
6657 | if (!cpu_isset(cpu, sd->span)) { | 6782 | if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) { |
6658 | printk(KERN_ERR "ERROR: domain->span does not contain " | 6783 | printk(KERN_ERR "ERROR: domain->span does not contain " |
6659 | "CPU%d\n", cpu); | 6784 | "CPU%d\n", cpu); |
6660 | } | 6785 | } |
6661 | if (!cpu_isset(cpu, group->cpumask)) { | 6786 | if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) { |
6662 | printk(KERN_ERR "ERROR: domain->groups does not contain" | 6787 | printk(KERN_ERR "ERROR: domain->groups does not contain" |
6663 | " CPU%d\n", cpu); | 6788 | " CPU%d\n", cpu); |
6664 | } | 6789 | } |
@@ -6678,31 +6803,32 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, | |||
6678 | break; | 6803 | break; |
6679 | } | 6804 | } |
6680 | 6805 | ||
6681 | if (!cpus_weight(group->cpumask)) { | 6806 | if (!cpumask_weight(sched_group_cpus(group))) { |
6682 | printk(KERN_CONT "\n"); | 6807 | printk(KERN_CONT "\n"); |
6683 | printk(KERN_ERR "ERROR: empty group\n"); | 6808 | printk(KERN_ERR "ERROR: empty group\n"); |
6684 | break; | 6809 | break; |
6685 | } | 6810 | } |
6686 | 6811 | ||
6687 | if (cpus_intersects(*groupmask, group->cpumask)) { | 6812 | if (cpumask_intersects(groupmask, sched_group_cpus(group))) { |
6688 | printk(KERN_CONT "\n"); | 6813 | printk(KERN_CONT "\n"); |
6689 | printk(KERN_ERR "ERROR: repeated CPUs\n"); | 6814 | printk(KERN_ERR "ERROR: repeated CPUs\n"); |
6690 | break; | 6815 | break; |
6691 | } | 6816 | } |
6692 | 6817 | ||
6693 | cpus_or(*groupmask, *groupmask, group->cpumask); | 6818 | cpumask_or(groupmask, groupmask, sched_group_cpus(group)); |
6694 | 6819 | ||
6695 | cpulist_scnprintf(str, sizeof(str), group->cpumask); | 6820 | cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group)); |
6696 | printk(KERN_CONT " %s", str); | 6821 | printk(KERN_CONT " %s", str); |
6697 | 6822 | ||
6698 | group = group->next; | 6823 | group = group->next; |
6699 | } while (group != sd->groups); | 6824 | } while (group != sd->groups); |
6700 | printk(KERN_CONT "\n"); | 6825 | printk(KERN_CONT "\n"); |
6701 | 6826 | ||
6702 | if (!cpus_equal(sd->span, *groupmask)) | 6827 | if (!cpumask_equal(sched_domain_span(sd), groupmask)) |
6703 | printk(KERN_ERR "ERROR: groups don't span domain->span\n"); | 6828 | printk(KERN_ERR "ERROR: groups don't span domain->span\n"); |
6704 | 6829 | ||
6705 | if (sd->parent && !cpus_subset(*groupmask, sd->parent->span)) | 6830 | if (sd->parent && |
6831 | !cpumask_subset(groupmask, sched_domain_span(sd->parent))) | ||
6706 | printk(KERN_ERR "ERROR: parent span is not a superset " | 6832 | printk(KERN_ERR "ERROR: parent span is not a superset " |
6707 | "of domain->span\n"); | 6833 | "of domain->span\n"); |
6708 | return 0; | 6834 | return 0; |
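The checks in sched_domain_debug_one() boil down to one invariant: OR each group's mask into a scratch 'covered' mask, reject overlap with cpumask_intersects() before the OR, and finally require cpumask_equal(covered, span). The same invariant as a stand-alone predicate (illustrative helper, not the kernel's function):

    /* Sketch: true when the circular group list partitions 'span' exactly. */
    #include <linux/sched.h>

    static bool groups_cover_span(struct sched_group *head,
                                  const struct cpumask *span,
                                  struct cpumask *covered)
    {
            struct sched_group *sg = head;

            cpumask_clear(covered);
            do {
                    if (cpumask_intersects(covered, sched_group_cpus(sg)))
                            return false;           /* repeated CPUs */
                    cpumask_or(covered, covered, sched_group_cpus(sg));
                    sg = sg->next;
            } while (sg != head);

            return cpumask_equal(covered, span);    /* exact cover */
    }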
@@ -6710,7 +6836,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, | |||
6710 | 6836 | ||
6711 | static void sched_domain_debug(struct sched_domain *sd, int cpu) | 6837 | static void sched_domain_debug(struct sched_domain *sd, int cpu) |
6712 | { | 6838 | { |
6713 | cpumask_t *groupmask; | 6839 | cpumask_var_t groupmask; |
6714 | int level = 0; | 6840 | int level = 0; |
6715 | 6841 | ||
6716 | if (!sd) { | 6842 | if (!sd) { |
@@ -6720,8 +6846,7 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu) | |||
6720 | 6846 | ||
6721 | printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); | 6847 | printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); |
6722 | 6848 | ||
6723 | groupmask = kmalloc(sizeof(cpumask_t), GFP_KERNEL); | 6849 | if (!alloc_cpumask_var(&groupmask, GFP_KERNEL)) { |
6724 | if (!groupmask) { | ||
6725 | printk(KERN_DEBUG "Cannot load-balance (out of memory)\n"); | 6850 | printk(KERN_DEBUG "Cannot load-balance (out of memory)\n"); |
6726 | return; | 6851 | return; |
6727 | } | 6852 | } |
@@ -6734,7 +6859,7 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu) | |||
6734 | if (!sd) | 6859 | if (!sd) |
6735 | break; | 6860 | break; |
6736 | } | 6861 | } |
6737 | kfree(groupmask); | 6862 | free_cpumask_var(groupmask); |
6738 | } | 6863 | } |
6739 | #else /* !CONFIG_SCHED_DEBUG */ | 6864 | #else /* !CONFIG_SCHED_DEBUG */ |
6740 | # define sched_domain_debug(sd, cpu) do { } while (0) | 6865 | # define sched_domain_debug(sd, cpu) do { } while (0) |
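This hunk shows the cpumask_var_t pattern the rest of the patch repeats: with CONFIG_CPUMASK_OFFSTACK the variable is a pointer that alloc_cpumask_var() must fill and may fail to, otherwise it is ordinary on-stack storage and the alloc/free pair costs nothing. The shape of the pattern, with an invented helper:

    /* Illustrative sketch of the alloc/use/free discipline. */
    #include <linux/sched.h>
    #include <linux/gfp.h>

    static void walk_domains(struct sched_domain *sd)
    {
            cpumask_var_t scratch;  /* pointer or array, depending on config */

            if (!alloc_cpumask_var(&scratch, GFP_KERNEL))
                    return;         /* allocation can fail when off-stack */

            for (; sd; sd = sd->parent)
                    cpumask_and(scratch, sched_domain_span(sd), cpu_online_mask);

            free_cpumask_var(scratch);
    }

Keeping large masks off the stack is the point of the whole conversion: with NR_CPUS in the thousands, a cpumask_t local is no longer a cheap temporary.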
@@ -6742,7 +6867,7 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu) | |||
6742 | 6867 | ||
6743 | static int sd_degenerate(struct sched_domain *sd) | 6868 | static int sd_degenerate(struct sched_domain *sd) |
6744 | { | 6869 | { |
6745 | if (cpus_weight(sd->span) == 1) | 6870 | if (cpumask_weight(sched_domain_span(sd)) == 1) |
6746 | return 1; | 6871 | return 1; |
6747 | 6872 | ||
6748 | /* Following flags need at least 2 groups */ | 6873 | /* Following flags need at least 2 groups */ |
@@ -6773,7 +6898,7 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) | |||
6773 | if (sd_degenerate(parent)) | 6898 | if (sd_degenerate(parent)) |
6774 | return 1; | 6899 | return 1; |
6775 | 6900 | ||
6776 | if (!cpus_equal(sd->span, parent->span)) | 6901 | if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent))) |
6777 | return 0; | 6902 | return 0; |
6778 | 6903 | ||
6779 | /* Does parent contain flags not in child? */ | 6904 | /* Does parent contain flags not in child? */ |
@@ -6797,6 +6922,16 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) | |||
6797 | return 1; | 6922 | return 1; |
6798 | } | 6923 | } |
6799 | 6924 | ||
6925 | static void free_rootdomain(struct root_domain *rd) | ||
6926 | { | ||
6927 | cpupri_cleanup(&rd->cpupri); | ||
6928 | |||
6929 | free_cpumask_var(rd->rto_mask); | ||
6930 | free_cpumask_var(rd->online); | ||
6931 | free_cpumask_var(rd->span); | ||
6932 | kfree(rd); | ||
6933 | } | ||
6934 | |||
6800 | static void rq_attach_root(struct rq *rq, struct root_domain *rd) | 6935 | static void rq_attach_root(struct rq *rq, struct root_domain *rd) |
6801 | { | 6936 | { |
6802 | unsigned long flags; | 6937 | unsigned long flags; |
@@ -6806,38 +6941,62 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd) | |||
6806 | if (rq->rd) { | 6941 | if (rq->rd) { |
6807 | struct root_domain *old_rd = rq->rd; | 6942 | struct root_domain *old_rd = rq->rd; |
6808 | 6943 | ||
6809 | if (cpu_isset(rq->cpu, old_rd->online)) | 6944 | if (cpumask_test_cpu(rq->cpu, old_rd->online)) |
6810 | set_rq_offline(rq); | 6945 | set_rq_offline(rq); |
6811 | 6946 | ||
6812 | cpu_clear(rq->cpu, old_rd->span); | 6947 | cpumask_clear_cpu(rq->cpu, old_rd->span); |
6813 | 6948 | ||
6814 | if (atomic_dec_and_test(&old_rd->refcount)) | 6949 | if (atomic_dec_and_test(&old_rd->refcount)) |
6815 | kfree(old_rd); | 6950 | free_rootdomain(old_rd); |
6816 | } | 6951 | } |
6817 | 6952 | ||
6818 | atomic_inc(&rd->refcount); | 6953 | atomic_inc(&rd->refcount); |
6819 | rq->rd = rd; | 6954 | rq->rd = rd; |
6820 | 6955 | ||
6821 | cpu_set(rq->cpu, rd->span); | 6956 | cpumask_set_cpu(rq->cpu, rd->span); |
6822 | if (cpu_isset(rq->cpu, cpu_online_map)) | 6957 | if (cpumask_test_cpu(rq->cpu, cpu_online_mask)) |
6823 | set_rq_online(rq); | 6958 | set_rq_online(rq); |
6824 | 6959 | ||
6825 | spin_unlock_irqrestore(&rq->lock, flags); | 6960 | spin_unlock_irqrestore(&rq->lock, flags); |
6826 | } | 6961 | } |
6827 | 6962 | ||
6828 | static void init_rootdomain(struct root_domain *rd) | 6963 | static int __init_refok init_rootdomain(struct root_domain *rd, bool bootmem) |
6829 | { | 6964 | { |
6830 | memset(rd, 0, sizeof(*rd)); | 6965 | memset(rd, 0, sizeof(*rd)); |
6831 | 6966 | ||
6832 | cpus_clear(rd->span); | 6967 | if (bootmem) { |
6833 | cpus_clear(rd->online); | 6968 | alloc_bootmem_cpumask_var(&def_root_domain.span); |
6969 | alloc_bootmem_cpumask_var(&def_root_domain.online); | ||
6970 | alloc_bootmem_cpumask_var(&def_root_domain.rto_mask); | ||
6971 | cpupri_init(&rd->cpupri, true); | ||
6972 | return 0; | ||
6973 | } | ||
6974 | |||
6975 | if (!alloc_cpumask_var(&rd->span, GFP_KERNEL)) | ||
6976 | goto out; | ||
6977 | if (!alloc_cpumask_var(&rd->online, GFP_KERNEL)) | ||
6978 | goto free_span; | ||
6979 | if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) | ||
6980 | goto free_online; | ||
6834 | 6981 | ||
6835 | cpupri_init(&rd->cpupri); | 6982 | if (cpupri_init(&rd->cpupri, false) != 0) |
6983 | goto free_rto_mask; | ||
6984 | return 0; | ||
6985 | |||
6986 | free_rto_mask: | ||
6987 | free_cpumask_var(rd->rto_mask); | ||
6988 | free_online: | ||
6989 | free_cpumask_var(rd->online); | ||
6990 | free_span: | ||
6991 | free_cpumask_var(rd->span); | ||
6992 | out: | ||
6993 | return -ENOMEM; | ||
6836 | } | 6994 | } |
6837 | 6995 | ||
6838 | static void init_defrootdomain(void) | 6996 | static void init_defrootdomain(void) |
6839 | { | 6997 | { |
6840 | init_rootdomain(&def_root_domain); | 6998 | init_rootdomain(&def_root_domain, true); |
6999 | |||
6841 | atomic_set(&def_root_domain.refcount, 1); | 7000 | atomic_set(&def_root_domain.refcount, 1); |
6842 | } | 7001 | } |
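init_rootdomain() now separates the boot-time path, which uses alloc_bootmem_cpumask_var() and reports no failure, from the runtime path, where each alloc_cpumask_var() can fail and is unwound in reverse order through goto labels. The unwind idiom in isolation (generic sketch, names invented):

    /* Sketch: allocate masks in order, free the ones already allocated on failure. */
    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>

    static int alloc_two_masks(cpumask_var_t *a, cpumask_var_t *b)
    {
            if (!alloc_cpumask_var(a, GFP_KERNEL))
                    goto out;
            if (!alloc_cpumask_var(b, GFP_KERNEL))
                    goto free_a;
            return 0;

    free_a:
            free_cpumask_var(*a);
    out:
            return -ENOMEM;
    }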
6843 | 7002 | ||
@@ -6849,7 +7008,10 @@ static struct root_domain *alloc_rootdomain(void) | |||
6849 | if (!rd) | 7008 | if (!rd) |
6850 | return NULL; | 7009 | return NULL; |
6851 | 7010 | ||
6852 | init_rootdomain(rd); | 7011 | if (init_rootdomain(rd, false) != 0) { |
7012 | kfree(rd); | ||
7013 | return NULL; | ||
7014 | } | ||
6853 | 7015 | ||
6854 | return rd; | 7016 | return rd; |
6855 | } | 7017 | } |
@@ -6891,19 +7053,12 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) | |||
6891 | } | 7053 | } |
6892 | 7054 | ||
6893 | /* cpus with isolated domains */ | 7055 | /* cpus with isolated domains */ |
6894 | static cpumask_t cpu_isolated_map = CPU_MASK_NONE; | 7056 | static cpumask_var_t cpu_isolated_map; |
6895 | 7057 | ||
6896 | /* Setup the mask of cpus configured for isolated domains */ | 7058 | /* Setup the mask of cpus configured for isolated domains */ |
6897 | static int __init isolated_cpu_setup(char *str) | 7059 | static int __init isolated_cpu_setup(char *str) |
6898 | { | 7060 | { |
6899 | static int __initdata ints[NR_CPUS]; | 7061 | cpulist_parse(str, cpu_isolated_map); |
6900 | int i; | ||
6901 | |||
6902 | str = get_options(str, ARRAY_SIZE(ints), ints); | ||
6903 | cpus_clear(cpu_isolated_map); | ||
6904 | for (i = 1; i <= ints[0]; i++) | ||
6905 | if (ints[i] < NR_CPUS) | ||
6906 | cpu_set(ints[i], cpu_isolated_map); | ||
6907 | return 1; | 7062 | return 1; |
6908 | } | 7063 | } |
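cpulist_parse() accepts the usual CPU-list syntax (e.g. "0-3,7") and writes the result straight into a struct cpumask, replacing the open-coded get_options() loop. Outside early boot the destination mask would be allocated first; a hedged sketch with an invented helper:

    /* Sketch: parse a cpu list into a freshly allocated mask. */
    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>

    static int parse_cpu_list(const char *str, cpumask_var_t *mask)
    {
            int err;

            if (!alloc_cpumask_var(mask, GFP_KERNEL))
                    return -ENOMEM;

            err = cpulist_parse(str, *mask);        /* 0 on success */
            if (err)
                    free_cpumask_var(*mask);
            return err;
    }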
6909 | 7064 | ||
@@ -6912,42 +7067,43 @@ __setup("isolcpus=", isolated_cpu_setup); | |||
6912 | /* | 7067 | /* |
6913 | * init_sched_build_groups takes the cpumask we wish to span, and a pointer | 7068 | * init_sched_build_groups takes the cpumask we wish to span, and a pointer |
6914 | * to a function which identifies what group (along with sched group) a CPU | 7069 | * to a function which identifies what group (along with sched group) a CPU |
6915 | * belongs to. The return value of group_fn must be >= 0 and < NR_CPUS | 7070 | * belongs to. The return value of group_fn must be >= 0 and < nr_cpu_ids |
6916 | * (due to the fact that we keep track of groups covered with a cpumask_t). | 7071 | * (due to the fact that we keep track of groups covered with a struct cpumask). |
6917 | * | 7072 | * |
6918 | * init_sched_build_groups will build a circular linked list of the groups | 7073 | * init_sched_build_groups will build a circular linked list of the groups |
6919 | * covered by the given span, and will set each group's ->cpumask correctly, | 7074 | * covered by the given span, and will set each group's ->cpumask correctly, |
6920 | * and ->cpu_power to 0. | 7075 | * and ->cpu_power to 0. |
6921 | */ | 7076 | */ |
6922 | static void | 7077 | static void |
6923 | init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map, | 7078 | init_sched_build_groups(const struct cpumask *span, |
6924 | int (*group_fn)(int cpu, const cpumask_t *cpu_map, | 7079 | const struct cpumask *cpu_map, |
7080 | int (*group_fn)(int cpu, const struct cpumask *cpu_map, | ||
6925 | struct sched_group **sg, | 7081 | struct sched_group **sg, |
6926 | cpumask_t *tmpmask), | 7082 | struct cpumask *tmpmask), |
6927 | cpumask_t *covered, cpumask_t *tmpmask) | 7083 | struct cpumask *covered, struct cpumask *tmpmask) |
6928 | { | 7084 | { |
6929 | struct sched_group *first = NULL, *last = NULL; | 7085 | struct sched_group *first = NULL, *last = NULL; |
6930 | int i; | 7086 | int i; |
6931 | 7087 | ||
6932 | cpus_clear(*covered); | 7088 | cpumask_clear(covered); |
6933 | 7089 | ||
6934 | for_each_cpu_mask_nr(i, *span) { | 7090 | for_each_cpu(i, span) { |
6935 | struct sched_group *sg; | 7091 | struct sched_group *sg; |
6936 | int group = group_fn(i, cpu_map, &sg, tmpmask); | 7092 | int group = group_fn(i, cpu_map, &sg, tmpmask); |
6937 | int j; | 7093 | int j; |
6938 | 7094 | ||
6939 | if (cpu_isset(i, *covered)) | 7095 | if (cpumask_test_cpu(i, covered)) |
6940 | continue; | 7096 | continue; |
6941 | 7097 | ||
6942 | cpus_clear(sg->cpumask); | 7098 | cpumask_clear(sched_group_cpus(sg)); |
6943 | sg->__cpu_power = 0; | 7099 | sg->__cpu_power = 0; |
6944 | 7100 | ||
6945 | for_each_cpu_mask_nr(j, *span) { | 7101 | for_each_cpu(j, span) { |
6946 | if (group_fn(j, cpu_map, NULL, tmpmask) != group) | 7102 | if (group_fn(j, cpu_map, NULL, tmpmask) != group) |
6947 | continue; | 7103 | continue; |
6948 | 7104 | ||
6949 | cpu_set(j, *covered); | 7105 | cpumask_set_cpu(j, covered); |
6950 | cpu_set(j, sg->cpumask); | 7106 | cpumask_set_cpu(j, sched_group_cpus(sg)); |
6951 | } | 7107 | } |
6952 | if (!first) | 7108 | if (!first) |
6953 | first = sg; | 7109 | first = sg; |
@@ -7011,23 +7167,21 @@ static int find_next_best_node(int node, nodemask_t *used_nodes) | |||
7011 | * should be one that prevents unnecessary balancing, but also spreads tasks | 7167 | * should be one that prevents unnecessary balancing, but also spreads tasks |
7012 | * out optimally. | 7168 | * out optimally. |
7013 | */ | 7169 | */ |
7014 | static void sched_domain_node_span(int node, cpumask_t *span) | 7170 | static void sched_domain_node_span(int node, struct cpumask *span) |
7015 | { | 7171 | { |
7016 | nodemask_t used_nodes; | 7172 | nodemask_t used_nodes; |
7017 | node_to_cpumask_ptr(nodemask, node); | ||
7018 | int i; | 7173 | int i; |
7019 | 7174 | ||
7020 | cpus_clear(*span); | 7175 | cpumask_clear(span); |
7021 | nodes_clear(used_nodes); | 7176 | nodes_clear(used_nodes); |
7022 | 7177 | ||
7023 | cpus_or(*span, *span, *nodemask); | 7178 | cpumask_or(span, span, cpumask_of_node(node)); |
7024 | node_set(node, used_nodes); | 7179 | node_set(node, used_nodes); |
7025 | 7180 | ||
7026 | for (i = 1; i < SD_NODES_PER_DOMAIN; i++) { | 7181 | for (i = 1; i < SD_NODES_PER_DOMAIN; i++) { |
7027 | int next_node = find_next_best_node(node, &used_nodes); | 7182 | int next_node = find_next_best_node(node, &used_nodes); |
7028 | 7183 | ||
7029 | node_to_cpumask_ptr_next(nodemask, next_node); | 7184 | cpumask_or(span, span, cpumask_of_node(next_node)); |
7030 | cpus_or(*span, *span, *nodemask); | ||
7031 | } | 7185 | } |
7032 | } | 7186 | } |
7033 | #endif /* CONFIG_NUMA */ | 7187 | #endif /* CONFIG_NUMA */ |
@@ -7035,18 +7189,33 @@ static void sched_domain_node_span(int node, cpumask_t *span) | |||
7035 | int sched_smt_power_savings = 0, sched_mc_power_savings = 0; | 7189 | int sched_smt_power_savings = 0, sched_mc_power_savings = 0; |
7036 | 7190 | ||
7037 | /* | 7191 | /* |
7192 | * The cpus mask in sched_group and sched_domain hangs off the end. | ||
7193 | * FIXME: use cpumask_var_t or dynamic percpu alloc to avoid wasting space | ||
7194 | * for nr_cpu_ids < CONFIG_NR_CPUS. | ||
7195 | */ | ||
7196 | struct static_sched_group { | ||
7197 | struct sched_group sg; | ||
7198 | DECLARE_BITMAP(cpus, CONFIG_NR_CPUS); | ||
7199 | }; | ||
7200 | |||
7201 | struct static_sched_domain { | ||
7202 | struct sched_domain sd; | ||
7203 | DECLARE_BITMAP(span, CONFIG_NR_CPUS); | ||
7204 | }; | ||
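The FIXME above refers to the trailing-storage idiom: sched_domain and sched_group end in an unsized unsigned long array that is reached through sched_domain_span()/sched_group_cpus(), so statically defined instances need explicit bitmap storage appended, which is all static_sched_group and static_sched_domain provide. The idiom in generic form (struct and helper names invented here):

    /* Sketch of the "mask hangs off the end" layout. */
    #include <linux/cpumask.h>

    struct thing {
            int power;
            unsigned long span[0];                  /* storage appended by the definer */
    };

    struct static_thing {
            struct thing t;
            DECLARE_BITMAP(bits, CONFIG_NR_CPUS);   /* backs t.span */
    };

    static inline struct cpumask *thing_span(struct thing *t)
    {
            return to_cpumask(t->span);             /* view the bits as a cpumask */
    }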
7205 | |||
7206 | /* | ||
7038 | * SMT sched-domains: | 7207 | * SMT sched-domains: |
7039 | */ | 7208 | */ |
7040 | #ifdef CONFIG_SCHED_SMT | 7209 | #ifdef CONFIG_SCHED_SMT |
7041 | static DEFINE_PER_CPU(struct sched_domain, cpu_domains); | 7210 | static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains); |
7042 | static DEFINE_PER_CPU(struct sched_group, sched_group_cpus); | 7211 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_cpus); |
7043 | 7212 | ||
7044 | static int | 7213 | static int |
7045 | cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, | 7214 | cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map, |
7046 | cpumask_t *unused) | 7215 | struct sched_group **sg, struct cpumask *unused) |
7047 | { | 7216 | { |
7048 | if (sg) | 7217 | if (sg) |
7049 | *sg = &per_cpu(sched_group_cpus, cpu); | 7218 | *sg = &per_cpu(sched_group_cpus, cpu).sg; |
7050 | return cpu; | 7219 | return cpu; |
7051 | } | 7220 | } |
7052 | #endif /* CONFIG_SCHED_SMT */ | 7221 | #endif /* CONFIG_SCHED_SMT */ |
@@ -7055,56 +7224,53 @@ cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, | |||
7055 | * multi-core sched-domains: | 7224 | * multi-core sched-domains: |
7056 | */ | 7225 | */ |
7057 | #ifdef CONFIG_SCHED_MC | 7226 | #ifdef CONFIG_SCHED_MC |
7058 | static DEFINE_PER_CPU(struct sched_domain, core_domains); | 7227 | static DEFINE_PER_CPU(struct static_sched_domain, core_domains); |
7059 | static DEFINE_PER_CPU(struct sched_group, sched_group_core); | 7228 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_core); |
7060 | #endif /* CONFIG_SCHED_MC */ | 7229 | #endif /* CONFIG_SCHED_MC */ |
7061 | 7230 | ||
7062 | #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT) | 7231 | #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT) |
7063 | static int | 7232 | static int |
7064 | cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, | 7233 | cpu_to_core_group(int cpu, const struct cpumask *cpu_map, |
7065 | cpumask_t *mask) | 7234 | struct sched_group **sg, struct cpumask *mask) |
7066 | { | 7235 | { |
7067 | int group; | 7236 | int group; |
7068 | 7237 | ||
7069 | *mask = per_cpu(cpu_sibling_map, cpu); | 7238 | cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map); |
7070 | cpus_and(*mask, *mask, *cpu_map); | 7239 | group = cpumask_first(mask); |
7071 | group = first_cpu(*mask); | ||
7072 | if (sg) | 7240 | if (sg) |
7073 | *sg = &per_cpu(sched_group_core, group); | 7241 | *sg = &per_cpu(sched_group_core, group).sg; |
7074 | return group; | 7242 | return group; |
7075 | } | 7243 | } |
7076 | #elif defined(CONFIG_SCHED_MC) | 7244 | #elif defined(CONFIG_SCHED_MC) |
7077 | static int | 7245 | static int |
7078 | cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, | 7246 | cpu_to_core_group(int cpu, const struct cpumask *cpu_map, |
7079 | cpumask_t *unused) | 7247 | struct sched_group **sg, struct cpumask *unused) |
7080 | { | 7248 | { |
7081 | if (sg) | 7249 | if (sg) |
7082 | *sg = &per_cpu(sched_group_core, cpu); | 7250 | *sg = &per_cpu(sched_group_core, cpu).sg; |
7083 | return cpu; | 7251 | return cpu; |
7084 | } | 7252 | } |
7085 | #endif | 7253 | #endif |
7086 | 7254 | ||
7087 | static DEFINE_PER_CPU(struct sched_domain, phys_domains); | 7255 | static DEFINE_PER_CPU(struct static_sched_domain, phys_domains); |
7088 | static DEFINE_PER_CPU(struct sched_group, sched_group_phys); | 7256 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_phys); |
7089 | 7257 | ||
7090 | static int | 7258 | static int |
7091 | cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, | 7259 | cpu_to_phys_group(int cpu, const struct cpumask *cpu_map, |
7092 | cpumask_t *mask) | 7260 | struct sched_group **sg, struct cpumask *mask) |
7093 | { | 7261 | { |
7094 | int group; | 7262 | int group; |
7095 | #ifdef CONFIG_SCHED_MC | 7263 | #ifdef CONFIG_SCHED_MC |
7096 | *mask = cpu_coregroup_map(cpu); | 7264 | cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map); |
7097 | cpus_and(*mask, *mask, *cpu_map); | 7265 | group = cpumask_first(mask); |
7098 | group = first_cpu(*mask); | ||
7099 | #elif defined(CONFIG_SCHED_SMT) | 7266 | #elif defined(CONFIG_SCHED_SMT) |
7100 | *mask = per_cpu(cpu_sibling_map, cpu); | 7267 | cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map); |
7101 | cpus_and(*mask, *mask, *cpu_map); | 7268 | group = cpumask_first(mask); |
7102 | group = first_cpu(*mask); | ||
7103 | #else | 7269 | #else |
7104 | group = cpu; | 7270 | group = cpu; |
7105 | #endif | 7271 | #endif |
7106 | if (sg) | 7272 | if (sg) |
7107 | *sg = &per_cpu(sched_group_phys, group); | 7273 | *sg = &per_cpu(sched_group_phys, group).sg; |
7108 | return group; | 7274 | return group; |
7109 | } | 7275 | } |
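All the cpu_to_*_group() helpers now share one recipe: AND the topology mask with cpu_map into a caller-supplied scratch mask, then take cpumask_first() as the canonical group id, replacing the old copy, cpus_and(), first_cpu() sequence. Condensed into a hypothetical helper:

    /* Sketch: representative CPU of a topology group, restricted to cpu_map. */
    #include <linux/cpumask.h>

    static int group_representative(const struct cpumask *topo,
                                    const struct cpumask *cpu_map,
                                    struct cpumask *scratch)
    {
            cpumask_and(scratch, topo, cpu_map);
            return cpumask_first(scratch);  /* >= nr_cpu_ids if empty */
    }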
7110 | 7276 | ||
@@ -7114,23 +7280,23 @@ cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, | |||
7114 | * groups, so roll our own. Now each node has its own list of groups which | 7280 | * groups, so roll our own. Now each node has its own list of groups which |
7115 | * gets dynamically allocated. | 7281 | * gets dynamically allocated. |
7116 | */ | 7282 | */ |
7117 | static DEFINE_PER_CPU(struct sched_domain, node_domains); | 7283 | static DEFINE_PER_CPU(struct static_sched_domain, node_domains); |
7118 | static struct sched_group ***sched_group_nodes_bycpu; | 7284 | static struct sched_group ***sched_group_nodes_bycpu; |
7119 | 7285 | ||
7120 | static DEFINE_PER_CPU(struct sched_domain, allnodes_domains); | 7286 | static DEFINE_PER_CPU(struct static_sched_domain, allnodes_domains); |
7121 | static DEFINE_PER_CPU(struct sched_group, sched_group_allnodes); | 7287 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes); |
7122 | 7288 | ||
7123 | static int cpu_to_allnodes_group(int cpu, const cpumask_t *cpu_map, | 7289 | static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map, |
7124 | struct sched_group **sg, cpumask_t *nodemask) | 7290 | struct sched_group **sg, |
7291 | struct cpumask *nodemask) | ||
7125 | { | 7292 | { |
7126 | int group; | 7293 | int group; |
7127 | 7294 | ||
7128 | *nodemask = node_to_cpumask(cpu_to_node(cpu)); | 7295 | cpumask_and(nodemask, cpumask_of_node(cpu_to_node(cpu)), cpu_map); |
7129 | cpus_and(*nodemask, *nodemask, *cpu_map); | 7296 | group = cpumask_first(nodemask); |
7130 | group = first_cpu(*nodemask); | ||
7131 | 7297 | ||
7132 | if (sg) | 7298 | if (sg) |
7133 | *sg = &per_cpu(sched_group_allnodes, group); | 7299 | *sg = &per_cpu(sched_group_allnodes, group).sg; |
7134 | return group; | 7300 | return group; |
7135 | } | 7301 | } |
7136 | 7302 | ||
@@ -7142,11 +7308,11 @@ static void init_numa_sched_groups_power(struct sched_group *group_head) | |||
7142 | if (!sg) | 7308 | if (!sg) |
7143 | return; | 7309 | return; |
7144 | do { | 7310 | do { |
7145 | for_each_cpu_mask_nr(j, sg->cpumask) { | 7311 | for_each_cpu(j, sched_group_cpus(sg)) { |
7146 | struct sched_domain *sd; | 7312 | struct sched_domain *sd; |
7147 | 7313 | ||
7148 | sd = &per_cpu(phys_domains, j); | 7314 | sd = &per_cpu(phys_domains, j).sd; |
7149 | if (j != first_cpu(sd->groups->cpumask)) { | 7315 | if (j != cpumask_first(sched_group_cpus(sd->groups))) { |
7150 | /* | 7316 | /* |
7151 | * Only add "power" once for each | 7317 | * Only add "power" once for each |
7152 | * physical package. | 7318 | * physical package. |
@@ -7163,11 +7329,12 @@ static void init_numa_sched_groups_power(struct sched_group *group_head) | |||
7163 | 7329 | ||
7164 | #ifdef CONFIG_NUMA | 7330 | #ifdef CONFIG_NUMA |
7165 | /* Free memory allocated for various sched_group structures */ | 7331 | /* Free memory allocated for various sched_group structures */ |
7166 | static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask) | 7332 | static void free_sched_groups(const struct cpumask *cpu_map, |
7333 | struct cpumask *nodemask) | ||
7167 | { | 7334 | { |
7168 | int cpu, i; | 7335 | int cpu, i; |
7169 | 7336 | ||
7170 | for_each_cpu_mask_nr(cpu, *cpu_map) { | 7337 | for_each_cpu(cpu, cpu_map) { |
7171 | struct sched_group **sched_group_nodes | 7338 | struct sched_group **sched_group_nodes |
7172 | = sched_group_nodes_bycpu[cpu]; | 7339 | = sched_group_nodes_bycpu[cpu]; |
7173 | 7340 | ||
@@ -7177,9 +7344,8 @@ static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask) | |||
7177 | for (i = 0; i < nr_node_ids; i++) { | 7344 | for (i = 0; i < nr_node_ids; i++) { |
7178 | struct sched_group *oldsg, *sg = sched_group_nodes[i]; | 7345 | struct sched_group *oldsg, *sg = sched_group_nodes[i]; |
7179 | 7346 | ||
7180 | *nodemask = node_to_cpumask(i); | 7347 | cpumask_and(nodemask, cpumask_of_node(i), cpu_map); |
7181 | cpus_and(*nodemask, *nodemask, *cpu_map); | 7348 | if (cpumask_empty(nodemask)) |
7182 | if (cpus_empty(*nodemask)) | ||
7183 | continue; | 7349 | continue; |
7184 | 7350 | ||
7185 | if (sg == NULL) | 7351 | if (sg == NULL) |
@@ -7197,7 +7363,8 @@ next_sg: | |||
7197 | } | 7363 | } |
7198 | } | 7364 | } |
7199 | #else /* !CONFIG_NUMA */ | 7365 | #else /* !CONFIG_NUMA */ |
7200 | static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask) | 7366 | static void free_sched_groups(const struct cpumask *cpu_map, |
7367 | struct cpumask *nodemask) | ||
7201 | { | 7368 | { |
7202 | } | 7369 | } |
7203 | #endif /* CONFIG_NUMA */ | 7370 | #endif /* CONFIG_NUMA */ |
@@ -7223,7 +7390,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd) | |||
7223 | 7390 | ||
7224 | WARN_ON(!sd || !sd->groups); | 7391 | WARN_ON(!sd || !sd->groups); |
7225 | 7392 | ||
7226 | if (cpu != first_cpu(sd->groups->cpumask)) | 7393 | if (cpu != cpumask_first(sched_group_cpus(sd->groups))) |
7227 | return; | 7394 | return; |
7228 | 7395 | ||
7229 | child = sd->child; | 7396 | child = sd->child; |
@@ -7288,48 +7455,6 @@ SD_INIT_FUNC(CPU) | |||
7288 | SD_INIT_FUNC(MC) | 7455 | SD_INIT_FUNC(MC) |
7289 | #endif | 7456 | #endif |
7290 | 7457 | ||
7291 | /* | ||
7292 | * To minimize stack usage kmalloc room for cpumasks and share the | ||
7293 | * space as the usage in build_sched_domains() dictates. Used only | ||
7294 | * if the amount of space is significant. | ||
7295 | */ | ||
7296 | struct allmasks { | ||
7297 | cpumask_t tmpmask; /* make this one first */ | ||
7298 | union { | ||
7299 | cpumask_t nodemask; | ||
7300 | cpumask_t this_sibling_map; | ||
7301 | cpumask_t this_core_map; | ||
7302 | }; | ||
7303 | cpumask_t send_covered; | ||
7304 | |||
7305 | #ifdef CONFIG_NUMA | ||
7306 | cpumask_t domainspan; | ||
7307 | cpumask_t covered; | ||
7308 | cpumask_t notcovered; | ||
7309 | #endif | ||
7310 | }; | ||
7311 | |||
7312 | #if NR_CPUS > 128 | ||
7313 | #define SCHED_CPUMASK_DECLARE(v) struct allmasks *v | ||
7314 | static inline void sched_cpumask_alloc(struct allmasks **masks) | ||
7315 | { | ||
7316 | *masks = kmalloc(sizeof(**masks), GFP_KERNEL); | ||
7317 | } | ||
7318 | static inline void sched_cpumask_free(struct allmasks *masks) | ||
7319 | { | ||
7320 | kfree(masks); | ||
7321 | } | ||
7322 | #else | ||
7323 | #define SCHED_CPUMASK_DECLARE(v) struct allmasks _v, *v = &_v | ||
7324 | static inline void sched_cpumask_alloc(struct allmasks **masks) | ||
7325 | { } | ||
7326 | static inline void sched_cpumask_free(struct allmasks *masks) | ||
7327 | { } | ||
7328 | #endif | ||
7329 | |||
7330 | #define SCHED_CPUMASK_VAR(v, a) cpumask_t *v = (cpumask_t *) \ | ||
7331 | ((unsigned long)(a) + offsetof(struct allmasks, v)) | ||
7332 | |||
7333 | static int default_relax_domain_level = -1; | 7458 | static int default_relax_domain_level = -1; |
7334 | 7459 | ||
7335 | static int __init setup_relax_domain_level(char *str) | 7460 | static int __init setup_relax_domain_level(char *str) |
@@ -7369,17 +7494,38 @@ static void set_domain_attribute(struct sched_domain *sd, | |||
7369 | * Build sched domains for a given set of cpus and attach the sched domains | 7494 | * Build sched domains for a given set of cpus and attach the sched domains |
7370 | * to the individual cpus | 7495 | * to the individual cpus |
7371 | */ | 7496 | */ |
7372 | static int __build_sched_domains(const cpumask_t *cpu_map, | 7497 | static int __build_sched_domains(const struct cpumask *cpu_map, |
7373 | struct sched_domain_attr *attr) | 7498 | struct sched_domain_attr *attr) |
7374 | { | 7499 | { |
7375 | int i; | 7500 | int i, err = -ENOMEM; |
7376 | struct root_domain *rd; | 7501 | struct root_domain *rd; |
7377 | SCHED_CPUMASK_DECLARE(allmasks); | 7502 | cpumask_var_t nodemask, this_sibling_map, this_core_map, send_covered, |
7378 | cpumask_t *tmpmask; | 7503 | tmpmask; |
7379 | #ifdef CONFIG_NUMA | 7504 | #ifdef CONFIG_NUMA |
7505 | cpumask_var_t domainspan, covered, notcovered; | ||
7380 | struct sched_group **sched_group_nodes = NULL; | 7506 | struct sched_group **sched_group_nodes = NULL; |
7381 | int sd_allnodes = 0; | 7507 | int sd_allnodes = 0; |
7382 | 7508 | ||
7509 | if (!alloc_cpumask_var(&domainspan, GFP_KERNEL)) | ||
7510 | goto out; | ||
7511 | if (!alloc_cpumask_var(&covered, GFP_KERNEL)) | ||
7512 | goto free_domainspan; | ||
7513 | if (!alloc_cpumask_var(¬covered, GFP_KERNEL)) | ||
7514 | goto free_covered; | ||
7515 | #endif | ||
7516 | |||
7517 | if (!alloc_cpumask_var(&nodemask, GFP_KERNEL)) | ||
7518 | goto free_notcovered; | ||
7519 | if (!alloc_cpumask_var(&this_sibling_map, GFP_KERNEL)) | ||
7520 | goto free_nodemask; | ||
7521 | if (!alloc_cpumask_var(&this_core_map, GFP_KERNEL)) | ||
7522 | goto free_this_sibling_map; | ||
7523 | if (!alloc_cpumask_var(&send_covered, GFP_KERNEL)) | ||
7524 | goto free_this_core_map; | ||
7525 | if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL)) | ||
7526 | goto free_send_covered; | ||
7527 | |||
7528 | #ifdef CONFIG_NUMA | ||
7383 | /* | 7529 | /* |
7384 | * Allocate the per-node list of sched groups | 7530 | * Allocate the per-node list of sched groups |
7385 | */ | 7531 | */ |
@@ -7387,75 +7533,57 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7387 | GFP_KERNEL); | 7533 | GFP_KERNEL); |
7388 | if (!sched_group_nodes) { | 7534 | if (!sched_group_nodes) { |
7389 | printk(KERN_WARNING "Can not alloc sched group node list\n"); | 7535 | printk(KERN_WARNING "Can not alloc sched group node list\n"); |
7390 | return -ENOMEM; | 7536 | goto free_tmpmask; |
7391 | } | 7537 | } |
7392 | #endif | 7538 | #endif |
7393 | 7539 | ||
7394 | rd = alloc_rootdomain(); | 7540 | rd = alloc_rootdomain(); |
7395 | if (!rd) { | 7541 | if (!rd) { |
7396 | printk(KERN_WARNING "Cannot alloc root domain\n"); | 7542 | printk(KERN_WARNING "Cannot alloc root domain\n"); |
7397 | #ifdef CONFIG_NUMA | 7543 | goto free_sched_groups; |
7398 | kfree(sched_group_nodes); | ||
7399 | #endif | ||
7400 | return -ENOMEM; | ||
7401 | } | ||
7402 | |||
7403 | /* get space for all scratch cpumask variables */ | ||
7404 | sched_cpumask_alloc(&allmasks); | ||
7405 | if (!allmasks) { | ||
7406 | printk(KERN_WARNING "Cannot alloc cpumask array\n"); | ||
7407 | kfree(rd); | ||
7408 | #ifdef CONFIG_NUMA | ||
7409 | kfree(sched_group_nodes); | ||
7410 | #endif | ||
7411 | return -ENOMEM; | ||
7412 | } | 7544 | } |
7413 | 7545 | ||
7414 | tmpmask = (cpumask_t *)allmasks; | ||
7415 | |||
7416 | |||
7417 | #ifdef CONFIG_NUMA | 7546 | #ifdef CONFIG_NUMA |
7418 | sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes; | 7547 | sched_group_nodes_bycpu[cpumask_first(cpu_map)] = sched_group_nodes; |
7419 | #endif | 7548 | #endif |
7420 | 7549 | ||
7421 | /* | 7550 | /* |
7422 | * Set up domains for cpus specified by the cpu_map. | 7551 | * Set up domains for cpus specified by the cpu_map. |
7423 | */ | 7552 | */ |
7424 | for_each_cpu_mask_nr(i, *cpu_map) { | 7553 | for_each_cpu(i, cpu_map) { |
7425 | struct sched_domain *sd = NULL, *p; | 7554 | struct sched_domain *sd = NULL, *p; |
7426 | SCHED_CPUMASK_VAR(nodemask, allmasks); | ||
7427 | 7555 | ||
7428 | *nodemask = node_to_cpumask(cpu_to_node(i)); | 7556 | cpumask_and(nodemask, cpumask_of_node(cpu_to_node(i)), cpu_map); |
7429 | cpus_and(*nodemask, *nodemask, *cpu_map); | ||
7430 | 7557 | ||
7431 | #ifdef CONFIG_NUMA | 7558 | #ifdef CONFIG_NUMA |
7432 | if (cpus_weight(*cpu_map) > | 7559 | if (cpumask_weight(cpu_map) > |
7433 | SD_NODES_PER_DOMAIN*cpus_weight(*nodemask)) { | 7560 | SD_NODES_PER_DOMAIN*cpumask_weight(nodemask)) { |
7434 | sd = &per_cpu(allnodes_domains, i); | 7561 | sd = &per_cpu(allnodes_domains, i).sd; |
7435 | SD_INIT(sd, ALLNODES); | 7562 | SD_INIT(sd, ALLNODES); |
7436 | set_domain_attribute(sd, attr); | 7563 | set_domain_attribute(sd, attr); |
7437 | sd->span = *cpu_map; | 7564 | cpumask_copy(sched_domain_span(sd), cpu_map); |
7438 | cpu_to_allnodes_group(i, cpu_map, &sd->groups, tmpmask); | 7565 | cpu_to_allnodes_group(i, cpu_map, &sd->groups, tmpmask); |
7439 | p = sd; | 7566 | p = sd; |
7440 | sd_allnodes = 1; | 7567 | sd_allnodes = 1; |
7441 | } else | 7568 | } else |
7442 | p = NULL; | 7569 | p = NULL; |
7443 | 7570 | ||
7444 | sd = &per_cpu(node_domains, i); | 7571 | sd = &per_cpu(node_domains, i).sd; |
7445 | SD_INIT(sd, NODE); | 7572 | SD_INIT(sd, NODE); |
7446 | set_domain_attribute(sd, attr); | 7573 | set_domain_attribute(sd, attr); |
7447 | sched_domain_node_span(cpu_to_node(i), &sd->span); | 7574 | sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd)); |
7448 | sd->parent = p; | 7575 | sd->parent = p; |
7449 | if (p) | 7576 | if (p) |
7450 | p->child = sd; | 7577 | p->child = sd; |
7451 | cpus_and(sd->span, sd->span, *cpu_map); | 7578 | cpumask_and(sched_domain_span(sd), |
7579 | sched_domain_span(sd), cpu_map); | ||
7452 | #endif | 7580 | #endif |
7453 | 7581 | ||
7454 | p = sd; | 7582 | p = sd; |
7455 | sd = &per_cpu(phys_domains, i); | 7583 | sd = &per_cpu(phys_domains, i).sd; |
7456 | SD_INIT(sd, CPU); | 7584 | SD_INIT(sd, CPU); |
7457 | set_domain_attribute(sd, attr); | 7585 | set_domain_attribute(sd, attr); |
7458 | sd->span = *nodemask; | 7586 | cpumask_copy(sched_domain_span(sd), nodemask); |
7459 | sd->parent = p; | 7587 | sd->parent = p; |
7460 | if (p) | 7588 | if (p) |
7461 | p->child = sd; | 7589 | p->child = sd; |
@@ -7463,11 +7591,11 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7463 | 7591 | ||
7464 | #ifdef CONFIG_SCHED_MC | 7592 | #ifdef CONFIG_SCHED_MC |
7465 | p = sd; | 7593 | p = sd; |
7466 | sd = &per_cpu(core_domains, i); | 7594 | sd = &per_cpu(core_domains, i).sd; |
7467 | SD_INIT(sd, MC); | 7595 | SD_INIT(sd, MC); |
7468 | set_domain_attribute(sd, attr); | 7596 | set_domain_attribute(sd, attr); |
7469 | sd->span = cpu_coregroup_map(i); | 7597 | cpumask_and(sched_domain_span(sd), cpu_map, |
7470 | cpus_and(sd->span, sd->span, *cpu_map); | 7598 | cpu_coregroup_mask(i)); |
7471 | sd->parent = p; | 7599 | sd->parent = p; |
7472 | p->child = sd; | 7600 | p->child = sd; |
7473 | cpu_to_core_group(i, cpu_map, &sd->groups, tmpmask); | 7601 | cpu_to_core_group(i, cpu_map, &sd->groups, tmpmask); |
@@ -7475,11 +7603,11 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7475 | 7603 | ||
7476 | #ifdef CONFIG_SCHED_SMT | 7604 | #ifdef CONFIG_SCHED_SMT |
7477 | p = sd; | 7605 | p = sd; |
7478 | sd = &per_cpu(cpu_domains, i); | 7606 | sd = &per_cpu(cpu_domains, i).sd; |
7479 | SD_INIT(sd, SIBLING); | 7607 | SD_INIT(sd, SIBLING); |
7480 | set_domain_attribute(sd, attr); | 7608 | set_domain_attribute(sd, attr); |
7481 | sd->span = per_cpu(cpu_sibling_map, i); | 7609 | cpumask_and(sched_domain_span(sd), |
7482 | cpus_and(sd->span, sd->span, *cpu_map); | 7610 | &per_cpu(cpu_sibling_map, i), cpu_map); |
7483 | sd->parent = p; | 7611 | sd->parent = p; |
7484 | p->child = sd; | 7612 | p->child = sd; |
7485 | cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask); | 7613 | cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask); |
@@ -7488,13 +7616,10 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7488 | 7616 | ||
7489 | #ifdef CONFIG_SCHED_SMT | 7617 | #ifdef CONFIG_SCHED_SMT |
7490 | /* Set up CPU (sibling) groups */ | 7618 | /* Set up CPU (sibling) groups */ |
7491 | for_each_cpu_mask_nr(i, *cpu_map) { | 7619 | for_each_cpu(i, cpu_map) { |
7492 | SCHED_CPUMASK_VAR(this_sibling_map, allmasks); | 7620 | cpumask_and(this_sibling_map, |
7493 | SCHED_CPUMASK_VAR(send_covered, allmasks); | 7621 | &per_cpu(cpu_sibling_map, i), cpu_map); |
7494 | 7622 | if (i != cpumask_first(this_sibling_map)) | |
7495 | *this_sibling_map = per_cpu(cpu_sibling_map, i); | ||
7496 | cpus_and(*this_sibling_map, *this_sibling_map, *cpu_map); | ||
7497 | if (i != first_cpu(*this_sibling_map)) | ||
7498 | continue; | 7623 | continue; |
7499 | 7624 | ||
7500 | init_sched_build_groups(this_sibling_map, cpu_map, | 7625 | init_sched_build_groups(this_sibling_map, cpu_map, |
@@ -7505,13 +7630,9 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7505 | 7630 | ||
7506 | #ifdef CONFIG_SCHED_MC | 7631 | #ifdef CONFIG_SCHED_MC |
7507 | /* Set up multi-core groups */ | 7632 | /* Set up multi-core groups */ |
7508 | for_each_cpu_mask_nr(i, *cpu_map) { | 7633 | for_each_cpu(i, cpu_map) { |
7509 | SCHED_CPUMASK_VAR(this_core_map, allmasks); | 7634 | cpumask_and(this_core_map, cpu_coregroup_mask(i), cpu_map); |
7510 | SCHED_CPUMASK_VAR(send_covered, allmasks); | 7635 | if (i != cpumask_first(this_core_map)) |
7511 | |||
7512 | *this_core_map = cpu_coregroup_map(i); | ||
7513 | cpus_and(*this_core_map, *this_core_map, *cpu_map); | ||
7514 | if (i != first_cpu(*this_core_map)) | ||
7515 | continue; | 7636 | continue; |
7516 | 7637 | ||
7517 | init_sched_build_groups(this_core_map, cpu_map, | 7638 | init_sched_build_groups(this_core_map, cpu_map, |
@@ -7522,12 +7643,8 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7522 | 7643 | ||
7523 | /* Set up physical groups */ | 7644 | /* Set up physical groups */ |
7524 | for (i = 0; i < nr_node_ids; i++) { | 7645 | for (i = 0; i < nr_node_ids; i++) { |
7525 | SCHED_CPUMASK_VAR(nodemask, allmasks); | 7646 | cpumask_and(nodemask, cpumask_of_node(i), cpu_map); |
7526 | SCHED_CPUMASK_VAR(send_covered, allmasks); | 7647 | if (cpumask_empty(nodemask)) |
7527 | |||
7528 | *nodemask = node_to_cpumask(i); | ||
7529 | cpus_and(*nodemask, *nodemask, *cpu_map); | ||
7530 | if (cpus_empty(*nodemask)) | ||
7531 | continue; | 7648 | continue; |
7532 | 7649 | ||
7533 | init_sched_build_groups(nodemask, cpu_map, | 7650 | init_sched_build_groups(nodemask, cpu_map, |
@@ -7538,8 +7655,6 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7538 | #ifdef CONFIG_NUMA | 7655 | #ifdef CONFIG_NUMA |
7539 | /* Set up node groups */ | 7656 | /* Set up node groups */ |
7540 | if (sd_allnodes) { | 7657 | if (sd_allnodes) { |
7541 | SCHED_CPUMASK_VAR(send_covered, allmasks); | ||
7542 | |||
7543 | init_sched_build_groups(cpu_map, cpu_map, | 7658 | init_sched_build_groups(cpu_map, cpu_map, |
7544 | &cpu_to_allnodes_group, | 7659 | &cpu_to_allnodes_group, |
7545 | send_covered, tmpmask); | 7660 | send_covered, tmpmask); |
@@ -7548,58 +7663,53 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7548 | for (i = 0; i < nr_node_ids; i++) { | 7663 | for (i = 0; i < nr_node_ids; i++) { |
7549 | /* Set up node groups */ | 7664 | /* Set up node groups */ |
7550 | struct sched_group *sg, *prev; | 7665 | struct sched_group *sg, *prev; |
7551 | SCHED_CPUMASK_VAR(nodemask, allmasks); | ||
7552 | SCHED_CPUMASK_VAR(domainspan, allmasks); | ||
7553 | SCHED_CPUMASK_VAR(covered, allmasks); | ||
7554 | int j; | 7666 | int j; |
7555 | 7667 | ||
7556 | *nodemask = node_to_cpumask(i); | 7668 | cpumask_clear(covered); |
7557 | cpus_clear(*covered); | 7669 | cpumask_and(nodemask, cpumask_of_node(i), cpu_map); |
7558 | 7670 | if (cpumask_empty(nodemask)) { | |
7559 | cpus_and(*nodemask, *nodemask, *cpu_map); | ||
7560 | if (cpus_empty(*nodemask)) { | ||
7561 | sched_group_nodes[i] = NULL; | 7671 | sched_group_nodes[i] = NULL; |
7562 | continue; | 7672 | continue; |
7563 | } | 7673 | } |
7564 | 7674 | ||
7565 | sched_domain_node_span(i, domainspan); | 7675 | sched_domain_node_span(i, domainspan); |
7566 | cpus_and(*domainspan, *domainspan, *cpu_map); | 7676 | cpumask_and(domainspan, domainspan, cpu_map); |
7567 | 7677 | ||
7568 | sg = kmalloc_node(sizeof(struct sched_group), GFP_KERNEL, i); | 7678 | sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(), |
7679 | GFP_KERNEL, i); | ||
7569 | if (!sg) { | 7680 | if (!sg) { |
7570 | printk(KERN_WARNING "Can not alloc domain group for " | 7681 | printk(KERN_WARNING "Can not alloc domain group for " |
7571 | "node %d\n", i); | 7682 | "node %d\n", i); |
7572 | goto error; | 7683 | goto error; |
7573 | } | 7684 | } |
7574 | sched_group_nodes[i] = sg; | 7685 | sched_group_nodes[i] = sg; |
7575 | for_each_cpu_mask_nr(j, *nodemask) { | 7686 | for_each_cpu(j, nodemask) { |
7576 | struct sched_domain *sd; | 7687 | struct sched_domain *sd; |
7577 | 7688 | ||
7578 | sd = &per_cpu(node_domains, j); | 7689 | sd = &per_cpu(node_domains, j).sd; |
7579 | sd->groups = sg; | 7690 | sd->groups = sg; |
7580 | } | 7691 | } |
7581 | sg->__cpu_power = 0; | 7692 | sg->__cpu_power = 0; |
7582 | sg->cpumask = *nodemask; | 7693 | cpumask_copy(sched_group_cpus(sg), nodemask); |
7583 | sg->next = sg; | 7694 | sg->next = sg; |
7584 | cpus_or(*covered, *covered, *nodemask); | 7695 | cpumask_or(covered, covered, nodemask); |
7585 | prev = sg; | 7696 | prev = sg; |
7586 | 7697 | ||
7587 | for (j = 0; j < nr_node_ids; j++) { | 7698 | for (j = 0; j < nr_node_ids; j++) { |
7588 | SCHED_CPUMASK_VAR(notcovered, allmasks); | ||
7589 | int n = (i + j) % nr_node_ids; | 7699 | int n = (i + j) % nr_node_ids; |
7590 | node_to_cpumask_ptr(pnodemask, n); | ||
7591 | 7700 | ||
7592 | cpus_complement(*notcovered, *covered); | 7701 | cpumask_complement(notcovered, covered); |
7593 | cpus_and(*tmpmask, *notcovered, *cpu_map); | 7702 | cpumask_and(tmpmask, notcovered, cpu_map); |
7594 | cpus_and(*tmpmask, *tmpmask, *domainspan); | 7703 | cpumask_and(tmpmask, tmpmask, domainspan); |
7595 | if (cpus_empty(*tmpmask)) | 7704 | if (cpumask_empty(tmpmask)) |
7596 | break; | 7705 | break; |
7597 | 7706 | ||
7598 | cpus_and(*tmpmask, *tmpmask, *pnodemask); | 7707 | cpumask_and(tmpmask, tmpmask, cpumask_of_node(n)); |
7599 | if (cpus_empty(*tmpmask)) | 7708 | if (cpumask_empty(tmpmask)) |
7600 | continue; | 7709 | continue; |
7601 | 7710 | ||
7602 | sg = kmalloc_node(sizeof(struct sched_group), | 7711 | sg = kmalloc_node(sizeof(struct sched_group) + |
7712 | cpumask_size(), | ||
7603 | GFP_KERNEL, i); | 7713 | GFP_KERNEL, i); |
7604 | if (!sg) { | 7714 | if (!sg) { |
7605 | printk(KERN_WARNING | 7715 | printk(KERN_WARNING |
@@ -7607,9 +7717,9 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7607 | goto error; | 7717 | goto error; |
7608 | } | 7718 | } |
7609 | sg->__cpu_power = 0; | 7719 | sg->__cpu_power = 0; |
7610 | sg->cpumask = *tmpmask; | 7720 | cpumask_copy(sched_group_cpus(sg), tmpmask); |
7611 | sg->next = prev->next; | 7721 | sg->next = prev->next; |
7612 | cpus_or(*covered, *covered, *tmpmask); | 7722 | cpumask_or(covered, covered, tmpmask); |
7613 | prev->next = sg; | 7723 | prev->next = sg; |
7614 | prev = sg; | 7724 | prev = sg; |
7615 | } | 7725 | } |
@@ -7618,22 +7728,22 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7618 | 7728 | ||
7619 | /* Calculate CPU power for physical packages and nodes */ | 7729 | /* Calculate CPU power for physical packages and nodes */ |
7620 | #ifdef CONFIG_SCHED_SMT | 7730 | #ifdef CONFIG_SCHED_SMT |
7621 | for_each_cpu_mask_nr(i, *cpu_map) { | 7731 | for_each_cpu(i, cpu_map) { |
7622 | struct sched_domain *sd = &per_cpu(cpu_domains, i); | 7732 | struct sched_domain *sd = &per_cpu(cpu_domains, i).sd; |
7623 | 7733 | ||
7624 | init_sched_groups_power(i, sd); | 7734 | init_sched_groups_power(i, sd); |
7625 | } | 7735 | } |
7626 | #endif | 7736 | #endif |
7627 | #ifdef CONFIG_SCHED_MC | 7737 | #ifdef CONFIG_SCHED_MC |
7628 | for_each_cpu_mask_nr(i, *cpu_map) { | 7738 | for_each_cpu(i, cpu_map) { |
7629 | struct sched_domain *sd = &per_cpu(core_domains, i); | 7739 | struct sched_domain *sd = &per_cpu(core_domains, i).sd; |
7630 | 7740 | ||
7631 | init_sched_groups_power(i, sd); | 7741 | init_sched_groups_power(i, sd); |
7632 | } | 7742 | } |
7633 | #endif | 7743 | #endif |
7634 | 7744 | ||
7635 | for_each_cpu_mask_nr(i, *cpu_map) { | 7745 | for_each_cpu(i, cpu_map) { |
7636 | struct sched_domain *sd = &per_cpu(phys_domains, i); | 7746 | struct sched_domain *sd = &per_cpu(phys_domains, i).sd; |
7637 | 7747 | ||
7638 | init_sched_groups_power(i, sd); | 7748 | init_sched_groups_power(i, sd); |
7639 | } | 7749 | } |
@@ -7645,53 +7755,78 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7645 | if (sd_allnodes) { | 7755 | if (sd_allnodes) { |
7646 | struct sched_group *sg; | 7756 | struct sched_group *sg; |
7647 | 7757 | ||
7648 | cpu_to_allnodes_group(first_cpu(*cpu_map), cpu_map, &sg, | 7758 | cpu_to_allnodes_group(cpumask_first(cpu_map), cpu_map, &sg, |
7649 | tmpmask); | 7759 | tmpmask); |
7650 | init_numa_sched_groups_power(sg); | 7760 | init_numa_sched_groups_power(sg); |
7651 | } | 7761 | } |
7652 | #endif | 7762 | #endif |
7653 | 7763 | ||
7654 | /* Attach the domains */ | 7764 | /* Attach the domains */ |
7655 | for_each_cpu_mask_nr(i, *cpu_map) { | 7765 | for_each_cpu(i, cpu_map) { |
7656 | struct sched_domain *sd; | 7766 | struct sched_domain *sd; |
7657 | #ifdef CONFIG_SCHED_SMT | 7767 | #ifdef CONFIG_SCHED_SMT |
7658 | sd = &per_cpu(cpu_domains, i); | 7768 | sd = &per_cpu(cpu_domains, i).sd; |
7659 | #elif defined(CONFIG_SCHED_MC) | 7769 | #elif defined(CONFIG_SCHED_MC) |
7660 | sd = &per_cpu(core_domains, i); | 7770 | sd = &per_cpu(core_domains, i).sd; |
7661 | #else | 7771 | #else |
7662 | sd = &per_cpu(phys_domains, i); | 7772 | sd = &per_cpu(phys_domains, i).sd; |
7663 | #endif | 7773 | #endif |
7664 | cpu_attach_domain(sd, rd, i); | 7774 | cpu_attach_domain(sd, rd, i); |
7665 | } | 7775 | } |
7666 | 7776 | ||
7667 | sched_cpumask_free(allmasks); | 7777 | err = 0; |
7668 | return 0; | 7778 | |
7779 | free_tmpmask: | ||
7780 | free_cpumask_var(tmpmask); | ||
7781 | free_send_covered: | ||
7782 | free_cpumask_var(send_covered); | ||
7783 | free_this_core_map: | ||
7784 | free_cpumask_var(this_core_map); | ||
7785 | free_this_sibling_map: | ||
7786 | free_cpumask_var(this_sibling_map); | ||
7787 | free_nodemask: | ||
7788 | free_cpumask_var(nodemask); | ||
7789 | free_notcovered: | ||
7790 | #ifdef CONFIG_NUMA | ||
7791 | free_cpumask_var(notcovered); | ||
7792 | free_covered: | ||
7793 | free_cpumask_var(covered); | ||
7794 | free_domainspan: | ||
7795 | free_cpumask_var(domainspan); | ||
7796 | out: | ||
7797 | #endif | ||
7798 | return err; | ||
7799 | |||
7800 | free_sched_groups: | ||
7801 | #ifdef CONFIG_NUMA | ||
7802 | kfree(sched_group_nodes); | ||
7803 | #endif | ||
7804 | goto free_tmpmask; | ||
7669 | 7805 | ||
7670 | #ifdef CONFIG_NUMA | 7806 | #ifdef CONFIG_NUMA |
7671 | error: | 7807 | error: |
7672 | free_sched_groups(cpu_map, tmpmask); | 7808 | free_sched_groups(cpu_map, tmpmask); |
7673 | sched_cpumask_free(allmasks); | 7809 | free_rootdomain(rd); |
7674 | kfree(rd); | 7810 | goto free_tmpmask; |
7675 | return -ENOMEM; | ||
7676 | #endif | 7811 | #endif |
7677 | } | 7812 | } |
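__build_sched_domains() now reports failure through a single exit chain: err starts at -ENOMEM, is cleared only once the domains are attached, and both success and failure fall through the same free_* labels so the temporary masks are always released. The control-flow shape with the scheduler specifics stripped out (sketch):

    /* Sketch of the shared-exit error handling, not the real function. */
    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>

    static int build_something(const struct cpumask *cpu_map)
    {
            int err = -ENOMEM;
            cpumask_var_t tmp;

            if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
                    goto out;

            cpumask_and(tmp, cpu_map, cpu_online_mask);
            if (cpumask_empty(tmp))
                    goto free_tmp;          /* err is still -ENOMEM here */

            err = 0;                        /* only full success clears it */

    free_tmp:
            free_cpumask_var(tmp);          /* runs on success and failure */
    out:
            return err;
    }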
7678 | 7813 | ||
7679 | static int build_sched_domains(const cpumask_t *cpu_map) | 7814 | static int build_sched_domains(const struct cpumask *cpu_map) |
7680 | { | 7815 | { |
7681 | return __build_sched_domains(cpu_map, NULL); | 7816 | return __build_sched_domains(cpu_map, NULL); |
7682 | } | 7817 | } |
7683 | 7818 | ||
7684 | static cpumask_t *doms_cur; /* current sched domains */ | 7819 | static struct cpumask *doms_cur; /* current sched domains */ |
7685 | static int ndoms_cur; /* number of sched domains in 'doms_cur' */ | 7820 | static int ndoms_cur; /* number of sched domains in 'doms_cur' */ |
7686 | static struct sched_domain_attr *dattr_cur; | 7821 | static struct sched_domain_attr *dattr_cur; |
7687 | /* attributes of custom domains in 'doms_cur' */ | 7822 | /* attributes of custom domains in 'doms_cur' */ |
7688 | 7823 | ||
7689 | /* | 7824 | /* |
7690 | * Special case: If a kmalloc of a doms_cur partition (array of | 7825 | * Special case: If a kmalloc of a doms_cur partition (array of |
7691 | * cpumask_t) fails, then fallback to a single sched domain, | 7826 | * cpumask) fails, then fallback to a single sched domain, |
7692 | * as determined by the single cpumask_t fallback_doms. | 7827 | * as determined by the single cpumask fallback_doms. |
7693 | */ | 7828 | */ |
7694 | static cpumask_t fallback_doms; | 7829 | static cpumask_var_t fallback_doms; |
7695 | 7830 | ||
7696 | /* | 7831 | /* |
7697 | * arch_update_cpu_topology lets virtualized architectures update the | 7832 | * arch_update_cpu_topology lets virtualized architectures update the |
@@ -7708,16 +7843,16 @@ int __attribute__((weak)) arch_update_cpu_topology(void) | |||
7708 | * For now this just excludes isolated cpus, but could be used to | 7843 | * For now this just excludes isolated cpus, but could be used to |
7709 | * exclude other special cases in the future. | 7844 | * exclude other special cases in the future. |
7710 | */ | 7845 | */ |
7711 | static int arch_init_sched_domains(const cpumask_t *cpu_map) | 7846 | static int arch_init_sched_domains(const struct cpumask *cpu_map) |
7712 | { | 7847 | { |
7713 | int err; | 7848 | int err; |
7714 | 7849 | ||
7715 | arch_update_cpu_topology(); | 7850 | arch_update_cpu_topology(); |
7716 | ndoms_cur = 1; | 7851 | ndoms_cur = 1; |
7717 | doms_cur = kmalloc(sizeof(cpumask_t), GFP_KERNEL); | 7852 | doms_cur = kmalloc(cpumask_size(), GFP_KERNEL); |
7718 | if (!doms_cur) | 7853 | if (!doms_cur) |
7719 | doms_cur = &fallback_doms; | 7854 | doms_cur = fallback_doms; |
7720 | cpus_andnot(*doms_cur, *cpu_map, cpu_isolated_map); | 7855 | cpumask_andnot(doms_cur, cpu_map, cpu_isolated_map); |
7721 | dattr_cur = NULL; | 7856 | dattr_cur = NULL; |
7722 | err = build_sched_domains(doms_cur); | 7857 | err = build_sched_domains(doms_cur); |
7723 | register_sched_domain_sysctl(); | 7858 | register_sched_domain_sysctl(); |
@@ -7725,8 +7860,8 @@ static int arch_init_sched_domains(const cpumask_t *cpu_map) | |||
7725 | return err; | 7860 | return err; |
7726 | } | 7861 | } |
7727 | 7862 | ||
7728 | static void arch_destroy_sched_domains(const cpumask_t *cpu_map, | 7863 | static void arch_destroy_sched_domains(const struct cpumask *cpu_map, |
7729 | cpumask_t *tmpmask) | 7864 | struct cpumask *tmpmask) |
7730 | { | 7865 | { |
7731 | free_sched_groups(cpu_map, tmpmask); | 7866 | free_sched_groups(cpu_map, tmpmask); |
7732 | } | 7867 | } |
@@ -7735,15 +7870,16 @@ static void arch_destroy_sched_domains(const cpumask_t *cpu_map, | |||
7735 | * Detach sched domains from a group of cpus specified in cpu_map | 7870 | * Detach sched domains from a group of cpus specified in cpu_map |
7736 | * These cpus will now be attached to the NULL domain | 7871 | * These cpus will now be attached to the NULL domain |
7737 | */ | 7872 | */ |
7738 | static void detach_destroy_domains(const cpumask_t *cpu_map) | 7873 | static void detach_destroy_domains(const struct cpumask *cpu_map) |
7739 | { | 7874 | { |
7740 | cpumask_t tmpmask; | 7875 | /* Save because hotplug lock held. */ |
7876 | static DECLARE_BITMAP(tmpmask, CONFIG_NR_CPUS); | ||
7741 | int i; | 7877 | int i; |
7742 | 7878 | ||
7743 | for_each_cpu_mask_nr(i, *cpu_map) | 7879 | for_each_cpu(i, cpu_map) |
7744 | cpu_attach_domain(NULL, &def_root_domain, i); | 7880 | cpu_attach_domain(NULL, &def_root_domain, i); |
7745 | synchronize_sched(); | 7881 | synchronize_sched(); |
7746 | arch_destroy_sched_domains(cpu_map, &tmpmask); | 7882 | arch_destroy_sched_domains(cpu_map, to_cpumask(tmpmask)); |
7747 | } | 7883 | } |
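detach_destroy_domains() drops the on-stack cpumask_t tmpmask in favour of a function-local static DECLARE_BITMAP cast with to_cpumask(): with CONFIG_NR_CPUS in the thousands an on-stack mask can blow the kernel stack, and the static scratch copy is only correct because every caller holds the hotplug lock, which serializes access to it. A hedged plain-C sketch of that idea (static scratch bitmap shared under a lock; sizes and helpers are illustrative, not the kernel macros):

    #include <pthread.h>
    #include <stdio.h>

    #define NR_CPUS        1024
    #define BITS_PER_LONG  (8 * sizeof(unsigned long))
    #define BITMAP_WORDS   ((NR_CPUS + BITS_PER_LONG - 1) / BITS_PER_LONG)

    static pthread_mutex_t hotplug_lock = PTHREAD_MUTEX_INITIALIZER;

    static void detach_domains(void)
    {
        /* Static: one shared 128-byte scratch mask instead of 128 bytes of stack per call. */
        static unsigned long tmpmask[BITMAP_WORDS];

        pthread_mutex_lock(&hotplug_lock);        /* serializes all users of tmpmask */
        for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
            tmpmask[cpu / BITS_PER_LONG] |= 1UL << (cpu % BITS_PER_LONG);
        /* ... use tmpmask as the scratch cpumask ... */
        pthread_mutex_unlock(&hotplug_lock);
    }

    int main(void)
    {
        detach_domains();
        printf("scratch mask covers %d CPUs\n", NR_CPUS);
        return 0;
    }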
7748 | 7884 | ||
7749 | /* handle null as "default" */ | 7885 | /* handle null as "default" */ |
@@ -7768,7 +7904,7 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, | |||
7768 | * doms_new[] to the current sched domain partitioning, doms_cur[]. | 7904 | * doms_new[] to the current sched domain partitioning, doms_cur[]. |
7769 | * It destroys each deleted domain and builds each new domain. | 7905 | * It destroys each deleted domain and builds each new domain. |
7770 | * | 7906 | * |
7771 | * 'doms_new' is an array of cpumask_t's of length 'ndoms_new'. | 7907 | * 'doms_new' is an array of cpumask's of length 'ndoms_new'. |
7772 | * The masks don't intersect (don't overlap.) We should set up one | 7908 | * The masks don't intersect (don't overlap.) We should set up one |
7773 | * sched domain for each mask. CPUs not in any of the cpumasks will | 7909 | * sched domain for each mask. CPUs not in any of the cpumasks will |
7774 | * not be load balanced. If the same cpumask appears both in the | 7910 | * not be load balanced. If the same cpumask appears both in the |
@@ -7782,13 +7918,14 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, | |||
7782 | * the single partition 'fallback_doms', it also forces the domains | 7918 | * the single partition 'fallback_doms', it also forces the domains |
7783 | * to be rebuilt. | 7919 | * to be rebuilt. |
7784 | * | 7920 | * |
7785 | * If doms_new == NULL it will be replaced with cpu_online_map. | 7921 | * If doms_new == NULL it will be replaced with cpu_online_mask. |
7786 | * ndoms_new == 0 is a special case for destroying existing domains, | 7922 | * ndoms_new == 0 is a special case for destroying existing domains, |
7787 | * and it will not create the default domain. | 7923 | * and it will not create the default domain. |
7788 | * | 7924 | * |
7789 | * Call with hotplug lock held | 7925 | * Call with hotplug lock held |
7790 | */ | 7926 | */ |
7791 | void partition_sched_domains(int ndoms_new, cpumask_t *doms_new, | 7927 | /* FIXME: Change to struct cpumask *doms_new[] */ |
7928 | void partition_sched_domains(int ndoms_new, struct cpumask *doms_new, | ||
7792 | struct sched_domain_attr *dattr_new) | 7929 | struct sched_domain_attr *dattr_new) |
7793 | { | 7930 | { |
7794 | int i, j, n; | 7931 | int i, j, n; |
@@ -7807,7 +7944,7 @@ void partition_sched_domains(int ndoms_new, cpumask_t *doms_new, | |||
7807 | /* Destroy deleted domains */ | 7944 | /* Destroy deleted domains */ |
7808 | for (i = 0; i < ndoms_cur; i++) { | 7945 | for (i = 0; i < ndoms_cur; i++) { |
7809 | for (j = 0; j < n && !new_topology; j++) { | 7946 | for (j = 0; j < n && !new_topology; j++) { |
7810 | if (cpus_equal(doms_cur[i], doms_new[j]) | 7947 | if (cpumask_equal(&doms_cur[i], &doms_new[j]) |
7811 | && dattrs_equal(dattr_cur, i, dattr_new, j)) | 7948 | && dattrs_equal(dattr_cur, i, dattr_new, j)) |
7812 | goto match1; | 7949 | goto match1; |
7813 | } | 7950 | } |
@@ -7819,15 +7956,15 @@ match1: | |||
7819 | 7956 | ||
7820 | if (doms_new == NULL) { | 7957 | if (doms_new == NULL) { |
7821 | ndoms_cur = 0; | 7958 | ndoms_cur = 0; |
7822 | doms_new = &fallback_doms; | 7959 | doms_new = fallback_doms; |
7823 | cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map); | 7960 | cpumask_andnot(&doms_new[0], cpu_online_mask, cpu_isolated_map); |
7824 | WARN_ON_ONCE(dattr_new); | 7961 | WARN_ON_ONCE(dattr_new); |
7825 | } | 7962 | } |
7826 | 7963 | ||
7827 | /* Build new domains */ | 7964 | /* Build new domains */ |
7828 | for (i = 0; i < ndoms_new; i++) { | 7965 | for (i = 0; i < ndoms_new; i++) { |
7829 | for (j = 0; j < ndoms_cur && !new_topology; j++) { | 7966 | for (j = 0; j < ndoms_cur && !new_topology; j++) { |
7830 | if (cpus_equal(doms_new[i], doms_cur[j]) | 7967 | if (cpumask_equal(&doms_new[i], &doms_cur[j]) |
7831 | && dattrs_equal(dattr_new, i, dattr_cur, j)) | 7968 | && dattrs_equal(dattr_new, i, dattr_cur, j)) |
7832 | goto match2; | 7969 | goto match2; |
7833 | } | 7970 | } |
@@ -7839,7 +7976,7 @@ match2: | |||
7839 | } | 7976 | } |
7840 | 7977 | ||
7841 | /* Remember the new sched domains */ | 7978 | /* Remember the new sched domains */ |
7842 | if (doms_cur != &fallback_doms) | 7979 | if (doms_cur != fallback_doms) |
7843 | kfree(doms_cur); | 7980 | kfree(doms_cur); |
7844 | kfree(dattr_cur); /* kfree(NULL) is safe */ | 7981 | kfree(dattr_cur); /* kfree(NULL) is safe */ |
7845 | doms_cur = doms_new; | 7982 | doms_cur = doms_new; |
@@ -7852,7 +7989,7 @@ match2: | |||
7852 | } | 7989 | } |
7853 | 7990 | ||
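As the comment block above spells out, partition_sched_domains() avoids needless work by comparing each new partition against the current ones: a domain whose cpumask (and attributes) already exists is neither destroyed nor rebuilt. A small sketch of that match-and-skip loop, where plain bitmasks and a printf stand in for cpumask_equal() and build_sched_domains() (types and names here are illustrative):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define MASK_WORDS 2

    struct dom { unsigned long mask[MASK_WORDS]; };

    static bool mask_equal(const struct dom *a, const struct dom *b)
    {
        return memcmp(a->mask, b->mask, sizeof(a->mask)) == 0;
    }

    /* Rebuild only the domains whose mask is not already present in cur[]. */
    static void repartition(struct dom *cur, int ncur, struct dom *newd, int nnew)
    {
        for (int i = 0; i < nnew; i++) {
            for (int j = 0; j < ncur; j++) {
                if (mask_equal(&newd[i], &cur[j]))
                    goto match;               /* unchanged partition: skip the rebuild */
            }
            printf("build domain %d\n", i);   /* stands in for build_sched_domains() */
    match:
            ;
        }
    }

    int main(void)
    {
        struct dom cur[1]  = { { { 0x3, 0 } } };                  /* CPUs 0-1 */
        struct dom newd[2] = { { { 0x3, 0 } }, { { 0xc, 0 } } };  /* CPUs 0-1 and 2-3 */

        repartition(cur, 1, newd, 2);                             /* only domain 1 is built */
        return 0;
    }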
7854 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) | 7991 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) |
7855 | int arch_reinit_sched_domains(void) | 7992 | static void arch_reinit_sched_domains(void) |
7856 | { | 7993 | { |
7857 | get_online_cpus(); | 7994 | get_online_cpus(); |
7858 | 7995 | ||
@@ -7861,25 +7998,33 @@ int arch_reinit_sched_domains(void) | |||
7861 | 7998 | ||
7862 | rebuild_sched_domains(); | 7999 | rebuild_sched_domains(); |
7863 | put_online_cpus(); | 8000 | put_online_cpus(); |
7864 | |||
7865 | return 0; | ||
7866 | } | 8001 | } |
7867 | 8002 | ||
7868 | static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt) | 8003 | static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt) |
7869 | { | 8004 | { |
7870 | int ret; | 8005 | unsigned int level = 0; |
8006 | |||
8007 | if (sscanf(buf, "%u", &level) != 1) | ||
8008 | return -EINVAL; | ||
8009 | |||
8010 | /* | ||
8011 | * level is always positive, so don't check for | ||
8012 | * level < POWERSAVINGS_BALANCE_NONE, which is 0. | ||
8013 | * What happens on a 0 or 1 byte write? Do we | ||
8014 | * need to check count as well? | ||
8015 | */ | ||
7871 | 8016 | ||
7872 | if (buf[0] != '0' && buf[0] != '1') | 8017 | if (level >= MAX_POWERSAVINGS_BALANCE_LEVELS) |
7873 | return -EINVAL; | 8018 | return -EINVAL; |
7874 | 8019 | ||
7875 | if (smt) | 8020 | if (smt) |
7876 | sched_smt_power_savings = (buf[0] == '1'); | 8021 | sched_smt_power_savings = level; |
7877 | else | 8022 | else |
7878 | sched_mc_power_savings = (buf[0] == '1'); | 8023 | sched_mc_power_savings = level; |
7879 | 8024 | ||
7880 | ret = arch_reinit_sched_domains(); | 8025 | arch_reinit_sched_domains(); |
7881 | 8026 | ||
7882 | return ret ? ret : count; | 8027 | return count; |
7883 | } | 8028 | } |
7884 | 8029 | ||
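The store handler now parses a full power-savings level with sscanf() instead of accepting only the characters '0' and '1', and rejects anything at or above MAX_POWERSAVINGS_BALANCE_LEVELS. A standalone sketch of just that validation; the constant's name comes from the diff, while its value of 3 and the surrounding harness are assumptions for illustration:

    #include <stdio.h>

    #define MAX_POWERSAVINGS_BALANCE_LEVELS 3   /* assumed value for illustration */

    /* Returns the parsed level, or -1 on invalid input (mirrors the -EINVAL paths). */
    static int parse_power_savings_level(const char *buf)
    {
        unsigned int level = 0;

        if (sscanf(buf, "%u", &level) != 1)
            return -1;                          /* not a number */
        if (level >= MAX_POWERSAVINGS_BALANCE_LEVELS)
            return -1;                          /* out of range */
        return (int)level;
    }

    int main(void)
    {
        const char *inputs[] = { "0", "2", "7", "abc" };

        for (unsigned int i = 0; i < sizeof(inputs) / sizeof(inputs[0]); i++)
            printf("%-4s -> %d\n", inputs[i], parse_power_savings_level(inputs[i]));
        return 0;
    }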
7885 | #ifdef CONFIG_SCHED_MC | 8030 | #ifdef CONFIG_SCHED_MC |
@@ -7914,7 +8059,7 @@ static SYSDEV_CLASS_ATTR(sched_smt_power_savings, 0644, | |||
7914 | sched_smt_power_savings_store); | 8059 | sched_smt_power_savings_store); |
7915 | #endif | 8060 | #endif |
7916 | 8061 | ||
7917 | int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls) | 8062 | int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls) |
7918 | { | 8063 | { |
7919 | int err = 0; | 8064 | int err = 0; |
7920 | 8065 | ||
@@ -7979,7 +8124,9 @@ static int update_runtime(struct notifier_block *nfb, | |||
7979 | 8124 | ||
7980 | void __init sched_init_smp(void) | 8125 | void __init sched_init_smp(void) |
7981 | { | 8126 | { |
7982 | cpumask_t non_isolated_cpus; | 8127 | cpumask_var_t non_isolated_cpus; |
8128 | |||
8129 | alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL); | ||
7983 | 8130 | ||
7984 | #if defined(CONFIG_NUMA) | 8131 | #if defined(CONFIG_NUMA) |
7985 | sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **), | 8132 | sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **), |
@@ -7988,10 +8135,10 @@ void __init sched_init_smp(void) | |||
7988 | #endif | 8135 | #endif |
7989 | get_online_cpus(); | 8136 | get_online_cpus(); |
7990 | mutex_lock(&sched_domains_mutex); | 8137 | mutex_lock(&sched_domains_mutex); |
7991 | arch_init_sched_domains(&cpu_online_map); | 8138 | arch_init_sched_domains(cpu_online_mask); |
7992 | cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map); | 8139 | cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map); |
7993 | if (cpus_empty(non_isolated_cpus)) | 8140 | if (cpumask_empty(non_isolated_cpus)) |
7994 | cpu_set(smp_processor_id(), non_isolated_cpus); | 8141 | cpumask_set_cpu(smp_processor_id(), non_isolated_cpus); |
7995 | mutex_unlock(&sched_domains_mutex); | 8142 | mutex_unlock(&sched_domains_mutex); |
7996 | put_online_cpus(); | 8143 | put_online_cpus(); |
7997 | 8144 | ||
@@ -8006,9 +8153,13 @@ void __init sched_init_smp(void) | |||
8006 | init_hrtick(); | 8153 | init_hrtick(); |
8007 | 8154 | ||
8008 | /* Move init over to a non-isolated CPU */ | 8155 | /* Move init over to a non-isolated CPU */ |
8009 | if (set_cpus_allowed_ptr(current, &non_isolated_cpus) < 0) | 8156 | if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0) |
8010 | BUG(); | 8157 | BUG(); |
8011 | sched_init_granularity(); | 8158 | sched_init_granularity(); |
8159 | free_cpumask_var(non_isolated_cpus); | ||
8160 | |||
8161 | alloc_cpumask_var(&fallback_doms, GFP_KERNEL); | ||
8162 | init_sched_rt_class(); | ||
8012 | } | 8163 | } |
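sched_init_smp() builds the set of non-isolated CPUs and then pins the init task onto it via set_cpus_allowed_ptr(), falling back to the current CPU if the set came out empty. A userland analogue of that "restrict a task to an allowed CPU set" step, using the ordinary glibc affinity API rather than the kernel helpers (treating CPU 0 as "isolated" is purely illustrative):

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        cpu_set_t allowed;
        long ncpus = sysconf(_SC_NPROCESSORS_ONLN);

        /* Pretend CPU 0 is isolated: allow every other online CPU. */
        CPU_ZERO(&allowed);
        for (long cpu = 1; cpu < ncpus; cpu++)
            CPU_SET(cpu, &allowed);

        if (CPU_COUNT(&allowed) == 0)
            CPU_SET(0, &allowed);   /* fallback, like cpumask_set_cpu(smp_processor_id(), ...) */

        if (sched_setaffinity(0, sizeof(allowed), &allowed) < 0) {
            perror("sched_setaffinity");
            return 1;
        }
        printf("now allowed on %d CPUs\n", CPU_COUNT(&allowed));
        return 0;
    }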
8013 | #else | 8164 | #else |
8014 | void __init sched_init_smp(void) | 8165 | void __init sched_init_smp(void) |
@@ -8323,6 +8474,15 @@ void __init sched_init(void) | |||
8323 | */ | 8474 | */ |
8324 | current->sched_class = &fair_sched_class; | 8475 | current->sched_class = &fair_sched_class; |
8325 | 8476 | ||
8477 | /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */ | ||
8478 | alloc_bootmem_cpumask_var(&nohz_cpu_mask); | ||
8479 | #ifdef CONFIG_SMP | ||
8480 | #ifdef CONFIG_NO_HZ | ||
8481 | alloc_bootmem_cpumask_var(&nohz.cpu_mask); | ||
8482 | #endif | ||
8483 | alloc_bootmem_cpumask_var(&cpu_isolated_map); | ||
8484 | #endif /* SMP */ | ||
8485 | |||
8326 | scheduler_running = 1; | 8486 | scheduler_running = 1; |
8327 | } | 8487 | } |
8328 | 8488 | ||
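The new alloc_bootmem_cpumask_var() calls in sched_init() only do real work when CONFIG_CPUMASK_OFFSTACK is set: cpumask_var_t is then a pointer whose storage has to be allocated once at boot, while on small-NR_CPUS configurations it stays a plain array and the allocation is effectively a no-op. A rough userland sketch of that "array or heap, behind one type" trade-off; all names, sizes and the toggle are illustrative:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Toggle mimics CONFIG_CPUMASK_OFFSTACK: large NR_CPUS -> heap, small -> inline array. */
    #define CPUMASK_OFFSTACK 1
    #define NR_CPUS          4096
    #define MASK_BYTES       (NR_CPUS / 8)

    #if CPUMASK_OFFSTACK
    typedef unsigned long *cpumask_var;            /* pointer: storage allocated at init */

    static int alloc_cpumask(cpumask_var *mask)
    {
        *mask = calloc(1, MASK_BYTES);
        return *mask != NULL;
    }
    static void free_cpumask(cpumask_var mask) { free(mask); }
    #else
    typedef unsigned long cpumask_var[MASK_BYTES / sizeof(unsigned long)];  /* plain array */

    static int alloc_cpumask(cpumask_var *mask) { memset(*mask, 0, MASK_BYTES); return 1; }
    static void free_cpumask(cpumask_var mask) { (void)mask; }
    #endif

    int main(void)
    {
        cpumask_var nohz_mask;

        if (!alloc_cpumask(&nohz_mask))
            return 1;
        nohz_mask[0] |= 1UL;                        /* mark CPU 0 */
        printf("word0 = %#lx\n", nohz_mask[0]);
        free_cpumask(nohz_mask);
        return 0;
    }

Keeping both representations behind one type is what lets callers like sched_init() stay identical whether the mask lives in .bss or on the heap.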