Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c      | 212
-rw-r--r--  kernel/sched_fair.c |   4
-rw-r--r--  kernel/sched_rt.c   |  18
3 files changed, 124 insertions, 110 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index f2be61870030..eba6a156d334 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2829,7 +2829,7 @@ static void sched_migrate_task(struct task_struct *p, int dest_cpu) | |||
2829 | struct rq *rq; | 2829 | struct rq *rq; |
2830 | 2830 | ||
2831 | rq = task_rq_lock(p, &flags); | 2831 | rq = task_rq_lock(p, &flags); |
2832 | if (!cpu_isset(dest_cpu, p->cpus_allowed) | 2832 | if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed) |
2833 | || unlikely(!cpu_active(dest_cpu))) | 2833 | || unlikely(!cpu_active(dest_cpu))) |
2834 | goto out; | 2834 | goto out; |
2835 | 2835 | ||
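The substitution above recurs throughout this patch: cpu_isset(cpu, mask) becomes cpumask_test_cpu(cpu, &mask). The old macro names a cpumask_t directly, while the new helper takes a const struct cpumask pointer, which also works when the mask lives off-stack. A minimal sketch of the two forms, using hypothetical helper names and assuming <linux/cpumask.h> plus the task_struct::cpus_allowed field used in this tree:

    /* Illustrative sketch only -- both forms test the same affinity bit. */
    static int dest_allowed_old(struct task_struct *p, int dest_cpu)
    {
            return cpu_isset(dest_cpu, p->cpus_allowed);          /* old API: cpumask_t by name */
    }

    static int dest_allowed_new(struct task_struct *p, int dest_cpu)
    {
            return cpumask_test_cpu(dest_cpu, &p->cpus_allowed);  /* new API: struct cpumask * */
    }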
@@ -2895,7 +2895,7 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu, | |||
2895 | * 2) cannot be migrated to this CPU due to cpus_allowed, or | 2895 | * 2) cannot be migrated to this CPU due to cpus_allowed, or |
2896 | * 3) are cache-hot on their current CPU. | 2896 | * 3) are cache-hot on their current CPU. |
2897 | */ | 2897 | */ |
2898 | if (!cpu_isset(this_cpu, p->cpus_allowed)) { | 2898 | if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed)) { |
2899 | schedstat_inc(p, se.nr_failed_migrations_affine); | 2899 | schedstat_inc(p, se.nr_failed_migrations_affine); |
2900 | return 0; | 2900 | return 0; |
2901 | } | 2901 | } |
@@ -3070,7 +3070,7 @@ static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest, | |||
3070 | static struct sched_group * | 3070 | static struct sched_group * |
3071 | find_busiest_group(struct sched_domain *sd, int this_cpu, | 3071 | find_busiest_group(struct sched_domain *sd, int this_cpu, |
3072 | unsigned long *imbalance, enum cpu_idle_type idle, | 3072 | unsigned long *imbalance, enum cpu_idle_type idle, |
3073 | int *sd_idle, const cpumask_t *cpus, int *balance) | 3073 | int *sd_idle, const struct cpumask *cpus, int *balance) |
3074 | { | 3074 | { |
3075 | struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups; | 3075 | struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups; |
3076 | unsigned long max_load, avg_load, total_load, this_load, total_pwr; | 3076 | unsigned long max_load, avg_load, total_load, this_load, total_pwr; |
@@ -3387,7 +3387,7 @@ ret: | |||
3387 | */ | 3387 | */ |
3388 | static struct rq * | 3388 | static struct rq * |
3389 | find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle, | 3389 | find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle, |
3390 | unsigned long imbalance, const cpumask_t *cpus) | 3390 | unsigned long imbalance, const struct cpumask *cpus) |
3391 | { | 3391 | { |
3392 | struct rq *busiest = NULL, *rq; | 3392 | struct rq *busiest = NULL, *rq; |
3393 | unsigned long max_load = 0; | 3393 | unsigned long max_load = 0; |
@@ -3396,7 +3396,7 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle, | |||
3396 | for_each_cpu(i, sched_group_cpus(group)) { | 3396 | for_each_cpu(i, sched_group_cpus(group)) { |
3397 | unsigned long wl; | 3397 | unsigned long wl; |
3398 | 3398 | ||
3399 | if (!cpu_isset(i, *cpus)) | 3399 | if (!cpumask_test_cpu(i, cpus)) |
3400 | continue; | 3400 | continue; |
3401 | 3401 | ||
3402 | rq = cpu_rq(i); | 3402 | rq = cpu_rq(i); |
@@ -3426,7 +3426,7 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle, | |||
3426 | */ | 3426 | */ |
3427 | static int load_balance(int this_cpu, struct rq *this_rq, | 3427 | static int load_balance(int this_cpu, struct rq *this_rq, |
3428 | struct sched_domain *sd, enum cpu_idle_type idle, | 3428 | struct sched_domain *sd, enum cpu_idle_type idle, |
3429 | int *balance, cpumask_t *cpus) | 3429 | int *balance, struct cpumask *cpus) |
3430 | { | 3430 | { |
3431 | int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0; | 3431 | int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0; |
3432 | struct sched_group *group; | 3432 | struct sched_group *group; |
@@ -3434,7 +3434,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, | |||
3434 | struct rq *busiest; | 3434 | struct rq *busiest; |
3435 | unsigned long flags; | 3435 | unsigned long flags; |
3436 | 3436 | ||
3437 | cpus_setall(*cpus); | 3437 | cpumask_setall(cpus); |
3438 | 3438 | ||
3439 | /* | 3439 | /* |
3440 | * When power savings policy is enabled for the parent domain, idle | 3440 | * When power savings policy is enabled for the parent domain, idle |
@@ -3494,8 +3494,8 @@ redo: | |||
3494 | 3494 | ||
3495 | /* All tasks on this runqueue were pinned by CPU affinity */ | 3495 | /* All tasks on this runqueue were pinned by CPU affinity */ |
3496 | if (unlikely(all_pinned)) { | 3496 | if (unlikely(all_pinned)) { |
3497 | cpu_clear(cpu_of(busiest), *cpus); | 3497 | cpumask_clear_cpu(cpu_of(busiest), cpus); |
3498 | if (!cpus_empty(*cpus)) | 3498 | if (!cpumask_empty(cpus)) |
3499 | goto redo; | 3499 | goto redo; |
3500 | goto out_balanced; | 3500 | goto out_balanced; |
3501 | } | 3501 | } |
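The all_pinned path above shows the mutation helpers going pointer-based: cpu_clear(cpu, *cpus) becomes cpumask_clear_cpu(cpu, cpus) and cpus_empty(*cpus) becomes cpumask_empty(cpus). A condensed sketch of the redo pattern, with find_busiest() and all_pinned_on() as hypothetical stand-ins for the surrounding load-balancer logic:

    /* Sketch: prune a fully-pinned busiest CPU from the candidate mask and retry. */
    static struct rq *pick_unpinned_busiest(struct cpumask *cpus)
    {
            struct rq *busiest;

            cpumask_setall(cpus);                             /* was: cpus_setall(*cpus) */
    redo:
            busiest = find_busiest(cpus);                     /* hypothetical */
            if (busiest && all_pinned_on(busiest)) {          /* hypothetical */
                    cpumask_clear_cpu(cpu_of(busiest), cpus); /* was: cpu_clear(..., *cpus) */
                    if (!cpumask_empty(cpus))                 /* was: !cpus_empty(*cpus) */
                            goto redo;
                    return NULL;
            }
            return busiest;
    }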
@@ -3512,7 +3512,8 @@ redo: | |||
3512 | /* don't kick the migration_thread, if the curr | 3512 | /* don't kick the migration_thread, if the curr |
3513 | * task on busiest cpu can't be moved to this_cpu | 3513 | * task on busiest cpu can't be moved to this_cpu |
3514 | */ | 3514 | */ |
3515 | if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) { | 3515 | if (!cpumask_test_cpu(this_cpu, |
3516 | &busiest->curr->cpus_allowed)) { | ||
3516 | spin_unlock_irqrestore(&busiest->lock, flags); | 3517 | spin_unlock_irqrestore(&busiest->lock, flags); |
3517 | all_pinned = 1; | 3518 | all_pinned = 1; |
3518 | goto out_one_pinned; | 3519 | goto out_one_pinned; |
@@ -3587,7 +3588,7 @@ out: | |||
3587 | */ | 3588 | */ |
3588 | static int | 3589 | static int |
3589 | load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd, | 3590 | load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd, |
3590 | cpumask_t *cpus) | 3591 | struct cpumask *cpus) |
3591 | { | 3592 | { |
3592 | struct sched_group *group; | 3593 | struct sched_group *group; |
3593 | struct rq *busiest = NULL; | 3594 | struct rq *busiest = NULL; |
@@ -3596,7 +3597,7 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd, | |||
3596 | int sd_idle = 0; | 3597 | int sd_idle = 0; |
3597 | int all_pinned = 0; | 3598 | int all_pinned = 0; |
3598 | 3599 | ||
3599 | cpus_setall(*cpus); | 3600 | cpumask_setall(cpus); |
3600 | 3601 | ||
3601 | /* | 3602 | /* |
3602 | * When power savings policy is enabled for the parent domain, idle | 3603 | * When power savings policy is enabled for the parent domain, idle |
@@ -3640,8 +3641,8 @@ redo: | |||
3640 | double_unlock_balance(this_rq, busiest); | 3641 | double_unlock_balance(this_rq, busiest); |
3641 | 3642 | ||
3642 | if (unlikely(all_pinned)) { | 3643 | if (unlikely(all_pinned)) { |
3643 | cpu_clear(cpu_of(busiest), *cpus); | 3644 | cpumask_clear_cpu(cpu_of(busiest), cpus); |
3644 | if (!cpus_empty(*cpus)) | 3645 | if (!cpumask_empty(cpus)) |
3645 | goto redo; | 3646 | goto redo; |
3646 | } | 3647 | } |
3647 | } | 3648 | } |
@@ -5376,7 +5377,7 @@ out_unlock: | |||
5376 | return retval; | 5377 | return retval; |
5377 | } | 5378 | } |
5378 | 5379 | ||
5379 | long sched_setaffinity(pid_t pid, const cpumask_t *in_mask) | 5380 | long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) |
5380 | { | 5381 | { |
5381 | cpumask_var_t cpus_allowed, new_mask; | 5382 | cpumask_var_t cpus_allowed, new_mask; |
5382 | struct task_struct *p; | 5383 | struct task_struct *p; |
@@ -5445,13 +5446,13 @@ out_put_task: | |||
5445 | } | 5446 | } |
5446 | 5447 | ||
5447 | static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, | 5448 | static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, |
5448 | cpumask_t *new_mask) | 5449 | struct cpumask *new_mask) |
5449 | { | 5450 | { |
5450 | if (len < sizeof(cpumask_t)) { | 5451 | if (len < cpumask_size()) |
5451 | memset(new_mask, 0, sizeof(cpumask_t)); | 5452 | cpumask_clear(new_mask); |
5452 | } else if (len > sizeof(cpumask_t)) { | 5453 | else if (len > cpumask_size()) |
5453 | len = sizeof(cpumask_t); | 5454 | len = cpumask_size(); |
5454 | } | 5455 | |
5455 | return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0; | 5456 | return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0; |
5456 | } | 5457 | } |
5457 | 5458 | ||
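get_user_cpu_mask() now bounds the copy with cpumask_size() and zeroes with cpumask_clear() instead of sizeof(cpumask_t)/memset(): with CONFIG_CPUMASK_OFFSTACK the kernel-side mask may only hold nr_cpu_ids bits rather than NR_CPUS, so sizeof(cpumask_t) is no longer a safe bound. A hedged sketch of a caller pairing it with the cpumask_var_t helpers (set_affinity_from_user is an invented name, written in the spirit of the sys_sched_setaffinity() wrapper in this file):

    /* Sketch: copy an affinity mask in from userspace into a runtime-sized mask. */
    static long set_affinity_from_user(pid_t pid, unsigned int len,
                                       unsigned long __user *user_mask_ptr)
    {
            cpumask_var_t new_mask;
            long ret;

            if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))  /* real allocation only if OFFSTACK */
                    return -ENOMEM;

            ret = get_user_cpu_mask(user_mask_ptr, len, new_mask);
            if (ret == 0)
                    ret = sched_setaffinity(pid, new_mask);

            free_cpumask_var(new_mask);
            return ret;
    }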
@@ -5477,7 +5478,7 @@ asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len, | |||
5477 | return retval; | 5478 | return retval; |
5478 | } | 5479 | } |
5479 | 5480 | ||
5480 | long sched_getaffinity(pid_t pid, cpumask_t *mask) | 5481 | long sched_getaffinity(pid_t pid, struct cpumask *mask) |
5481 | { | 5482 | { |
5482 | struct task_struct *p; | 5483 | struct task_struct *p; |
5483 | int retval; | 5484 | int retval; |
@@ -5494,7 +5495,7 @@ long sched_getaffinity(pid_t pid, cpumask_t *mask) | |||
5494 | if (retval) | 5495 | if (retval) |
5495 | goto out_unlock; | 5496 | goto out_unlock; |
5496 | 5497 | ||
5497 | cpus_and(*mask, p->cpus_allowed, cpu_online_map); | 5498 | cpumask_and(mask, &p->cpus_allowed, cpu_online_mask); |
5498 | 5499 | ||
5499 | out_unlock: | 5500 | out_unlock: |
5500 | read_unlock(&tasklist_lock); | 5501 | read_unlock(&tasklist_lock); |
@@ -5872,7 +5873,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) | |||
5872 | idle->se.exec_start = sched_clock(); | 5873 | idle->se.exec_start = sched_clock(); |
5873 | 5874 | ||
5874 | idle->prio = idle->normal_prio = MAX_PRIO; | 5875 | idle->prio = idle->normal_prio = MAX_PRIO; |
5875 | idle->cpus_allowed = cpumask_of_cpu(cpu); | 5876 | cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu)); |
5876 | __set_task_cpu(idle, cpu); | 5877 | __set_task_cpu(idle, cpu); |
5877 | 5878 | ||
5878 | rq->curr = rq->idle = idle; | 5879 | rq->curr = rq->idle = idle; |
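cpumask_of_cpu(cpu) built an NR_CPUS-bit cpumask_t by value; cpumask_of(cpu) instead returns a const struct cpumask * into a shared table, so the assignment in init_idle() becomes an explicit cpumask_copy(). The idiom in isolation, as a sketch with a hypothetical helper name:

    /* Sketch: restrict a mask to a single CPU without a cpumask_t temporary. */
    static void pin_mask_to_cpu(struct cpumask *dst, int cpu)
    {
            /* was: *dst = cpumask_of_cpu(cpu); */
            cpumask_copy(dst, cpumask_of(cpu));
    }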
@@ -5956,7 +5957,7 @@ static inline void sched_init_granularity(void) | |||
5956 | * task must not exit() & deallocate itself prematurely. The | 5957 | * task must not exit() & deallocate itself prematurely. The |
5957 | * call is not atomic; no spinlocks may be held. | 5958 | * call is not atomic; no spinlocks may be held. |
5958 | */ | 5959 | */ |
5959 | int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask) | 5960 | int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) |
5960 | { | 5961 | { |
5961 | struct migration_req req; | 5962 | struct migration_req req; |
5962 | unsigned long flags; | 5963 | unsigned long flags; |
@@ -5964,13 +5965,13 @@ int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask) | |||
5964 | int ret = 0; | 5965 | int ret = 0; |
5965 | 5966 | ||
5966 | rq = task_rq_lock(p, &flags); | 5967 | rq = task_rq_lock(p, &flags); |
5967 | if (!cpus_intersects(*new_mask, cpu_online_map)) { | 5968 | if (!cpumask_intersects(new_mask, cpu_online_mask)) { |
5968 | ret = -EINVAL; | 5969 | ret = -EINVAL; |
5969 | goto out; | 5970 | goto out; |
5970 | } | 5971 | } |
5971 | 5972 | ||
5972 | if (unlikely((p->flags & PF_THREAD_BOUND) && p != current && | 5973 | if (unlikely((p->flags & PF_THREAD_BOUND) && p != current && |
5973 | !cpus_equal(p->cpus_allowed, *new_mask))) { | 5974 | !cpumask_equal(&p->cpus_allowed, new_mask))) { |
5974 | ret = -EINVAL; | 5975 | ret = -EINVAL; |
5975 | goto out; | 5976 | goto out; |
5976 | } | 5977 | } |
@@ -5978,12 +5979,12 @@ int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask) | |||
5978 | if (p->sched_class->set_cpus_allowed) | 5979 | if (p->sched_class->set_cpus_allowed) |
5979 | p->sched_class->set_cpus_allowed(p, new_mask); | 5980 | p->sched_class->set_cpus_allowed(p, new_mask); |
5980 | else { | 5981 | else { |
5981 | p->cpus_allowed = *new_mask; | 5982 | cpumask_copy(&p->cpus_allowed, new_mask); |
5982 | p->rt.nr_cpus_allowed = cpus_weight(*new_mask); | 5983 | p->rt.nr_cpus_allowed = cpumask_weight(new_mask); |
5983 | } | 5984 | } |
5984 | 5985 | ||
5985 | /* Can the task run on the task's current CPU? If so, we're done */ | 5986 | /* Can the task run on the task's current CPU? If so, we're done */ |
5986 | if (cpu_isset(task_cpu(p), *new_mask)) | 5987 | if (cpumask_test_cpu(task_cpu(p), new_mask)) |
5987 | goto out; | 5988 | goto out; |
5988 | 5989 | ||
5989 | if (migrate_task(p, cpumask_any_and(cpu_online_mask, new_mask), &req)) { | 5990 | if (migrate_task(p, cpumask_any_and(cpu_online_mask, new_mask), &req)) { |
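set_cpus_allowed_ptr() collects most of the one-to-one renames in a single function: cpus_intersects, cpus_equal, cpus_weight and cpu_isset become cpumask_intersects, cpumask_equal, cpumask_weight and cpumask_test_cpu, all operating on pointers. A condensed sketch of the validate-then-apply shape, with locking and the sched_class hook dropped and migrate_to() as a hypothetical stand-in for the migration request:

    /* Sketch: validate a new affinity mask against the online CPUs, then apply it. */
    static int apply_new_mask(struct task_struct *p, const struct cpumask *new_mask)
    {
            if (!cpumask_intersects(new_mask, cpu_online_mask))    /* no usable CPU */
                    return -EINVAL;

            cpumask_copy(&p->cpus_allowed, new_mask);              /* was: p->cpus_allowed = *new_mask */
            p->rt.nr_cpus_allowed = cpumask_weight(new_mask);      /* was: cpus_weight(*new_mask) */

            if (cpumask_test_cpu(task_cpu(p), new_mask))           /* already on an allowed CPU */
                    return 0;

            /* Otherwise push the task to any online CPU in the new mask. */
            return migrate_to(p, cpumask_any_and(cpu_online_mask, new_mask));  /* hypothetical */
    }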
@@ -6028,7 +6029,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) | |||
6028 | if (task_cpu(p) != src_cpu) | 6029 | if (task_cpu(p) != src_cpu) |
6029 | goto done; | 6030 | goto done; |
6030 | /* Affinity changed (again). */ | 6031 | /* Affinity changed (again). */ |
6031 | if (!cpu_isset(dest_cpu, p->cpus_allowed)) | 6032 | if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) |
6032 | goto fail; | 6033 | goto fail; |
6033 | 6034 | ||
6034 | on_rq = p->se.on_rq; | 6035 | on_rq = p->se.on_rq; |
@@ -6629,13 +6630,13 @@ early_initcall(migration_init); | |||
6629 | #ifdef CONFIG_SCHED_DEBUG | 6630 | #ifdef CONFIG_SCHED_DEBUG |
6630 | 6631 | ||
6631 | static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, | 6632 | static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, |
6632 | cpumask_t *groupmask) | 6633 | struct cpumask *groupmask) |
6633 | { | 6634 | { |
6634 | struct sched_group *group = sd->groups; | 6635 | struct sched_group *group = sd->groups; |
6635 | char str[256]; | 6636 | char str[256]; |
6636 | 6637 | ||
6637 | cpulist_scnprintf(str, sizeof(str), *sched_domain_span(sd)); | 6638 | cpulist_scnprintf(str, sizeof(str), *sched_domain_span(sd)); |
6638 | cpus_clear(*groupmask); | 6639 | cpumask_clear(groupmask); |
6639 | 6640 | ||
6640 | printk(KERN_DEBUG "%*s domain %d: ", level, "", level); | 6641 | printk(KERN_DEBUG "%*s domain %d: ", level, "", level); |
6641 | 6642 | ||
@@ -6936,24 +6937,25 @@ __setup("isolcpus=", isolated_cpu_setup); | |||
6936 | /* | 6937 | /* |
6937 | * init_sched_build_groups takes the cpumask we wish to span, and a pointer | 6938 | * init_sched_build_groups takes the cpumask we wish to span, and a pointer |
6938 | * to a function which identifies what group(along with sched group) a CPU | 6939 | * to a function which identifies what group(along with sched group) a CPU |
6939 | * belongs to. The return value of group_fn must be a >= 0 and < NR_CPUS | 6940 | * belongs to. The return value of group_fn must be a >= 0 and < nr_cpu_ids |
6940 | * (due to the fact that we keep track of groups covered with a cpumask_t). | 6941 | * (due to the fact that we keep track of groups covered with a struct cpumask). |
6941 | * | 6942 | * |
6942 | * init_sched_build_groups will build a circular linked list of the groups | 6943 | * init_sched_build_groups will build a circular linked list of the groups |
6943 | * covered by the given span, and will set each group's ->cpumask correctly, | 6944 | * covered by the given span, and will set each group's ->cpumask correctly, |
6944 | * and ->cpu_power to 0. | 6945 | * and ->cpu_power to 0. |
6945 | */ | 6946 | */ |
6946 | static void | 6947 | static void |
6947 | init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map, | 6948 | init_sched_build_groups(const struct cpumask *span, |
6948 | int (*group_fn)(int cpu, const cpumask_t *cpu_map, | 6949 | const struct cpumask *cpu_map, |
6950 | int (*group_fn)(int cpu, const struct cpumask *cpu_map, | ||
6949 | struct sched_group **sg, | 6951 | struct sched_group **sg, |
6950 | cpumask_t *tmpmask), | 6952 | struct cpumask *tmpmask), |
6951 | cpumask_t *covered, cpumask_t *tmpmask) | 6953 | struct cpumask *covered, struct cpumask *tmpmask) |
6952 | { | 6954 | { |
6953 | struct sched_group *first = NULL, *last = NULL; | 6955 | struct sched_group *first = NULL, *last = NULL; |
6954 | int i; | 6956 | int i; |
6955 | 6957 | ||
6956 | cpus_clear(*covered); | 6958 | cpumask_clear(covered); |
6957 | 6959 | ||
6958 | for_each_cpu(i, span) { | 6960 | for_each_cpu(i, span) { |
6959 | struct sched_group *sg; | 6961 | struct sched_group *sg; |
@@ -6970,7 +6972,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map, | |||
6970 | if (group_fn(j, cpu_map, NULL, tmpmask) != group) | 6972 | if (group_fn(j, cpu_map, NULL, tmpmask) != group) |
6971 | continue; | 6973 | continue; |
6972 | 6974 | ||
6973 | cpu_set(j, *covered); | 6975 | cpumask_set_cpu(j, covered); |
6974 | cpumask_set_cpu(j, sched_group_cpus(sg)); | 6976 | cpumask_set_cpu(j, sched_group_cpus(sg)); |
6975 | } | 6977 | } |
6976 | if (!first) | 6978 | if (!first) |
@@ -7035,9 +7037,10 @@ static int find_next_best_node(int node, nodemask_t *used_nodes) | |||
7035 | * should be one that prevents unnecessary balancing, but also spreads tasks | 7037 | * should be one that prevents unnecessary balancing, but also spreads tasks |
7036 | * out optimally. | 7038 | * out optimally. |
7037 | */ | 7039 | */ |
7038 | static void sched_domain_node_span(int node, cpumask_t *span) | 7040 | static void sched_domain_node_span(int node, struct cpumask *span) |
7039 | { | 7041 | { |
7040 | nodemask_t used_nodes; | 7042 | nodemask_t used_nodes; |
7043 | /* FIXME: use cpumask_of_node() */ | ||
7041 | node_to_cpumask_ptr(nodemask, node); | 7044 | node_to_cpumask_ptr(nodemask, node); |
7042 | int i; | 7045 | int i; |
7043 | 7046 | ||
@@ -7081,8 +7084,8 @@ static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains); | |||
7081 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_cpus); | 7084 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_cpus); |
7082 | 7085 | ||
7083 | static int | 7086 | static int |
7084 | cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, | 7087 | cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map, |
7085 | cpumask_t *unused) | 7088 | struct sched_group **sg, struct cpumask *unused) |
7086 | { | 7089 | { |
7087 | if (sg) | 7090 | if (sg) |
7088 | *sg = &per_cpu(sched_group_cpus, cpu).sg; | 7091 | *sg = &per_cpu(sched_group_cpus, cpu).sg; |
@@ -7100,22 +7103,21 @@ static DEFINE_PER_CPU(struct static_sched_group, sched_group_core); | |||
7100 | 7103 | ||
7101 | #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT) | 7104 | #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT) |
7102 | static int | 7105 | static int |
7103 | cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, | 7106 | cpu_to_core_group(int cpu, const struct cpumask *cpu_map, |
7104 | cpumask_t *mask) | 7107 | struct sched_group **sg, struct cpumask *mask) |
7105 | { | 7108 | { |
7106 | int group; | 7109 | int group; |
7107 | 7110 | ||
7108 | *mask = per_cpu(cpu_sibling_map, cpu); | 7111 | cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map); |
7109 | cpus_and(*mask, *mask, *cpu_map); | 7112 | group = cpumask_first(mask); |
7110 | group = first_cpu(*mask); | ||
7111 | if (sg) | 7113 | if (sg) |
7112 | *sg = &per_cpu(sched_group_core, group).sg; | 7114 | *sg = &per_cpu(sched_group_core, group).sg; |
7113 | return group; | 7115 | return group; |
7114 | } | 7116 | } |
7115 | #elif defined(CONFIG_SCHED_MC) | 7117 | #elif defined(CONFIG_SCHED_MC) |
7116 | static int | 7118 | static int |
7117 | cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, | 7119 | cpu_to_core_group(int cpu, const struct cpumask *cpu_map, |
7118 | cpumask_t *unused) | 7120 | struct sched_group **sg, struct cpumask *unused) |
7119 | { | 7121 | { |
7120 | if (sg) | 7122 | if (sg) |
7121 | *sg = &per_cpu(sched_group_core, cpu).sg; | 7123 | *sg = &per_cpu(sched_group_core, cpu).sg; |
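cpu_to_core_group() above shows the pattern used by all of the topology group helpers: the old code copied a per-cpu sibling mask into a scratch cpumask_t, ANDed it with cpu_map, then took first_cpu(); the new code does a single pointer-based cpumask_and() and reads cpumask_first(). Side by side, quoted from the hunk:

    /* Old shape: value copy, in-place AND, first_cpu() on the dereferenced mask. */
    *mask = per_cpu(cpu_sibling_map, cpu);
    cpus_and(*mask, *mask, *cpu_map);
    group = first_cpu(*mask);

    /* New shape: one cpumask_and() into the scratch mask, then cpumask_first(). */
    cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map);
    group = cpumask_first(mask);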
@@ -7127,18 +7129,18 @@ static DEFINE_PER_CPU(struct static_sched_domain, phys_domains); | |||
7127 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_phys); | 7129 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_phys); |
7128 | 7130 | ||
7129 | static int | 7131 | static int |
7130 | cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg, | 7132 | cpu_to_phys_group(int cpu, const struct cpumask *cpu_map, |
7131 | cpumask_t *mask) | 7133 | struct sched_group **sg, struct cpumask *mask) |
7132 | { | 7134 | { |
7133 | int group; | 7135 | int group; |
7134 | #ifdef CONFIG_SCHED_MC | 7136 | #ifdef CONFIG_SCHED_MC |
7137 | /* FIXME: Use cpu_coregroup_mask. */ | ||
7135 | *mask = cpu_coregroup_map(cpu); | 7138 | *mask = cpu_coregroup_map(cpu); |
7136 | cpus_and(*mask, *mask, *cpu_map); | 7139 | cpus_and(*mask, *mask, *cpu_map); |
7137 | group = first_cpu(*mask); | 7140 | group = cpumask_first(mask); |
7138 | #elif defined(CONFIG_SCHED_SMT) | 7141 | #elif defined(CONFIG_SCHED_SMT) |
7139 | *mask = per_cpu(cpu_sibling_map, cpu); | 7142 | cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map); |
7140 | cpus_and(*mask, *mask, *cpu_map); | 7143 | group = cpumask_first(mask); |
7141 | group = first_cpu(*mask); | ||
7142 | #else | 7144 | #else |
7143 | group = cpu; | 7145 | group = cpu; |
7144 | #endif | 7146 | #endif |
@@ -7159,14 +7161,16 @@ static struct sched_group ***sched_group_nodes_bycpu; | |||
7159 | static DEFINE_PER_CPU(struct sched_domain, allnodes_domains); | 7161 | static DEFINE_PER_CPU(struct sched_domain, allnodes_domains); |
7160 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes); | 7162 | static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes); |
7161 | 7163 | ||
7162 | static int cpu_to_allnodes_group(int cpu, const cpumask_t *cpu_map, | 7164 | static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map, |
7163 | struct sched_group **sg, cpumask_t *nodemask) | 7165 | struct sched_group **sg, |
7166 | struct cpumask *nodemask) | ||
7164 | { | 7167 | { |
7165 | int group; | 7168 | int group; |
7169 | /* FIXME: use cpumask_of_node */ | ||
7166 | node_to_cpumask_ptr(pnodemask, cpu_to_node(cpu)); | 7170 | node_to_cpumask_ptr(pnodemask, cpu_to_node(cpu)); |
7167 | 7171 | ||
7168 | cpus_and(*nodemask, *pnodemask, *cpu_map); | 7172 | cpumask_and(nodemask, pnodemask, cpu_map); |
7169 | group = first_cpu(*nodemask); | 7173 | group = cpumask_first(nodemask); |
7170 | 7174 | ||
7171 | if (sg) | 7175 | if (sg) |
7172 | *sg = &per_cpu(sched_group_allnodes, group).sg; | 7176 | *sg = &per_cpu(sched_group_allnodes, group).sg; |
@@ -7202,7 +7206,8 @@ static void init_numa_sched_groups_power(struct sched_group *group_head) | |||
7202 | 7206 | ||
7203 | #ifdef CONFIG_NUMA | 7207 | #ifdef CONFIG_NUMA |
7204 | /* Free memory allocated for various sched_group structures */ | 7208 | /* Free memory allocated for various sched_group structures */ |
7205 | static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask) | 7209 | static void free_sched_groups(const struct cpumask *cpu_map, |
7210 | struct cpumask *nodemask) | ||
7206 | { | 7211 | { |
7207 | int cpu, i; | 7212 | int cpu, i; |
7208 | 7213 | ||
@@ -7215,10 +7220,11 @@ static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask) | |||
7215 | 7220 | ||
7216 | for (i = 0; i < nr_node_ids; i++) { | 7221 | for (i = 0; i < nr_node_ids; i++) { |
7217 | struct sched_group *oldsg, *sg = sched_group_nodes[i]; | 7222 | struct sched_group *oldsg, *sg = sched_group_nodes[i]; |
7223 | /* FIXME: Use cpumask_of_node */ | ||
7218 | node_to_cpumask_ptr(pnodemask, i); | 7224 | node_to_cpumask_ptr(pnodemask, i); |
7219 | 7225 | ||
7220 | cpus_and(*nodemask, *pnodemask, *cpu_map); | 7226 | cpus_and(*nodemask, *pnodemask, *cpu_map); |
7221 | if (cpus_empty(*nodemask)) | 7227 | if (cpumask_empty(nodemask)) |
7222 | continue; | 7228 | continue; |
7223 | 7229 | ||
7224 | if (sg == NULL) | 7230 | if (sg == NULL) |
@@ -7236,7 +7242,8 @@ next_sg: | |||
7236 | } | 7242 | } |
7237 | } | 7243 | } |
7238 | #else /* !CONFIG_NUMA */ | 7244 | #else /* !CONFIG_NUMA */ |
7239 | static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask) | 7245 | static void free_sched_groups(const struct cpumask *cpu_map, |
7246 | struct cpumask *nodemask) | ||
7240 | { | 7247 | { |
7241 | } | 7248 | } |
7242 | #endif /* CONFIG_NUMA */ | 7249 | #endif /* CONFIG_NUMA */ |
@@ -7366,7 +7373,7 @@ static void set_domain_attribute(struct sched_domain *sd, | |||
7366 | * Build sched domains for a given set of cpus and attach the sched domains | 7373 | * Build sched domains for a given set of cpus and attach the sched domains |
7367 | * to the individual cpus | 7374 | * to the individual cpus |
7368 | */ | 7375 | */ |
7369 | static int __build_sched_domains(const cpumask_t *cpu_map, | 7376 | static int __build_sched_domains(const struct cpumask *cpu_map, |
7370 | struct sched_domain_attr *attr) | 7377 | struct sched_domain_attr *attr) |
7371 | { | 7378 | { |
7372 | int i, err = -ENOMEM; | 7379 | int i, err = -ENOMEM; |
@@ -7416,7 +7423,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7416 | } | 7423 | } |
7417 | 7424 | ||
7418 | #ifdef CONFIG_NUMA | 7425 | #ifdef CONFIG_NUMA |
7419 | sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes; | 7426 | sched_group_nodes_bycpu[cpumask_first(cpu_map)] = sched_group_nodes; |
7420 | #endif | 7427 | #endif |
7421 | 7428 | ||
7422 | /* | 7429 | /* |
@@ -7425,12 +7432,13 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7425 | for_each_cpu(i, cpu_map) { | 7432 | for_each_cpu(i, cpu_map) { |
7426 | struct sched_domain *sd = NULL, *p; | 7433 | struct sched_domain *sd = NULL, *p; |
7427 | 7434 | ||
7435 | /* FIXME: use cpumask_of_node */ | ||
7428 | *nodemask = node_to_cpumask(cpu_to_node(i)); | 7436 | *nodemask = node_to_cpumask(cpu_to_node(i)); |
7429 | cpus_and(*nodemask, *nodemask, *cpu_map); | 7437 | cpus_and(*nodemask, *nodemask, *cpu_map); |
7430 | 7438 | ||
7431 | #ifdef CONFIG_NUMA | 7439 | #ifdef CONFIG_NUMA |
7432 | if (cpus_weight(*cpu_map) > | 7440 | if (cpumask_weight(cpu_map) > |
7433 | SD_NODES_PER_DOMAIN*cpus_weight(*nodemask)) { | 7441 | SD_NODES_PER_DOMAIN*cpumask_weight(nodemask)) { |
7434 | sd = &per_cpu(allnodes_domains, i); | 7442 | sd = &per_cpu(allnodes_domains, i); |
7435 | SD_INIT(sd, ALLNODES); | 7443 | SD_INIT(sd, ALLNODES); |
7436 | set_domain_attribute(sd, attr); | 7444 | set_domain_attribute(sd, attr); |
@@ -7491,9 +7499,9 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7491 | #ifdef CONFIG_SCHED_SMT | 7499 | #ifdef CONFIG_SCHED_SMT |
7492 | /* Set up CPU (sibling) groups */ | 7500 | /* Set up CPU (sibling) groups */ |
7493 | for_each_cpu(i, cpu_map) { | 7501 | for_each_cpu(i, cpu_map) { |
7494 | *this_sibling_map = per_cpu(cpu_sibling_map, i); | 7502 | cpumask_and(this_sibling_map, |
7495 | cpus_and(*this_sibling_map, *this_sibling_map, *cpu_map); | 7503 | &per_cpu(cpu_sibling_map, i), cpu_map); |
7496 | if (i != first_cpu(*this_sibling_map)) | 7504 | if (i != cpumask_first(this_sibling_map)) |
7497 | continue; | 7505 | continue; |
7498 | 7506 | ||
7499 | init_sched_build_groups(this_sibling_map, cpu_map, | 7507 | init_sched_build_groups(this_sibling_map, cpu_map, |
@@ -7505,9 +7513,10 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7505 | #ifdef CONFIG_SCHED_MC | 7513 | #ifdef CONFIG_SCHED_MC |
7506 | /* Set up multi-core groups */ | 7514 | /* Set up multi-core groups */ |
7507 | for_each_cpu(i, cpu_map) { | 7515 | for_each_cpu(i, cpu_map) { |
7516 | /* FIXME: Use cpu_coregroup_mask */ | ||
7508 | *this_core_map = cpu_coregroup_map(i); | 7517 | *this_core_map = cpu_coregroup_map(i); |
7509 | cpus_and(*this_core_map, *this_core_map, *cpu_map); | 7518 | cpus_and(*this_core_map, *this_core_map, *cpu_map); |
7510 | if (i != first_cpu(*this_core_map)) | 7519 | if (i != cpumask_first(this_core_map)) |
7511 | continue; | 7520 | continue; |
7512 | 7521 | ||
7513 | init_sched_build_groups(this_core_map, cpu_map, | 7522 | init_sched_build_groups(this_core_map, cpu_map, |
@@ -7518,9 +7527,10 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7518 | 7527 | ||
7519 | /* Set up physical groups */ | 7528 | /* Set up physical groups */ |
7520 | for (i = 0; i < nr_node_ids; i++) { | 7529 | for (i = 0; i < nr_node_ids; i++) { |
7530 | /* FIXME: Use cpumask_of_node */ | ||
7521 | *nodemask = node_to_cpumask(i); | 7531 | *nodemask = node_to_cpumask(i); |
7522 | cpus_and(*nodemask, *nodemask, *cpu_map); | 7532 | cpus_and(*nodemask, *nodemask, *cpu_map); |
7523 | if (cpus_empty(*nodemask)) | 7533 | if (cpumask_empty(nodemask)) |
7524 | continue; | 7534 | continue; |
7525 | 7535 | ||
7526 | init_sched_build_groups(nodemask, cpu_map, | 7536 | init_sched_build_groups(nodemask, cpu_map, |
@@ -7541,17 +7551,18 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7541 | struct sched_group *sg, *prev; | 7551 | struct sched_group *sg, *prev; |
7542 | int j; | 7552 | int j; |
7543 | 7553 | ||
7554 | /* FIXME: Use cpumask_of_node */ | ||
7544 | *nodemask = node_to_cpumask(i); | 7555 | *nodemask = node_to_cpumask(i); |
7545 | cpus_clear(*covered); | 7556 | cpumask_clear(covered); |
7546 | 7557 | ||
7547 | cpus_and(*nodemask, *nodemask, *cpu_map); | 7558 | cpus_and(*nodemask, *nodemask, *cpu_map); |
7548 | if (cpus_empty(*nodemask)) { | 7559 | if (cpumask_empty(nodemask)) { |
7549 | sched_group_nodes[i] = NULL; | 7560 | sched_group_nodes[i] = NULL; |
7550 | continue; | 7561 | continue; |
7551 | } | 7562 | } |
7552 | 7563 | ||
7553 | sched_domain_node_span(i, domainspan); | 7564 | sched_domain_node_span(i, domainspan); |
7554 | cpus_and(*domainspan, *domainspan, *cpu_map); | 7565 | cpumask_and(domainspan, domainspan, cpu_map); |
7555 | 7566 | ||
7556 | sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(), | 7567 | sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(), |
7557 | GFP_KERNEL, i); | 7568 | GFP_KERNEL, i); |
@@ -7570,21 +7581,22 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7570 | sg->__cpu_power = 0; | 7581 | sg->__cpu_power = 0; |
7571 | cpumask_copy(sched_group_cpus(sg), nodemask); | 7582 | cpumask_copy(sched_group_cpus(sg), nodemask); |
7572 | sg->next = sg; | 7583 | sg->next = sg; |
7573 | cpus_or(*covered, *covered, *nodemask); | 7584 | cpumask_or(covered, covered, nodemask); |
7574 | prev = sg; | 7585 | prev = sg; |
7575 | 7586 | ||
7576 | for (j = 0; j < nr_node_ids; j++) { | 7587 | for (j = 0; j < nr_node_ids; j++) { |
7577 | int n = (i + j) % nr_node_ids; | 7588 | int n = (i + j) % nr_node_ids; |
7589 | /* FIXME: Use cpumask_of_node */ | ||
7578 | node_to_cpumask_ptr(pnodemask, n); | 7590 | node_to_cpumask_ptr(pnodemask, n); |
7579 | 7591 | ||
7580 | cpus_complement(*notcovered, *covered); | 7592 | cpumask_complement(notcovered, covered); |
7581 | cpus_and(*tmpmask, *notcovered, *cpu_map); | 7593 | cpumask_and(tmpmask, notcovered, cpu_map); |
7582 | cpus_and(*tmpmask, *tmpmask, *domainspan); | 7594 | cpumask_and(tmpmask, tmpmask, domainspan); |
7583 | if (cpus_empty(*tmpmask)) | 7595 | if (cpumask_empty(tmpmask)) |
7584 | break; | 7596 | break; |
7585 | 7597 | ||
7586 | cpus_and(*tmpmask, *tmpmask, *pnodemask); | 7598 | cpumask_and(tmpmask, tmpmask, pnodemask); |
7587 | if (cpus_empty(*tmpmask)) | 7599 | if (cpumask_empty(tmpmask)) |
7588 | continue; | 7600 | continue; |
7589 | 7601 | ||
7590 | sg = kmalloc_node(sizeof(struct sched_group) + | 7602 | sg = kmalloc_node(sizeof(struct sched_group) + |
@@ -7598,7 +7610,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7598 | sg->__cpu_power = 0; | 7610 | sg->__cpu_power = 0; |
7599 | cpumask_copy(sched_group_cpus(sg), tmpmask); | 7611 | cpumask_copy(sched_group_cpus(sg), tmpmask); |
7600 | sg->next = prev->next; | 7612 | sg->next = prev->next; |
7601 | cpus_or(*covered, *covered, *tmpmask); | 7613 | cpumask_or(covered, covered, tmpmask); |
7602 | prev->next = sg; | 7614 | prev->next = sg; |
7603 | prev = sg; | 7615 | prev = sg; |
7604 | } | 7616 | } |
@@ -7634,7 +7646,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7634 | if (sd_allnodes) { | 7646 | if (sd_allnodes) { |
7635 | struct sched_group *sg; | 7647 | struct sched_group *sg; |
7636 | 7648 | ||
7637 | cpu_to_allnodes_group(first_cpu(*cpu_map), cpu_map, &sg, | 7649 | cpu_to_allnodes_group(cpumask_first(cpu_map), cpu_map, &sg, |
7638 | tmpmask); | 7650 | tmpmask); |
7639 | init_numa_sched_groups_power(sg); | 7651 | init_numa_sched_groups_power(sg); |
7640 | } | 7652 | } |
@@ -7690,12 +7702,12 @@ error: | |||
7690 | #endif | 7702 | #endif |
7691 | } | 7703 | } |
7692 | 7704 | ||
7693 | static int build_sched_domains(const cpumask_t *cpu_map) | 7705 | static int build_sched_domains(const struct cpumask *cpu_map) |
7694 | { | 7706 | { |
7695 | return __build_sched_domains(cpu_map, NULL); | 7707 | return __build_sched_domains(cpu_map, NULL); |
7696 | } | 7708 | } |
7697 | 7709 | ||
7698 | static cpumask_t *doms_cur; /* current sched domains */ | 7710 | static struct cpumask *doms_cur; /* current sched domains */ |
7699 | static int ndoms_cur; /* number of sched domains in 'doms_cur' */ | 7711 | static int ndoms_cur; /* number of sched domains in 'doms_cur' */ |
7700 | static struct sched_domain_attr *dattr_cur; | 7712 | static struct sched_domain_attr *dattr_cur; |
7701 | /* attribues of custom domains in 'doms_cur' */ | 7713 | /* attribues of custom domains in 'doms_cur' */ |
@@ -7716,13 +7728,13 @@ void __attribute__((weak)) arch_update_cpu_topology(void) | |||
7716 | * For now this just excludes isolated cpus, but could be used to | 7728 | * For now this just excludes isolated cpus, but could be used to |
7717 | * exclude other special cases in the future. | 7729 | * exclude other special cases in the future. |
7718 | */ | 7730 | */ |
7719 | static int arch_init_sched_domains(const cpumask_t *cpu_map) | 7731 | static int arch_init_sched_domains(const struct cpumask *cpu_map) |
7720 | { | 7732 | { |
7721 | int err; | 7733 | int err; |
7722 | 7734 | ||
7723 | arch_update_cpu_topology(); | 7735 | arch_update_cpu_topology(); |
7724 | ndoms_cur = 1; | 7736 | ndoms_cur = 1; |
7725 | doms_cur = kmalloc(sizeof(cpumask_t), GFP_KERNEL); | 7737 | doms_cur = kmalloc(cpumask_size(), GFP_KERNEL); |
7726 | if (!doms_cur) | 7738 | if (!doms_cur) |
7727 | doms_cur = fallback_doms; | 7739 | doms_cur = fallback_doms; |
7728 | cpumask_andnot(doms_cur, cpu_map, cpu_isolated_map); | 7740 | cpumask_andnot(doms_cur, cpu_map, cpu_isolated_map); |
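doms_cur is now sized with kmalloc(cpumask_size(), GFP_KERNEL) rather than sizeof(cpumask_t): cpumask_size() reflects the number of bytes the configured mask actually needs, which can be smaller than sizeof(cpumask_t) when CONFIG_CPUMASK_OFFSTACK limits masks to nr_cpu_ids bits. A sketch of the allocate-or-fallback idiom (alloc_domain_mask is an invented name):

    /* Sketch: allocate one runtime-sized cpumask, falling back to a static mask. */
    static struct cpumask *alloc_domain_mask(struct cpumask *fallback)
    {
            struct cpumask *doms = kmalloc(cpumask_size(), GFP_KERNEL);

            return doms ? doms : fallback;   /* mirrors the doms_cur/fallback_doms handling */
    }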
@@ -7733,8 +7745,8 @@ static int arch_init_sched_domains(const cpumask_t *cpu_map) | |||
7733 | return err; | 7745 | return err; |
7734 | } | 7746 | } |
7735 | 7747 | ||
7736 | static void arch_destroy_sched_domains(const cpumask_t *cpu_map, | 7748 | static void arch_destroy_sched_domains(const struct cpumask *cpu_map, |
7737 | cpumask_t *tmpmask) | 7749 | struct cpumask *tmpmask) |
7738 | { | 7750 | { |
7739 | free_sched_groups(cpu_map, tmpmask); | 7751 | free_sched_groups(cpu_map, tmpmask); |
7740 | } | 7752 | } |
@@ -7743,15 +7755,16 @@ static void arch_destroy_sched_domains(const cpumask_t *cpu_map, | |||
7743 | * Detach sched domains from a group of cpus specified in cpu_map | 7755 | * Detach sched domains from a group of cpus specified in cpu_map |
7744 | * These cpus will now be attached to the NULL domain | 7756 | * These cpus will now be attached to the NULL domain |
7745 | */ | 7757 | */ |
7746 | static void detach_destroy_domains(const cpumask_t *cpu_map) | 7758 | static void detach_destroy_domains(const struct cpumask *cpu_map) |
7747 | { | 7759 | { |
7748 | cpumask_t tmpmask; | 7760 | /* Save because hotplug lock held. */ |
7761 | static DECLARE_BITMAP(tmpmask, CONFIG_NR_CPUS); | ||
7749 | int i; | 7762 | int i; |
7750 | 7763 | ||
7751 | for_each_cpu(i, cpu_map) | 7764 | for_each_cpu(i, cpu_map) |
7752 | cpu_attach_domain(NULL, &def_root_domain, i); | 7765 | cpu_attach_domain(NULL, &def_root_domain, i); |
7753 | synchronize_sched(); | 7766 | synchronize_sched(); |
7754 | arch_destroy_sched_domains(cpu_map, &tmpmask); | 7767 | arch_destroy_sched_domains(cpu_map, to_cpumask(tmpmask)); |
7755 | } | 7768 | } |
7756 | 7769 | ||
7757 | /* handle null as "default" */ | 7770 | /* handle null as "default" */ |
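detach_destroy_domains() above trades its on-stack cpumask_t for a static DECLARE_BITMAP() converted with to_cpumask(); as the added comment notes, that is only safe because callers hold the hotplug lock, so the static scratch mask cannot be used concurrently. The idiom in isolation, as a sketch:

    /* Sketch: a scratch cpumask that never lives on the stack.
     * Only valid where callers are serialized (here: by the hotplug lock). */
    static void scratch_mask_user(void)
    {
            static DECLARE_BITMAP(tmp, CONFIG_NR_CPUS);
            struct cpumask *tmpmask = to_cpumask(tmp);

            cpumask_clear(tmpmask);          /* reset before each use */
            /* ... hand tmpmask to helpers that take a struct cpumask * ... */
    }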
@@ -7776,7 +7789,7 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, | |||
7776 | * doms_new[] to the current sched domain partitioning, doms_cur[]. | 7789 | * doms_new[] to the current sched domain partitioning, doms_cur[]. |
7777 | * It destroys each deleted domain and builds each new domain. | 7790 | * It destroys each deleted domain and builds each new domain. |
7778 | * | 7791 | * |
7779 | * 'doms_new' is an array of cpumask_t's of length 'ndoms_new'. | 7792 | * 'doms_new' is an array of cpumask's of length 'ndoms_new'. |
7780 | * The masks don't intersect (don't overlap.) We should setup one | 7793 | * The masks don't intersect (don't overlap.) We should setup one |
7781 | * sched domain for each mask. CPUs not in any of the cpumasks will | 7794 | * sched domain for each mask. CPUs not in any of the cpumasks will |
7782 | * not be load balanced. If the same cpumask appears both in the | 7795 | * not be load balanced. If the same cpumask appears both in the |
@@ -7790,13 +7803,14 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, | |||
7790 | * the single partition 'fallback_doms', it also forces the domains | 7803 | * the single partition 'fallback_doms', it also forces the domains |
7791 | * to be rebuilt. | 7804 | * to be rebuilt. |
7792 | * | 7805 | * |
7793 | * If doms_new == NULL it will be replaced with cpu_online_map. | 7806 | * If doms_new == NULL it will be replaced with cpu_online_mask. |
7794 | * ndoms_new == 0 is a special case for destroying existing domains, | 7807 | * ndoms_new == 0 is a special case for destroying existing domains, |
7795 | * and it will not create the default domain. | 7808 | * and it will not create the default domain. |
7796 | * | 7809 | * |
7797 | * Call with hotplug lock held | 7810 | * Call with hotplug lock held |
7798 | */ | 7811 | */ |
7799 | void partition_sched_domains(int ndoms_new, cpumask_t *doms_new, | 7812 | /* FIXME: Change to struct cpumask *doms_new[] */ |
7813 | void partition_sched_domains(int ndoms_new, struct cpumask *doms_new, | ||
7800 | struct sched_domain_attr *dattr_new) | 7814 | struct sched_domain_attr *dattr_new) |
7801 | { | 7815 | { |
7802 | int i, j, n; | 7816 | int i, j, n; |
@@ -7811,7 +7825,7 @@ void partition_sched_domains(int ndoms_new, cpumask_t *doms_new, | |||
7811 | /* Destroy deleted domains */ | 7825 | /* Destroy deleted domains */ |
7812 | for (i = 0; i < ndoms_cur; i++) { | 7826 | for (i = 0; i < ndoms_cur; i++) { |
7813 | for (j = 0; j < n; j++) { | 7827 | for (j = 0; j < n; j++) { |
7814 | if (cpus_equal(doms_cur[i], doms_new[j]) | 7828 | if (cpumask_equal(&doms_cur[i], &doms_new[j]) |
7815 | && dattrs_equal(dattr_cur, i, dattr_new, j)) | 7829 | && dattrs_equal(dattr_cur, i, dattr_new, j)) |
7816 | goto match1; | 7830 | goto match1; |
7817 | } | 7831 | } |
@@ -7831,7 +7845,7 @@ match1: | |||
7831 | /* Build new domains */ | 7845 | /* Build new domains */ |
7832 | for (i = 0; i < ndoms_new; i++) { | 7846 | for (i = 0; i < ndoms_new; i++) { |
7833 | for (j = 0; j < ndoms_cur; j++) { | 7847 | for (j = 0; j < ndoms_cur; j++) { |
7834 | if (cpus_equal(doms_new[i], doms_cur[j]) | 7848 | if (cpumask_equal(&doms_new[i], &doms_cur[j]) |
7835 | && dattrs_equal(dattr_new, i, dattr_cur, j)) | 7849 | && dattrs_equal(dattr_new, i, dattr_cur, j)) |
7836 | goto match2; | 7850 | goto match2; |
7837 | } | 7851 | } |
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index bba00402ed90..08ffffd4a410 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1017,7 +1017,7 @@ static void yield_task_fair(struct rq *rq) | |||
1017 | * search starts with cpus closest then further out as needed, | 1017 | * search starts with cpus closest then further out as needed, |
1018 | * so we always favor a closer, idle cpu. | 1018 | * so we always favor a closer, idle cpu. |
1019 | * Domains may include CPUs that are not usable for migration, | 1019 | * Domains may include CPUs that are not usable for migration, |
1020 | * hence we need to mask them out (cpu_active_map) | 1020 | * hence we need to mask them out (cpu_active_mask) |
1021 | * | 1021 | * |
1022 | * Returns the CPU we should wake onto. | 1022 | * Returns the CPU we should wake onto. |
1023 | */ | 1023 | */ |
@@ -1244,7 +1244,7 @@ static int select_task_rq_fair(struct task_struct *p, int sync) | |||
1244 | } | 1244 | } |
1245 | } | 1245 | } |
1246 | 1246 | ||
1247 | if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed))) | 1247 | if (unlikely(!cpumask_test_cpu(this_cpu, &p->cpus_allowed))) |
1248 | goto out; | 1248 | goto out; |
1249 | 1249 | ||
1250 | /* | 1250 | /* |
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 1f0e99d1a8ce..fb3964579a8a 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -923,7 +923,7 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep); | |||
923 | static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) | 923 | static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) |
924 | { | 924 | { |
925 | if (!task_running(rq, p) && | 925 | if (!task_running(rq, p) && |
926 | (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) && | 926 | (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) && |
927 | (p->rt.nr_cpus_allowed > 1)) | 927 | (p->rt.nr_cpus_allowed > 1)) |
928 | return 1; | 928 | return 1; |
929 | return 0; | 929 | return 0; |
@@ -982,7 +982,7 @@ static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask) | |||
982 | static int find_lowest_rq(struct task_struct *task) | 982 | static int find_lowest_rq(struct task_struct *task) |
983 | { | 983 | { |
984 | struct sched_domain *sd; | 984 | struct sched_domain *sd; |
985 | cpumask_t *lowest_mask = __get_cpu_var(local_cpu_mask); | 985 | struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask); |
986 | int this_cpu = smp_processor_id(); | 986 | int this_cpu = smp_processor_id(); |
987 | int cpu = task_cpu(task); | 987 | int cpu = task_cpu(task); |
988 | 988 | ||
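In sched_rt.c the per-CPU scratch mask is now referenced through a struct cpumask * local, and (in the next hunk) the active-CPU filter uses cpumask_and() against cpu_active_mask instead of cpus_and() against cpu_active_map. A sketch of that filter step, assuming the per-CPU local_cpu_mask declared elsewhere in this file:

    /* Sketch: restrict the candidate mask to CPUs that are active for scheduling. */
    static struct cpumask *active_candidates(void)
    {
            struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);

            /* was: cpus_and(*lowest_mask, *lowest_mask, cpu_active_map); */
            cpumask_and(lowest_mask, lowest_mask, cpu_active_mask);
            return lowest_mask;
    }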
@@ -997,7 +997,7 @@ static int find_lowest_rq(struct task_struct *task) | |||
997 | * I guess we might want to change cpupri_find() to ignore those | 997 | * I guess we might want to change cpupri_find() to ignore those |
998 | * in the first place. | 998 | * in the first place. |
999 | */ | 999 | */ |
1000 | cpus_and(*lowest_mask, *lowest_mask, cpu_active_map); | 1000 | cpumask_and(lowest_mask, lowest_mask, cpu_active_mask); |
1001 | 1001 | ||
1002 | /* | 1002 | /* |
1003 | * At this point we have built a mask of cpus representing the | 1003 | * At this point we have built a mask of cpus representing the |
@@ -1007,7 +1007,7 @@ static int find_lowest_rq(struct task_struct *task) | |||
1007 | * We prioritize the last cpu that the task executed on since | 1007 | * We prioritize the last cpu that the task executed on since |
1008 | * it is most likely cache-hot in that location. | 1008 | * it is most likely cache-hot in that location. |
1009 | */ | 1009 | */ |
1010 | if (cpu_isset(cpu, *lowest_mask)) | 1010 | if (cpumask_test_cpu(cpu, lowest_mask)) |
1011 | return cpu; | 1011 | return cpu; |
1012 | 1012 | ||
1013 | /* | 1013 | /* |
@@ -1064,8 +1064,8 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) | |||
1064 | * Also make sure that it wasn't scheduled on its rq. | 1064 | * Also make sure that it wasn't scheduled on its rq. |
1065 | */ | 1065 | */ |
1066 | if (unlikely(task_rq(task) != rq || | 1066 | if (unlikely(task_rq(task) != rq || |
1067 | !cpu_isset(lowest_rq->cpu, | 1067 | !cpumask_test_cpu(lowest_rq->cpu, |
1068 | task->cpus_allowed) || | 1068 | &task->cpus_allowed) || |
1069 | task_running(rq, task) || | 1069 | task_running(rq, task) || |
1070 | !task->se.on_rq)) { | 1070 | !task->se.on_rq)) { |
1071 | 1071 | ||
@@ -1315,9 +1315,9 @@ move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest, | |||
1315 | } | 1315 | } |
1316 | 1316 | ||
1317 | static void set_cpus_allowed_rt(struct task_struct *p, | 1317 | static void set_cpus_allowed_rt(struct task_struct *p, |
1318 | const cpumask_t *new_mask) | 1318 | const struct cpumask *new_mask) |
1319 | { | 1319 | { |
1320 | int weight = cpus_weight(*new_mask); | 1320 | int weight = cpumask_weight(new_mask); |
1321 | 1321 | ||
1322 | BUG_ON(!rt_task(p)); | 1322 | BUG_ON(!rt_task(p)); |
1323 | 1323 | ||
@@ -1338,7 +1338,7 @@ static void set_cpus_allowed_rt(struct task_struct *p, | |||
1338 | update_rt_migration(rq); | 1338 | update_rt_migration(rq); |
1339 | } | 1339 | } |
1340 | 1340 | ||
1341 | p->cpus_allowed = *new_mask; | 1341 | cpumask_copy(&p->cpus_allowed, new_mask); |
1342 | p->rt.nr_cpus_allowed = weight; | 1342 | p->rt.nr_cpus_allowed = weight; |
1343 | } | 1343 | } |
1344 | 1344 | ||