Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 44 ++++++++++++++++++++++++--------------------
 1 file changed, 24 insertions(+), 20 deletions(-)
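
Note: the bulk of this diff mechanically replaces for_each_cpu_mask() with for_each_cpu_mask_nr(), the iterator variant bounded by nr_cpu_ids (the number of CPUs actually possible on this boot) rather than the compile-time NR_CPUS maximum. Below is a minimal, self-contained userspace sketch of that difference; the macros, next_cpu() helper and cpumask_t here are toy stand-ins for illustration, not the kernel's real cpumask API.

#include <stdio.h>

#define NR_CPUS 32				/* compile-time maximum, kept to one word here */
static int nr_cpu_ids = 4;			/* CPUs actually possible on this boot (toy value) */

typedef struct { unsigned long bits; } cpumask_t;	/* toy stand-in for the kernel type */

/* Find the next set bit after n, up to limit; returns limit when none is left. */
static int next_cpu(int n, cpumask_t mask, int limit)
{
	for (n++; n < limit; n++)
		if (mask.bits & (1UL << n))
			return n;
	return limit;
}

/* Old pattern: scans every possible bit up to NR_CPUS. */
#define for_each_cpu_mask(cpu, mask)				\
	for ((cpu) = next_cpu(-1, (mask), NR_CPUS);		\
	     (cpu) < NR_CPUS;					\
	     (cpu) = next_cpu((cpu), (mask), NR_CPUS))

/* "_nr" pattern: stops at nr_cpu_ids, so large NR_CPUS configs only pay for real CPUs. */
#define for_each_cpu_mask_nr(cpu, mask)				\
	for ((cpu) = next_cpu(-1, (mask), nr_cpu_ids);		\
	     (cpu) < nr_cpu_ids;				\
	     (cpu) = next_cpu((cpu), (mask), nr_cpu_ids))

int main(void)
{
	cpumask_t online = { .bits = 0xb };	/* CPUs 0, 1 and 3 set */
	int cpu;

	for_each_cpu_mask_nr(cpu, online)
		printf("cpu %d\n", cpu);	/* prints 0, 1, 3 */
	return 0;
}
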
diff --git a/kernel/sched.c b/kernel/sched.c
index 62b1b8ecb5c7..6acf749d3336 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2029,7 +2029,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
 		/* Tally up the load of all CPUs in the group */
 		avg_load = 0;
 
-		for_each_cpu_mask(i, group->cpumask) {
+		for_each_cpu_mask_nr(i, group->cpumask) {
 			/* Bias balancing toward cpus of our domain */
 			if (local_group)
 				load = source_load(i, load_idx);
@@ -2071,7 +2071,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu,
 	/* Traverse only the allowed CPUs */
 	cpus_and(*tmp, group->cpumask, p->cpus_allowed);
 
-	for_each_cpu_mask(i, *tmp) {
+	for_each_cpu_mask_nr(i, *tmp) {
 		load = weighted_cpuload(i);
 
 		if (load < min_load || (load == min_load && i == this_cpu)) {
@@ -3089,7 +3089,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		max_cpu_load = 0;
 		min_cpu_load = ~0UL;
 
-		for_each_cpu_mask(i, group->cpumask) {
+		for_each_cpu_mask_nr(i, group->cpumask) {
 			struct rq *rq;
 
 			if (!cpu_isset(i, *cpus))
@@ -3368,7 +3368,7 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
 	unsigned long max_load = 0;
 	int i;
 
-	for_each_cpu_mask(i, group->cpumask) {
+	for_each_cpu_mask_nr(i, group->cpumask) {
 		unsigned long wl;
 
 		if (!cpu_isset(i, *cpus))
@@ -3910,7 +3910,7 @@ static void run_rebalance_domains(struct softirq_action *h)
 		int balance_cpu;
 
 		cpu_clear(this_cpu, cpus);
-		for_each_cpu_mask(balance_cpu, cpus) {
+		for_each_cpu_mask_nr(balance_cpu, cpus) {
 			/*
 			 * If this cpu gets work to do, stop the load balancing
 			 * work being done for other cpus. Next load
@@ -6721,7 +6721,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
 
 	cpus_clear(*covered);
 
-	for_each_cpu_mask(i, *span) {
+	for_each_cpu_mask_nr(i, *span) {
 		struct sched_group *sg;
 		int group = group_fn(i, cpu_map, &sg, tmpmask);
 		int j;
@@ -6732,7 +6732,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
 		cpus_clear(sg->cpumask);
 		sg->__cpu_power = 0;
 
-		for_each_cpu_mask(j, *span) {
+		for_each_cpu_mask_nr(j, *span) {
 			if (group_fn(j, cpu_map, NULL, tmpmask) != group)
 				continue;
 
@@ -6932,7 +6932,7 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
 	if (!sg)
 		return;
 	do {
-		for_each_cpu_mask(j, sg->cpumask) {
+		for_each_cpu_mask_nr(j, sg->cpumask) {
 			struct sched_domain *sd;
 
 			sd = &per_cpu(phys_domains, j);
@@ -6957,7 +6957,7 @@ static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
 {
 	int cpu, i;
 
-	for_each_cpu_mask(cpu, *cpu_map) {
+	for_each_cpu_mask_nr(cpu, *cpu_map) {
 		struct sched_group **sched_group_nodes
 			= sched_group_nodes_bycpu[cpu];
 
@@ -7196,7 +7196,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 	/*
 	 * Set up domains for cpus specified by the cpu_map.
 	 */
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		struct sched_domain *sd = NULL, *p;
 		SCHED_CPUMASK_VAR(nodemask, allmasks);
 
@@ -7263,7 +7263,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 
 #ifdef CONFIG_SCHED_SMT
 	/* Set up CPU (sibling) groups */
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		SCHED_CPUMASK_VAR(this_sibling_map, allmasks);
 		SCHED_CPUMASK_VAR(send_covered, allmasks);
 
@@ -7280,7 +7280,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 
 #ifdef CONFIG_SCHED_MC
 	/* Set up multi-core groups */
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		SCHED_CPUMASK_VAR(this_core_map, allmasks);
 		SCHED_CPUMASK_VAR(send_covered, allmasks);
 
@@ -7347,7 +7347,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 			goto error;
 		}
 		sched_group_nodes[i] = sg;
-		for_each_cpu_mask(j, *nodemask) {
+		for_each_cpu_mask_nr(j, *nodemask) {
 			struct sched_domain *sd;
 
 			sd = &per_cpu(node_domains, j);
@@ -7393,21 +7393,21 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 
 	/* Calculate CPU power for physical packages and nodes */
 #ifdef CONFIG_SCHED_SMT
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		struct sched_domain *sd = &per_cpu(cpu_domains, i);
 
 		init_sched_groups_power(i, sd);
 	}
 #endif
 #ifdef CONFIG_SCHED_MC
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		struct sched_domain *sd = &per_cpu(core_domains, i);
 
 		init_sched_groups_power(i, sd);
 	}
 #endif
 
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		struct sched_domain *sd = &per_cpu(phys_domains, i);
 
 		init_sched_groups_power(i, sd);
@@ -7427,7 +7427,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 #endif
 
 	/* Attach the domains */
-	for_each_cpu_mask(i, *cpu_map) {
+	for_each_cpu_mask_nr(i, *cpu_map) {
 		struct sched_domain *sd;
 #ifdef CONFIG_SCHED_SMT
 		sd = &per_cpu(cpu_domains, i);
@@ -7510,7 +7510,7 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)
 
 	unregister_sched_domain_sysctl();
 
-	for_each_cpu_mask(i, *cpu_map)
+	for_each_cpu_mask_nr(i, *cpu_map)
 		cpu_attach_domain(NULL, &def_root_domain, i);
 	synchronize_sched();
 	arch_destroy_sched_domains(cpu_map, &tmpmask);
@@ -7641,11 +7641,13 @@ static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
 }
 
 #ifdef CONFIG_SCHED_MC
-static ssize_t sched_mc_power_savings_show(struct sys_device *dev, char *page)
+static ssize_t sched_mc_power_savings_show(struct sys_device *dev,
+				struct sysdev_attribute *attr, char *page)
 {
 	return sprintf(page, "%u\n", sched_mc_power_savings);
 }
 static ssize_t sched_mc_power_savings_store(struct sys_device *dev,
+				struct sysdev_attribute *attr,
 				const char *buf, size_t count)
 {
 	return sched_power_savings_store(buf, count, 0);
@@ -7655,11 +7657,13 @@ static SYSDEV_ATTR(sched_mc_power_savings, 0644, sched_mc_power_savings_show,
 #endif
 
 #ifdef CONFIG_SCHED_SMT
-static ssize_t sched_smt_power_savings_show(struct sys_device *dev, char *page)
+static ssize_t sched_smt_power_savings_show(struct sys_device *dev,
+				struct sysdev_attribute *attr, char *page)
 {
 	return sprintf(page, "%u\n", sched_smt_power_savings);
 }
 static ssize_t sched_smt_power_savings_store(struct sys_device *dev,
+				struct sysdev_attribute *attr,
 				const char *buf, size_t count)
 {
 	return sched_power_savings_store(buf, count, 1);
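
Note: the last two hunks additionally thread a struct sysdev_attribute * argument through the power-savings show/store handlers. One reason for passing the attribute into the callback is that a single handler can then serve several attributes by inspecting which one was accessed. The sketch below illustrates that idea with simplified stand-in structs; it is not the kernel's sys_device/sysdev_attribute API, only a userspace illustration of the calling convention shown in the hunks above.

#include <stdio.h>
#include <string.h>

/* Toy stand-ins for illustration only, not the kernel's definitions. */
struct sys_device { const char *name; };
struct sysdev_attribute { const char *name; };

static unsigned int sched_mc_power_savings;		/* 0 */
static unsigned int sched_smt_power_savings = 1;

/* With the attribute passed in, one callback can distinguish attributes via attr. */
static int power_savings_show(struct sys_device *dev,
			      struct sysdev_attribute *attr, char *page)
{
	unsigned int val;

	if (strcmp(attr->name, "sched_smt_power_savings") == 0)
		val = sched_smt_power_savings;
	else
		val = sched_mc_power_savings;
	return sprintf(page, "%u\n", val);
}

int main(void)
{
	struct sys_device cpu_dev = { "system/cpu" };
	struct sysdev_attribute mc  = { "sched_mc_power_savings" };
	struct sysdev_attribute smt = { "sched_smt_power_savings" };
	char page[32];

	power_savings_show(&cpu_dev, &mc, page);
	printf("%s", page);			/* prints 0 */
	power_savings_show(&cpu_dev, &smt, page);
	printf("%s", page);			/* prints 1 */
	return 0;
}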