Diffstat (limited to 'drivers')
-rw-r--r--   drivers/acpi/processor_throttling.c     |  6
-rw-r--r--   drivers/base/cpu.c                      |  4
-rw-r--r--   drivers/cpufreq/cpufreq.c               | 14
-rw-r--r--   drivers/cpufreq/cpufreq_conservative.c  |  2
-rw-r--r--   drivers/cpufreq/cpufreq_ondemand.c      |  4
-rw-r--r--   drivers/infiniband/hw/ehca/ehca_irq.c   |  4
6 files changed, 17 insertions, 17 deletions
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index bb06738860c4..28509fbba6f9 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -1013,7 +1013,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 	 * affected cpu in order to get one proper T-state.
 	 * The notifier event is THROTTLING_PRECHANGE.
 	 */
-	for_each_cpu_mask(i, online_throttling_cpus) {
+	for_each_cpu_mask_nr(i, online_throttling_cpus) {
 		t_state.cpu = i;
 		acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
 							&t_state);
@@ -1034,7 +1034,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 	 * it is necessary to set T-state for every affected
 	 * cpus.
 	 */
-	for_each_cpu_mask(i, online_throttling_cpus) {
+	for_each_cpu_mask_nr(i, online_throttling_cpus) {
 		match_pr = processors[i];
 		/*
 		 * If the pointer is invalid, we will report the
@@ -1068,7 +1068,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 	 * affected cpu to update the T-states.
 	 * The notifier event is THROTTLING_POSTCHANGE
 	 */
-	for_each_cpu_mask(i, online_throttling_cpus) {
+	for_each_cpu_mask_nr(i, online_throttling_cpus) {
 		t_state.cpu = i;
 		acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
 							&t_state);
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index e38dfed41d80..5000402ae092 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -119,14 +119,14 @@ static ssize_t print_cpus_##type(struct sysdev_class *class, char *buf) \
 { \
 	return print_cpus_map(buf, &cpu_##type##_map); \
 } \
-struct sysdev_class_attribute attr_##type##_map = \
+static struct sysdev_class_attribute attr_##type##_map = \
 	_SYSDEV_CLASS_ATTR(type, 0444, print_cpus_##type, NULL)
 
 print_cpus_func(online);
 print_cpus_func(possible);
 print_cpus_func(present);
 
-struct sysdev_class_attribute *cpu_state_attr[] = {
+static struct sysdev_class_attribute *cpu_state_attr[] = {
 	&attr_online_map,
 	&attr_possible_map,
 	&attr_present_map,
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 1d41496ed2f8..d8f8b1e0edde 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -589,7 +589,7 @@ static ssize_t show_cpus(cpumask_t mask, char *buf)
 	ssize_t i = 0;
 	unsigned int cpu;
 
-	for_each_cpu_mask(cpu, mask) {
+	for_each_cpu_mask_nr(cpu, mask) {
 		if (i)
 			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
 		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
@@ -835,7 +835,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 	}
 #endif
 
-	for_each_cpu_mask(j, policy->cpus) {
+	for_each_cpu_mask_nr(j, policy->cpus) {
 		if (cpu == j)
 			continue;
 
@@ -898,14 +898,14 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 	}
 
 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
-	for_each_cpu_mask(j, policy->cpus) {
+	for_each_cpu_mask_nr(j, policy->cpus) {
 		cpufreq_cpu_data[j] = policy;
 		per_cpu(policy_cpu, j) = policy->cpu;
 	}
 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
 	/* symlink affected CPUs */
-	for_each_cpu_mask(j, policy->cpus) {
+	for_each_cpu_mask_nr(j, policy->cpus) {
 		if (j == cpu)
 			continue;
 		if (!cpu_online(j))
@@ -945,7 +945,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 
 err_out_unregister:
 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
-	for_each_cpu_mask(j, policy->cpus)
+	for_each_cpu_mask_nr(j, policy->cpus)
 		cpufreq_cpu_data[j] = NULL;
 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
@@ -1028,7 +1028,7 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
 	 * links afterwards.
 	 */
 	if (unlikely(cpus_weight(data->cpus) > 1)) {
-		for_each_cpu_mask(j, data->cpus) {
+		for_each_cpu_mask_nr(j, data->cpus) {
 			if (j == cpu)
 				continue;
 			cpufreq_cpu_data[j] = NULL;
@@ -1038,7 +1038,7 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
 	if (unlikely(cpus_weight(data->cpus) > 1)) {
-		for_each_cpu_mask(j, data->cpus) {
+		for_each_cpu_mask_nr(j, data->cpus) {
 			if (j == cpu)
 				continue;
 			dprintk("removing link for cpu %u\n", j);
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 5d3a04ba6ad2..fe565ee43757 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -497,7 +497,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			return rc;
 		}
 
-		for_each_cpu_mask(j, policy->cpus) {
+		for_each_cpu_mask_nr(j, policy->cpus) {
 			struct cpu_dbs_info_s *j_dbs_info;
 			j_dbs_info = &per_cpu(cpu_dbs_info, j);
 			j_dbs_info->cur_policy = policy;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index d2af20dda382..33855cb3cf16 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -367,7 +367,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 
 	/* Get Idle Time */
 	idle_ticks = UINT_MAX;
-	for_each_cpu_mask(j, policy->cpus) {
+	for_each_cpu_mask_nr(j, policy->cpus) {
 		cputime64_t total_idle_ticks;
 		unsigned int tmp_idle_ticks;
 		struct cpu_dbs_info_s *j_dbs_info;
@@ -521,7 +521,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			return rc;
 		}
 
-		for_each_cpu_mask(j, policy->cpus) {
+		for_each_cpu_mask_nr(j, policy->cpus) {
 			struct cpu_dbs_info_s *j_dbs_info;
 			j_dbs_info = &per_cpu(cpu_dbs_info, j);
 			j_dbs_info->cur_policy = policy;
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index ce1ab0571be3..43180b952c1f 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -641,8 +641,8 @@ static inline int find_next_online_cpu(struct ehca_comp_pool *pool)
 		ehca_dmp(&cpu_online_map, sizeof(cpumask_t), "");
 
 	spin_lock_irqsave(&pool->last_cpu_lock, flags);
-	cpu = next_cpu(pool->last_cpu, cpu_online_map);
-	if (cpu == NR_CPUS)
+	cpu = next_cpu_nr(pool->last_cpu, cpu_online_map);
+	if (cpu >= nr_cpu_ids)
 		cpu = first_cpu(cpu_online_map);
 	pool->last_cpu = cpu;
 	spin_unlock_irqrestore(&pool->last_cpu_lock, flags);
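
For context on the conversion above: for_each_cpu_mask() and next_cpu() walk a cpumask up to the compile-time limit NR_CPUS, whereas the for_each_cpu_mask_nr()/next_cpu_nr() variants (and the matching nr_cpu_ids comparison in the ehca_irq.c hunk) stop at the number of CPU ids the running system can actually have. The C sketch below is a simplified userspace illustration of that difference, not the kernel's cpumask code; the NR_CPUS and nr_cpu_ids values and the helper bodies are assumptions made up for the example.

/*
 * Minimal userspace sketch, NOT the kernel implementation: shows why bounding
 * the scan by the runtime count (nr_cpu_ids) beats scanning to the
 * compile-time maximum (NR_CPUS) on kernels built with a large NR_CPUS.
 */
#include <stdio.h>

#define NR_CPUS       4096                              /* compile-time maximum (assumed) */
#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

static unsigned long online_mask[(NR_CPUS + BITS_PER_LONG - 1) / BITS_PER_LONG];
static int nr_cpu_ids = 4;                              /* runtime CPU count (assumed) */

static void set_cpu_online(int cpu)
{
	online_mask[cpu / BITS_PER_LONG] |= 1UL << (cpu % BITS_PER_LONG);
}

static int cpu_is_online(int cpu)
{
	return (online_mask[cpu / BITS_PER_LONG] >> (cpu % BITS_PER_LONG)) & 1UL;
}

/* old style: scans up to NR_CPUS, returns NR_CPUS when the mask is exhausted */
static int next_cpu(int prev)
{
	int cpu;

	for (cpu = prev + 1; cpu < NR_CPUS; cpu++)
		if (cpu_is_online(cpu))
			return cpu;
	return NR_CPUS;
}

/* _nr style: scans only up to nr_cpu_ids, returns nr_cpu_ids when exhausted */
static int next_cpu_nr(int prev)
{
	int cpu;

	for (cpu = prev + 1; cpu < nr_cpu_ids; cpu++)
		if (cpu_is_online(cpu))
			return cpu;
	return nr_cpu_ids;
}

int main(void)
{
	int cpu;

	set_cpu_online(0);
	set_cpu_online(2);

	/* callers compare against nr_cpu_ids, as in the ehca_irq.c change */
	for (cpu = next_cpu_nr(-1); cpu < nr_cpu_ids; cpu = next_cpu_nr(cpu))
		printf("online cpu %d\n", cpu);

	/* the old-style walk finds the same CPUs, but its final probe has to
	 * scan all the way to NR_CPUS before it knows the walk is done */
	for (cpu = next_cpu(-1); cpu < NR_CPUS; cpu = next_cpu(cpu))
		printf("online cpu %d (NR_CPUS-bounded walk)\n", cpu);

	return 0;
}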