Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/processor_throttling.c    | 17
-rw-r--r--  drivers/base/cpu.c                     |  4
-rw-r--r--  drivers/cpufreq/cpufreq.c              | 14
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c |  2
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c     |  4
-rw-r--r--  drivers/cpufreq/cpufreq_userspace.c    | 79
-rw-r--r--  drivers/firmware/dcdbas.c              |  3
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_irq.c  |  4
-rw-r--r--  drivers/misc/sgi-xp/xpc_main.c         |  3

9 files changed, 72 insertions(+), 58 deletions(-)
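Note: most of the hunks below apply one of a few mechanical conversions from the large-NR_CPUS (cpus4096) work. The most frequent is replacing for_each_cpu_mask() with for_each_cpu_mask_nr(), which scans a cpumask only up to nr_cpu_ids (the highest possible CPU id on the running system, determined at boot) rather than the compile-time NR_CPUS bound. A minimal sketch of the difference; handle_cpu() is a hypothetical stand-in for the loop bodies in this patch:

        cpumask_t mask = cpu_online_map;
        int cpu;

        /* Old iterator: walks set bits up to the compile-time NR_CPUS limit. */
        for_each_cpu_mask(cpu, mask)
                handle_cpu(cpu);

        /* New iterator: same semantics, but stops at nr_cpu_ids, which is
         * much cheaper when NR_CPUS=4096 and the machine has only a few CPUs. */
        for_each_cpu_mask_nr(cpu, mask)
                handle_cpu(cpu);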
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 0622ace05220..a2c3f9cfa549 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -827,6 +827,7 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
 static int acpi_processor_get_throttling(struct acpi_processor *pr)
 {
         cpumask_t saved_mask;
+        cpumask_of_cpu_ptr_declare(new_mask);
         int ret;
 
         if (!pr)
@@ -838,7 +839,8 @@ static int acpi_processor_get_throttling(struct acpi_processor *pr)
          * Migrate task to the cpu pointed by pr.
          */
         saved_mask = current->cpus_allowed;
-        set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id));
+        cpumask_of_cpu_ptr_next(new_mask, pr->id);
+        set_cpus_allowed_ptr(current, new_mask);
         ret = pr->throttling.acpi_processor_get_throttling(pr);
         /* restore the previous state */
         set_cpus_allowed_ptr(current, &saved_mask);
@@ -987,6 +989,7 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
 int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 {
         cpumask_t saved_mask;
+        cpumask_of_cpu_ptr_declare(new_mask);
         int ret = 0;
         unsigned int i;
         struct acpi_processor *match_pr;
@@ -1013,7 +1016,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
          * affected cpu in order to get one proper T-state.
          * The notifier event is THROTTLING_PRECHANGE.
          */
-        for_each_cpu_mask(i, online_throttling_cpus) {
+        for_each_cpu_mask_nr(i, online_throttling_cpus) {
                 t_state.cpu = i;
                 acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
                                                         &t_state);
@@ -1025,7 +1028,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
          * it can be called only for the cpu pointed by pr.
          */
         if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
-                set_cpus_allowed_ptr(current, &cpumask_of_cpu(pr->id));
+                cpumask_of_cpu_ptr_next(new_mask, pr->id);
+                set_cpus_allowed_ptr(current, new_mask);
                 ret = p_throttling->acpi_processor_set_throttling(pr,
                                                 t_state.target_state);
         } else {
@@ -1034,7 +1038,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
                  * it is necessary to set T-state for every affected
                  * cpus.
                  */
-                for_each_cpu_mask(i, online_throttling_cpus) {
+                for_each_cpu_mask_nr(i, online_throttling_cpus) {
                         match_pr = per_cpu(processors, i);
                         /*
                          * If the pointer is invalid, we will report the
@@ -1056,7 +1060,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
                                 continue;
                         }
                         t_state.cpu = i;
-                        set_cpus_allowed_ptr(current, &cpumask_of_cpu(i));
+                        cpumask_of_cpu_ptr_next(new_mask, i);
+                        set_cpus_allowed_ptr(current, new_mask);
                         ret = match_pr->throttling.
                                 acpi_processor_set_throttling(
                                 match_pr, t_state.target_state);
@@ -1068,7 +1073,7 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
          * affected cpu to update the T-states.
          * The notifier event is THROTTLING_POSTCHANGE
          */
-        for_each_cpu_mask(i, online_throttling_cpus) {
+        for_each_cpu_mask_nr(i, online_throttling_cpus) {
                 t_state.cpu = i;
                 acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
                                                         &t_state);
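Note: all four set_cpus_allowed_ptr() sites above share one shape: the mask pointer is declared once with cpumask_of_cpu_ptr_declare() and re-aimed at each target CPU with cpumask_of_cpu_ptr_next() just before migrating. This avoids taking &cpumask_of_cpu(cpu) inline, which with NR_CPUS=4096 can materialize a large cpumask_t temporary on the kernel stack at every call site. A condensed sketch of the pattern, assuming a hypothetical some_mask and per-CPU helper do_work_on():

        cpumask_t saved_mask = current->cpus_allowed;
        cpumask_of_cpu_ptr_declare(new_mask);   /* declared once, outside the loop */
        int cpu;

        for_each_cpu_mask_nr(cpu, some_mask) {
                cpumask_of_cpu_ptr_next(new_mask, cpu);  /* point at this CPU's mask */
                set_cpus_allowed_ptr(current, new_mask); /* migrate onto that CPU */
                do_work_on(cpu);                         /* hypothetical per-CPU work */
        }
        set_cpus_allowed_ptr(current, &saved_mask);      /* restore original affinity */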
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 20537d507909..64f5d54f7edc 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -121,14 +121,14 @@ static ssize_t print_cpus_##type(struct sysdev_class *class, char *buf) \
 { \
         return print_cpus_map(buf, &cpu_##type##_map); \
 } \
-struct sysdev_class_attribute attr_##type##_map = \
+static struct sysdev_class_attribute attr_##type##_map = \
         _SYSDEV_CLASS_ATTR(type, 0444, print_cpus_##type, NULL)
 
 print_cpus_func(online);
 print_cpus_func(possible);
 print_cpus_func(present);
 
-struct sysdev_class_attribute *cpu_state_attr[] = {
+static struct sysdev_class_attribute *cpu_state_attr[] = {
         &attr_online_map,
         &attr_possible_map,
         &attr_present_map,
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index ee1df0d45e81..8d6a3ff02672 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -589,7 +589,7 @@ static ssize_t show_cpus(cpumask_t mask, char *buf)
         ssize_t i = 0;
         unsigned int cpu;
 
-        for_each_cpu_mask(cpu, mask) {
+        for_each_cpu_mask_nr(cpu, mask) {
                 if (i)
                         i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
                 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
@@ -835,7 +835,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
         }
 #endif
 
-        for_each_cpu_mask(j, policy->cpus) {
+        for_each_cpu_mask_nr(j, policy->cpus) {
                 if (cpu == j)
                         continue;
 
@@ -898,14 +898,14 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
         }
 
         spin_lock_irqsave(&cpufreq_driver_lock, flags);
-        for_each_cpu_mask(j, policy->cpus) {
+        for_each_cpu_mask_nr(j, policy->cpus) {
                 per_cpu(cpufreq_cpu_data, j) = policy;
                 per_cpu(policy_cpu, j) = policy->cpu;
         }
         spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
         /* symlink affected CPUs */
-        for_each_cpu_mask(j, policy->cpus) {
+        for_each_cpu_mask_nr(j, policy->cpus) {
                 if (j == cpu)
                         continue;
                 if (!cpu_online(j))
@@ -945,7 +945,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 
 err_out_unregister:
         spin_lock_irqsave(&cpufreq_driver_lock, flags);
-        for_each_cpu_mask(j, policy->cpus)
+        for_each_cpu_mask_nr(j, policy->cpus)
                 per_cpu(cpufreq_cpu_data, j) = NULL;
         spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
@@ -1028,7 +1028,7 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
          * the sysfs links afterwards.
          */
         if (unlikely(cpus_weight(data->cpus) > 1)) {
-                for_each_cpu_mask(j, data->cpus) {
+                for_each_cpu_mask_nr(j, data->cpus) {
                         if (j == cpu)
                                 continue;
                         per_cpu(cpufreq_cpu_data, j) = NULL;
@@ -1038,7 +1038,7 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
         spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
         if (unlikely(cpus_weight(data->cpus) > 1)) {
-                for_each_cpu_mask(j, data->cpus) {
+                for_each_cpu_mask_nr(j, data->cpus) {
                         if (j == cpu)
                                 continue;
                         dprintk("removing link for cpu %u\n", j);
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 5d3a04ba6ad2..fe565ee43757 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -497,7 +497,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                 return rc;
         }
 
-        for_each_cpu_mask(j, policy->cpus) {
+        for_each_cpu_mask_nr(j, policy->cpus) {
                 struct cpu_dbs_info_s *j_dbs_info;
                 j_dbs_info = &per_cpu(cpu_dbs_info, j);
                 j_dbs_info->cur_policy = policy;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index d2af20dda382..33855cb3cf16 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -367,7 +367,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 
         /* Get Idle Time */
         idle_ticks = UINT_MAX;
-        for_each_cpu_mask(j, policy->cpus) {
+        for_each_cpu_mask_nr(j, policy->cpus) {
                 cputime64_t total_idle_ticks;
                 unsigned int tmp_idle_ticks;
                 struct cpu_dbs_info_s *j_dbs_info;
@@ -521,7 +521,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                 return rc;
         }
 
-        for_each_cpu_mask(j, policy->cpus) {
+        for_each_cpu_mask_nr(j, policy->cpus) {
                 struct cpu_dbs_info_s *j_dbs_info;
                 j_dbs_info = &per_cpu(cpu_dbs_info, j);
                 j_dbs_info->cur_policy = policy;
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index cb2ac01a41a1..32244aa7cc0c 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -30,16 +30,18 @@
 /**
  * A few values needed by the userspace governor
  */
-static unsigned int cpu_max_freq[NR_CPUS];
-static unsigned int cpu_min_freq[NR_CPUS];
-static unsigned int cpu_cur_freq[NR_CPUS]; /* current CPU freq */
-static unsigned int cpu_set_freq[NR_CPUS]; /* CPU freq desired by userspace */
-static unsigned int cpu_is_managed[NR_CPUS];
+static DEFINE_PER_CPU(unsigned int, cpu_max_freq);
+static DEFINE_PER_CPU(unsigned int, cpu_min_freq);
+static DEFINE_PER_CPU(unsigned int, cpu_cur_freq); /* current CPU freq */
+static DEFINE_PER_CPU(unsigned int, cpu_set_freq); /* CPU freq desired by
+                                                        userspace */
+static DEFINE_PER_CPU(unsigned int, cpu_is_managed);
 
 static DEFINE_MUTEX (userspace_mutex);
 static int cpus_using_userspace_governor;
 
-#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "userspace", msg)
+#define dprintk(msg...) \
+        cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "userspace", msg)
 
 /* keep track of frequency transitions */
 static int
@@ -48,12 +50,12 @@ userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 {
         struct cpufreq_freqs *freq = data;
 
-        if (!cpu_is_managed[freq->cpu])
+        if (!per_cpu(cpu_is_managed, freq->cpu))
                 return 0;
 
         dprintk("saving cpu_cur_freq of cpu %u to be %u kHz\n",
                         freq->cpu, freq->new);
-        cpu_cur_freq[freq->cpu] = freq->new;
+        per_cpu(cpu_cur_freq, freq->cpu) = freq->new;
 
         return 0;
 }
@@ -77,15 +79,15 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
         dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
 
         mutex_lock(&userspace_mutex);
-        if (!cpu_is_managed[policy->cpu])
+        if (!per_cpu(cpu_is_managed, policy->cpu))
                 goto err;
 
-        cpu_set_freq[policy->cpu] = freq;
+        per_cpu(cpu_set_freq, policy->cpu) = freq;
 
-        if (freq < cpu_min_freq[policy->cpu])
-                freq = cpu_min_freq[policy->cpu];
-        if (freq > cpu_max_freq[policy->cpu])
-                freq = cpu_max_freq[policy->cpu];
+        if (freq < per_cpu(cpu_min_freq, policy->cpu))
+                freq = per_cpu(cpu_min_freq, policy->cpu);
+        if (freq > per_cpu(cpu_max_freq, policy->cpu))
+                freq = per_cpu(cpu_max_freq, policy->cpu);
 
         /*
          * We're safe from concurrent calls to ->target() here
@@ -104,7 +106,7 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
 
 static ssize_t show_speed(struct cpufreq_policy *policy, char *buf)
 {
-        return sprintf(buf, "%u\n", cpu_cur_freq[policy->cpu]);
+        return sprintf(buf, "%u\n", per_cpu(cpu_cur_freq, policy->cpu));
 }
 
 static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
@@ -127,12 +129,17 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
                 }
                 cpus_using_userspace_governor++;
 
-                cpu_is_managed[cpu] = 1;
-                cpu_min_freq[cpu] = policy->min;
-                cpu_max_freq[cpu] = policy->max;
-                cpu_cur_freq[cpu] = policy->cur;
-                cpu_set_freq[cpu] = policy->cur;
-                dprintk("managing cpu %u started (%u - %u kHz, currently %u kHz)\n", cpu, cpu_min_freq[cpu], cpu_max_freq[cpu], cpu_cur_freq[cpu]);
+                per_cpu(cpu_is_managed, cpu) = 1;
+                per_cpu(cpu_min_freq, cpu) = policy->min;
+                per_cpu(cpu_max_freq, cpu) = policy->max;
+                per_cpu(cpu_cur_freq, cpu) = policy->cur;
+                per_cpu(cpu_set_freq, cpu) = policy->cur;
+                dprintk("managing cpu %u started "
+                        "(%u - %u kHz, currently %u kHz)\n",
+                                cpu,
+                                per_cpu(cpu_min_freq, cpu),
+                                per_cpu(cpu_max_freq, cpu),
+                                per_cpu(cpu_cur_freq, cpu));
 
                 mutex_unlock(&userspace_mutex);
                 break;
@@ -145,34 +152,34 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
                                         CPUFREQ_TRANSITION_NOTIFIER);
                 }
 
-                cpu_is_managed[cpu] = 0;
-                cpu_min_freq[cpu] = 0;
-                cpu_max_freq[cpu] = 0;
-                cpu_set_freq[cpu] = 0;
+                per_cpu(cpu_is_managed, cpu) = 0;
+                per_cpu(cpu_min_freq, cpu) = 0;
+                per_cpu(cpu_max_freq, cpu) = 0;
+                per_cpu(cpu_set_freq, cpu) = 0;
                 dprintk("managing cpu %u stopped\n", cpu);
                 mutex_unlock(&userspace_mutex);
                 break;
         case CPUFREQ_GOV_LIMITS:
                 mutex_lock(&userspace_mutex);
-                dprintk("limit event for cpu %u: %u - %u kHz,"
+                dprintk("limit event for cpu %u: %u - %u kHz, "
                         "currently %u kHz, last set to %u kHz\n",
                         cpu, policy->min, policy->max,
-                        cpu_cur_freq[cpu], cpu_set_freq[cpu]);
-                if (policy->max < cpu_set_freq[cpu]) {
+                        per_cpu(cpu_cur_freq, cpu),
+                        per_cpu(cpu_set_freq, cpu));
+                if (policy->max < per_cpu(cpu_set_freq, cpu)) {
                         __cpufreq_driver_target(policy, policy->max,
                                                 CPUFREQ_RELATION_H);
-                }
-                else if (policy->min > cpu_set_freq[cpu]) {
+                } else if (policy->min > per_cpu(cpu_set_freq, cpu)) {
                         __cpufreq_driver_target(policy, policy->min,
                                                 CPUFREQ_RELATION_L);
-                }
-                else {
-                        __cpufreq_driver_target(policy, cpu_set_freq[cpu],
+                } else {
+                        __cpufreq_driver_target(policy,
+                                        per_cpu(cpu_set_freq, cpu),
                                         CPUFREQ_RELATION_L);
                 }
-                cpu_min_freq[cpu] = policy->min;
-                cpu_max_freq[cpu] = policy->max;
-                cpu_cur_freq[cpu] = policy->cur;
+                per_cpu(cpu_min_freq, cpu) = policy->min;
+                per_cpu(cpu_max_freq, cpu) = policy->max;
+                per_cpu(cpu_cur_freq, cpu) = policy->cur;
                 mutex_unlock(&userspace_mutex);
                 break;
         }
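Note: the userspace-governor conversion swaps fixed NR_CPUS-sized arrays for per-CPU variables. DEFINE_PER_CPU() creates one instance of the variable per possible CPU, and per_cpu(var, cpu) names a specific CPU's copy, so storage tracks the CPUs the system can actually have instead of the compile-time maximum. The two fragments below contrast the access patterns; they are alternatives rather than code meant to coexist, and cpu_example_stat is a made-up variable:

        /* Before: a slot for every compile-time CPU, even on a 2-CPU box. */
        static unsigned int cpu_example_stat[NR_CPUS];
        cpu_example_stat[cpu] = 42;

        /* After: per-CPU storage, reached through the per_cpu() accessor. */
        static DEFINE_PER_CPU(unsigned int, cpu_example_stat);
        per_cpu(cpu_example_stat, cpu) = 42;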
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
index 25918f7dfd0f..0b624e927a6f 100644
--- a/drivers/firmware/dcdbas.c
+++ b/drivers/firmware/dcdbas.c
@@ -254,6 +254,7 @@ static ssize_t host_control_on_shutdown_store(struct device *dev,
 static int smi_request(struct smi_cmd *smi_cmd)
 {
         cpumask_t old_mask;
+        cpumask_of_cpu_ptr(new_mask, 0);
         int ret = 0;
 
         if (smi_cmd->magic != SMI_CMD_MAGIC) {
@@ -264,7 +265,7 @@ static int smi_request(struct smi_cmd *smi_cmd)
 
         /* SMI requires CPU 0 */
         old_mask = current->cpus_allowed;
-        set_cpus_allowed_ptr(current, &cpumask_of_cpu(0));
+        set_cpus_allowed_ptr(current, new_mask);
         if (smp_processor_id() != 0) {
                 dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n",
                         __func__);
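Note: dcdbas uses the one-step variant cpumask_of_cpu_ptr(v, cpu), which, judging by the call sites in this patch, declares v and points it at the mask for a single fixed CPU in one go; it suits code that pins to exactly one CPU and never re-targets the pointer. A sketch of the pin-to-CPU-0 idiom, with do_cpu0_work() as a hypothetical placeholder:

        cpumask_t old_mask = current->cpus_allowed;
        cpumask_of_cpu_ptr(new_mask, 0);        /* declare + aim at CPU 0's mask */

        set_cpus_allowed_ptr(current, new_mask);
        if (smp_processor_id() == 0)
                do_cpu0_work();                 /* hypothetical CPU-0-only work */
        set_cpus_allowed_ptr(current, &old_mask);       /* restore affinity */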
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 0792d930c481..7a64aa9b51b6 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -646,8 +646,8 @@ static inline int find_next_online_cpu(struct ehca_comp_pool *pool)
                 ehca_dmp(&cpu_online_map, sizeof(cpumask_t), "");
 
         spin_lock_irqsave(&pool->last_cpu_lock, flags);
-        cpu = next_cpu(pool->last_cpu, cpu_online_map);
-        if (cpu == NR_CPUS)
+        cpu = next_cpu_nr(pool->last_cpu, cpu_online_map);
+        if (cpu >= nr_cpu_ids)
                 cpu = first_cpu(cpu_online_map);
         pool->last_cpu = cpu;
         spin_unlock_irqrestore(&pool->last_cpu_lock, flags);
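Note: next_cpu_nr() is the single-step counterpart of the iterator change: it returns the next set bit in the mask after a given CPU, using nr_cpu_ids rather than NR_CPUS as its end-of-mask sentinel, which is why the wrap-around test changes too. A sketch of the resulting round-robin idiom (illustrative only):

        /* Advance 'last' to the next online CPU, wrapping past the end. */
        int next = next_cpu_nr(last, cpu_online_map);
        if (next >= nr_cpu_ids)                 /* no set bit after 'last' */
                next = first_cpu(cpu_online_map);       /* wrap to the first one */
        last = next;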
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index 08256ed0d9a6..579b01ff82d4 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -229,10 +229,11 @@ xpc_hb_checker(void *ignore)
         int last_IRQ_count = 0;
         int new_IRQ_count;
         int force_IRQ = 0;
+        cpumask_of_cpu_ptr(cpumask, XPC_HB_CHECK_CPU);
 
         /* this thread was marked active by xpc_hb_init() */
 
-        set_cpus_allowed(current, cpumask_of_cpu(XPC_HB_CHECK_CPU));
+        set_cpus_allowed_ptr(current, cpumask);
 
         /* set our heartbeating to other partitions into motion */
         xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);