 arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c       | 10
 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c        |  8
 arch/x86/kernel/cpu/cpufreq/powernow-k8.c        |  6
 arch/x86/kernel/cpu/cpufreq/powernow-k8.h        |  2
 arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c | 14
 arch/x86/kernel/cpu/cpufreq/speedstep-ich.c      | 18
 drivers/cpufreq/cpufreq.c                        | 42
 drivers/cpufreq/cpufreq_conservative.c           |  2
 drivers/cpufreq/cpufreq_ondemand.c               |  4
 include/linux/cpufreq.h                          |  4
 10 files changed, 62 insertions(+), 48 deletions(-)
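
This patch converts struct cpufreq_policy's `cpus' and `related_cpus' members
from cpumask_t to cpumask_var_t. With CONFIG_CPUMASK_OFFSTACK=y a cpumask_var_t
is a pointer that must be allocated, so struct assignment and the `&' taken on
the old members go away, and allocation failure has to be handled. A minimal
sketch of the resulting lifecycle, assuming the post-patch struct layout
(example_init/example_exit are hypothetical helpers, not part of the patch):

	#include <linux/cpufreq.h>
	#include <linux/cpumask.h>
	#include <linux/gfp.h>

	static int example_init(struct cpufreq_policy *policy, unsigned int cpu)
	{
		/* old code could simply assign: policy->cpus = cpumask_of_cpu(cpu); */
		if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
			return -ENOMEM;		/* allocation can now fail */
		cpumask_copy(policy->cpus, cpumask_of(cpu));
		return 0;
	}

	static void example_exit(struct cpufreq_policy *policy)
	{
		free_cpumask_var(policy->cpus);	/* pairs with alloc_cpumask_var() */
	}
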
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 28102ad1a363..0b31939862d6 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -411,7 +411,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 
 #ifdef CONFIG_HOTPLUG_CPU
 	/* cpufreq holds the hotplug lock, so we are safe from here on */
-	cpus_and(online_policy_cpus, cpu_online_map, policy->cpus);
+	cpumask_and(&online_policy_cpus, cpu_online_mask, policy->cpus);
 #else
 	online_policy_cpus = policy->cpus;
 #endif
@@ -626,15 +626,15 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	 */
 	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
 	    policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
-		cpumask_copy(&policy->cpus, perf->shared_cpu_map);
+		cpumask_copy(policy->cpus, perf->shared_cpu_map);
 	}
-	cpumask_copy(&policy->related_cpus, perf->shared_cpu_map);
+	cpumask_copy(policy->related_cpus, perf->shared_cpu_map);
 
 #ifdef CONFIG_SMP
 	dmi_check_system(sw_any_bug_dmi_table);
-	if (bios_with_sw_any_bug && cpus_weight(policy->cpus) == 1) {
+	if (bios_with_sw_any_bug && cpumask_weight(policy->cpus) == 1) {
 		policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
-		policy->cpus = per_cpu(cpu_core_map, cpu);
+		cpumask_copy(policy->cpus, cpu_core_mask(cpu));
 	}
 #endif
 
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
index beea4466b063..b585e04cbc9e 100644
--- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
@@ -122,7 +122,7 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy,
 		return 0;
 
 	/* notifiers */
-	for_each_cpu_mask_nr(i, policy->cpus) {
+	for_each_cpu(i, policy->cpus) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 	}
@@ -130,11 +130,11 @@ static int cpufreq_p4_target(struct cpufreq_policy *policy,
 	/* run on each logical CPU, see section 13.15.3 of IA32 Intel Architecture Software
 	 * Developer's Manual, Volume 3
 	 */
-	for_each_cpu_mask_nr(i, policy->cpus)
+	for_each_cpu(i, policy->cpus)
 		cpufreq_p4_setdc(i, p4clockmod_table[newstate].index);
 
 	/* notifiers */
-	for_each_cpu_mask_nr(i, policy->cpus) {
+	for_each_cpu(i, policy->cpus) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 	}
@@ -203,7 +203,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
 	unsigned int i;
 
 #ifdef CONFIG_SMP
-	policy->cpus = per_cpu(cpu_sibling_map, policy->cpu);
+	cpumask_copy(policy->cpus, &per_cpu(cpu_sibling_map, policy->cpu));
 #endif
 
 	/* Errata workaround */
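
The three loops converted above show the iterator change in isolation:
for_each_cpu() takes a const struct cpumask * where the old
for_each_cpu_mask_nr() took the cpumask_t itself. A hedged sketch of the new
form (notify_all is a hypothetical helper, not in the patch; the two-argument
cpufreq_notify_transition() matches this kernel version):

	#include <linux/cpufreq.h>
	#include <linux/cpumask.h>

	static void notify_all(struct cpufreq_freqs *freqs,
			       const struct cpumask *cpus, unsigned int state)
	{
		unsigned int i;

		for_each_cpu(i, cpus) {		/* iterates over set bits only */
			freqs->cpu = i;
			cpufreq_notify_transition(freqs, state);
		}
	}
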
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index c3c9adbaa26f..5c28b37dea11 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -1199,10 +1199,10 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	set_cpus_allowed_ptr(current, &oldmask);
 
 	if (cpu_family == CPU_HW_PSTATE)
-		pol->cpus = cpumask_of_cpu(pol->cpu);
+		cpumask_copy(pol->cpus, cpumask_of(pol->cpu));
 	else
-		pol->cpus = per_cpu(cpu_core_map, pol->cpu);
-	data->available_cores = &(pol->cpus);
+		cpumask_copy(pol->cpus, &per_cpu(cpu_core_map, pol->cpu));
+	data->available_cores = pol->cpus;
 
 	/* Take a crude guess here.
 	 * That guess was in microseconds, so multiply with 1000 */
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
index 65cfb5d7f77f..8ecc75b6c7c3 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
@@ -53,7 +53,7 @@ struct powernow_k8_data {
 	/* we need to keep track of associated cores, but let cpufreq
 	 * handle hotplug events - so just point at cpufreq pol->cpus
 	 * structure */
-	cpumask_t *available_cores;
+	struct cpumask *available_cores;
 };
 
 
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
index d2cc4991cbaa..f08998278a3a 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -492,8 +492,8 @@ static int centrino_target (struct cpufreq_policy *policy,
 	}
 
 	first_cpu = 1;
-	for_each_cpu_mask_nr(j, policy->cpus) {
-		const cpumask_t *mask;
+	for_each_cpu(j, policy->cpus) {
+		const struct cpumask *mask;
 
 		/* cpufreq holds the hotplug lock, so we are safe here */
 		if (!cpu_online(j))
@@ -504,9 +504,9 @@ static int centrino_target (struct cpufreq_policy *policy,
 		 * Make sure we are running on CPU that wants to change freq
 		 */
 		if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
-			mask = &policy->cpus;
+			mask = policy->cpus;
 		else
-			mask = &cpumask_of_cpu(j);
+			mask = cpumask_of(j);
 
 		set_cpus_allowed_ptr(current, mask);
 		preempt_disable();
@@ -538,7 +538,7 @@ static int centrino_target (struct cpufreq_policy *policy,
 		dprintk("target=%dkHz old=%d new=%d msr=%04x\n",
 			target_freq, freqs.old, freqs.new, msr);
 
-		for_each_cpu_mask_nr(k, policy->cpus) {
+		for_each_cpu(k, policy->cpus) {
 			if (!cpu_online(k))
 				continue;
 			freqs.cpu = k;
@@ -563,7 +563,7 @@ static int centrino_target (struct cpufreq_policy *policy,
 		preempt_enable();
 	}
 
-	for_each_cpu_mask_nr(k, policy->cpus) {
+	for_each_cpu(k, policy->cpus) {
 		if (!cpu_online(k))
 			continue;
 		freqs.cpu = k;
@@ -586,7 +586,7 @@ static int centrino_target (struct cpufreq_policy *policy,
 		tmp = freqs.new;
 		freqs.new = freqs.old;
 		freqs.old = tmp;
-		for_each_cpu_mask_nr(j, policy->cpus) {
+		for_each_cpu(j, policy->cpus) {
 			if (!cpu_online(j))
 				continue;
 			cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
index 04d0376b64b0..dedc1e98f168 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
@@ -229,7 +229,7 @@ static unsigned int speedstep_detect_chipset (void)
 	return 0;
 }
 
-static unsigned int _speedstep_get(const cpumask_t *cpus)
+static unsigned int _speedstep_get(const struct cpumask *cpus)
 {
 	unsigned int speed;
 	cpumask_t cpus_allowed;
@@ -244,7 +244,7 @@ static unsigned int _speedstep_get(const cpumask_t *cpus)
 
 static unsigned int speedstep_get(unsigned int cpu)
 {
-	return _speedstep_get(&cpumask_of_cpu(cpu));
+	return _speedstep_get(cpumask_of(cpu));
 }
 
 /**
@@ -267,7 +267,7 @@ static int speedstep_target (struct cpufreq_policy *policy,
 	if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0], target_freq, relation, &newstate))
 		return -EINVAL;
 
-	freqs.old = _speedstep_get(&policy->cpus);
+	freqs.old = _speedstep_get(policy->cpus);
 	freqs.new = speedstep_freqs[newstate].frequency;
 	freqs.cpu = policy->cpu;
 
@@ -279,20 +279,20 @@ static int speedstep_target (struct cpufreq_policy *policy,
 
 	cpus_allowed = current->cpus_allowed;
 
-	for_each_cpu_mask_nr(i, policy->cpus) {
+	for_each_cpu(i, policy->cpus) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 	}
 
 	/* switch to physical CPU where state is to be changed */
-	set_cpus_allowed_ptr(current, &policy->cpus);
+	set_cpus_allowed_ptr(current, policy->cpus);
 
 	speedstep_set_state(newstate);
 
 	/* allow to be run on all CPUs */
 	set_cpus_allowed_ptr(current, &cpus_allowed);
 
-	for_each_cpu_mask_nr(i, policy->cpus) {
+	for_each_cpu(i, policy->cpus) {
 		freqs.cpu = i;
 		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
 	}
@@ -322,11 +322,11 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
 
 	/* only run on CPU to be set, or on its sibling */
 #ifdef CONFIG_SMP
-	policy->cpus = per_cpu(cpu_sibling_map, policy->cpu);
+	cpumask_copy(policy->cpus, &per_cpu(cpu_sibling_map, policy->cpu));
 #endif
 
 	cpus_allowed = current->cpus_allowed;
-	set_cpus_allowed_ptr(current, &policy->cpus);
+	set_cpus_allowed_ptr(current, policy->cpus);
 
 	/* detect low and high frequency and transition latency */
 	result = speedstep_get_freqs(speedstep_processor,
@@ -339,7 +339,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
 		return result;
 
 	/* get current speed setting */
-	speed = _speedstep_get(&policy->cpus);
+	speed = _speedstep_get(policy->cpus);
 	if (!speed)
 		return -EIO;
 
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 01dde80597f7..b55cb67435bd 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -584,12 +584,12 @@ out:
 	return i;
 }
 
-static ssize_t show_cpus(cpumask_t mask, char *buf)
+static ssize_t show_cpus(const struct cpumask *mask, char *buf)
 {
 	ssize_t i = 0;
 	unsigned int cpu;
 
-	for_each_cpu_mask_nr(cpu, mask) {
+	for_each_cpu(cpu, mask) {
 		if (i)
 			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
 		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
@@ -606,7 +606,7 @@ static ssize_t show_cpus(cpumask_t mask, char *buf)
  */
 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
 {
-	if (cpus_empty(policy->related_cpus))
+	if (cpumask_empty(policy->related_cpus))
 		return show_cpus(policy->cpus, buf);
 	return show_cpus(policy->related_cpus, buf);
 }
@@ -806,9 +806,20 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 		ret = -ENOMEM;
 		goto nomem_out;
 	}
+	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL)) {
+		kfree(policy);
+		ret = -ENOMEM;
+		goto nomem_out;
+	}
+	if (!alloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) {
+		free_cpumask_var(policy->cpus);
+		kfree(policy);
+		ret = -ENOMEM;
+		goto nomem_out;
+	}
 
 	policy->cpu = cpu;
-	policy->cpus = cpumask_of_cpu(cpu);
+	cpumask_copy(policy->cpus, cpumask_of(cpu));
 
 	/* Initially set CPU itself as the policy_cpu */
 	per_cpu(policy_cpu, cpu) = cpu;
@@ -843,7 +854,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 	}
 #endif
 
-	for_each_cpu_mask_nr(j, policy->cpus) {
+	for_each_cpu(j, policy->cpus) {
 		if (cpu == j)
 			continue;
 
@@ -861,7 +872,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 			goto err_out_driver_exit;
 
 		spin_lock_irqsave(&cpufreq_driver_lock, flags);
-		managed_policy->cpus = policy->cpus;
+		cpumask_copy(managed_policy->cpus, policy->cpus);
 		per_cpu(cpufreq_cpu_data, cpu) = managed_policy;
 		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
@@ -916,14 +927,14 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 	}
 
 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
-	for_each_cpu_mask_nr(j, policy->cpus) {
+	for_each_cpu(j, policy->cpus) {
 		per_cpu(cpufreq_cpu_data, j) = policy;
 		per_cpu(policy_cpu, j) = policy->cpu;
 	}
 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
 	/* symlink affected CPUs */
-	for_each_cpu_mask_nr(j, policy->cpus) {
+	for_each_cpu(j, policy->cpus) {
 		if (j == cpu)
 			continue;
 		if (!cpu_online(j))
@@ -963,7 +974,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 
 err_out_unregister:
 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
-	for_each_cpu_mask_nr(j, policy->cpus)
+	for_each_cpu(j, policy->cpus)
 		per_cpu(cpufreq_cpu_data, j) = NULL;
 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
@@ -1024,7 +1035,7 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
 	 */
 	if (unlikely(cpu != data->cpu)) {
 		dprintk("removing link\n");
-		cpu_clear(cpu, data->cpus);
+		cpumask_clear_cpu(cpu, data->cpus);
 		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 		sysfs_remove_link(&sys_dev->kobj, "cpufreq");
 		cpufreq_cpu_put(data);
@@ -1045,8 +1056,8 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
 	 * per_cpu(cpufreq_cpu_data) while holding the lock, and remove
 	 * the sysfs links afterwards.
 	 */
-	if (unlikely(cpus_weight(data->cpus) > 1)) {
-		for_each_cpu_mask_nr(j, data->cpus) {
+	if (unlikely(cpumask_weight(data->cpus) > 1)) {
+		for_each_cpu(j, data->cpus) {
 			if (j == cpu)
 				continue;
 			per_cpu(cpufreq_cpu_data, j) = NULL;
@@ -1055,8 +1066,8 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
 
 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-	if (unlikely(cpus_weight(data->cpus) > 1)) {
-		for_each_cpu_mask_nr(j, data->cpus) {
+	if (unlikely(cpumask_weight(data->cpus) > 1)) {
+		for_each_cpu(j, data->cpus) {
 			if (j == cpu)
 				continue;
 			dprintk("removing link for cpu %u\n", j);
@@ -1090,7 +1101,10 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
 	if (cpufreq_driver->exit)
 		cpufreq_driver->exit(data);
 
+	free_cpumask_var(data->related_cpus);
+	free_cpumask_var(data->cpus);
 	kfree(data);
+	per_cpu(cpufreq_cpu_data, cpu) = NULL;
 
 	cpufreq_debug_enable_ratelimit();
 	return 0;
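
The cpufreq.c hunks above add the allocations with error unwinding in
cpufreq_add_dev() and the matching frees in __cpufreq_remove_dev(). A compact
sketch of that pairing with generic names (two_masks is illustrative, not from
the patch): each alloc_cpumask_var() failure releases everything allocated
before it, and teardown frees in reverse order of allocation.

	#include <linux/cpumask.h>
	#include <linux/slab.h>

	struct two_masks {
		cpumask_var_t a;
		cpumask_var_t b;
	};

	static struct two_masks *two_masks_alloc(void)
	{
		struct two_masks *p = kzalloc(sizeof(*p), GFP_KERNEL);

		if (!p)
			return NULL;
		if (!alloc_cpumask_var(&p->a, GFP_KERNEL))
			goto err_free_p;
		if (!alloc_cpumask_var(&p->b, GFP_KERNEL))
			goto err_free_a;
		return p;

	err_free_a:
		free_cpumask_var(p->a);
	err_free_p:
		kfree(p);
		return NULL;
	}

	static void two_masks_free(struct two_masks *p)
	{
		free_cpumask_var(p->b);	/* reverse order of allocation */
		free_cpumask_var(p->a);
		kfree(p);
	}
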
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index e2657837d954..0320962c4ec5 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -498,7 +498,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			return rc;
 		}
 
-		for_each_cpu_mask_nr(j, policy->cpus) {
+		for_each_cpu(j, policy->cpus) {
 			struct cpu_dbs_info_s *j_dbs_info;
 			j_dbs_info = &per_cpu(cpu_dbs_info, j);
 			j_dbs_info->cur_policy = policy;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 2ab3c12b88af..6a2b036c9389 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -400,7 +400,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 	/* Get Absolute Load - in terms of freq */
 	max_load_freq = 0;
 
-	for_each_cpu_mask_nr(j, policy->cpus) {
+	for_each_cpu(j, policy->cpus) {
 		struct cpu_dbs_info_s *j_dbs_info;
 		cputime64_t cur_wall_time, cur_idle_time;
 		unsigned int idle_time, wall_time;
@@ -568,7 +568,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			return rc;
 		}
 
-		for_each_cpu_mask_nr(j, policy->cpus) {
+		for_each_cpu(j, policy->cpus) {
 			struct cpu_dbs_info_s *j_dbs_info;
 			j_dbs_info = &per_cpu(cpu_dbs_info, j);
 			j_dbs_info->cur_policy = policy;
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 484b3abf61bb..384b38d3e8e2 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -80,8 +80,8 @@ struct cpufreq_real_policy {
 };
 
 struct cpufreq_policy {
-	cpumask_t		cpus;	/* CPUs requiring sw coordination */
-	cpumask_t		related_cpus; /* CPUs with any coordination */
+	cpumask_var_t		cpus;	/* CPUs requiring sw coordination */
+	cpumask_var_t		related_cpus; /* CPUs with any coordination */
 	unsigned int		shared_type; /* ANY or ALL affected CPUs
 						should set cpufreq */
 	unsigned int		cpu;	/* cpu nr of registered CPU */
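
For reference, the reason the member type can change without rewriting every
reader: cpumask_var_t is defined (paraphrased from <linux/cpumask.h>) so that
the !CONFIG_CPUMASK_OFFSTACK case is an array of one struct cpumask, which
decays to the same pointer the OFFSTACK case stores, and alloc_cpumask_var()
trivially succeeds there:

	#ifdef CONFIG_CPUMASK_OFFSTACK
	typedef struct cpumask *cpumask_var_t;	/* real pointer, allocated */
	#else
	typedef struct cpumask cpumask_var_t[1];	/* decays to struct cpumask * */
	#endif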