about summary refs log tree commit diff stats
path: root/drivers/cpufreq
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2009-01-10 09:12:18 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2009-01-10 09:12:18 -0500
commit4e9b1c184cadbece3694603de5f880b6e35bd7a7 (patch)
tree8ae2ab8a4eaab4d46b4460284fd5ee475ce9a42d /drivers/cpufreq
parent0176260fc30842e358cf34afa7dcd9413db44822 (diff)
parent36c401a44abcc389a00f9cd14892c9cf9bf0780d (diff)
Merge branch 'cpus4096-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'cpus4096-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  [IA64] fix typo in cpumask_of_pcibus()
  x86: fix x86_32 builds for summit and es7000 arch's
  cpumask: use work_on_cpu in acpi-cpufreq.c for read_measured_perf_ctrs
  cpumask: use work_on_cpu in acpi-cpufreq.c for drv_read and drv_write
  cpumask: use cpumask_var_t in acpi-cpufreq.c
  cpumask: use work_on_cpu in acpi/cstate.c
  cpumask: convert struct cpufreq_policy to cpumask_var_t
  cpumask: replace CPUMASK_ALLOC etc with cpumask_var_t
  x86: cleanup remaining cpumask_t ops in smpboot code
  cpumask: update pci_bus_show_cpuaffinity to use new cpumask API
  cpumask: update local_cpus_show to use new cpumask API
  ia64: cpumask fix for is_affinity_mask_valid()
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--  drivers/cpufreq/cpufreq.c               | 42
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c  |  2
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c      |  4
3 files changed, 31 insertions(+), 17 deletions(-)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 01dde80597f7..b55cb67435bd 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -584,12 +584,12 @@ out:
584 return i; 584 return i;
585} 585}
586 586
587static ssize_t show_cpus(cpumask_t mask, char *buf) 587static ssize_t show_cpus(const struct cpumask *mask, char *buf)
588{ 588{
589 ssize_t i = 0; 589 ssize_t i = 0;
590 unsigned int cpu; 590 unsigned int cpu;
591 591
592 for_each_cpu_mask_nr(cpu, mask) { 592 for_each_cpu(cpu, mask) {
593 if (i) 593 if (i)
594 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " "); 594 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
595 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu); 595 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
@@ -606,7 +606,7 @@ static ssize_t show_cpus(cpumask_t mask, char *buf)
606 */ 606 */
607static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf) 607static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
608{ 608{
609 if (cpus_empty(policy->related_cpus)) 609 if (cpumask_empty(policy->related_cpus))
610 return show_cpus(policy->cpus, buf); 610 return show_cpus(policy->cpus, buf);
611 return show_cpus(policy->related_cpus, buf); 611 return show_cpus(policy->related_cpus, buf);
612} 612}
@@ -806,9 +806,20 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
806 ret = -ENOMEM; 806 ret = -ENOMEM;
807 goto nomem_out; 807 goto nomem_out;
808 } 808 }
809 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL)) {
810 kfree(policy);
811 ret = -ENOMEM;
812 goto nomem_out;
813 }
814 if (!alloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) {
815 free_cpumask_var(policy->cpus);
816 kfree(policy);
817 ret = -ENOMEM;
818 goto nomem_out;
819 }
809 820
810 policy->cpu = cpu; 821 policy->cpu = cpu;
811 policy->cpus = cpumask_of_cpu(cpu); 822 cpumask_copy(policy->cpus, cpumask_of(cpu));
812 823
813 /* Initially set CPU itself as the policy_cpu */ 824 /* Initially set CPU itself as the policy_cpu */
814 per_cpu(policy_cpu, cpu) = cpu; 825 per_cpu(policy_cpu, cpu) = cpu;
@@ -843,7 +854,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
843 } 854 }
844#endif 855#endif
845 856
846 for_each_cpu_mask_nr(j, policy->cpus) { 857 for_each_cpu(j, policy->cpus) {
847 if (cpu == j) 858 if (cpu == j)
848 continue; 859 continue;
849 860
@@ -861,7 +872,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
861 goto err_out_driver_exit; 872 goto err_out_driver_exit;
862 873
863 spin_lock_irqsave(&cpufreq_driver_lock, flags); 874 spin_lock_irqsave(&cpufreq_driver_lock, flags);
864 managed_policy->cpus = policy->cpus; 875 cpumask_copy(managed_policy->cpus, policy->cpus);
865 per_cpu(cpufreq_cpu_data, cpu) = managed_policy; 876 per_cpu(cpufreq_cpu_data, cpu) = managed_policy;
866 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 877 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
867 878
@@ -916,14 +927,14 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
916 } 927 }
917 928
918 spin_lock_irqsave(&cpufreq_driver_lock, flags); 929 spin_lock_irqsave(&cpufreq_driver_lock, flags);
919 for_each_cpu_mask_nr(j, policy->cpus) { 930 for_each_cpu(j, policy->cpus) {
920 per_cpu(cpufreq_cpu_data, j) = policy; 931 per_cpu(cpufreq_cpu_data, j) = policy;
921 per_cpu(policy_cpu, j) = policy->cpu; 932 per_cpu(policy_cpu, j) = policy->cpu;
922 } 933 }
923 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 934 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
924 935
925 /* symlink affected CPUs */ 936 /* symlink affected CPUs */
926 for_each_cpu_mask_nr(j, policy->cpus) { 937 for_each_cpu(j, policy->cpus) {
927 if (j == cpu) 938 if (j == cpu)
928 continue; 939 continue;
929 if (!cpu_online(j)) 940 if (!cpu_online(j))
@@ -963,7 +974,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
963 974
964err_out_unregister: 975err_out_unregister:
965 spin_lock_irqsave(&cpufreq_driver_lock, flags); 976 spin_lock_irqsave(&cpufreq_driver_lock, flags);
966 for_each_cpu_mask_nr(j, policy->cpus) 977 for_each_cpu(j, policy->cpus)
967 per_cpu(cpufreq_cpu_data, j) = NULL; 978 per_cpu(cpufreq_cpu_data, j) = NULL;
968 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 979 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
969 980
@@ -1024,7 +1035,7 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
1024 */ 1035 */
1025 if (unlikely(cpu != data->cpu)) { 1036 if (unlikely(cpu != data->cpu)) {
1026 dprintk("removing link\n"); 1037 dprintk("removing link\n");
1027 cpu_clear(cpu, data->cpus); 1038 cpumask_clear_cpu(cpu, data->cpus);
1028 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 1039 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1029 sysfs_remove_link(&sys_dev->kobj, "cpufreq"); 1040 sysfs_remove_link(&sys_dev->kobj, "cpufreq");
1030 cpufreq_cpu_put(data); 1041 cpufreq_cpu_put(data);
@@ -1045,8 +1056,8 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
1045 * per_cpu(cpufreq_cpu_data) while holding the lock, and remove 1056 * per_cpu(cpufreq_cpu_data) while holding the lock, and remove
1046 * the sysfs links afterwards. 1057 * the sysfs links afterwards.
1047 */ 1058 */
1048 if (unlikely(cpus_weight(data->cpus) > 1)) { 1059 if (unlikely(cpumask_weight(data->cpus) > 1)) {
1049 for_each_cpu_mask_nr(j, data->cpus) { 1060 for_each_cpu(j, data->cpus) {
1050 if (j == cpu) 1061 if (j == cpu)
1051 continue; 1062 continue;
1052 per_cpu(cpufreq_cpu_data, j) = NULL; 1063 per_cpu(cpufreq_cpu_data, j) = NULL;
@@ -1055,8 +1066,8 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
1055 1066
1056 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 1067 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1057 1068
1058 if (unlikely(cpus_weight(data->cpus) > 1)) { 1069 if (unlikely(cpumask_weight(data->cpus) > 1)) {
1059 for_each_cpu_mask_nr(j, data->cpus) { 1070 for_each_cpu(j, data->cpus) {
1060 if (j == cpu) 1071 if (j == cpu)
1061 continue; 1072 continue;
1062 dprintk("removing link for cpu %u\n", j); 1073 dprintk("removing link for cpu %u\n", j);
@@ -1090,7 +1101,10 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
1090 if (cpufreq_driver->exit) 1101 if (cpufreq_driver->exit)
1091 cpufreq_driver->exit(data); 1102 cpufreq_driver->exit(data);
1092 1103
1104 free_cpumask_var(data->related_cpus);
1105 free_cpumask_var(data->cpus);
1093 kfree(data); 1106 kfree(data);
1107 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1094 1108
1095 cpufreq_debug_enable_ratelimit(); 1109 cpufreq_debug_enable_ratelimit();
1096 return 0; 1110 return 0;
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index e2657837d954..0320962c4ec5 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -498,7 +498,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
498 return rc; 498 return rc;
499 } 499 }
500 500
501 for_each_cpu_mask_nr(j, policy->cpus) { 501 for_each_cpu(j, policy->cpus) {
502 struct cpu_dbs_info_s *j_dbs_info; 502 struct cpu_dbs_info_s *j_dbs_info;
503 j_dbs_info = &per_cpu(cpu_dbs_info, j); 503 j_dbs_info = &per_cpu(cpu_dbs_info, j);
504 j_dbs_info->cur_policy = policy; 504 j_dbs_info->cur_policy = policy;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 2ab3c12b88af..6a2b036c9389 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -400,7 +400,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
400 /* Get Absolute Load - in terms of freq */ 400 /* Get Absolute Load - in terms of freq */
401 max_load_freq = 0; 401 max_load_freq = 0;
402 402
403 for_each_cpu_mask_nr(j, policy->cpus) { 403 for_each_cpu(j, policy->cpus) {
404 struct cpu_dbs_info_s *j_dbs_info; 404 struct cpu_dbs_info_s *j_dbs_info;
405 cputime64_t cur_wall_time, cur_idle_time; 405 cputime64_t cur_wall_time, cur_idle_time;
406 unsigned int idle_time, wall_time; 406 unsigned int idle_time, wall_time;
@@ -568,7 +568,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
568 return rc; 568 return rc;
569 } 569 }
570 570
571 for_each_cpu_mask_nr(j, policy->cpus) { 571 for_each_cpu(j, policy->cpus) {
572 struct cpu_dbs_info_s *j_dbs_info; 572 struct cpu_dbs_info_s *j_dbs_info;
573 j_dbs_info = &per_cpu(cpu_dbs_info, j); 573 j_dbs_info = &per_cpu(cpu_dbs_info, j);
574 j_dbs_info->cur_policy = policy; 574 j_dbs_info->cur_policy = policy;