author:    Dave Jones <davej@redhat.com>    2005-09-23 14:10:42 -0400
committer: Dave Jones <davej@redhat.com>    2005-09-23 14:10:42 -0400
commit:    b9111b7b7f46b0ec1ccb451d60ec439b92e4df65
tree:      4567cbc921c39d5e2af1819ba0e3a880c1890cef
parent:    df8b59be0976c56820453730078bef99a8d1dbda
[CPUFREQ] Remove preempt_disable from powernow-k8

From reading the code, my understanding is that powernow-k8 uses preempt_disable() to ensure that driver->target doesn't migrate across CPUs while it is accessing per-processor registers. However, set_cpus_allowed() already provides this guarantee. Additionally, the schedule() calls that follow set_cpus_allowed() can be removed, since set_cpus_allowed() ensures that the caller is executing on the target processor on return.

Signed-off-by: Zwane Mwaikambo <zwane@arm.linux.org.uk>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Dave Jones <davej@redhat.com>
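For reference, the pattern the driver relies on after this change is: save the caller's affinity mask, bind the task to the target CPU with set_cpus_allowed(), verify the binding with smp_processor_id(), access the per-processor registers, then restore the original mask. A minimal sketch of that pattern follows, using the 2005-era API seen in the diff below; the function name run_on_cpu and the placeholder register access are illustrative only, not the driver's actual code.

static int run_on_cpu(unsigned int cpu)
{
	cpumask_t oldmask;
	int rc = -EIO;

	/* Save the current affinity and bind to the target CPU. On return
	 * from set_cpus_allowed() we are already running on 'cpu', so no
	 * schedule() call (and no preempt_disable()) is needed. */
	oldmask = current->cpus_allowed;
	set_cpus_allowed(current, cpumask_of_cpu(cpu));

	if (smp_processor_id() != cpu) {
		printk(KERN_ERR "limiting to cpu %u failed\n", cpu);
		goto out;
	}

	/* ... access per-processor MSRs here; the task cannot migrate
	 * because its affinity mask contains only 'cpu' ... */
	rc = 0;

out:
	/* Restore the caller's original affinity mask. */
	set_cpus_allowed(current, oldmask);
	return rc;
}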
Diffstat (limited to 'arch/i386')
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/powernow-k8.c | 16 ++--------------
1 file changed, 2 insertions(+), 14 deletions(-)
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
index ab6e0611303d..e2e03eebedf6 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
@@ -453,7 +453,6 @@ static int check_supported_cpu(unsigned int cpu)
 
 	oldmask = current->cpus_allowed;
 	set_cpus_allowed(current, cpumask_of_cpu(cpu));
-	schedule();
 
 	if (smp_processor_id() != cpu) {
 		printk(KERN_ERR "limiting to cpu %u failed\n", cpu);
@@ -488,9 +487,7 @@ static int check_supported_cpu(unsigned int cpu)
 
 out:
 	set_cpus_allowed(current, oldmask);
-	schedule();
 	return rc;
-
 }
 
 static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst, u8 maxvid)
@@ -904,7 +901,6 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
 	/* only run on specific CPU from here on */
 	oldmask = current->cpus_allowed;
 	set_cpus_allowed(current, cpumask_of_cpu(pol->cpu));
-	schedule();
 
 	if (smp_processor_id() != pol->cpu) {
 		printk(KERN_ERR "limiting to cpu %u failed\n", pol->cpu);
@@ -959,8 +955,6 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
 
 err_out:
 	set_cpus_allowed(current, oldmask);
-	schedule();
-
 	return ret;
 }
 
@@ -1017,7 +1011,6 @@ static int __init powernowk8_cpu_init(struct cpufreq_policy *pol)
 	/* only run on specific CPU from here on */
 	oldmask = current->cpus_allowed;
 	set_cpus_allowed(current, cpumask_of_cpu(pol->cpu));
-	schedule();
 
 	if (smp_processor_id() != pol->cpu) {
 		printk(KERN_ERR "limiting to cpu %u failed\n", pol->cpu);
@@ -1036,7 +1029,6 @@ static int __init powernowk8_cpu_init(struct cpufreq_policy *pol)
 
 	/* run on any CPU again */
 	set_cpus_allowed(current, oldmask);
-	schedule();
 
 	pol->governor = CPUFREQ_DEFAULT_GOVERNOR;
 	pol->cpus = cpu_core_map[pol->cpu];
@@ -1071,7 +1063,6 @@ static int __init powernowk8_cpu_init(struct cpufreq_policy *pol)
 
 err_out:
 	set_cpus_allowed(current, oldmask);
-	schedule();
 	powernow_k8_cpu_exit_acpi(data);
 
 	kfree(data);
@@ -1107,17 +1098,14 @@ static unsigned int powernowk8_get (unsigned int cpu)
 		set_cpus_allowed(current, oldmask);
 		return 0;
 	}
-	preempt_disable();
-
+
 	if (query_current_values_with_pending_wait(data))
 		goto out;
 
 	khz = find_khz_freq_from_fid(data->currfid);
 
- out:
-	preempt_enable_no_resched();
+out:
 	set_cpus_allowed(current, oldmask);
-
 	return khz;
 }
 