author     Linus Torvalds <torvalds@linux-foundation.org>  2009-06-17 12:51:50 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-06-17 12:51:50 -0400
commit     c30938d59e7468259855da91a885b19e8044b5f4 (patch)
tree       15fa3b7c4696947d43702273291398a91232f644 /arch
parent     aa2638a210ab0d7c6702cd54315365785fce326c (diff)
parent     8e7c25971b1590776a90b249de3d859dd45e7414 (diff)
Merge branch 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq
* 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq:
[CPUFREQ] cpumask: new cpumask operators for arch/x86/kernel/cpu/cpufreq/powernow-k8.c
[CPUFREQ] cpumask: avoid playing with cpus_allowed in powernow-k8.c
[CPUFREQ] cpumask: avoid cpumask games in arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
[CPUFREQ] cpumask: avoid playing with cpus_allowed in speedstep-ich.c
[CPUFREQ] powernow-k8: get drv data for correct CPU
[CPUFREQ] powernow-k8: read P-state from HW
[CPUFREQ] reduce scope of ACPI_PSS_BIOS_BUG_MSG[]
[CPUFREQ] Clean up convoluted code in arch/x86/kernel/tsc.c:time_cpufreq_notifier()
[CPUFREQ] minor correction to cpu-freq documentation
[CPUFREQ] powernow-k8.c: mess cleanup
[CPUFREQ] Only set sampling_rate_max deprecated, sampling_rate_min is useful
[CPUFREQ] powernow-k8: Set transition latency to 1 if ACPI tables export 0
[CPUFREQ] ondemand: Uncouple minimal sampling rate from HZ in NO_HZ case
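The common thread in the cpumask patches merged above: rather than temporarily rebinding the current task with set_cpus_allowed_ptr() to reach another CPU's MSRs, the work is pushed to the target CPU with smp_call_function_single(), which runs a callback there and can wait for it to finish. A minimal sketch of the pattern, assuming a hypothetical per-CPU helper do_percpu_query() (not from these patches):

    #include <linux/smp.h>

    /* Runs on the target CPU in IPI context: must not sleep, and
     * results go back through the opaque argument. */
    static void query_on_cpu(void *_err)
    {
            int *err = _err;

            *err = do_percpu_query();       /* hypothetical per-CPU work */
    }

    static int query_cpu(unsigned int cpu)
    {
            int err;

            /* wait=1: block until query_on_cpu() has run on 'cpu' */
            smp_call_function_single(cpu, query_on_cpu, &err, 1);
            return err;
    }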
Diffstat (limited to 'arch')
-rw-r--r--   arch/x86/kernel/cpu/cpufreq/powernow-k8.c          191
-rw-r--r--   arch/x86/kernel/cpu/cpufreq/powernow-k8.h           11
-rw-r--r--   arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c    60
-rw-r--r--   arch/x86/kernel/cpu/cpufreq/speedstep-ich.c         93
-rw-r--r--   arch/x86/kernel/cpu/cpufreq/speedstep-lib.c          1
-rw-r--r--   arch/x86/kernel/tsc.c                                8
6 files changed, 175 insertions(+), 189 deletions(-)
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index cf52215d9eb1..81cbe64ed6b4 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -1,3 +1,4 @@
+
 /*
  * (c) 2003-2006 Advanced Micro Devices, Inc.
  * Your use of this code is subject to the terms and conditions of the
@@ -117,20 +118,17 @@ static int query_current_values_with_pending_wait(struct powernow_k8_data *data)
         u32 i = 0;

         if (cpu_family == CPU_HW_PSTATE) {
-                if (data->currpstate == HW_PSTATE_INVALID) {
-                        /* read (initial) hw pstate if not yet set */
-                        rdmsr(MSR_PSTATE_STATUS, lo, hi);
-                        i = lo & HW_PSTATE_MASK;
-
-                        /*
-                         * a workaround for family 11h erratum 311 might cause
-                         * an "out-of-range Pstate" if the core is in Pstate-0
-                         */
-                        if (i >= data->numps)
-                                data->currpstate = HW_PSTATE_0;
-                        else
-                                data->currpstate = i;
-                }
+                rdmsr(MSR_PSTATE_STATUS, lo, hi);
+                i = lo & HW_PSTATE_MASK;
+                data->currpstate = i;
+
+                /*
+                 * a workaround for family 11h erratum 311 might cause
+                 * an "out-of-range Pstate" if the core is in Pstate-0
+                 */
+                if ((boot_cpu_data.x86 == 0x11) && (i >= data->numps))
+                        data->currpstate = HW_PSTATE_0;
+
                 return 0;
         }
         do {
@@ -510,41 +508,34 @@ static int core_voltage_post_transition(struct powernow_k8_data *data,
         return 0;
 }

-static int check_supported_cpu(unsigned int cpu)
+static void check_supported_cpu(void *_rc)
 {
-        cpumask_t oldmask;
         u32 eax, ebx, ecx, edx;
-        unsigned int rc = 0;
-
-        oldmask = current->cpus_allowed;
-        set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+        int *rc = _rc;

-        if (smp_processor_id() != cpu) {
-                printk(KERN_ERR PFX "limiting to cpu %u failed\n", cpu);
-                goto out;
-        }
+        *rc = -ENODEV;

         if (current_cpu_data.x86_vendor != X86_VENDOR_AMD)
-                goto out;
+                return;

         eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
         if (((eax & CPUID_XFAM) != CPUID_XFAM_K8) &&
             ((eax & CPUID_XFAM) < CPUID_XFAM_10H))
-                goto out;
+                return;

         if ((eax & CPUID_XFAM) == CPUID_XFAM_K8) {
                 if (((eax & CPUID_USE_XFAM_XMOD) != CPUID_USE_XFAM_XMOD) ||
                     ((eax & CPUID_XMOD) > CPUID_XMOD_REV_MASK)) {
                         printk(KERN_INFO PFX
                                "Processor cpuid %x not supported\n", eax);
-                        goto out;
+                        return;
                 }

                 eax = cpuid_eax(CPUID_GET_MAX_CAPABILITIES);
                 if (eax < CPUID_FREQ_VOLT_CAPABILITIES) {
                         printk(KERN_INFO PFX
                                "No frequency change capabilities detected\n");
-                        goto out;
+                        return;
                 }

                 cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
@@ -552,21 +543,17 @@ static int check_supported_cpu(unsigned int cpu)
             != P_STATE_TRANSITION_CAPABLE) {
                         printk(KERN_INFO PFX
                                "Power state transitions not supported\n");
-                        goto out;
+                        return;
                 }
         } else { /* must be a HW Pstate capable processor */
                 cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
                 if ((edx & USE_HW_PSTATE) == USE_HW_PSTATE)
                         cpu_family = CPU_HW_PSTATE;
                 else
-                        goto out;
+                        return;
         }

-        rc = 1;
-
-out:
-        set_cpus_allowed_ptr(current, &oldmask);
-        return rc;
+        *rc = 0;
 }

 static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst,
@@ -823,13 +810,14 @@ static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data,
         if (!data->acpi_data.state_count || (cpu_family == CPU_HW_PSTATE))
                 return;

-        control = data->acpi_data.states[index].control; data->irt = (control
-                >> IRT_SHIFT) & IRT_MASK; data->rvo = (control >>
-                        RVO_SHIFT) & RVO_MASK; data->exttype = (control
-                                >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK;
-        data->plllock = (control >> PLL_L_SHIFT) & PLL_L_MASK; data->vidmvs = 1
-                << ((control >> MVS_SHIFT) & MVS_MASK); data->vstable =
-                        (control >> VST_SHIFT) & VST_MASK; }
+        control = data->acpi_data.states[index].control;
+        data->irt = (control >> IRT_SHIFT) & IRT_MASK;
+        data->rvo = (control >> RVO_SHIFT) & RVO_MASK;
+        data->exttype = (control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK;
+        data->plllock = (control >> PLL_L_SHIFT) & PLL_L_MASK;
+        data->vidmvs = 1 << ((control >> MVS_SHIFT) & MVS_MASK);
+        data->vstable = (control >> VST_SHIFT) & VST_MASK;
+}

 static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 {
@@ -1046,6 +1034,19 @@ static int get_transition_latency(struct powernow_k8_data *data)
                 if (cur_latency > max_latency)
                         max_latency = cur_latency;
         }
+        if (max_latency == 0) {
+                /*
+                 * Fam 11h always returns 0 as transition latency.
+                 * This is intended and means "very fast". While cpufreq core
+                 * and governors currently can handle that gracefully, better
+                 * set it to 1 to avoid problems in the future.
+                 * For all others it's a BIOS bug.
+                 */
+                if (boot_cpu_data.x86 != 0x11)
+                        printk(KERN_ERR FW_WARN PFX "Invalid zero transition "
+                               "latency\n");
+                max_latency = 1;
+        }
         /* value in usecs, needs to be in nanoseconds */
         return 1000 * max_latency;
 }
@@ -1093,7 +1094,7 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
         freqs.old = find_khz_freq_from_fid(data->currfid);
         freqs.new = find_khz_freq_from_fid(fid);

-        for_each_cpu_mask_nr(i, *(data->available_cores)) {
+        for_each_cpu(i, data->available_cores) {
                 freqs.cpu = i;
                 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
         }
@@ -1101,7 +1102,7 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
         res = transition_fid_vid(data, fid, vid);
         freqs.new = find_khz_freq_from_fid(data->currfid);

-        for_each_cpu_mask_nr(i, *(data->available_cores)) {
+        for_each_cpu(i, data->available_cores) {
                 freqs.cpu = i;
                 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
         }
@@ -1126,7 +1127,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data,
                         data->currpstate);
         freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);

-        for_each_cpu_mask_nr(i, *(data->available_cores)) {
+        for_each_cpu(i, data->available_cores) {
                 freqs.cpu = i;
                 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
         }
@@ -1134,7 +1135,7 @@ static int transition_frequency_pstate(struct powernow_k8_data *data,
         res = transition_pstate(data, pstate);
         freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);

-        for_each_cpu_mask_nr(i, *(data->available_cores)) {
+        for_each_cpu(i, data->available_cores) {
                 freqs.cpu = i;
                 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
         }
@@ -1235,21 +1236,47 @@ static int powernowk8_verify(struct cpufreq_policy *pol)
         return cpufreq_frequency_table_verify(pol, data->powernow_table);
 }

-static const char ACPI_PSS_BIOS_BUG_MSG[] =
-        KERN_ERR FW_BUG PFX "No compatible ACPI _PSS objects found.\n"
-        KERN_ERR FW_BUG PFX "Try again with latest BIOS.\n";
+struct init_on_cpu {
+        struct powernow_k8_data *data;
+        int rc;
+};
+
+static void __cpuinit powernowk8_cpu_init_on_cpu(void *_init_on_cpu)
+{
+        struct init_on_cpu *init_on_cpu = _init_on_cpu;
+
+        if (pending_bit_stuck()) {
+                printk(KERN_ERR PFX "failing init, change pending bit set\n");
+                init_on_cpu->rc = -ENODEV;
+                return;
+        }
+
+        if (query_current_values_with_pending_wait(init_on_cpu->data)) {
+                init_on_cpu->rc = -ENODEV;
+                return;
+        }
+
+        if (cpu_family == CPU_OPTERON)
+                fidvid_msr_init();
+
+        init_on_cpu->rc = 0;
+}

 /* per CPU init entry point to the driver */
 static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 {
+        static const char ACPI_PSS_BIOS_BUG_MSG[] =
+                KERN_ERR FW_BUG PFX "No compatible ACPI _PSS objects found.\n"
+                KERN_ERR FW_BUG PFX "Try again with latest BIOS.\n";
         struct powernow_k8_data *data;
-        cpumask_t oldmask;
+        struct init_on_cpu init_on_cpu;
         int rc;

         if (!cpu_online(pol->cpu))
                 return -ENODEV;

-        if (!check_supported_cpu(pol->cpu))
+        smp_call_function_single(pol->cpu, check_supported_cpu, &rc, 1);
+        if (rc)
                 return -ENODEV;

         data = kzalloc(sizeof(struct powernow_k8_data), GFP_KERNEL);
@@ -1289,27 +1316,12 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
         pol->cpuinfo.transition_latency = get_transition_latency(data);

         /* only run on specific CPU from here on */
-        oldmask = current->cpus_allowed;
-        set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu));
-
-        if (smp_processor_id() != pol->cpu) {
-                printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
-                goto err_out_unmask;
-        }
-
-        if (pending_bit_stuck()) {
-                printk(KERN_ERR PFX "failing init, change pending bit set\n");
-                goto err_out_unmask;
-        }
-
-        if (query_current_values_with_pending_wait(data))
-                goto err_out_unmask;
-
-        if (cpu_family == CPU_OPTERON)
-                fidvid_msr_init();
-
-        /* run on any CPU again */
-        set_cpus_allowed_ptr(current, &oldmask);
+        init_on_cpu.data = data;
+        smp_call_function_single(data->cpu, powernowk8_cpu_init_on_cpu,
+                                 &init_on_cpu, 1);
+        rc = init_on_cpu.rc;
+        if (rc != 0)
+                goto err_out_exit_acpi;

         if (cpu_family == CPU_HW_PSTATE)
                 cpumask_copy(pol->cpus, cpumask_of(pol->cpu));
@@ -1346,8 +1358,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)

         return 0;

-err_out_unmask:
-        set_cpus_allowed_ptr(current, &oldmask);
+err_out_exit_acpi:
         powernow_k8_cpu_exit_acpi(data);

 err_out:
@@ -1372,28 +1383,25 @@ static int __devexit powernowk8_cpu_exit(struct cpufreq_policy *pol)
         return 0;
 }

+static void query_values_on_cpu(void *_err)
+{
+        int *err = _err;
+        struct powernow_k8_data *data = __get_cpu_var(powernow_data);
+
+        *err = query_current_values_with_pending_wait(data);
+}
+
 static unsigned int powernowk8_get(unsigned int cpu)
 {
-        struct powernow_k8_data *data;
-        cpumask_t oldmask = current->cpus_allowed;
+        struct powernow_k8_data *data = per_cpu(powernow_data, cpu);
         unsigned int khz = 0;
-        unsigned int first;
-
-        first = cpumask_first(cpu_core_mask(cpu));
-        data = per_cpu(powernow_data, first);
+        int err;

         if (!data)
                 return -EINVAL;

-        set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
-        if (smp_processor_id() != cpu) {
-                printk(KERN_ERR PFX
-                        "limiting to CPU %d failed in powernowk8_get\n", cpu);
-                set_cpus_allowed_ptr(current, &oldmask);
-                return 0;
-        }
-
-        if (query_current_values_with_pending_wait(data))
+        smp_call_function_single(cpu, query_values_on_cpu, &err, true);
+        if (err)
                 goto out;

         if (cpu_family == CPU_HW_PSTATE)
@@ -1404,7 +1412,6 @@ static unsigned int powernowk8_get(unsigned int cpu)


 out:
-        set_cpus_allowed_ptr(current, &oldmask);
         return khz;
 }

@@ -1430,7 +1437,9 @@ static int __cpuinit powernowk8_init(void)
         unsigned int i, supported_cpus = 0;

         for_each_online_cpu(i) {
-                if (check_supported_cpu(i))
+                int rc;
+                smp_call_function_single(i, check_supported_cpu, &rc, 1);
+                if (rc == 0)
                         supported_cpus++;
         }

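An smp_call_function_single() callback returns void and receives a single void * argument, so the powernow-k8 init path above threads both input and output through one context struct. A sketch of the idiom, with the callback body reduced to a stand-in and a hypothetical caller init_cpu() (the struct shape mirrors init_on_cpu in the diff):

    #include <linux/smp.h>

    struct init_on_cpu {
            struct powernow_k8_data *data;  /* input */
            int rc;                         /* output */
    };

    static void powernowk8_cpu_init_on_cpu(void *_init_on_cpu)
    {
            struct init_on_cpu *init_on_cpu = _init_on_cpu;

            /* stand-in for the real per-CPU checks in the diff */
            init_on_cpu->rc = 0;
    }

    static int init_cpu(unsigned int cpu, struct powernow_k8_data *data)
    {
            struct init_on_cpu init_on_cpu = { .data = data };

            /* runs the callback on 'cpu' and waits for the result */
            smp_call_function_single(cpu, powernowk8_cpu_init_on_cpu,
                                     &init_on_cpu, 1);
            return init_on_cpu.rc;
    }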
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
index 6c6698feade1..c9c1190b5e1f 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h
@@ -223,14 +223,3 @@ static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned

 static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table);
 static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table);
-
-#ifdef CONFIG_SMP
-static inline void define_siblings(int cpu, cpumask_t cpu_sharedcore_mask[])
-{
-}
-#else
-static inline void define_siblings(int cpu, cpumask_t cpu_sharedcore_mask[])
-{
-        cpu_set(0, cpu_sharedcore_mask[0]);
-}
-#endif
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
index 55c831ed71ce..8d672ef162ce 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -323,14 +323,8 @@ static unsigned int get_cur_freq(unsigned int cpu)
 {
         unsigned l, h;
         unsigned clock_freq;
-        cpumask_t saved_mask;

-        saved_mask = current->cpus_allowed;
-        set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
-        if (smp_processor_id() != cpu)
-                return 0;
-
-        rdmsr(MSR_IA32_PERF_STATUS, l, h);
+        rdmsr_on_cpu(cpu, MSR_IA32_PERF_STATUS, &l, &h);
         clock_freq = extract_clock(l, cpu, 0);

         if (unlikely(clock_freq == 0)) {
@@ -340,11 +334,9 @@ static unsigned int get_cur_freq(unsigned int cpu)
                  * P-state transition (like TM2). Get the last freq set
                  * in PERF_CTL.
                  */
-                rdmsr(MSR_IA32_PERF_CTL, l, h);
+                rdmsr_on_cpu(cpu, MSR_IA32_PERF_CTL, &l, &h);
                 clock_freq = extract_clock(l, cpu, 1);
         }
-
-        set_cpus_allowed_ptr(current, &saved_mask);
         return clock_freq;
 }

@@ -467,15 +459,10 @@ static int centrino_target (struct cpufreq_policy *policy,
         struct cpufreq_freqs freqs;
         int retval = 0;
         unsigned int j, k, first_cpu, tmp;
-        cpumask_var_t saved_mask, covered_cpus;
+        cpumask_var_t covered_cpus;

-        if (unlikely(!alloc_cpumask_var(&saved_mask, GFP_KERNEL)))
-                return -ENOMEM;
-        if (unlikely(!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))) {
-                free_cpumask_var(saved_mask);
+        if (unlikely(!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL)))
                 return -ENOMEM;
-        }
-        cpumask_copy(saved_mask, &current->cpus_allowed);

         if (unlikely(per_cpu(centrino_model, cpu) == NULL)) {
                 retval = -ENODEV;
@@ -493,7 +480,7 @@ static int centrino_target (struct cpufreq_policy *policy,

         first_cpu = 1;
         for_each_cpu(j, policy->cpus) {
-                const struct cpumask *mask;
+                int good_cpu;

                 /* cpufreq holds the hotplug lock, so we are safe here */
                 if (!cpu_online(j))
@@ -504,32 +491,30 @@ static int centrino_target (struct cpufreq_policy *policy,
                  * Make sure we are running on CPU that wants to change freq
                  */
                 if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
-                        mask = policy->cpus;
+                        good_cpu = cpumask_any_and(policy->cpus,
+                                                   cpu_online_mask);
                 else
-                        mask = cpumask_of(j);
+                        good_cpu = j;

-                set_cpus_allowed_ptr(current, mask);
-                preempt_disable();
-                if (unlikely(!cpu_isset(smp_processor_id(), *mask))) {
+                if (good_cpu >= nr_cpu_ids) {
                         dprintk("couldn't limit to CPUs in this domain\n");
                         retval = -EAGAIN;
                         if (first_cpu) {
                                 /* We haven't started the transition yet. */
-                                goto migrate_end;
+                                goto out;
                         }
-                        preempt_enable();
                         break;
                 }

                 msr = per_cpu(centrino_model, cpu)->op_points[newstate].index;

                 if (first_cpu) {
-                        rdmsr(MSR_IA32_PERF_CTL, oldmsr, h);
+                        rdmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, &oldmsr, &h);
                         if (msr == (oldmsr & 0xffff)) {
                                 dprintk("no change needed - msr was and needs "
                                         "to be %x\n", oldmsr);
                                 retval = 0;
-                                goto migrate_end;
+                                goto out;
                         }

                         freqs.old = extract_clock(oldmsr, cpu, 0);
@@ -553,14 +538,11 @@ static int centrino_target (struct cpufreq_policy *policy,
                         oldmsr |= msr;
                 }

-                wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
-                if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
-                        preempt_enable();
+                wrmsr_on_cpu(good_cpu, MSR_IA32_PERF_CTL, oldmsr, h);
+                if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
                         break;
-                }

-                cpu_set(j, *covered_cpus);
-                preempt_enable();
+                cpumask_set_cpu(j, covered_cpus);
         }

         for_each_cpu(k, policy->cpus) {
@@ -578,10 +560,8 @@ static int centrino_target (struct cpufreq_policy *policy,
                  * Best effort undo..
                  */

-                for_each_cpu_mask_nr(j, *covered_cpus) {
-                        set_cpus_allowed_ptr(current, &cpumask_of_cpu(j));
-                        wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
-                }
+                for_each_cpu(j, covered_cpus)
+                        wrmsr_on_cpu(j, MSR_IA32_PERF_CTL, oldmsr, h);

                 tmp = freqs.new;
                 freqs.new = freqs.old;
@@ -593,15 +573,9 @@ static int centrino_target (struct cpufreq_policy *policy,
                         cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
                 }
         }
-        set_cpus_allowed_ptr(current, saved_mask);
         retval = 0;
-        goto out;

-migrate_end:
-        preempt_enable();
-        set_cpus_allowed_ptr(current, saved_mask);
 out:
-        free_cpumask_var(saved_mask);
         free_cpumask_var(covered_cpus);
         return retval;
 }
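For single-MSR accesses, speedstep-centrino above relies on the kernel's cross-CPU MSR helpers rdmsr_on_cpu()/wrmsr_on_cpu(), which issue the IPI internally and return 0 on success or a negative errno. A usage sketch; the wrapper function is illustrative, not from the patch:

    #include <asm/msr.h>

    static int read_perf_status(unsigned int cpu, u32 *lo, u32 *hi)
    {
            /* reads MSR_IA32_PERF_STATUS on 'cpu' without migrating
             * the calling task */
            return rdmsr_on_cpu(cpu, MSR_IA32_PERF_STATUS, lo, hi);
    }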
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
index 016c1a4fa3fc..6911e91fb4f6 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
@@ -89,7 +89,8 @@ static int speedstep_find_register(void)
  * speedstep_set_state - set the SpeedStep state
  * @state: new processor frequency state (SPEEDSTEP_LOW or SPEEDSTEP_HIGH)
  *
- * Tries to change the SpeedStep state.
+ * Tries to change the SpeedStep state. Can be called from
+ * smp_call_function_single.
  */
 static void speedstep_set_state(unsigned int state)
 {
@@ -143,6 +144,11 @@ static void speedstep_set_state(unsigned int state)
                 return;
         }

+/* Wrapper for smp_call_function_single. */
+static void _speedstep_set_state(void *_state)
+{
+        speedstep_set_state(*(unsigned int *)_state);
+}

 /**
  * speedstep_activate - activate SpeedStep control in the chipset
@@ -226,22 +232,28 @@ static unsigned int speedstep_detect_chipset(void)
         return 0;
 }

-static unsigned int _speedstep_get(const struct cpumask *cpus)
-{
+struct get_freq_data {
         unsigned int speed;
-        cpumask_t cpus_allowed;
-
-        cpus_allowed = current->cpus_allowed;
-        set_cpus_allowed_ptr(current, cpus);
-        speed = speedstep_get_frequency(speedstep_processor);
-        set_cpus_allowed_ptr(current, &cpus_allowed);
-        dprintk("detected %u kHz as current frequency\n", speed);
-        return speed;
+        unsigned int processor;
+};
+
+static void get_freq_data(void *_data)
+{
+        struct get_freq_data *data = _data;
+
+        data->speed = speedstep_get_frequency(data->processor);
 }

 static unsigned int speedstep_get(unsigned int cpu)
 {
-        return _speedstep_get(cpumask_of(cpu));
+        struct get_freq_data data = { .processor = cpu };
+
+        /* You're supposed to ensure CPU is online. */
+        if (smp_call_function_single(cpu, get_freq_data, &data, 1) != 0)
+                BUG();
+
+        dprintk("detected %u kHz as current frequency\n", data.speed);
+        return data.speed;
 }

 /**
@@ -257,16 +269,16 @@ static int speedstep_target(struct cpufreq_policy *policy,
                             unsigned int target_freq,
                             unsigned int relation)
 {
-        unsigned int newstate = 0;
+        unsigned int newstate = 0, policy_cpu;
         struct cpufreq_freqs freqs;
-        cpumask_t cpus_allowed;
         int i;

         if (cpufreq_frequency_table_target(policy, &speedstep_freqs[0],
                                 target_freq, relation, &newstate))
                 return -EINVAL;

-        freqs.old = _speedstep_get(policy->cpus);
+        policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask);
+        freqs.old = speedstep_get(policy_cpu);
         freqs.new = speedstep_freqs[newstate].frequency;
         freqs.cpu = policy->cpu;

@@ -276,20 +288,13 @@ static int speedstep_target(struct cpufreq_policy *policy,
         if (freqs.old == freqs.new)
                 return 0;

-        cpus_allowed = current->cpus_allowed;
-
         for_each_cpu(i, policy->cpus) {
                 freqs.cpu = i;
                 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
         }

-        /* switch to physical CPU where state is to be changed */
-        set_cpus_allowed_ptr(current, policy->cpus);
-
-        speedstep_set_state(newstate);
-
-        /* allow to be run on all CPUs */
-        set_cpus_allowed_ptr(current, &cpus_allowed);
+        smp_call_function_single(policy_cpu, _speedstep_set_state, &newstate,
+                                 true);

         for_each_cpu(i, policy->cpus) {
                 freqs.cpu = i;
@@ -312,33 +317,43 @@ static int speedstep_verify(struct cpufreq_policy *policy)
         return cpufreq_frequency_table_verify(policy, &speedstep_freqs[0]);
 }

+struct get_freqs {
+        struct cpufreq_policy *policy;
+        int ret;
+};
+
+static void get_freqs_on_cpu(void *_get_freqs)
+{
+        struct get_freqs *get_freqs = _get_freqs;
+
+        get_freqs->ret =
+                speedstep_get_freqs(speedstep_processor,
+                        &speedstep_freqs[SPEEDSTEP_LOW].frequency,
+                        &speedstep_freqs[SPEEDSTEP_HIGH].frequency,
+                        &get_freqs->policy->cpuinfo.transition_latency,
+                        &speedstep_set_state);
+}

 static int speedstep_cpu_init(struct cpufreq_policy *policy)
 {
-        int result = 0;
-        unsigned int speed;
-        cpumask_t cpus_allowed;
+        int result;
+        unsigned int policy_cpu, speed;
+        struct get_freqs gf;

         /* only run on CPU to be set, or on its sibling */
 #ifdef CONFIG_SMP
         cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
 #endif
-
-        cpus_allowed = current->cpus_allowed;
-        set_cpus_allowed_ptr(current, policy->cpus);
+        policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask);

         /* detect low and high frequency and transition latency */
-        result = speedstep_get_freqs(speedstep_processor,
-                        &speedstep_freqs[SPEEDSTEP_LOW].frequency,
-                        &speedstep_freqs[SPEEDSTEP_HIGH].frequency,
-                        &policy->cpuinfo.transition_latency,
-                        &speedstep_set_state);
-        set_cpus_allowed_ptr(current, &cpus_allowed);
-        if (result)
-                return result;
+        gf.policy = policy;
+        smp_call_function_single(policy_cpu, get_freqs_on_cpu, &gf, 1);
+        if (gf.ret)
+                return gf.ret;

         /* get current speed setting */
-        speed = _speedstep_get(policy->cpus);
+        speed = speedstep_get(policy_cpu);
         if (!speed)
                 return -EIO;

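When an existing worker takes a typed argument, speedstep-ich above adds a thin adapter so it can match the void (*)(void *) callback type that smp_call_function_single() expects. A sketch of that shape, with the worker body and the caller set_state_on() as stand-ins (only the adapter mirrors the patch):

    #include <linux/smp.h>

    static void speedstep_set_state(unsigned int state)
    {
            /* stand-in for the real chipset poke */
    }

    /* Adapter matching the smp_call_function_single() callback type. */
    static void _speedstep_set_state(void *_state)
    {
            speedstep_set_state(*(unsigned int *)_state);
    }

    static void set_state_on(unsigned int cpu, unsigned int state)
    {
            smp_call_function_single(cpu, _speedstep_set_state, &state, 1);
    }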
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c
index 2e3c6862657b..f4c290b8482f 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c
@@ -226,6 +226,7 @@ static unsigned int pentium4_get_frequency(void)
 }


+/* Warning: may get called from smp_call_function_single. */
 unsigned int speedstep_get_frequency(unsigned int processor)
 {
         switch (processor) {
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index ae3180c506a6..b0597ad02c93 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -632,17 +632,15 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
                                  void *data)
 {
         struct cpufreq_freqs *freq = data;
-        unsigned long *lpj, dummy;
+        unsigned long *lpj;

         if (cpu_has(&cpu_data(freq->cpu), X86_FEATURE_CONSTANT_TSC))
                 return 0;

-        lpj = &dummy;
-        if (!(freq->flags & CPUFREQ_CONST_LOOPS))
+        lpj = &boot_cpu_data.loops_per_jiffy;
 #ifdef CONFIG_SMP
+        if (!(freq->flags & CPUFREQ_CONST_LOOPS))
                 lpj = &cpu_data(freq->cpu).loops_per_jiffy;
-#else
-        lpj = &boot_cpu_data.loops_per_jiffy;
 #endif

         if (!ref_freq) {