author     Rusty Russell <rusty@rustcorp.com.au>  2009-06-12 07:25:37 -0400
committer  Dave Jones <davej@redhat.com>  2009-06-15 11:49:43 -0400
commit     1ff6e97f1d993dff2f9b6f4a9173687370660232
tree       19cadec61f22a61a0e4c2d338ccd8672710aed17  /arch/x86/kernel/cpu/cpufreq/powernow-k8.c
parent     e3f996c26ff6c4c084aaaa64dce6e54d31f517be
[CPUFREQ] cpumask: avoid playing with cpus_allowed in powernow-k8.c
cpumask: avoid playing with cpus_allowed in powernow-k8.c

It's generally a very bad idea to mug some process's cpumask: it could
legitimately and reasonably be changed by root, which could break us (if done
before our code) or them (if we restore the wrong value).

I did not replace powernowk8_target; it needs fixing, but it grabs a mutex
(so smp_call_function_single cannot be used there), and Mark points out it
can be called multiple times per second, so work_on_cpu is too heavy.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
To: cpufreq@vger.kernel.org
Acked-by: Mark Langsdorf <mark.langsdorf@amd.com>
Tested-by: Mark Langsdorf <mark.langsdorf@amd.com>
Signed-off-by: Dave Jones <davej@redhat.com>
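For illustration, a minimal sketch of the pattern this patch switches to; the
helper names (do_check_on_cpu, check_cpu) are hypothetical and not part of the
driver. Instead of rewriting current->cpus_allowed to migrate the calling task
onto the target CPU, the per-CPU work is handed to smp_call_function_single(),
which runs the callback on that CPU and, with wait=1, returns only after it
has finished:

	/* Hypothetical callback: runs on the target CPU, so CPUID/MSR
	 * accesses refer to that CPU; the result comes back through *_rc. */
	static void do_check_on_cpu(void *_rc)
	{
		int *rc = _rc;

		*rc = 0;	/* e.g. report "supported" */
	}

	/* Hypothetical caller: no cpus_allowed games, just a synchronous
	 * cross-CPU call. */
	static int check_cpu(unsigned int cpu)
	{
		int rc = -ENODEV;

		smp_call_function_single(cpu, do_check_on_cpu, &rc, 1);
		return rc;
	}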
Diffstat (limited to 'arch/x86/kernel/cpu/cpufreq/powernow-k8.c')
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c  118
1 file changed, 60 insertions(+), 58 deletions(-)
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 20c7b99d7ba8..1f55547d5b30 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -508,41 +508,34 @@ static int core_voltage_post_transition(struct powernow_k8_data *data,
 	return 0;
 }
 
-static int check_supported_cpu(unsigned int cpu)
+static void check_supported_cpu(void *_rc)
 {
-	cpumask_t oldmask;
 	u32 eax, ebx, ecx, edx;
-	unsigned int rc = 0;
-
-	oldmask = current->cpus_allowed;
-	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
+	int *rc = _rc;
 
-	if (smp_processor_id() != cpu) {
-		printk(KERN_ERR PFX "limiting to cpu %u failed\n", cpu);
-		goto out;
-	}
+	*rc = -ENODEV;
 
 	if (current_cpu_data.x86_vendor != X86_VENDOR_AMD)
-		goto out;
+		return;
 
 	eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
 	if (((eax & CPUID_XFAM) != CPUID_XFAM_K8) &&
 	    ((eax & CPUID_XFAM) < CPUID_XFAM_10H))
-		goto out;
+		return;
 
 	if ((eax & CPUID_XFAM) == CPUID_XFAM_K8) {
 		if (((eax & CPUID_USE_XFAM_XMOD) != CPUID_USE_XFAM_XMOD) ||
 		    ((eax & CPUID_XMOD) > CPUID_XMOD_REV_MASK)) {
 			printk(KERN_INFO PFX
 			       "Processor cpuid %x not supported\n", eax);
-			goto out;
+			return;
 		}
 
 		eax = cpuid_eax(CPUID_GET_MAX_CAPABILITIES);
 		if (eax < CPUID_FREQ_VOLT_CAPABILITIES) {
 			printk(KERN_INFO PFX
 			       "No frequency change capabilities detected\n");
-			goto out;
+			return;
 		}
 
 		cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
@@ -550,21 +543,17 @@ static int check_supported_cpu(unsigned int cpu)
 		    != P_STATE_TRANSITION_CAPABLE) {
 			printk(KERN_INFO PFX
 			       "Power state transitions not supported\n");
-			goto out;
+			return;
 		}
 	} else { /* must be a HW Pstate capable processor */
 		cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
 		if ((edx & USE_HW_PSTATE) == USE_HW_PSTATE)
 			cpu_family = CPU_HW_PSTATE;
 		else
-			goto out;
+			return;
 	}
 
-	rc = 1;
-
-out:
-	set_cpus_allowed_ptr(current, &oldmask);
-	return rc;
+	*rc = 0;
 }
 
 static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst,
@@ -1247,6 +1236,32 @@ static int powernowk8_verify(struct cpufreq_policy *pol)
 	return cpufreq_frequency_table_verify(pol, data->powernow_table);
 }
 
+struct init_on_cpu {
+	struct powernow_k8_data *data;
+	int rc;
+};
+
+static void __cpuinit powernowk8_cpu_init_on_cpu(void *_init_on_cpu)
+{
+	struct init_on_cpu *init_on_cpu = _init_on_cpu;
+
+	if (pending_bit_stuck()) {
+		printk(KERN_ERR PFX "failing init, change pending bit set\n");
+		init_on_cpu->rc = -ENODEV;
+		return;
+	}
+
+	if (query_current_values_with_pending_wait(init_on_cpu->data)) {
+		init_on_cpu->rc = -ENODEV;
+		return;
+	}
+
+	if (cpu_family == CPU_OPTERON)
+		fidvid_msr_init();
+
+	init_on_cpu->rc = 0;
+}
+
 /* per CPU init entry point to the driver */
 static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 {
@@ -1254,13 +1269,14 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 		KERN_ERR FW_BUG PFX "No compatible ACPI _PSS objects found.\n"
 		KERN_ERR FW_BUG PFX "Try again with latest BIOS.\n";
 	struct powernow_k8_data *data;
-	cpumask_t oldmask;
+	struct init_on_cpu init_on_cpu;
 	int rc;
 
 	if (!cpu_online(pol->cpu))
 		return -ENODEV;
 
-	if (!check_supported_cpu(pol->cpu))
+	smp_call_function_single(pol->cpu, check_supported_cpu, &rc, 1);
+	if (rc)
 		return -ENODEV;
 
 	data = kzalloc(sizeof(struct powernow_k8_data), GFP_KERNEL);
@@ -1300,27 +1316,12 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	pol->cpuinfo.transition_latency = get_transition_latency(data);
 
 	/* only run on specific CPU from here on */
-	oldmask = current->cpus_allowed;
-	set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu));
-
-	if (smp_processor_id() != pol->cpu) {
-		printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu);
-		goto err_out_unmask;
-	}
-
-	if (pending_bit_stuck()) {
-		printk(KERN_ERR PFX "failing init, change pending bit set\n");
-		goto err_out_unmask;
-	}
-
-	if (query_current_values_with_pending_wait(data))
-		goto err_out_unmask;
-
-	if (cpu_family == CPU_OPTERON)
-		fidvid_msr_init();
-
-	/* run on any CPU again */
-	set_cpus_allowed_ptr(current, &oldmask);
+	init_on_cpu.data = data;
+	smp_call_function_single(data->cpu, powernowk8_cpu_init_on_cpu,
+				 &init_on_cpu, 1);
+	rc = init_on_cpu.rc;
+	if (rc != 0)
+		goto err_out_exit_acpi;
 
 	if (cpu_family == CPU_HW_PSTATE)
 		cpumask_copy(pol->cpus, cpumask_of(pol->cpu));
@@ -1357,8 +1358,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 
 	return 0;
 
-err_out_unmask:
-	set_cpus_allowed_ptr(current, &oldmask);
+err_out_exit_acpi:
 	powernow_k8_cpu_exit_acpi(data);
 
 err_out:
@@ -1383,24 +1383,25 @@ static int __devexit powernowk8_cpu_exit(struct cpufreq_policy *pol)
 	return 0;
 }
 
+static void query_values_on_cpu(void *_err)
+{
+	int *err = _err;
+	struct powernow_k8_data *data = __get_cpu_var(powernow_data);
+
+	*err = query_current_values_with_pending_wait(data);
+}
+
 static unsigned int powernowk8_get(unsigned int cpu)
 {
 	struct powernow_k8_data *data = per_cpu(powernow_data, cpu);
-	cpumask_t oldmask = current->cpus_allowed;
 	unsigned int khz = 0;
+	int err;
 
 	if (!data)
 		return -EINVAL;
 
-	set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
-	if (smp_processor_id() != cpu) {
-		printk(KERN_ERR PFX
-		       "limiting to CPU %d failed in powernowk8_get\n", cpu);
-		set_cpus_allowed_ptr(current, &oldmask);
-		return 0;
-	}
-
-	if (query_current_values_with_pending_wait(data))
+	smp_call_function_single(cpu, query_values_on_cpu, &err, true);
+	if (err)
 		goto out;
 
 	if (cpu_family == CPU_HW_PSTATE)
@@ -1411,7 +1412,6 @@ static unsigned int powernowk8_get(unsigned int cpu)
 
 
 out:
-	set_cpus_allowed_ptr(current, &oldmask);
 	return khz;
 }
 
@@ -1437,7 +1437,9 @@ static int __cpuinit powernowk8_init(void)
 	unsigned int i, supported_cpus = 0;
 
 	for_each_online_cpu(i) {
-		if (check_supported_cpu(i))
+		int rc;
+		smp_call_function_single(i, check_supported_cpu, &rc, 1);
+		if (rc == 0)
 			supported_cpus++;
 	}
 