author	Linus Torvalds <torvalds@g5.osdl.org>	2006-06-23 01:40:00 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-06-23 01:40:00 -0400
commit	c71bd6944eb1458b7887af1783101f5a46140c40 (patch)
tree	97a7f30a1fb5b3cde3d9f3108779e612960056da
parent	065a3e17baa36d1d48eb7376138820035b44775e (diff)
parent	4ec223d02f4d5f5a3129edc0e3d22550d6ac8a32 (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/davej/cpufreq
* master.kernel.org:/pub/scm/linux/kernel/git/davej/cpufreq:
  [CPUFREQ] Fix ondemand vs suspend deadlock
  [CPUFREQ] Fix powernow-k8 SMP kernel on UP hardware bug.
  [PATCH] redirect speedstep-centrino maintainer mail to cpufreq list
  [CPUFREQ] correct powernow-k8 fid/vid masks for extended parts
  [CPUFREQ] Clarify powernow-k8 cpu_family statements
-rw-r--r--	arch/i386/kernel/cpu/cpufreq/powernow-k8.c	30
-rw-r--r--	arch/i386/kernel/cpu/cpufreq/powernow-k8.h	4
-rw-r--r--	arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c	12
-rw-r--r--	drivers/cpufreq/cpufreq_conservative.c	12
-rw-r--r--	drivers/cpufreq/cpufreq_ondemand.c	12
5 files changed, 46 insertions(+), 24 deletions(-)
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
index b4277f58f40c..2d6491672559 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
@@ -120,7 +120,7 @@ static int pending_bit_stuck(void)
 {
 	u32 lo, hi;
 
-	if (cpu_family)
+	if (cpu_family == CPU_HW_PSTATE)
 		return 0;
 
 	rdmsr(MSR_FIDVID_STATUS, lo, hi);
@@ -136,7 +136,7 @@ static int query_current_values_with_pending_wait(struct powernow_k8_data *data)
 	u32 lo, hi;
 	u32 i = 0;
 
-	if (cpu_family) {
+	if (cpu_family == CPU_HW_PSTATE) {
 		rdmsr(MSR_PSTATE_STATUS, lo, hi);
 		i = lo & HW_PSTATE_MASK;
 		rdmsr(MSR_PSTATE_DEF_BASE + i, lo, hi);
@@ -598,7 +598,7 @@ static void print_basics(struct powernow_k8_data *data)
 	int j;
 	for (j = 0; j < data->numps; j++) {
 		if (data->powernow_table[j].frequency != CPUFREQ_ENTRY_INVALID) {
-			if (cpu_family) {
+			if (cpu_family == CPU_HW_PSTATE) {
 				printk(KERN_INFO PFX " %d : fid 0x%x gid 0x%x (%d MHz)\n", j, (data->powernow_table[j].index & 0xff00) >> 8,
 					(data->powernow_table[j].index & 0xff0000) >> 16,
 					data->powernow_table[j].frequency/1000);
@@ -758,7 +758,7 @@ static int find_psb_table(struct powernow_k8_data *data)
 #ifdef CONFIG_X86_POWERNOW_K8_ACPI
 static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index)
 {
-	if (!data->acpi_data.state_count || cpu_family)
+	if (!data->acpi_data.state_count || (cpu_family == CPU_HW_PSTATE))
 		return;
 
 	data->irt = (data->acpi_data.states[index].control >> IRT_SHIFT) & IRT_MASK;
@@ -801,7 +801,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 		goto err_out;
 	}
 
-	if (cpu_family)
+	if (cpu_family == CPU_HW_PSTATE)
 		ret_val = fill_powernow_table_pstate(data, powernow_table);
 	else
 		ret_val = fill_powernow_table_fidvid(data, powernow_table);
@@ -885,8 +885,8 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpuf
 		u32 vid;
 
 		if (data->exttype) {
-			fid = data->acpi_data.states[i].status & FID_MASK;
-			vid = (data->acpi_data.states[i].status >> VID_SHIFT) & VID_MASK;
+			fid = data->acpi_data.states[i].status & EXT_FID_MASK;
+			vid = (data->acpi_data.states[i].status >> VID_SHIFT) & EXT_VID_MASK;
 		} else {
 			fid = data->acpi_data.states[i].control & FID_MASK;
 			vid = (data->acpi_data.states[i].control >> VID_SHIFT) & VID_MASK;
@@ -1082,7 +1082,7 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
 	if (query_current_values_with_pending_wait(data))
 		goto err_out;
 
-	if (cpu_family)
+	if (cpu_family == CPU_HW_PSTATE)
 		dprintk("targ: curr fid 0x%x, did 0x%x\n",
 			data->currfid, data->currvid);
 	else {
@@ -1103,7 +1103,7 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
 
 	powernow_k8_acpi_pst_values(data, newstate);
 
-	if (cpu_family)
+	if (cpu_family == CPU_HW_PSTATE)
 		ret = transition_frequency_pstate(data, newstate);
 	else
 		ret = transition_frequency_fidvid(data, newstate);
@@ -1115,7 +1115,7 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi
 	}
 	mutex_unlock(&fidvid_mutex);
 
-	if (cpu_family)
+	if (cpu_family == CPU_HW_PSTATE)
 		pol->cur = find_khz_freq_from_fiddid(data->currfid, data->currdid);
 	else
 		pol->cur = find_khz_freq_from_fid(data->currfid);
@@ -1163,7 +1163,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	 * Use the PSB BIOS structure. This is only availabe on
 	 * an UP version, and is deprecated by AMD.
 	 */
-	if ((num_online_cpus() != 1) || (num_possible_cpus() != 1)) {
+	if (num_online_cpus() != 1) {
 		printk(KERN_ERR PFX "MP systems not supported by PSB BIOS structure\n");
 		kfree(data);
 		return -ENODEV;
@@ -1197,14 +1197,14 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	if (query_current_values_with_pending_wait(data))
 		goto err_out;
 
-	if (!cpu_family)
+	if (cpu_family == CPU_OPTERON)
 		fidvid_msr_init();
 
 	/* run on any CPU again */
 	set_cpus_allowed(current, oldmask);
 
 	pol->governor = CPUFREQ_DEFAULT_GOVERNOR;
-	if (cpu_family)
+	if (cpu_family == CPU_HW_PSTATE)
 		pol->cpus = cpumask_of_cpu(pol->cpu);
 	else
 		pol->cpus = cpu_core_map[pol->cpu];
@@ -1215,7 +1215,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	pol->cpuinfo.transition_latency = (((data->rvo + 8) * data->vstable * VST_UNITS_20US)
 		+ (3 * (1 << data->irt) * 10)) * 1000;
 
-	if (cpu_family)
+	if (cpu_family == CPU_HW_PSTATE)
 		pol->cur = find_khz_freq_from_fiddid(data->currfid, data->currdid);
 	else
 		pol->cur = find_khz_freq_from_fid(data->currfid);
@@ -1232,7 +1232,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 
 	cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu);
 
-	if (cpu_family)
+	if (cpu_family == CPU_HW_PSTATE)
 		dprintk("cpu_init done, current fid 0x%x, did 0x%x\n",
 			data->currfid, data->currdid);
 	else
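
Note on the cpu_family changes above: the "[CPUFREQ] Clarify powernow-k8 cpu_family statements" entry from the merge summary only replaces bare truthiness tests with explicit comparisons against CPU_HW_PSTATE and CPU_OPTERON; behaviour is unchanged. The standalone sketch below illustrates the idea; the numeric enum values are assumptions for illustration and are not copied from powernow-k8.h.

    /*
     * Minimal sketch of the clarified cpu_family test.  The enum values are
     * assumed for illustration only; the point is that an explicit comparison
     * no longer relies on CPU_OPTERON happening to be zero.
     */
    #include <stdio.h>

    enum cpu_family_type {
    	CPU_OPTERON   = 0,	/* classic fid/vid transitions */
    	CPU_HW_PSTATE = 1,	/* hardware P-state interface */
    };

    static enum cpu_family_type cpu_family = CPU_OPTERON;

    int main(void)
    {
    	if (cpu_family == CPU_HW_PSTATE)	/* was: if (cpu_family) */
    		printf("read MSR_PSTATE_* registers\n");
    	else					/* was the implicit "== 0" branch */
    		printf("use fid/vid transitions via MSR_FIDVID_*\n");
    	return 0;
    }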
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.h b/arch/i386/kernel/cpu/cpufreq/powernow-k8.h
index bf8ad9e43da3..0fb2a3001ba5 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.h
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.h
@@ -169,7 +169,9 @@ struct powernow_k8_data {
 #define MVS_MASK	3
 #define VST_MASK	0x7f
 #define VID_MASK	0x1f
-#define FID_MASK	0x3f
+#define FID_MASK	0x1f
+#define EXT_VID_MASK	0x3f
+#define EXT_FID_MASK	0x3f
 
 
 /*
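
The header change above pairs with the fill_powernow_table_fidvid() hunk earlier: parts reporting extended encodings (data->exttype set) carry 6-bit fid/vid fields in the ACPI status word, while the classic control word uses 5-bit fields, so each case needs its own mask. A small standalone sketch follows; the mask values come from the hunk above, but VID_SHIFT and the sample status word are invented for this example.

    /*
     * Sketch of extended vs. classic fid/vid extraction.  FID_MASK, VID_MASK,
     * EXT_FID_MASK and EXT_VID_MASK match the header hunk; VID_SHIFT and the
     * sample status word are made-up values for illustration.
     */
    #include <stdio.h>

    #define VID_SHIFT	6	/* assumed shift for the vid field */
    #define VID_MASK	0x1f	/* classic 5-bit vid */
    #define FID_MASK	0x1f	/* classic 5-bit fid (corrected from 0x3f) */
    #define EXT_VID_MASK	0x3f	/* extended 6-bit vid */
    #define EXT_FID_MASK	0x3f	/* extended 6-bit fid */

    int main(void)
    {
    	unsigned int status = 0x6e5;	/* invented ACPI status word */
    	int exttype = 1;		/* pretend this is an extended part */
    	unsigned int fid, vid;

    	if (exttype) {
    		fid = status & EXT_FID_MASK;
    		vid = (status >> VID_SHIFT) & EXT_VID_MASK;
    	} else {
    		fid = status & FID_MASK;
    		vid = (status >> VID_SHIFT) & VID_MASK;
    	}
    	printf("fid=0x%x vid=0x%x\n", fid, vid);
    	return 0;
    }

With the old 0x3f FID_MASK, a classic part could pick up a stray sixth bit from the neighbouring field, which is what the mask correction avoids.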
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
index ce54ff12c15d..f1a82c5de1ba 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -2,19 +2,15 @@
  * cpufreq driver for Enhanced SpeedStep, as found in Intel's Pentium
  * M (part of the Centrino chipset).
  *
+ * Since the original Pentium M, most new Intel CPUs support Enhanced
+ * SpeedStep.
+ *
  * Despite the "SpeedStep" in the name, this is almost entirely unlike
  * traditional SpeedStep.
  *
  * Modelled on speedstep.c
  *
  * Copyright (C) 2003 Jeremy Fitzhardinge <jeremy@goop.org>
- *
- * WARNING WARNING WARNING
- *
- * This driver manipulates the PERF_CTL MSR, which is only somewhat
- * documented. While it seems to work on my laptop, it has not been
- * tested anywhere else, and it may not work for you, do strange
- * things or simply crash.
  */
 
 #include <linux/kernel.h>
@@ -36,7 +32,7 @@
 #include <asm/cpufeature.h>
 
 #define PFX		"speedstep-centrino: "
-#define MAINTAINER	"Jeremy Fitzhardinge <jeremy@goop.org>"
+#define MAINTAINER	"cpufreq@lists.linux.org.uk"
 
 #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-centrino", msg)
 
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index e07a35487bde..8878a154ed43 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -72,6 +72,14 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
 
+/*
+ * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug
+ * lock and dbs_mutex. cpu_hotplug lock should always be held before
+ * dbs_mutex. If any function that can potentially take cpu_hotplug lock
+ * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
+ * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
+ * is recursive for the same process. -Venki
+ */
 static DEFINE_MUTEX (dbs_mutex);
 static DECLARE_WORK (dbs_work, do_dbs_timer, NULL);
 
@@ -414,12 +422,14 @@ static void dbs_check_cpu(int cpu)
 static void do_dbs_timer(void *data)
 {
 	int i;
+	lock_cpu_hotplug();
 	mutex_lock(&dbs_mutex);
 	for_each_online_cpu(i)
 		dbs_check_cpu(i);
 	schedule_delayed_work(&dbs_work,
 			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
 	mutex_unlock(&dbs_mutex);
+	unlock_cpu_hotplug();
 }
 
 static inline void dbs_timer_init(void)
@@ -514,6 +524,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		break;
 
 	case CPUFREQ_GOV_LIMITS:
+		lock_cpu_hotplug();
 		mutex_lock(&dbs_mutex);
 		if (policy->max < this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(
@@ -524,6 +535,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 				this_dbs_info->cur_policy,
 				policy->min, CPUFREQ_RELATION_L);
 		mutex_unlock(&dbs_mutex);
+		unlock_cpu_hotplug();
 		break;
 	}
 	return 0;
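
The cpufreq_conservative.c changes above, and the identical ones in cpufreq_ondemand.c below, implement the "Fix ondemand vs suspend deadlock" entry from the merge summary. The rule is the one stated in the new comment: any path that holds dbs_mutex while calling into code that may take the cpu_hotplug lock (such as __cpufreq_driver_target()) must take the cpu_hotplug lock first. Below is a kernel-style sketch of the pattern, mirroring only the calls visible in the hunks; it is not intended to build on its own.

    /*
     * Pattern sketch only (not a buildable governor): take the locks in the
     * order required by the patch and release them in reverse.
     */
    static void dbs_limits_example(struct cpufreq_policy *policy,
    			       struct cpufreq_policy *cur_policy)
    {
    	lock_cpu_hotplug();	/* 1. cpu_hotplug lock first (recursive for this task) */
    	mutex_lock(&dbs_mutex);	/* 2. governor state second */

    	/*
    	 * __cpufreq_driver_target() may take the cpu_hotplug lock itself;
    	 * that is safe here because this task already holds it.
    	 */
    	__cpufreq_driver_target(cur_policy, policy->min, CPUFREQ_RELATION_L);

    	mutex_unlock(&dbs_mutex);	/* release in reverse order */
    	unlock_cpu_hotplug();
    }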
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 3e6ffcaa5af4..4d308410b60e 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -71,6 +71,14 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
 
+/*
+ * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug
+ * lock and dbs_mutex. cpu_hotplug lock should always be held before
+ * dbs_mutex. If any function that can potentially take cpu_hotplug lock
+ * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
+ * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
+ * is recursive for the same process. -Venki
+ */
 static DEFINE_MUTEX (dbs_mutex);
 static DECLARE_WORK (dbs_work, do_dbs_timer, NULL);
 
@@ -363,12 +371,14 @@ static void dbs_check_cpu(int cpu)
 static void do_dbs_timer(void *data)
 {
 	int i;
+	lock_cpu_hotplug();
 	mutex_lock(&dbs_mutex);
 	for_each_online_cpu(i)
 		dbs_check_cpu(i);
 	queue_delayed_work(dbs_workq, &dbs_work,
 			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
 	mutex_unlock(&dbs_mutex);
+	unlock_cpu_hotplug();
 }
 
 static inline void dbs_timer_init(void)
@@ -469,6 +479,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		break;
 
 	case CPUFREQ_GOV_LIMITS:
+		lock_cpu_hotplug();
 		mutex_lock(&dbs_mutex);
 		if (policy->max < this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(
@@ -479,6 +490,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 				this_dbs_info->cur_policy,
 				policy->min, CPUFREQ_RELATION_L);
 		mutex_unlock(&dbs_mutex);
+		unlock_cpu_hotplug();
 		break;
 	}
 	return 0;