author     Len Brown <len.brown@intel.com>    2011-04-01 19:34:59 -0400
committer  Len Brown <len.brown@intel.com>    2011-08-03 19:06:37 -0400
commit     a0bfa1373859e9d11dc92561a8667588803e42d8 (patch)
tree       ef5768a313ac16a211830efc3edb9ca95487cb6a /drivers/cpuidle
parent     4bfc8288bc4a64529c5547d17349a2a1f4675507 (diff)
cpuidle: stop depending on pm_idle
cpuidle users should call cpuidle_idle_call() directly
rather than via the (pm_idle)() function pointer.

An architecture may choose to continue using (pm_idle)(),
but cpuidle need not depend on it:

	my_arch_cpu_idle()
		...
		if (cpuidle_idle_call())
			pm_idle();
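
For illustration only, a slightly fuller sketch of such an architecture hook
is shown below; my_arch_cpu_idle() is a placeholder name and the fallback
wiring is an assumption for this example, not part of this patch:

	#include <linux/cpuidle.h>	/* cpuidle_idle_call() */

	/*
	 * Illustrative sketch (not part of this patch): an architecture's
	 * idle hook calls cpuidle directly.  cpuidle_idle_call() returns
	 * non-zero when cpuidle is off, not yet initialized, or the per-cpu
	 * device is not ready, so the architecture falls back to its own
	 * default idle routine.
	 */
	static void my_arch_cpu_idle(void)
	{
		if (cpuidle_idle_call())
			pm_idle();	/* arch-private fallback idle */
	}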
cc: Kevin Hilman <khilman@deeprootsystems.com>
cc: Paul Mundt <lethal@linux-sh.org>
cc: x86@kernel.org
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
Diffstat (limited to 'drivers/cpuidle')
-rw-r--r--   drivers/cpuidle/cpuidle.c   38
1 file changed, 18 insertions(+), 20 deletions(-)
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 041df0b056b2..d4c542372886 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -25,10 +25,10 @@ DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
 
 DEFINE_MUTEX(cpuidle_lock);
 LIST_HEAD(cpuidle_detected_devices);
-static void (*pm_idle_old)(void);
 
 static int enabled_devices;
 static int off __read_mostly;
+static int initialized __read_mostly;
 
 int cpuidle_disabled(void)
 {
@@ -56,25 +56,23 @@ static int __cpuidle_register_device(struct cpuidle_device *dev);
  * cpuidle_idle_call - the main idle loop
  *
  * NOTE: no locks or semaphores should be used here
+ * return non-zero on failure
  */
-static void cpuidle_idle_call(void)
+int cpuidle_idle_call(void)
 {
 	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
 	struct cpuidle_state *target_state;
 	int next_state;
 
+	if (off)
+		return -ENODEV;
+
+	if (!initialized)
+		return -ENODEV;
+
 	/* check if the device is ready */
-	if (!dev || !dev->enabled) {
-		if (pm_idle_old)
-			pm_idle_old();
-		else
-#if defined(CONFIG_ARCH_HAS_DEFAULT_IDLE)
-			default_idle();
-#else
-			local_irq_enable();
-#endif
-		return;
-	}
+	if (!dev || !dev->enabled)
+		return -EBUSY;
 
 #if 0
 	/* shows regressions, re-enable for 2.6.29 */
@@ -99,7 +97,7 @@ static void cpuidle_idle_call(void)
 	next_state = cpuidle_curr_governor->select(dev);
 	if (need_resched()) {
 		local_irq_enable();
-		return;
+		return 0;
 	}
 
 	target_state = &dev->states[next_state];
@@ -124,6 +122,8 @@ static void cpuidle_idle_call(void)
 	/* give the governor an opportunity to reflect on the outcome */
 	if (cpuidle_curr_governor->reflect)
 		cpuidle_curr_governor->reflect(dev);
+
+	return 0;
 }
 
 /**
@@ -131,10 +131,10 @@ static void cpuidle_idle_call(void)
  */
 void cpuidle_install_idle_handler(void)
 {
-	if (enabled_devices && (pm_idle != cpuidle_idle_call)) {
+	if (enabled_devices) {
 		/* Make sure all changes finished before we switch to new idle */
 		smp_wmb();
-		pm_idle = cpuidle_idle_call;
+		initialized = 1;
 	}
 }
 
@@ -143,8 +143,8 @@ void cpuidle_install_idle_handler(void)
  */
 void cpuidle_uninstall_idle_handler(void)
 {
-	if (enabled_devices && pm_idle_old && (pm_idle != pm_idle_old)) {
-		pm_idle = pm_idle_old;
+	if (enabled_devices) {
+		initialized = 0;
 		cpuidle_kick_cpus();
 	}
 }
@@ -440,8 +440,6 @@ static int __init cpuidle_init(void)
 	if (cpuidle_disabled())
 		return -ENODEV;
 
-	pm_idle_old = pm_idle;
-
 	ret = cpuidle_add_class_sysfs(&cpu_sysdev_class);
 	if (ret)
 		return ret;