author     Deepthi Dharwar <deepthi@linux.vnet.ibm.com>  2011-10-28 06:50:42 -0400
committer  Len Brown <len.brown@intel.com>  2011-11-06 21:13:58 -0500
commit     46bcfad7a819bd17ac4e831b04405152d59784ab (patch)
tree       20041e788154d103edff2699f88d4a30320e3ee2
parent     4202735e8ab6ecfb0381631a0d0b58fefe0bd4e2 (diff)
cpuidle: Single/Global registration of idle states
This patch makes the cpuidle_states structure global (a single copy)
instead of per-cpu. The statistics the governor needs on a per-cpu
basis are kept per-cpu. This simplifies the cpuidle subsystem, as
state registration is now done by a single CPU only. Keeping a single
copy of cpuidle_states also saves memory. The rare case of asymmetric
C-states can be handled within the cpuidle driver, and architectures
such as POWER do not have asymmetric C-states.

With single/global registration of all the idle states, dynamic
C-state transitions on x86 are handled by the boot CPU: the boot CPU
disables all the devices, re-populates the states, and then re-enables
all the devices, irrespective of which CPU receives the notification
first.
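For illustration, below is a minimal sketch of the registration pattern this
change establishes, condensed from the at91/kirkwood conversions in the diff:
idle-state definitions are filled in once on the global struct cpuidle_driver,
while each CPU only registers a struct cpuidle_device carrying its usage
statistics. The driver, device and callback names are placeholders, not code
from this patch.

/*
 * Minimal sketch of the post-patch registration split (modeled on the
 * at91/kirkwood conversions below).  "my_idle_driver", "my_idle_dev" and
 * "my_enter" are placeholder names, not identifiers from this patch.
 */
static int my_enter(struct cpuidle_device *dev,
		    struct cpuidle_driver *drv, int index);

static struct cpuidle_driver my_idle_driver = {
	.name  = "my_idle",
	.owner = THIS_MODULE,
};

static DEFINE_PER_CPU(struct cpuidle_device, my_idle_dev);

static int __init my_idle_init(void)
{
	struct cpuidle_driver *drv = &my_idle_driver;
	struct cpuidle_device *dev;

	/* Idle states are now described once, in the global driver. */
	drv->states[0].enter            = my_enter;
	drv->states[0].exit_latency     = 1;		/* us */
	drv->states[0].target_residency = 10000;	/* us */
	drv->states[0].flags            = CPUIDLE_FLAG_TIME_VALID;
	strcpy(drv->states[0].name, "WFI");
	strcpy(drv->states[0].desc, "Wait for interrupt");
	drv->state_count = 1;

	if (cpuidle_register_driver(drv))
		return -EIO;

	/* Each CPU registers only a device that holds its usage stats. */
	dev = &per_cpu(my_idle_dev, smp_processor_id());
	dev->state_count = drv->state_count;
	return cpuidle_register_device(dev);
}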
Reference:
https://lkml.org/lkml/2011/4/25/83
Signed-off-by: Deepthi Dharwar <deepthi@linux.vnet.ibm.com>
Signed-off-by: Trinabh Gupta <g.trinabh@gmail.com>
Tested-by: Jean Pihet <j-pihet@ti.com>
Reviewed-by: Kevin Hilman <khilman@ti.com>
Acked-by: Arjan van de Ven <arjan@linux.intel.com>
Acked-by: Kevin Hilman <khilman@ti.com>
Signed-off-by: Len Brown <len.brown@intel.com>
-rw-r--r--  arch/arm/mach-at91/cpuidle.c           |  31
-rw-r--r--  arch/arm/mach-davinci/cpuidle.c        |  39
-rw-r--r--  arch/arm/mach-exynos4/cpuidle.c        |  23
-rw-r--r--  arch/arm/mach-kirkwood/cpuidle.c       |  30
-rw-r--r--  arch/arm/mach-omap2/cpuidle34xx.c      |  73
-rw-r--r--  arch/sh/kernel/cpu/shmobile/cpuidle.c  |  18
-rw-r--r--  drivers/acpi/processor_driver.c        |  20
-rw-r--r--  drivers/acpi/processor_idle.c          | 191
-rw-r--r--  drivers/cpuidle/cpuidle.c              |  45
-rw-r--r--  drivers/cpuidle/driver.c               |  25
-rw-r--r--  drivers/cpuidle/governors/ladder.c     |  28
-rw-r--r--  drivers/cpuidle/governors/menu.c       |  20
-rw-r--r--  drivers/cpuidle/sysfs.c                |   3
-rw-r--r--  drivers/idle/intel_idle.c              |  80
-rw-r--r--  include/acpi/processor.h               |   1
-rw-r--r--  include/linux/cpuidle.h                |  19
16 files changed, 439 insertions(+), 207 deletions(-)
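The core API change that the per-driver conversions below revolve around is
visible in the drivers/cpuidle/cpuidle.c hunk: governors select, and states
are entered, against the global driver's state table, with the driver passed
down as an extra argument. A condensed sketch of the resulting idle entry
path (locking, tracing, sysfs and error handling omitted; not a verbatim copy
of the file):

/*
 * Condensed sketch of the idle entry path after this patch, based on the
 * drivers/cpuidle/cpuidle.c hunk further down.  Locking, tracing, residency
 * accounting and error handling are omitted.
 */
int cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_driver();
	struct cpuidle_state *target_state;
	int next_state, entered_state;

	/* the governor now selects against the global driver's state table */
	next_state = cpuidle_curr_governor->select(drv, dev);
	if (need_resched()) {
		local_irq_enable();
		return 0;
	}

	/* state definitions come from the driver, not the per-cpu device */
	target_state = &drv->states[next_state];

	/* ->enter() callbacks take the driver as an additional argument */
	entered_state = target_state->enter(dev, drv, next_state);

	/* per-cpu usage statistics are still updated on dev (omitted) */
	return 0;
}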
diff --git a/arch/arm/mach-at91/cpuidle.c b/arch/arm/mach-at91/cpuidle.c
index 4696a0d61e2e..93178f67420e 100644
--- a/arch/arm/mach-at91/cpuidle.c
+++ b/arch/arm/mach-at91/cpuidle.c
@@ -33,6 +33,7 @@ static struct cpuidle_driver at91_idle_driver = { | |||
33 | 33 | ||
34 | /* Actual code that puts the SoC in different idle states */ | 34 | /* Actual code that puts the SoC in different idle states */ |
35 | static int at91_enter_idle(struct cpuidle_device *dev, | 35 | static int at91_enter_idle(struct cpuidle_device *dev, |
36 | struct cpuidle_driver *drv, | ||
36 | int index) | 37 | int index) |
37 | { | 38 | { |
38 | struct timeval before, after; | 39 | struct timeval before, after; |
@@ -64,27 +65,29 @@ static int at91_enter_idle(struct cpuidle_device *dev, | |||
64 | static int at91_init_cpuidle(void) | 65 | static int at91_init_cpuidle(void) |
65 | { | 66 | { |
66 | struct cpuidle_device *device; | 67 | struct cpuidle_device *device; |
67 | 68 | struct cpuidle_driver *driver = &at91_idle_driver; | |
68 | cpuidle_register_driver(&at91_idle_driver); | ||
69 | 69 | ||
70 | device = &per_cpu(at91_cpuidle_device, smp_processor_id()); | 70 | device = &per_cpu(at91_cpuidle_device, smp_processor_id()); |
71 | device->state_count = AT91_MAX_STATES; | 71 | device->state_count = AT91_MAX_STATES; |
72 | driver->state_count = AT91_MAX_STATES; | ||
72 | 73 | ||
73 | /* Wait for interrupt state */ | 74 | /* Wait for interrupt state */ |
74 | device->states[0].enter = at91_enter_idle; | 75 | driver->states[0].enter = at91_enter_idle; |
75 | device->states[0].exit_latency = 1; | 76 | driver->states[0].exit_latency = 1; |
76 | device->states[0].target_residency = 10000; | 77 | driver->states[0].target_residency = 10000; |
77 | device->states[0].flags = CPUIDLE_FLAG_TIME_VALID; | 78 | driver->states[0].flags = CPUIDLE_FLAG_TIME_VALID; |
78 | strcpy(device->states[0].name, "WFI"); | 79 | strcpy(driver->states[0].name, "WFI"); |
79 | strcpy(device->states[0].desc, "Wait for interrupt"); | 80 | strcpy(driver->states[0].desc, "Wait for interrupt"); |
80 | 81 | ||
81 | /* Wait for interrupt and RAM self refresh state */ | 82 | /* Wait for interrupt and RAM self refresh state */ |
82 | device->states[1].enter = at91_enter_idle; | 83 | driver->states[1].enter = at91_enter_idle; |
83 | device->states[1].exit_latency = 10; | 84 | driver->states[1].exit_latency = 10; |
84 | device->states[1].target_residency = 10000; | 85 | driver->states[1].target_residency = 10000; |
85 | device->states[1].flags = CPUIDLE_FLAG_TIME_VALID; | 86 | driver->states[1].flags = CPUIDLE_FLAG_TIME_VALID; |
86 | strcpy(device->states[1].name, "RAM_SR"); | 87 | strcpy(driver->states[1].name, "RAM_SR"); |
87 | strcpy(device->states[1].desc, "WFI and RAM Self Refresh"); | 88 | strcpy(driver->states[1].desc, "WFI and RAM Self Refresh"); |
89 | |||
90 | cpuidle_register_driver(&at91_idle_driver); | ||
88 | 91 | ||
89 | if (cpuidle_register_device(device)) { | 92 | if (cpuidle_register_device(device)) { |
90 | printk(KERN_ERR "at91_init_cpuidle: Failed registering\n"); | 93 | printk(KERN_ERR "at91_init_cpuidle: Failed registering\n"); |
diff --git a/arch/arm/mach-davinci/cpuidle.c b/arch/arm/mach-davinci/cpuidle.c
index f2d2f34603d9..dbeeccd00173 100644
--- a/arch/arm/mach-davinci/cpuidle.c
+++ b/arch/arm/mach-davinci/cpuidle.c
@@ -78,6 +78,7 @@ static struct davinci_ops davinci_states[DAVINCI_CPUIDLE_MAX_STATES] = { | |||
78 | 78 | ||
79 | /* Actual code that puts the SoC in different idle states */ | 79 | /* Actual code that puts the SoC in different idle states */ |
80 | static int davinci_enter_idle(struct cpuidle_device *dev, | 80 | static int davinci_enter_idle(struct cpuidle_device *dev, |
81 | struct cpuidle_driver *drv, | ||
81 | int index) | 82 | int index) |
82 | { | 83 | { |
83 | struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; | 84 | struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; |
@@ -109,6 +110,7 @@ static int __init davinci_cpuidle_probe(struct platform_device *pdev) | |||
109 | { | 110 | { |
110 | int ret; | 111 | int ret; |
111 | struct cpuidle_device *device; | 112 | struct cpuidle_device *device; |
113 | struct cpuidle_driver *driver = &davinci_idle_driver; | ||
112 | struct davinci_cpuidle_config *pdata = pdev->dev.platform_data; | 114 | struct davinci_cpuidle_config *pdata = pdev->dev.platform_data; |
113 | 115 | ||
114 | device = &per_cpu(davinci_cpuidle_device, smp_processor_id()); | 116 | device = &per_cpu(davinci_cpuidle_device, smp_processor_id()); |
@@ -120,32 +122,33 @@ static int __init davinci_cpuidle_probe(struct platform_device *pdev) | |||
120 | 122 | ||
121 | ddr2_reg_base = pdata->ddr2_ctlr_base; | 123 | ddr2_reg_base = pdata->ddr2_ctlr_base; |
122 | 124 | ||
123 | ret = cpuidle_register_driver(&davinci_idle_driver); | ||
124 | if (ret) { | ||
125 | dev_err(&pdev->dev, "failed to register driver\n"); | ||
126 | return ret; | ||
127 | } | ||
128 | |||
129 | /* Wait for interrupt state */ | 125 | /* Wait for interrupt state */ |
130 | device->states[0].enter = davinci_enter_idle; | 126 | driver->states[0].enter = davinci_enter_idle; |
131 | device->states[0].exit_latency = 1; | 127 | driver->states[0].exit_latency = 1; |
132 | device->states[0].target_residency = 10000; | 128 | driver->states[0].target_residency = 10000; |
133 | device->states[0].flags = CPUIDLE_FLAG_TIME_VALID; | 129 | driver->states[0].flags = CPUIDLE_FLAG_TIME_VALID; |
134 | strcpy(device->states[0].name, "WFI"); | 130 | strcpy(driver->states[0].name, "WFI"); |
135 | strcpy(device->states[0].desc, "Wait for interrupt"); | 131 | strcpy(driver->states[0].desc, "Wait for interrupt"); |
136 | 132 | ||
137 | /* Wait for interrupt and DDR self refresh state */ | 133 | /* Wait for interrupt and DDR self refresh state */ |
138 | device->states[1].enter = davinci_enter_idle; | 134 | driver->states[1].enter = davinci_enter_idle; |
139 | device->states[1].exit_latency = 10; | 135 | driver->states[1].exit_latency = 10; |
140 | device->states[1].target_residency = 10000; | 136 | driver->states[1].target_residency = 10000; |
141 | device->states[1].flags = CPUIDLE_FLAG_TIME_VALID; | 137 | driver->states[1].flags = CPUIDLE_FLAG_TIME_VALID; |
142 | strcpy(device->states[1].name, "DDR SR"); | 138 | strcpy(driver->states[1].name, "DDR SR"); |
143 | strcpy(device->states[1].desc, "WFI and DDR Self Refresh"); | 139 | strcpy(driver->states[1].desc, "WFI and DDR Self Refresh"); |
144 | if (pdata->ddr2_pdown) | 140 | if (pdata->ddr2_pdown) |
145 | davinci_states[1].flags |= DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN; | 141 | davinci_states[1].flags |= DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN; |
146 | cpuidle_set_statedata(&device->states_usage[1], &davinci_states[1]); | 142 | cpuidle_set_statedata(&device->states_usage[1], &davinci_states[1]); |
147 | 143 | ||
148 | device->state_count = DAVINCI_CPUIDLE_MAX_STATES; | 144 | device->state_count = DAVINCI_CPUIDLE_MAX_STATES; |
145 | driver->state_count = DAVINCI_CPUIDLE_MAX_STATES; | ||
146 | |||
147 | ret = cpuidle_register_driver(&davinci_idle_driver); | ||
148 | if (ret) { | ||
149 | dev_err(&pdev->dev, "failed to register driver\n"); | ||
150 | return ret; | ||
151 | } | ||
149 | 152 | ||
150 | ret = cpuidle_register_device(device); | 153 | ret = cpuidle_register_device(device); |
151 | if (ret) { | 154 | if (ret) { |
diff --git a/arch/arm/mach-exynos4/cpuidle.c b/arch/arm/mach-exynos4/cpuidle.c
index ea026e72b977..35f6502144ae 100644
--- a/arch/arm/mach-exynos4/cpuidle.c
+++ b/arch/arm/mach-exynos4/cpuidle.c
@@ -16,6 +16,7 @@ | |||
16 | #include <asm/proc-fns.h> | 16 | #include <asm/proc-fns.h> |
17 | 17 | ||
18 | static int exynos4_enter_idle(struct cpuidle_device *dev, | 18 | static int exynos4_enter_idle(struct cpuidle_device *dev, |
19 | struct cpuidle_driver *drv, | ||
19 | int index); | 20 | int index); |
20 | 21 | ||
21 | static struct cpuidle_state exynos4_cpuidle_set[] = { | 22 | static struct cpuidle_state exynos4_cpuidle_set[] = { |
@@ -37,6 +38,7 @@ static struct cpuidle_driver exynos4_idle_driver = { | |||
37 | }; | 38 | }; |
38 | 39 | ||
39 | static int exynos4_enter_idle(struct cpuidle_device *dev, | 40 | static int exynos4_enter_idle(struct cpuidle_device *dev, |
41 | struct cpuidle_driver *drv, | ||
40 | int index) | 42 | int index) |
41 | { | 43 | { |
42 | struct timeval before, after; | 44 | struct timeval before, after; |
@@ -60,22 +62,23 @@ static int __init exynos4_init_cpuidle(void) | |||
60 | { | 62 | { |
61 | int i, max_cpuidle_state, cpu_id; | 63 | int i, max_cpuidle_state, cpu_id; |
62 | struct cpuidle_device *device; | 64 | struct cpuidle_device *device; |
63 | 65 | struct cpuidle_driver *drv = &exynos4_idle_driver; | |
66 | |||
67 | /* Setup cpuidle driver */ | ||
68 | drv->state_count = (sizeof(exynos4_cpuidle_set) / | ||
69 | sizeof(struct cpuidle_state)); | ||
70 | max_cpuidle_state = drv->state_count; | ||
71 | for (i = 0; i < max_cpuidle_state; i++) { | ||
72 | memcpy(&drv->states[i], &exynos4_cpuidle_set[i], | ||
73 | sizeof(struct cpuidle_state)); | ||
74 | } | ||
64 | cpuidle_register_driver(&exynos4_idle_driver); | 75 | cpuidle_register_driver(&exynos4_idle_driver); |
65 | 76 | ||
66 | for_each_cpu(cpu_id, cpu_online_mask) { | 77 | for_each_cpu(cpu_id, cpu_online_mask) { |
67 | device = &per_cpu(exynos4_cpuidle_device, cpu_id); | 78 | device = &per_cpu(exynos4_cpuidle_device, cpu_id); |
68 | device->cpu = cpu_id; | 79 | device->cpu = cpu_id; |
69 | 80 | ||
70 | device->state_count = (sizeof(exynos4_cpuidle_set) / | 81 | device->state_count = drv->state_count; |
71 | sizeof(struct cpuidle_state)); | ||
72 | |||
73 | max_cpuidle_state = device->state_count; | ||
74 | |||
75 | for (i = 0; i < max_cpuidle_state; i++) { | ||
76 | memcpy(&device->states[i], &exynos4_cpuidle_set[i], | ||
77 | sizeof(struct cpuidle_state)); | ||
78 | } | ||
79 | 82 | ||
80 | if (cpuidle_register_device(device)) { | 83 | if (cpuidle_register_device(device)) { |
81 | printk(KERN_ERR "CPUidle register device failed\n,"); | 84 | printk(KERN_ERR "CPUidle register device failed\n,"); |
diff --git a/arch/arm/mach-kirkwood/cpuidle.c b/arch/arm/mach-kirkwood/cpuidle.c
index 358dd80b3a07..ffd690dc3d33 100644
--- a/arch/arm/mach-kirkwood/cpuidle.c
+++ b/arch/arm/mach-kirkwood/cpuidle.c
@@ -32,6 +32,7 @@ static DEFINE_PER_CPU(struct cpuidle_device, kirkwood_cpuidle_device); | |||
32 | 32 | ||
33 | /* Actual code that puts the SoC in different idle states */ | 33 | /* Actual code that puts the SoC in different idle states */ |
34 | static int kirkwood_enter_idle(struct cpuidle_device *dev, | 34 | static int kirkwood_enter_idle(struct cpuidle_device *dev, |
35 | struct cpuidle_driver *drv, | ||
35 | int index) | 36 | int index) |
36 | { | 37 | { |
37 | struct timeval before, after; | 38 | struct timeval before, after; |
@@ -68,28 +69,29 @@ static int kirkwood_enter_idle(struct cpuidle_device *dev, | |||
68 | static int kirkwood_init_cpuidle(void) | 69 | static int kirkwood_init_cpuidle(void) |
69 | { | 70 | { |
70 | struct cpuidle_device *device; | 71 | struct cpuidle_device *device; |
71 | 72 | struct cpuidle_driver *driver = &kirkwood_idle_driver; | |
72 | cpuidle_register_driver(&kirkwood_idle_driver); | ||
73 | 73 | ||
74 | device = &per_cpu(kirkwood_cpuidle_device, smp_processor_id()); | 74 | device = &per_cpu(kirkwood_cpuidle_device, smp_processor_id()); |
75 | device->state_count = KIRKWOOD_MAX_STATES; | 75 | device->state_count = KIRKWOOD_MAX_STATES; |
76 | driver->state_count = KIRKWOOD_MAX_STATES; | ||
76 | 77 | ||
77 | /* Wait for interrupt state */ | 78 | /* Wait for interrupt state */ |
78 | device->states[0].enter = kirkwood_enter_idle; | 79 | driver->states[0].enter = kirkwood_enter_idle; |
79 | device->states[0].exit_latency = 1; | 80 | driver->states[0].exit_latency = 1; |
80 | device->states[0].target_residency = 10000; | 81 | driver->states[0].target_residency = 10000; |
81 | device->states[0].flags = CPUIDLE_FLAG_TIME_VALID; | 82 | driver->states[0].flags = CPUIDLE_FLAG_TIME_VALID; |
82 | strcpy(device->states[0].name, "WFI"); | 83 | strcpy(driver->states[0].name, "WFI"); |
83 | strcpy(device->states[0].desc, "Wait for interrupt"); | 84 | strcpy(driver->states[0].desc, "Wait for interrupt"); |
84 | 85 | ||
85 | /* Wait for interrupt and DDR self refresh state */ | 86 | /* Wait for interrupt and DDR self refresh state */ |
86 | device->states[1].enter = kirkwood_enter_idle; | 87 | driver->states[1].enter = kirkwood_enter_idle; |
87 | device->states[1].exit_latency = 10; | 88 | driver->states[1].exit_latency = 10; |
88 | device->states[1].target_residency = 10000; | 89 | driver->states[1].target_residency = 10000; |
89 | device->states[1].flags = CPUIDLE_FLAG_TIME_VALID; | 90 | driver->states[1].flags = CPUIDLE_FLAG_TIME_VALID; |
90 | strcpy(device->states[1].name, "DDR SR"); | 91 | strcpy(driver->states[1].name, "DDR SR"); |
91 | strcpy(device->states[1].desc, "WFI and DDR Self Refresh"); | 92 | strcpy(driver->states[1].desc, "WFI and DDR Self Refresh"); |
92 | 93 | ||
94 | cpuidle_register_driver(&kirkwood_idle_driver); | ||
93 | if (cpuidle_register_device(device)) { | 95 | if (cpuidle_register_device(device)) { |
94 | printk(KERN_ERR "kirkwood_init_cpuidle: Failed registering\n"); | 96 | printk(KERN_ERR "kirkwood_init_cpuidle: Failed registering\n"); |
95 | return -EIO; | 97 | return -EIO; |
diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
index d3fce7b97fcf..1fe35c24fba2 100644
--- a/arch/arm/mach-omap2/cpuidle34xx.c
+++ b/arch/arm/mach-omap2/cpuidle34xx.c
@@ -88,12 +88,14 @@ static int _cpuidle_deny_idle(struct powerdomain *pwrdm, | |||
88 | /** | 88 | /** |
89 | * omap3_enter_idle - Programs OMAP3 to enter the specified state | 89 | * omap3_enter_idle - Programs OMAP3 to enter the specified state |
90 | * @dev: cpuidle device | 90 | * @dev: cpuidle device |
91 | * @drv: cpuidle driver | ||
91 | * @index: the index of state to be entered | 92 | * @index: the index of state to be entered |
92 | * | 93 | * |
93 | * Called from the CPUidle framework to program the device to the | 94 | * Called from the CPUidle framework to program the device to the |
94 | * specified target state selected by the governor. | 95 | * specified target state selected by the governor. |
95 | */ | 96 | */ |
96 | static int omap3_enter_idle(struct cpuidle_device *dev, | 97 | static int omap3_enter_idle(struct cpuidle_device *dev, |
98 | struct cpuidle_driver *drv, | ||
97 | int index) | 99 | int index) |
98 | { | 100 | { |
99 | struct omap3_idle_statedata *cx = | 101 | struct omap3_idle_statedata *cx = |
@@ -148,6 +150,7 @@ return_sleep_time: | |||
148 | /** | 150 | /** |
149 | * next_valid_state - Find next valid C-state | 151 | * next_valid_state - Find next valid C-state |
150 | * @dev: cpuidle device | 152 | * @dev: cpuidle device |
153 | * @drv: cpuidle driver | ||
151 | * @index: Index of currently selected c-state | 154 | * @index: Index of currently selected c-state |
152 | * | 155 | * |
153 | * If the state corresponding to index is valid, index is returned back | 156 | * If the state corresponding to index is valid, index is returned back |
@@ -158,10 +161,11 @@ return_sleep_time: | |||
158 | * if it satisfies the enable_off_mode condition. | 161 | * if it satisfies the enable_off_mode condition. |
159 | */ | 162 | */ |
160 | static int next_valid_state(struct cpuidle_device *dev, | 163 | static int next_valid_state(struct cpuidle_device *dev, |
164 | struct cpuidle_driver *drv, | ||
161 | int index) | 165 | int index) |
162 | { | 166 | { |
163 | struct cpuidle_state_usage *curr_usage = &dev->states_usage[index]; | 167 | struct cpuidle_state_usage *curr_usage = &dev->states_usage[index]; |
164 | struct cpuidle_state *curr = &dev->states[index]; | 168 | struct cpuidle_state *curr = &drv->states[index]; |
165 | struct omap3_idle_statedata *cx = cpuidle_get_statedata(curr_usage); | 169 | struct omap3_idle_statedata *cx = cpuidle_get_statedata(curr_usage); |
166 | u32 mpu_deepest_state = PWRDM_POWER_RET; | 170 | u32 mpu_deepest_state = PWRDM_POWER_RET; |
167 | u32 core_deepest_state = PWRDM_POWER_RET; | 171 | u32 core_deepest_state = PWRDM_POWER_RET; |
@@ -188,7 +192,7 @@ static int next_valid_state(struct cpuidle_device *dev, | |||
188 | 192 | ||
189 | /* Reach the current state starting at highest C-state */ | 193 | /* Reach the current state starting at highest C-state */ |
190 | for (; idx >= 0; idx--) { | 194 | for (; idx >= 0; idx--) { |
191 | if (&dev->states[idx] == curr) { | 195 | if (&drv->states[idx] == curr) { |
192 | next_index = idx; | 196 | next_index = idx; |
193 | break; | 197 | break; |
194 | } | 198 | } |
@@ -224,12 +228,14 @@ static int next_valid_state(struct cpuidle_device *dev, | |||
224 | /** | 228 | /** |
225 | * omap3_enter_idle_bm - Checks for any bus activity | 229 | * omap3_enter_idle_bm - Checks for any bus activity |
226 | * @dev: cpuidle device | 230 | * @dev: cpuidle device |
231 | * @drv: cpuidle driver | ||
227 | * @index: array index of target state to be programmed | 232 | * @index: array index of target state to be programmed |
228 | * | 233 | * |
229 | * This function checks for any pending activity and then programs | 234 | * This function checks for any pending activity and then programs |
230 | * the device to the specified or a safer state. | 235 | * the device to the specified or a safer state. |
231 | */ | 236 | */ |
232 | static int omap3_enter_idle_bm(struct cpuidle_device *dev, | 237 | static int omap3_enter_idle_bm(struct cpuidle_device *dev, |
238 | struct cpuidle_driver *drv, | ||
233 | int index) | 239 | int index) |
234 | { | 240 | { |
235 | int new_state_idx; | 241 | int new_state_idx; |
@@ -238,7 +244,7 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev, | |||
238 | int ret; | 244 | int ret; |
239 | 245 | ||
240 | if (!omap3_can_sleep()) { | 246 | if (!omap3_can_sleep()) { |
241 | new_state_idx = dev->safe_state_index; | 247 | new_state_idx = drv->safe_state_index; |
242 | goto select_state; | 248 | goto select_state; |
243 | } | 249 | } |
244 | 250 | ||
@@ -248,7 +254,7 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev, | |||
248 | */ | 254 | */ |
249 | cam_state = pwrdm_read_pwrst(cam_pd); | 255 | cam_state = pwrdm_read_pwrst(cam_pd); |
250 | if (cam_state == PWRDM_POWER_ON) { | 256 | if (cam_state == PWRDM_POWER_ON) { |
251 | new_state_idx = dev->safe_state_index; | 257 | new_state_idx = drv->safe_state_index; |
252 | goto select_state; | 258 | goto select_state; |
253 | } | 259 | } |
254 | 260 | ||
@@ -275,10 +281,10 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev, | |||
275 | if (per_next_state != per_saved_state) | 281 | if (per_next_state != per_saved_state) |
276 | pwrdm_set_next_pwrst(per_pd, per_next_state); | 282 | pwrdm_set_next_pwrst(per_pd, per_next_state); |
277 | 283 | ||
278 | new_state_idx = next_valid_state(dev, index); | 284 | new_state_idx = next_valid_state(dev, drv, index); |
279 | 285 | ||
280 | select_state: | 286 | select_state: |
281 | ret = omap3_enter_idle(dev, new_state_idx); | 287 | ret = omap3_enter_idle(dev, drv, new_state_idx); |
282 | 288 | ||
283 | /* Restore original PER state if it was modified */ | 289 | /* Restore original PER state if it was modified */ |
284 | if (per_next_state != per_saved_state) | 290 | if (per_next_state != per_saved_state) |
@@ -311,22 +317,30 @@ struct cpuidle_driver omap3_idle_driver = { | |||
311 | .owner = THIS_MODULE, | 317 | .owner = THIS_MODULE, |
312 | }; | 318 | }; |
313 | 319 | ||
314 | /* Helper to fill the C-state common data and register the driver_data */ | 320 | /* Helper to fill the C-state common data*/ |
315 | static inline struct omap3_idle_statedata *_fill_cstate( | 321 | static inline void _fill_cstate(struct cpuidle_driver *drv, |
316 | struct cpuidle_device *dev, | ||
317 | int idx, const char *descr) | 322 | int idx, const char *descr) |
318 | { | 323 | { |
319 | struct omap3_idle_statedata *cx = &omap3_idle_data[idx]; | 324 | struct cpuidle_state *state = &drv->states[idx]; |
320 | struct cpuidle_state *state = &dev->states[idx]; | ||
321 | struct cpuidle_state_usage *state_usage = &dev->states_usage[idx]; | ||
322 | 325 | ||
323 | state->exit_latency = cpuidle_params_table[idx].exit_latency; | 326 | state->exit_latency = cpuidle_params_table[idx].exit_latency; |
324 | state->target_residency = cpuidle_params_table[idx].target_residency; | 327 | state->target_residency = cpuidle_params_table[idx].target_residency; |
325 | state->flags = CPUIDLE_FLAG_TIME_VALID; | 328 | state->flags = CPUIDLE_FLAG_TIME_VALID; |
326 | state->enter = omap3_enter_idle_bm; | 329 | state->enter = omap3_enter_idle_bm; |
327 | cx->valid = cpuidle_params_table[idx].valid; | ||
328 | sprintf(state->name, "C%d", idx + 1); | 330 | sprintf(state->name, "C%d", idx + 1); |
329 | strncpy(state->desc, descr, CPUIDLE_DESC_LEN); | 331 | strncpy(state->desc, descr, CPUIDLE_DESC_LEN); |
332 | |||
333 | } | ||
334 | |||
335 | /* Helper to register the driver_data */ | ||
336 | static inline struct omap3_idle_statedata *_fill_cstate_usage( | ||
337 | struct cpuidle_device *dev, | ||
338 | int idx) | ||
339 | { | ||
340 | struct omap3_idle_statedata *cx = &omap3_idle_data[idx]; | ||
341 | struct cpuidle_state_usage *state_usage = &dev->states_usage[idx]; | ||
342 | |||
343 | cx->valid = cpuidle_params_table[idx].valid; | ||
330 | cpuidle_set_statedata(state_usage, cx); | 344 | cpuidle_set_statedata(state_usage, cx); |
331 | 345 | ||
332 | return cx; | 346 | return cx; |
@@ -341,6 +355,7 @@ static inline struct omap3_idle_statedata *_fill_cstate( | |||
341 | int __init omap3_idle_init(void) | 355 | int __init omap3_idle_init(void) |
342 | { | 356 | { |
343 | struct cpuidle_device *dev; | 357 | struct cpuidle_device *dev; |
358 | struct cpuidle_driver *drv = &omap3_idle_driver; | ||
344 | struct omap3_idle_statedata *cx; | 359 | struct omap3_idle_statedata *cx; |
345 | 360 | ||
346 | mpu_pd = pwrdm_lookup("mpu_pwrdm"); | 361 | mpu_pd = pwrdm_lookup("mpu_pwrdm"); |
@@ -348,45 +363,52 @@ int __init omap3_idle_init(void) | |||
348 | per_pd = pwrdm_lookup("per_pwrdm"); | 363 | per_pd = pwrdm_lookup("per_pwrdm"); |
349 | cam_pd = pwrdm_lookup("cam_pwrdm"); | 364 | cam_pd = pwrdm_lookup("cam_pwrdm"); |
350 | 365 | ||
351 | cpuidle_register_driver(&omap3_idle_driver); | 366 | |
367 | drv->safe_state_index = -1; | ||
352 | dev = &per_cpu(omap3_idle_dev, smp_processor_id()); | 368 | dev = &per_cpu(omap3_idle_dev, smp_processor_id()); |
353 | dev->safe_state_index = -1; | ||
354 | 369 | ||
355 | /* C1 . MPU WFI + Core active */ | 370 | /* C1 . MPU WFI + Core active */ |
356 | cx = _fill_cstate(dev, 0, "MPU ON + CORE ON"); | 371 | _fill_cstate(drv, 0, "MPU ON + CORE ON"); |
357 | (&dev->states[0])->enter = omap3_enter_idle; | 372 | (&drv->states[0])->enter = omap3_enter_idle; |
358 | dev->safe_state_index = 0; | 373 | drv->safe_state_index = 0; |
374 | cx = _fill_cstate_usage(dev, 0); | ||
359 | cx->valid = 1; /* C1 is always valid */ | 375 | cx->valid = 1; /* C1 is always valid */ |
360 | cx->mpu_state = PWRDM_POWER_ON; | 376 | cx->mpu_state = PWRDM_POWER_ON; |
361 | cx->core_state = PWRDM_POWER_ON; | 377 | cx->core_state = PWRDM_POWER_ON; |
362 | 378 | ||
363 | /* C2 . MPU WFI + Core inactive */ | 379 | /* C2 . MPU WFI + Core inactive */ |
364 | cx = _fill_cstate(dev, 1, "MPU ON + CORE ON"); | 380 | _fill_cstate(drv, 1, "MPU ON + CORE ON"); |
381 | cx = _fill_cstate_usage(dev, 1); | ||
365 | cx->mpu_state = PWRDM_POWER_ON; | 382 | cx->mpu_state = PWRDM_POWER_ON; |
366 | cx->core_state = PWRDM_POWER_ON; | 383 | cx->core_state = PWRDM_POWER_ON; |
367 | 384 | ||
368 | /* C3 . MPU CSWR + Core inactive */ | 385 | /* C3 . MPU CSWR + Core inactive */ |
369 | cx = _fill_cstate(dev, 2, "MPU RET + CORE ON"); | 386 | _fill_cstate(drv, 2, "MPU RET + CORE ON"); |
387 | cx = _fill_cstate_usage(dev, 2); | ||
370 | cx->mpu_state = PWRDM_POWER_RET; | 388 | cx->mpu_state = PWRDM_POWER_RET; |
371 | cx->core_state = PWRDM_POWER_ON; | 389 | cx->core_state = PWRDM_POWER_ON; |
372 | 390 | ||
373 | /* C4 . MPU OFF + Core inactive */ | 391 | /* C4 . MPU OFF + Core inactive */ |
374 | cx = _fill_cstate(dev, 3, "MPU OFF + CORE ON"); | 392 | _fill_cstate(drv, 3, "MPU OFF + CORE ON"); |
393 | cx = _fill_cstate_usage(dev, 3); | ||
375 | cx->mpu_state = PWRDM_POWER_OFF; | 394 | cx->mpu_state = PWRDM_POWER_OFF; |
376 | cx->core_state = PWRDM_POWER_ON; | 395 | cx->core_state = PWRDM_POWER_ON; |
377 | 396 | ||
378 | /* C5 . MPU RET + Core RET */ | 397 | /* C5 . MPU RET + Core RET */ |
379 | cx = _fill_cstate(dev, 4, "MPU RET + CORE RET"); | 398 | _fill_cstate(drv, 4, "MPU RET + CORE RET"); |
399 | cx = _fill_cstate_usage(dev, 4); | ||
380 | cx->mpu_state = PWRDM_POWER_RET; | 400 | cx->mpu_state = PWRDM_POWER_RET; |
381 | cx->core_state = PWRDM_POWER_RET; | 401 | cx->core_state = PWRDM_POWER_RET; |
382 | 402 | ||
383 | /* C6 . MPU OFF + Core RET */ | 403 | /* C6 . MPU OFF + Core RET */ |
384 | cx = _fill_cstate(dev, 5, "MPU OFF + CORE RET"); | 404 | _fill_cstate(drv, 5, "MPU OFF + CORE RET"); |
405 | cx = _fill_cstate_usage(dev, 5); | ||
385 | cx->mpu_state = PWRDM_POWER_OFF; | 406 | cx->mpu_state = PWRDM_POWER_OFF; |
386 | cx->core_state = PWRDM_POWER_RET; | 407 | cx->core_state = PWRDM_POWER_RET; |
387 | 408 | ||
388 | /* C7 . MPU OFF + Core OFF */ | 409 | /* C7 . MPU OFF + Core OFF */ |
389 | cx = _fill_cstate(dev, 6, "MPU OFF + CORE OFF"); | 410 | _fill_cstate(drv, 6, "MPU OFF + CORE OFF"); |
411 | cx = _fill_cstate_usage(dev, 6); | ||
390 | /* | 412 | /* |
391 | * Erratum i583: implementation for ES rev < Es1.2 on 3630. We cannot | 413 | * Erratum i583: implementation for ES rev < Es1.2 on 3630. We cannot |
392 | * enable OFF mode in a stable form for previous revisions. | 414 | * enable OFF mode in a stable form for previous revisions. |
@@ -400,6 +422,9 @@ int __init omap3_idle_init(void) | |||
400 | cx->mpu_state = PWRDM_POWER_OFF; | 422 | cx->mpu_state = PWRDM_POWER_OFF; |
401 | cx->core_state = PWRDM_POWER_OFF; | 423 | cx->core_state = PWRDM_POWER_OFF; |
402 | 424 | ||
425 | drv->state_count = OMAP3_NUM_STATES; | ||
426 | cpuidle_register_driver(&omap3_idle_driver); | ||
427 | |||
403 | dev->state_count = OMAP3_NUM_STATES; | 428 | dev->state_count = OMAP3_NUM_STATES; |
404 | if (cpuidle_register_device(dev)) { | 429 | if (cpuidle_register_device(dev)) { |
405 | printk(KERN_ERR "%s: CPUidle register device failed\n", | 430 | printk(KERN_ERR "%s: CPUidle register device failed\n", |
diff --git a/arch/sh/kernel/cpu/shmobile/cpuidle.c b/arch/sh/kernel/cpu/shmobile/cpuidle.c
index 7be50d4c4268..ad1012ad6b42 100644
--- a/arch/sh/kernel/cpu/shmobile/cpuidle.c
+++ b/arch/sh/kernel/cpu/shmobile/cpuidle.c
@@ -25,6 +25,7 @@ static unsigned long cpuidle_mode[] = { | |||
25 | }; | 25 | }; |
26 | 26 | ||
27 | static int cpuidle_sleep_enter(struct cpuidle_device *dev, | 27 | static int cpuidle_sleep_enter(struct cpuidle_device *dev, |
28 | struct cpuidle_driver *drv, | ||
28 | int index) | 29 | int index) |
29 | { | 30 | { |
30 | unsigned long allowed_mode = arch_hwblk_sleep_mode(); | 31 | unsigned long allowed_mode = arch_hwblk_sleep_mode(); |
@@ -64,19 +65,19 @@ static struct cpuidle_driver cpuidle_driver = { | |||
64 | void sh_mobile_setup_cpuidle(void) | 65 | void sh_mobile_setup_cpuidle(void) |
65 | { | 66 | { |
66 | struct cpuidle_device *dev = &cpuidle_dev; | 67 | struct cpuidle_device *dev = &cpuidle_dev; |
68 | struct cpuidle_driver *drv = &cpuidle_driver; | ||
67 | struct cpuidle_state *state; | 69 | struct cpuidle_state *state; |
68 | int i; | 70 | int i; |
69 | 71 | ||
70 | cpuidle_register_driver(&cpuidle_driver); | ||
71 | 72 | ||
72 | for (i = 0; i < CPUIDLE_STATE_MAX; i++) { | 73 | for (i = 0; i < CPUIDLE_STATE_MAX; i++) { |
73 | dev->states[i].name[0] = '\0'; | 74 | drv->states[i].name[0] = '\0'; |
74 | dev->states[i].desc[0] = '\0'; | 75 | drv->states[i].desc[0] = '\0'; |
75 | } | 76 | } |
76 | 77 | ||
77 | i = CPUIDLE_DRIVER_STATE_START; | 78 | i = CPUIDLE_DRIVER_STATE_START; |
78 | 79 | ||
79 | state = &dev->states[i++]; | 80 | state = &drv->states[i++]; |
80 | snprintf(state->name, CPUIDLE_NAME_LEN, "C1"); | 81 | snprintf(state->name, CPUIDLE_NAME_LEN, "C1"); |
81 | strncpy(state->desc, "SuperH Sleep Mode", CPUIDLE_DESC_LEN); | 82 | strncpy(state->desc, "SuperH Sleep Mode", CPUIDLE_DESC_LEN); |
82 | state->exit_latency = 1; | 83 | state->exit_latency = 1; |
@@ -86,10 +87,10 @@ void sh_mobile_setup_cpuidle(void) | |||
86 | state->flags |= CPUIDLE_FLAG_TIME_VALID; | 87 | state->flags |= CPUIDLE_FLAG_TIME_VALID; |
87 | state->enter = cpuidle_sleep_enter; | 88 | state->enter = cpuidle_sleep_enter; |
88 | 89 | ||
89 | dev->safe_state_index = i-1; | 90 | drv->safe_state_index = i-1; |
90 | 91 | ||
91 | if (sh_mobile_sleep_supported & SUSP_SH_SF) { | 92 | if (sh_mobile_sleep_supported & SUSP_SH_SF) { |
92 | state = &dev->states[i++]; | 93 | state = &drv->states[i++]; |
93 | snprintf(state->name, CPUIDLE_NAME_LEN, "C2"); | 94 | snprintf(state->name, CPUIDLE_NAME_LEN, "C2"); |
94 | strncpy(state->desc, "SuperH Sleep Mode [SF]", | 95 | strncpy(state->desc, "SuperH Sleep Mode [SF]", |
95 | CPUIDLE_DESC_LEN); | 96 | CPUIDLE_DESC_LEN); |
@@ -102,7 +103,7 @@ void sh_mobile_setup_cpuidle(void) | |||
102 | } | 103 | } |
103 | 104 | ||
104 | if (sh_mobile_sleep_supported & SUSP_SH_STANDBY) { | 105 | if (sh_mobile_sleep_supported & SUSP_SH_STANDBY) { |
105 | state = &dev->states[i++]; | 106 | state = &drv->states[i++]; |
106 | snprintf(state->name, CPUIDLE_NAME_LEN, "C3"); | 107 | snprintf(state->name, CPUIDLE_NAME_LEN, "C3"); |
107 | strncpy(state->desc, "SuperH Mobile Standby Mode [SF]", | 108 | strncpy(state->desc, "SuperH Mobile Standby Mode [SF]", |
108 | CPUIDLE_DESC_LEN); | 109 | CPUIDLE_DESC_LEN); |
@@ -114,7 +115,10 @@ void sh_mobile_setup_cpuidle(void) | |||
114 | state->enter = cpuidle_sleep_enter; | 115 | state->enter = cpuidle_sleep_enter; |
115 | } | 116 | } |
116 | 117 | ||
118 | drv->state_count = i; | ||
117 | dev->state_count = i; | 119 | dev->state_count = i; |
118 | 120 | ||
121 | cpuidle_register_driver(&cpuidle_driver); | ||
122 | |||
119 | cpuidle_register_device(dev); | 123 | cpuidle_register_device(dev); |
120 | } | 124 | } |
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index a4e0f1ba6040..9d7bc9f6b6cc 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -426,7 +426,7 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb, | |||
426 | 426 | ||
427 | if (action == CPU_ONLINE && pr) { | 427 | if (action == CPU_ONLINE && pr) { |
428 | acpi_processor_ppc_has_changed(pr, 0); | 428 | acpi_processor_ppc_has_changed(pr, 0); |
429 | acpi_processor_cst_has_changed(pr); | 429 | acpi_processor_hotplug(pr); |
430 | acpi_processor_reevaluate_tstate(pr, action); | 430 | acpi_processor_reevaluate_tstate(pr, action); |
431 | acpi_processor_tstate_has_changed(pr); | 431 | acpi_processor_tstate_has_changed(pr); |
432 | } | 432 | } |
@@ -503,8 +503,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device) | |||
503 | acpi_processor_get_throttling_info(pr); | 503 | acpi_processor_get_throttling_info(pr); |
504 | acpi_processor_get_limit_info(pr); | 504 | acpi_processor_get_limit_info(pr); |
505 | 505 | ||
506 | 506 | if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver) | |
507 | if (cpuidle_get_driver() == &acpi_idle_driver) | ||
508 | acpi_processor_power_init(pr, device); | 507 | acpi_processor_power_init(pr, device); |
509 | 508 | ||
510 | pr->cdev = thermal_cooling_device_register("Processor", device, | 509 | pr->cdev = thermal_cooling_device_register("Processor", device, |
@@ -800,17 +799,9 @@ static int __init acpi_processor_init(void) | |||
800 | 799 | ||
801 | memset(&errata, 0, sizeof(errata)); | 800 | memset(&errata, 0, sizeof(errata)); |
802 | 801 | ||
803 | if (!cpuidle_register_driver(&acpi_idle_driver)) { | ||
804 | printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n", | ||
805 | acpi_idle_driver.name); | ||
806 | } else { | ||
807 | printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s\n", | ||
808 | cpuidle_get_driver()->name); | ||
809 | } | ||
810 | |||
811 | result = acpi_bus_register_driver(&acpi_processor_driver); | 802 | result = acpi_bus_register_driver(&acpi_processor_driver); |
812 | if (result < 0) | 803 | if (result < 0) |
813 | goto out_cpuidle; | 804 | return result; |
814 | 805 | ||
815 | acpi_processor_install_hotplug_notify(); | 806 | acpi_processor_install_hotplug_notify(); |
816 | 807 | ||
@@ -821,11 +812,6 @@ static int __init acpi_processor_init(void) | |||
821 | acpi_processor_throttling_init(); | 812 | acpi_processor_throttling_init(); |
822 | 813 | ||
823 | return 0; | 814 | return 0; |
824 | |||
825 | out_cpuidle: | ||
826 | cpuidle_unregister_driver(&acpi_idle_driver); | ||
827 | |||
828 | return result; | ||
829 | } | 815 | } |
830 | 816 | ||
831 | static void __exit acpi_processor_exit(void) | 817 | static void __exit acpi_processor_exit(void) |
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index b98c75285690..24fe3afa7119 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -741,11 +741,13 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx) | |||
741 | /** | 741 | /** |
742 | * acpi_idle_enter_c1 - enters an ACPI C1 state-type | 742 | * acpi_idle_enter_c1 - enters an ACPI C1 state-type |
743 | * @dev: the target CPU | 743 | * @dev: the target CPU |
744 | * @drv: cpuidle driver containing cpuidle state info | ||
744 | * @index: index of target state | 745 | * @index: index of target state |
745 | * | 746 | * |
746 | * This is equivalent to the HALT instruction. | 747 | * This is equivalent to the HALT instruction. |
747 | */ | 748 | */ |
748 | static int acpi_idle_enter_c1(struct cpuidle_device *dev, int index) | 749 | static int acpi_idle_enter_c1(struct cpuidle_device *dev, |
750 | struct cpuidle_driver *drv, int index) | ||
749 | { | 751 | { |
750 | ktime_t kt1, kt2; | 752 | ktime_t kt1, kt2; |
751 | s64 idle_time; | 753 | s64 idle_time; |
@@ -787,9 +789,11 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev, int index) | |||
787 | /** | 789 | /** |
788 | * acpi_idle_enter_simple - enters an ACPI state without BM handling | 790 | * acpi_idle_enter_simple - enters an ACPI state without BM handling |
789 | * @dev: the target CPU | 791 | * @dev: the target CPU |
792 | * @drv: cpuidle driver with cpuidle state information | ||
790 | * @index: the index of suggested state | 793 | * @index: the index of suggested state |
791 | */ | 794 | */ |
792 | static int acpi_idle_enter_simple(struct cpuidle_device *dev, int index) | 795 | static int acpi_idle_enter_simple(struct cpuidle_device *dev, |
796 | struct cpuidle_driver *drv, int index) | ||
793 | { | 797 | { |
794 | struct acpi_processor *pr; | 798 | struct acpi_processor *pr; |
795 | struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; | 799 | struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; |
@@ -869,11 +873,13 @@ static DEFINE_SPINLOCK(c3_lock); | |||
869 | /** | 873 | /** |
870 | * acpi_idle_enter_bm - enters C3 with proper BM handling | 874 | * acpi_idle_enter_bm - enters C3 with proper BM handling |
871 | * @dev: the target CPU | 875 | * @dev: the target CPU |
876 | * @drv: cpuidle driver containing state data | ||
872 | * @index: the index of suggested state | 877 | * @index: the index of suggested state |
873 | * | 878 | * |
874 | * If BM is detected, the deepest non-C3 idle state is entered instead. | 879 | * If BM is detected, the deepest non-C3 idle state is entered instead. |
875 | */ | 880 | */ |
876 | static int acpi_idle_enter_bm(struct cpuidle_device *dev, int index) | 881 | static int acpi_idle_enter_bm(struct cpuidle_device *dev, |
882 | struct cpuidle_driver *drv, int index) | ||
877 | { | 883 | { |
878 | struct acpi_processor *pr; | 884 | struct acpi_processor *pr; |
879 | struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; | 885 | struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; |
@@ -896,9 +902,9 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, int index) | |||
896 | } | 902 | } |
897 | 903 | ||
898 | if (!cx->bm_sts_skip && acpi_idle_bm_check()) { | 904 | if (!cx->bm_sts_skip && acpi_idle_bm_check()) { |
899 | if (dev->safe_state_index >= 0) { | 905 | if (drv->safe_state_index >= 0) { |
900 | return dev->states[dev->safe_state_index].enter(dev, | 906 | return drv->states[drv->safe_state_index].enter(dev, |
901 | dev->safe_state_index); | 907 | drv, drv->safe_state_index); |
902 | } else { | 908 | } else { |
903 | local_irq_disable(); | 909 | local_irq_disable(); |
904 | acpi_safe_halt(); | 910 | acpi_safe_halt(); |
@@ -993,14 +999,15 @@ struct cpuidle_driver acpi_idle_driver = { | |||
993 | }; | 999 | }; |
994 | 1000 | ||
995 | /** | 1001 | /** |
996 | * acpi_processor_setup_cpuidle - prepares and configures CPUIDLE | 1002 | * acpi_processor_setup_cpuidle_cx - prepares and configures CPUIDLE |
1003 | * device i.e. per-cpu data | ||
1004 | * | ||
997 | * @pr: the ACPI processor | 1005 | * @pr: the ACPI processor |
998 | */ | 1006 | */ |
999 | static int acpi_processor_setup_cpuidle(struct acpi_processor *pr) | 1007 | static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr) |
1000 | { | 1008 | { |
1001 | int i, count = CPUIDLE_DRIVER_STATE_START; | 1009 | int i, count = CPUIDLE_DRIVER_STATE_START; |
1002 | struct acpi_processor_cx *cx; | 1010 | struct acpi_processor_cx *cx; |
1003 | struct cpuidle_state *state; | ||
1004 | struct cpuidle_state_usage *state_usage; | 1011 | struct cpuidle_state_usage *state_usage; |
1005 | struct cpuidle_device *dev = &pr->power.dev; | 1012 | struct cpuidle_device *dev = &pr->power.dev; |
1006 | 1013 | ||
@@ -1012,18 +1019,12 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr) | |||
1012 | } | 1019 | } |
1013 | 1020 | ||
1014 | dev->cpu = pr->id; | 1021 | dev->cpu = pr->id; |
1015 | dev->safe_state_index = -1; | ||
1016 | for (i = 0; i < CPUIDLE_STATE_MAX; i++) { | ||
1017 | dev->states[i].name[0] = '\0'; | ||
1018 | dev->states[i].desc[0] = '\0'; | ||
1019 | } | ||
1020 | 1022 | ||
1021 | if (max_cstate == 0) | 1023 | if (max_cstate == 0) |
1022 | max_cstate = 1; | 1024 | max_cstate = 1; |
1023 | 1025 | ||
1024 | for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) { | 1026 | for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) { |
1025 | cx = &pr->power.states[i]; | 1027 | cx = &pr->power.states[i]; |
1026 | state = &dev->states[count]; | ||
1027 | state_usage = &dev->states_usage[count]; | 1028 | state_usage = &dev->states_usage[count]; |
1028 | 1029 | ||
1029 | if (!cx->valid) | 1030 | if (!cx->valid) |
@@ -1035,8 +1036,64 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr) | |||
1035 | !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) | 1036 | !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) |
1036 | continue; | 1037 | continue; |
1037 | #endif | 1038 | #endif |
1039 | |||
1038 | cpuidle_set_statedata(state_usage, cx); | 1040 | cpuidle_set_statedata(state_usage, cx); |
1039 | 1041 | ||
1042 | count++; | ||
1043 | if (count == CPUIDLE_STATE_MAX) | ||
1044 | break; | ||
1045 | } | ||
1046 | |||
1047 | dev->state_count = count; | ||
1048 | |||
1049 | if (!count) | ||
1050 | return -EINVAL; | ||
1051 | |||
1052 | return 0; | ||
1053 | } | ||
1054 | |||
1055 | /** | ||
1056 | * acpi_processor_setup_cpuidle states- prepares and configures cpuidle | ||
1057 | * global state data i.e. idle routines | ||
1058 | * | ||
1059 | * @pr: the ACPI processor | ||
1060 | */ | ||
1061 | static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr) | ||
1062 | { | ||
1063 | int i, count = CPUIDLE_DRIVER_STATE_START; | ||
1064 | struct acpi_processor_cx *cx; | ||
1065 | struct cpuidle_state *state; | ||
1066 | struct cpuidle_driver *drv = &acpi_idle_driver; | ||
1067 | |||
1068 | if (!pr->flags.power_setup_done) | ||
1069 | return -EINVAL; | ||
1070 | |||
1071 | if (pr->flags.power == 0) | ||
1072 | return -EINVAL; | ||
1073 | |||
1074 | drv->safe_state_index = -1; | ||
1075 | for (i = 0; i < CPUIDLE_STATE_MAX; i++) { | ||
1076 | drv->states[i].name[0] = '\0'; | ||
1077 | drv->states[i].desc[0] = '\0'; | ||
1078 | } | ||
1079 | |||
1080 | if (max_cstate == 0) | ||
1081 | max_cstate = 1; | ||
1082 | |||
1083 | for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) { | ||
1084 | cx = &pr->power.states[i]; | ||
1085 | |||
1086 | if (!cx->valid) | ||
1087 | continue; | ||
1088 | |||
1089 | #ifdef CONFIG_HOTPLUG_CPU | ||
1090 | if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) && | ||
1091 | !pr->flags.has_cst && | ||
1092 | !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) | ||
1093 | continue; | ||
1094 | #endif | ||
1095 | |||
1096 | state = &drv->states[count]; | ||
1040 | snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i); | 1097 | snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i); |
1041 | strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN); | 1098 | strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN); |
1042 | state->exit_latency = cx->latency; | 1099 | state->exit_latency = cx->latency; |
@@ -1049,13 +1106,13 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr) | |||
1049 | state->flags |= CPUIDLE_FLAG_TIME_VALID; | 1106 | state->flags |= CPUIDLE_FLAG_TIME_VALID; |
1050 | 1107 | ||
1051 | state->enter = acpi_idle_enter_c1; | 1108 | state->enter = acpi_idle_enter_c1; |
1052 | dev->safe_state_index = count; | 1109 | drv->safe_state_index = count; |
1053 | break; | 1110 | break; |
1054 | 1111 | ||
1055 | case ACPI_STATE_C2: | 1112 | case ACPI_STATE_C2: |
1056 | state->flags |= CPUIDLE_FLAG_TIME_VALID; | 1113 | state->flags |= CPUIDLE_FLAG_TIME_VALID; |
1057 | state->enter = acpi_idle_enter_simple; | 1114 | state->enter = acpi_idle_enter_simple; |
1058 | dev->safe_state_index = count; | 1115 | drv->safe_state_index = count; |
1059 | break; | 1116 | break; |
1060 | 1117 | ||
1061 | case ACPI_STATE_C3: | 1118 | case ACPI_STATE_C3: |
@@ -1071,7 +1128,7 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr) | |||
1071 | break; | 1128 | break; |
1072 | } | 1129 | } |
1073 | 1130 | ||
1074 | dev->state_count = count; | 1131 | drv->state_count = count; |
1075 | 1132 | ||
1076 | if (!count) | 1133 | if (!count) |
1077 | return -EINVAL; | 1134 | return -EINVAL; |
@@ -1079,7 +1136,7 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr) | |||
1079 | return 0; | 1136 | return 0; |
1080 | } | 1137 | } |
1081 | 1138 | ||
1082 | int acpi_processor_cst_has_changed(struct acpi_processor *pr) | 1139 | int acpi_processor_hotplug(struct acpi_processor *pr) |
1083 | { | 1140 | { |
1084 | int ret = 0; | 1141 | int ret = 0; |
1085 | 1142 | ||
@@ -1100,7 +1157,7 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr) | |||
1100 | cpuidle_disable_device(&pr->power.dev); | 1157 | cpuidle_disable_device(&pr->power.dev); |
1101 | acpi_processor_get_power_info(pr); | 1158 | acpi_processor_get_power_info(pr); |
1102 | if (pr->flags.power) { | 1159 | if (pr->flags.power) { |
1103 | acpi_processor_setup_cpuidle(pr); | 1160 | acpi_processor_setup_cpuidle_cx(pr); |
1104 | ret = cpuidle_enable_device(&pr->power.dev); | 1161 | ret = cpuidle_enable_device(&pr->power.dev); |
1105 | } | 1162 | } |
1106 | cpuidle_resume_and_unlock(); | 1163 | cpuidle_resume_and_unlock(); |
@@ -1108,10 +1165,72 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr) | |||
1108 | return ret; | 1165 | return ret; |
1109 | } | 1166 | } |
1110 | 1167 | ||
1168 | int acpi_processor_cst_has_changed(struct acpi_processor *pr) | ||
1169 | { | ||
1170 | int cpu; | ||
1171 | struct acpi_processor *_pr; | ||
1172 | |||
1173 | if (disabled_by_idle_boot_param()) | ||
1174 | return 0; | ||
1175 | |||
1176 | if (!pr) | ||
1177 | return -EINVAL; | ||
1178 | |||
1179 | if (nocst) | ||
1180 | return -ENODEV; | ||
1181 | |||
1182 | if (!pr->flags.power_setup_done) | ||
1183 | return -ENODEV; | ||
1184 | |||
1185 | /* | ||
1186 | * FIXME: Design the ACPI notification to make it once per | ||
1187 | * system instead of once per-cpu. This condition is a hack | ||
1188 | * to make the code that updates C-States be called once. | ||
1189 | */ | ||
1190 | |||
1191 | if (smp_processor_id() == 0 && | ||
1192 | cpuidle_get_driver() == &acpi_idle_driver) { | ||
1193 | |||
1194 | cpuidle_pause_and_lock(); | ||
1195 | /* Protect against cpu-hotplug */ | ||
1196 | get_online_cpus(); | ||
1197 | |||
1198 | /* Disable all cpuidle devices */ | ||
1199 | for_each_online_cpu(cpu) { | ||
1200 | _pr = per_cpu(processors, cpu); | ||
1201 | if (!_pr || !_pr->flags.power_setup_done) | ||
1202 | continue; | ||
1203 | cpuidle_disable_device(&_pr->power.dev); | ||
1204 | } | ||
1205 | |||
1206 | /* Populate Updated C-state information */ | ||
1207 | acpi_processor_setup_cpuidle_states(pr); | ||
1208 | |||
1209 | /* Enable all cpuidle devices */ | ||
1210 | for_each_online_cpu(cpu) { | ||
1211 | _pr = per_cpu(processors, cpu); | ||
1212 | if (!_pr || !_pr->flags.power_setup_done) | ||
1213 | continue; | ||
1214 | acpi_processor_get_power_info(_pr); | ||
1215 | if (_pr->flags.power) { | ||
1216 | acpi_processor_setup_cpuidle_cx(_pr); | ||
1217 | cpuidle_enable_device(&_pr->power.dev); | ||
1218 | } | ||
1219 | } | ||
1220 | put_online_cpus(); | ||
1221 | cpuidle_resume_and_unlock(); | ||
1222 | } | ||
1223 | |||
1224 | return 0; | ||
1225 | } | ||
1226 | |||
1227 | static int acpi_processor_registered; | ||
1228 | |||
1111 | int __cpuinit acpi_processor_power_init(struct acpi_processor *pr, | 1229 | int __cpuinit acpi_processor_power_init(struct acpi_processor *pr, |
1112 | struct acpi_device *device) | 1230 | struct acpi_device *device) |
1113 | { | 1231 | { |
1114 | acpi_status status = 0; | 1232 | acpi_status status = 0; |
1233 | int retval; | ||
1115 | static int first_run; | 1234 | static int first_run; |
1116 | 1235 | ||
1117 | if (disabled_by_idle_boot_param()) | 1236 | if (disabled_by_idle_boot_param()) |
@@ -1148,9 +1267,26 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr, | |||
1148 | * platforms that only support C1. | 1267 | * platforms that only support C1. |
1149 | */ | 1268 | */ |
1150 | if (pr->flags.power) { | 1269 | if (pr->flags.power) { |
1151 | acpi_processor_setup_cpuidle(pr); | 1270 | /* Register acpi_idle_driver if not already registered */ |
1152 | if (cpuidle_register_device(&pr->power.dev)) | 1271 | if (!acpi_processor_registered) { |
1153 | return -EIO; | 1272 | acpi_processor_setup_cpuidle_states(pr); |
1273 | retval = cpuidle_register_driver(&acpi_idle_driver); | ||
1274 | if (retval) | ||
1275 | return retval; | ||
1276 | printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n", | ||
1277 | acpi_idle_driver.name); | ||
1278 | } | ||
1279 | /* Register per-cpu cpuidle_device. Cpuidle driver | ||
1280 | * must already be registered before registering device | ||
1281 | */ | ||
1282 | acpi_processor_setup_cpuidle_cx(pr); | ||
1283 | retval = cpuidle_register_device(&pr->power.dev); | ||
1284 | if (retval) { | ||
1285 | if (acpi_processor_registered == 0) | ||
1286 | cpuidle_unregister_driver(&acpi_idle_driver); | ||
1287 | return retval; | ||
1288 | } | ||
1289 | acpi_processor_registered++; | ||
1154 | } | 1290 | } |
1155 | return 0; | 1291 | return 0; |
1156 | } | 1292 | } |
@@ -1161,8 +1297,13 @@ int acpi_processor_power_exit(struct acpi_processor *pr, | |||
1161 | if (disabled_by_idle_boot_param()) | 1297 | if (disabled_by_idle_boot_param()) |
1162 | return 0; | 1298 | return 0; |
1163 | 1299 | ||
1164 | cpuidle_unregister_device(&pr->power.dev); | 1300 | if (pr->flags.power) { |
1165 | pr->flags.power_setup_done = 0; | 1301 | cpuidle_unregister_device(&pr->power.dev); |
1302 | acpi_processor_registered--; | ||
1303 | if (acpi_processor_registered == 0) | ||
1304 | cpuidle_unregister_driver(&acpi_idle_driver); | ||
1305 | } | ||
1166 | 1306 | ||
1307 | pr->flags.power_setup_done = 0; | ||
1167 | return 0; | 1308 | return 0; |
1168 | } | 1309 | } |
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 7127e92fa8a1..7a57b11eaa8d 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -61,6 +61,7 @@ static int __cpuidle_register_device(struct cpuidle_device *dev); | |||
61 | int cpuidle_idle_call(void) | 61 | int cpuidle_idle_call(void) |
62 | { | 62 | { |
63 | struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); | 63 | struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); |
64 | struct cpuidle_driver *drv = cpuidle_get_driver(); | ||
64 | struct cpuidle_state *target_state; | 65 | struct cpuidle_state *target_state; |
65 | int next_state, entered_state; | 66 | int next_state, entered_state; |
66 | 67 | ||
@@ -84,18 +85,18 @@ int cpuidle_idle_call(void) | |||
84 | #endif | 85 | #endif |
85 | 86 | ||
86 | /* ask the governor for the next state */ | 87 | /* ask the governor for the next state */ |
87 | next_state = cpuidle_curr_governor->select(dev); | 88 | next_state = cpuidle_curr_governor->select(drv, dev); |
88 | if (need_resched()) { | 89 | if (need_resched()) { |
89 | local_irq_enable(); | 90 | local_irq_enable(); |
90 | return 0; | 91 | return 0; |
91 | } | 92 | } |
92 | 93 | ||
93 | target_state = &dev->states[next_state]; | 94 | target_state = &drv->states[next_state]; |
94 | 95 | ||
95 | trace_power_start(POWER_CSTATE, next_state, dev->cpu); | 96 | trace_power_start(POWER_CSTATE, next_state, dev->cpu); |
96 | trace_cpu_idle(next_state, dev->cpu); | 97 | trace_cpu_idle(next_state, dev->cpu); |
97 | 98 | ||
98 | entered_state = target_state->enter(dev, next_state); | 99 | entered_state = target_state->enter(dev, drv, next_state); |
99 | 100 | ||
100 | trace_power_end(dev->cpu); | 101 | trace_power_end(dev->cpu); |
101 | trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu); | 102 | trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu); |
@@ -163,7 +164,8 @@ void cpuidle_resume_and_unlock(void) | |||
163 | EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock); | 164 | EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock); |
164 | 165 | ||
165 | #ifdef CONFIG_ARCH_HAS_CPU_RELAX | 166 | #ifdef CONFIG_ARCH_HAS_CPU_RELAX |
166 | static int poll_idle(struct cpuidle_device *dev, int index) | 167 | static int poll_idle(struct cpuidle_device *dev, |
168 | struct cpuidle_driver *drv, int index) | ||
167 | { | 169 | { |
168 | ktime_t t1, t2; | 170 | ktime_t t1, t2; |
169 | s64 diff; | 171 | s64 diff; |
@@ -183,12 +185,9 @@ static int poll_idle(struct cpuidle_device *dev, int index) | |||
183 | return index; | 185 | return index; |
184 | } | 186 | } |
185 | 187 | ||
186 | static void poll_idle_init(struct cpuidle_device *dev) | 188 | static void poll_idle_init(struct cpuidle_driver *drv) |
187 | { | 189 | { |
188 | struct cpuidle_state *state = &dev->states[0]; | 190 | struct cpuidle_state *state = &drv->states[0]; |
189 | struct cpuidle_state_usage *state_usage = &dev->states_usage[0]; | ||
190 | |||
191 | cpuidle_set_statedata(state_usage, NULL); | ||
192 | 191 | ||
193 | snprintf(state->name, CPUIDLE_NAME_LEN, "POLL"); | 192 | snprintf(state->name, CPUIDLE_NAME_LEN, "POLL"); |
194 | snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE"); | 193 | snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE"); |
@@ -199,7 +198,7 @@ static void poll_idle_init(struct cpuidle_device *dev) | |||
199 | state->enter = poll_idle; | 198 | state->enter = poll_idle; |
200 | } | 199 | } |
201 | #else | 200 | #else |
202 | static void poll_idle_init(struct cpuidle_device *dev) {} | 201 | static void poll_idle_init(struct cpuidle_driver *drv) {} |
203 | #endif /* CONFIG_ARCH_HAS_CPU_RELAX */ | 202 | #endif /* CONFIG_ARCH_HAS_CPU_RELAX */ |
204 | 203 | ||
205 | /** | 204 | /** |
@@ -226,13 +225,13 @@ int cpuidle_enable_device(struct cpuidle_device *dev) | |||
226 | return ret; | 225 | return ret; |
227 | } | 226 | } |
228 | 227 | ||
229 | poll_idle_init(dev); | 228 | poll_idle_init(cpuidle_get_driver()); |
230 | 229 | ||
231 | if ((ret = cpuidle_add_state_sysfs(dev))) | 230 | if ((ret = cpuidle_add_state_sysfs(dev))) |
232 | return ret; | 231 | return ret; |
233 | 232 | ||
234 | if (cpuidle_curr_governor->enable && | 233 | if (cpuidle_curr_governor->enable && |
235 | (ret = cpuidle_curr_governor->enable(dev))) | 234 | (ret = cpuidle_curr_governor->enable(cpuidle_get_driver(), dev))) |
236 | goto fail_sysfs; | 235 | goto fail_sysfs; |
237 | 236 | ||
238 | for (i = 0; i < dev->state_count; i++) { | 237 | for (i = 0; i < dev->state_count; i++) { |
@@ -273,7 +272,7 @@ void cpuidle_disable_device(struct cpuidle_device *dev) | |||
273 | dev->enabled = 0; | 272 | dev->enabled = 0; |
274 | 273 | ||
275 | if (cpuidle_curr_governor->disable) | 274 | if (cpuidle_curr_governor->disable) |
276 | cpuidle_curr_governor->disable(dev); | 275 | cpuidle_curr_governor->disable(cpuidle_get_driver(), dev); |
277 | 276 | ||
278 | cpuidle_remove_state_sysfs(dev); | 277 | cpuidle_remove_state_sysfs(dev); |
279 | enabled_devices--; | 278 | enabled_devices--; |
@@ -301,26 +300,6 @@ static int __cpuidle_register_device(struct cpuidle_device *dev) | |||
301 | 300 | ||
302 | init_completion(&dev->kobj_unregister); | 301 | init_completion(&dev->kobj_unregister); |
303 | 302 | ||
304 | /* | ||
305 | * cpuidle driver should set the dev->power_specified bit | ||
306 | * before registering the device if the driver provides | ||
307 | * power_usage numbers. | ||
308 | * | ||
309 | * For those devices whose ->power_specified is not set, | ||
310 | * we fill in power_usage with decreasing values as the | ||
311 | * cpuidle code has an implicit assumption that state Cn | ||
312 | * uses less power than C(n-1). | ||
313 | * | ||
314 | * With CONFIG_ARCH_HAS_CPU_RELAX, C0 is already assigned | ||
315 | * an power value of -1. So we use -2, -3, etc, for other | ||
316 | * c-states. | ||
317 | */ | ||
318 | if (!dev->power_specified) { | ||
319 | int i; | ||
320 | for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++) | ||
321 | dev->states[i].power_usage = -1 - i; | ||
322 | } | ||
323 | |||
324 | per_cpu(cpuidle_devices, dev->cpu) = dev; | 303 | per_cpu(cpuidle_devices, dev->cpu) = dev; |
325 | list_add(&dev->device_list, &cpuidle_detected_devices); | 304 | list_add(&dev->device_list, &cpuidle_detected_devices); |
326 | if ((ret = cpuidle_add_sysfs(sys_dev))) { | 305 | if ((ret = cpuidle_add_sysfs(sys_dev))) { |
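The cpuidle.c hunks above reduce to one rule: state definitions are read from the single, driver-owned table, while per-CPU statistics stay on each cpuidle_device. A minimal sketch of that split, kept to interfaces visible in this patch; the helper function itself and the pr_info() line are illustrative, not part of the patch:

#include <linux/kernel.h>
#include <linux/cpuidle.h>

/* Illustrative only: where definitions vs. statistics live after this patch. */
static void demo_show_state(struct cpuidle_device *dev, int index)
{
	struct cpuidle_driver *drv = cpuidle_get_driver();	/* one global copy */
	struct cpuidle_state *state = &drv->states[index];	/* name, latencies, enter() */
	struct cpuidle_state_usage *usage = &dev->states_usage[index];	/* per-CPU counters */

	pr_info("%s: exit latency %u us, used %llu times\n",
		state->name, state->exit_latency, usage->usage);
}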
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c index 3f7e3cedd133..284d7af5a9c8 100644 --- a/drivers/cpuidle/driver.c +++ b/drivers/cpuidle/driver.c | |||
@@ -17,6 +17,30 @@ | |||
17 | static struct cpuidle_driver *cpuidle_curr_driver; | 17 | static struct cpuidle_driver *cpuidle_curr_driver; |
18 | DEFINE_SPINLOCK(cpuidle_driver_lock); | 18 | DEFINE_SPINLOCK(cpuidle_driver_lock); |
19 | 19 | ||
20 | static void __cpuidle_register_driver(struct cpuidle_driver *drv) | ||
21 | { | ||
22 | int i; | ||
23 | /* | ||
24 | * cpuidle driver should set the drv->power_specified bit | ||
25 | * before registering if the driver provides | ||
26 | * power_usage numbers. | ||
27 | * | ||
28 | * If power_specified is not set, | ||
29 | * we fill in power_usage with decreasing values as the | ||
30 | * cpuidle code has an implicit assumption that state Cn | ||
31 | * uses less power than C(n-1). | ||
32 | * | ||
33 | * With CONFIG_ARCH_HAS_CPU_RELAX, C0 is already assigned | ||
34 | * a power value of -1. So we use -2, -3, etc, for other | ||
35 | * c-states. | ||
36 | */ | ||
37 | if (!drv->power_specified) { | ||
38 | for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) | ||
39 | drv->states[i].power_usage = -1 - i; | ||
40 | } | ||
41 | } | ||
42 | |||
43 | |||
20 | /** | 44 | /** |
21 | * cpuidle_register_driver - registers a driver | 45 | * cpuidle_register_driver - registers a driver |
22 | * @drv: the driver | 46 | * @drv: the driver |
@@ -34,6 +58,7 @@ int cpuidle_register_driver(struct cpuidle_driver *drv) | |||
34 | spin_unlock(&cpuidle_driver_lock); | 58 | spin_unlock(&cpuidle_driver_lock); |
35 | return -EBUSY; | 59 | return -EBUSY; |
36 | } | 60 | } |
61 | __cpuidle_register_driver(drv); | ||
37 | cpuidle_curr_driver = drv; | 62 | cpuidle_curr_driver = drv; |
38 | spin_unlock(&cpuidle_driver_lock); | 63 | spin_unlock(&cpuidle_driver_lock); |
39 | 64 | ||
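With the default power ordering now applied inside cpuidle_register_driver(), a platform driver registers once, from a single CPU, and may leave power_usage unset. A minimal registration sketch; the driver name, the state_count value and the decision to omit power numbers are illustrative:

#include <linux/module.h>
#include <linux/cpuidle.h>

static struct cpuidle_driver demo_idle_driver = {
	.name        = "demo_idle",
	.owner       = THIS_MODULE,
	.state_count = 3,	/* the states[0..2] entries are elided here */
	/* power_specified left clear: __cpuidle_register_driver() fills
	 * decreasing power_usage values (-2, -3, ...) so a deeper state
	 * always reports lower power than a shallower one. */
};

static int __init demo_idle_init(void)
{
	/* single, global registration: no per-CPU copies of the state table */
	return cpuidle_register_driver(&demo_idle_driver);
}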
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c index 6a686a76711f..ef6b9e4727a7 100644 --- a/drivers/cpuidle/governors/ladder.c +++ b/drivers/cpuidle/governors/ladder.c | |||
@@ -60,9 +60,11 @@ static inline void ladder_do_selection(struct ladder_device *ldev, | |||
60 | 60 | ||
61 | /** | 61 | /** |
62 | * ladder_select_state - selects the next state to enter | 62 | * ladder_select_state - selects the next state to enter |
63 | * @drv: cpuidle driver | ||
63 | * @dev: the CPU | 64 | * @dev: the CPU |
64 | */ | 65 | */ |
65 | static int ladder_select_state(struct cpuidle_device *dev) | 66 | static int ladder_select_state(struct cpuidle_driver *drv, |
67 | struct cpuidle_device *dev) | ||
66 | { | 68 | { |
67 | struct ladder_device *ldev = &__get_cpu_var(ladder_devices); | 69 | struct ladder_device *ldev = &__get_cpu_var(ladder_devices); |
68 | struct ladder_device_state *last_state; | 70 | struct ladder_device_state *last_state; |
@@ -77,15 +79,17 @@ static int ladder_select_state(struct cpuidle_device *dev) | |||
77 | 79 | ||
78 | last_state = &ldev->states[last_idx]; | 80 | last_state = &ldev->states[last_idx]; |
79 | 81 | ||
80 | if (dev->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID) | 82 | if (drv->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID) { |
81 | last_residency = cpuidle_get_last_residency(dev) - dev->states[last_idx].exit_latency; | 83 | last_residency = cpuidle_get_last_residency(dev) - \ |
84 | drv->states[last_idx].exit_latency; | ||
85 | } | ||
82 | else | 86 | else |
83 | last_residency = last_state->threshold.promotion_time + 1; | 87 | last_residency = last_state->threshold.promotion_time + 1; |
84 | 88 | ||
85 | /* consider promotion */ | 89 | /* consider promotion */ |
86 | if (last_idx < dev->state_count - 1 && | 90 | if (last_idx < drv->state_count - 1 && |
87 | last_residency > last_state->threshold.promotion_time && | 91 | last_residency > last_state->threshold.promotion_time && |
88 | dev->states[last_idx + 1].exit_latency <= latency_req) { | 92 | drv->states[last_idx + 1].exit_latency <= latency_req) { |
89 | last_state->stats.promotion_count++; | 93 | last_state->stats.promotion_count++; |
90 | last_state->stats.demotion_count = 0; | 94 | last_state->stats.demotion_count = 0; |
91 | if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) { | 95 | if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) { |
@@ -96,11 +100,11 @@ static int ladder_select_state(struct cpuidle_device *dev) | |||
96 | 100 | ||
97 | /* consider demotion */ | 101 | /* consider demotion */ |
98 | if (last_idx > CPUIDLE_DRIVER_STATE_START && | 102 | if (last_idx > CPUIDLE_DRIVER_STATE_START && |
99 | dev->states[last_idx].exit_latency > latency_req) { | 103 | drv->states[last_idx].exit_latency > latency_req) { |
100 | int i; | 104 | int i; |
101 | 105 | ||
102 | for (i = last_idx - 1; i > CPUIDLE_DRIVER_STATE_START; i--) { | 106 | for (i = last_idx - 1; i > CPUIDLE_DRIVER_STATE_START; i--) { |
103 | if (dev->states[i].exit_latency <= latency_req) | 107 | if (drv->states[i].exit_latency <= latency_req) |
104 | break; | 108 | break; |
105 | } | 109 | } |
106 | ladder_do_selection(ldev, last_idx, i); | 110 | ladder_do_selection(ldev, last_idx, i); |
@@ -123,9 +127,11 @@ static int ladder_select_state(struct cpuidle_device *dev) | |||
123 | 127 | ||
124 | /** | 128 | /** |
125 | * ladder_enable_device - setup for the governor | 129 | * ladder_enable_device - setup for the governor |
130 | * @drv: cpuidle driver | ||
126 | * @dev: the CPU | 131 | * @dev: the CPU |
127 | */ | 132 | */ |
128 | static int ladder_enable_device(struct cpuidle_device *dev) | 133 | static int ladder_enable_device(struct cpuidle_driver *drv, |
134 | struct cpuidle_device *dev) | ||
129 | { | 135 | { |
130 | int i; | 136 | int i; |
131 | struct ladder_device *ldev = &per_cpu(ladder_devices, dev->cpu); | 137 | struct ladder_device *ldev = &per_cpu(ladder_devices, dev->cpu); |
@@ -134,8 +140,8 @@ static int ladder_enable_device(struct cpuidle_device *dev) | |||
134 | 140 | ||
135 | ldev->last_state_idx = CPUIDLE_DRIVER_STATE_START; | 141 | ldev->last_state_idx = CPUIDLE_DRIVER_STATE_START; |
136 | 142 | ||
137 | for (i = 0; i < dev->state_count; i++) { | 143 | for (i = 0; i < drv->state_count; i++) { |
138 | state = &dev->states[i]; | 144 | state = &drv->states[i]; |
139 | lstate = &ldev->states[i]; | 145 | lstate = &ldev->states[i]; |
140 | 146 | ||
141 | lstate->stats.promotion_count = 0; | 147 | lstate->stats.promotion_count = 0; |
@@ -144,7 +150,7 @@ static int ladder_enable_device(struct cpuidle_device *dev) | |||
144 | lstate->threshold.promotion_count = PROMOTION_COUNT; | 150 | lstate->threshold.promotion_count = PROMOTION_COUNT; |
145 | lstate->threshold.demotion_count = DEMOTION_COUNT; | 151 | lstate->threshold.demotion_count = DEMOTION_COUNT; |
146 | 152 | ||
147 | if (i < dev->state_count - 1) | 153 | if (i < drv->state_count - 1) |
148 | lstate->threshold.promotion_time = state->exit_latency; | 154 | lstate->threshold.promotion_time = state->exit_latency; |
149 | if (i > 0) | 155 | if (i > 0) |
150 | lstate->threshold.demotion_time = state->exit_latency; | 156 | lstate->threshold.demotion_time = state->exit_latency; |
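Both governors now receive the driver explicitly: state parameters (exit_latency, flags, state_count) are read from drv->states[], while the per-CPU ladder and menu bookkeeping stays keyed off dev. A toy select hook in the new shape, assuming the same includes as the in-tree governors; it is not a real policy, it simply picks the deepest state within the PM QoS latency bound:

static int demo_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
	int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
	int i;

	for (i = drv->state_count - 1; i >= CPUIDLE_DRIVER_STATE_START; i--) {
		if (drv->states[i].exit_latency <= latency_req)
			return i;	/* deepest state we can afford */
	}

	return CPUIDLE_DRIVER_STATE_START;
}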
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index af724e823c8e..bcbe88142135 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c | |||
@@ -182,7 +182,7 @@ static inline int performance_multiplier(void) | |||
182 | 182 | ||
183 | static DEFINE_PER_CPU(struct menu_device, menu_devices); | 183 | static DEFINE_PER_CPU(struct menu_device, menu_devices); |
184 | 184 | ||
185 | static void menu_update(struct cpuidle_device *dev); | 185 | static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev); |
186 | 186 | ||
187 | /* This implements DIV_ROUND_CLOSEST but avoids 64 bit division */ | 187 | /* This implements DIV_ROUND_CLOSEST but avoids 64 bit division */ |
188 | static u64 div_round64(u64 dividend, u32 divisor) | 188 | static u64 div_round64(u64 dividend, u32 divisor) |
@@ -228,9 +228,10 @@ static void detect_repeating_patterns(struct menu_device *data) | |||
228 | 228 | ||
229 | /** | 229 | /** |
230 | * menu_select - selects the next idle state to enter | 230 | * menu_select - selects the next idle state to enter |
231 | * @drv: cpuidle driver containing state data | ||
231 | * @dev: the CPU | 232 | * @dev: the CPU |
232 | */ | 233 | */ |
233 | static int menu_select(struct cpuidle_device *dev) | 234 | static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) |
234 | { | 235 | { |
235 | struct menu_device *data = &__get_cpu_var(menu_devices); | 236 | struct menu_device *data = &__get_cpu_var(menu_devices); |
236 | int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY); | 237 | int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY); |
@@ -240,7 +241,7 @@ static int menu_select(struct cpuidle_device *dev) | |||
240 | struct timespec t; | 241 | struct timespec t; |
241 | 242 | ||
242 | if (data->needs_update) { | 243 | if (data->needs_update) { |
243 | menu_update(dev); | 244 | menu_update(drv, dev); |
244 | data->needs_update = 0; | 245 | data->needs_update = 0; |
245 | } | 246 | } |
246 | 247 | ||
@@ -285,8 +286,8 @@ static int menu_select(struct cpuidle_device *dev) | |||
285 | * Find the idle state with the lowest power while satisfying | 286 | * Find the idle state with the lowest power while satisfying |
286 | * our constraints. | 287 | * our constraints. |
287 | */ | 288 | */ |
288 | for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++) { | 289 | for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) { |
289 | struct cpuidle_state *s = &dev->states[i]; | 290 | struct cpuidle_state *s = &drv->states[i]; |
290 | 291 | ||
291 | if (s->target_residency > data->predicted_us) | 292 | if (s->target_residency > data->predicted_us) |
292 | continue; | 293 | continue; |
@@ -323,14 +324,15 @@ static void menu_reflect(struct cpuidle_device *dev, int index) | |||
323 | 324 | ||
324 | /** | 325 | /** |
325 | * menu_update - attempts to guess what happened after entry | 326 | * menu_update - attempts to guess what happened after entry |
327 | * @drv: cpuidle driver containing state data | ||
326 | * @dev: the CPU | 328 | * @dev: the CPU |
327 | */ | 329 | */ |
328 | static void menu_update(struct cpuidle_device *dev) | 330 | static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev) |
329 | { | 331 | { |
330 | struct menu_device *data = &__get_cpu_var(menu_devices); | 332 | struct menu_device *data = &__get_cpu_var(menu_devices); |
331 | int last_idx = data->last_state_idx; | 333 | int last_idx = data->last_state_idx; |
332 | unsigned int last_idle_us = cpuidle_get_last_residency(dev); | 334 | unsigned int last_idle_us = cpuidle_get_last_residency(dev); |
333 | struct cpuidle_state *target = &dev->states[last_idx]; | 335 | struct cpuidle_state *target = &drv->states[last_idx]; |
334 | unsigned int measured_us; | 336 | unsigned int measured_us; |
335 | u64 new_factor; | 337 | u64 new_factor; |
336 | 338 | ||
@@ -384,9 +386,11 @@ static void menu_update(struct cpuidle_device *dev) | |||
384 | 386 | ||
385 | /** | 387 | /** |
386 | * menu_enable_device - scans a CPU's states and does setup | 388 | * menu_enable_device - scans a CPU's states and does setup |
389 | * @drv: cpuidle driver | ||
387 | * @dev: the CPU | 390 | * @dev: the CPU |
388 | */ | 391 | */ |
389 | static int menu_enable_device(struct cpuidle_device *dev) | 392 | static int menu_enable_device(struct cpuidle_driver *drv, |
393 | struct cpuidle_device *dev) | ||
390 | { | 394 | { |
391 | struct menu_device *data = &per_cpu(menu_devices, dev->cpu); | 395 | struct menu_device *data = &per_cpu(menu_devices, dev->cpu); |
392 | 396 | ||
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c index 8a1ace104476..1e756e160dca 100644 --- a/drivers/cpuidle/sysfs.c +++ b/drivers/cpuidle/sysfs.c | |||
@@ -322,13 +322,14 @@ int cpuidle_add_state_sysfs(struct cpuidle_device *device) | |||
322 | { | 322 | { |
323 | int i, ret = -ENOMEM; | 323 | int i, ret = -ENOMEM; |
324 | struct cpuidle_state_kobj *kobj; | 324 | struct cpuidle_state_kobj *kobj; |
325 | struct cpuidle_driver *drv = cpuidle_get_driver(); | ||
325 | 326 | ||
326 | /* state statistics */ | 327 | /* state statistics */ |
327 | for (i = 0; i < device->state_count; i++) { | 328 | for (i = 0; i < device->state_count; i++) { |
328 | kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL); | 329 | kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL); |
329 | if (!kobj) | 330 | if (!kobj) |
330 | goto error_state; | 331 | goto error_state; |
331 | kobj->state = &device->states[i]; | 332 | kobj->state = &drv->states[i]; |
332 | kobj->state_usage = &device->states_usage[i]; | 333 | kobj->state_usage = &device->states_usage[i]; |
333 | init_completion(&kobj->kobj_unregister); | 334 | init_completion(&kobj->kobj_unregister); |
334 | 335 | ||
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 3aa8d4cb6dca..5be9d599ff6b 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c | |||
@@ -81,7 +81,8 @@ static unsigned int mwait_substates; | |||
81 | static unsigned int lapic_timer_reliable_states = (1 << 1); /* Default to only C1 */ | 81 | static unsigned int lapic_timer_reliable_states = (1 << 1); /* Default to only C1 */ |
82 | 82 | ||
83 | static struct cpuidle_device __percpu *intel_idle_cpuidle_devices; | 83 | static struct cpuidle_device __percpu *intel_idle_cpuidle_devices; |
84 | static int intel_idle(struct cpuidle_device *dev, int index); | 84 | static int intel_idle(struct cpuidle_device *dev, |
85 | struct cpuidle_driver *drv, int index); | ||
85 | 86 | ||
86 | static struct cpuidle_state *cpuidle_state_table; | 87 | static struct cpuidle_state *cpuidle_state_table; |
87 | 88 | ||
@@ -227,13 +228,15 @@ static int get_driver_data(int cstate) | |||
227 | /** | 228 | /** |
228 | * intel_idle | 229 | * intel_idle |
229 | * @dev: cpuidle_device | 230 | * @dev: cpuidle_device |
231 | * @drv: cpuidle driver | ||
230 | * @index: index of cpuidle state | 232 | * @index: index of cpuidle state |
231 | * | 233 | * |
232 | */ | 234 | */ |
233 | static int intel_idle(struct cpuidle_device *dev, int index) | 235 | static int intel_idle(struct cpuidle_device *dev, |
236 | struct cpuidle_driver *drv, int index) | ||
234 | { | 237 | { |
235 | unsigned long ecx = 1; /* break on interrupt flag */ | 238 | unsigned long ecx = 1; /* break on interrupt flag */ |
236 | struct cpuidle_state *state = &dev->states[index]; | 239 | struct cpuidle_state *state = &drv->states[index]; |
237 | struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; | 240 | struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; |
238 | unsigned long eax = (unsigned long)cpuidle_get_statedata(state_usage); | 241 | unsigned long eax = (unsigned long)cpuidle_get_statedata(state_usage); |
239 | unsigned int cstate; | 242 | unsigned int cstate; |
@@ -420,6 +423,60 @@ static void intel_idle_cpuidle_devices_uninit(void) | |||
420 | return; | 423 | return; |
421 | } | 424 | } |
422 | /* | 425 | /* |
426 | * intel_idle_cpuidle_driver_init() | ||
427 | * allocate, initialize cpuidle_states | ||
428 | */ | ||
429 | static int intel_idle_cpuidle_driver_init(void) | ||
430 | { | ||
431 | int cstate; | ||
432 | struct cpuidle_driver *drv = &intel_idle_driver; | ||
433 | |||
434 | drv->state_count = 1; | ||
435 | |||
436 | for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) { | ||
437 | int num_substates; | ||
438 | |||
439 | if (cstate > max_cstate) { | ||
440 | printk(PREFIX "max_cstate %d reached\n", | ||
441 | max_cstate); | ||
442 | break; | ||
443 | } | ||
444 | |||
445 | /* does the state exist in CPUID.MWAIT? */ | ||
446 | num_substates = (mwait_substates >> ((cstate) * 4)) | ||
447 | & MWAIT_SUBSTATE_MASK; | ||
448 | if (num_substates == 0) | ||
449 | continue; | ||
450 | /* is the state not enabled? */ | ||
451 | if (cpuidle_state_table[cstate].enter == NULL) { | ||
452 | /* does the driver not know about the state? */ | ||
453 | if (*cpuidle_state_table[cstate].name == '\0') | ||
454 | pr_debug(PREFIX "unaware of model 0x%x" | ||
455 | " MWAIT %d please" | ||
456 | " contact lenb@kernel.org", | ||
457 | boot_cpu_data.x86_model, cstate); | ||
458 | continue; | ||
459 | } | ||
460 | |||
461 | if ((cstate > 2) && | ||
462 | !boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) | ||
463 | mark_tsc_unstable("TSC halts in idle" | ||
464 | " states deeper than C2"); | ||
465 | |||
466 | drv->states[drv->state_count] = /* structure copy */ | ||
467 | cpuidle_state_table[cstate]; | ||
468 | |||
469 | drv->state_count += 1; | ||
470 | } | ||
471 | |||
472 | if (auto_demotion_disable_flags) | ||
473 | smp_call_function(auto_demotion_disable, NULL, 1); | ||
474 | |||
475 | return 0; | ||
476 | } | ||
477 | |||
478 | |||
479 | /* | ||
423 | * intel_idle_cpuidle_devices_init() | 480 | * intel_idle_cpuidle_devices_init() |
424 | * allocate, initialize, register cpuidle_devices | 481 | * allocate, initialize, register cpuidle_devices |
425 | */ | 482 | */ |
@@ -453,23 +510,9 @@ static int intel_idle_cpuidle_devices_init(void) | |||
453 | continue; | 510 | continue; |
454 | /* is the state not enabled? */ | 511 | /* is the state not enabled? */ |
455 | if (cpuidle_state_table[cstate].enter == NULL) { | 512 | if (cpuidle_state_table[cstate].enter == NULL) { |
456 | /* does the driver not know about the state? */ | ||
457 | if (*cpuidle_state_table[cstate].name == '\0') | ||
458 | pr_debug(PREFIX "unaware of model 0x%x" | ||
459 | " MWAIT %d please" | ||
460 | " contact lenb@kernel.org", | ||
461 | boot_cpu_data.x86_model, cstate); | ||
462 | continue; | 513 | continue; |
463 | } | 514 | } |
464 | 515 | ||
465 | if ((cstate > 2) && | ||
466 | !boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) | ||
467 | mark_tsc_unstable("TSC halts in idle" | ||
468 | " states deeper than C2"); | ||
469 | |||
470 | dev->states[dev->state_count] = /* structure copy */ | ||
471 | cpuidle_state_table[cstate]; | ||
472 | |||
473 | dev->states_usage[dev->state_count].driver_data = | 516 | dev->states_usage[dev->state_count].driver_data = |
474 | (void *)get_driver_data(cstate); | 517 | (void *)get_driver_data(cstate); |
475 | 518 | ||
@@ -484,8 +527,6 @@ static int intel_idle_cpuidle_devices_init(void) | |||
484 | return -EIO; | 527 | return -EIO; |
485 | } | 528 | } |
486 | } | 529 | } |
487 | if (auto_demotion_disable_flags) | ||
488 | smp_call_function(auto_demotion_disable, NULL, 1); | ||
489 | 530 | ||
490 | return 0; | 531 | return 0; |
491 | } | 532 | } |
@@ -503,6 +544,7 @@ static int __init intel_idle_init(void) | |||
503 | if (retval) | 544 | if (retval) |
504 | return retval; | 545 | return retval; |
505 | 546 | ||
547 | intel_idle_cpuidle_driver_init(); | ||
506 | retval = cpuidle_register_driver(&intel_idle_driver); | 548 | retval = cpuidle_register_driver(&intel_idle_driver); |
507 | if (retval) { | 549 | if (retval) { |
508 | printk(KERN_DEBUG PREFIX "intel_idle yielding to %s", | 550 | printk(KERN_DEBUG PREFIX "intel_idle yielding to %s", |
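After this change the intel_idle flow is: populate the global state table once, register the driver, then set up the per-CPU devices, which now carry only states_usage and the MWAIT hint in driver_data. A condensed sketch of that ordering; the error handling and CPU-notifier setup of the real intel_idle_init() are omitted:

static int __init demo_intel_idle_flow(void)
{
	int retval;

	/* copy cpuidle_state_table[] into intel_idle_driver.states[], once */
	intel_idle_cpuidle_driver_init();

	retval = cpuidle_register_driver(&intel_idle_driver);
	if (retval)
		return retval;

	/* per-CPU devices keep only usage counters and driver_data */
	return intel_idle_cpuidle_devices_init();
}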
diff --git a/include/acpi/processor.h b/include/acpi/processor.h index 67055f180330..610f6fb1bbc2 100644 --- a/include/acpi/processor.h +++ b/include/acpi/processor.h | |||
@@ -329,6 +329,7 @@ extern void acpi_processor_throttling_init(void); | |||
329 | int acpi_processor_power_init(struct acpi_processor *pr, | 329 | int acpi_processor_power_init(struct acpi_processor *pr, |
330 | struct acpi_device *device); | 330 | struct acpi_device *device); |
331 | int acpi_processor_cst_has_changed(struct acpi_processor *pr); | 331 | int acpi_processor_cst_has_changed(struct acpi_processor *pr); |
332 | int acpi_processor_hotplug(struct acpi_processor *pr); | ||
332 | int acpi_processor_power_exit(struct acpi_processor *pr, | 333 | int acpi_processor_power_exit(struct acpi_processor *pr, |
333 | struct acpi_device *device); | 334 | struct acpi_device *device); |
334 | int acpi_processor_suspend(struct acpi_device * device, pm_message_t state); | 335 | int acpi_processor_suspend(struct acpi_device * device, pm_message_t state); |
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 0156540b3f79..c90418822f40 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h | |||
@@ -22,6 +22,7 @@ | |||
22 | #define CPUIDLE_DESC_LEN 32 | 22 | #define CPUIDLE_DESC_LEN 32 |
23 | 23 | ||
24 | struct cpuidle_device; | 24 | struct cpuidle_device; |
25 | struct cpuidle_driver; | ||
25 | 26 | ||
26 | 27 | ||
27 | /**************************** | 28 | /**************************** |
@@ -45,6 +46,7 @@ struct cpuidle_state { | |||
45 | unsigned int target_residency; /* in US */ | 46 | unsigned int target_residency; /* in US */ |
46 | 47 | ||
47 | int (*enter) (struct cpuidle_device *dev, | 48 | int (*enter) (struct cpuidle_device *dev, |
49 | struct cpuidle_driver *drv, | ||
48 | int index); | 50 | int index); |
49 | }; | 51 | }; |
50 | 52 | ||
@@ -83,12 +85,10 @@ struct cpuidle_state_kobj { | |||
83 | struct cpuidle_device { | 85 | struct cpuidle_device { |
84 | unsigned int registered:1; | 86 | unsigned int registered:1; |
85 | unsigned int enabled:1; | 87 | unsigned int enabled:1; |
86 | unsigned int power_specified:1; | ||
87 | unsigned int cpu; | 88 | unsigned int cpu; |
88 | 89 | ||
89 | int last_residency; | 90 | int last_residency; |
90 | int state_count; | 91 | int state_count; |
91 | struct cpuidle_state states[CPUIDLE_STATE_MAX]; | ||
92 | struct cpuidle_state_usage states_usage[CPUIDLE_STATE_MAX]; | 92 | struct cpuidle_state_usage states_usage[CPUIDLE_STATE_MAX]; |
93 | struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX]; | 93 | struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX]; |
94 | 94 | ||
@@ -96,7 +96,6 @@ struct cpuidle_device { | |||
96 | struct kobject kobj; | 96 | struct kobject kobj; |
97 | struct completion kobj_unregister; | 97 | struct completion kobj_unregister; |
98 | void *governor_data; | 98 | void *governor_data; |
99 | int safe_state_index; | ||
100 | }; | 99 | }; |
101 | 100 | ||
102 | DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices); | 101 | DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices); |
@@ -120,6 +119,11 @@ static inline int cpuidle_get_last_residency(struct cpuidle_device *dev) | |||
120 | struct cpuidle_driver { | 119 | struct cpuidle_driver { |
121 | char name[CPUIDLE_NAME_LEN]; | 120 | char name[CPUIDLE_NAME_LEN]; |
122 | struct module *owner; | 121 | struct module *owner; |
122 | |||
123 | unsigned int power_specified:1; | ||
124 | struct cpuidle_state states[CPUIDLE_STATE_MAX]; | ||
125 | int state_count; | ||
126 | int safe_state_index; | ||
123 | }; | 127 | }; |
124 | 128 | ||
125 | #ifdef CONFIG_CPU_IDLE | 129 | #ifdef CONFIG_CPU_IDLE |
@@ -166,10 +170,13 @@ struct cpuidle_governor { | |||
166 | struct list_head governor_list; | 170 | struct list_head governor_list; |
167 | unsigned int rating; | 171 | unsigned int rating; |
168 | 172 | ||
169 | int (*enable) (struct cpuidle_device *dev); | 173 | int (*enable) (struct cpuidle_driver *drv, |
170 | void (*disable) (struct cpuidle_device *dev); | 174 | struct cpuidle_device *dev); |
175 | void (*disable) (struct cpuidle_driver *drv, | ||
176 | struct cpuidle_device *dev); | ||
171 | 177 | ||
172 | int (*select) (struct cpuidle_device *dev); | 178 | int (*select) (struct cpuidle_driver *drv, |
179 | struct cpuidle_device *dev); | ||
173 | void (*reflect) (struct cpuidle_device *dev, int index); | 180 | void (*reflect) (struct cpuidle_device *dev, int index); |
174 | 181 | ||
175 | struct module *owner; | 182 | struct module *owner; |
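Putting the header changes together: a driver built on this interface declares its states once in the global cpuidle_driver, implements enter() with the added drv argument, and still registers one cpuidle_device per CPU for the usage data. A minimal sketch for a platform without CONFIG_ARCH_HAS_CPU_RELAX (so index 0 is a real state); the names, latencies and the trivial enter() body are illustrative:

#include <linux/module.h>
#include <linux/cpuidle.h>

static int demo_enter(struct cpuidle_device *dev,
		      struct cpuidle_driver *drv, int index)
{
	/* platform-specific idle entry would go here */
	dev->last_residency = drv->states[index].target_residency;	/* illustrative */
	return index;	/* convention in this series: return the state actually entered */
}

static struct cpuidle_driver demo_driver = {
	.name		  = "demo_idle",
	.owner		  = THIS_MODULE,
	.state_count	  = 1,
	.safe_state_index = 0,
	.states[0] = {
		.name			= "C1",
		.desc			= "demo idle state",
		.exit_latency		= 2,	/* us */
		.target_residency	= 10,	/* us */
		.flags			= CPUIDLE_FLAG_TIME_VALID,
		.enter			= demo_enter,
	},
};

The driver is then registered once with cpuidle_register_driver(&demo_driver), and each CPU's cpuidle_device (with a matching state_count) is registered with cpuidle_register_device() as before.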