| author | Linus Torvalds <torvalds@linux-foundation.org> | 2011-11-07 13:13:52 -0500 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-11-07 13:13:52 -0500 |
| commit | 3c00303206c3a1ccd86579efdc90bc35f140962e (patch) | |
| tree | 66170c84b5ddaeb102aea3530517a26657b6ea29 /drivers/cpuidle | |
| parent | 83dbb15e9cd78a3619e3db36777e2f81d09b2914 (diff) | |
| parent | efb90582c575084723cc14302c1300cb26c7e01f (diff) | |
Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux
* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux:
cpuidle: Single/Global registration of idle states
cpuidle: Split cpuidle_state structure and move per-cpu statistics fields
cpuidle: Remove CPUIDLE_FLAG_IGNORE and dev->prepare()
cpuidle: Move dev->last_residency update to driver enter routine; remove dev->last_state
ACPI: Fix CONFIG_ACPI_DOCK=n compiler warning
ACPI: Export FADT pm_profile integer value to userspace
thermal: Prevent polling from happening during system suspend
ACPI: Drop ACPI_NO_HARDWARE_INIT
ACPI atomicio: Convert width in bits to bytes in __acpi_ioremap_fast()
PNPACPI: Simplify disabled resource registration
ACPI: Fix possible recursive locking in hwregs.c
ACPI: use kstrdup()
mrst pmu: update comment
tools/power turbostat: less verbose debugging
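The cpuidle items in the shortlog are the interesting part of this pull: static C-state data (name, latencies, target residency, enter hook) moves out of each per-CPU struct cpuidle_device into a single table owned by struct cpuidle_driver, and only the per-CPU counters remain on the device as states_usage[]. A minimal sketch of what a platform driver looks like against the reworked API, as far as it can be inferred from the diff below; the "myplat" driver, its state parameters, and the idle hook are hypothetical:

```c
/*
 * Illustrative only: "myplat", its state parameters and the idle hook are
 * hypothetical; the structure and field names follow the cpuidle rework
 * in this merge (global driver-owned states[], per-device states_usage[]).
 */
#include <linux/cpuidle.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu.h>

static int myplat_enter(struct cpuidle_device *dev,
			struct cpuidle_driver *drv, int index)
{
	/* platform-specific low-power entry would go here */

	dev->last_residency = 0;	/* driver now reports residency itself */
	return index;			/* index of the state actually entered */
}

/* One global driver object: the static C-state data lives here, not per CPU. */
static struct cpuidle_driver myplat_idle_driver = {
	.name		= "myplat_idle",
	.owner		= THIS_MODULE,
	.states[0]	= {
		.name			= "C1",
		.desc			= "hypothetical shallow state",
		.flags			= CPUIDLE_FLAG_TIME_VALID,
		.exit_latency		= 1,	/* us */
		.target_residency	= 1,	/* us */
		.enter			= myplat_enter,
	},
	.state_count	= 1,
};

/* Only per-CPU bookkeeping (states_usage[], last_residency) stays per device. */
static DEFINE_PER_CPU(struct cpuidle_device, myplat_idle_dev);

static int __init myplat_idle_init(void)
{
	int cpu, ret;

	ret = cpuidle_register_driver(&myplat_idle_driver);
	if (ret)
		return ret;

	for_each_possible_cpu(cpu) {
		struct cpuidle_device *dev = &per_cpu(myplat_idle_dev, cpu);

		dev->cpu = cpu;
		dev->state_count = myplat_idle_driver.state_count;
		ret = cpuidle_register_device(dev);
		if (ret)
			break;
	}
	return ret;
}
device_initcall(myplat_idle_init);
```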
Diffstat (limited to 'drivers/cpuidle')
mode | file | lines changed
---|---|---
-rw-r--r-- | drivers/cpuidle/cpuidle.c | 86
-rw-r--r-- | drivers/cpuidle/driver.c | 25
-rw-r--r-- | drivers/cpuidle/governors/ladder.c | 41
-rw-r--r-- | drivers/cpuidle/governors/menu.c | 29
-rw-r--r-- | drivers/cpuidle/sysfs.c | 22

5 files changed, 115 insertions, 88 deletions
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index becd6d99203b..06ce2680d00d 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -62,8 +62,9 @@ static int __cpuidle_register_device(struct cpuidle_device *dev);
 int cpuidle_idle_call(void)
 {
 	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
+	struct cpuidle_driver *drv = cpuidle_get_driver();
 	struct cpuidle_state *target_state;
-	int next_state;
+	int next_state, entered_state;
 
 	if (off)
 		return -ENODEV;
@@ -84,45 +85,36 @@ int cpuidle_idle_call(void)
 	hrtimer_peek_ahead_timers();
 #endif
 
-	/*
-	 * Call the device's prepare function before calling the
-	 * governor's select function. ->prepare gives the device's
-	 * cpuidle driver a chance to update any dynamic information
-	 * of its cpuidle states for the current idle period, e.g.
-	 * state availability, latencies, residencies, etc.
-	 */
-	if (dev->prepare)
-		dev->prepare(dev);
-
 	/* ask the governor for the next state */
-	next_state = cpuidle_curr_governor->select(dev);
+	next_state = cpuidle_curr_governor->select(drv, dev);
 	if (need_resched()) {
 		local_irq_enable();
 		return 0;
 	}
 
-	target_state = &dev->states[next_state];
-
-	/* enter the state and update stats */
-	dev->last_state = target_state;
+	target_state = &drv->states[next_state];
 
 	trace_power_start(POWER_CSTATE, next_state, dev->cpu);
 	trace_cpu_idle(next_state, dev->cpu);
 
-	dev->last_residency = target_state->enter(dev, target_state);
+	entered_state = target_state->enter(dev, drv, next_state);
 
 	trace_power_end(dev->cpu);
 	trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu);
 
-	if (dev->last_state)
-		target_state = dev->last_state;
-
-	target_state->time += (unsigned long long)dev->last_residency;
-	target_state->usage++;
+	if (entered_state >= 0) {
+		/* Update cpuidle counters */
+		/* This can be moved to within driver enter routine
+		 * but that results in multiple copies of same code.
+		 */
+		dev->states_usage[entered_state].time +=
+				(unsigned long long)dev->last_residency;
+		dev->states_usage[entered_state].usage++;
+	}
 
 	/* give the governor an opportunity to reflect on the outcome */
 	if (cpuidle_curr_governor->reflect)
-		cpuidle_curr_governor->reflect(dev);
+		cpuidle_curr_governor->reflect(dev, entered_state);
 
 	return 0;
 }
@@ -173,11 +165,11 @@ void cpuidle_resume_and_unlock(void)
 EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);
 
 #ifdef CONFIG_ARCH_HAS_CPU_RELAX
-static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st)
+static int poll_idle(struct cpuidle_device *dev,
+		struct cpuidle_driver *drv, int index)
 {
 	ktime_t t1, t2;
 	s64 diff;
-	int ret;
 
 	t1 = ktime_get();
 	local_irq_enable();
@@ -189,15 +181,14 @@ static int poll_idle(struct cpuidle_device *dev, struct cpuidle_state *st)
 	if (diff > INT_MAX)
 		diff = INT_MAX;
 
-	ret = (int) diff;
-	return ret;
+	dev->last_residency = (int) diff;
+
+	return index;
 }
 
-static void poll_idle_init(struct cpuidle_device *dev)
+static void poll_idle_init(struct cpuidle_driver *drv)
 {
-	struct cpuidle_state *state = &dev->states[0];
-
-	cpuidle_set_statedata(state, NULL);
+	struct cpuidle_state *state = &drv->states[0];
 
 	snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
 	snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
@@ -208,7 +199,7 @@ static void poll_idle_init(struct cpuidle_device *dev)
 	state->enter = poll_idle;
 }
 #else
-static void poll_idle_init(struct cpuidle_device *dev) {}
+static void poll_idle_init(struct cpuidle_driver *drv) {}
 #endif /* CONFIG_ARCH_HAS_CPU_RELAX */
 
 /**
@@ -235,21 +226,20 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
 		return ret;
 	}
 
-	poll_idle_init(dev);
+	poll_idle_init(cpuidle_get_driver());
 
 	if ((ret = cpuidle_add_state_sysfs(dev)))
 		return ret;
 
 	if (cpuidle_curr_governor->enable &&
-	    (ret = cpuidle_curr_governor->enable(dev)))
+	    (ret = cpuidle_curr_governor->enable(cpuidle_get_driver(), dev)))
 		goto fail_sysfs;
 
 	for (i = 0; i < dev->state_count; i++) {
-		dev->states[i].usage = 0;
-		dev->states[i].time = 0;
+		dev->states_usage[i].usage = 0;
+		dev->states_usage[i].time = 0;
 	}
 	dev->last_residency = 0;
-	dev->last_state = NULL;
 
 	smp_wmb();
 
@@ -283,7 +273,7 @@ void cpuidle_disable_device(struct cpuidle_device *dev)
 	dev->enabled = 0;
 
 	if (cpuidle_curr_governor->disable)
-		cpuidle_curr_governor->disable(dev);
+		cpuidle_curr_governor->disable(cpuidle_get_driver(), dev);
 
 	cpuidle_remove_state_sysfs(dev);
 	enabled_devices--;
@@ -311,26 +301,6 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
 
 	init_completion(&dev->kobj_unregister);
 
-	/*
-	 * cpuidle driver should set the dev->power_specified bit
-	 * before registering the device if the driver provides
-	 * power_usage numbers.
-	 *
-	 * For those devices whose ->power_specified is not set,
-	 * we fill in power_usage with decreasing values as the
-	 * cpuidle code has an implicit assumption that state Cn
-	 * uses less power than C(n-1).
-	 *
-	 * With CONFIG_ARCH_HAS_CPU_RELAX, C0 is already assigned
-	 * an power value of -1. So we use -2, -3, etc, for other
-	 * c-states.
-	 */
-	if (!dev->power_specified) {
-		int i;
-		for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++)
-			dev->states[i].power_usage = -1 - i;
-	}
-
 	per_cpu(cpuidle_devices, dev->cpu) = dev;
 	list_add(&dev->device_list, &cpuidle_detected_devices);
 	if ((ret = cpuidle_add_sysfs(sys_dev))) {
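For reference, the contract that poll_idle() now follows (the driver stores the measured residency in dev->last_residency and returns the index of the state it actually entered) generalizes to platform drivers roughly as in this sketch, assuming the usual cpuidle and ktime headers are already included; myplat_do_idle() and the surrounding driver are hypothetical:

```c
/*
 * Sketch of the new ->enter() contract, modelled on poll_idle() above.
 * myplat_do_idle() is a hypothetical stand-in for the platform's real
 * low-power entry sequence.
 */
static void myplat_do_idle(int index)
{
	cpu_relax();	/* placeholder for the real idle instruction */
}

static int myplat_enter(struct cpuidle_device *dev,
			struct cpuidle_driver *drv, int index)
{
	ktime_t t1, t2;
	s64 diff;

	t1 = ktime_get();
	myplat_do_idle(index);
	t2 = ktime_get();

	diff = ktime_to_us(ktime_sub(t2, t1));
	if (diff > INT_MAX)
		diff = INT_MAX;

	/* The driver updates last_residency itself now... */
	dev->last_residency = (int) diff;

	/*
	 * ...and returns the index of the state it actually entered; a
	 * negative return tells cpuidle_idle_call() to skip the
	 * states_usage[] accounting.
	 */
	return index;
}
```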
diff --git a/drivers/cpuidle/driver.c b/drivers/cpuidle/driver.c
index 3f7e3cedd133..284d7af5a9c8 100644
--- a/drivers/cpuidle/driver.c
+++ b/drivers/cpuidle/driver.c
@@ -17,6 +17,30 @@
 static struct cpuidle_driver *cpuidle_curr_driver;
 DEFINE_SPINLOCK(cpuidle_driver_lock);
 
+static void __cpuidle_register_driver(struct cpuidle_driver *drv)
+{
+	int i;
+	/*
+	 * cpuidle driver should set the drv->power_specified bit
+	 * before registering if the driver provides
+	 * power_usage numbers.
+	 *
+	 * If power_specified is not set,
+	 * we fill in power_usage with decreasing values as the
+	 * cpuidle code has an implicit assumption that state Cn
+	 * uses less power than C(n-1).
+	 *
+	 * With CONFIG_ARCH_HAS_CPU_RELAX, C0 is already assigned
+	 * an power value of -1. So we use -2, -3, etc, for other
+	 * c-states.
+	 */
+	if (!drv->power_specified) {
+		for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++)
+			drv->states[i].power_usage = -1 - i;
+	}
+}
+
+
 /**
  * cpuidle_register_driver - registers a driver
  * @drv: the driver
@@ -34,6 +58,7 @@ int cpuidle_register_driver(struct cpuidle_driver *drv)
 		spin_unlock(&cpuidle_driver_lock);
 		return -EBUSY;
 	}
+	__cpuidle_register_driver(drv);
 	cpuidle_curr_driver = drv;
 	spin_unlock(&cpuidle_driver_lock);
 
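With the helper above, the decreasing-power default is applied once at cpuidle_register_driver() time rather than once per device. A worked illustration of what the fill produces, assuming a hypothetical three-state table:

```c
/*
 * Worked illustration of the default fill now done once per driver in
 * __cpuidle_register_driver(): for a hypothetical three-state table that
 * leaves power_specified clear, with the poll state at index 0
 * (CPUIDLE_DRIVER_STATE_START == 1 in that configuration), the loop yields
 *
 *	drv->states[1].power_usage == -1 - 1 == -2
 *	drv->states[2].power_usage == -1 - 2 == -3
 *
 * so deeper states report strictly lower power and the core's assumption
 * that state Cn uses less power than C(n-1) keeps holding without any
 * per-device pass.
 */
```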
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index 3b8fce20f023..b6a09ea859b1 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -60,9 +60,11 @@ static inline void ladder_do_selection(struct ladder_device *ldev,
 
 /**
  * ladder_select_state - selects the next state to enter
+ * @drv: cpuidle driver
  * @dev: the CPU
  */
-static int ladder_select_state(struct cpuidle_device *dev)
+static int ladder_select_state(struct cpuidle_driver *drv,
+				struct cpuidle_device *dev)
 {
 	struct ladder_device *ldev = &__get_cpu_var(ladder_devices);
 	struct ladder_device_state *last_state;
@@ -77,15 +79,17 @@ static int ladder_select_state(struct cpuidle_device *dev)
 
 	last_state = &ldev->states[last_idx];
 
-	if (dev->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID)
-		last_residency = cpuidle_get_last_residency(dev) - dev->states[last_idx].exit_latency;
+	if (drv->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID) {
+		last_residency = cpuidle_get_last_residency(dev) - \
+				 drv->states[last_idx].exit_latency;
+	}
 	else
 		last_residency = last_state->threshold.promotion_time + 1;
 
 	/* consider promotion */
-	if (last_idx < dev->state_count - 1 &&
+	if (last_idx < drv->state_count - 1 &&
 	    last_residency > last_state->threshold.promotion_time &&
-	    dev->states[last_idx + 1].exit_latency <= latency_req) {
+	    drv->states[last_idx + 1].exit_latency <= latency_req) {
 		last_state->stats.promotion_count++;
 		last_state->stats.demotion_count = 0;
 		if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) {
@@ -96,11 +100,11 @@ static int ladder_select_state(struct cpuidle_device *dev)
 
 	/* consider demotion */
 	if (last_idx > CPUIDLE_DRIVER_STATE_START &&
-	    dev->states[last_idx].exit_latency > latency_req) {
+	    drv->states[last_idx].exit_latency > latency_req) {
 		int i;
 
 		for (i = last_idx - 1; i > CPUIDLE_DRIVER_STATE_START; i--) {
-			if (dev->states[i].exit_latency <= latency_req)
+			if (drv->states[i].exit_latency <= latency_req)
 				break;
 		}
 		ladder_do_selection(ldev, last_idx, i);
@@ -123,9 +127,11 @@ static int ladder_select_state(struct cpuidle_device *dev)
 
 /**
  * ladder_enable_device - setup for the governor
+ * @drv: cpuidle driver
  * @dev: the CPU
  */
-static int ladder_enable_device(struct cpuidle_device *dev)
+static int ladder_enable_device(struct cpuidle_driver *drv,
+				struct cpuidle_device *dev)
 {
 	int i;
 	struct ladder_device *ldev = &per_cpu(ladder_devices, dev->cpu);
@@ -134,8 +140,8 @@ static int ladder_enable_device(struct cpuidle_device *dev)
 
 	ldev->last_state_idx = CPUIDLE_DRIVER_STATE_START;
 
-	for (i = 0; i < dev->state_count; i++) {
-		state = &dev->states[i];
+	for (i = 0; i < drv->state_count; i++) {
+		state = &drv->states[i];
 		lstate = &ldev->states[i];
 
 		lstate->stats.promotion_count = 0;
@@ -144,7 +150,7 @@ static int ladder_enable_device(struct cpuidle_device *dev)
 		lstate->threshold.promotion_count = PROMOTION_COUNT;
 		lstate->threshold.demotion_count = DEMOTION_COUNT;
 
-		if (i < dev->state_count - 1)
+		if (i < drv->state_count - 1)
 			lstate->threshold.promotion_time = state->exit_latency;
 		if (i > 0)
 			lstate->threshold.demotion_time = state->exit_latency;
@@ -153,11 +159,24 @@ static int ladder_enable_device(struct cpuidle_device *dev)
 	return 0;
 }
 
+/**
+ * ladder_reflect - update the correct last_state_idx
+ * @dev: the CPU
+ * @index: the index of actual state entered
+ */
+static void ladder_reflect(struct cpuidle_device *dev, int index)
+{
+	struct ladder_device *ldev = &__get_cpu_var(ladder_devices);
+	if (index > 0)
+		ldev->last_state_idx = index;
+}
+
 static struct cpuidle_governor ladder_governor = {
 	.name = "ladder",
 	.rating = 10,
 	.enable = ladder_enable_device,
 	.select = ladder_select_state,
+	.reflect = ladder_reflect,
 	.owner = THIS_MODULE,
 };
 
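A worked example of the promotion test above with the driver-owned state table; the numbers are hypothetical and assume the next state's exit latency also satisfies the PM QoS latency_req:

```c
/*
 * Worked example of the ladder promotion path with hypothetical numbers.
 * Suppose the CPU last ran state 2, drv->states[2].exit_latency = 10 us,
 * the state's promotion_time threshold is 100 us, and the CPU was idle
 * for 150 us; assume drv->states[3].exit_latency fits latency_req:
 *
 *	last_residency = cpuidle_get_last_residency(dev)	// 150
 *			 - drv->states[2].exit_latency		// - 10
 *		       = 140
 *
 * 140 > promotion_time, so stats.promotion_count is incremented, and once
 * it reaches threshold.promotion_count the ladder moves last_state_idx up
 * to state 3.
 */
```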
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 00275244ce2f..ad0952601ae2 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -183,7 +183,7 @@ static inline int performance_multiplier(void)
 
 static DEFINE_PER_CPU(struct menu_device, menu_devices);
 
-static void menu_update(struct cpuidle_device *dev);
+static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);
 
 /* This implements DIV_ROUND_CLOSEST but avoids 64 bit division */
 static u64 div_round64(u64 dividend, u32 divisor)
@@ -229,9 +229,10 @@ static void detect_repeating_patterns(struct menu_device *data)
 
 /**
  * menu_select - selects the next idle state to enter
+ * @drv: cpuidle driver containing state data
  * @dev: the CPU
  */
-static int menu_select(struct cpuidle_device *dev)
+static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 {
 	struct menu_device *data = &__get_cpu_var(menu_devices);
 	int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
@@ -241,7 +242,7 @@ static int menu_select(struct cpuidle_device *dev)
 	struct timespec t;
 
 	if (data->needs_update) {
-		menu_update(dev);
+		menu_update(drv, dev);
 		data->needs_update = 0;
 	}
 
@@ -286,11 +287,9 @@ static int menu_select(struct cpuidle_device *dev)
 	 * Find the idle state with the lowest power while satisfying
 	 * our constraints.
 	 */
-	for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++) {
-		struct cpuidle_state *s = &dev->states[i];
+	for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
+		struct cpuidle_state *s = &drv->states[i];
 
-		if (s->flags & CPUIDLE_FLAG_IGNORE)
-			continue;
 		if (s->target_residency > data->predicted_us)
 			continue;
 		if (s->exit_latency > latency_req)
@@ -311,26 +310,30 @@ static int menu_select(struct cpuidle_device *dev)
 /**
  * menu_reflect - records that data structures need update
  * @dev: the CPU
+ * @index: the index of actual entered state
 *
 * NOTE: it's important to be fast here because this operation will add to
 * the overall exit latency.
 */
-static void menu_reflect(struct cpuidle_device *dev)
+static void menu_reflect(struct cpuidle_device *dev, int index)
 {
 	struct menu_device *data = &__get_cpu_var(menu_devices);
-	data->needs_update = 1;
+	data->last_state_idx = index;
+	if (index >= 0)
+		data->needs_update = 1;
 }
 
 /**
  * menu_update - attempts to guess what happened after entry
+ * @drv: cpuidle driver containing state data
  * @dev: the CPU
  */
-static void menu_update(struct cpuidle_device *dev)
+static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 {
 	struct menu_device *data = &__get_cpu_var(menu_devices);
 	int last_idx = data->last_state_idx;
 	unsigned int last_idle_us = cpuidle_get_last_residency(dev);
-	struct cpuidle_state *target = &dev->states[last_idx];
+	struct cpuidle_state *target = &drv->states[last_idx];
 	unsigned int measured_us;
 	u64 new_factor;
 
@@ -384,9 +387,11 @@ static void menu_update(struct cpuidle_device *dev)
 
 /**
  * menu_enable_device - scans a CPU's states and does setup
+ * @drv: cpuidle driver
  * @dev: the CPU
  */
-static int menu_enable_device(struct cpuidle_device *dev)
+static int menu_enable_device(struct cpuidle_driver *drv,
+				struct cpuidle_device *dev)
 {
 	struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
 
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index be7917ec40c9..1e756e160dca 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -216,7 +216,8 @@ static struct kobj_type ktype_cpuidle = {
 
 struct cpuidle_state_attr {
 	struct attribute attr;
-	ssize_t (*show)(struct cpuidle_state *, char *);
+	ssize_t (*show)(struct cpuidle_state *, \
+					struct cpuidle_state_usage *, char *);
 	ssize_t (*store)(struct cpuidle_state *, const char *, size_t);
 };
 
@@ -224,19 +225,22 @@ struct cpuidle_state_attr {
 static struct cpuidle_state_attr attr_##_name = __ATTR(_name, 0444, show, NULL)
 
 #define define_show_state_function(_name) \
-static ssize_t show_state_##_name(struct cpuidle_state *state, char *buf) \
+static ssize_t show_state_##_name(struct cpuidle_state *state, \
+			struct cpuidle_state_usage *state_usage, char *buf) \
 { \
 	return sprintf(buf, "%u\n", state->_name);\
 }
 
 #define define_show_state_ull_function(_name) \
-static ssize_t show_state_##_name(struct cpuidle_state *state, char *buf) \
+static ssize_t show_state_##_name(struct cpuidle_state *state, \
+			struct cpuidle_state_usage *state_usage, char *buf) \
 { \
-	return sprintf(buf, "%llu\n", state->_name);\
+	return sprintf(buf, "%llu\n", state_usage->_name);\
 }
 
 #define define_show_state_str_function(_name) \
-static ssize_t show_state_##_name(struct cpuidle_state *state, char *buf) \
+static ssize_t show_state_##_name(struct cpuidle_state *state, \
+			struct cpuidle_state_usage *state_usage, char *buf) \
 { \
 	if (state->_name[0] == '\0')\
 		return sprintf(buf, "<null>\n");\
@@ -269,16 +273,18 @@ static struct attribute *cpuidle_state_default_attrs[] = {
 
 #define kobj_to_state_obj(k) container_of(k, struct cpuidle_state_kobj, kobj)
 #define kobj_to_state(k) (kobj_to_state_obj(k)->state)
+#define kobj_to_state_usage(k) (kobj_to_state_obj(k)->state_usage)
 #define attr_to_stateattr(a) container_of(a, struct cpuidle_state_attr, attr)
 static ssize_t cpuidle_state_show(struct kobject * kobj,
 	struct attribute * attr ,char * buf)
 {
 	int ret = -EIO;
 	struct cpuidle_state *state = kobj_to_state(kobj);
+	struct cpuidle_state_usage *state_usage = kobj_to_state_usage(kobj);
 	struct cpuidle_state_attr * cattr = attr_to_stateattr(attr);
 
 	if (cattr->show)
-		ret = cattr->show(state, buf);
+		ret = cattr->show(state, state_usage, buf);
 
 	return ret;
 }
@@ -316,13 +322,15 @@ int cpuidle_add_state_sysfs(struct cpuidle_device *device)
 {
 	int i, ret = -ENOMEM;
 	struct cpuidle_state_kobj *kobj;
+	struct cpuidle_driver *drv = cpuidle_get_driver();
 
 	/* state statistics */
 	for (i = 0; i < device->state_count; i++) {
 		kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL);
 		if (!kobj)
 			goto error_state;
-		kobj->state = &device->states[i];
+		kobj->state = &drv->states[i];
+		kobj->state_usage = &device->states_usage[i];
 		init_completion(&kobj->kobj_unregister);
 
 		ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle, &device->kobj,