aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorRajagopal Venkat <rajagopal.venkat@linaro.org>2012-10-25 19:50:09 -0400
committerRafael J. Wysocki <rafael.j.wysocki@intel.com>2012-11-14 18:35:04 -0500
commit7e6fdd4bad033fa2d73716377b184fa975b0d985 (patch)
tree171f13c8aadd833965a9cb0b83a84fe3eb74c1da
parent77b67063bb6bce6d475e910d3b886a606d0d91f7 (diff)
PM / devfreq: Core updates to support devices which can idle
Prepare devfreq core framework to support devices which can idle. When device idleness is detected perhaps through runtime-pm, need some mechanism to suspend devfreq load monitoring and resume back when device is online. Present code continues monitoring unless device is removed from devfreq core. This patch introduces following design changes, - use per device work instead of global work to monitor device load. This enables suspend/resume of device devfreq and reduces monitoring code complexity. - decouple delayed work based load monitoring logic from core by introducing helper functions to be used by governors. This provides flexibility for governors either to use delayed work based monitoring functions or to implement their own mechanism. - devfreq core interacts with governors via events to perform specific actions. These events include start/stop devfreq. This sets ground for adding suspend/resume events. The devfreq APIs are not modified and are kept intact. Signed-off-by: Rajagopal Venkat <rajagopal.venkat@linaro.org> Acked-by: MyungJoo Ham <myungjoo.ham@samsung.com> Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
-rw-r--r--Documentation/ABI/testing/sysfs-class-devfreq8
-rw-r--r--drivers/devfreq/devfreq.c442
-rw-r--r--drivers/devfreq/governor.h11
-rw-r--r--drivers/devfreq/governor_performance.c16
-rw-r--r--drivers/devfreq/governor_powersave.c16
-rw-r--r--drivers/devfreq/governor_simpleondemand.c24
-rw-r--r--drivers/devfreq/governor_userspace.c23
-rw-r--r--include/linux/devfreq.h34
8 files changed, 278 insertions, 296 deletions
diff --git a/Documentation/ABI/testing/sysfs-class-devfreq b/Documentation/ABI/testing/sysfs-class-devfreq
index 23d78b5aab11..89283b1b0240 100644
--- a/Documentation/ABI/testing/sysfs-class-devfreq
+++ b/Documentation/ABI/testing/sysfs-class-devfreq
@@ -21,14 +21,6 @@ Description:
21 The /sys/class/devfreq/.../cur_freq shows the current 21 The /sys/class/devfreq/.../cur_freq shows the current
22 frequency of the corresponding devfreq object. 22 frequency of the corresponding devfreq object.
23 23
24What: /sys/class/devfreq/.../central_polling
25Date: September 2011
26Contact: MyungJoo Ham <myungjoo.ham@samsung.com>
27Description:
28 The /sys/class/devfreq/.../central_polling shows whether
29 the devfreq ojbect is using devfreq-provided central
30 polling mechanism or not.
31
32What: /sys/class/devfreq/.../polling_interval 24What: /sys/class/devfreq/.../polling_interval
33Date: September 2011 25Date: September 2011
34Contact: MyungJoo Ham <myungjoo.ham@samsung.com> 26Contact: MyungJoo Ham <myungjoo.ham@samsung.com>
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index b146d76f04cf..1aaf1aeb1f1d 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -30,17 +30,11 @@
30struct class *devfreq_class; 30struct class *devfreq_class;
31 31
32/* 32/*
33 * devfreq_work periodically monitors every registered device. 33 * devfreq core provides delayed work based load monitoring helper
34 * The minimum polling interval is one jiffy. The polling interval is 34 * functions. Governors can use these or can implement their own
35 * determined by the minimum polling period among all polling devfreq 35 * monitoring mechanism.
36 * devices. The resolution of polling interval is one jiffy.
37 */ 36 */
38static bool polling;
39static struct workqueue_struct *devfreq_wq; 37static struct workqueue_struct *devfreq_wq;
40static struct delayed_work devfreq_work;
41
42/* wait removing if this is to be removed */
43static struct devfreq *wait_remove_device;
44 38
45/* The list of all device-devfreq */ 39/* The list of all device-devfreq */
46static LIST_HEAD(devfreq_list); 40static LIST_HEAD(devfreq_list);
@@ -72,6 +66,8 @@ static struct devfreq *find_device_devfreq(struct device *dev)
72 return ERR_PTR(-ENODEV); 66 return ERR_PTR(-ENODEV);
73} 67}
74 68
69/* Load monitoring helper functions for governors use */
70
75/** 71/**
76 * update_devfreq() - Reevaluate the device and configure frequency. 72 * update_devfreq() - Reevaluate the device and configure frequency.
77 * @devfreq: the devfreq instance. 73 * @devfreq: the devfreq instance.
@@ -121,6 +117,152 @@ int update_devfreq(struct devfreq *devfreq)
121} 117}
122 118
123/** 119/**
120 * devfreq_monitor() - Periodically poll devfreq objects.
121 * @work: the work struct used to run devfreq_monitor periodically.
122 *
123 */
124static void devfreq_monitor(struct work_struct *work)
125{
126 int err;
127 struct devfreq *devfreq = container_of(work,
128 struct devfreq, work.work);
129
130 mutex_lock(&devfreq->lock);
131 err = update_devfreq(devfreq);
132 if (err)
133 dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err);
134
135 queue_delayed_work(devfreq_wq, &devfreq->work,
136 msecs_to_jiffies(devfreq->profile->polling_ms));
137 mutex_unlock(&devfreq->lock);
138}
139
140/**
141 * devfreq_monitor_start() - Start load monitoring of devfreq instance
142 * @devfreq: the devfreq instance.
143 *
144 * Helper function for starting devfreq device load monitoring. By
145 * default delayed work based monitoring is supported. Function
146 * to be called from governor in response to DEVFREQ_GOV_START
147 * event when device is added to devfreq framework.
148 */
149void devfreq_monitor_start(struct devfreq *devfreq)
150{
151 INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
152 if (devfreq->profile->polling_ms)
153 queue_delayed_work(devfreq_wq, &devfreq->work,
154 msecs_to_jiffies(devfreq->profile->polling_ms));
155}
156
157/**
158 * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance
159 * @devfreq: the devfreq instance.
160 *
161 * Helper function to stop devfreq device load monitoring. Function
162 * to be called from governor in response to DEVFREQ_GOV_STOP
163 * event when device is removed from devfreq framework.
164 */
165void devfreq_monitor_stop(struct devfreq *devfreq)
166{
167 cancel_delayed_work_sync(&devfreq->work);
168}
169
170/**
171 * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance
172 * @devfreq: the devfreq instance.
173 *
174 * Helper function to suspend devfreq device load monitoring. Function
175 * to be called from governor in response to DEVFREQ_GOV_SUSPEND
176 * event or when polling interval is set to zero.
177 *
178 * Note: Though this function is same as devfreq_monitor_stop(),
179 * intentionally kept separate to provide hooks for collecting
180 * transition statistics.
181 */
182void devfreq_monitor_suspend(struct devfreq *devfreq)
183{
184 mutex_lock(&devfreq->lock);
185 if (devfreq->stop_polling) {
186 mutex_unlock(&devfreq->lock);
187 return;
188 }
189
190 devfreq->stop_polling = true;
191 mutex_unlock(&devfreq->lock);
192 cancel_delayed_work_sync(&devfreq->work);
193}
194
195/**
196 * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance
197 * @devfreq: the devfreq instance.
198 *
199 * Helper function to resume devfreq device load monitoring. Function
200 * to be called from governor in response to DEVFREQ_GOV_RESUME
201 * event or when polling interval is set to non-zero.
202 */
203void devfreq_monitor_resume(struct devfreq *devfreq)
204{
205 mutex_lock(&devfreq->lock);
206 if (!devfreq->stop_polling)
207 goto out;
208
209 if (!delayed_work_pending(&devfreq->work) &&
210 devfreq->profile->polling_ms)
211 queue_delayed_work(devfreq_wq, &devfreq->work,
212 msecs_to_jiffies(devfreq->profile->polling_ms));
213 devfreq->stop_polling = false;
214
215out:
216 mutex_unlock(&devfreq->lock);
217}
218
219/**
220 * devfreq_interval_update() - Update device devfreq monitoring interval
221 * @devfreq: the devfreq instance.
222 * @delay: new polling interval to be set.
223 *
224 * Helper function to set new load monitoring polling interval. Function
225 * to be called from governor in response to DEVFREQ_GOV_INTERVAL event.
226 */
227void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
228{
229 unsigned int cur_delay = devfreq->profile->polling_ms;
230 unsigned int new_delay = *delay;
231
232 mutex_lock(&devfreq->lock);
233 devfreq->profile->polling_ms = new_delay;
234
235 if (devfreq->stop_polling)
236 goto out;
237
238 /* if new delay is zero, stop polling */
239 if (!new_delay) {
240 mutex_unlock(&devfreq->lock);
241 cancel_delayed_work_sync(&devfreq->work);
242 return;
243 }
244
245 /* if current delay is zero, start polling with new delay */
246 if (!cur_delay) {
247 queue_delayed_work(devfreq_wq, &devfreq->work,
248 msecs_to_jiffies(devfreq->profile->polling_ms));
249 goto out;
250 }
251
252 /* if current delay is greater than new delay, restart polling */
253 if (cur_delay > new_delay) {
254 mutex_unlock(&devfreq->lock);
255 cancel_delayed_work_sync(&devfreq->work);
256 mutex_lock(&devfreq->lock);
257 if (!devfreq->stop_polling)
258 queue_delayed_work(devfreq_wq, &devfreq->work,
259 msecs_to_jiffies(devfreq->profile->polling_ms));
260 }
261out:
262 mutex_unlock(&devfreq->lock);
263}
264
265/**
124 * devfreq_notifier_call() - Notify that the device frequency requirements 266 * devfreq_notifier_call() - Notify that the device frequency requirements
125 * has been changed out of devfreq framework. 267 * has been changed out of devfreq framework.
126 * @nb the notifier_block (supposed to be devfreq->nb) 268 * @nb the notifier_block (supposed to be devfreq->nb)
@@ -143,59 +285,32 @@ static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
143} 285}
144 286
145/** 287/**
146 * _remove_devfreq() - Remove devfreq from the device. 288 * _remove_devfreq() - Remove devfreq from the list and release its resources.
147 * @devfreq: the devfreq struct 289 * @devfreq: the devfreq struct
148 * @skip: skip calling device_unregister(). 290 * @skip: skip calling device_unregister().
149 *
150 * Note that the caller should lock devfreq->lock before calling
151 * this. _remove_devfreq() will unlock it and free devfreq
152 * internally. devfreq_list_lock should be locked by the caller
153 * as well (not relased at return)
154 *
155 * Lock usage:
156 * devfreq->lock: locked before call.
157 * unlocked at return (and freed)
158 * devfreq_list_lock: locked before call.
159 * kept locked at return.
160 * if devfreq is centrally polled.
161 *
162 * Freed memory:
163 * devfreq
164 */ 291 */
165static void _remove_devfreq(struct devfreq *devfreq, bool skip) 292static void _remove_devfreq(struct devfreq *devfreq, bool skip)
166{ 293{
167 if (!mutex_is_locked(&devfreq->lock)) { 294 mutex_lock(&devfreq_list_lock);
168 WARN(true, "devfreq->lock must be locked by the caller.\n"); 295 if (IS_ERR(find_device_devfreq(devfreq->dev.parent))) {
169 return; 296 mutex_unlock(&devfreq_list_lock);
170 } 297 dev_warn(&devfreq->dev, "releasing devfreq which doesn't exist\n");
171 if (!devfreq->governor->no_central_polling &&
172 !mutex_is_locked(&devfreq_list_lock)) {
173 WARN(true, "devfreq_list_lock must be locked by the caller.\n");
174 return; 298 return;
175 } 299 }
300 list_del(&devfreq->node);
301 mutex_unlock(&devfreq_list_lock);
176 302
177 if (devfreq->being_removed) 303 devfreq->governor->event_handler(devfreq, DEVFREQ_GOV_STOP, NULL);
178 return;
179
180 devfreq->being_removed = true;
181 304
182 if (devfreq->profile->exit) 305 if (devfreq->profile->exit)
183 devfreq->profile->exit(devfreq->dev.parent); 306 devfreq->profile->exit(devfreq->dev.parent);
184 307
185 if (devfreq->governor->exit)
186 devfreq->governor->exit(devfreq);
187
188 if (!skip && get_device(&devfreq->dev)) { 308 if (!skip && get_device(&devfreq->dev)) {
189 device_unregister(&devfreq->dev); 309 device_unregister(&devfreq->dev);
190 put_device(&devfreq->dev); 310 put_device(&devfreq->dev);
191 } 311 }
192 312
193 if (!devfreq->governor->no_central_polling)
194 list_del(&devfreq->node);
195
196 mutex_unlock(&devfreq->lock);
197 mutex_destroy(&devfreq->lock); 313 mutex_destroy(&devfreq->lock);
198
199 kfree(devfreq); 314 kfree(devfreq);
200} 315}
201 316
@@ -210,130 +325,8 @@ static void _remove_devfreq(struct devfreq *devfreq, bool skip)
210static void devfreq_dev_release(struct device *dev) 325static void devfreq_dev_release(struct device *dev)
211{ 326{
212 struct devfreq *devfreq = to_devfreq(dev); 327 struct devfreq *devfreq = to_devfreq(dev);
213 bool central_polling = !devfreq->governor->no_central_polling;
214
215 /*
216 * If devfreq_dev_release() was called by device_unregister() of
217 * _remove_devfreq(), we cannot mutex_lock(&devfreq->lock) and
218 * being_removed is already set. This also partially checks the case
219 * where devfreq_dev_release() is called from a thread other than
220 * the one called _remove_devfreq(); however, this case is
221 * dealt completely with another following being_removed check.
222 *
223 * Because being_removed is never being
224 * unset, we do not need to worry about race conditions on
225 * being_removed.
226 */
227 if (devfreq->being_removed)
228 return;
229 328
230 if (central_polling)
231 mutex_lock(&devfreq_list_lock);
232
233 mutex_lock(&devfreq->lock);
234
235 /*
236 * Check being_removed flag again for the case where
237 * devfreq_dev_release() was called in a thread other than the one
238 * possibly called _remove_devfreq().
239 */
240 if (devfreq->being_removed) {
241 mutex_unlock(&devfreq->lock);
242 goto out;
243 }
244
245 /* devfreq->lock is unlocked and removed in _removed_devfreq() */
246 _remove_devfreq(devfreq, true); 329 _remove_devfreq(devfreq, true);
247
248out:
249 if (central_polling)
250 mutex_unlock(&devfreq_list_lock);
251}
252
253/**
254 * devfreq_monitor() - Periodically poll devfreq objects.
255 * @work: the work struct used to run devfreq_monitor periodically.
256 *
257 */
258static void devfreq_monitor(struct work_struct *work)
259{
260 static unsigned long last_polled_at;
261 struct devfreq *devfreq, *tmp;
262 int error;
263 unsigned long jiffies_passed;
264 unsigned long next_jiffies = ULONG_MAX, now = jiffies;
265 struct device *dev;
266
267 /* Initially last_polled_at = 0, polling every device at bootup */
268 jiffies_passed = now - last_polled_at;
269 last_polled_at = now;
270 if (jiffies_passed == 0)
271 jiffies_passed = 1;
272
273 mutex_lock(&devfreq_list_lock);
274 list_for_each_entry_safe(devfreq, tmp, &devfreq_list, node) {
275 mutex_lock(&devfreq->lock);
276 dev = devfreq->dev.parent;
277
278 /* Do not remove tmp for a while */
279 wait_remove_device = tmp;
280
281 if (devfreq->governor->no_central_polling ||
282 devfreq->next_polling == 0) {
283 mutex_unlock(&devfreq->lock);
284 continue;
285 }
286 mutex_unlock(&devfreq_list_lock);
287
288 /*
289 * Reduce more next_polling if devfreq_wq took an extra
290 * delay. (i.e., CPU has been idled.)
291 */
292 if (devfreq->next_polling <= jiffies_passed) {
293 error = update_devfreq(devfreq);
294
295 /* Remove a devfreq with an error. */
296 if (error && error != -EAGAIN) {
297
298 dev_err(dev, "Due to update_devfreq error(%d), devfreq(%s) is removed from the device\n",
299 error, devfreq->governor->name);
300
301 /*
302 * Unlock devfreq before locking the list
303 * in order to avoid deadlock with
304 * find_device_devfreq or others
305 */
306 mutex_unlock(&devfreq->lock);
307 mutex_lock(&devfreq_list_lock);
308 /* Check if devfreq is already removed */
309 if (IS_ERR(find_device_devfreq(dev)))
310 continue;
311 mutex_lock(&devfreq->lock);
312 /* This unlocks devfreq->lock and free it */
313 _remove_devfreq(devfreq, false);
314 continue;
315 }
316 devfreq->next_polling = devfreq->polling_jiffies;
317 } else {
318 devfreq->next_polling -= jiffies_passed;
319 }
320
321 if (devfreq->next_polling)
322 next_jiffies = (next_jiffies > devfreq->next_polling) ?
323 devfreq->next_polling : next_jiffies;
324
325 mutex_unlock(&devfreq->lock);
326 mutex_lock(&devfreq_list_lock);
327 }
328 wait_remove_device = NULL;
329 mutex_unlock(&devfreq_list_lock);
330
331 if (next_jiffies > 0 && next_jiffies < ULONG_MAX) {
332 polling = true;
333 queue_delayed_work(devfreq_wq, &devfreq_work, next_jiffies);
334 } else {
335 polling = false;
336 }
337} 330}
338 331
339/** 332/**
@@ -357,16 +350,13 @@ struct devfreq *devfreq_add_device(struct device *dev,
357 return ERR_PTR(-EINVAL); 350 return ERR_PTR(-EINVAL);
358 } 351 }
359 352
360 353 mutex_lock(&devfreq_list_lock);
361 if (!governor->no_central_polling) { 354 devfreq = find_device_devfreq(dev);
362 mutex_lock(&devfreq_list_lock); 355 mutex_unlock(&devfreq_list_lock);
363 devfreq = find_device_devfreq(dev); 356 if (!IS_ERR(devfreq)) {
364 mutex_unlock(&devfreq_list_lock); 357 dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__);
365 if (!IS_ERR(devfreq)) { 358 err = -EINVAL;
366 dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__); 359 goto err_out;
367 err = -EINVAL;
368 goto err_out;
369 }
370 } 360 }
371 361
372 devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL); 362 devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
@@ -386,48 +376,41 @@ struct devfreq *devfreq_add_device(struct device *dev,
386 devfreq->governor = governor; 376 devfreq->governor = governor;
387 devfreq->previous_freq = profile->initial_freq; 377 devfreq->previous_freq = profile->initial_freq;
388 devfreq->data = data; 378 devfreq->data = data;
389 devfreq->next_polling = devfreq->polling_jiffies
390 = msecs_to_jiffies(devfreq->profile->polling_ms);
391 devfreq->nb.notifier_call = devfreq_notifier_call; 379 devfreq->nb.notifier_call = devfreq_notifier_call;
392 380
393 dev_set_name(&devfreq->dev, dev_name(dev)); 381 dev_set_name(&devfreq->dev, dev_name(dev));
394 err = device_register(&devfreq->dev); 382 err = device_register(&devfreq->dev);
395 if (err) { 383 if (err) {
396 put_device(&devfreq->dev); 384 put_device(&devfreq->dev);
385 mutex_unlock(&devfreq->lock);
397 goto err_dev; 386 goto err_dev;
398 } 387 }
399 388
400 if (governor->init)
401 err = governor->init(devfreq);
402 if (err)
403 goto err_init;
404
405 mutex_unlock(&devfreq->lock); 389 mutex_unlock(&devfreq->lock);
406 390
407 if (governor->no_central_polling)
408 goto out;
409
410 mutex_lock(&devfreq_list_lock); 391 mutex_lock(&devfreq_list_lock);
411
412 list_add(&devfreq->node, &devfreq_list); 392 list_add(&devfreq->node, &devfreq_list);
393 mutex_unlock(&devfreq_list_lock);
413 394
414 if (devfreq_wq && devfreq->next_polling && !polling) { 395 err = devfreq->governor->event_handler(devfreq,
415 polling = true; 396 DEVFREQ_GOV_START, NULL);
416 queue_delayed_work(devfreq_wq, &devfreq_work, 397 if (err) {
417 devfreq->next_polling); 398 dev_err(dev, "%s: Unable to start governor for the device\n",
399 __func__);
400 goto err_init;
418 } 401 }
419 mutex_unlock(&devfreq_list_lock); 402
420out:
421 return devfreq; 403 return devfreq;
422 404
423err_init: 405err_init:
406 list_del(&devfreq->node);
424 device_unregister(&devfreq->dev); 407 device_unregister(&devfreq->dev);
425err_dev: 408err_dev:
426 mutex_unlock(&devfreq->lock);
427 kfree(devfreq); 409 kfree(devfreq);
428err_out: 410err_out:
429 return ERR_PTR(err); 411 return ERR_PTR(err);
430} 412}
413EXPORT_SYMBOL(devfreq_add_device);
431 414
432/** 415/**
433 * devfreq_remove_device() - Remove devfreq feature from a device. 416 * devfreq_remove_device() - Remove devfreq feature from a device.
@@ -435,30 +418,14 @@ err_out:
435 */ 418 */
436int devfreq_remove_device(struct devfreq *devfreq) 419int devfreq_remove_device(struct devfreq *devfreq)
437{ 420{
438 bool central_polling;
439
440 if (!devfreq) 421 if (!devfreq)
441 return -EINVAL; 422 return -EINVAL;
442 423
443 central_polling = !devfreq->governor->no_central_polling; 424 _remove_devfreq(devfreq, false);
444
445 if (central_polling) {
446 mutex_lock(&devfreq_list_lock);
447 while (wait_remove_device == devfreq) {
448 mutex_unlock(&devfreq_list_lock);
449 schedule();
450 mutex_lock(&devfreq_list_lock);
451 }
452 }
453
454 mutex_lock(&devfreq->lock);
455 _remove_devfreq(devfreq, false); /* it unlocks devfreq->lock */
456
457 if (central_polling)
458 mutex_unlock(&devfreq_list_lock);
459 425
460 return 0; 426 return 0;
461} 427}
428EXPORT_SYMBOL(devfreq_remove_device);
462 429
463static ssize_t show_governor(struct device *dev, 430static ssize_t show_governor(struct device *dev,
464 struct device_attribute *attr, char *buf) 431 struct device_attribute *attr, char *buf)
@@ -490,35 +457,13 @@ static ssize_t store_polling_interval(struct device *dev,
490 if (ret != 1) 457 if (ret != 1)
491 goto out; 458 goto out;
492 459
493 mutex_lock(&df->lock); 460 df->governor->event_handler(df, DEVFREQ_GOV_INTERVAL, &value);
494 df->profile->polling_ms = value;
495 df->next_polling = df->polling_jiffies
496 = msecs_to_jiffies(value);
497 mutex_unlock(&df->lock);
498
499 ret = count; 461 ret = count;
500 462
501 if (df->governor->no_central_polling)
502 goto out;
503
504 mutex_lock(&devfreq_list_lock);
505 if (df->next_polling > 0 && !polling) {
506 polling = true;
507 queue_delayed_work(devfreq_wq, &devfreq_work,
508 df->next_polling);
509 }
510 mutex_unlock(&devfreq_list_lock);
511out: 463out:
512 return ret; 464 return ret;
513} 465}
514 466
515static ssize_t show_central_polling(struct device *dev,
516 struct device_attribute *attr, char *buf)
517{
518 return sprintf(buf, "%d\n",
519 !to_devfreq(dev)->governor->no_central_polling);
520}
521
522static ssize_t store_min_freq(struct device *dev, struct device_attribute *attr, 467static ssize_t store_min_freq(struct device *dev, struct device_attribute *attr,
523 const char *buf, size_t count) 468 const char *buf, size_t count)
524{ 469{
@@ -590,7 +535,6 @@ static ssize_t show_max_freq(struct device *dev, struct device_attribute *attr,
590static struct device_attribute devfreq_attrs[] = { 535static struct device_attribute devfreq_attrs[] = {
591 __ATTR(governor, S_IRUGO, show_governor, NULL), 536 __ATTR(governor, S_IRUGO, show_governor, NULL),
592 __ATTR(cur_freq, S_IRUGO, show_freq, NULL), 537 __ATTR(cur_freq, S_IRUGO, show_freq, NULL),
593 __ATTR(central_polling, S_IRUGO, show_central_polling, NULL),
594 __ATTR(polling_interval, S_IRUGO | S_IWUSR, show_polling_interval, 538 __ATTR(polling_interval, S_IRUGO | S_IWUSR, show_polling_interval,
595 store_polling_interval), 539 store_polling_interval),
596 __ATTR(min_freq, S_IRUGO | S_IWUSR, show_min_freq, store_min_freq), 540 __ATTR(min_freq, S_IRUGO | S_IWUSR, show_min_freq, store_min_freq),
@@ -598,23 +542,6 @@ static struct device_attribute devfreq_attrs[] = {
598 { }, 542 { },
599}; 543};
600 544
601/**
602 * devfreq_start_polling() - Initialize data structure for devfreq framework and
603 * start polling registered devfreq devices.
604 */
605static int __init devfreq_start_polling(void)
606{
607 mutex_lock(&devfreq_list_lock);
608 polling = false;
609 devfreq_wq = create_freezable_workqueue("devfreq_wq");
610 INIT_DEFERRABLE_WORK(&devfreq_work, devfreq_monitor);
611 mutex_unlock(&devfreq_list_lock);
612
613 devfreq_monitor(&devfreq_work.work);
614 return 0;
615}
616late_initcall(devfreq_start_polling);
617
618static int __init devfreq_init(void) 545static int __init devfreq_init(void)
619{ 546{
620 devfreq_class = class_create(THIS_MODULE, "devfreq"); 547 devfreq_class = class_create(THIS_MODULE, "devfreq");
@@ -622,7 +549,15 @@ static int __init devfreq_init(void)
622 pr_err("%s: couldn't create class\n", __FILE__); 549 pr_err("%s: couldn't create class\n", __FILE__);
623 return PTR_ERR(devfreq_class); 550 return PTR_ERR(devfreq_class);
624 } 551 }
552
553 devfreq_wq = create_freezable_workqueue("devfreq_wq");
554 if (IS_ERR(devfreq_wq)) {
555 class_destroy(devfreq_class);
556 pr_err("%s: couldn't create workqueue\n", __FILE__);
557 return PTR_ERR(devfreq_wq);
558 }
625 devfreq_class->dev_attrs = devfreq_attrs; 559 devfreq_class->dev_attrs = devfreq_attrs;
560
626 return 0; 561 return 0;
627} 562}
628subsys_initcall(devfreq_init); 563subsys_initcall(devfreq_init);
@@ -630,6 +565,7 @@ subsys_initcall(devfreq_init);
630static void __exit devfreq_exit(void) 565static void __exit devfreq_exit(void)
631{ 566{
632 class_destroy(devfreq_class); 567 class_destroy(devfreq_class);
568 destroy_workqueue(devfreq_wq);
633} 569}
634module_exit(devfreq_exit); 570module_exit(devfreq_exit);
635 571
diff --git a/drivers/devfreq/governor.h b/drivers/devfreq/governor.h
index ea7f13c58ded..bb3aff32d627 100644
--- a/drivers/devfreq/governor.h
+++ b/drivers/devfreq/governor.h
@@ -18,7 +18,18 @@
18 18
19#define to_devfreq(DEV) container_of((DEV), struct devfreq, dev) 19#define to_devfreq(DEV) container_of((DEV), struct devfreq, dev)
20 20
21/* Devfreq events */
22#define DEVFREQ_GOV_START 0x1
23#define DEVFREQ_GOV_STOP 0x2
24#define DEVFREQ_GOV_INTERVAL 0x3
25
21/* Caution: devfreq->lock must be locked before calling update_devfreq */ 26/* Caution: devfreq->lock must be locked before calling update_devfreq */
22extern int update_devfreq(struct devfreq *devfreq); 27extern int update_devfreq(struct devfreq *devfreq);
23 28
29extern void devfreq_monitor_start(struct devfreq *devfreq);
30extern void devfreq_monitor_stop(struct devfreq *devfreq);
31extern void devfreq_monitor_suspend(struct devfreq *devfreq);
32extern void devfreq_monitor_resume(struct devfreq *devfreq);
33extern void devfreq_interval_update(struct devfreq *devfreq,
34 unsigned int *delay);
24#endif /* _GOVERNOR_H */ 35#endif /* _GOVERNOR_H */
diff --git a/drivers/devfreq/governor_performance.c b/drivers/devfreq/governor_performance.c
index af75ddd4f158..eea3f9bd7894 100644
--- a/drivers/devfreq/governor_performance.c
+++ b/drivers/devfreq/governor_performance.c
@@ -26,14 +26,22 @@ static int devfreq_performance_func(struct devfreq *df,
26 return 0; 26 return 0;
27} 27}
28 28
29static int performance_init(struct devfreq *devfreq) 29static int devfreq_performance_handler(struct devfreq *devfreq,
30 unsigned int event, void *data)
30{ 31{
31 return update_devfreq(devfreq); 32 int ret = 0;
33
34 if (event == DEVFREQ_GOV_START) {
35 mutex_lock(&devfreq->lock);
36 ret = update_devfreq(devfreq);
37 mutex_unlock(&devfreq->lock);
38 }
39
40 return ret;
32} 41}
33 42
34const struct devfreq_governor devfreq_performance = { 43const struct devfreq_governor devfreq_performance = {
35 .name = "performance", 44 .name = "performance",
36 .init = performance_init,
37 .get_target_freq = devfreq_performance_func, 45 .get_target_freq = devfreq_performance_func,
38 .no_central_polling = true, 46 .event_handler = devfreq_performance_handler,
39}; 47};
diff --git a/drivers/devfreq/governor_powersave.c b/drivers/devfreq/governor_powersave.c
index fec0cdbd2477..2868d98ed3e2 100644
--- a/drivers/devfreq/governor_powersave.c
+++ b/drivers/devfreq/governor_powersave.c
@@ -23,14 +23,22 @@ static int devfreq_powersave_func(struct devfreq *df,
23 return 0; 23 return 0;
24} 24}
25 25
26static int powersave_init(struct devfreq *devfreq) 26static int devfreq_powersave_handler(struct devfreq *devfreq,
27 unsigned int event, void *data)
27{ 28{
28 return update_devfreq(devfreq); 29 int ret = 0;
30
31 if (event == DEVFREQ_GOV_START) {
32 mutex_lock(&devfreq->lock);
33 ret = update_devfreq(devfreq);
34 mutex_unlock(&devfreq->lock);
35 }
36
37 return ret;
29} 38}
30 39
31const struct devfreq_governor devfreq_powersave = { 40const struct devfreq_governor devfreq_powersave = {
32 .name = "powersave", 41 .name = "powersave",
33 .init = powersave_init,
34 .get_target_freq = devfreq_powersave_func, 42 .get_target_freq = devfreq_powersave_func,
35 .no_central_polling = true, 43 .event_handler = devfreq_powersave_handler,
36}; 44};
diff --git a/drivers/devfreq/governor_simpleondemand.c b/drivers/devfreq/governor_simpleondemand.c
index a2e3eae79011..3716a659122b 100644
--- a/drivers/devfreq/governor_simpleondemand.c
+++ b/drivers/devfreq/governor_simpleondemand.c
@@ -12,6 +12,7 @@
12#include <linux/errno.h> 12#include <linux/errno.h>
13#include <linux/devfreq.h> 13#include <linux/devfreq.h>
14#include <linux/math64.h> 14#include <linux/math64.h>
15#include "governor.h"
15 16
16/* Default constants for DevFreq-Simple-Ondemand (DFSO) */ 17/* Default constants for DevFreq-Simple-Ondemand (DFSO) */
17#define DFSO_UPTHRESHOLD (90) 18#define DFSO_UPTHRESHOLD (90)
@@ -88,7 +89,30 @@ static int devfreq_simple_ondemand_func(struct devfreq *df,
88 return 0; 89 return 0;
89} 90}
90 91
92static int devfreq_simple_ondemand_handler(struct devfreq *devfreq,
93 unsigned int event, void *data)
94{
95 switch (event) {
96 case DEVFREQ_GOV_START:
97 devfreq_monitor_start(devfreq);
98 break;
99
100 case DEVFREQ_GOV_STOP:
101 devfreq_monitor_stop(devfreq);
102 break;
103
104 case DEVFREQ_GOV_INTERVAL:
105 devfreq_interval_update(devfreq, (unsigned int *)data);
106 break;
107 default:
108 break;
109 }
110
111 return 0;
112}
113
91const struct devfreq_governor devfreq_simple_ondemand = { 114const struct devfreq_governor devfreq_simple_ondemand = {
92 .name = "simple_ondemand", 115 .name = "simple_ondemand",
93 .get_target_freq = devfreq_simple_ondemand_func, 116 .get_target_freq = devfreq_simple_ondemand_func,
117 .event_handler = devfreq_simple_ondemand_handler,
94}; 118};
diff --git a/drivers/devfreq/governor_userspace.c b/drivers/devfreq/governor_userspace.c
index 0681246fc89d..7067555bd444 100644
--- a/drivers/devfreq/governor_userspace.c
+++ b/drivers/devfreq/governor_userspace.c
@@ -116,10 +116,27 @@ static void userspace_exit(struct devfreq *devfreq)
116 devfreq->data = NULL; 116 devfreq->data = NULL;
117} 117}
118 118
119static int devfreq_userspace_handler(struct devfreq *devfreq,
120 unsigned int event, void *data)
121{
122 int ret = 0;
123
124 switch (event) {
125 case DEVFREQ_GOV_START:
126 ret = userspace_init(devfreq);
127 break;
128 case DEVFREQ_GOV_STOP:
129 userspace_exit(devfreq);
130 break;
131 default:
132 break;
133 }
134
135 return ret;
136}
137
119const struct devfreq_governor devfreq_userspace = { 138const struct devfreq_governor devfreq_userspace = {
120 .name = "userspace", 139 .name = "userspace",
121 .get_target_freq = devfreq_userspace_func, 140 .get_target_freq = devfreq_userspace_func,
122 .init = userspace_init, 141 .event_handler = devfreq_userspace_handler,
123 .exit = userspace_exit,
124 .no_central_polling = true,
125}; 142};
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index 281c72a3b9d5..9cdffde74bb5 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -91,25 +91,18 @@ struct devfreq_dev_profile {
91 * status of the device (load = busy_time / total_time). 91 * status of the device (load = busy_time / total_time).
92 * If no_central_polling is set, this callback is called 92 * If no_central_polling is set, this callback is called
93 * only with update_devfreq() notified by OPP. 93 * only with update_devfreq() notified by OPP.
94 * @init Called when the devfreq is being attached to a device 94 * @event_handler Callback for devfreq core framework to notify events
95 * @exit Called when the devfreq is being removed from a 95 * to governors. Events include per device governor
96 * device. Governor should stop any internal routines 96 * init and exit, opp changes out of devfreq, suspend
97 * before return because related data may be 97 * and resume of per device devfreq during device idle.
98 * freed after exit().
99 * @no_central_polling Do not use devfreq's central polling mechanism.
100 * When this is set, devfreq will not call
101 * get_target_freq with devfreq_monitor(). However,
102 * devfreq will call get_target_freq with
103 * devfreq_update() notified by OPP framework.
104 * 98 *
105 * Note that the callbacks are called with devfreq->lock locked by devfreq. 99 * Note that the callbacks are called with devfreq->lock locked by devfreq.
106 */ 100 */
107struct devfreq_governor { 101struct devfreq_governor {
108 const char name[DEVFREQ_NAME_LEN]; 102 const char name[DEVFREQ_NAME_LEN];
109 int (*get_target_freq)(struct devfreq *this, unsigned long *freq); 103 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
110 int (*init)(struct devfreq *this); 104 int (*event_handler)(struct devfreq *devfreq,
111 void (*exit)(struct devfreq *this); 105 unsigned int event, void *data);
112 const bool no_central_polling;
113}; 106};
114 107
115/** 108/**
@@ -124,18 +117,13 @@ struct devfreq_governor {
124 * @nb notifier block used to notify devfreq object that it should 117 * @nb notifier block used to notify devfreq object that it should
125 * reevaluate operable frequencies. Devfreq users may use 118 * reevaluate operable frequencies. Devfreq users may use
126 * devfreq.nb to the corresponding register notifier call chain. 119 * devfreq.nb to the corresponding register notifier call chain.
127 * @polling_jiffies interval in jiffies. 120 * @work delayed work for load monitoring.
128 * @previous_freq previously configured frequency value. 121 * @previous_freq previously configured frequency value.
129 * @next_polling the number of remaining jiffies to poll with
130 * "devfreq_monitor" executions to reevaluate
131 * frequency/voltage of the device. Set by
132 * profile's polling_ms interval.
133 * @data Private data of the governor. The devfreq framework does not 122 * @data Private data of the governor. The devfreq framework does not
134 * touch this. 123 * touch this.
135 * @being_removed a flag to mark that this object is being removed in
136 * order to prevent trying to remove the object multiple times.
137 * @min_freq Limit minimum frequency requested by user (0: none) 124 * @min_freq Limit minimum frequency requested by user (0: none)
138 * @max_freq Limit maximum frequency requested by user (0: none) 125 * @max_freq Limit maximum frequency requested by user (0: none)
126 * @stop_polling devfreq polling status of a device.
139 * 127 *
140 * This structure stores the devfreq information for a give device. 128 * This structure stores the devfreq information for a give device.
141 * 129 *
@@ -153,17 +141,15 @@ struct devfreq {
153 struct devfreq_dev_profile *profile; 141 struct devfreq_dev_profile *profile;
154 const struct devfreq_governor *governor; 142 const struct devfreq_governor *governor;
155 struct notifier_block nb; 143 struct notifier_block nb;
144 struct delayed_work work;
156 145
157 unsigned long polling_jiffies;
158 unsigned long previous_freq; 146 unsigned long previous_freq;
159 unsigned int next_polling;
160 147
161 void *data; /* private data for governors */ 148 void *data; /* private data for governors */
162 149
163 bool being_removed;
164
165 unsigned long min_freq; 150 unsigned long min_freq;
166 unsigned long max_freq; 151 unsigned long max_freq;
152 bool stop_polling;
167}; 153};
168 154
169#if defined(CONFIG_PM_DEVFREQ) 155#if defined(CONFIG_PM_DEVFREQ)