author	Takashi Iwai <tiwai@suse.de>	2013-04-03 11:07:29 -0400
committer	Takashi Iwai <tiwai@suse.de>	2013-04-03 11:07:29 -0400
commit	efc33ce197e4b6aaddf1eb2ba6293f51daf3c283 (patch)
tree	9eaceed3c855d5cbc91c2fc247bdcceafbf28cdf	/drivers/base/power/qos.c
parent	993884f6a26c6547fa3875f9d3fabdc4250d8da6 (diff)
parent	690a863ff03d9a29ace2b752b8f802fba78a842f (diff)
Merge branch 'for-linus' into for-next
Back-merge to clean up the usb-audio code modified by the recent commit, and to take further UAC2 autoclock patches.
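For reference, a back-merge like the one recorded above is simply an ordinary merge of the stable branch into the development branch. A minimal sketch using the branch names from the subject line (the remote setup and any conflict resolution are assumptions, not part of this commit):

    $ git checkout for-next     # development branch that receives the merge
    $ git merge for-linus       # pull already-queued fixes underneath new work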
Diffstat (limited to 'drivers/base/power/qos.c')
-rw-r--r--	drivers/base/power/qos.c	217
1 file changed, 123 insertions(+), 94 deletions(-)
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 3d4d1f8aac5c..5f74587ef258 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -41,6 +41,7 @@
 #include <linux/mutex.h>
 #include <linux/export.h>
 #include <linux/pm_runtime.h>
+#include <linux/err.h>
 
 #include "power.h"
 
@@ -61,7 +62,7 @@ enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
 	struct pm_qos_flags *pqf;
 	s32 val;
 
-	if (!qos)
+	if (IS_ERR_OR_NULL(qos))
 		return PM_QOS_FLAGS_UNDEFINED;
 
 	pqf = &qos->flags;
@@ -101,7 +102,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
  */
 s32 __dev_pm_qos_read_value(struct device *dev)
 {
-	return dev->power.qos ? pm_qos_read_value(&dev->power.qos->latency) : 0;
+	return IS_ERR_OR_NULL(dev->power.qos) ?
+		0 : pm_qos_read_value(&dev->power.qos->latency);
 }
 
 /**
@@ -198,20 +200,8 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
 	return 0;
 }
 
-/**
- * dev_pm_qos_constraints_init - Initalize device's PM QoS constraints pointer.
- * @dev: target device
- *
- * Called from the device PM subsystem during device insertion under
- * device_pm_lock().
- */
-void dev_pm_qos_constraints_init(struct device *dev)
-{
-	mutex_lock(&dev_pm_qos_mtx);
-	dev->power.qos = NULL;
-	dev->power.power_state = PMSG_ON;
-	mutex_unlock(&dev_pm_qos_mtx);
-}
+static void __dev_pm_qos_hide_latency_limit(struct device *dev);
+static void __dev_pm_qos_hide_flags(struct device *dev);
 
 /**
  * dev_pm_qos_constraints_destroy
@@ -226,16 +216,15 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
 	struct pm_qos_constraints *c;
 	struct pm_qos_flags *f;
 
+	mutex_lock(&dev_pm_qos_mtx);
+
 	/*
 	 * If the device's PM QoS resume latency limit or PM QoS flags have been
 	 * exposed to user space, they have to be hidden at this point.
 	 */
-	dev_pm_qos_hide_latency_limit(dev);
-	dev_pm_qos_hide_flags(dev);
+	__dev_pm_qos_hide_latency_limit(dev);
+	__dev_pm_qos_hide_flags(dev);
 
-	mutex_lock(&dev_pm_qos_mtx);
-
-	dev->power.power_state = PMSG_INVALID;
 	qos = dev->power.qos;
 	if (!qos)
 		goto out;
@@ -257,7 +246,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
 	}
 
 	spin_lock_irq(&dev->power.lock);
-	dev->power.qos = NULL;
+	dev->power.qos = ERR_PTR(-ENODEV);
 	spin_unlock_irq(&dev->power.lock);
 
 	kfree(c->notifiers);
@@ -301,32 +290,19 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
 		 "%s() called for already added request\n", __func__))
 		return -EINVAL;
 
-	req->dev = dev;
-
 	mutex_lock(&dev_pm_qos_mtx);
 
-	if (!dev->power.qos) {
-		if (dev->power.power_state.event == PM_EVENT_INVALID) {
-			/* The device has been removed from the system. */
-			req->dev = NULL;
-			ret = -ENODEV;
-			goto out;
-		} else {
-			/*
-			 * Allocate the constraints data on the first call to
-			 * add_request, i.e. only if the data is not already
-			 * allocated and if the device has not been removed.
-			 */
-			ret = dev_pm_qos_constraints_allocate(dev);
-		}
-	}
+	if (IS_ERR(dev->power.qos))
+		ret = -ENODEV;
+	else if (!dev->power.qos)
+		ret = dev_pm_qos_constraints_allocate(dev);
 
 	if (!ret) {
+		req->dev = dev;
 		req->type = type;
 		ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
 	}
 
- out:
 	mutex_unlock(&dev_pm_qos_mtx);
 
 	return ret;
@@ -344,7 +320,14 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
 	s32 curr_value;
 	int ret = 0;
 
-	if (!req->dev->power.qos)
+	if (!req) /*guard against callers passing in null */
+		return -EINVAL;
+
+	if (WARN(!dev_pm_qos_request_active(req),
+		 "%s() called for unknown object\n", __func__))
+		return -EINVAL;
+
+	if (IS_ERR_OR_NULL(req->dev->power.qos))
 		return -ENODEV;
 
 	switch(req->type) {
@@ -386,6 +369,17 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
 {
 	int ret;
 
+	mutex_lock(&dev_pm_qos_mtx);
+	ret = __dev_pm_qos_update_request(req, new_value);
+	mutex_unlock(&dev_pm_qos_mtx);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
+
+static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
+{
+	int ret;
+
 	if (!req) /*guard against callers passing in null */
 		return -EINVAL;
 
@@ -393,13 +387,13 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
 		 "%s() called for unknown object\n", __func__))
 		return -EINVAL;
 
-	mutex_lock(&dev_pm_qos_mtx);
-	ret = __dev_pm_qos_update_request(req, new_value);
-	mutex_unlock(&dev_pm_qos_mtx);
+	if (IS_ERR_OR_NULL(req->dev->power.qos))
+		return -ENODEV;
 
+	ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+	memset(req, 0, sizeof(*req));
 	return ret;
 }
-EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
 
 /**
  * dev_pm_qos_remove_request - modifies an existing qos request
@@ -418,26 +412,10 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
  */
 int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
 {
-	int ret = 0;
-
-	if (!req) /*guard against callers passing in null */
-		return -EINVAL;
-
-	if (WARN(!dev_pm_qos_request_active(req),
-		 "%s() called for unknown object\n", __func__))
-		return -EINVAL;
+	int ret;
 
 	mutex_lock(&dev_pm_qos_mtx);
-
-	if (req->dev->power.qos) {
-		ret = apply_constraint(req, PM_QOS_REMOVE_REQ,
-				       PM_QOS_DEFAULT_VALUE);
-		memset(req, 0, sizeof(*req));
-	} else {
-		/* Return if the device has been removed */
-		ret = -ENODEV;
-	}
-
+	ret = __dev_pm_qos_remove_request(req);
 	mutex_unlock(&dev_pm_qos_mtx);
 	return ret;
 }
@@ -462,9 +440,10 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
 
 	mutex_lock(&dev_pm_qos_mtx);
 
-	if (!dev->power.qos)
-		ret = dev->power.power_state.event != PM_EVENT_INVALID ?
-			dev_pm_qos_constraints_allocate(dev) : -ENODEV;
+	if (IS_ERR(dev->power.qos))
+		ret = -ENODEV;
+	else if (!dev->power.qos)
+		ret = dev_pm_qos_constraints_allocate(dev);
 
 	if (!ret)
 		ret = blocking_notifier_chain_register(
@@ -493,7 +472,7 @@ int dev_pm_qos_remove_notifier(struct device *dev,
 	mutex_lock(&dev_pm_qos_mtx);
 
 	/* Silently return if the constraints object is not present. */
-	if (dev->power.qos)
+	if (!IS_ERR_OR_NULL(dev->power.qos))
 		retval = blocking_notifier_chain_unregister(
 				dev->power.qos->latency.notifiers,
 				notifier);
@@ -563,16 +542,20 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
 static void __dev_pm_qos_drop_user_request(struct device *dev,
 					   enum dev_pm_qos_req_type type)
 {
+	struct dev_pm_qos_request *req = NULL;
+
 	switch(type) {
 	case DEV_PM_QOS_LATENCY:
-		dev_pm_qos_remove_request(dev->power.qos->latency_req);
+		req = dev->power.qos->latency_req;
 		dev->power.qos->latency_req = NULL;
 		break;
 	case DEV_PM_QOS_FLAGS:
-		dev_pm_qos_remove_request(dev->power.qos->flags_req);
+		req = dev->power.qos->flags_req;
 		dev->power.qos->flags_req = NULL;
 		break;
 	}
+	__dev_pm_qos_remove_request(req);
+	kfree(req);
 }
 
 /**
@@ -588,36 +571,57 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
 	if (!device_is_registered(dev) || value < 0)
 		return -EINVAL;
 
-	if (dev->power.qos && dev->power.qos->latency_req)
-		return -EEXIST;
-
 	req = kzalloc(sizeof(*req), GFP_KERNEL);
 	if (!req)
 		return -ENOMEM;
 
 	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value);
-	if (ret < 0)
+	if (ret < 0) {
+		kfree(req);
 		return ret;
+	}
+
+	mutex_lock(&dev_pm_qos_mtx);
+
+	if (IS_ERR_OR_NULL(dev->power.qos))
+		ret = -ENODEV;
+	else if (dev->power.qos->latency_req)
+		ret = -EEXIST;
+
+	if (ret < 0) {
+		__dev_pm_qos_remove_request(req);
+		kfree(req);
+		goto out;
+	}
 
 	dev->power.qos->latency_req = req;
 	ret = pm_qos_sysfs_add_latency(dev);
 	if (ret)
 		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
 
+ out:
+	mutex_unlock(&dev_pm_qos_mtx);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
 
+static void __dev_pm_qos_hide_latency_limit(struct device *dev)
+{
+	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) {
+		pm_qos_sysfs_remove_latency(dev);
+		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
+	}
+}
+
 /**
  * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
  * @dev: Device whose PM QoS latency limit is to be hidden from user space.
  */
 void dev_pm_qos_hide_latency_limit(struct device *dev)
 {
-	if (dev->power.qos && dev->power.qos->latency_req) {
-		pm_qos_sysfs_remove_latency(dev);
-		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
-	}
+	mutex_lock(&dev_pm_qos_mtx);
+	__dev_pm_qos_hide_latency_limit(dev);
+	mutex_unlock(&dev_pm_qos_mtx);
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
 
@@ -634,41 +638,61 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val)
 	if (!device_is_registered(dev))
 		return -EINVAL;
 
-	if (dev->power.qos && dev->power.qos->flags_req)
-		return -EEXIST;
-
 	req = kzalloc(sizeof(*req), GFP_KERNEL);
 	if (!req)
 		return -ENOMEM;
 
-	pm_runtime_get_sync(dev);
 	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
-	if (ret < 0)
-		goto fail;
+	if (ret < 0) {
+		kfree(req);
+		return ret;
+	}
+
+	pm_runtime_get_sync(dev);
+	mutex_lock(&dev_pm_qos_mtx);
+
+	if (IS_ERR_OR_NULL(dev->power.qos))
+		ret = -ENODEV;
+	else if (dev->power.qos->flags_req)
+		ret = -EEXIST;
+
+	if (ret < 0) {
+		__dev_pm_qos_remove_request(req);
+		kfree(req);
+		goto out;
+	}
 
 	dev->power.qos->flags_req = req;
 	ret = pm_qos_sysfs_add_flags(dev);
 	if (ret)
 		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
 
-fail:
+ out:
+	mutex_unlock(&dev_pm_qos_mtx);
 	pm_runtime_put(dev);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
 
+static void __dev_pm_qos_hide_flags(struct device *dev)
+{
+	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req) {
+		pm_qos_sysfs_remove_flags(dev);
+		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
+	}
+}
+
 /**
  * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
  * @dev: Device whose PM QoS flags are to be hidden from user space.
  */
 void dev_pm_qos_hide_flags(struct device *dev)
 {
-	if (dev->power.qos && dev->power.qos->flags_req) {
-		pm_qos_sysfs_remove_flags(dev);
-		pm_runtime_get_sync(dev);
-		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
-		pm_runtime_put(dev);
-	}
+	pm_runtime_get_sync(dev);
+	mutex_lock(&dev_pm_qos_mtx);
+	__dev_pm_qos_hide_flags(dev);
+	mutex_unlock(&dev_pm_qos_mtx);
+	pm_runtime_put(dev);
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);
 
@@ -683,12 +707,14 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
 	s32 value;
 	int ret;
 
-	if (!dev->power.qos || !dev->power.qos->flags_req)
-		return -EINVAL;
-
 	pm_runtime_get_sync(dev);
 	mutex_lock(&dev_pm_qos_mtx);
 
+	if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	value = dev_pm_qos_requested_flags(dev);
 	if (set)
 		value |= mask;
@@ -697,9 +723,12 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
 
 	ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);
 
+ out:
 	mutex_unlock(&dev_pm_qos_mtx);
 	pm_runtime_put(dev);
-
 	return ret;
 }
+#else /* !CONFIG_PM_RUNTIME */
+static void __dev_pm_qos_hide_latency_limit(struct device *dev) {}
+static void __dev_pm_qos_hide_flags(struct device *dev) {}
 #endif /* CONFIG_PM_RUNTIME */
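
The change above leans on one idiom throughout: on teardown, dev->power.qos is no longer reset to NULL but to ERR_PTR(-ENODEV), so every reader can tell "not allocated yet" (NULL, allocate on demand) apart from "device going away" (error pointer, fail with -ENODEV) using IS_ERR()/IS_ERR_OR_NULL(). Below is a minimal userspace sketch of that sentinel pattern; the ERR_PTR()/IS_ERR() helpers are re-implemented here purely for illustration and stand in for the kernel's <linux/err.h>, and the qos_constraints structure is a made-up placeholder, not the kernel type.

/* err_ptr_demo.c - userspace illustration of the ERR_PTR() sentinel idiom
 * used by the patch above; these helpers mimic the kernel's <linux/err.h>
 * and are NOT the kernel implementation.  Build: cc -Wall err_ptr_demo.c
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int   IS_ERR(const void *ptr)
{
	/* small negative errnos live at the very top of the address range */
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}
static inline int   IS_ERR_OR_NULL(const void *ptr)
{
	return !ptr || IS_ERR(ptr);
}

struct qos_constraints { int latency; };

/*
 * Mirrors dev->power.qos after the patch:
 *   NULL             - constraints not allocated yet, allocate on first use
 *   ERR_PTR(-ENODEV) - device is being removed, refuse new requests
 *   valid pointer    - constraints in active use
 */
static struct qos_constraints *qos;	/* starts out NULL */

static int add_request(int value)
{
	static struct qos_constraints storage;	/* stand-in for kzalloc() */

	if (IS_ERR(qos))
		return (int)PTR_ERR(qos);	/* already torn down: -ENODEV */
	if (!qos)
		qos = &storage;			/* lazy allocation */
	qos->latency = value;
	return 0;
}

static void constraints_destroy(void)
{
	qos = ERR_PTR(-ENODEV);	/* mark as gone rather than merely unset */
}

int main(void)
{
	printf("add before destroy: %d\n", add_request(100));	/* 0 */
	constraints_destroy();
	printf("add after destroy:  %d\n", add_request(100));	/* -19, i.e. -ENODEV */
	printf("constraints usable: %s\n",
	       IS_ERR_OR_NULL(qos) ? "no" : "yes");
	return 0;
}

The same three-state convention is why the diff can drop dev_pm_qos_constraints_init() entirely: a NULL pointer already means "nothing allocated", and the error-pointer state replaces the old power_state/PMSG_INVALID bookkeeping.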