path: root/drivers/base
author    Tejun Heo <tj@kernel.org>    2013-04-01 20:08:13 -0400
committer Tejun Heo <tj@kernel.org>    2013-04-01 21:45:36 -0400
commit    229641a6f1f09e27a1f12fba38980f33f4c92975 (patch)
tree      234a6f8aea0910de3242af0bbe6d7494fcf81847 /drivers/base
parent    d55262c4d164759a8debe772da6c9b16059dec47 (diff)
parent    07961ac7c0ee8b546658717034fe692fd12eefa9 (diff)
Merge tag 'v3.9-rc5' into wq/for-3.10
Writeback conversion to workqueue will be based on top of the
wq/for-3.10 branch to take advantage of custom attrs and NUMA support
for unbound workqueues.  Mainline currently contains two commits which
result in non-trivial merge conflicts with wq/for-3.10, and because
block/for-3.10/core is based on v3.9-rc3, which contains one of the
conflicting commits, we need a pre-merge-window merge anyway.  Let's
pull v3.9-rc5 into wq/for-3.10 so that the block tree doesn't suffer
from workqueue merge conflicts.

The two conflicts and their resolutions:

* e68035fb65 ("workqueue: convert to idr_alloc()") in mainline changes
  worker_pool_assign_id() to use idr_alloc() instead of the old idr
  interface.  worker_pool_assign_id() goes through multiple locking
  changes in wq/for-3.10, causing the following conflict.

	static int worker_pool_assign_id(struct worker_pool *pool)
	{
		int ret;

	<<<<<<< HEAD
		lockdep_assert_held(&wq_pool_mutex);

		do {
			if (!idr_pre_get(&worker_pool_idr, GFP_KERNEL))
				return -ENOMEM;
			ret = idr_get_new(&worker_pool_idr, pool, &pool->id);
		} while (ret == -EAGAIN);
	=======
		mutex_lock(&worker_pool_idr_mutex);
		ret = idr_alloc(&worker_pool_idr, pool, 0, 0, GFP_KERNEL);
		if (ret >= 0)
			pool->id = ret;
		mutex_unlock(&worker_pool_idr_mutex);
	>>>>>>> c67bf5361e7e66a0ff1f4caf95f89347d55dfb89

		return ret < 0 ? ret : 0;
	}

  We want the locking from the former and the idr_alloc() usage from
  the latter, which can be combined as follows.

	static int worker_pool_assign_id(struct worker_pool *pool)
	{
		int ret;

		lockdep_assert_held(&wq_pool_mutex);

		ret = idr_alloc(&worker_pool_idr, pool, 0, 0, GFP_KERNEL);
		if (ret >= 0) {
			pool->id = ret;
			return 0;
		}
		return ret;
	}

* eb2834285c ("workqueue: fix possible pool stall bug in
  wq_unbind_fn()") updated wq_unbind_fn() so that it has a single,
  larger for_each_std_worker_pool() loop instead of two separate loops
  with a schedule() call in between.  wq/for-3.10 renamed
  pool->assoc_mutex to pool->manager_mutex, causing the following
  conflict (earlier function body and comments omitted for brevity).

	static void wq_unbind_fn(struct work_struct *work)
	{
	...
			spin_unlock_irq(&pool->lock);
	<<<<<<< HEAD
			mutex_unlock(&pool->manager_mutex);
		}
	=======
			mutex_unlock(&pool->assoc_mutex);
	>>>>>>> c67bf5361e7e66a0ff1f4caf95f89347d55dfb89

		schedule();

	<<<<<<< HEAD
		for_each_cpu_worker_pool(pool, cpu)
	=======
	>>>>>>> c67bf5361e7e66a0ff1f4caf95f89347d55dfb89
			atomic_set(&pool->nr_running, 0);

			spin_lock_irq(&pool->lock);
			wake_up_worker(pool);
			spin_unlock_irq(&pool->lock);
		}
	}

  The resolution is mostly trivial.  We want the control flow of the
  latter with the rename from the former.

	static void wq_unbind_fn(struct work_struct *work)
	{
	...
			spin_unlock_irq(&pool->lock);

			mutex_unlock(&pool->manager_mutex);

			schedule();

			atomic_set(&pool->nr_running, 0);

			spin_lock_irq(&pool->lock);
			wake_up_worker(pool);
			spin_unlock_irq(&pool->lock);
		}
	}

Signed-off-by: Tejun Heo <tj@kernel.org>
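Background for the first conflict: the old idr interface needed a
preallocate-then-retry loop, while idr_alloc() allocates and returns
the id in one call.  A minimal out-of-tree sketch of the two styles
(illustrative only; example_idr and the two helper names are
hypothetical, not from this commit, and locking is omitted):

	#include <linux/idr.h>

	static DEFINE_IDR(example_idr);

	/* Old style: preallocate, then retry idr_get_new() on -EAGAIN. */
	static int assign_id_old(void *ptr, int *id)
	{
		int ret;

		do {
			if (!idr_pre_get(&example_idr, GFP_KERNEL))
				return -ENOMEM;	/* preallocation failed */
			ret = idr_get_new(&example_idr, ptr, id);
		} while (ret == -EAGAIN);

		return ret;
	}

	/* New style: idr_alloc() returns the allocated id or a -errno. */
	static int assign_id_new(void *ptr, int *id)
	{
		int ret = idr_alloc(&example_idr, ptr, 0, 0, GFP_KERNEL);

		if (ret < 0)
			return ret;
		*id = ret;
		return 0;
	}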
Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/power/main.c         |   2
-rw-r--r--  drivers/base/power/power.h        |   8
-rw-r--r--  drivers/base/power/qos.c          | 217
-rw-r--r--  drivers/base/power/sysfs.c        |   1
-rw-r--r--  drivers/base/regmap/regmap-irq.c  |   1
5 files changed, 127 insertions(+), 102 deletions(-)
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 2b7f77d3fcb0..15beb500a4e4 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -99,7 +99,6 @@ void device_pm_add(struct device *dev)
 		dev_warn(dev, "parent %s should not be sleeping\n",
 			dev_name(dev->parent));
 	list_add_tail(&dev->power.entry, &dpm_list);
-	dev_pm_qos_constraints_init(dev);
 	mutex_unlock(&dpm_list_mtx);
 }
 
@@ -113,7 +112,6 @@ void device_pm_remove(struct device *dev)
 		dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 	complete_all(&dev->power.completion);
 	mutex_lock(&dpm_list_mtx);
-	dev_pm_qos_constraints_destroy(dev);
 	list_del_init(&dev->power.entry);
 	mutex_unlock(&dpm_list_mtx);
 	device_wakeup_disable(dev);
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index b16686a0a5a2..cfc3226ec492 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -4,7 +4,7 @@ static inline void device_pm_init_common(struct device *dev)
 {
 	if (!dev->power.early_init) {
 		spin_lock_init(&dev->power.lock);
-		dev->power.power_state = PMSG_INVALID;
+		dev->power.qos = NULL;
 		dev->power.early_init = true;
 	}
 }
@@ -56,14 +56,10 @@ extern void device_pm_move_last(struct device *);
 
 static inline void device_pm_sleep_init(struct device *dev) {}
 
-static inline void device_pm_add(struct device *dev)
-{
-	dev_pm_qos_constraints_init(dev);
-}
+static inline void device_pm_add(struct device *dev) {}
 
 static inline void device_pm_remove(struct device *dev)
 {
-	dev_pm_qos_constraints_destroy(dev);
 	pm_runtime_remove(dev);
 }
 
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 3d4d1f8aac5c..5f74587ef258 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -41,6 +41,7 @@
 #include <linux/mutex.h>
 #include <linux/export.h>
 #include <linux/pm_runtime.h>
+#include <linux/err.h>
 
 #include "power.h"
 
@@ -61,7 +62,7 @@ enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
 	struct pm_qos_flags *pqf;
 	s32 val;
 
-	if (!qos)
+	if (IS_ERR_OR_NULL(qos))
 		return PM_QOS_FLAGS_UNDEFINED;
 
 	pqf = &qos->flags;
@@ -101,7 +102,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
  */
 s32 __dev_pm_qos_read_value(struct device *dev)
 {
-	return dev->power.qos ? pm_qos_read_value(&dev->power.qos->latency) : 0;
+	return IS_ERR_OR_NULL(dev->power.qos) ?
+		0 : pm_qos_read_value(&dev->power.qos->latency);
 }
 
 /**
@@ -198,20 +200,8 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
 	return 0;
 }
 
-/**
- * dev_pm_qos_constraints_init - Initalize device's PM QoS constraints pointer.
- * @dev: target device
- *
- * Called from the device PM subsystem during device insertion under
- * device_pm_lock().
- */
-void dev_pm_qos_constraints_init(struct device *dev)
-{
-	mutex_lock(&dev_pm_qos_mtx);
-	dev->power.qos = NULL;
-	dev->power.power_state = PMSG_ON;
-	mutex_unlock(&dev_pm_qos_mtx);
-}
+static void __dev_pm_qos_hide_latency_limit(struct device *dev);
+static void __dev_pm_qos_hide_flags(struct device *dev);
 
 /**
  * dev_pm_qos_constraints_destroy
@@ -226,16 +216,15 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
 	struct pm_qos_constraints *c;
 	struct pm_qos_flags *f;
 
+	mutex_lock(&dev_pm_qos_mtx);
+
 	/*
 	 * If the device's PM QoS resume latency limit or PM QoS flags have been
 	 * exposed to user space, they have to be hidden at this point.
 	 */
-	dev_pm_qos_hide_latency_limit(dev);
-	dev_pm_qos_hide_flags(dev);
+	__dev_pm_qos_hide_latency_limit(dev);
+	__dev_pm_qos_hide_flags(dev);
 
-	mutex_lock(&dev_pm_qos_mtx);
-
-	dev->power.power_state = PMSG_INVALID;
 	qos = dev->power.qos;
 	if (!qos)
 		goto out;
@@ -257,7 +246,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
 	}
 
 	spin_lock_irq(&dev->power.lock);
-	dev->power.qos = NULL;
+	dev->power.qos = ERR_PTR(-ENODEV);
 	spin_unlock_irq(&dev->power.lock);
 
 	kfree(c->notifiers);
@@ -301,32 +290,19 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
 		 "%s() called for already added request\n", __func__))
 		return -EINVAL;
 
-	req->dev = dev;
-
 	mutex_lock(&dev_pm_qos_mtx);
 
-	if (!dev->power.qos) {
-		if (dev->power.power_state.event == PM_EVENT_INVALID) {
-			/* The device has been removed from the system. */
-			req->dev = NULL;
-			ret = -ENODEV;
-			goto out;
-		} else {
-			/*
-			 * Allocate the constraints data on the first call to
-			 * add_request, i.e. only if the data is not already
-			 * allocated and if the device has not been removed.
-			 */
-			ret = dev_pm_qos_constraints_allocate(dev);
-		}
-	}
+	if (IS_ERR(dev->power.qos))
+		ret = -ENODEV;
+	else if (!dev->power.qos)
+		ret = dev_pm_qos_constraints_allocate(dev);
 
 	if (!ret) {
+		req->dev = dev;
 		req->type = type;
 		ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
 	}
 
- out:
 	mutex_unlock(&dev_pm_qos_mtx);
 
 	return ret;
@@ -344,7 +320,14 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
 	s32 curr_value;
 	int ret = 0;
 
-	if (!req->dev->power.qos)
+	if (!req) /*guard against callers passing in null */
+		return -EINVAL;
+
+	if (WARN(!dev_pm_qos_request_active(req),
+		 "%s() called for unknown object\n", __func__))
+		return -EINVAL;
+
+	if (IS_ERR_OR_NULL(req->dev->power.qos))
 		return -ENODEV;
 
 	switch(req->type) {
@@ -386,6 +369,17 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
 {
 	int ret;
 
+	mutex_lock(&dev_pm_qos_mtx);
+	ret = __dev_pm_qos_update_request(req, new_value);
+	mutex_unlock(&dev_pm_qos_mtx);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
+
+static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
+{
+	int ret;
+
 	if (!req) /*guard against callers passing in null */
 		return -EINVAL;
 
@@ -393,13 +387,13 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
 		 "%s() called for unknown object\n", __func__))
 		return -EINVAL;
 
-	mutex_lock(&dev_pm_qos_mtx);
-	ret = __dev_pm_qos_update_request(req, new_value);
-	mutex_unlock(&dev_pm_qos_mtx);
+	if (IS_ERR_OR_NULL(req->dev->power.qos))
+		return -ENODEV;
 
+	ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+	memset(req, 0, sizeof(*req));
 	return ret;
 }
-EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
 
 /**
  * dev_pm_qos_remove_request - modifies an existing qos request
@@ -418,26 +412,10 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
  */
 int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
 {
-	int ret = 0;
-
-	if (!req) /*guard against callers passing in null */
-		return -EINVAL;
-
-	if (WARN(!dev_pm_qos_request_active(req),
-		 "%s() called for unknown object\n", __func__))
-		return -EINVAL;
+	int ret;
 
 	mutex_lock(&dev_pm_qos_mtx);
-
-	if (req->dev->power.qos) {
-		ret = apply_constraint(req, PM_QOS_REMOVE_REQ,
-				       PM_QOS_DEFAULT_VALUE);
-		memset(req, 0, sizeof(*req));
-	} else {
-		/* Return if the device has been removed */
-		ret = -ENODEV;
-	}
-
+	ret = __dev_pm_qos_remove_request(req);
 	mutex_unlock(&dev_pm_qos_mtx);
 	return ret;
 }
@@ -462,9 +440,10 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
 
 	mutex_lock(&dev_pm_qos_mtx);
 
-	if (!dev->power.qos)
-		ret = dev->power.power_state.event != PM_EVENT_INVALID ?
-			dev_pm_qos_constraints_allocate(dev) : -ENODEV;
+	if (IS_ERR(dev->power.qos))
+		ret = -ENODEV;
+	else if (!dev->power.qos)
+		ret = dev_pm_qos_constraints_allocate(dev);
 
 	if (!ret)
 		ret = blocking_notifier_chain_register(
@@ -493,7 +472,7 @@ int dev_pm_qos_remove_notifier(struct device *dev,
 	mutex_lock(&dev_pm_qos_mtx);
 
 	/* Silently return if the constraints object is not present. */
-	if (dev->power.qos)
+	if (!IS_ERR_OR_NULL(dev->power.qos))
 		retval = blocking_notifier_chain_unregister(
 				dev->power.qos->latency.notifiers,
 				notifier);
@@ -563,16 +542,20 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
 static void __dev_pm_qos_drop_user_request(struct device *dev,
 					   enum dev_pm_qos_req_type type)
 {
+	struct dev_pm_qos_request *req = NULL;
+
 	switch(type) {
 	case DEV_PM_QOS_LATENCY:
-		dev_pm_qos_remove_request(dev->power.qos->latency_req);
+		req = dev->power.qos->latency_req;
 		dev->power.qos->latency_req = NULL;
 		break;
 	case DEV_PM_QOS_FLAGS:
-		dev_pm_qos_remove_request(dev->power.qos->flags_req);
+		req = dev->power.qos->flags_req;
 		dev->power.qos->flags_req = NULL;
 		break;
 	}
+	__dev_pm_qos_remove_request(req);
+	kfree(req);
 }
 
 /**
@@ -588,36 +571,57 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
 	if (!device_is_registered(dev) || value < 0)
 		return -EINVAL;
 
-	if (dev->power.qos && dev->power.qos->latency_req)
-		return -EEXIST;
-
 	req = kzalloc(sizeof(*req), GFP_KERNEL);
 	if (!req)
 		return -ENOMEM;
 
 	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value);
-	if (ret < 0)
+	if (ret < 0) {
+		kfree(req);
 		return ret;
+	}
+
+	mutex_lock(&dev_pm_qos_mtx);
+
+	if (IS_ERR_OR_NULL(dev->power.qos))
+		ret = -ENODEV;
+	else if (dev->power.qos->latency_req)
+		ret = -EEXIST;
+
+	if (ret < 0) {
+		__dev_pm_qos_remove_request(req);
+		kfree(req);
+		goto out;
+	}
 
 	dev->power.qos->latency_req = req;
 	ret = pm_qos_sysfs_add_latency(dev);
 	if (ret)
 		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
 
+ out:
+	mutex_unlock(&dev_pm_qos_mtx);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
 
+static void __dev_pm_qos_hide_latency_limit(struct device *dev)
+{
+	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) {
+		pm_qos_sysfs_remove_latency(dev);
+		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
+	}
+}
+
 /**
  * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
  * @dev: Device whose PM QoS latency limit is to be hidden from user space.
  */
 void dev_pm_qos_hide_latency_limit(struct device *dev)
 {
-	if (dev->power.qos && dev->power.qos->latency_req) {
-		pm_qos_sysfs_remove_latency(dev);
-		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
-	}
+	mutex_lock(&dev_pm_qos_mtx);
+	__dev_pm_qos_hide_latency_limit(dev);
+	mutex_unlock(&dev_pm_qos_mtx);
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
 
@@ -634,41 +638,61 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val)
 	if (!device_is_registered(dev))
 		return -EINVAL;
 
-	if (dev->power.qos && dev->power.qos->flags_req)
-		return -EEXIST;
-
 	req = kzalloc(sizeof(*req), GFP_KERNEL);
 	if (!req)
 		return -ENOMEM;
 
-	pm_runtime_get_sync(dev);
 	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
-	if (ret < 0)
-		goto fail;
+	if (ret < 0) {
+		kfree(req);
+		return ret;
+	}
+
+	pm_runtime_get_sync(dev);
+	mutex_lock(&dev_pm_qos_mtx);
+
+	if (IS_ERR_OR_NULL(dev->power.qos))
+		ret = -ENODEV;
+	else if (dev->power.qos->flags_req)
+		ret = -EEXIST;
+
+	if (ret < 0) {
+		__dev_pm_qos_remove_request(req);
+		kfree(req);
+		goto out;
+	}
 
 	dev->power.qos->flags_req = req;
 	ret = pm_qos_sysfs_add_flags(dev);
 	if (ret)
 		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
 
-fail:
+ out:
+	mutex_unlock(&dev_pm_qos_mtx);
 	pm_runtime_put(dev);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
 
+static void __dev_pm_qos_hide_flags(struct device *dev)
+{
+	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req) {
+		pm_qos_sysfs_remove_flags(dev);
+		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
+	}
+}
+
 /**
  * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
  * @dev: Device whose PM QoS flags are to be hidden from user space.
  */
 void dev_pm_qos_hide_flags(struct device *dev)
 {
-	if (dev->power.qos && dev->power.qos->flags_req) {
-		pm_qos_sysfs_remove_flags(dev);
-		pm_runtime_get_sync(dev);
-		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
-		pm_runtime_put(dev);
-	}
+	pm_runtime_get_sync(dev);
+	mutex_lock(&dev_pm_qos_mtx);
+	__dev_pm_qos_hide_flags(dev);
+	mutex_unlock(&dev_pm_qos_mtx);
+	pm_runtime_put(dev);
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);
 
@@ -683,12 +707,14 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
 	s32 value;
 	int ret;
 
-	if (!dev->power.qos || !dev->power.qos->flags_req)
-		return -EINVAL;
-
 	pm_runtime_get_sync(dev);
 	mutex_lock(&dev_pm_qos_mtx);
 
+	if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
+		ret = -EINVAL;
+		goto out;
+	}
+
 	value = dev_pm_qos_requested_flags(dev);
 	if (set)
 		value |= mask;
@@ -697,9 +723,12 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
 
 	ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);
 
+ out:
 	mutex_unlock(&dev_pm_qos_mtx);
 	pm_runtime_put(dev);
-
 	return ret;
 }
+#else /* !CONFIG_PM_RUNTIME */
+static void __dev_pm_qos_hide_latency_limit(struct device *dev) {}
+static void __dev_pm_qos_hide_flags(struct device *dev) {}
 #endif /* CONFIG_PM_RUNTIME */
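The qos.c changes above turn dev->power.qos into a three-state pointer:
NULL (constraints not allocated yet), ERR_PTR(-ENODEV) (device being
removed; set under dev_pm_qos_mtx in dev_pm_qos_constraints_destroy()),
or a valid pointer.  A minimal sketch of the linux/err.h idiom involved
(illustrative only; struct foo and its helpers are hypothetical):

	#include <linux/err.h>
	#include <linux/errno.h>

	struct foo {
		void *state;	/* NULL, ERR_PTR(-ENODEV), or a live object */
	};

	static int foo_use(struct foo *f)
	{
		if (IS_ERR(f->state))		/* removal marker set */
			return PTR_ERR(f->state);	/* -ENODEV here */
		if (!f->state)			/* never allocated */
			return 0;		/* nothing to do */
		/* ... f->state points to a live object ... */
		return 1;
	}

	static void foo_mark_removed(struct foo *f)
	{
		/* Fail future users instead of merely clearing the pointer. */
		f->state = ERR_PTR(-ENODEV);
	}

IS_ERR_OR_NULL() covers both of the first two states in one test, which
is why the read-side checks in the diff use it.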
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 50d16e3cb0a9..a53ebd265701 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -708,6 +708,7 @@ void rpm_sysfs_remove(struct device *dev)
 
 void dpm_sysfs_remove(struct device *dev)
 {
+	dev_pm_qos_constraints_destroy(dev);
 	rpm_sysfs_remove(dev);
 	sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
 	sysfs_remove_group(&dev->kobj, &pm_attr_group);
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 4706c63d0bc6..020ea2b9fd2f 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -184,6 +184,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
 		if (ret < 0) {
 			dev_err(map->dev, "IRQ thread failed to resume: %d\n",
 				ret);
+			pm_runtime_put(map->dev);
 			return IRQ_NONE;
 		}
 	}
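The regmap-irq change plugs a runtime-PM reference leak:
pm_runtime_get_sync() increments the device's usage counter even when
the resume it triggers fails, so the error path must still drop the
reference.  A minimal sketch of the balancing pattern (illustrative
only; the function name is hypothetical):

	#include <linux/device.h>
	#include <linux/pm_runtime.h>

	static int do_io(struct device *dev)
	{
		int ret = pm_runtime_get_sync(dev); /* usage count++ even on failure */

		if (ret < 0) {
			pm_runtime_put(dev);	/* balance the failed get */
			return ret;
		}

		/* ... hardware access while the device is powered ... */

		pm_runtime_put(dev);
		return 0;
	}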