about summary refs log tree commit diff stats
path: root/drivers/base
diff options
context:
space:
mode:
authorMark Brown <broonie@opensource.wolfsonmicro.com>2013-04-16 11:02:41 -0400
committerMark Brown <broonie@opensource.wolfsonmicro.com>2013-04-16 11:02:41 -0400
commit60f7110e36ff7858182e8990a2d19fa3df7e05f5 (patch)
tree179c2a9f8fe654694d40536a506345db80705436 /drivers/base
parent9659293c1784f3d9df2235f6ebf92f6f9059a563 (diff)
parent41ef2d5678d83af030125550329b6ae8b74618fa (diff)
Merge tag 'v3.9-rc7' into regmap-cache
Linux 3.9-rc7
Diffstat (limited to 'drivers/base')
-rw-r--r--drivers/base/power/main.c2
-rw-r--r--drivers/base/power/power.h8
-rw-r--r--drivers/base/power/qos.c251
-rw-r--r--drivers/base/power/sysfs.c1
-rw-r--r--drivers/base/regmap/regcache-rbtree.c2
-rw-r--r--drivers/base/regmap/regmap-irq.c1
-rw-r--r--drivers/base/regmap/regmap.c6
7 files changed, 166 insertions(+), 105 deletions(-)
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 2b7f77d3fcb0..15beb500a4e4 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -99,7 +99,6 @@ void device_pm_add(struct device *dev)
99 dev_warn(dev, "parent %s should not be sleeping\n", 99 dev_warn(dev, "parent %s should not be sleeping\n",
100 dev_name(dev->parent)); 100 dev_name(dev->parent));
101 list_add_tail(&dev->power.entry, &dpm_list); 101 list_add_tail(&dev->power.entry, &dpm_list);
102 dev_pm_qos_constraints_init(dev);
103 mutex_unlock(&dpm_list_mtx); 102 mutex_unlock(&dpm_list_mtx);
104} 103}
105 104
@@ -113,7 +112,6 @@ void device_pm_remove(struct device *dev)
113 dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); 112 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
114 complete_all(&dev->power.completion); 113 complete_all(&dev->power.completion);
115 mutex_lock(&dpm_list_mtx); 114 mutex_lock(&dpm_list_mtx);
116 dev_pm_qos_constraints_destroy(dev);
117 list_del_init(&dev->power.entry); 115 list_del_init(&dev->power.entry);
118 mutex_unlock(&dpm_list_mtx); 116 mutex_unlock(&dpm_list_mtx);
119 device_wakeup_disable(dev); 117 device_wakeup_disable(dev);
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index b16686a0a5a2..cfc3226ec492 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -4,7 +4,7 @@ static inline void device_pm_init_common(struct device *dev)
4{ 4{
5 if (!dev->power.early_init) { 5 if (!dev->power.early_init) {
6 spin_lock_init(&dev->power.lock); 6 spin_lock_init(&dev->power.lock);
7 dev->power.power_state = PMSG_INVALID; 7 dev->power.qos = NULL;
8 dev->power.early_init = true; 8 dev->power.early_init = true;
9 } 9 }
10} 10}
@@ -56,14 +56,10 @@ extern void device_pm_move_last(struct device *);
56 56
57static inline void device_pm_sleep_init(struct device *dev) {} 57static inline void device_pm_sleep_init(struct device *dev) {}
58 58
59static inline void device_pm_add(struct device *dev) 59static inline void device_pm_add(struct device *dev) {}
60{
61 dev_pm_qos_constraints_init(dev);
62}
63 60
64static inline void device_pm_remove(struct device *dev) 61static inline void device_pm_remove(struct device *dev)
65{ 62{
66 dev_pm_qos_constraints_destroy(dev);
67 pm_runtime_remove(dev); 63 pm_runtime_remove(dev);
68} 64}
69 65
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 3d4d1f8aac5c..71671c42ef45 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -41,10 +41,12 @@
41#include <linux/mutex.h> 41#include <linux/mutex.h>
42#include <linux/export.h> 42#include <linux/export.h>
43#include <linux/pm_runtime.h> 43#include <linux/pm_runtime.h>
44#include <linux/err.h>
44 45
45#include "power.h" 46#include "power.h"
46 47
47static DEFINE_MUTEX(dev_pm_qos_mtx); 48static DEFINE_MUTEX(dev_pm_qos_mtx);
49static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);
48 50
49static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers); 51static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
50 52
@@ -61,7 +63,7 @@ enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
61 struct pm_qos_flags *pqf; 63 struct pm_qos_flags *pqf;
62 s32 val; 64 s32 val;
63 65
64 if (!qos) 66 if (IS_ERR_OR_NULL(qos))
65 return PM_QOS_FLAGS_UNDEFINED; 67 return PM_QOS_FLAGS_UNDEFINED;
66 68
67 pqf = &qos->flags; 69 pqf = &qos->flags;
@@ -101,7 +103,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
101 */ 103 */
102s32 __dev_pm_qos_read_value(struct device *dev) 104s32 __dev_pm_qos_read_value(struct device *dev)
103{ 105{
104 return dev->power.qos ? pm_qos_read_value(&dev->power.qos->latency) : 0; 106 return IS_ERR_OR_NULL(dev->power.qos) ?
107 0 : pm_qos_read_value(&dev->power.qos->latency);
105} 108}
106 109
107/** 110/**
@@ -198,20 +201,8 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
198 return 0; 201 return 0;
199} 202}
200 203
201/** 204static void __dev_pm_qos_hide_latency_limit(struct device *dev);
202 * dev_pm_qos_constraints_init - Initalize device's PM QoS constraints pointer. 205static void __dev_pm_qos_hide_flags(struct device *dev);
203 * @dev: target device
204 *
205 * Called from the device PM subsystem during device insertion under
206 * device_pm_lock().
207 */
208void dev_pm_qos_constraints_init(struct device *dev)
209{
210 mutex_lock(&dev_pm_qos_mtx);
211 dev->power.qos = NULL;
212 dev->power.power_state = PMSG_ON;
213 mutex_unlock(&dev_pm_qos_mtx);
214}
215 206
216/** 207/**
217 * dev_pm_qos_constraints_destroy 208 * dev_pm_qos_constraints_destroy
@@ -226,16 +217,20 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
226 struct pm_qos_constraints *c; 217 struct pm_qos_constraints *c;
227 struct pm_qos_flags *f; 218 struct pm_qos_flags *f;
228 219
220 mutex_lock(&dev_pm_qos_sysfs_mtx);
221
229 /* 222 /*
230 * If the device's PM QoS resume latency limit or PM QoS flags have been 223 * If the device's PM QoS resume latency limit or PM QoS flags have been
231 * exposed to user space, they have to be hidden at this point. 224 * exposed to user space, they have to be hidden at this point.
232 */ 225 */
233 dev_pm_qos_hide_latency_limit(dev); 226 pm_qos_sysfs_remove_latency(dev);
234 dev_pm_qos_hide_flags(dev); 227 pm_qos_sysfs_remove_flags(dev);
235 228
236 mutex_lock(&dev_pm_qos_mtx); 229 mutex_lock(&dev_pm_qos_mtx);
237 230
238 dev->power.power_state = PMSG_INVALID; 231 __dev_pm_qos_hide_latency_limit(dev);
232 __dev_pm_qos_hide_flags(dev);
233
239 qos = dev->power.qos; 234 qos = dev->power.qos;
240 if (!qos) 235 if (!qos)
241 goto out; 236 goto out;
@@ -257,7 +252,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
257 } 252 }
258 253
259 spin_lock_irq(&dev->power.lock); 254 spin_lock_irq(&dev->power.lock);
260 dev->power.qos = NULL; 255 dev->power.qos = ERR_PTR(-ENODEV);
261 spin_unlock_irq(&dev->power.lock); 256 spin_unlock_irq(&dev->power.lock);
262 257
263 kfree(c->notifiers); 258 kfree(c->notifiers);
@@ -265,6 +260,8 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
265 260
266 out: 261 out:
267 mutex_unlock(&dev_pm_qos_mtx); 262 mutex_unlock(&dev_pm_qos_mtx);
263
264 mutex_unlock(&dev_pm_qos_sysfs_mtx);
268} 265}
269 266
270/** 267/**
@@ -301,32 +298,19 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
301 "%s() called for already added request\n", __func__)) 298 "%s() called for already added request\n", __func__))
302 return -EINVAL; 299 return -EINVAL;
303 300
304 req->dev = dev;
305
306 mutex_lock(&dev_pm_qos_mtx); 301 mutex_lock(&dev_pm_qos_mtx);
307 302
308 if (!dev->power.qos) { 303 if (IS_ERR(dev->power.qos))
309 if (dev->power.power_state.event == PM_EVENT_INVALID) { 304 ret = -ENODEV;
310 /* The device has been removed from the system. */ 305 else if (!dev->power.qos)
311 req->dev = NULL; 306 ret = dev_pm_qos_constraints_allocate(dev);
312 ret = -ENODEV;
313 goto out;
314 } else {
315 /*
316 * Allocate the constraints data on the first call to
317 * add_request, i.e. only if the data is not already
318 * allocated and if the device has not been removed.
319 */
320 ret = dev_pm_qos_constraints_allocate(dev);
321 }
322 }
323 307
324 if (!ret) { 308 if (!ret) {
309 req->dev = dev;
325 req->type = type; 310 req->type = type;
326 ret = apply_constraint(req, PM_QOS_ADD_REQ, value); 311 ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
327 } 312 }
328 313
329 out:
330 mutex_unlock(&dev_pm_qos_mtx); 314 mutex_unlock(&dev_pm_qos_mtx);
331 315
332 return ret; 316 return ret;
@@ -344,7 +328,14 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
344 s32 curr_value; 328 s32 curr_value;
345 int ret = 0; 329 int ret = 0;
346 330
347 if (!req->dev->power.qos) 331 if (!req) /*guard against callers passing in null */
332 return -EINVAL;
333
334 if (WARN(!dev_pm_qos_request_active(req),
335 "%s() called for unknown object\n", __func__))
336 return -EINVAL;
337
338 if (IS_ERR_OR_NULL(req->dev->power.qos))
348 return -ENODEV; 339 return -ENODEV;
349 340
350 switch(req->type) { 341 switch(req->type) {
@@ -386,6 +377,17 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
386{ 377{
387 int ret; 378 int ret;
388 379
380 mutex_lock(&dev_pm_qos_mtx);
381 ret = __dev_pm_qos_update_request(req, new_value);
382 mutex_unlock(&dev_pm_qos_mtx);
383 return ret;
384}
385EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
386
387static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
388{
389 int ret;
390
389 if (!req) /*guard against callers passing in null */ 391 if (!req) /*guard against callers passing in null */
390 return -EINVAL; 392 return -EINVAL;
391 393
@@ -393,13 +395,13 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
393 "%s() called for unknown object\n", __func__)) 395 "%s() called for unknown object\n", __func__))
394 return -EINVAL; 396 return -EINVAL;
395 397
396 mutex_lock(&dev_pm_qos_mtx); 398 if (IS_ERR_OR_NULL(req->dev->power.qos))
397 ret = __dev_pm_qos_update_request(req, new_value); 399 return -ENODEV;
398 mutex_unlock(&dev_pm_qos_mtx);
399 400
401 ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
402 memset(req, 0, sizeof(*req));
400 return ret; 403 return ret;
401} 404}
402EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
403 405
404/** 406/**
405 * dev_pm_qos_remove_request - modifies an existing qos request 407 * dev_pm_qos_remove_request - modifies an existing qos request
@@ -418,26 +420,10 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
418 */ 420 */
419int dev_pm_qos_remove_request(struct dev_pm_qos_request *req) 421int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
420{ 422{
421 int ret = 0; 423 int ret;
422
423 if (!req) /*guard against callers passing in null */
424 return -EINVAL;
425
426 if (WARN(!dev_pm_qos_request_active(req),
427 "%s() called for unknown object\n", __func__))
428 return -EINVAL;
429 424
430 mutex_lock(&dev_pm_qos_mtx); 425 mutex_lock(&dev_pm_qos_mtx);
431 426 ret = __dev_pm_qos_remove_request(req);
432 if (req->dev->power.qos) {
433 ret = apply_constraint(req, PM_QOS_REMOVE_REQ,
434 PM_QOS_DEFAULT_VALUE);
435 memset(req, 0, sizeof(*req));
436 } else {
437 /* Return if the device has been removed */
438 ret = -ENODEV;
439 }
440
441 mutex_unlock(&dev_pm_qos_mtx); 427 mutex_unlock(&dev_pm_qos_mtx);
442 return ret; 428 return ret;
443} 429}
@@ -462,9 +448,10 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
462 448
463 mutex_lock(&dev_pm_qos_mtx); 449 mutex_lock(&dev_pm_qos_mtx);
464 450
465 if (!dev->power.qos) 451 if (IS_ERR(dev->power.qos))
466 ret = dev->power.power_state.event != PM_EVENT_INVALID ? 452 ret = -ENODEV;
467 dev_pm_qos_constraints_allocate(dev) : -ENODEV; 453 else if (!dev->power.qos)
454 ret = dev_pm_qos_constraints_allocate(dev);
468 455
469 if (!ret) 456 if (!ret)
470 ret = blocking_notifier_chain_register( 457 ret = blocking_notifier_chain_register(
@@ -493,7 +480,7 @@ int dev_pm_qos_remove_notifier(struct device *dev,
493 mutex_lock(&dev_pm_qos_mtx); 480 mutex_lock(&dev_pm_qos_mtx);
494 481
495 /* Silently return if the constraints object is not present. */ 482 /* Silently return if the constraints object is not present. */
496 if (dev->power.qos) 483 if (!IS_ERR_OR_NULL(dev->power.qos))
497 retval = blocking_notifier_chain_unregister( 484 retval = blocking_notifier_chain_unregister(
498 dev->power.qos->latency.notifiers, 485 dev->power.qos->latency.notifiers,
499 notifier); 486 notifier);
@@ -563,16 +550,28 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
563static void __dev_pm_qos_drop_user_request(struct device *dev, 550static void __dev_pm_qos_drop_user_request(struct device *dev,
564 enum dev_pm_qos_req_type type) 551 enum dev_pm_qos_req_type type)
565{ 552{
553 struct dev_pm_qos_request *req = NULL;
554
566 switch(type) { 555 switch(type) {
567 case DEV_PM_QOS_LATENCY: 556 case DEV_PM_QOS_LATENCY:
568 dev_pm_qos_remove_request(dev->power.qos->latency_req); 557 req = dev->power.qos->latency_req;
569 dev->power.qos->latency_req = NULL; 558 dev->power.qos->latency_req = NULL;
570 break; 559 break;
571 case DEV_PM_QOS_FLAGS: 560 case DEV_PM_QOS_FLAGS:
572 dev_pm_qos_remove_request(dev->power.qos->flags_req); 561 req = dev->power.qos->flags_req;
573 dev->power.qos->flags_req = NULL; 562 dev->power.qos->flags_req = NULL;
574 break; 563 break;
575 } 564 }
565 __dev_pm_qos_remove_request(req);
566 kfree(req);
567}
568
569static void dev_pm_qos_drop_user_request(struct device *dev,
570 enum dev_pm_qos_req_type type)
571{
572 mutex_lock(&dev_pm_qos_mtx);
573 __dev_pm_qos_drop_user_request(dev, type);
574 mutex_unlock(&dev_pm_qos_mtx);
576} 575}
577 576
578/** 577/**
@@ -588,36 +587,66 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
588 if (!device_is_registered(dev) || value < 0) 587 if (!device_is_registered(dev) || value < 0)
589 return -EINVAL; 588 return -EINVAL;
590 589
591 if (dev->power.qos && dev->power.qos->latency_req)
592 return -EEXIST;
593
594 req = kzalloc(sizeof(*req), GFP_KERNEL); 590 req = kzalloc(sizeof(*req), GFP_KERNEL);
595 if (!req) 591 if (!req)
596 return -ENOMEM; 592 return -ENOMEM;
597 593
598 ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value); 594 ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value);
599 if (ret < 0) 595 if (ret < 0) {
596 kfree(req);
600 return ret; 597 return ret;
598 }
601 599
600 mutex_lock(&dev_pm_qos_sysfs_mtx);
601
602 mutex_lock(&dev_pm_qos_mtx);
603
604 if (IS_ERR_OR_NULL(dev->power.qos))
605 ret = -ENODEV;
606 else if (dev->power.qos->latency_req)
607 ret = -EEXIST;
608
609 if (ret < 0) {
610 __dev_pm_qos_remove_request(req);
611 kfree(req);
612 mutex_unlock(&dev_pm_qos_mtx);
613 goto out;
614 }
602 dev->power.qos->latency_req = req; 615 dev->power.qos->latency_req = req;
616
617 mutex_unlock(&dev_pm_qos_mtx);
618
603 ret = pm_qos_sysfs_add_latency(dev); 619 ret = pm_qos_sysfs_add_latency(dev);
604 if (ret) 620 if (ret)
605 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); 621 dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
606 622
623 out:
624 mutex_unlock(&dev_pm_qos_sysfs_mtx);
607 return ret; 625 return ret;
608} 626}
609EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit); 627EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
610 628
629static void __dev_pm_qos_hide_latency_limit(struct device *dev)
630{
631 if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req)
632 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
633}
634
611/** 635/**
612 * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space. 636 * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
613 * @dev: Device whose PM QoS latency limit is to be hidden from user space. 637 * @dev: Device whose PM QoS latency limit is to be hidden from user space.
614 */ 638 */
615void dev_pm_qos_hide_latency_limit(struct device *dev) 639void dev_pm_qos_hide_latency_limit(struct device *dev)
616{ 640{
617 if (dev->power.qos && dev->power.qos->latency_req) { 641 mutex_lock(&dev_pm_qos_sysfs_mtx);
618 pm_qos_sysfs_remove_latency(dev); 642
619 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY); 643 pm_qos_sysfs_remove_latency(dev);
620 } 644
645 mutex_lock(&dev_pm_qos_mtx);
646 __dev_pm_qos_hide_latency_limit(dev);
647 mutex_unlock(&dev_pm_qos_mtx);
648
649 mutex_unlock(&dev_pm_qos_sysfs_mtx);
621} 650}
622EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit); 651EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
623 652
@@ -634,41 +663,70 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val)
634 if (!device_is_registered(dev)) 663 if (!device_is_registered(dev))
635 return -EINVAL; 664 return -EINVAL;
636 665
637 if (dev->power.qos && dev->power.qos->flags_req)
638 return -EEXIST;
639
640 req = kzalloc(sizeof(*req), GFP_KERNEL); 666 req = kzalloc(sizeof(*req), GFP_KERNEL);
641 if (!req) 667 if (!req)
642 return -ENOMEM; 668 return -ENOMEM;
643 669
644 pm_runtime_get_sync(dev);
645 ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val); 670 ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
646 if (ret < 0) 671 if (ret < 0) {
647 goto fail; 672 kfree(req);
673 return ret;
674 }
675
676 pm_runtime_get_sync(dev);
677 mutex_lock(&dev_pm_qos_sysfs_mtx);
678
679 mutex_lock(&dev_pm_qos_mtx);
680
681 if (IS_ERR_OR_NULL(dev->power.qos))
682 ret = -ENODEV;
683 else if (dev->power.qos->flags_req)
684 ret = -EEXIST;
648 685
686 if (ret < 0) {
687 __dev_pm_qos_remove_request(req);
688 kfree(req);
689 mutex_unlock(&dev_pm_qos_mtx);
690 goto out;
691 }
649 dev->power.qos->flags_req = req; 692 dev->power.qos->flags_req = req;
693
694 mutex_unlock(&dev_pm_qos_mtx);
695
650 ret = pm_qos_sysfs_add_flags(dev); 696 ret = pm_qos_sysfs_add_flags(dev);
651 if (ret) 697 if (ret)
652 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); 698 dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
653 699
654fail: 700 out:
701 mutex_unlock(&dev_pm_qos_sysfs_mtx);
655 pm_runtime_put(dev); 702 pm_runtime_put(dev);
656 return ret; 703 return ret;
657} 704}
658EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags); 705EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
659 706
707static void __dev_pm_qos_hide_flags(struct device *dev)
708{
709 if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req)
710 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
711}
712
660/** 713/**
661 * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space. 714 * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
662 * @dev: Device whose PM QoS flags are to be hidden from user space. 715 * @dev: Device whose PM QoS flags are to be hidden from user space.
663 */ 716 */
664void dev_pm_qos_hide_flags(struct device *dev) 717void dev_pm_qos_hide_flags(struct device *dev)
665{ 718{
666 if (dev->power.qos && dev->power.qos->flags_req) { 719 pm_runtime_get_sync(dev);
667 pm_qos_sysfs_remove_flags(dev); 720 mutex_lock(&dev_pm_qos_sysfs_mtx);
668 pm_runtime_get_sync(dev); 721
669 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); 722 pm_qos_sysfs_remove_flags(dev);
670 pm_runtime_put(dev); 723
671 } 724 mutex_lock(&dev_pm_qos_mtx);
725 __dev_pm_qos_hide_flags(dev);
726 mutex_unlock(&dev_pm_qos_mtx);
727
728 mutex_unlock(&dev_pm_qos_sysfs_mtx);
729 pm_runtime_put(dev);
672} 730}
673EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags); 731EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);
674 732
@@ -683,12 +741,14 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
683 s32 value; 741 s32 value;
684 int ret; 742 int ret;
685 743
686 if (!dev->power.qos || !dev->power.qos->flags_req)
687 return -EINVAL;
688
689 pm_runtime_get_sync(dev); 744 pm_runtime_get_sync(dev);
690 mutex_lock(&dev_pm_qos_mtx); 745 mutex_lock(&dev_pm_qos_mtx);
691 746
747 if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
748 ret = -EINVAL;
749 goto out;
750 }
751
692 value = dev_pm_qos_requested_flags(dev); 752 value = dev_pm_qos_requested_flags(dev);
693 if (set) 753 if (set)
694 value |= mask; 754 value |= mask;
@@ -697,9 +757,12 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
697 757
698 ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value); 758 ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);
699 759
760 out:
700 mutex_unlock(&dev_pm_qos_mtx); 761 mutex_unlock(&dev_pm_qos_mtx);
701 pm_runtime_put(dev); 762 pm_runtime_put(dev);
702
703 return ret; 763 return ret;
704} 764}
765#else /* !CONFIG_PM_RUNTIME */
766static void __dev_pm_qos_hide_latency_limit(struct device *dev) {}
767static void __dev_pm_qos_hide_flags(struct device *dev) {}
705#endif /* CONFIG_PM_RUNTIME */ 768#endif /* CONFIG_PM_RUNTIME */
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 50d16e3cb0a9..a53ebd265701 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -708,6 +708,7 @@ void rpm_sysfs_remove(struct device *dev)
708 708
709void dpm_sysfs_remove(struct device *dev) 709void dpm_sysfs_remove(struct device *dev)
710{ 710{
711 dev_pm_qos_constraints_destroy(dev);
711 rpm_sysfs_remove(dev); 712 rpm_sysfs_remove(dev);
712 sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); 713 sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
713 sysfs_remove_group(&dev->kobj, &pm_attr_group); 714 sysfs_remove_group(&dev->kobj, &pm_attr_group);
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index 1fdd8ec6af23..aa0875f6f1b7 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -404,7 +404,7 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
404 base = 0; 404 base = 0;
405 405
406 if (max < rbnode->base_reg + rbnode->blklen) 406 if (max < rbnode->base_reg + rbnode->blklen)
407 end = rbnode->base_reg + rbnode->blklen - max; 407 end = max - rbnode->base_reg + 1;
408 else 408 else
409 end = rbnode->blklen; 409 end = rbnode->blklen;
410 410
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 4706c63d0bc6..020ea2b9fd2f 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -184,6 +184,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
184 if (ret < 0) { 184 if (ret < 0) {
185 dev_err(map->dev, "IRQ thread failed to resume: %d\n", 185 dev_err(map->dev, "IRQ thread failed to resume: %d\n",
186 ret); 186 ret);
187 pm_runtime_put(map->dev);
187 return IRQ_NONE; 188 return IRQ_NONE;
188 } 189 }
189 } 190 }
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 9ab1e1fedbc9..940fc63ed5f2 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -730,12 +730,12 @@ skip_format_initialization:
730 } 730 }
731 } 731 }
732 732
733 regmap_debugfs_init(map, config->name);
734
733 ret = regcache_init(map, config); 735 ret = regcache_init(map, config);
734 if (ret != 0) 736 if (ret != 0)
735 goto err_range; 737 goto err_range;
736 738
737 regmap_debugfs_init(map, config->name);
738
739 /* Add a devres resource for dev_get_regmap() */ 739 /* Add a devres resource for dev_get_regmap() */
740 m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL); 740 m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
741 if (!m) { 741 if (!m) {
@@ -1056,6 +1056,8 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
1056 kfree(async->work_buf); 1056 kfree(async->work_buf);
1057 kfree(async); 1057 kfree(async);
1058 } 1058 }
1059
1060 return ret;
1059 } 1061 }
1060 1062
1061 trace_regmap_hw_write_start(map->dev, reg, 1063 trace_regmap_hw_write_start(map->dev, reg,