Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/power/main.c             |   2
-rw-r--r--  drivers/base/power/power.h            |   8
-rw-r--r--  drivers/base/power/qos.c              | 251
-rw-r--r--  drivers/base/power/sysfs.c            |   1
-rw-r--r--  drivers/base/regmap/internal.h        |  39
-rw-r--r--  drivers/base/regmap/regcache-lzo.c    |   6
-rw-r--r--  drivers/base/regmap/regcache-rbtree.c | 102
-rw-r--r--  drivers/base/regmap/regcache.c        | 196
-rw-r--r--  drivers/base/regmap/regmap-irq.c      |   4
-rw-r--r--  drivers/base/regmap/regmap.c          |  98
10 files changed, 491 insertions(+), 216 deletions(-)
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 2b7f77d3fcb0..15beb500a4e4 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -99,7 +99,6 @@ void device_pm_add(struct device *dev)
                 dev_warn(dev, "parent %s should not be sleeping\n",
                         dev_name(dev->parent));
         list_add_tail(&dev->power.entry, &dpm_list);
-        dev_pm_qos_constraints_init(dev);
         mutex_unlock(&dpm_list_mtx);
 }
 
@@ -113,7 +112,6 @@ void device_pm_remove(struct device *dev)
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
         complete_all(&dev->power.completion);
         mutex_lock(&dpm_list_mtx);
-        dev_pm_qos_constraints_destroy(dev);
         list_del_init(&dev->power.entry);
         mutex_unlock(&dpm_list_mtx);
         device_wakeup_disable(dev);
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index b16686a0a5a2..cfc3226ec492 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -4,7 +4,7 @@ static inline void device_pm_init_common(struct device *dev)
 {
         if (!dev->power.early_init) {
                 spin_lock_init(&dev->power.lock);
-                dev->power.power_state = PMSG_INVALID;
+                dev->power.qos = NULL;
                 dev->power.early_init = true;
         }
 }
@@ -56,14 +56,10 @@ extern void device_pm_move_last(struct device *);
 
 static inline void device_pm_sleep_init(struct device *dev) {}
 
-static inline void device_pm_add(struct device *dev)
-{
-        dev_pm_qos_constraints_init(dev);
-}
+static inline void device_pm_add(struct device *dev) {}
 
 static inline void device_pm_remove(struct device *dev)
 {
-        dev_pm_qos_constraints_destroy(dev);
         pm_runtime_remove(dev);
 }
 
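Taken together with the qos.c changes below, the hunks above give dev->power.qos a
three-state lifecycle instead of the old power_state-based bookkeeping. A condensed
sketch of the check the patch introduces at every entry point (illustrative only;
dev_pm_qos_constraints_allocate() is static to qos.c):

    /*
     * dev->power.qos after this patch:
     *   NULL              - device alive, constraints not allocated yet
     *   valid pointer     - constraints allocated on first use
     *   ERR_PTR(-ENODEV)  - set by dev_pm_qos_constraints_destroy();
     *                       later requests fail instead of racing with removal
     */
    if (IS_ERR(dev->power.qos))
            ret = -ENODEV;
    else if (!dev->power.qos)
            ret = dev_pm_qos_constraints_allocate(dev);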
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 3d4d1f8aac5c..71671c42ef45 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -41,10 +41,12 @@
 #include <linux/mutex.h>
 #include <linux/export.h>
 #include <linux/pm_runtime.h>
+#include <linux/err.h>
 
 #include "power.h"
 
 static DEFINE_MUTEX(dev_pm_qos_mtx);
+static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);
 
 static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
 
@@ -61,7 +63,7 @@ enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
         struct pm_qos_flags *pqf;
         s32 val;
 
-        if (!qos)
+        if (IS_ERR_OR_NULL(qos))
                 return PM_QOS_FLAGS_UNDEFINED;
 
         pqf = &qos->flags;
@@ -101,7 +103,8 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
  */
 s32 __dev_pm_qos_read_value(struct device *dev)
 {
-        return dev->power.qos ? pm_qos_read_value(&dev->power.qos->latency) : 0;
+        return IS_ERR_OR_NULL(dev->power.qos) ?
+                0 : pm_qos_read_value(&dev->power.qos->latency);
 }
 
 /**
@@ -198,20 +201,8 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
         return 0;
 }
 
-/**
- * dev_pm_qos_constraints_init - Initalize device's PM QoS constraints pointer.
- * @dev: target device
- *
- * Called from the device PM subsystem during device insertion under
- * device_pm_lock().
- */
-void dev_pm_qos_constraints_init(struct device *dev)
-{
-        mutex_lock(&dev_pm_qos_mtx);
-        dev->power.qos = NULL;
-        dev->power.power_state = PMSG_ON;
-        mutex_unlock(&dev_pm_qos_mtx);
-}
+static void __dev_pm_qos_hide_latency_limit(struct device *dev);
+static void __dev_pm_qos_hide_flags(struct device *dev);
 
 /**
  * dev_pm_qos_constraints_destroy
@@ -226,16 +217,20 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
         struct pm_qos_constraints *c;
         struct pm_qos_flags *f;
 
+        mutex_lock(&dev_pm_qos_sysfs_mtx);
+
         /*
          * If the device's PM QoS resume latency limit or PM QoS flags have been
          * exposed to user space, they have to be hidden at this point.
          */
-        dev_pm_qos_hide_latency_limit(dev);
-        dev_pm_qos_hide_flags(dev);
+        pm_qos_sysfs_remove_latency(dev);
+        pm_qos_sysfs_remove_flags(dev);
 
         mutex_lock(&dev_pm_qos_mtx);
 
-        dev->power.power_state = PMSG_INVALID;
+        __dev_pm_qos_hide_latency_limit(dev);
+        __dev_pm_qos_hide_flags(dev);
+
         qos = dev->power.qos;
         if (!qos)
                 goto out;
@@ -257,7 +252,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
         }
 
         spin_lock_irq(&dev->power.lock);
-        dev->power.qos = NULL;
+        dev->power.qos = ERR_PTR(-ENODEV);
         spin_unlock_irq(&dev->power.lock);
 
         kfree(c->notifiers);
@@ -265,6 +260,8 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
 
  out:
         mutex_unlock(&dev_pm_qos_mtx);
+
+        mutex_unlock(&dev_pm_qos_sysfs_mtx);
 }
 
 /**
@@ -301,32 +298,19 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
                  "%s() called for already added request\n", __func__))
                 return -EINVAL;
 
-        req->dev = dev;
-
         mutex_lock(&dev_pm_qos_mtx);
 
-        if (!dev->power.qos) {
-                if (dev->power.power_state.event == PM_EVENT_INVALID) {
-                        /* The device has been removed from the system. */
-                        req->dev = NULL;
-                        ret = -ENODEV;
-                        goto out;
-                } else {
-                        /*
-                         * Allocate the constraints data on the first call to
-                         * add_request, i.e. only if the data is not already
-                         * allocated and if the device has not been removed.
-                         */
-                        ret = dev_pm_qos_constraints_allocate(dev);
-                }
-        }
+        if (IS_ERR(dev->power.qos))
+                ret = -ENODEV;
+        else if (!dev->power.qos)
+                ret = dev_pm_qos_constraints_allocate(dev);
 
         if (!ret) {
+                req->dev = dev;
                 req->type = type;
                 ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
         }
 
- out:
         mutex_unlock(&dev_pm_qos_mtx);
 
         return ret;
@@ -344,7 +328,14 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
         s32 curr_value;
         int ret = 0;
 
-        if (!req->dev->power.qos)
+        if (!req) /*guard against callers passing in null */
+                return -EINVAL;
+
+        if (WARN(!dev_pm_qos_request_active(req),
+                 "%s() called for unknown object\n", __func__))
+                return -EINVAL;
+
+        if (IS_ERR_OR_NULL(req->dev->power.qos))
                 return -ENODEV;
 
         switch(req->type) {
@@ -386,6 +377,17 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
 {
         int ret;
 
+        mutex_lock(&dev_pm_qos_mtx);
+        ret = __dev_pm_qos_update_request(req, new_value);
+        mutex_unlock(&dev_pm_qos_mtx);
+        return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
+
+static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
+{
+        int ret;
+
         if (!req) /*guard against callers passing in null */
                 return -EINVAL;
 
@@ -393,13 +395,13 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
             "%s() called for unknown object\n", __func__))
                 return -EINVAL;
 
-        mutex_lock(&dev_pm_qos_mtx);
-        ret = __dev_pm_qos_update_request(req, new_value);
-        mutex_unlock(&dev_pm_qos_mtx);
+        if (IS_ERR_OR_NULL(req->dev->power.qos))
+                return -ENODEV;
 
+        ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+        memset(req, 0, sizeof(*req));
         return ret;
 }
-EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
 
 /**
  * dev_pm_qos_remove_request - modifies an existing qos request
@@ -418,26 +420,10 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
  */
 int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
 {
-        int ret = 0;
-
-        if (!req) /*guard against callers passing in null */
-                return -EINVAL;
-
-        if (WARN(!dev_pm_qos_request_active(req),
-            "%s() called for unknown object\n", __func__))
-                return -EINVAL;
+        int ret;
 
         mutex_lock(&dev_pm_qos_mtx);
-
-        if (req->dev->power.qos) {
-                ret = apply_constraint(req, PM_QOS_REMOVE_REQ,
-                                       PM_QOS_DEFAULT_VALUE);
-                memset(req, 0, sizeof(*req));
-        } else {
-                /* Return if the device has been removed */
-                ret = -ENODEV;
-        }
-
+        ret = __dev_pm_qos_remove_request(req);
         mutex_unlock(&dev_pm_qos_mtx);
         return ret;
 }
@@ -462,9 +448,10 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
 
         mutex_lock(&dev_pm_qos_mtx);
 
-        if (!dev->power.qos)
-                ret = dev->power.power_state.event != PM_EVENT_INVALID ?
-                        dev_pm_qos_constraints_allocate(dev) : -ENODEV;
+        if (IS_ERR(dev->power.qos))
+                ret = -ENODEV;
+        else if (!dev->power.qos)
+                ret = dev_pm_qos_constraints_allocate(dev);
 
         if (!ret)
                 ret = blocking_notifier_chain_register(
@@ -493,7 +480,7 @@ int dev_pm_qos_remove_notifier(struct device *dev,
         mutex_lock(&dev_pm_qos_mtx);
 
         /* Silently return if the constraints object is not present. */
-        if (dev->power.qos)
+        if (!IS_ERR_OR_NULL(dev->power.qos))
                 retval = blocking_notifier_chain_unregister(
                                 dev->power.qos->latency.notifiers,
                                 notifier);
@@ -563,16 +550,28 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
 static void __dev_pm_qos_drop_user_request(struct device *dev,
                                            enum dev_pm_qos_req_type type)
 {
+        struct dev_pm_qos_request *req = NULL;
+
         switch(type) {
         case DEV_PM_QOS_LATENCY:
-                dev_pm_qos_remove_request(dev->power.qos->latency_req);
+                req = dev->power.qos->latency_req;
                 dev->power.qos->latency_req = NULL;
                 break;
         case DEV_PM_QOS_FLAGS:
-                dev_pm_qos_remove_request(dev->power.qos->flags_req);
+                req = dev->power.qos->flags_req;
                 dev->power.qos->flags_req = NULL;
                 break;
         }
+        __dev_pm_qos_remove_request(req);
+        kfree(req);
+}
+
+static void dev_pm_qos_drop_user_request(struct device *dev,
+                                         enum dev_pm_qos_req_type type)
+{
+        mutex_lock(&dev_pm_qos_mtx);
+        __dev_pm_qos_drop_user_request(dev, type);
+        mutex_unlock(&dev_pm_qos_mtx);
 }
 
 /**
@@ -588,36 +587,66 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
         if (!device_is_registered(dev) || value < 0)
                 return -EINVAL;
 
-        if (dev->power.qos && dev->power.qos->latency_req)
-                return -EEXIST;
-
         req = kzalloc(sizeof(*req), GFP_KERNEL);
         if (!req)
                 return -ENOMEM;
 
         ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value);
-        if (ret < 0)
+        if (ret < 0) {
+                kfree(req);
                 return ret;
+        }
+
+        mutex_lock(&dev_pm_qos_sysfs_mtx);
+
+        mutex_lock(&dev_pm_qos_mtx);
+
+        if (IS_ERR_OR_NULL(dev->power.qos))
+                ret = -ENODEV;
+        else if (dev->power.qos->latency_req)
+                ret = -EEXIST;
+
+        if (ret < 0) {
+                __dev_pm_qos_remove_request(req);
+                kfree(req);
+                mutex_unlock(&dev_pm_qos_mtx);
+                goto out;
+        }
         dev->power.qos->latency_req = req;
+
+        mutex_unlock(&dev_pm_qos_mtx);
+
         ret = pm_qos_sysfs_add_latency(dev);
         if (ret)
-                __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
+                dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
 
+ out:
+        mutex_unlock(&dev_pm_qos_sysfs_mtx);
         return ret;
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
 
+static void __dev_pm_qos_hide_latency_limit(struct device *dev)
+{
+        if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req)
+                __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
+}
+
 /**
  * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
  * @dev: Device whose PM QoS latency limit is to be hidden from user space.
  */
 void dev_pm_qos_hide_latency_limit(struct device *dev)
 {
-        if (dev->power.qos && dev->power.qos->latency_req) {
-                pm_qos_sysfs_remove_latency(dev);
-                __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
-        }
+        mutex_lock(&dev_pm_qos_sysfs_mtx);
+
+        pm_qos_sysfs_remove_latency(dev);
+
+        mutex_lock(&dev_pm_qos_mtx);
+        __dev_pm_qos_hide_latency_limit(dev);
+        mutex_unlock(&dev_pm_qos_mtx);
+
+        mutex_unlock(&dev_pm_qos_sysfs_mtx);
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
 
@@ -634,41 +663,70 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val)
         if (!device_is_registered(dev))
                 return -EINVAL;
 
-        if (dev->power.qos && dev->power.qos->flags_req)
-                return -EEXIST;
-
         req = kzalloc(sizeof(*req), GFP_KERNEL);
         if (!req)
                 return -ENOMEM;
 
-        pm_runtime_get_sync(dev);
         ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
-        if (ret < 0)
-                goto fail;
+        if (ret < 0) {
+                kfree(req);
+                return ret;
+        }
+
+        pm_runtime_get_sync(dev);
+        mutex_lock(&dev_pm_qos_sysfs_mtx);
+
+        mutex_lock(&dev_pm_qos_mtx);
+
+        if (IS_ERR_OR_NULL(dev->power.qos))
+                ret = -ENODEV;
+        else if (dev->power.qos->flags_req)
+                ret = -EEXIST;
 
+        if (ret < 0) {
+                __dev_pm_qos_remove_request(req);
+                kfree(req);
+                mutex_unlock(&dev_pm_qos_mtx);
+                goto out;
+        }
         dev->power.qos->flags_req = req;
+
+        mutex_unlock(&dev_pm_qos_mtx);
+
         ret = pm_qos_sysfs_add_flags(dev);
         if (ret)
-                __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
+                dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
 
-fail:
+ out:
+        mutex_unlock(&dev_pm_qos_sysfs_mtx);
         pm_runtime_put(dev);
         return ret;
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
 
+static void __dev_pm_qos_hide_flags(struct device *dev)
+{
+        if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req)
+                __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
+}
+
 /**
  * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
  * @dev: Device whose PM QoS flags are to be hidden from user space.
 */
 void dev_pm_qos_hide_flags(struct device *dev)
 {
-        if (dev->power.qos && dev->power.qos->flags_req) {
-                pm_qos_sysfs_remove_flags(dev);
-                pm_runtime_get_sync(dev);
-                __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
-                pm_runtime_put(dev);
-        }
+        pm_runtime_get_sync(dev);
+        mutex_lock(&dev_pm_qos_sysfs_mtx);
+
+        pm_qos_sysfs_remove_flags(dev);
+
+        mutex_lock(&dev_pm_qos_mtx);
+        __dev_pm_qos_hide_flags(dev);
+        mutex_unlock(&dev_pm_qos_mtx);
+
+        mutex_unlock(&dev_pm_qos_sysfs_mtx);
+        pm_runtime_put(dev);
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);
 
@@ -683,12 +741,14 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
         s32 value;
         int ret;
 
-        if (!dev->power.qos || !dev->power.qos->flags_req)
-                return -EINVAL;
-
         pm_runtime_get_sync(dev);
         mutex_lock(&dev_pm_qos_mtx);
 
+        if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
+                ret = -EINVAL;
+                goto out;
+        }
+
         value = dev_pm_qos_requested_flags(dev);
         if (set)
                 value |= mask;
@@ -697,9 +757,12 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
 
         ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);
 
+ out:
         mutex_unlock(&dev_pm_qos_mtx);
         pm_runtime_put(dev);
-
         return ret;
 }
+#else /* !CONFIG_PM_RUNTIME */
+static void __dev_pm_qos_hide_latency_limit(struct device *dev) {}
+static void __dev_pm_qos_hide_flags(struct device *dev) {}
 #endif /* CONFIG_PM_RUNTIME */
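For reference, a minimal sketch of how a driver consumes this API after the patch;
the request variable and the latency values (in microseconds) are hypothetical:

    #include <linux/pm_qos.h>

    static struct dev_pm_qos_request my_req;    /* hypothetical */

    static int my_driver_constrain(struct device *dev)
    {
            int ret;

            /* fails with -ENODEV once dev->power.qos == ERR_PTR(-ENODEV) */
            ret = dev_pm_qos_add_request(dev, &my_req,
                                         DEV_PM_QOS_LATENCY, 100);
            if (ret < 0)
                    return ret;

            ret = dev_pm_qos_update_request(&my_req, 50);
            if (ret < 0)
                    dev_pm_qos_remove_request(&my_req);
            return ret;
    }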
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 50d16e3cb0a9..a53ebd265701 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -708,6 +708,7 @@ void rpm_sysfs_remove(struct device *dev)
 
 void dpm_sysfs_remove(struct device *dev)
 {
+        dev_pm_qos_constraints_destroy(dev);
         rpm_sysfs_remove(dev);
         sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
         sysfs_remove_group(&dev->kobj, &pm_attr_group);
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index dc23508745fe..c130536e0ab0 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -38,7 +38,8 @@ struct regmap_format {
                        unsigned int reg, unsigned int val);
         void (*format_reg)(void *buf, unsigned int reg, unsigned int shift);
         void (*format_val)(void *buf, unsigned int val, unsigned int shift);
-        unsigned int (*parse_val)(void *buf);
+        unsigned int (*parse_val)(const void *buf);
+        void (*parse_inplace)(void *buf);
 };
 
 struct regmap_async {
@@ -126,6 +127,9 @@ struct regmap {
         void *cache;
         u32 cache_dirty;
 
+        unsigned long *cache_present;
+        unsigned int cache_present_nbits;
+
         struct reg_default *patch;
         int patch_regs;
 
@@ -188,12 +192,35 @@ int regcache_read(struct regmap *map,
 int regcache_write(struct regmap *map,
                         unsigned int reg, unsigned int value);
 int regcache_sync(struct regmap *map);
-
-unsigned int regcache_get_val(const void *base, unsigned int idx,
-                              unsigned int word_size);
-bool regcache_set_val(void *base, unsigned int idx,
-                      unsigned int val, unsigned int word_size);
+int regcache_sync_block(struct regmap *map, void *block,
+                        unsigned int block_base, unsigned int start,
+                        unsigned int end);
+
+static inline const void *regcache_get_val_addr(struct regmap *map,
+                                                const void *base,
+                                                unsigned int idx)
+{
+        return base + (map->cache_word_size * idx);
+}
+
+unsigned int regcache_get_val(struct regmap *map, const void *base,
+                              unsigned int idx);
+bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
+                      unsigned int val);
 int regcache_lookup_reg(struct regmap *map, unsigned int reg);
+int regcache_set_reg_present(struct regmap *map, unsigned int reg);
+
+static inline bool regcache_reg_present(struct regmap *map, unsigned int reg)
+{
+        if (!map->cache_present)
+                return true;
+        if (reg > map->cache_present_nbits)
+                return false;
+        return map->cache_present[BIT_WORD(reg)] & BIT_MASK(reg);
+}
+
+int _regmap_raw_write(struct regmap *map, unsigned int reg,
+                      const void *val, size_t val_len, bool async);
 
 void regmap_async_complete_cb(struct regmap_async *async, int ret);
 
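The regcache_reg_present() helper added above is a plain bitmap lookup. A standalone
model of it (userspace C, assuming the same BIT_WORD()/BIT_MASK() semantics as the
kernel macros):

    #include <stdbool.h>
    #include <limits.h>

    #define BITS_PER_LONG  (sizeof(long) * CHAR_BIT)
    #define BIT_WORD(nr)   ((nr) / BITS_PER_LONG)
    #define BIT_MASK(nr)   (1UL << ((nr) % BITS_PER_LONG))

    /* No bitmap allocated yet means "assume every register is present". */
    static bool reg_present(const unsigned long *bitmap, unsigned int nbits,
                            unsigned int reg)
    {
            if (!bitmap)
                    return true;
            if (reg > nbits)        /* note: the patch tests >, not >= */
                    return false;
            return bitmap[BIT_WORD(reg)] & BIT_MASK(reg);
    }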
diff --git a/drivers/base/regmap/regcache-lzo.c b/drivers/base/regmap/regcache-lzo.c
index afd6aa91a0df..e210a6d1406a 100644
--- a/drivers/base/regmap/regcache-lzo.c
+++ b/drivers/base/regmap/regcache-lzo.c
@@ -260,8 +260,7 @@ static int regcache_lzo_read(struct regmap *map,
         ret = regcache_lzo_decompress_cache_block(map, lzo_block);
         if (ret >= 0)
                 /* fetch the value from the cache */
-                *value = regcache_get_val(lzo_block->dst, blkpos,
-                                          map->cache_word_size);
+                *value = regcache_get_val(map, lzo_block->dst, blkpos);
 
         kfree(lzo_block->dst);
         /* restore the pointer and length of the compressed block */
@@ -304,8 +303,7 @@ static int regcache_lzo_write(struct regmap *map,
         }
 
         /* write the new value to the cache */
-        if (regcache_set_val(lzo_block->dst, blkpos, value,
-                             map->cache_word_size)) {
+        if (regcache_set_val(map, lzo_block->dst, blkpos, value)) {
                 kfree(lzo_block->dst);
                 goto out;
         }
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index e6732cf7c06e..aa0875f6f1b7 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -47,22 +47,21 @@ static inline void regcache_rbtree_get_base_top_reg(
         *top = rbnode->base_reg + ((rbnode->blklen - 1) * map->reg_stride);
 }
 
-static unsigned int regcache_rbtree_get_register(
-        struct regcache_rbtree_node *rbnode, unsigned int idx,
-        unsigned int word_size)
+static unsigned int regcache_rbtree_get_register(struct regmap *map,
+        struct regcache_rbtree_node *rbnode, unsigned int idx)
 {
-        return regcache_get_val(rbnode->block, idx, word_size);
+        return regcache_get_val(map, rbnode->block, idx);
 }
 
-static void regcache_rbtree_set_register(struct regcache_rbtree_node *rbnode,
-                                         unsigned int idx, unsigned int val,
-                                         unsigned int word_size)
+static void regcache_rbtree_set_register(struct regmap *map,
+                                         struct regcache_rbtree_node *rbnode,
+                                         unsigned int idx, unsigned int val)
 {
-        regcache_set_val(rbnode->block, idx, val, word_size);
+        regcache_set_val(map, rbnode->block, idx, val);
 }
 
 static struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map,
                                                             unsigned int reg)
 {
         struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
         struct rb_node *node;
@@ -139,15 +138,21 @@ static int rbtree_show(struct seq_file *s, void *ignored)
         struct regcache_rbtree_node *n;
         struct rb_node *node;
         unsigned int base, top;
+        size_t mem_size;
         int nodes = 0;
         int registers = 0;
         int this_registers, average;
 
         map->lock(map);
 
+        mem_size = sizeof(*rbtree_ctx);
+        mem_size += BITS_TO_LONGS(map->cache_present_nbits) * sizeof(long);
+
         for (node = rb_first(&rbtree_ctx->root); node != NULL;
              node = rb_next(node)) {
                 n = container_of(node, struct regcache_rbtree_node, node);
+                mem_size += sizeof(*n);
+                mem_size += (n->blklen * map->cache_word_size);
 
                 regcache_rbtree_get_base_top_reg(map, n, &base, &top);
                 this_registers = ((top - base) / map->reg_stride) + 1;
@@ -162,8 +167,8 @@ static int rbtree_show(struct seq_file *s, void *ignored)
         else
                 average = 0;
 
-        seq_printf(s, "%d nodes, %d registers, average %d registers\n",
-                   nodes, registers, average);
+        seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n",
+                   nodes, registers, average, mem_size);
 
         map->unlock(map);
 
@@ -260,8 +265,9 @@ static int regcache_rbtree_read(struct regmap *map,
         rbnode = regcache_rbtree_lookup(map, reg);
         if (rbnode) {
                 reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
-                *value = regcache_rbtree_get_register(rbnode, reg_tmp,
-                                                      map->cache_word_size);
+                if (!regcache_reg_present(map, reg))
+                        return -ENOENT;
+                *value = regcache_rbtree_get_register(map, rbnode, reg_tmp);
         } else {
                 return -ENOENT;
         }
@@ -270,21 +276,23 @@ static int regcache_rbtree_read(struct regmap *map,
 }
 
 
-static int regcache_rbtree_insert_to_block(struct regcache_rbtree_node *rbnode,
+static int regcache_rbtree_insert_to_block(struct regmap *map,
+                                           struct regcache_rbtree_node *rbnode,
                                            unsigned int pos, unsigned int reg,
-                                           unsigned int value, unsigned int word_size)
+                                           unsigned int value)
 {
         u8 *blk;
 
         blk = krealloc(rbnode->block,
-                       (rbnode->blklen + 1) * word_size, GFP_KERNEL);
+                       (rbnode->blklen + 1) * map->cache_word_size,
+                       GFP_KERNEL);
         if (!blk)
                 return -ENOMEM;
 
         /* insert the register value in the correct place in the rbnode block */
-        memmove(blk + (pos + 1) * word_size,
-                blk + pos * word_size,
-                (rbnode->blklen - pos) * word_size);
+        memmove(blk + (pos + 1) * map->cache_word_size,
+                blk + pos * map->cache_word_size,
+                (rbnode->blklen - pos) * map->cache_word_size);
 
         /* update the rbnode block, its size and the base register */
         rbnode->block = blk;
@@ -292,7 +300,7 @@ static int regcache_rbtree_insert_to_block(struct regcache_rbtree_node *rbnode,
         if (!pos)
                 rbnode->base_reg = reg;
 
-        regcache_rbtree_set_register(rbnode, pos, value, word_size);
+        regcache_rbtree_set_register(map, rbnode, pos, value);
         return 0;
 }
 
298 306
@@ -302,25 +310,24 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
302 struct regcache_rbtree_ctx *rbtree_ctx; 310 struct regcache_rbtree_ctx *rbtree_ctx;
303 struct regcache_rbtree_node *rbnode, *rbnode_tmp; 311 struct regcache_rbtree_node *rbnode, *rbnode_tmp;
304 struct rb_node *node; 312 struct rb_node *node;
305 unsigned int val;
306 unsigned int reg_tmp; 313 unsigned int reg_tmp;
307 unsigned int pos; 314 unsigned int pos;
308 int i; 315 int i;
309 int ret; 316 int ret;
310 317
311 rbtree_ctx = map->cache; 318 rbtree_ctx = map->cache;
319 /* update the reg_present bitmap, make space if necessary */
320 ret = regcache_set_reg_present(map, reg);
321 if (ret < 0)
322 return ret;
323
312 /* if we can't locate it in the cached rbnode we'll have 324 /* if we can't locate it in the cached rbnode we'll have
313 * to traverse the rbtree looking for it. 325 * to traverse the rbtree looking for it.
314 */ 326 */
315 rbnode = regcache_rbtree_lookup(map, reg); 327 rbnode = regcache_rbtree_lookup(map, reg);
316 if (rbnode) { 328 if (rbnode) {
317 reg_tmp = (reg - rbnode->base_reg) / map->reg_stride; 329 reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
318 val = regcache_rbtree_get_register(rbnode, reg_tmp, 330 regcache_rbtree_set_register(map, rbnode, reg_tmp, value);
319 map->cache_word_size);
320 if (val == value)
321 return 0;
322 regcache_rbtree_set_register(rbnode, reg_tmp, value,
323 map->cache_word_size);
324 } else { 331 } else {
325 /* look for an adjacent register to the one we are about to add */ 332 /* look for an adjacent register to the one we are about to add */
326 for (node = rb_first(&rbtree_ctx->root); node; 333 for (node = rb_first(&rbtree_ctx->root); node;
@@ -337,9 +344,10 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
337 pos = i + 1; 344 pos = i + 1;
338 else 345 else
339 pos = i; 346 pos = i;
340 ret = regcache_rbtree_insert_to_block(rbnode_tmp, pos, 347 ret = regcache_rbtree_insert_to_block(map,
341 reg, value, 348 rbnode_tmp,
342 map->cache_word_size); 349 pos, reg,
350 value);
343 if (ret) 351 if (ret)
344 return ret; 352 return ret;
345 rbtree_ctx->cached_rbnode = rbnode_tmp; 353 rbtree_ctx->cached_rbnode = rbnode_tmp;
@@ -354,7 +362,7 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
354 rbnode = kzalloc(sizeof *rbnode, GFP_KERNEL); 362 rbnode = kzalloc(sizeof *rbnode, GFP_KERNEL);
355 if (!rbnode) 363 if (!rbnode)
356 return -ENOMEM; 364 return -ENOMEM;
357 rbnode->blklen = 1; 365 rbnode->blklen = sizeof(*rbnode);
358 rbnode->base_reg = reg; 366 rbnode->base_reg = reg;
359 rbnode->block = kmalloc(rbnode->blklen * map->cache_word_size, 367 rbnode->block = kmalloc(rbnode->blklen * map->cache_word_size,
360 GFP_KERNEL); 368 GFP_KERNEL);
@@ -362,7 +370,7 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
362 kfree(rbnode); 370 kfree(rbnode);
363 return -ENOMEM; 371 return -ENOMEM;
364 } 372 }
365 regcache_rbtree_set_register(rbnode, 0, value, map->cache_word_size); 373 regcache_rbtree_set_register(map, rbnode, 0, value);
366 regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode); 374 regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
367 rbtree_ctx->cached_rbnode = rbnode; 375 rbtree_ctx->cached_rbnode = rbnode;
368 } 376 }
@@ -376,10 +384,8 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
         struct regcache_rbtree_ctx *rbtree_ctx;
         struct rb_node *node;
         struct regcache_rbtree_node *rbnode;
-        unsigned int regtmp;
-        unsigned int val;
         int ret;
-        int i, base, end;
+        int base, end;
 
         rbtree_ctx = map->cache;
         for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
@@ -398,31 +404,17 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
                         base = 0;
 
                 if (max < rbnode->base_reg + rbnode->blklen)
-                        end = rbnode->base_reg + rbnode->blklen - max;
+                        end = max - rbnode->base_reg + 1;
                 else
                         end = rbnode->blklen;
 
-                for (i = base; i < end; i++) {
-                        regtmp = rbnode->base_reg + (i * map->reg_stride);
-                        val = regcache_rbtree_get_register(rbnode, i,
-                                                           map->cache_word_size);
-
-                        /* Is this the hardware default? If so skip. */
-                        ret = regcache_lookup_reg(map, regtmp);
-                        if (ret >= 0 && val == map->reg_defaults[ret].def)
-                                continue;
-
-                        map->cache_bypass = 1;
-                        ret = _regmap_write(map, regtmp, val);
-                        map->cache_bypass = 0;
-                        if (ret)
-                                return ret;
-                        dev_dbg(map->dev, "Synced register %#x, value %#x\n",
-                                regtmp, val);
-                }
+                ret = regcache_sync_block(map, rbnode->block, rbnode->base_reg,
+                                          base, end);
+                if (ret != 0)
+                        return ret;
         }
 
-        return 0;
+        return regmap_async_complete(map);
 }
 
 struct regcache_ops regcache_rbtree_ops = {
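The bound fix in regcache_rbtree_sync() above is easiest to see with hypothetical
numbers (assuming reg_stride == 1): a block with base_reg = 10 and blklen = 8 caches
registers 10..17, so syncing up to max = 14 must cover block indices 0..4:

    /* old: end = rbnode->base_reg + rbnode->blklen - max
     *          = 10 + 8 - 14 = 4     -> "i < end" stops at index 3; reg 14 lost
     * new: end = max - rbnode->base_reg + 1
     *          = 14 - 10 + 1 = 5     -> "i < end" covers indices 0..4
     */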
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index e69ff3e4742c..75923f2396bd 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -45,8 +45,8 @@ static int regcache_hw_init(struct regmap *map)
         tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
         if (!tmp_buf)
                 return -EINVAL;
-        ret = regmap_bulk_read(map, 0, tmp_buf,
+        ret = regmap_raw_read(map, 0, tmp_buf,
                                map->num_reg_defaults_raw);
         map->cache_bypass = cache_bypass;
         if (ret < 0) {
                 kfree(tmp_buf);
@@ -58,8 +58,7 @@ static int regcache_hw_init(struct regmap *map)
 
         /* calculate the size of reg_defaults */
         for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++) {
-                val = regcache_get_val(map->reg_defaults_raw,
-                                       i, map->cache_word_size);
+                val = regcache_get_val(map, map->reg_defaults_raw, i);
                 if (regmap_volatile(map, i * map->reg_stride))
                         continue;
                 count++;
@@ -75,8 +74,7 @@ static int regcache_hw_init(struct regmap *map)
         /* fill the reg_defaults */
         map->num_reg_defaults = count;
         for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
-                val = regcache_get_val(map->reg_defaults_raw,
-                                       i, map->cache_word_size);
+                val = regcache_get_val(map, map->reg_defaults_raw, i);
                 if (regmap_volatile(map, i * map->reg_stride))
                         continue;
                 map->reg_defaults[j].reg = i * map->reg_stride;
@@ -123,6 +121,8 @@ int regcache_init(struct regmap *map, const struct regmap_config *config)
         map->reg_defaults_raw = config->reg_defaults_raw;
         map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8);
         map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;
+        map->cache_present = NULL;
+        map->cache_present_nbits = 0;
 
         map->cache = NULL;
         map->cache_ops = cache_types[i];
@@ -181,6 +181,7 @@ void regcache_exit(struct regmap *map)
 
         BUG_ON(!map->cache_ops);
 
+        kfree(map->cache_present);
         kfree(map->reg_defaults);
         if (map->cache_free)
                 kfree(map->reg_defaults_raw);
@@ -417,28 +418,68 @@ void regcache_cache_bypass(struct regmap *map, bool enable)
 }
 EXPORT_SYMBOL_GPL(regcache_cache_bypass);
 
-bool regcache_set_val(void *base, unsigned int idx,
-                      unsigned int val, unsigned int word_size)
+int regcache_set_reg_present(struct regmap *map, unsigned int reg)
 {
-        switch (word_size) {
+        unsigned long *cache_present;
+        unsigned int cache_present_size;
+        unsigned int nregs;
+        int i;
+
+        nregs = reg + 1;
+        cache_present_size = BITS_TO_LONGS(nregs);
+        cache_present_size *= sizeof(long);
+
+        if (!map->cache_present) {
+                cache_present = kmalloc(cache_present_size, GFP_KERNEL);
+                if (!cache_present)
+                        return -ENOMEM;
+                bitmap_zero(cache_present, nregs);
+                map->cache_present = cache_present;
+                map->cache_present_nbits = nregs;
+        }
+
+        if (nregs > map->cache_present_nbits) {
+                cache_present = krealloc(map->cache_present,
+                                         cache_present_size, GFP_KERNEL);
+                if (!cache_present)
+                        return -ENOMEM;
+                for (i = 0; i < nregs; i++)
+                        if (i >= map->cache_present_nbits)
+                                clear_bit(i, cache_present);
+                map->cache_present = cache_present;
+                map->cache_present_nbits = nregs;
+        }
+
+        set_bit(reg, map->cache_present);
+        return 0;
+}
+
+bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
+                      unsigned int val)
+{
+        if (regcache_get_val(map, base, idx) == val)
+                return true;
+
+        /* Use device native format if possible */
+        if (map->format.format_val) {
+                map->format.format_val(base + (map->cache_word_size * idx),
+                                       val, 0);
+                return false;
+        }
+
+        switch (map->cache_word_size) {
         case 1: {
                 u8 *cache = base;
-                if (cache[idx] == val)
-                        return true;
                 cache[idx] = val;
                 break;
         }
         case 2: {
                 u16 *cache = base;
-                if (cache[idx] == val)
-                        return true;
                 cache[idx] = val;
                 break;
         }
         case 4: {
                 u32 *cache = base;
-                if (cache[idx] == val)
-                        return true;
                 cache[idx] = val;
                 break;
         }
@@ -448,13 +489,18 @@ bool regcache_set_val(void *base, unsigned int idx,
         return false;
 }
 
-unsigned int regcache_get_val(const void *base, unsigned int idx,
-                              unsigned int word_size)
+unsigned int regcache_get_val(struct regmap *map, const void *base,
+                              unsigned int idx)
 {
         if (!base)
                 return -EINVAL;
 
-        switch (word_size) {
+        /* Use device native format if possible */
+        if (map->format.parse_val)
+                return map->format.parse_val(regcache_get_val_addr(map, base,
+                                                                   idx));
+
+        switch (map->cache_word_size) {
         case 1: {
                 const u8 *cache = base;
                 return cache[idx];
@@ -498,3 +544,117 @@ int regcache_lookup_reg(struct regmap *map, unsigned int reg)
         else
                 return -ENOENT;
 }
+
+static int regcache_sync_block_single(struct regmap *map, void *block,
+                                      unsigned int block_base,
+                                      unsigned int start, unsigned int end)
+{
+        unsigned int i, regtmp, val;
+        int ret;
+
+        for (i = start; i < end; i++) {
+                regtmp = block_base + (i * map->reg_stride);
+
+                if (!regcache_reg_present(map, regtmp))
+                        continue;
+
+                val = regcache_get_val(map, block, i);
+
+                /* Is this the hardware default? If so skip. */
+                ret = regcache_lookup_reg(map, regtmp);
+                if (ret >= 0 && val == map->reg_defaults[ret].def)
+                        continue;
+
+                map->cache_bypass = 1;
+
+                ret = _regmap_write(map, regtmp, val);
+
+                map->cache_bypass = 0;
+                if (ret != 0)
+                        return ret;
+                dev_dbg(map->dev, "Synced register %#x, value %#x\n",
+                        regtmp, val);
+        }
+
+        return 0;
+}
+
+static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
+                                         unsigned int base, unsigned int cur)
+{
+        size_t val_bytes = map->format.val_bytes;
+        int ret, count;
+
+        if (*data == NULL)
+                return 0;
+
+        count = cur - base;
+
+        dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n",
+                count * val_bytes, count, base, cur - 1);
+
+        map->cache_bypass = 1;
+
+        ret = _regmap_raw_write(map, base, *data, count * val_bytes,
+                                false);
+
+        map->cache_bypass = 0;
+
+        *data = NULL;
+
+        return ret;
+}
+
+static int regcache_sync_block_raw(struct regmap *map, void *block,
+                                   unsigned int block_base, unsigned int start,
+                                   unsigned int end)
+{
+        unsigned int i, val;
+        unsigned int regtmp = 0;
+        unsigned int base = 0;
+        const void *data = NULL;
+        int ret;
+
+        for (i = start; i < end; i++) {
+                regtmp = block_base + (i * map->reg_stride);
+
+                if (!regcache_reg_present(map, regtmp)) {
+                        ret = regcache_sync_block_raw_flush(map, &data,
+                                                            base, regtmp);
+                        if (ret != 0)
+                                return ret;
+                        continue;
+                }
+
+                val = regcache_get_val(map, block, i);
+
+                /* Is this the hardware default? If so skip. */
+                ret = regcache_lookup_reg(map, regtmp);
+                if (ret >= 0 && val == map->reg_defaults[ret].def) {
+                        ret = regcache_sync_block_raw_flush(map, &data,
+                                                            base, regtmp);
+                        if (ret != 0)
+                                return ret;
+                        continue;
+                }
+
+                if (!data) {
+                        data = regcache_get_val_addr(map, block, i);
+                        base = regtmp;
+                }
+        }
+
+        return regcache_sync_block_raw_flush(map, &data, base, regtmp);
+}
+
+int regcache_sync_block(struct regmap *map, void *block,
+                        unsigned int block_base, unsigned int start,
+                        unsigned int end)
+{
+        if (regmap_can_raw_write(map))
+                return regcache_sync_block_raw(map, block, block_base,
+                                               start, end);
+        else
+                return regcache_sync_block_single(map, block, block_base,
+                                                  start, end);
+}
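A sketch of how the new block sync is typically exercised, e.g. from a driver's
resume path (hypothetical driver code; regcache_cache_only(), regcache_sync() and
dev_get_regmap() are the existing public entry points):

    static int my_chip_resume(struct device *dev)   /* hypothetical */
    {
            struct regmap *map = dev_get_regmap(dev, NULL);
            int ret;

            regcache_cache_only(map, false);        /* allow hw access again */
            /* replays non-default cached registers; with this patch, runs of
             * adjacent present registers go out as single raw writes whenever
             * regmap_can_raw_write(map) holds */
            ret = regcache_sync(map);
            if (ret != 0)
                    dev_err(dev, "regcache sync failed: %d\n", ret);
            return ret;
    }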
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 4706c63d0bc6..1643e889bafc 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -184,6 +184,7 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
                 if (ret < 0) {
                         dev_err(map->dev, "IRQ thread failed to resume: %d\n",
                                 ret);
+                        pm_runtime_put(map->dev);
                         return IRQ_NONE;
                 }
         }
@@ -459,7 +460,8 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
         ret = request_threaded_irq(irq, NULL, regmap_irq_thread, irq_flags,
                                    chip->name, d);
         if (ret != 0) {
-                dev_err(map->dev, "Failed to request IRQ %d: %d\n", irq, ret);
+                dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
+                        irq, chip->name, ret);
                 goto err_domain;
         }
 
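The first hunk above plugs a runtime PM reference leak: the IRQ thread takes a
runtime PM reference before handling the interrupt, so the early-return error path
must drop it again. A hedged sketch of the balanced pattern (abbreviated; the
conditionals of the real function are omitted):

    ret = pm_runtime_get_sync(map->dev);
    if (ret < 0) {
            dev_err(map->dev, "IRQ thread failed to resume: %d\n", ret);
            pm_runtime_put(map->dev);       /* the fix: release the reference
                                               taken just above */
            return IRQ_NONE;
    }
    /* ... read and acknowledge interrupt status ... */
    pm_runtime_put(map->dev);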
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 3d2367501fd0..a941dcfe7590 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -228,30 +228,39 @@ static void regmap_format_32_native(void *buf, unsigned int val,
         *(u32 *)buf = val << shift;
 }
 
-static unsigned int regmap_parse_8(void *buf)
+static void regmap_parse_inplace_noop(void *buf)
 {
-        u8 *b = buf;
+}
+
+static unsigned int regmap_parse_8(const void *buf)
+{
+        const u8 *b = buf;
 
         return b[0];
 }
 
-static unsigned int regmap_parse_16_be(void *buf)
+static unsigned int regmap_parse_16_be(const void *buf)
+{
+        const __be16 *b = buf;
+
+        return be16_to_cpu(b[0]);
+}
+
+static void regmap_parse_16_be_inplace(void *buf)
 {
         __be16 *b = buf;
 
         b[0] = be16_to_cpu(b[0]);
-
-        return b[0];
 }
 
-static unsigned int regmap_parse_16_native(void *buf)
+static unsigned int regmap_parse_16_native(const void *buf)
 {
         return *(u16 *)buf;
 }
 
-static unsigned int regmap_parse_24(void *buf)
+static unsigned int regmap_parse_24(const void *buf)
 {
-        u8 *b = buf;
+        const u8 *b = buf;
         unsigned int ret = b[2];
         ret |= ((unsigned int)b[1]) << 8;
         ret |= ((unsigned int)b[0]) << 16;
@@ -259,16 +268,21 @@ static unsigned int regmap_parse_24(void *buf)
         return ret;
 }
 
-static unsigned int regmap_parse_32_be(void *buf)
+static unsigned int regmap_parse_32_be(const void *buf)
+{
+        const __be32 *b = buf;
+
+        return be32_to_cpu(b[0]);
+}
+
+static void regmap_parse_32_be_inplace(void *buf)
 {
         __be32 *b = buf;
 
         b[0] = be32_to_cpu(b[0]);
-
-        return b[0];
 }
 
-static unsigned int regmap_parse_32_native(void *buf)
+static unsigned int regmap_parse_32_native(const void *buf)
 {
         return *(u32 *)buf;
 }
@@ -555,16 +569,21 @@ struct regmap *regmap_init(struct device *dev,
                 goto err_map;
         }
 
+        if (val_endian == REGMAP_ENDIAN_NATIVE)
+                map->format.parse_inplace = regmap_parse_inplace_noop;
+
         switch (config->val_bits) {
         case 8:
                 map->format.format_val = regmap_format_8;
                 map->format.parse_val = regmap_parse_8;
+                map->format.parse_inplace = regmap_parse_inplace_noop;
                 break;
         case 16:
                 switch (val_endian) {
                 case REGMAP_ENDIAN_BIG:
                         map->format.format_val = regmap_format_16_be;
                         map->format.parse_val = regmap_parse_16_be;
+                        map->format.parse_inplace = regmap_parse_16_be_inplace;
                         break;
                 case REGMAP_ENDIAN_NATIVE:
                         map->format.format_val = regmap_format_16_native;
@@ -585,6 +604,7 @@ struct regmap *regmap_init(struct device *dev,
                 case REGMAP_ENDIAN_BIG:
                         map->format.format_val = regmap_format_32_be;
                         map->format.parse_val = regmap_parse_32_be;
+                        map->format.parse_inplace = regmap_parse_32_be_inplace;
                         break;
                 case REGMAP_ENDIAN_NATIVE:
                         map->format.format_val = regmap_format_32_native;
@@ -710,12 +730,12 @@ skip_format_initialization:
                 }
         }
 
+        regmap_debugfs_init(map, config->name);
+
         ret = regcache_init(map, config);
         if (ret != 0)
                 goto err_range;
 
-        regmap_debugfs_init(map, config->name);
-
         /* Add a devres resource for dev_get_regmap() */
         m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
         if (!m) {
@@ -917,8 +937,8 @@ static int _regmap_select_page(struct regmap *map, unsigned int *reg,
         return 0;
 }
 
-static int _regmap_raw_write(struct regmap *map, unsigned int reg,
-                             const void *val, size_t val_len, bool async)
+int _regmap_raw_write(struct regmap *map, unsigned int reg,
+                      const void *val, size_t val_len, bool async)
 {
         struct regmap_range_node *range;
         unsigned long flags;
@@ -930,7 +950,7 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
         size_t len;
         int i;
 
-        BUG_ON(!map->bus);
+        WARN_ON(!map->bus);
 
         /* Check for unwritable registers before we start */
         if (map->writeable_reg)
@@ -943,8 +963,7 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
                 unsigned int ival;
                 int val_bytes = map->format.val_bytes;
                 for (i = 0; i < val_len / val_bytes; i++) {
-                        memcpy(map->work_buf, val + (i * val_bytes), val_bytes);
-                        ival = map->format.parse_val(map->work_buf);
+                        ival = map->format.parse_val(val + (i * val_bytes));
                         ret = regcache_write(map, reg + (i * map->reg_stride),
                                              ival);
                         if (ret) {
@@ -999,6 +1018,8 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
                 if (!async)
                         return -ENOMEM;
 
+                trace_regmap_async_write_start(map->dev, reg, val_len);
+
                 async->work_buf = kzalloc(map->format.buf_size,
                                           GFP_KERNEL | GFP_DMA);
                 if (!async->work_buf) {
@@ -1036,6 +1057,8 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
                         kfree(async->work_buf);
                         kfree(async);
                 }
+
+                return ret;
         }
 
         trace_regmap_hw_write_start(map->dev, reg,
@@ -1077,6 +1100,17 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
         return ret;
 }
 
+/**
+ * regmap_can_raw_write - Test if regmap_raw_write() is supported
+ *
+ * @map: Map to check.
+ */
+bool regmap_can_raw_write(struct regmap *map)
+{
+        return map->bus && map->format.format_val && map->format.format_reg;
+}
+EXPORT_SYMBOL_GPL(regmap_can_raw_write);
+
 static int _regmap_bus_formatted_write(void *context, unsigned int reg,
                                        unsigned int val)
 {
@@ -1084,7 +1118,7 @@ static int _regmap_bus_formatted_write(void *context, unsigned int reg,
         struct regmap_range_node *range;
         struct regmap *map = context;
 
-        BUG_ON(!map->bus || !map->format.format_write);
+        WARN_ON(!map->bus || !map->format.format_write);
 
         range = _regmap_range_lookup(map, reg);
         if (range) {
@@ -1110,7 +1144,7 @@ static int _regmap_bus_raw_write(void *context, unsigned int reg,
 {
         struct regmap *map = context;
 
-        BUG_ON(!map->bus || !map->format.format_val);
+        WARN_ON(!map->bus || !map->format.format_val);
 
         map->format.format_val(map->work_buf + map->format.reg_bytes
                                + map->format.pad_bytes, val, 0);
@@ -1200,12 +1234,10 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
 {
         int ret;
 
-        if (!map->bus)
+        if (!regmap_can_raw_write(map))
                 return -EINVAL;
         if (val_len % map->format.val_bytes)
                 return -EINVAL;
-        if (reg % map->reg_stride)
-                return -EINVAL;
 
         map->lock(map->lock_arg);
 
@@ -1240,7 +1272,7 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
 
         if (!map->bus)
                 return -EINVAL;
-        if (!map->format.parse_val)
+        if (!map->format.parse_inplace)
                 return -EINVAL;
         if (reg % map->reg_stride)
                 return -EINVAL;
@@ -1258,7 +1290,7 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
                         goto out;
                 }
                 for (i = 0; i < val_count * val_bytes; i += val_bytes)
-                        map->format.parse_val(wval + i);
+                        map->format.parse_inplace(wval + i);
         }
         /*
          * Some devices does not support bulk write, for
@@ -1336,7 +1368,7 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
         u8 *u8 = map->work_buf;
         int ret;
 
-        BUG_ON(!map->bus);
+        WARN_ON(!map->bus);
 
         range = _regmap_range_lookup(map, reg);
         if (range) {
@@ -1391,7 +1423,7 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
         int ret;
         void *context = _regmap_map_get_context(map);
 
-        BUG_ON(!map->reg_read);
+        WARN_ON(!map->reg_read);
 
         if (!map->cache_bypass) {
                 ret = regcache_read(map, reg, val);
@@ -1519,7 +1551,7 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
 
         if (!map->bus)
                 return -EINVAL;
-        if (!map->format.parse_val)
+        if (!map->format.parse_inplace)
                 return -EINVAL;
         if (reg % map->reg_stride)
                 return -EINVAL;
@@ -1546,7 +1578,7 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
                 }
 
                 for (i = 0; i < val_count * val_bytes; i += val_bytes)
-                        map->format.parse_val(val + i);
+                        map->format.parse_inplace(val + i);
         } else {
                 for (i = 0; i < val_count; i++) {
                         unsigned int ival;
@@ -1640,6 +1672,8 @@ void regmap_async_complete_cb(struct regmap_async *async, int ret)
         struct regmap *map = async->map;
         bool wake;
 
+        trace_regmap_async_io_complete(map->dev);
+
         spin_lock(&map->async_lock);
 
         list_del(&async->list);
@@ -1686,6 +1720,8 @@ int regmap_async_complete(struct regmap *map)
         if (!map->bus->async_write)
                 return 0;
 
+        trace_regmap_async_complete_start(map->dev);
+
         wait_event(map->async_waitq, regmap_async_is_done(map));
 
         spin_lock_irqsave(&map->async_lock, flags);
@@ -1693,6 +1729,8 @@ int regmap_async_complete(struct regmap *map)
         map->async_ret = 0;
         spin_unlock_irqrestore(&map->async_lock, flags);
 
+        trace_regmap_async_complete_done(map->dev);
+
         return ret;
 }
 EXPORT_SYMBOL_GPL(regmap_async_complete);
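The parse_val()/parse_inplace() split separates the two jobs the old parse_val()
mixed together: returning a CPU-order value versus byte-swapping a buffer in place.
An illustrative sketch for the 16-bit big-endian case (only compilable inside
drivers/base/regmap, where struct regmap is visible):

    static void parse_example(struct regmap *map)   /* illustrative only */
    {
            __be16 raw = cpu_to_be16(0x1234);
            unsigned int v;

            /* parse_val() now takes const void * and leaves the buffer alone */
            v = map->format.parse_val(&raw);        /* v == 0x1234 */

            /* parse_inplace() converts the buffer itself, which is what
             * regmap_bulk_read()/regmap_bulk_write() iterate with */
            map->format.parse_inplace(&raw);        /* raw now CPU-endian */
    }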