Diffstat (limited to 'drivers/base')

 drivers/base/Kconfig                 |  11
 drivers/base/Makefile                |   1
 drivers/base/bus.c                   |   6
 drivers/base/class.c                 |   4
 drivers/base/core.c                  |   8
 drivers/base/dd.c                    |   9
 drivers/base/devtmpfs.c              |   3
 drivers/base/dma-buf.c               |  75
 drivers/base/firmware_class.c        | 552
 drivers/base/memory.c                |  10
 drivers/base/pinctrl.c               |  69
 drivers/base/power/domain.c          |   3
 drivers/base/power/main.c            |   2
 drivers/base/power/opp.c             |  19
 drivers/base/power/power.h           |   8
 drivers/base/power/qos.c             | 218
 drivers/base/power/runtime.c         |  89
 drivers/base/power/sysfs.c           |   1
 drivers/base/power/wakeup.c          |   6
 drivers/base/regmap/Makefile         |   2
 drivers/base/regmap/internal.h       |  22
 drivers/base/regmap/regcache-flat.c  |  72
 drivers/base/regmap/regcache.c       |   1
 drivers/base/regmap/regmap-debugfs.c |  52
 drivers/base/regmap/regmap-irq.c     | 126
 drivers/base/regmap/regmap-mmio.c    |  79
 drivers/base/regmap/regmap-spi.c     |  54
 drivers/base/regmap/regmap.c         | 351
 28 files changed, 1347 insertions(+), 506 deletions(-)
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index c8b453939da2..07abd9d76f7f 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -145,6 +145,17 @@ config EXTRA_FIRMWARE_DIR
 	  this option you can point it elsewhere, such as /lib/firmware/ or
 	  some other directory containing the firmware files.
 
+config FW_LOADER_USER_HELPER
+	bool "Fallback user-helper invocation for firmware loading"
+	depends on FW_LOADER
+	default y
+	help
+	  This option enables / disables the invocation of user-helper
+	  (e.g. udev) for loading firmware files as a fallback after the
+	  direct file loading in kernel fails. The user-mode helper is
+	  no longer required unless you have a special firmware file that
+	  resides in a non-standard path.
+
 config DEBUG_DRIVER
 	bool "Driver Core verbose debug messages"
 	depends on DEBUG_KERNEL
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 5aa2d703d19f..4e22ce3ed73d 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -21,6 +21,7 @@ endif
 obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor.o
 obj-$(CONFIG_REGMAP)	+= regmap/
 obj-$(CONFIG_SOC_BUS) += soc.o
+obj-$(CONFIG_PINCTRL) += pinctrl.o
 
 ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
 
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 24eb07868344..519865b53f76 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -290,7 +290,7 @@ int bus_for_each_dev(struct bus_type *bus, struct device *start,
 	struct device *dev;
 	int error = 0;
 
-	if (!bus)
+	if (!bus || !bus->p)
 		return -EINVAL;
 
 	klist_iter_init_node(&bus->p->klist_devices, &i,
@@ -324,7 +324,7 @@ struct device *bus_find_device(struct bus_type *bus,
 	struct klist_iter i;
 	struct device *dev;
 
-	if (!bus)
+	if (!bus || !bus->p)
 		return NULL;
 
 	klist_iter_init_node(&bus->p->klist_devices, &i,
@@ -700,12 +700,12 @@ int bus_add_driver(struct device_driver *drv)
 	if (error)
 		goto out_unregister;
 
+	klist_add_tail(&priv->knode_bus, &bus->p->klist_drivers);
 	if (drv->bus->p->drivers_autoprobe) {
 		error = driver_attach(drv);
 		if (error)
 			goto out_unregister;
 	}
-	klist_add_tail(&priv->knode_bus, &bus->p->klist_drivers);
 	module_add_driver(drv->owner, drv);
 
 	error = driver_create_file(drv, &driver_attr_uevent);
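
Usage sketch (illustration only, not part of the patch; the callback and helper names are hypothetical): with the new !bus->p guard, iterating over an unregistered bus now fails cleanly with -EINVAL instead of dereferencing a NULL subsys_private pointer.

    #include <linux/device.h>

    /* Hypothetical callback: count the devices on a bus. */
    static int count_dev(struct device *dev, void *data)
    {
            (*(int *)data)++;
            return 0;               /* non-zero would stop the iteration */
    }

    static int count_bus_devices(struct bus_type *bus)
    {
            int count = 0;
            int ret = bus_for_each_dev(bus, NULL, &count, count_dev);

            return ret ? ret : count;   /* -EINVAL if bus or bus->p is NULL */
    }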
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 03243d4002fd..3ce845471327 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -420,8 +420,8 @@ EXPORT_SYMBOL_GPL(class_for_each_device);
  * code. There's no locking restriction.
  */
 struct device *class_find_device(struct class *class, struct device *start,
-				 void *data,
-				 int (*match)(struct device *, void *))
+				 const void *data,
+				 int (*match)(struct device *, const void *))
 {
 	struct class_dev_iter iter;
 	struct device *dev;
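
Illustration (not part of the patch; the callback name is hypothetical): the const-qualified prototype lets callers pass read-only match data without casts.

    /* Match callback under the new const-correct signature. */
    static int match_by_name(struct device *dev, const void *data)
    {
            return sysfs_streq(dev_name(dev), data);
    }

    /* struct device *dev = class_find_device(&foo_class, NULL, "foo0",
     *                                        match_by_name);
     * A found device is returned with an elevated reference count;
     * drop it with put_device() when done. */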
diff --git a/drivers/base/core.c b/drivers/base/core.c
index a235085e343c..56536f4b0f6b 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1617,9 +1617,9 @@ struct device *device_create(struct class *class, struct device *parent,
 }
 EXPORT_SYMBOL_GPL(device_create);
 
-static int __match_devt(struct device *dev, void *data)
+static int __match_devt(struct device *dev, const void *data)
 {
-	dev_t *devt = data;
+	const dev_t *devt = data;
 
 	return dev->devt == *devt;
 }
@@ -1685,8 +1685,6 @@ EXPORT_SYMBOL_GPL(device_destroy);
  */
 int device_rename(struct device *dev, const char *new_name)
 {
-	char *old_class_name = NULL;
-	char *new_class_name = NULL;
 	char *old_device_name = NULL;
 	int error;
 
@@ -1717,8 +1715,6 @@ int device_rename(struct device *dev, const char *new_name)
 out:
 	put_device(dev);
 
-	kfree(new_class_name);
-	kfree(old_class_name);
 	kfree(old_device_name);
 
 	return error;
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index e3bbed8a617c..bb5645ea0282 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -24,6 +24,7 @@
 #include <linux/wait.h>
 #include <linux/async.h>
 #include <linux/pm_runtime.h>
+#include <linux/pinctrl/devinfo.h>
 
 #include "base.h"
 #include "power/power.h"
@@ -172,6 +173,8 @@ static int deferred_probe_initcall(void)
 
 	driver_deferred_probe_enable = true;
 	driver_deferred_probe_trigger();
+	/* Sort as many dependencies as possible before exiting initcalls */
+	flush_workqueue(deferred_wq);
 	return 0;
 }
 late_initcall(deferred_probe_initcall);
@@ -269,6 +272,12 @@ static int really_probe(struct device *dev, struct device_driver *drv)
 	WARN_ON(!list_empty(&dev->devres_head));
 
 	dev->driver = drv;
+
+	/* If using pinctrl, bind pins now before probing */
+	ret = pinctrl_bind_pins(dev);
+	if (ret)
+		goto probe_failed;
+
 	if (driver_sysfs_add(dev)) {
 		printk(KERN_ERR "%s: driver_sysfs_add(%s) failed\n",
 		       __func__, dev_name(dev));
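
Driver-side sketch (illustration only; foo_probe and the clock name are hypothetical): a probe routine that returns -EPROBE_DEFER lands on the deferred-probe workqueue, which the flush_workqueue() call above now drains once before initcalls finish.

    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/platform_device.h>

    static int foo_probe(struct platform_device *pdev)
    {
            struct clk *clk = devm_clk_get(&pdev->dev, "bus");

            /* Provider not bound yet? -EPROBE_DEFER makes the core retry. */
            if (IS_ERR(clk))
                    return PTR_ERR(clk);

            return clk_prepare_enable(clk);
    }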
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 17cf7cad601e..01fc5b07f951 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -302,7 +302,8 @@ static int handle_remove(const char *nodename, struct device *dev)
 
 	if (dentry->d_inode) {
 		struct kstat stat;
-		err = vfs_getattr(parent.mnt, dentry, &stat);
+		struct path p = {.mnt = parent.mnt, .dentry = dentry};
+		err = vfs_getattr(&p, &stat);
 		if (!err && dev_mynode(dev, dentry->d_inode, &stat)) {
 			struct iattr newattrs;
 			/*
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
index a3f79c495a41..2a7cb0df176b 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/base/dma-buf.c
@@ -39,6 +39,8 @@ static int dma_buf_release(struct inode *inode, struct file *file)
 
 	dmabuf = file->private_data;
 
+	BUG_ON(dmabuf->vmapping_counter);
+
 	dmabuf->ops->release(dmabuf);
 	kfree(dmabuf);
 	return 0;
@@ -134,15 +136,14 @@ EXPORT_SYMBOL_GPL(dma_buf_export);
  */
 int dma_buf_fd(struct dma_buf *dmabuf, int flags)
 {
-	int error, fd;
+	int fd;
 
 	if (!dmabuf || !dmabuf->file)
 		return -EINVAL;
 
-	error = get_unused_fd_flags(flags);
-	if (error < 0)
-		return error;
-	fd = error;
+	fd = get_unused_fd_flags(flags);
+	if (fd < 0)
+		return fd;
 
 	fd_install(fd, dmabuf->file);
 
@@ -446,6 +447,9 @@ EXPORT_SYMBOL_GPL(dma_buf_kunmap);
 int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
 		 unsigned long pgoff)
 {
+	struct file *oldfile;
+	int ret;
+
 	if (WARN_ON(!dmabuf || !vma))
 		return -EINVAL;
 
@@ -459,14 +463,22 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
 		return -EINVAL;
 
 	/* readjust the vma */
-	if (vma->vm_file)
-		fput(vma->vm_file);
-
-	vma->vm_file = get_file(dmabuf->file);
-
+	get_file(dmabuf->file);
+	oldfile = vma->vm_file;
+	vma->vm_file = dmabuf->file;
 	vma->vm_pgoff = pgoff;
 
-	return dmabuf->ops->mmap(dmabuf, vma);
+	ret = dmabuf->ops->mmap(dmabuf, vma);
+	if (ret) {
+		/* restore old parameters on failure */
+		vma->vm_file = oldfile;
+		fput(dmabuf->file);
+	} else {
+		if (oldfile)
+			fput(oldfile);
+	}
+	return ret;
+
 }
 EXPORT_SYMBOL_GPL(dma_buf_mmap);
 
@@ -482,12 +494,34 @@ EXPORT_SYMBOL_GPL(dma_buf_mmap);
  */
 void *dma_buf_vmap(struct dma_buf *dmabuf)
 {
+	void *ptr;
+
 	if (WARN_ON(!dmabuf))
 		return NULL;
 
-	if (dmabuf->ops->vmap)
-		return dmabuf->ops->vmap(dmabuf);
-	return NULL;
+	if (!dmabuf->ops->vmap)
+		return NULL;
+
+	mutex_lock(&dmabuf->lock);
+	if (dmabuf->vmapping_counter) {
+		dmabuf->vmapping_counter++;
+		BUG_ON(!dmabuf->vmap_ptr);
+		ptr = dmabuf->vmap_ptr;
+		goto out_unlock;
+	}
+
+	BUG_ON(dmabuf->vmap_ptr);
+
+	ptr = dmabuf->ops->vmap(dmabuf);
+	if (IS_ERR_OR_NULL(ptr))
+		goto out_unlock;
+
+	dmabuf->vmap_ptr = ptr;
+	dmabuf->vmapping_counter = 1;
+
+out_unlock:
+	mutex_unlock(&dmabuf->lock);
+	return ptr;
 }
 EXPORT_SYMBOL_GPL(dma_buf_vmap);
 
@@ -501,7 +535,16 @@ void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
 	if (WARN_ON(!dmabuf))
 		return;
 
-	if (dmabuf->ops->vunmap)
-		dmabuf->ops->vunmap(dmabuf, vaddr);
+	BUG_ON(!dmabuf->vmap_ptr);
+	BUG_ON(dmabuf->vmapping_counter == 0);
+	BUG_ON(dmabuf->vmap_ptr != vaddr);
+
+	mutex_lock(&dmabuf->lock);
+	if (--dmabuf->vmapping_counter == 0) {
+		if (dmabuf->ops->vunmap)
+			dmabuf->ops->vunmap(dmabuf, vaddr);
+		dmabuf->vmap_ptr = NULL;
+	}
+	mutex_unlock(&dmabuf->lock);
 }
 EXPORT_SYMBOL_GPL(dma_buf_vunmap);
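
Usage sketch (illustration only; the importer function is hypothetical): dma_buf_vmap()/dma_buf_vunmap() are now reference counted, so nested vmaps share one kernel mapping and only the final vunmap tears it down.

    #include <linux/dma-buf.h>
    #include <linux/err.h>

    static int read_first_byte(struct dma_buf *buf, u8 *out)
    {
            void *vaddr = dma_buf_vmap(buf);    /* first call maps, later calls reuse */

            if (IS_ERR_OR_NULL(vaddr))
                    return -ENOMEM;

            *out = *(u8 *)vaddr;
            dma_buf_vunmap(buf, vaddr);         /* must pass back the same vaddr */
            return 0;
    }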
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index b392b353be39..4b1f9265887f 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -88,11 +88,6 @@ enum {
 	FW_STATUS_ABORT,
 };
 
-enum fw_buf_fmt {
-	VMALLOC_BUF,	/* used in direct loading */
-	PAGE_BUF,	/* used in loading via userspace */
-};
-
 static int loading_timeout = 60;	/* In seconds */
 
 static inline long firmware_loading_timeout(void)
@@ -128,12 +123,14 @@ struct firmware_buf {
 	struct completion completion;
 	struct firmware_cache *fwc;
 	unsigned long status;
-	enum fw_buf_fmt fmt;
 	void *data;
 	size_t size;
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+	bool is_paged_buf;
 	struct page **pages;
 	int nr_pages;
 	int page_array_size;
+#endif
 	char fw_id[];
 };
 
@@ -142,14 +139,6 @@ struct fw_cache_entry {
 	char name[];
 };
 
-struct firmware_priv {
-	struct delayed_work timeout_work;
-	bool nowait;
-	struct device dev;
-	struct firmware_buf *buf;
-	struct firmware *fw;
-};
-
 struct fw_name_devm {
 	unsigned long magic;
 	char name[];
@@ -182,7 +171,6 @@ static struct firmware_buf *__allocate_fw_buf(const char *fw_name,
 	strcpy(buf->fw_id, fw_name);
 	buf->fwc = fwc;
 	init_completion(&buf->completion);
-	buf->fmt = VMALLOC_BUF;
 
 	pr_debug("%s: fw-%s buf=%p\n", __func__, fw_name, buf);
 
@@ -240,7 +228,6 @@ static void __fw_free_buf(struct kref *ref)
 {
 	struct firmware_buf *buf = to_fwbuf(ref);
 	struct firmware_cache *fwc = buf->fwc;
-	int i;
 
 	pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
 		 __func__, buf->fw_id, buf, buf->data,
@@ -249,13 +236,15 @@ static void __fw_free_buf(struct kref *ref)
 	list_del(&buf->list);
 	spin_unlock(&fwc->lock);
 
-
-	if (buf->fmt == PAGE_BUF) {
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+	if (buf->is_paged_buf) {
+		int i;
 		vunmap(buf->data);
 		for (i = 0; i < buf->nr_pages; i++)
 			__free_page(buf->pages[i]);
 		kfree(buf->pages);
 	} else
+#endif
 		vfree(buf->data);
 	kfree(buf);
 }
@@ -290,7 +279,7 @@ MODULE_PARM_DESC(path, "customized firmware image search path with a higher prio
 static noinline_for_stack long fw_file_size(struct file *file)
 {
 	struct kstat st;
-	if (vfs_getattr(file->f_path.mnt, file->f_path.dentry, &st))
+	if (vfs_getattr(&file->f_path, &st))
 		return -1;
 	if (!S_ISREG(st.mode))
 		return -1;
@@ -319,7 +308,8 @@ static bool fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf
 	return true;
 }
 
-static bool fw_get_filesystem_firmware(struct firmware_buf *buf)
+static bool fw_get_filesystem_firmware(struct device *device,
+				       struct firmware_buf *buf)
 {
 	int i;
 	bool success = false;
@@ -343,9 +333,114 @@ static bool fw_get_filesystem_firmware(struct firmware_buf *buf)
 			break;
 	}
 	__putname(path);
+
+	if (success) {
+		dev_dbg(device, "firmware: direct-loading firmware %s\n",
+			buf->fw_id);
+		mutex_lock(&fw_lock);
+		set_bit(FW_STATUS_DONE, &buf->status);
+		complete_all(&buf->completion);
+		mutex_unlock(&fw_lock);
+	}
+
 	return success;
 }
 
+/* firmware holds the ownership of pages */
+static void firmware_free_data(const struct firmware *fw)
+{
+	/* Loaded directly? */
+	if (!fw->priv) {
+		vfree(fw->data);
+		return;
+	}
+	fw_free_buf(fw->priv);
+}
+
+/* store the pages buffer info firmware from buf */
+static void fw_set_page_data(struct firmware_buf *buf, struct firmware *fw)
+{
+	fw->priv = buf;
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+	fw->pages = buf->pages;
+#endif
+	fw->size = buf->size;
+	fw->data = buf->data;
+
+	pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
+		 __func__, buf->fw_id, buf, buf->data,
+		 (unsigned int)buf->size);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static void fw_name_devm_release(struct device *dev, void *res)
+{
+	struct fw_name_devm *fwn = res;
+
+	if (fwn->magic == (unsigned long)&fw_cache)
+		pr_debug("%s: fw_name-%s devm-%p released\n",
+			 __func__, fwn->name, res);
+}
+
+static int fw_devm_match(struct device *dev, void *res,
+			 void *match_data)
+{
+	struct fw_name_devm *fwn = res;
+
+	return (fwn->magic == (unsigned long)&fw_cache) &&
+		!strcmp(fwn->name, match_data);
+}
+
+static struct fw_name_devm *fw_find_devm_name(struct device *dev,
+					      const char *name)
+{
+	struct fw_name_devm *fwn;
+
+	fwn = devres_find(dev, fw_name_devm_release,
+			  fw_devm_match, (void *)name);
+	return fwn;
+}
+
+/* add firmware name into devres list */
+static int fw_add_devm_name(struct device *dev, const char *name)
+{
+	struct fw_name_devm *fwn;
+
+	fwn = fw_find_devm_name(dev, name);
+	if (fwn)
+		return 1;
+
+	fwn = devres_alloc(fw_name_devm_release, sizeof(struct fw_name_devm) +
+			   strlen(name) + 1, GFP_KERNEL);
+	if (!fwn)
+		return -ENOMEM;
+
+	fwn->magic = (unsigned long)&fw_cache;
+	strcpy(fwn->name, name);
+	devres_add(dev, fwn);
+
+	return 0;
+}
+#else
+static int fw_add_devm_name(struct device *dev, const char *name)
+{
+	return 0;
+}
+#endif
+
+
+/*
+ * user-mode helper code
+ */
+#ifdef CONFIG_FW_LOADER_USER_HELPER
+struct firmware_priv {
+	struct delayed_work timeout_work;
+	bool nowait;
+	struct device dev;
+	struct firmware_buf *buf;
+	struct firmware *fw;
+};
+
 static struct firmware_priv *to_firmware_priv(struct device *dev)
 {
 	return container_of(dev, struct firmware_priv, dev);
@@ -359,6 +454,9 @@ static void fw_load_abort(struct firmware_priv *fw_priv)
 	complete_all(&buf->completion);
 }
 
+#define is_fw_load_aborted(buf)	\
+	test_bit(FW_STATUS_ABORT, &(buf)->status)
+
 static ssize_t firmware_timeout_show(struct class *class,
 				     struct class_attribute *attr,
 				     char *buf)
@@ -435,17 +533,6 @@ static ssize_t firmware_loading_show(struct device *dev,
 	return sprintf(buf, "%d\n", loading);
 }
 
-/* firmware holds the ownership of pages */
-static void firmware_free_data(const struct firmware *fw)
-{
-	/* Loaded directly? */
-	if (!fw->priv) {
-		vfree(fw->data);
-		return;
-	}
-	fw_free_buf(fw->priv);
-}
-
 /* Some architectures don't have PAGE_KERNEL_RO */
 #ifndef PAGE_KERNEL_RO
 #define PAGE_KERNEL_RO PAGE_KERNEL
@@ -454,7 +541,7 @@ static void firmware_free_data(const struct firmware *fw)
 /* one pages buffer should be mapped/unmapped only once */
 static int fw_map_pages_buf(struct firmware_buf *buf)
 {
-	if (buf->fmt != PAGE_BUF)
+	if (!buf->is_paged_buf)
 		return 0;
 
 	if (buf->data)
@@ -727,171 +814,16 @@ exit:
 	return fw_priv;
 }
 
-/* store the pages buffer info firmware from buf */
-static void fw_set_page_data(struct firmware_buf *buf, struct firmware *fw)
-{
-	fw->priv = buf;
-	fw->pages = buf->pages;
-	fw->size = buf->size;
-	fw->data = buf->data;
-
-	pr_debug("%s: fw-%s buf=%p data=%p size=%u\n",
-		 __func__, buf->fw_id, buf, buf->data,
-		 (unsigned int)buf->size);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static void fw_name_devm_release(struct device *dev, void *res)
-{
-	struct fw_name_devm *fwn = res;
-
-	if (fwn->magic == (unsigned long)&fw_cache)
-		pr_debug("%s: fw_name-%s devm-%p released\n",
-			 __func__, fwn->name, res);
-}
-
-static int fw_devm_match(struct device *dev, void *res,
-			 void *match_data)
-{
-	struct fw_name_devm *fwn = res;
-
-	return (fwn->magic == (unsigned long)&fw_cache) &&
-		!strcmp(fwn->name, match_data);
-}
-
-static struct fw_name_devm *fw_find_devm_name(struct device *dev,
-					      const char *name)
-{
-	struct fw_name_devm *fwn;
-
-	fwn = devres_find(dev, fw_name_devm_release,
-			  fw_devm_match, (void *)name);
-	return fwn;
-}
-
-/* add firmware name into devres list */
-static int fw_add_devm_name(struct device *dev, const char *name)
-{
-	struct fw_name_devm *fwn;
-
-	fwn = fw_find_devm_name(dev, name);
-	if (fwn)
-		return 1;
-
-	fwn = devres_alloc(fw_name_devm_release, sizeof(struct fw_name_devm) +
-			   strlen(name) + 1, GFP_KERNEL);
-	if (!fwn)
-		return -ENOMEM;
-
-	fwn->magic = (unsigned long)&fw_cache;
-	strcpy(fwn->name, name);
-	devres_add(dev, fwn);
-
-	return 0;
-}
-#else
-static int fw_add_devm_name(struct device *dev, const char *name)
-{
-	return 0;
-}
-#endif
-
-static void _request_firmware_cleanup(const struct firmware **firmware_p)
-{
-	release_firmware(*firmware_p);
-	*firmware_p = NULL;
-}
-
-static struct firmware_priv *
-_request_firmware_prepare(const struct firmware **firmware_p, const char *name,
-			  struct device *device, bool uevent, bool nowait)
-{
-	struct firmware *firmware;
-	struct firmware_priv *fw_priv = NULL;
-	struct firmware_buf *buf;
-	int ret;
-
-	if (!firmware_p)
-		return ERR_PTR(-EINVAL);
-
-	*firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
-	if (!firmware) {
-		dev_err(device, "%s: kmalloc(struct firmware) failed\n",
-			__func__);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	if (fw_get_builtin_firmware(firmware, name)) {
-		dev_dbg(device, "firmware: using built-in firmware %s\n", name);
-		return NULL;
-	}
-
-	ret = fw_lookup_and_allocate_buf(name, &fw_cache, &buf);
-	if (!ret)
-		fw_priv = fw_create_instance(firmware, name, device,
-					     uevent, nowait);
-
-	if (IS_ERR(fw_priv) || ret < 0) {
-		kfree(firmware);
-		*firmware_p = NULL;
-		return ERR_PTR(-ENOMEM);
-	} else if (fw_priv) {
-		fw_priv->buf = buf;
-
-		/*
-		 * bind with 'buf' now to avoid warning in failure path
-		 * of requesting firmware.
-		 */
-		firmware->priv = buf;
-		return fw_priv;
-	}
-
-	/* share the cached buf, which is inprogessing or completed */
- check_status:
-	mutex_lock(&fw_lock);
-	if (test_bit(FW_STATUS_ABORT, &buf->status)) {
-		fw_priv = ERR_PTR(-ENOENT);
-		firmware->priv = buf;
-		_request_firmware_cleanup(firmware_p);
-		goto exit;
-	} else if (test_bit(FW_STATUS_DONE, &buf->status)) {
-		fw_priv = NULL;
-		fw_set_page_data(buf, firmware);
-		goto exit;
-	}
-	mutex_unlock(&fw_lock);
-	wait_for_completion(&buf->completion);
-	goto check_status;
-
-exit:
-	mutex_unlock(&fw_lock);
-	return fw_priv;
-}
-
+/* load a firmware via user helper */
 static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
 				  long timeout)
 {
 	int retval = 0;
 	struct device *f_dev = &fw_priv->dev;
 	struct firmware_buf *buf = fw_priv->buf;
-	struct firmware_cache *fwc = &fw_cache;
-	int direct_load = 0;
-
-	/* try direct loading from fs first */
-	if (fw_get_filesystem_firmware(buf)) {
-		dev_dbg(f_dev->parent, "firmware: direct-loading"
-			" firmware %s\n", buf->fw_id);
-
-		mutex_lock(&fw_lock);
-		set_bit(FW_STATUS_DONE, &buf->status);
-		mutex_unlock(&fw_lock);
-		complete_all(&buf->completion);
-		direct_load = 1;
-		goto handle_fw;
-	}
 
 	/* fall back on userspace loading */
-	buf->fmt = PAGE_BUF;
+	buf->is_paged_buf = true;
 
 	dev_set_uevent_suppress(f_dev, true);
 
@@ -929,47 +861,196 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
 
 	cancel_delayed_work_sync(&fw_priv->timeout_work);
 
-handle_fw:
+	fw_priv->buf = NULL;
+
+	device_remove_file(f_dev, &dev_attr_loading);
+err_del_bin_attr:
+	device_remove_bin_file(f_dev, &firmware_attr_data);
+err_del_dev:
+	device_del(f_dev);
+err_put_dev:
+	put_device(f_dev);
+	return retval;
+}
+
+static int fw_load_from_user_helper(struct firmware *firmware,
+				    const char *name, struct device *device,
+				    bool uevent, bool nowait, long timeout)
+{
+	struct firmware_priv *fw_priv;
+
+	fw_priv = fw_create_instance(firmware, name, device, uevent, nowait);
+	if (IS_ERR(fw_priv))
+		return PTR_ERR(fw_priv);
+
+	fw_priv->buf = firmware->priv;
+	return _request_firmware_load(fw_priv, uevent, timeout);
+}
+#else /* CONFIG_FW_LOADER_USER_HELPER */
+static inline int
+fw_load_from_user_helper(struct firmware *firmware, const char *name,
+			 struct device *device, bool uevent, bool nowait,
+			 long timeout)
+{
+	return -ENOENT;
+}
+
+/* No abort during direct loading */
+#define is_fw_load_aborted(buf) false
+
+#endif /* CONFIG_FW_LOADER_USER_HELPER */
+
+
+/* wait until the shared firmware_buf becomes ready (or error) */
+static int sync_cached_firmware_buf(struct firmware_buf *buf)
+{
+	int ret = 0;
+
+	mutex_lock(&fw_lock);
+	while (!test_bit(FW_STATUS_DONE, &buf->status)) {
+		if (is_fw_load_aborted(buf)) {
+			ret = -ENOENT;
+			break;
+		}
+		mutex_unlock(&fw_lock);
+		wait_for_completion(&buf->completion);
+		mutex_lock(&fw_lock);
+	}
+	mutex_unlock(&fw_lock);
+	return ret;
+}
+
+/* prepare firmware and firmware_buf structs;
+ * return 0 if a firmware is already assigned, 1 if need to load one,
+ * or a negative error code
+ */
+static int
+_request_firmware_prepare(struct firmware **firmware_p, const char *name,
+			  struct device *device)
+{
+	struct firmware *firmware;
+	struct firmware_buf *buf;
+	int ret;
+
+	*firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
+	if (!firmware) {
+		dev_err(device, "%s: kmalloc(struct firmware) failed\n",
+			__func__);
+		return -ENOMEM;
+	}
+
+	if (fw_get_builtin_firmware(firmware, name)) {
+		dev_dbg(device, "firmware: using built-in firmware %s\n", name);
+		return 0; /* assigned */
+	}
+
+	ret = fw_lookup_and_allocate_buf(name, &fw_cache, &buf);
+
+	/*
+	 * bind with 'buf' now to avoid warning in failure path
+	 * of requesting firmware.
+	 */
+	firmware->priv = buf;
+
+	if (ret > 0) {
+		ret = sync_cached_firmware_buf(buf);
+		if (!ret) {
+			fw_set_page_data(buf, firmware);
+			return 0; /* assigned */
+		}
+	}
+
+	if (ret < 0)
+		return ret;
+	return 1; /* need to load */
+}
+
+static int assign_firmware_buf(struct firmware *fw, struct device *device)
+{
+	struct firmware_buf *buf = fw->priv;
+
 	mutex_lock(&fw_lock);
-	if (!buf->size || test_bit(FW_STATUS_ABORT, &buf->status))
-		retval = -ENOENT;
+	if (!buf->size || is_fw_load_aborted(buf)) {
+		mutex_unlock(&fw_lock);
+		return -ENOENT;
+	}
 
 	/*
 	 * add firmware name into devres list so that we can auto cache
 	 * and uncache firmware for device.
 	 *
-	 * f_dev->parent may has been deleted already, but the problem
+	 * device may has been deleted already, but the problem
 	 * should be fixed in devres or driver core.
 	 */
-	if (!retval && f_dev->parent)
-		fw_add_devm_name(f_dev->parent, buf->fw_id);
+	if (device)
+		fw_add_devm_name(device, buf->fw_id);
 
 	/*
 	 * After caching firmware image is started, let it piggyback
 	 * on request firmware.
 	 */
-	if (!retval && fwc->state == FW_LOADER_START_CACHE) {
+	if (buf->fwc->state == FW_LOADER_START_CACHE) {
 		if (fw_cache_piggyback_on_request(buf->fw_id))
 			kref_get(&buf->ref);
 	}
 
 	/* pass the pages buffer to driver at the last minute */
-	fw_set_page_data(buf, fw_priv->fw);
-
-	fw_priv->buf = NULL;
+	fw_set_page_data(buf, fw);
 	mutex_unlock(&fw_lock);
+	return 0;
+}
 
-	if (direct_load)
-		goto err_put_dev;
-
-	device_remove_file(f_dev, &dev_attr_loading);
-err_del_bin_attr:
-	device_remove_bin_file(f_dev, &firmware_attr_data);
-err_del_dev:
-	device_del(f_dev);
-err_put_dev:
-	put_device(f_dev);
-	return retval;
+/* called from request_firmware() and request_firmware_work_func() */
+static int
+_request_firmware(const struct firmware **firmware_p, const char *name,
+		  struct device *device, bool uevent, bool nowait)
+{
+	struct firmware *fw;
+	long timeout;
+	int ret;
+
+	if (!firmware_p)
+		return -EINVAL;
+
+	ret = _request_firmware_prepare(&fw, name, device);
+	if (ret <= 0) /* error or already assigned */
+		goto out;
+
+	ret = 0;
+	timeout = firmware_loading_timeout();
+	if (nowait) {
+		timeout = usermodehelper_read_lock_wait(timeout);
+		if (!timeout) {
+			dev_dbg(device, "firmware: %s loading timed out\n",
+				name);
+			ret = -EBUSY;
+			goto out;
+		}
+	} else {
+		ret = usermodehelper_read_trylock();
+		if (WARN_ON(ret)) {
+			dev_err(device, "firmware: %s will not be loaded\n",
+				name);
+			goto out;
+		}
+	}
+
+	if (!fw_get_filesystem_firmware(device, fw->priv))
+		ret = fw_load_from_user_helper(fw, name, device,
+					       uevent, nowait, timeout);
+	if (!ret)
+		ret = assign_firmware_buf(fw, device);
+
+	usermodehelper_read_unlock();
+
+ out:
+	if (ret < 0) {
+		release_firmware(fw);
+		fw = NULL;
+	}
+
+	*firmware_p = fw;
+	return ret;
 }
 
 /**
@@ -996,26 +1077,7 @@ int
 request_firmware(const struct firmware **firmware_p, const char *name,
 		 struct device *device)
 {
-	struct firmware_priv *fw_priv;
-	int ret;
-
-	fw_priv = _request_firmware_prepare(firmware_p, name, device, true,
-					    false);
-	if (IS_ERR_OR_NULL(fw_priv))
-		return PTR_RET(fw_priv);
-
-	ret = usermodehelper_read_trylock();
-	if (WARN_ON(ret)) {
-		dev_err(device, "firmware: %s will not be loaded\n", name);
-	} else {
-		ret = _request_firmware_load(fw_priv, true,
-					     firmware_loading_timeout());
-		usermodehelper_read_unlock();
-	}
-	if (ret)
-		_request_firmware_cleanup(firmware_p);
-
-	return ret;
+	return _request_firmware(firmware_p, name, device, true, false);
 }
 
 /**
@@ -1046,33 +1108,13 @@ static void request_firmware_work_func(struct work_struct *work)
 {
 	struct firmware_work *fw_work;
 	const struct firmware *fw;
-	struct firmware_priv *fw_priv;
-	long timeout;
-	int ret;
 
 	fw_work = container_of(work, struct firmware_work, work);
-	fw_priv = _request_firmware_prepare(&fw, fw_work->name, fw_work->device,
-					    fw_work->uevent, true);
-	if (IS_ERR_OR_NULL(fw_priv)) {
-		ret = PTR_RET(fw_priv);
-		goto out;
-	}
-
-	timeout = usermodehelper_read_lock_wait(firmware_loading_timeout());
-	if (timeout) {
-		ret = _request_firmware_load(fw_priv, fw_work->uevent, timeout);
-		usermodehelper_read_unlock();
-	} else {
-		dev_dbg(fw_work->device, "firmware: %s loading timed out\n",
-			fw_work->name);
-		ret = -EAGAIN;
-	}
-	if (ret)
-		_request_firmware_cleanup(&fw);
 
- out:
+	_request_firmware(&fw, fw_work->name, fw_work->device,
+			  fw_work->uevent, true);
 	fw_work->cont(fw, fw_work->context);
-	put_device(fw_work->device);
+	put_device(fw_work->device); /* taken in request_firmware_nowait() */
 
 	module_put(fw_work->module);
 	kfree(fw_work);
@@ -1474,7 +1516,11 @@ static void __init fw_cache_init(void)
 static int __init firmware_class_init(void)
 {
 	fw_cache_init();
+#ifdef CONFIG_FW_LOADER_USER_HELPER
 	return class_register(&firmware_class);
+#else
+	return 0;
+#endif
 }
 
 static void __exit firmware_class_exit(void)
@@ -1483,7 +1529,9 @@ static void __exit firmware_class_exit(void)
 	unregister_syscore_ops(&fw_syscore_ops);
 	unregister_pm_notifier(&fw_cache.pm_notify);
 #endif
+#ifdef CONFIG_FW_LOADER_USER_HELPER
 	class_unregister(&firmware_class);
+#endif
 }
 
 fs_initcall(firmware_class_init);
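
Consumer sketch (illustration only; the firmware path and helper name are placeholders): callers of request_firmware() are unaffected by the split — the kernel now tries direct filesystem loading first and only falls back to the user-mode helper when CONFIG_FW_LOADER_USER_HELPER=y.

    #include <linux/firmware.h>

    static int foo_load_fw(struct device *dev)
    {
            const struct firmware *fw;
            int err = request_firmware(&fw, "foo/foo-v1.bin", dev);

            if (err)
                    return err;     /* neither loading path found the file */

            /* ... feed fw->data / fw->size to the hardware ... */

            release_firmware(fw);
            return 0;
    }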
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 987604d56c83..a51007b79032 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -494,8 +494,8 @@ store_hard_offline_page(struct device *dev,
 	return ret ? ret : count;
 }
 
-static DEVICE_ATTR(soft_offline_page, 0644, NULL, store_soft_offline_page);
-static DEVICE_ATTR(hard_offline_page, 0644, NULL, store_hard_offline_page);
+static DEVICE_ATTR(soft_offline_page, S_IWUSR, NULL, store_soft_offline_page);
+static DEVICE_ATTR(hard_offline_page, S_IWUSR, NULL, store_hard_offline_page);
 
 static __init int memory_fail_init(void)
 {
@@ -693,6 +693,12 @@ int offline_memory_block(struct memory_block *mem)
 	return ret;
 }
 
+/* return true if the memory block is offlined, otherwise, return false */
+bool is_memblock_offlined(struct memory_block *mem)
+{
+	return mem->state == MEM_OFFLINE;
+}
+
 /*
  * Initialize the sysfs support for memory devices...
  */
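
Caller sketch (hypothetical helper, not from this patch): the new predicate gives hotplug code a one-line state check.

    #include <linux/memory.h>

    /* Only proceed with removal once the block is fully offline. */
    static int check_block_gone(struct memory_block *mem)
    {
            return is_memblock_offlined(mem) ? 0 : -EBUSY;
    }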
diff --git a/drivers/base/pinctrl.c b/drivers/base/pinctrl.c
new file mode 100644
index 000000000000..67a274e86727
--- /dev/null
+++ b/drivers/base/pinctrl.c
@@ -0,0 +1,69 @@
+/*
+ * Driver core interface to the pinctrl subsystem.
+ *
+ * Copyright (C) 2012 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ * Based on bits of regulator core, gpio core and clk core
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/device.h>
+#include <linux/pinctrl/devinfo.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/slab.h>
+
+/**
+ * pinctrl_bind_pins() - called by the device core before probe
+ * @dev: the device that is just about to probe
+ */
+int pinctrl_bind_pins(struct device *dev)
+{
+	int ret;
+
+	dev->pins = devm_kzalloc(dev, sizeof(*(dev->pins)), GFP_KERNEL);
+	if (!dev->pins)
+		return -ENOMEM;
+
+	dev->pins->p = devm_pinctrl_get(dev);
+	if (IS_ERR(dev->pins->p)) {
+		dev_dbg(dev, "no pinctrl handle\n");
+		ret = PTR_ERR(dev->pins->p);
+		goto cleanup_alloc;
+	}
+
+	dev->pins->default_state = pinctrl_lookup_state(dev->pins->p,
+					PINCTRL_STATE_DEFAULT);
+	if (IS_ERR(dev->pins->default_state)) {
+		dev_dbg(dev, "no default pinctrl state\n");
+		ret = 0;
+		goto cleanup_get;
+	}
+
+	ret = pinctrl_select_state(dev->pins->p, dev->pins->default_state);
+	if (ret) {
+		dev_dbg(dev, "failed to activate default pinctrl state\n");
+		goto cleanup_get;
+	}
+
+	return 0;
+
+	/*
+	 * If no pinctrl handle or default state was found for this device,
+	 * let's explicitly free the pin container in the device, there is
+	 * no point in keeping it around.
+	 */
+cleanup_get:
+	devm_pinctrl_put(dev->pins->p);
+cleanup_alloc:
+	devm_kfree(dev, dev->pins);
+	dev->pins = NULL;
+
+	/* Only return deferrals */
+	if (ret != -EPROBE_DEFER)
+		ret = 0;
+
+	return ret;
+}
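
For comparison, a sketch of the now-redundant driver-side pattern (names hypothetical, not code from this patch): before the core ran pinctrl_bind_pins(), drivers wanting only their default pin state had to fetch and select it themselves.

    #include <linux/err.h>
    #include <linux/pinctrl/consumer.h>
    #include <linux/platform_device.h>

    static int foo_probe(struct platform_device *pdev)
    {
            struct pinctrl *p;

            /* Old pattern: the device core now does the equivalent before probe. */
            p = devm_pinctrl_get_select_default(&pdev->dev);
            if (IS_ERR(p))
                    return PTR_ERR(p);

            return 0;
    }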
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index acc3a8ded29d..9a6b05a35603 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -433,8 +433,7 @@ static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
  */
 void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
 {
-	if (!work_pending(&genpd->power_off_work))
-		queue_work(pm_wq, &genpd->power_off_work);
+	queue_work(pm_wq, &genpd->power_off_work);
 }
 
 /**
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 2b7f77d3fcb0..15beb500a4e4 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -99,7 +99,6 @@ void device_pm_add(struct device *dev)
 		dev_warn(dev, "parent %s should not be sleeping\n",
 			 dev_name(dev->parent));
 	list_add_tail(&dev->power.entry, &dpm_list);
-	dev_pm_qos_constraints_init(dev);
 	mutex_unlock(&dpm_list_mtx);
 }
 
@@ -113,7 +112,6 @@ void device_pm_remove(struct device *dev)
 		dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
 	complete_all(&dev->power.completion);
 	mutex_lock(&dpm_list_mtx);
-	dev_pm_qos_constraints_destroy(dev);
 	list_del_init(&dev->power.entry);
 	mutex_unlock(&dpm_list_mtx);
 	device_wakeup_disable(dev);
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 50b2831e027d..32ee0fc7ea54 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -162,7 +162,7 @@ unsigned long opp_get_voltage(struct opp *opp)
 
 	return v;
 }
-EXPORT_SYMBOL(opp_get_voltage);
+EXPORT_SYMBOL_GPL(opp_get_voltage);
 
 /**
  * opp_get_freq() - Gets the frequency corresponding to an available opp
@@ -192,7 +192,7 @@ unsigned long opp_get_freq(struct opp *opp)
 
 	return f;
 }
-EXPORT_SYMBOL(opp_get_freq);
+EXPORT_SYMBOL_GPL(opp_get_freq);
 
 /**
  * opp_get_opp_count() - Get number of opps available in the opp list
@@ -225,7 +225,7 @@ int opp_get_opp_count(struct device *dev)
 
 	return count;
 }
-EXPORT_SYMBOL(opp_get_opp_count);
+EXPORT_SYMBOL_GPL(opp_get_opp_count);
 
 /**
  * opp_find_freq_exact() - search for an exact frequency
@@ -276,7 +276,7 @@ struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
 
 	return opp;
 }
-EXPORT_SYMBOL(opp_find_freq_exact);
+EXPORT_SYMBOL_GPL(opp_find_freq_exact);
 
 /**
  * opp_find_freq_ceil() - Search for an rounded ceil freq
@@ -323,7 +323,7 @@ struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
 
 	return opp;
 }
-EXPORT_SYMBOL(opp_find_freq_ceil);
+EXPORT_SYMBOL_GPL(opp_find_freq_ceil);
 
 /**
  * opp_find_freq_floor() - Search for a rounded floor freq
@@ -374,7 +374,7 @@ struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)
 
 	return opp;
 }
-EXPORT_SYMBOL(opp_find_freq_floor);
+EXPORT_SYMBOL_GPL(opp_find_freq_floor);
 
 /**
  * opp_add() - Add an OPP table from a table definitions
@@ -568,7 +568,7 @@ int opp_enable(struct device *dev, unsigned long freq)
 {
 	return opp_set_availability(dev, freq, true);
 }
-EXPORT_SYMBOL(opp_enable);
+EXPORT_SYMBOL_GPL(opp_enable);
 
 /**
  * opp_disable() - Disable a specific OPP
@@ -590,7 +590,7 @@ int opp_disable(struct device *dev, unsigned long freq)
 {
 	return opp_set_availability(dev, freq, false);
 }
-EXPORT_SYMBOL(opp_disable);
+EXPORT_SYMBOL_GPL(opp_disable);
 
 #ifdef CONFIG_CPU_FREQ
 /**
@@ -661,6 +661,7 @@ int opp_init_cpufreq_table(struct device *dev,
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(opp_init_cpufreq_table);
 
 /**
  * opp_free_cpufreq_table() - free the cpufreq table
@@ -678,6 +679,7 @@ void opp_free_cpufreq_table(struct device *dev,
 	kfree(*table);
 	*table = NULL;
 }
+EXPORT_SYMBOL_GPL(opp_free_cpufreq_table);
 #endif		/* CONFIG_CPU_FREQ */
 
@@ -738,4 +740,5 @@ int of_init_opp_table(struct device *dev)
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(of_init_opp_table);
 #endif
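
Usage sketch (illustration only; device and frequency handling are placeholders): the exports switch to EXPORT_SYMBOL_GPL but the calling convention is unchanged, including the RCU read-side locking the OPP library documents.

    #include <linux/err.h>
    #include <linux/opp.h>
    #include <linux/rcupdate.h>

    static int foo_pick_opp(struct device *dev, unsigned long *freq,
                            unsigned long *volt)
    {
            struct opp *opp;

            rcu_read_lock();        /* opp_find_* results are RCU protected */
            opp = opp_find_freq_ceil(dev, freq);
            if (IS_ERR(opp)) {
                    rcu_read_unlock();
                    return PTR_ERR(opp);
            }
            *volt = opp_get_voltage(opp);
            rcu_read_unlock();
            return 0;
    }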
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index b16686a0a5a2..cfc3226ec492 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -4,7 +4,7 @@ static inline void device_pm_init_common(struct device *dev)
 {
 	if (!dev->power.early_init) {
 		spin_lock_init(&dev->power.lock);
-		dev->power.power_state = PMSG_INVALID;
+		dev->power.qos = NULL;
 		dev->power.early_init = true;
 	}
 }
@@ -56,14 +56,10 @@ extern void device_pm_move_last(struct device *);
 
 static inline void device_pm_sleep_init(struct device *dev) {}
 
-static inline void device_pm_add(struct device *dev)
-{
-	dev_pm_qos_constraints_init(dev);
-}
+static inline void device_pm_add(struct device *dev) {}
 
 static inline void device_pm_remove(struct device *dev)
 {
-	dev_pm_qos_constraints_destroy(dev);
 	pm_runtime_remove(dev);
 }
 
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index d21349544ce5..5f74587ef258 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -41,6 +41,7 @@
 #include <linux/mutex.h>
 #include <linux/export.h>
 #include <linux/pm_runtime.h>
+#include <linux/err.h>
 
 #include "power.h"
 
@@ -61,7 +62,7 @@ enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
 	struct pm_qos_flags *pqf;
 	s32 val;
 
-	if (!qos)
+	if (IS_ERR_OR_NULL(qos))
 		return PM_QOS_FLAGS_UNDEFINED;
 
 	pqf = &qos->flags;
@@ -91,6 +92,7 @@ enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask)
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
 
 /**
  * __dev_pm_qos_read_value - Get PM QoS constraint for a given device.
@@ -100,7 +102,8 @@ enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask)
  */
 s32 __dev_pm_qos_read_value(struct device *dev)
 {
-	return dev->power.qos ? pm_qos_read_value(&dev->power.qos->latency) : 0;
+	return IS_ERR_OR_NULL(dev->power.qos) ?
+		0 : pm_qos_read_value(&dev->power.qos->latency);
 }
 
 /**
@@ -197,20 +200,8 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
 	return 0;
 }
 
-/**
- * dev_pm_qos_constraints_init - Initalize device's PM QoS constraints pointer.
- * @dev: target device
- *
- * Called from the device PM subsystem during device insertion under
- * device_pm_lock().
- */
-void dev_pm_qos_constraints_init(struct device *dev)
-{
-	mutex_lock(&dev_pm_qos_mtx);
-	dev->power.qos = NULL;
-	dev->power.power_state = PMSG_ON;
-	mutex_unlock(&dev_pm_qos_mtx);
-}
+static void __dev_pm_qos_hide_latency_limit(struct device *dev);
+static void __dev_pm_qos_hide_flags(struct device *dev);
 
 /**
  * dev_pm_qos_constraints_destroy
@@ -225,16 +216,15 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
 	struct pm_qos_constraints *c;
 	struct pm_qos_flags *f;
 
+	mutex_lock(&dev_pm_qos_mtx);
+
 	/*
 	 * If the device's PM QoS resume latency limit or PM QoS flags have been
 	 * exposed to user space, they have to be hidden at this point.
 	 */
-	dev_pm_qos_hide_latency_limit(dev);
-	dev_pm_qos_hide_flags(dev);
+	__dev_pm_qos_hide_latency_limit(dev);
+	__dev_pm_qos_hide_flags(dev);
 
-	mutex_lock(&dev_pm_qos_mtx);
-
-	dev->power.power_state = PMSG_INVALID;
 	qos = dev->power.qos;
 	if (!qos)
 		goto out;
@@ -256,7 +246,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
 	}
 
 	spin_lock_irq(&dev->power.lock);
-	dev->power.qos = NULL;
+	dev->power.qos = ERR_PTR(-ENODEV);
 	spin_unlock_irq(&dev->power.lock);
 
 	kfree(c->notifiers);
@@ -300,32 +290,19 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
300 "%s() called for already added request\n", __func__)) 290 "%s() called for already added request\n", __func__))
301 return -EINVAL; 291 return -EINVAL;
302 292
303 req->dev = dev;
304
305 mutex_lock(&dev_pm_qos_mtx); 293 mutex_lock(&dev_pm_qos_mtx);
306 294
307 if (!dev->power.qos) { 295 if (IS_ERR(dev->power.qos))
308 if (dev->power.power_state.event == PM_EVENT_INVALID) { 296 ret = -ENODEV;
309 /* The device has been removed from the system. */ 297 else if (!dev->power.qos)
310 req->dev = NULL; 298 ret = dev_pm_qos_constraints_allocate(dev);
311 ret = -ENODEV;
312 goto out;
313 } else {
314 /*
315 * Allocate the constraints data on the first call to
316 * add_request, i.e. only if the data is not already
317 * allocated and if the device has not been removed.
318 */
319 ret = dev_pm_qos_constraints_allocate(dev);
320 }
321 }
322 299
323 if (!ret) { 300 if (!ret) {
301 req->dev = dev;
324 req->type = type; 302 req->type = type;
325 ret = apply_constraint(req, PM_QOS_ADD_REQ, value); 303 ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
326 } 304 }
327 305
328 out:
329 mutex_unlock(&dev_pm_qos_mtx); 306 mutex_unlock(&dev_pm_qos_mtx);
330 307
331 return ret; 308 return ret;
@@ -343,7 +320,14 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
 	s32 curr_value;
 	int ret = 0;
 
-	if (!req->dev->power.qos)
+	if (!req) /*guard against callers passing in null */
+		return -EINVAL;
+
+	if (WARN(!dev_pm_qos_request_active(req),
+		 "%s() called for unknown object\n", __func__))
+		return -EINVAL;
+
+	if (IS_ERR_OR_NULL(req->dev->power.qos))
 		return -ENODEV;
 
 	switch(req->type) {
@@ -385,6 +369,17 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
 {
 	int ret;
 
+	mutex_lock(&dev_pm_qos_mtx);
+	ret = __dev_pm_qos_update_request(req, new_value);
+	mutex_unlock(&dev_pm_qos_mtx);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
+
+static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
+{
+	int ret;
+
 	if (!req) /*guard against callers passing in null */
 		return -EINVAL;
 
@@ -392,13 +387,13 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
392 "%s() called for unknown object\n", __func__)) 387 "%s() called for unknown object\n", __func__))
393 return -EINVAL; 388 return -EINVAL;
394 389
395 mutex_lock(&dev_pm_qos_mtx); 390 if (IS_ERR_OR_NULL(req->dev->power.qos))
396 ret = __dev_pm_qos_update_request(req, new_value); 391 return -ENODEV;
397 mutex_unlock(&dev_pm_qos_mtx);
398 392
393 ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
394 memset(req, 0, sizeof(*req));
399 return ret; 395 return ret;
400} 396}
401EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
402 397
403/** 398/**
404 * dev_pm_qos_remove_request - modifies an existing qos request 399 * dev_pm_qos_remove_request - modifies an existing qos request
@@ -417,26 +412,10 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
  */
 int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
 {
-	int ret = 0;
-
-	if (!req) /*guard against callers passing in null */
-		return -EINVAL;
-
-	if (WARN(!dev_pm_qos_request_active(req),
-		 "%s() called for unknown object\n", __func__))
-		return -EINVAL;
+	int ret;
 
 	mutex_lock(&dev_pm_qos_mtx);
-
-	if (req->dev->power.qos) {
-		ret = apply_constraint(req, PM_QOS_REMOVE_REQ,
-				       PM_QOS_DEFAULT_VALUE);
-		memset(req, 0, sizeof(*req));
-	} else {
-		/* Return if the device has been removed */
-		ret = -ENODEV;
-	}
-
+	ret = __dev_pm_qos_remove_request(req);
 	mutex_unlock(&dev_pm_qos_mtx);
 	return ret;
 }
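
Caller sketch (illustration only; the request storage and values are placeholders): add/update/remove now funnel through one mutex-protected core, but the request lifecycle seen by callers is unchanged.

    #include <linux/pm_qos.h>

    static struct dev_pm_qos_request foo_req;       /* hypothetical */

    static int foo_constrain(struct device *dev)
    {
            /* cap resume latency at 100 us */
            int ret = dev_pm_qos_add_request(dev, &foo_req,
                                             DEV_PM_QOS_LATENCY, 100);
            if (ret < 0)
                    return ret;

            dev_pm_qos_update_request(&foo_req, 50);    /* tighten to 50 us */
            return dev_pm_qos_remove_request(&foo_req);
    }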
@@ -461,9 +440,10 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
 
 	mutex_lock(&dev_pm_qos_mtx);
 
-	if (!dev->power.qos)
-		ret = dev->power.power_state.event != PM_EVENT_INVALID ?
-			dev_pm_qos_constraints_allocate(dev) : -ENODEV;
+	if (IS_ERR(dev->power.qos))
+		ret = -ENODEV;
+	else if (!dev->power.qos)
+		ret = dev_pm_qos_constraints_allocate(dev);
 
 	if (!ret)
 		ret = blocking_notifier_chain_register(
@@ -492,7 +472,7 @@ int dev_pm_qos_remove_notifier(struct device *dev,
 	mutex_lock(&dev_pm_qos_mtx);
 
 	/* Silently return if the constraints object is not present. */
-	if (dev->power.qos)
+	if (!IS_ERR_OR_NULL(dev->power.qos))
 		retval = blocking_notifier_chain_unregister(
 				dev->power.qos->latency.notifiers,
 				notifier);
@@ -562,16 +542,20 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
 static void __dev_pm_qos_drop_user_request(struct device *dev,
 					   enum dev_pm_qos_req_type type)
 {
+	struct dev_pm_qos_request *req = NULL;
+
 	switch(type) {
 	case DEV_PM_QOS_LATENCY:
-		dev_pm_qos_remove_request(dev->power.qos->latency_req);
+		req = dev->power.qos->latency_req;
 		dev->power.qos->latency_req = NULL;
 		break;
 	case DEV_PM_QOS_FLAGS:
-		dev_pm_qos_remove_request(dev->power.qos->flags_req);
+		req = dev->power.qos->flags_req;
 		dev->power.qos->flags_req = NULL;
 		break;
 	}
+	__dev_pm_qos_remove_request(req);
+	kfree(req);
 }
 
 /**
@@ -587,36 +571,57 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
 	if (!device_is_registered(dev) || value < 0)
 		return -EINVAL;
 
-	if (dev->power.qos && dev->power.qos->latency_req)
-		return -EEXIST;
-
 	req = kzalloc(sizeof(*req), GFP_KERNEL);
 	if (!req)
 		return -ENOMEM;
 
 	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value);
-	if (ret < 0)
+	if (ret < 0) {
+		kfree(req);
 		return ret;
+	}
+
+	mutex_lock(&dev_pm_qos_mtx);
+
+	if (IS_ERR_OR_NULL(dev->power.qos))
+		ret = -ENODEV;
+	else if (dev->power.qos->latency_req)
+		ret = -EEXIST;
+
+	if (ret < 0) {
+		__dev_pm_qos_remove_request(req);
+		kfree(req);
+		goto out;
+	}
 
 	dev->power.qos->latency_req = req;
 	ret = pm_qos_sysfs_add_latency(dev);
 	if (ret)
 		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
 
+ out:
+	mutex_unlock(&dev_pm_qos_mtx);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
 
+static void __dev_pm_qos_hide_latency_limit(struct device *dev)
+{
+	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) {
+		pm_qos_sysfs_remove_latency(dev);
+		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
+	}
+}
+
 /**
  * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
  * @dev: Device whose PM QoS latency limit is to be hidden from user space.
  */
 void dev_pm_qos_hide_latency_limit(struct device *dev)
 {
-	if (dev->power.qos && dev->power.qos->latency_req) {
-		pm_qos_sysfs_remove_latency(dev);
-		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
-	}
+	mutex_lock(&dev_pm_qos_mtx);
+	__dev_pm_qos_hide_latency_limit(dev);
+	mutex_unlock(&dev_pm_qos_mtx);
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
@@ -633,41 +638,61 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val)
633 if (!device_is_registered(dev)) 638 if (!device_is_registered(dev))
634 return -EINVAL; 639 return -EINVAL;
635 640
636 if (dev->power.qos && dev->power.qos->flags_req)
637 return -EEXIST;
638
639 req = kzalloc(sizeof(*req), GFP_KERNEL); 641 req = kzalloc(sizeof(*req), GFP_KERNEL);
640 if (!req) 642 if (!req)
641 return -ENOMEM; 643 return -ENOMEM;
642 644
643 pm_runtime_get_sync(dev);
644 ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val); 645 ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
645 if (ret < 0) 646 if (ret < 0) {
646 goto fail; 647 kfree(req);
648 return ret;
649 }
650
651 pm_runtime_get_sync(dev);
652 mutex_lock(&dev_pm_qos_mtx);
653
654 if (IS_ERR_OR_NULL(dev->power.qos))
655 ret = -ENODEV;
656 else if (dev->power.qos->flags_req)
657 ret = -EEXIST;
658
659 if (ret < 0) {
660 __dev_pm_qos_remove_request(req);
661 kfree(req);
662 goto out;
663 }
647 664
648 dev->power.qos->flags_req = req; 665 dev->power.qos->flags_req = req;
649 ret = pm_qos_sysfs_add_flags(dev); 666 ret = pm_qos_sysfs_add_flags(dev);
650 if (ret) 667 if (ret)
651 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); 668 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
652 669
653fail: 670 out:
671 mutex_unlock(&dev_pm_qos_mtx);
654 pm_runtime_put(dev); 672 pm_runtime_put(dev);
655 return ret; 673 return ret;
656} 674}
657EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags); 675EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
658 676
677static void __dev_pm_qos_hide_flags(struct device *dev)
678{
679 if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req) {
680 pm_qos_sysfs_remove_flags(dev);
681 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
682 }
683}
684
659/** 685/**
660 * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space. 686 * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
661 * @dev: Device whose PM QoS flags are to be hidden from user space. 687 * @dev: Device whose PM QoS flags are to be hidden from user space.
662 */ 688 */
663void dev_pm_qos_hide_flags(struct device *dev) 689void dev_pm_qos_hide_flags(struct device *dev)
664{ 690{
665 if (dev->power.qos && dev->power.qos->flags_req) { 691 pm_runtime_get_sync(dev);
666 pm_qos_sysfs_remove_flags(dev); 692 mutex_lock(&dev_pm_qos_mtx);
667 pm_runtime_get_sync(dev); 693 __dev_pm_qos_hide_flags(dev);
668 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS); 694 mutex_unlock(&dev_pm_qos_mtx);
669 pm_runtime_put(dev); 695 pm_runtime_put(dev);
670 }
671} 696}
672EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags); 697EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);
673 698
@@ -682,12 +707,14 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
682 s32 value; 707 s32 value;
683 int ret; 708 int ret;
684 709
685 if (!dev->power.qos || !dev->power.qos->flags_req)
686 return -EINVAL;
687
688 pm_runtime_get_sync(dev); 710 pm_runtime_get_sync(dev);
689 mutex_lock(&dev_pm_qos_mtx); 711 mutex_lock(&dev_pm_qos_mtx);
690 712
713 if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
714 ret = -EINVAL;
715 goto out;
716 }
717
691 value = dev_pm_qos_requested_flags(dev); 718 value = dev_pm_qos_requested_flags(dev);
692 if (set) 719 if (set)
693 value |= mask; 720 value |= mask;
@@ -696,9 +723,12 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
696 723
697 ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value); 724 ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);
698 725
726 out:
699 mutex_unlock(&dev_pm_qos_mtx); 727 mutex_unlock(&dev_pm_qos_mtx);
700 pm_runtime_put(dev); 728 pm_runtime_put(dev);
701
702 return ret; 729 return ret;
703} 730}
731#else /* !CONFIG_PM_RUNTIME */
732static void __dev_pm_qos_hide_latency_limit(struct device *dev) {}
733static void __dev_pm_qos_hide_flags(struct device *dev) {}
704#endif /* CONFIG_PM_RUNTIME */ 734#endif /* CONFIG_PM_RUNTIME */
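
For reference, a minimal sketch of how a driver might consume the latency-limit exposure helpers reworked above. The probe/remove pair and the 1000-microsecond initial limit are illustrative assumptions, not taken from this commit; the two exported calls are the ones shown in the hunks above.

        #include <linux/pm_qos.h>

        /* Hypothetical driver hooks; "my_probe"/"my_remove" are not real. */
        static int my_probe(struct device *dev)
        {
                /* Expose a 1000 us latency limit to user space via sysfs. */
                return dev_pm_qos_expose_latency_limit(dev, 1000);
        }

        static void my_remove(struct device *dev)
        {
                /* Safe even if the limit was never exposed or already hidden. */
                dev_pm_qos_hide_latency_limit(dev);
        }
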
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 3148b10dc2e5..1244930e3d7a 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -124,6 +124,76 @@ unsigned long pm_runtime_autosuspend_expiration(struct device *dev)
124} 124}
125EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration); 125EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
126 126
127static int dev_memalloc_noio(struct device *dev, void *data)
128{
129 return dev->power.memalloc_noio;
130}
131
132/*
133 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
134 * @dev: Device to handle.
135 * @enable: True for setting the flag and False for clearing the flag.
136 *
137 * Set the flag for all devices in the path from the device to the
138 * root device in the device tree if @enable is true, otherwise clear
139 * the flag for devices in the path whose siblings don't set the flag.
140 *
 141 * The function should only be called by block or network device
 142 * drivers to solve the deadlock problem during runtime
143 * resume/suspend:
144 *
145 * If memory allocation with GFP_KERNEL is called inside runtime
 146 * resume/suspend callback of any one of its ancestors (or the
147 * block device itself), the deadlock may be triggered inside the
148 * memory allocation since it might not complete until the block
 149 * device becomes active and the involved page I/O finishes. This
 150 * situation was first pointed out by Alan Stern. Network devices
 151 * are involved in iSCSI-style situations.
152 *
 153 * The dev_hotplug_mutex lock is held in this function to handle the
 154 * hotplug race, because pm_runtime_set_memalloc_noio() may be called
 155 * from async probe().
156 *
157 * The function should be called between device_add() and device_del()
 158 * on the affected device (a block or network device).
159 */
160void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
161{
162 static DEFINE_MUTEX(dev_hotplug_mutex);
163
164 mutex_lock(&dev_hotplug_mutex);
165 for (;;) {
166 bool enabled;
167
168 /* hold power lock since bitfield is not SMP-safe. */
169 spin_lock_irq(&dev->power.lock);
170 enabled = dev->power.memalloc_noio;
171 dev->power.memalloc_noio = enable;
172 spin_unlock_irq(&dev->power.lock);
173
174 /*
 175 * No need to enable ancestors any more if the device
 176 * itself has already been enabled.
177 */
178 if (enabled && enable)
179 break;
180
181 dev = dev->parent;
182
183 /*
 184 * Clear the parent device's flag only if none of its
 185 * children set the flag, because an ancestor's flag may
 186 * have been set by any one of its descendants.
187 */
188 if (!dev || (!enable &&
189 device_for_each_child(dev, NULL,
190 dev_memalloc_noio)))
191 break;
192 }
193 mutex_unlock(&dev_hotplug_mutex);
194}
195EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
196
127/** 197/**
128 * rpm_check_suspend_allowed - Test whether a device may be suspended. 198 * rpm_check_suspend_allowed - Test whether a device may be suspended.
129 * @dev: Device to test. 199 * @dev: Device to test.
@@ -278,7 +348,24 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
278 if (!cb) 348 if (!cb)
279 return -ENOSYS; 349 return -ENOSYS;
280 350
281 retval = __rpm_callback(cb, dev); 351 if (dev->power.memalloc_noio) {
352 unsigned int noio_flag;
353
354 /*
 355 * A deadlock might be triggered if a memory allocation with
 356 * GFP_KERNEL happens inside the runtime_suspend or
 357 * runtime_resume callback of a block device's ancestor or
 358 * of the block device itself. A network device may be
 359 * regarded as part of an iSCSI block device, so network
 360 * devices and their ancestors should be marked
 361 * memalloc_noio too.
362 */
363 noio_flag = memalloc_noio_save();
364 retval = __rpm_callback(cb, dev);
365 memalloc_noio_restore(noio_flag);
366 } else {
367 retval = __rpm_callback(cb, dev);
368 }
282 369
283 dev->power.runtime_error = retval; 370 dev->power.runtime_error = retval;
284 return retval != -EACCES ? retval : -EIO; 371 return retval != -EACCES ? retval : -EIO;
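
The kernel-doc above spells out the intended call pattern for pm_runtime_set_memalloc_noio(): set the flag after device_add() and clear it before device_del(). A sketch of that pattern, with hypothetical registration hooks:

        static int my_block_add(struct device *dev)
        {
                int ret;

                ret = device_add(dev);
                if (ret)
                        return ret;

                /*
                 * Runtime PM callbacks along this device path must now
                 * avoid GFP_KERNEL allocations that could issue page I/O.
                 */
                pm_runtime_set_memalloc_noio(dev, true);
                return 0;
        }

        static void my_block_del(struct device *dev)
        {
                pm_runtime_set_memalloc_noio(dev, false);
                device_del(dev);
        }
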
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 50d16e3cb0a9..a53ebd265701 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -708,6 +708,7 @@ void rpm_sysfs_remove(struct device *dev)
708 708
709void dpm_sysfs_remove(struct device *dev) 709void dpm_sysfs_remove(struct device *dev)
710{ 710{
711 dev_pm_qos_constraints_destroy(dev);
711 rpm_sysfs_remove(dev); 712 rpm_sysfs_remove(dev);
712 sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); 713 sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
713 sysfs_remove_group(&dev->kobj, &pm_attr_group); 714 sysfs_remove_group(&dev->kobj, &pm_attr_group);
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index e6ee5e80e546..79715e7fa43e 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -382,6 +382,12 @@ static void wakeup_source_activate(struct wakeup_source *ws)
382{ 382{
383 unsigned int cec; 383 unsigned int cec;
384 384
385 /*
 386 * An active wakeup source should bring the system
 387 * out of the PM_SUSPEND_FREEZE state.
388 */
389 freeze_wake();
390
385 ws->active = true; 391 ws->active = true;
386 ws->active_count++; 392 ws->active_count++;
387 ws->last_time = ktime_get(); 393 ws->last_time = ktime_get();
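
With the freeze_wake() call added above, any wakeup source activation now also aborts suspend-to-freeze. A driver-side sketch; the interrupt handler is hypothetical and my_ws is assumed to have been obtained from wakeup_source_register():

        static struct wakeup_source *my_ws;

        static irqreturn_t my_irq_handler(int irq, void *data)
        {
                /*
                 * Activating the wakeup source now wakes the system from
                 * PM_SUSPEND_FREEZE as well as blocking a suspend in flight.
                 */
                __pm_wakeup_event(my_ws, 100);  /* hold wakeup for 100 ms */
                return IRQ_HANDLED;
        }
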
diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile
index 5e75d1b683e2..cf129980abd0 100644
--- a/drivers/base/regmap/Makefile
+++ b/drivers/base/regmap/Makefile
@@ -1,5 +1,5 @@
1obj-$(CONFIG_REGMAP) += regmap.o regcache.o 1obj-$(CONFIG_REGMAP) += regmap.o regcache.o
2obj-$(CONFIG_REGMAP) += regcache-rbtree.o regcache-lzo.o 2obj-$(CONFIG_REGMAP) += regcache-rbtree.o regcache-lzo.o regcache-flat.o
3obj-$(CONFIG_DEBUG_FS) += regmap-debugfs.o 3obj-$(CONFIG_DEBUG_FS) += regmap-debugfs.o
4obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o 4obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o
5obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o 5obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 401d1919635a..5a22bd33ce3d 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -16,6 +16,7 @@
16#include <linux/regmap.h> 16#include <linux/regmap.h>
17#include <linux/fs.h> 17#include <linux/fs.h>
18#include <linux/list.h> 18#include <linux/list.h>
19#include <linux/wait.h>
19 20
20struct regmap; 21struct regmap;
21struct regcache_ops; 22struct regcache_ops;
@@ -25,6 +26,7 @@ struct regmap_debugfs_off_cache {
25 off_t min; 26 off_t min;
26 off_t max; 27 off_t max;
27 unsigned int base_reg; 28 unsigned int base_reg;
29 unsigned int max_reg;
28}; 30};
29 31
30struct regmap_format { 32struct regmap_format {
@@ -39,6 +41,13 @@ struct regmap_format {
39 unsigned int (*parse_val)(void *buf); 41 unsigned int (*parse_val)(void *buf);
40}; 42};
41 43
44struct regmap_async {
45 struct list_head list;
46 struct work_struct cleanup;
47 struct regmap *map;
48 void *work_buf;
49};
50
42struct regmap { 51struct regmap {
43 struct mutex mutex; 52 struct mutex mutex;
44 spinlock_t spinlock; 53 spinlock_t spinlock;
@@ -53,6 +62,11 @@ struct regmap {
53 void *bus_context; 62 void *bus_context;
54 const char *name; 63 const char *name;
55 64
65 spinlock_t async_lock;
66 wait_queue_head_t async_waitq;
67 struct list_head async_list;
68 int async_ret;
69
56#ifdef CONFIG_DEBUG_FS 70#ifdef CONFIG_DEBUG_FS
57 struct dentry *debugfs; 71 struct dentry *debugfs;
58 const char *debugfs_name; 72 const char *debugfs_name;
@@ -74,6 +88,11 @@ struct regmap {
74 const struct regmap_access_table *volatile_table; 88 const struct regmap_access_table *volatile_table;
75 const struct regmap_access_table *precious_table; 89 const struct regmap_access_table *precious_table;
76 90
91 int (*reg_read)(void *context, unsigned int reg, unsigned int *val);
92 int (*reg_write)(void *context, unsigned int reg, unsigned int val);
93
94 bool defer_caching;
95
77 u8 read_flag_mask; 96 u8 read_flag_mask;
78 u8 write_flag_mask; 97 u8 write_flag_mask;
79 98
@@ -175,7 +194,10 @@ bool regcache_set_val(void *base, unsigned int idx,
175 unsigned int val, unsigned int word_size); 194 unsigned int val, unsigned int word_size);
176int regcache_lookup_reg(struct regmap *map, unsigned int reg); 195int regcache_lookup_reg(struct regmap *map, unsigned int reg);
177 196
197void regmap_async_complete_cb(struct regmap_async *async, int ret);
198
178extern struct regcache_ops regcache_rbtree_ops; 199extern struct regcache_ops regcache_rbtree_ops;
179extern struct regcache_ops regcache_lzo_ops; 200extern struct regcache_ops regcache_lzo_ops;
201extern struct regcache_ops regcache_flat_ops;
180 202
181#endif 203#endif
diff --git a/drivers/base/regmap/regcache-flat.c b/drivers/base/regmap/regcache-flat.c
new file mode 100644
index 000000000000..d9762e41959b
--- /dev/null
+++ b/drivers/base/regmap/regcache-flat.c
@@ -0,0 +1,72 @@
1/*
2 * Register cache access API - flat caching support
3 *
4 * Copyright 2012 Wolfson Microelectronics plc
5 *
6 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/slab.h>
14#include <linux/device.h>
15#include <linux/seq_file.h>
16
17#include "internal.h"
18
19static int regcache_flat_init(struct regmap *map)
20{
21 int i;
22 unsigned int *cache;
23
24 map->cache = kzalloc(sizeof(unsigned int) * (map->max_register + 1),
25 GFP_KERNEL);
26 if (!map->cache)
27 return -ENOMEM;
28
29 cache = map->cache;
30
31 for (i = 0; i < map->num_reg_defaults; i++)
32 cache[map->reg_defaults[i].reg] = map->reg_defaults[i].def;
33
34 return 0;
35}
36
37static int regcache_flat_exit(struct regmap *map)
38{
39 kfree(map->cache);
40 map->cache = NULL;
41
42 return 0;
43}
44
45static int regcache_flat_read(struct regmap *map,
46 unsigned int reg, unsigned int *value)
47{
48 unsigned int *cache = map->cache;
49
50 *value = cache[reg];
51
52 return 0;
53}
54
55static int regcache_flat_write(struct regmap *map, unsigned int reg,
56 unsigned int value)
57{
58 unsigned int *cache = map->cache;
59
60 cache[reg] = value;
61
62 return 0;
63}
64
65struct regcache_ops regcache_flat_ops = {
66 .type = REGCACHE_FLAT,
67 .name = "flat",
68 .init = regcache_flat_init,
69 .exit = regcache_flat_exit,
70 .read = regcache_flat_read,
71 .write = regcache_flat_write,
72};
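
Since regcache_flat_ops is registered in cache_types[] below, selecting the new cache is purely a regmap_config choice. A minimal sketch, with register width and count invented for illustration:

        static const struct regmap_config my_config = {
                .reg_bits = 8,
                .val_bits = 8,
                .max_register = 0x3f,
                /*
                 * REGCACHE_FLAT allocates one word per register up to
                 * max_register, so it suits small, dense register maps
                 * where lookup speed matters more than memory footprint.
                 */
                .cache_type = REGCACHE_FLAT,
        };
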
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 835883bda977..e69ff3e4742c 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -22,6 +22,7 @@
22static const struct regcache_ops *cache_types[] = { 22static const struct regcache_ops *cache_types[] = {
23 &regcache_rbtree_ops, 23 &regcache_rbtree_ops,
24 &regcache_lzo_ops, 24 &regcache_lzo_ops,
25 &regcache_flat_ops,
25}; 26};
26 27
27static int regcache_hw_init(struct regmap *map) 28static int regcache_hw_init(struct regmap *map)
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index d9a6c94ce423..81d6f605c92e 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -81,6 +81,8 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
81 struct regmap_debugfs_off_cache *c = NULL; 81 struct regmap_debugfs_off_cache *c = NULL;
82 loff_t p = 0; 82 loff_t p = 0;
83 unsigned int i, ret; 83 unsigned int i, ret;
84 unsigned int fpos_offset;
85 unsigned int reg_offset;
84 86
85 /* 87 /*
86 * If we don't have a cache build one so we don't have to do a 88 * If we don't have a cache build one so we don't have to do a
@@ -93,6 +95,9 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
93 regmap_precious(map, i)) { 95 regmap_precious(map, i)) {
94 if (c) { 96 if (c) {
95 c->max = p - 1; 97 c->max = p - 1;
98 fpos_offset = c->max - c->min;
99 reg_offset = fpos_offset / map->debugfs_tot_len;
100 c->max_reg = c->base_reg + reg_offset;
96 list_add_tail(&c->list, 101 list_add_tail(&c->list,
97 &map->debugfs_off_cache); 102 &map->debugfs_off_cache);
98 c = NULL; 103 c = NULL;
@@ -119,6 +124,9 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
119 /* Close the last entry off if we didn't scan beyond it */ 124 /* Close the last entry off if we didn't scan beyond it */
120 if (c) { 125 if (c) {
121 c->max = p - 1; 126 c->max = p - 1;
127 fpos_offset = c->max - c->min;
128 reg_offset = fpos_offset / map->debugfs_tot_len;
129 c->max_reg = c->base_reg + reg_offset;
122 list_add_tail(&c->list, 130 list_add_tail(&c->list,
123 &map->debugfs_off_cache); 131 &map->debugfs_off_cache);
124 } 132 }
@@ -128,25 +136,38 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
128 * allocate and we should never be in this code if there are 136 * allocate and we should never be in this code if there are
129 * no registers at all. 137 * no registers at all.
130 */ 138 */
131 if (list_empty(&map->debugfs_off_cache)) { 139 WARN_ON(list_empty(&map->debugfs_off_cache));
132 WARN_ON(list_empty(&map->debugfs_off_cache)); 140 ret = base;
133 return base;
134 }
135 141
136 /* Find the relevant block */ 142 /* Find the relevant block:offset */
137 list_for_each_entry(c, &map->debugfs_off_cache, list) { 143 list_for_each_entry(c, &map->debugfs_off_cache, list) {
138 if (from >= c->min && from <= c->max) { 144 if (from >= c->min && from <= c->max) {
139 *pos = c->min; 145 fpos_offset = from - c->min;
140 return c->base_reg; 146 reg_offset = fpos_offset / map->debugfs_tot_len;
147 *pos = c->min + (reg_offset * map->debugfs_tot_len);
148 return c->base_reg + reg_offset;
141 } 149 }
142 150
143 *pos = c->min; 151 *pos = c->max;
144 ret = c->base_reg; 152 ret = c->max_reg;
145 } 153 }
146 154
147 return ret; 155 return ret;
148} 156}
149 157
158static inline void regmap_calc_tot_len(struct regmap *map,
159 void *buf, size_t count)
160{
161 /* Calculate the length of a fixed format */
162 if (!map->debugfs_tot_len) {
163 map->debugfs_reg_len = regmap_calc_reg_len(map->max_register,
164 buf, count);
165 map->debugfs_val_len = 2 * map->format.val_bytes;
166 map->debugfs_tot_len = map->debugfs_reg_len +
167 map->debugfs_val_len + 3; /* : \n */
168 }
169}
170
150static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from, 171static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
151 unsigned int to, char __user *user_buf, 172 unsigned int to, char __user *user_buf,
152 size_t count, loff_t *ppos) 173 size_t count, loff_t *ppos)
@@ -165,14 +186,7 @@ static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
165 if (!buf) 186 if (!buf)
166 return -ENOMEM; 187 return -ENOMEM;
167 188
168 /* Calculate the length of a fixed format */ 189 regmap_calc_tot_len(map, buf, count);
169 if (!map->debugfs_tot_len) {
170 map->debugfs_reg_len = regmap_calc_reg_len(map->max_register,
171 buf, count);
172 map->debugfs_val_len = 2 * map->format.val_bytes;
173 map->debugfs_tot_len = map->debugfs_reg_len +
174 map->debugfs_val_len + 3; /* : \n */
175 }
176 190
177 /* Work out which register we're starting at */ 191 /* Work out which register we're starting at */
178 start_reg = regmap_debugfs_get_dump_start(map, from, *ppos, &p); 192 start_reg = regmap_debugfs_get_dump_start(map, from, *ppos, &p);
@@ -187,7 +201,7 @@ static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
187 /* If we're in the region the user is trying to read */ 201 /* If we're in the region the user is trying to read */
188 if (p >= *ppos) { 202 if (p >= *ppos) {
189 /* ...but not beyond it */ 203 /* ...but not beyond it */
190 if (buf_pos + 1 + map->debugfs_tot_len >= count) 204 if (buf_pos + map->debugfs_tot_len > count)
191 break; 205 break;
192 206
193 /* Format the register */ 207 /* Format the register */
@@ -265,7 +279,7 @@ static ssize_t regmap_map_write_file(struct file *file,
265 return -EINVAL; 279 return -EINVAL;
266 280
267 /* Userspace has been fiddling around behind the kernel's back */ 281 /* Userspace has been fiddling around behind the kernel's back */
268 add_taint(TAINT_USER); 282 add_taint(TAINT_USER, LOCKDEP_NOW_UNRELIABLE);
269 283
270 regmap_write(map, reg, value); 284 regmap_write(map, reg, value);
271 return buf_size; 285 return buf_size;
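
A worked example of the file-position mapping added above, assuming a 4-hex-digit register field and 2-byte values, so debugfs_tot_len = 4 + 4 + 3 = 11 characters per "rrrr: vvvv\n" line:

        /*
         * For a cache block with min = 0 and base_reg = 0x10, a read at
         * file position from = 35 resolves as:
         *
         *     fpos_offset = 35 - 0     = 35
         *     reg_offset  = 35 / 11    = 3
         *     *pos        = 0 + 3 * 11 = 33    (start of that line)
         *     return        0x10 + 3   = 0x13  (first register to dump)
         */
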
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 5972ad958544..020ea2b9fd2f 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -34,6 +34,7 @@ struct regmap_irq_chip_data {
34 int irq; 34 int irq;
35 int wake_count; 35 int wake_count;
36 36
37 void *status_reg_buf;
37 unsigned int *status_buf; 38 unsigned int *status_buf;
38 unsigned int *mask_buf; 39 unsigned int *mask_buf;
39 unsigned int *mask_buf_def; 40 unsigned int *mask_buf_def;
@@ -87,6 +88,23 @@ static void regmap_irq_sync_unlock(struct irq_data *data)
87 if (ret != 0) 88 if (ret != 0)
88 dev_err(d->map->dev, "Failed to sync masks in %x\n", 89 dev_err(d->map->dev, "Failed to sync masks in %x\n",
89 reg); 90 reg);
91
92 reg = d->chip->wake_base +
93 (i * map->reg_stride * d->irq_reg_stride);
94 if (d->wake_buf) {
95 if (d->chip->wake_invert)
96 ret = regmap_update_bits(d->map, reg,
97 d->mask_buf_def[i],
98 ~d->wake_buf[i]);
99 else
100 ret = regmap_update_bits(d->map, reg,
101 d->mask_buf_def[i],
102 d->wake_buf[i]);
103 if (ret != 0)
104 dev_err(d->map->dev,
105 "Failed to sync wakes in %x: %d\n",
106 reg, ret);
107 }
90 } 108 }
91 109
92 if (d->chip->runtime_pm) 110 if (d->chip->runtime_pm)
@@ -129,16 +147,15 @@ static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
129 struct regmap *map = d->map; 147 struct regmap *map = d->map;
130 const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq); 148 const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
131 149
132 if (!d->chip->wake_base)
133 return -EINVAL;
134
135 if (on) { 150 if (on) {
136 d->wake_buf[irq_data->reg_offset / map->reg_stride] 151 if (d->wake_buf)
137 &= ~irq_data->mask; 152 d->wake_buf[irq_data->reg_offset / map->reg_stride]
153 &= ~irq_data->mask;
138 d->wake_count++; 154 d->wake_count++;
139 } else { 155 } else {
140 d->wake_buf[irq_data->reg_offset / map->reg_stride] 156 if (d->wake_buf)
141 |= irq_data->mask; 157 d->wake_buf[irq_data->reg_offset / map->reg_stride]
158 |= irq_data->mask;
142 d->wake_count--; 159 d->wake_count--;
143 } 160 }
144 161
@@ -167,30 +184,75 @@ static irqreturn_t regmap_irq_thread(int irq, void *d)
167 if (ret < 0) { 184 if (ret < 0) {
168 dev_err(map->dev, "IRQ thread failed to resume: %d\n", 185 dev_err(map->dev, "IRQ thread failed to resume: %d\n",
169 ret); 186 ret);
187 pm_runtime_put(map->dev);
170 return IRQ_NONE; 188 return IRQ_NONE;
171 } 189 }
172 } 190 }
173 191
174 /* 192 /*
175 * Ignore masked IRQs and ack if we need to; we ack early so 193 * Read in the statuses, using a single bulk read if possible
176 * there is no race between handling and acknowleding the 194 * in order to reduce the I/O overheads.
177 * interrupt. We assume that typically few of the interrupts
178 * will fire simultaneously so don't worry about overhead from
179 * doing a write per register.
180 */ 195 */
181 for (i = 0; i < data->chip->num_regs; i++) { 196 if (!map->use_single_rw && map->reg_stride == 1 &&
182 ret = regmap_read(map, chip->status_base + (i * map->reg_stride 197 data->irq_reg_stride == 1) {
183 * data->irq_reg_stride), 198 u8 *buf8 = data->status_reg_buf;
184 &data->status_buf[i]); 199 u16 *buf16 = data->status_reg_buf;
200 u32 *buf32 = data->status_reg_buf;
185 201
202 BUG_ON(!data->status_reg_buf);
203
204 ret = regmap_bulk_read(map, chip->status_base,
205 data->status_reg_buf,
206 chip->num_regs);
186 if (ret != 0) { 207 if (ret != 0) {
187 dev_err(map->dev, "Failed to read IRQ status: %d\n", 208 dev_err(map->dev, "Failed to read IRQ status: %d\n",
188 ret); 209 ret);
189 if (chip->runtime_pm)
190 pm_runtime_put(map->dev);
191 return IRQ_NONE; 210 return IRQ_NONE;
192 } 211 }
193 212
213 for (i = 0; i < data->chip->num_regs; i++) {
214 switch (map->format.val_bytes) {
215 case 1:
216 data->status_buf[i] = buf8[i];
217 break;
218 case 2:
219 data->status_buf[i] = buf16[i];
220 break;
221 case 4:
222 data->status_buf[i] = buf32[i];
223 break;
224 default:
225 BUG();
226 return IRQ_NONE;
227 }
228 }
229
230 } else {
231 for (i = 0; i < data->chip->num_regs; i++) {
232 ret = regmap_read(map, chip->status_base +
233 (i * map->reg_stride
234 * data->irq_reg_stride),
235 &data->status_buf[i]);
236
237 if (ret != 0) {
238 dev_err(map->dev,
239 "Failed to read IRQ status: %d\n",
240 ret);
241 if (chip->runtime_pm)
242 pm_runtime_put(map->dev);
243 return IRQ_NONE;
244 }
245 }
246 }
247
248 /*
249 * Ignore masked IRQs and ack if we need to; we ack early so
 250 * there is no race between handling and acknowledging the
251 * interrupt. We assume that typically few of the interrupts
252 * will fire simultaneously so don't worry about overhead from
253 * doing a write per register.
254 */
255 for (i = 0; i < data->chip->num_regs; i++) {
194 data->status_buf[i] &= ~data->mask_buf[i]; 256 data->status_buf[i] &= ~data->mask_buf[i];
195 257
196 if (data->status_buf[i] && chip->ack_base) { 258 if (data->status_buf[i] && chip->ack_base) {
@@ -316,11 +378,6 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
316 378
317 d->irq_chip = regmap_irq_chip; 379 d->irq_chip = regmap_irq_chip;
318 d->irq_chip.name = chip->name; 380 d->irq_chip.name = chip->name;
319 if (!chip->wake_base) {
320 d->irq_chip.irq_set_wake = NULL;
321 d->irq_chip.flags |= IRQCHIP_MASK_ON_SUSPEND |
322 IRQCHIP_SKIP_SET_WAKE;
323 }
324 d->irq = irq; 381 d->irq = irq;
325 d->map = map; 382 d->map = map;
326 d->chip = chip; 383 d->chip = chip;
@@ -331,6 +388,14 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
331 else 388 else
332 d->irq_reg_stride = 1; 389 d->irq_reg_stride = 1;
333 390
391 if (!map->use_single_rw && map->reg_stride == 1 &&
392 d->irq_reg_stride == 1) {
393 d->status_reg_buf = kmalloc(map->format.val_bytes *
394 chip->num_regs, GFP_KERNEL);
395 if (!d->status_reg_buf)
396 goto err_alloc;
397 }
398
334 mutex_init(&d->lock); 399 mutex_init(&d->lock);
335 400
336 for (i = 0; i < chip->num_irqs; i++) 401 for (i = 0; i < chip->num_irqs; i++)
@@ -361,8 +426,15 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
361 d->wake_buf[i] = d->mask_buf_def[i]; 426 d->wake_buf[i] = d->mask_buf_def[i];
362 reg = chip->wake_base + 427 reg = chip->wake_base +
363 (i * map->reg_stride * d->irq_reg_stride); 428 (i * map->reg_stride * d->irq_reg_stride);
364 ret = regmap_update_bits(map, reg, d->wake_buf[i], 429
365 d->wake_buf[i]); 430 if (chip->wake_invert)
431 ret = regmap_update_bits(map, reg,
432 d->mask_buf_def[i],
433 0);
434 else
435 ret = regmap_update_bits(map, reg,
436 d->mask_buf_def[i],
437 d->wake_buf[i]);
366 if (ret != 0) { 438 if (ret != 0) {
367 dev_err(map->dev, "Failed to set masks in 0x%x: %d\n", 439 dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
368 reg, ret); 440 reg, ret);
@@ -401,6 +473,7 @@ err_alloc:
401 kfree(d->mask_buf_def); 473 kfree(d->mask_buf_def);
402 kfree(d->mask_buf); 474 kfree(d->mask_buf);
403 kfree(d->status_buf); 475 kfree(d->status_buf);
476 kfree(d->status_reg_buf);
404 kfree(d); 477 kfree(d);
405 return ret; 478 return ret;
406} 479}
@@ -422,6 +495,7 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
422 kfree(d->wake_buf); 495 kfree(d->wake_buf);
423 kfree(d->mask_buf_def); 496 kfree(d->mask_buf_def);
424 kfree(d->mask_buf); 497 kfree(d->mask_buf);
498 kfree(d->status_reg_buf);
425 kfree(d->status_buf); 499 kfree(d->status_buf);
426 kfree(d); 500 kfree(d);
427} 501}
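
A sketch of the chip description consumed by the wake handling above; register addresses and the IRQ layout are invented. With wake_invert set, the sync path writes the complement of wake_buf, i.e. a set bit disables wake on the device. The struct is registered as usual through regmap_add_irq_chip().

        static const struct regmap_irq my_irqs[] = {
                [0] = { .reg_offset = 0, .mask = BIT(0) },
                [1] = { .reg_offset = 0, .mask = BIT(1) },
        };

        static const struct regmap_irq_chip my_irq_chip = {
                .name = "my-chip",
                .status_base = 0x10,
                .mask_base = 0x20,
                .ack_base = 0x30,
                .wake_base = 0x40,      /* enables wake_buf handling above */
                .wake_invert = true,    /* set bit = wake disabled */
                .num_regs = 1,
                .irqs = my_irqs,
                .num_irqs = ARRAY_SIZE(my_irqs),
        };
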
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c
index f05fc74dd84a..98745dd77e8c 100644
--- a/drivers/base/regmap/regmap-mmio.c
+++ b/drivers/base/regmap/regmap-mmio.c
@@ -16,6 +16,7 @@
16 * along with this program. If not, see <http://www.gnu.org/licenses/>. 16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */ 17 */
18 18
19#include <linux/clk.h>
19#include <linux/err.h> 20#include <linux/err.h>
20#include <linux/init.h> 21#include <linux/init.h>
21#include <linux/io.h> 22#include <linux/io.h>
@@ -26,6 +27,7 @@
26struct regmap_mmio_context { 27struct regmap_mmio_context {
27 void __iomem *regs; 28 void __iomem *regs;
28 unsigned val_bytes; 29 unsigned val_bytes;
30 struct clk *clk;
29}; 31};
30 32
31static int regmap_mmio_gather_write(void *context, 33static int regmap_mmio_gather_write(void *context,
@@ -34,9 +36,16 @@ static int regmap_mmio_gather_write(void *context,
34{ 36{
35 struct regmap_mmio_context *ctx = context; 37 struct regmap_mmio_context *ctx = context;
36 u32 offset; 38 u32 offset;
39 int ret;
37 40
38 BUG_ON(reg_size != 4); 41 BUG_ON(reg_size != 4);
39 42
43 if (ctx->clk) {
44 ret = clk_enable(ctx->clk);
45 if (ret < 0)
46 return ret;
47 }
48
40 offset = *(u32 *)reg; 49 offset = *(u32 *)reg;
41 50
42 while (val_size) { 51 while (val_size) {
@@ -64,6 +73,9 @@ static int regmap_mmio_gather_write(void *context,
64 offset += ctx->val_bytes; 73 offset += ctx->val_bytes;
65 } 74 }
66 75
76 if (ctx->clk)
77 clk_disable(ctx->clk);
78
67 return 0; 79 return 0;
68} 80}
69 81
@@ -80,9 +92,16 @@ static int regmap_mmio_read(void *context,
80{ 92{
81 struct regmap_mmio_context *ctx = context; 93 struct regmap_mmio_context *ctx = context;
82 u32 offset; 94 u32 offset;
95 int ret;
83 96
84 BUG_ON(reg_size != 4); 97 BUG_ON(reg_size != 4);
85 98
99 if (ctx->clk) {
100 ret = clk_enable(ctx->clk);
101 if (ret < 0)
102 return ret;
103 }
104
86 offset = *(u32 *)reg; 105 offset = *(u32 *)reg;
87 106
88 while (val_size) { 107 while (val_size) {
@@ -110,11 +129,20 @@ static int regmap_mmio_read(void *context,
110 offset += ctx->val_bytes; 129 offset += ctx->val_bytes;
111 } 130 }
112 131
132 if (ctx->clk)
133 clk_disable(ctx->clk);
134
113 return 0; 135 return 0;
114} 136}
115 137
116static void regmap_mmio_free_context(void *context) 138static void regmap_mmio_free_context(void *context)
117{ 139{
140 struct regmap_mmio_context *ctx = context;
141
142 if (ctx->clk) {
143 clk_unprepare(ctx->clk);
144 clk_put(ctx->clk);
145 }
118 kfree(context); 146 kfree(context);
119} 147}
120 148
@@ -128,11 +156,14 @@ static struct regmap_bus regmap_mmio = {
128 .val_format_endian_default = REGMAP_ENDIAN_NATIVE, 156 .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
129}; 157};
130 158
131static struct regmap_mmio_context *regmap_mmio_gen_context(void __iomem *regs, 159static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
160 const char *clk_id,
161 void __iomem *regs,
132 const struct regmap_config *config) 162 const struct regmap_config *config)
133{ 163{
134 struct regmap_mmio_context *ctx; 164 struct regmap_mmio_context *ctx;
135 int min_stride; 165 int min_stride;
166 int ret;
136 167
137 if (config->reg_bits != 32) 168 if (config->reg_bits != 32)
138 return ERR_PTR(-EINVAL); 169 return ERR_PTR(-EINVAL);
@@ -179,37 +210,59 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(void __iomem *regs,
179 ctx->regs = regs; 210 ctx->regs = regs;
180 ctx->val_bytes = config->val_bits / 8; 211 ctx->val_bytes = config->val_bits / 8;
181 212
213 if (clk_id == NULL)
214 return ctx;
215
216 ctx->clk = clk_get(dev, clk_id);
217 if (IS_ERR(ctx->clk)) {
218 ret = PTR_ERR(ctx->clk);
219 goto err_free;
220 }
221
222 ret = clk_prepare(ctx->clk);
223 if (ret < 0) {
224 clk_put(ctx->clk);
225 goto err_free;
226 }
227
182 return ctx; 228 return ctx;
229
230err_free:
231 kfree(ctx);
232
233 return ERR_PTR(ret);
183} 234}
184 235
185/** 236/**
186 * regmap_init_mmio(): Initialise register map 237 * regmap_init_mmio_clk(): Initialise register map with register clock
187 * 238 *
188 * @dev: Device that will be interacted with 239 * @dev: Device that will be interacted with
240 * @clk_id: register clock consumer ID
189 * @regs: Pointer to memory-mapped IO region 241 * @regs: Pointer to memory-mapped IO region
190 * @config: Configuration for register map 242 * @config: Configuration for register map
191 * 243 *
192 * The return value will be an ERR_PTR() on error or a valid pointer to 244 * The return value will be an ERR_PTR() on error or a valid pointer to
193 * a struct regmap. 245 * a struct regmap.
194 */ 246 */
195struct regmap *regmap_init_mmio(struct device *dev, 247struct regmap *regmap_init_mmio_clk(struct device *dev, const char *clk_id,
196 void __iomem *regs, 248 void __iomem *regs,
197 const struct regmap_config *config) 249 const struct regmap_config *config)
198{ 250{
199 struct regmap_mmio_context *ctx; 251 struct regmap_mmio_context *ctx;
200 252
201 ctx = regmap_mmio_gen_context(regs, config); 253 ctx = regmap_mmio_gen_context(dev, clk_id, regs, config);
202 if (IS_ERR(ctx)) 254 if (IS_ERR(ctx))
203 return ERR_CAST(ctx); 255 return ERR_CAST(ctx);
204 256
205 return regmap_init(dev, &regmap_mmio, ctx, config); 257 return regmap_init(dev, &regmap_mmio, ctx, config);
206} 258}
207EXPORT_SYMBOL_GPL(regmap_init_mmio); 259EXPORT_SYMBOL_GPL(regmap_init_mmio_clk);
208 260
209/** 261/**
210 * devm_regmap_init_mmio(): Initialise managed register map 262 * devm_regmap_init_mmio_clk(): Initialise managed register map with clock
211 * 263 *
212 * @dev: Device that will be interacted with 264 * @dev: Device that will be interacted with
265 * @clk_id: register clock consumer ID
213 * @regs: Pointer to memory-mapped IO region 266 * @regs: Pointer to memory-mapped IO region
214 * @config: Configuration for register map 267 * @config: Configuration for register map
215 * 268 *
@@ -217,18 +270,18 @@ EXPORT_SYMBOL_GPL(regmap_init_mmio);
217 * to a struct regmap. The regmap will be automatically freed by the 270 * to a struct regmap. The regmap will be automatically freed by the
218 * device management code. 271 * device management code.
219 */ 272 */
220struct regmap *devm_regmap_init_mmio(struct device *dev, 273struct regmap *devm_regmap_init_mmio_clk(struct device *dev, const char *clk_id,
221 void __iomem *regs, 274 void __iomem *regs,
222 const struct regmap_config *config) 275 const struct regmap_config *config)
223{ 276{
224 struct regmap_mmio_context *ctx; 277 struct regmap_mmio_context *ctx;
225 278
226 ctx = regmap_mmio_gen_context(regs, config); 279 ctx = regmap_mmio_gen_context(dev, clk_id, regs, config);
227 if (IS_ERR(ctx)) 280 if (IS_ERR(ctx))
228 return ERR_CAST(ctx); 281 return ERR_CAST(ctx);
229 282
230 return devm_regmap_init(dev, &regmap_mmio, ctx, config); 283 return devm_regmap_init(dev, &regmap_mmio, ctx, config);
231} 284}
232EXPORT_SYMBOL_GPL(devm_regmap_init_mmio); 285EXPORT_SYMBOL_GPL(devm_regmap_init_mmio_clk);
233 286
234MODULE_LICENSE("GPL v2"); 287MODULE_LICENSE("GPL v2");
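
Usage sketch for the new clock-aware initialiser; the "apb_pclk" consumer ID is an assumption, my_mmio_config is presumed defined elsewhere with reg_bits = 32 (anything else is rejected above), and base is an already-mapped MMIO region:

        static int my_mmio_probe(struct platform_device *pdev,
                                 void __iomem *base)
        {
                struct regmap *map;

                /*
                 * The named clock is clk_get()/clk_prepare()d at init
                 * time and clk_enable()d around each register access.
                 */
                map = devm_regmap_init_mmio_clk(&pdev->dev, "apb_pclk",
                                                base, &my_mmio_config);
                if (IS_ERR(map))
                        return PTR_ERR(map);

                return 0;
        }
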
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
index ffa46a92ad33..4c506bd940f3 100644
--- a/drivers/base/regmap/regmap-spi.c
+++ b/drivers/base/regmap/regmap-spi.c
@@ -15,6 +15,21 @@
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/module.h> 16#include <linux/module.h>
17 17
18#include "internal.h"
19
20struct regmap_async_spi {
21 struct regmap_async core;
22 struct spi_message m;
23 struct spi_transfer t[2];
24};
25
26static void regmap_spi_complete(void *data)
27{
28 struct regmap_async_spi *async = data;
29
30 regmap_async_complete_cb(&async->core, async->m.status);
31}
32
18static int regmap_spi_write(void *context, const void *data, size_t count) 33static int regmap_spi_write(void *context, const void *data, size_t count)
19{ 34{
20 struct device *dev = context; 35 struct device *dev = context;
@@ -40,6 +55,43 @@ static int regmap_spi_gather_write(void *context,
40 return spi_sync(spi, &m); 55 return spi_sync(spi, &m);
41} 56}
42 57
58static int regmap_spi_async_write(void *context,
59 const void *reg, size_t reg_len,
60 const void *val, size_t val_len,
61 struct regmap_async *a)
62{
63 struct regmap_async_spi *async = container_of(a,
64 struct regmap_async_spi,
65 core);
66 struct device *dev = context;
67 struct spi_device *spi = to_spi_device(dev);
68
69 async->t[0].tx_buf = reg;
70 async->t[0].len = reg_len;
71 async->t[1].tx_buf = val;
72 async->t[1].len = val_len;
73
74 spi_message_init(&async->m);
75 spi_message_add_tail(&async->t[0], &async->m);
76 spi_message_add_tail(&async->t[1], &async->m);
77
78 async->m.complete = regmap_spi_complete;
79 async->m.context = async;
80
81 return spi_async(spi, &async->m);
82}
83
84static struct regmap_async *regmap_spi_async_alloc(void)
85{
86 struct regmap_async_spi *async_spi;
87
88 async_spi = kzalloc(sizeof(*async_spi), GFP_KERNEL);
89 if (!async_spi)
90 return NULL;
91
92 return &async_spi->core;
93}
94
43static int regmap_spi_read(void *context, 95static int regmap_spi_read(void *context,
44 const void *reg, size_t reg_size, 96 const void *reg, size_t reg_size,
45 void *val, size_t val_size) 97 void *val, size_t val_size)
@@ -53,6 +105,8 @@ static int regmap_spi_read(void *context,
53static struct regmap_bus regmap_spi = { 105static struct regmap_bus regmap_spi = {
54 .write = regmap_spi_write, 106 .write = regmap_spi_write,
55 .gather_write = regmap_spi_gather_write, 107 .gather_write = regmap_spi_gather_write,
108 .async_write = regmap_spi_async_write,
109 .async_alloc = regmap_spi_async_alloc,
56 .read = regmap_spi_read, 110 .read = regmap_spi_read,
57 .read_flag_mask = 0x80, 111 .read_flag_mask = 0x80,
58}; 112};
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index f00b059c057a..3d2367501fd0 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -16,6 +16,7 @@
16#include <linux/mutex.h> 16#include <linux/mutex.h>
17#include <linux/err.h> 17#include <linux/err.h>
18#include <linux/rbtree.h> 18#include <linux/rbtree.h>
19#include <linux/sched.h>
19 20
20#define CREATE_TRACE_POINTS 21#define CREATE_TRACE_POINTS
21#include <trace/events/regmap.h> 22#include <trace/events/regmap.h>
@@ -34,6 +35,22 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
34 unsigned int mask, unsigned int val, 35 unsigned int mask, unsigned int val,
35 bool *change); 36 bool *change);
36 37
38static int _regmap_bus_read(void *context, unsigned int reg,
39 unsigned int *val);
40static int _regmap_bus_formatted_write(void *context, unsigned int reg,
41 unsigned int val);
42static int _regmap_bus_raw_write(void *context, unsigned int reg,
43 unsigned int val);
44
45static void async_cleanup(struct work_struct *work)
46{
47 struct regmap_async *async = container_of(work, struct regmap_async,
48 cleanup);
49
50 kfree(async->work_buf);
51 kfree(async);
52}
53
37bool regmap_reg_in_ranges(unsigned int reg, 54bool regmap_reg_in_ranges(unsigned int reg,
38 const struct regmap_range *ranges, 55 const struct regmap_range *ranges,
39 unsigned int nranges) 56 unsigned int nranges)
@@ -372,7 +389,7 @@ struct regmap *regmap_init(struct device *dev,
372 enum regmap_endian reg_endian, val_endian; 389 enum regmap_endian reg_endian, val_endian;
373 int i, j; 390 int i, j;
374 391
375 if (!bus || !config) 392 if (!config)
376 goto err; 393 goto err;
377 394
378 map = kzalloc(sizeof(*map), GFP_KERNEL); 395 map = kzalloc(sizeof(*map), GFP_KERNEL);
@@ -386,7 +403,8 @@ struct regmap *regmap_init(struct device *dev,
386 map->unlock = config->unlock; 403 map->unlock = config->unlock;
387 map->lock_arg = config->lock_arg; 404 map->lock_arg = config->lock_arg;
388 } else { 405 } else {
389 if (bus->fast_io) { 406 if ((bus && bus->fast_io) ||
407 config->fast_io) {
390 spin_lock_init(&map->spinlock); 408 spin_lock_init(&map->spinlock);
391 map->lock = regmap_lock_spinlock; 409 map->lock = regmap_lock_spinlock;
392 map->unlock = regmap_unlock_spinlock; 410 map->unlock = regmap_unlock_spinlock;
@@ -423,13 +441,27 @@ struct regmap *regmap_init(struct device *dev,
423 map->cache_type = config->cache_type; 441 map->cache_type = config->cache_type;
424 map->name = config->name; 442 map->name = config->name;
425 443
444 spin_lock_init(&map->async_lock);
445 INIT_LIST_HEAD(&map->async_list);
446 init_waitqueue_head(&map->async_waitq);
447
426 if (config->read_flag_mask || config->write_flag_mask) { 448 if (config->read_flag_mask || config->write_flag_mask) {
427 map->read_flag_mask = config->read_flag_mask; 449 map->read_flag_mask = config->read_flag_mask;
428 map->write_flag_mask = config->write_flag_mask; 450 map->write_flag_mask = config->write_flag_mask;
429 } else { 451 } else if (bus) {
430 map->read_flag_mask = bus->read_flag_mask; 452 map->read_flag_mask = bus->read_flag_mask;
431 } 453 }
432 454
455 if (!bus) {
456 map->reg_read = config->reg_read;
457 map->reg_write = config->reg_write;
458
459 map->defer_caching = false;
460 goto skip_format_initialization;
461 } else {
462 map->reg_read = _regmap_bus_read;
463 }
464
433 reg_endian = config->reg_format_endian; 465 reg_endian = config->reg_format_endian;
434 if (reg_endian == REGMAP_ENDIAN_DEFAULT) 466 if (reg_endian == REGMAP_ENDIAN_DEFAULT)
435 reg_endian = bus->reg_format_endian_default; 467 reg_endian = bus->reg_format_endian_default;
@@ -500,6 +532,12 @@ struct regmap *regmap_init(struct device *dev,
500 } 532 }
501 break; 533 break;
502 534
535 case 24:
536 if (reg_endian != REGMAP_ENDIAN_BIG)
537 goto err_map;
538 map->format.format_reg = regmap_format_24;
539 break;
540
503 case 32: 541 case 32:
504 switch (reg_endian) { 542 switch (reg_endian) {
505 case REGMAP_ENDIAN_BIG: 543 case REGMAP_ENDIAN_BIG:
@@ -575,6 +613,16 @@ struct regmap *regmap_init(struct device *dev,
575 goto err_map; 613 goto err_map;
576 } 614 }
577 615
616 if (map->format.format_write) {
617 map->defer_caching = false;
618 map->reg_write = _regmap_bus_formatted_write;
619 } else if (map->format.format_val) {
620 map->defer_caching = true;
621 map->reg_write = _regmap_bus_raw_write;
622 }
623
624skip_format_initialization:
625
578 map->range_tree = RB_ROOT; 626 map->range_tree = RB_ROOT;
579 for (i = 0; i < config->num_ranges; i++) { 627 for (i = 0; i < config->num_ranges; i++) {
580 const struct regmap_range_cfg *range_cfg = &config->ranges[i]; 628 const struct regmap_range_cfg *range_cfg = &config->ranges[i];
@@ -776,7 +824,7 @@ void regmap_exit(struct regmap *map)
776 regcache_exit(map); 824 regcache_exit(map);
777 regmap_debugfs_exit(map); 825 regmap_debugfs_exit(map);
778 regmap_range_exit(map); 826 regmap_range_exit(map);
779 if (map->bus->free_context) 827 if (map->bus && map->bus->free_context)
780 map->bus->free_context(map->bus_context); 828 map->bus->free_context(map->bus_context);
781 kfree(map->work_buf); 829 kfree(map->work_buf);
782 kfree(map); 830 kfree(map);
@@ -870,15 +918,20 @@ static int _regmap_select_page(struct regmap *map, unsigned int *reg,
870} 918}
871 919
872static int _regmap_raw_write(struct regmap *map, unsigned int reg, 920static int _regmap_raw_write(struct regmap *map, unsigned int reg,
873 const void *val, size_t val_len) 921 const void *val, size_t val_len, bool async)
874{ 922{
875 struct regmap_range_node *range; 923 struct regmap_range_node *range;
924 unsigned long flags;
876 u8 *u8 = map->work_buf; 925 u8 *u8 = map->work_buf;
926 void *work_val = map->work_buf + map->format.reg_bytes +
927 map->format.pad_bytes;
877 void *buf; 928 void *buf;
878 int ret = -ENOTSUPP; 929 int ret = -ENOTSUPP;
879 size_t len; 930 size_t len;
880 int i; 931 int i;
881 932
933 BUG_ON(!map->bus);
934
882 /* Check for unwritable registers before we start */ 935 /* Check for unwritable registers before we start */
883 if (map->writeable_reg) 936 if (map->writeable_reg)
884 for (i = 0; i < val_len / map->format.val_bytes; i++) 937 for (i = 0; i < val_len / map->format.val_bytes; i++)
@@ -918,7 +971,7 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
918 dev_dbg(map->dev, "Writing window %d/%zu\n", 971 dev_dbg(map->dev, "Writing window %d/%zu\n",
919 win_residue, val_len / map->format.val_bytes); 972 win_residue, val_len / map->format.val_bytes);
920 ret = _regmap_raw_write(map, reg, val, win_residue * 973 ret = _regmap_raw_write(map, reg, val, win_residue *
921 map->format.val_bytes); 974 map->format.val_bytes, async);
922 if (ret != 0) 975 if (ret != 0)
923 return ret; 976 return ret;
924 977
@@ -941,6 +994,50 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
941 994
942 u8[0] |= map->write_flag_mask; 995 u8[0] |= map->write_flag_mask;
943 996
997 if (async && map->bus->async_write) {
998 struct regmap_async *async = map->bus->async_alloc();
999 if (!async)
1000 return -ENOMEM;
1001
1002 async->work_buf = kzalloc(map->format.buf_size,
1003 GFP_KERNEL | GFP_DMA);
1004 if (!async->work_buf) {
1005 kfree(async);
1006 return -ENOMEM;
1007 }
1008
1009 INIT_WORK(&async->cleanup, async_cleanup);
1010 async->map = map;
1011
1012 /* If the caller supplied the value we can use it safely. */
1013 memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
1014 map->format.reg_bytes + map->format.val_bytes);
1015 if (val == work_val)
1016 val = async->work_buf + map->format.pad_bytes +
1017 map->format.reg_bytes;
1018
1019 spin_lock_irqsave(&map->async_lock, flags);
1020 list_add_tail(&async->list, &map->async_list);
1021 spin_unlock_irqrestore(&map->async_lock, flags);
1022
1023 ret = map->bus->async_write(map->bus_context, async->work_buf,
1024 map->format.reg_bytes +
1025 map->format.pad_bytes,
1026 val, val_len, async);
1027
1028 if (ret != 0) {
1029 dev_err(map->dev, "Failed to schedule write: %d\n",
1030 ret);
1031
1032 spin_lock_irqsave(&map->async_lock, flags);
1033 list_del(&async->list);
1034 spin_unlock_irqrestore(&map->async_lock, flags);
1035
1036 kfree(async->work_buf);
1037 kfree(async);
1038 }
1039 }
1040
944 trace_regmap_hw_write_start(map->dev, reg, 1041 trace_regmap_hw_write_start(map->dev, reg,
945 val_len / map->format.val_bytes); 1042 val_len / map->format.val_bytes);
946 1043
@@ -948,8 +1045,7 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
948 * send the work_buf directly, otherwise try to do a gather 1045 * send the work_buf directly, otherwise try to do a gather
949 * write. 1046 * write.
950 */ 1047 */
951 if (val == (map->work_buf + map->format.pad_bytes + 1048 if (val == work_val)
952 map->format.reg_bytes))
953 ret = map->bus->write(map->bus_context, map->work_buf, 1049 ret = map->bus->write(map->bus_context, map->work_buf,
954 map->format.reg_bytes + 1050 map->format.reg_bytes +
955 map->format.pad_bytes + 1051 map->format.pad_bytes +
@@ -981,14 +1077,62 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
981 return ret; 1077 return ret;
982} 1078}
983 1079
1080static int _regmap_bus_formatted_write(void *context, unsigned int reg,
1081 unsigned int val)
1082{
1083 int ret;
1084 struct regmap_range_node *range;
1085 struct regmap *map = context;
1086
1087 BUG_ON(!map->bus || !map->format.format_write);
1088
1089 range = _regmap_range_lookup(map, reg);
1090 if (range) {
1091 ret = _regmap_select_page(map, &reg, range, 1);
1092 if (ret != 0)
1093 return ret;
1094 }
1095
1096 map->format.format_write(map, reg, val);
1097
1098 trace_regmap_hw_write_start(map->dev, reg, 1);
1099
1100 ret = map->bus->write(map->bus_context, map->work_buf,
1101 map->format.buf_size);
1102
1103 trace_regmap_hw_write_done(map->dev, reg, 1);
1104
1105 return ret;
1106}
1107
1108static int _regmap_bus_raw_write(void *context, unsigned int reg,
1109 unsigned int val)
1110{
1111 struct regmap *map = context;
1112
1113 BUG_ON(!map->bus || !map->format.format_val);
1114
1115 map->format.format_val(map->work_buf + map->format.reg_bytes
1116 + map->format.pad_bytes, val, 0);
1117 return _regmap_raw_write(map, reg,
1118 map->work_buf +
1119 map->format.reg_bytes +
1120 map->format.pad_bytes,
1121 map->format.val_bytes, false);
1122}
1123
1124static inline void *_regmap_map_get_context(struct regmap *map)
1125{
1126 return (map->bus) ? map : map->bus_context;
1127}
1128
984int _regmap_write(struct regmap *map, unsigned int reg, 1129int _regmap_write(struct regmap *map, unsigned int reg,
985 unsigned int val) 1130 unsigned int val)
986{ 1131{
987 struct regmap_range_node *range;
988 int ret; 1132 int ret;
989 BUG_ON(!map->format.format_write && !map->format.format_val); 1133 void *context = _regmap_map_get_context(map);
990 1134
991 if (!map->cache_bypass && map->format.format_write) { 1135 if (!map->cache_bypass && !map->defer_caching) {
992 ret = regcache_write(map, reg, val); 1136 ret = regcache_write(map, reg, val);
993 if (ret != 0) 1137 if (ret != 0)
994 return ret; 1138 return ret;
@@ -1005,33 +1149,7 @@ int _regmap_write(struct regmap *map, unsigned int reg,
1005 1149
1006 trace_regmap_reg_write(map->dev, reg, val); 1150 trace_regmap_reg_write(map->dev, reg, val);
1007 1151
1008 if (map->format.format_write) { 1152 return map->reg_write(context, reg, val);
1009 range = _regmap_range_lookup(map, reg);
1010 if (range) {
1011 ret = _regmap_select_page(map, &reg, range, 1);
1012 if (ret != 0)
1013 return ret;
1014 }
1015
1016 map->format.format_write(map, reg, val);
1017
1018 trace_regmap_hw_write_start(map->dev, reg, 1);
1019
1020 ret = map->bus->write(map->bus_context, map->work_buf,
1021 map->format.buf_size);
1022
1023 trace_regmap_hw_write_done(map->dev, reg, 1);
1024
1025 return ret;
1026 } else {
1027 map->format.format_val(map->work_buf + map->format.reg_bytes
1028 + map->format.pad_bytes, val, 0);
1029 return _regmap_raw_write(map, reg,
1030 map->work_buf +
1031 map->format.reg_bytes +
1032 map->format.pad_bytes,
1033 map->format.val_bytes);
1034 }
1035} 1153}
1036 1154
1037/** 1155/**
@@ -1082,6 +1200,8 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
1082{ 1200{
1083 int ret; 1201 int ret;
1084 1202
1203 if (!map->bus)
1204 return -EINVAL;
1085 if (val_len % map->format.val_bytes) 1205 if (val_len % map->format.val_bytes)
1086 return -EINVAL; 1206 return -EINVAL;
1087 if (reg % map->reg_stride) 1207 if (reg % map->reg_stride)
@@ -1089,7 +1209,7 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
1089 1209
1090 map->lock(map->lock_arg); 1210 map->lock(map->lock_arg);
1091 1211
1092 ret = _regmap_raw_write(map, reg, val, val_len); 1212 ret = _regmap_raw_write(map, reg, val, val_len, false);
1093 1213
1094 map->unlock(map->lock_arg); 1214 map->unlock(map->lock_arg);
1095 1215
@@ -1118,6 +1238,8 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
1118 size_t val_bytes = map->format.val_bytes; 1238 size_t val_bytes = map->format.val_bytes;
1119 void *wval; 1239 void *wval;
1120 1240
1241 if (!map->bus)
1242 return -EINVAL;
1121 if (!map->format.parse_val) 1243 if (!map->format.parse_val)
1122 return -EINVAL; 1244 return -EINVAL;
1123 if (reg % map->reg_stride) 1245 if (reg % map->reg_stride)
@@ -1145,14 +1267,15 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
1145 if (map->use_single_rw) { 1267 if (map->use_single_rw) {
1146 for (i = 0; i < val_count; i++) { 1268 for (i = 0; i < val_count; i++) {
1147 ret = regmap_raw_write(map, 1269 ret = regmap_raw_write(map,
1148 reg + (i * map->reg_stride), 1270 reg + (i * map->reg_stride),
1149 val + (i * val_bytes), 1271 val + (i * val_bytes),
1150 val_bytes); 1272 val_bytes);
1151 if (ret != 0) 1273 if (ret != 0)
1152 return ret; 1274 return ret;
1153 } 1275 }
1154 } else { 1276 } else {
1155 ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count); 1277 ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count,
1278 false);
1156 } 1279 }
1157 1280
1158 if (val_bytes != 1) 1281 if (val_bytes != 1)
@@ -1164,6 +1287,48 @@ out:
1164} 1287}
1165EXPORT_SYMBOL_GPL(regmap_bulk_write); 1288EXPORT_SYMBOL_GPL(regmap_bulk_write);
1166 1289
1290/**
1291 * regmap_raw_write_async(): Write raw values to one or more registers
1292 * asynchronously
1293 *
1294 * @map: Register map to write to
1295 * @reg: Initial register to write to
1296 * @val: Block of data to be written, laid out for direct transmission to the
1297 * device. Must be valid until regmap_async_complete() is called.
1298 * @val_len: Length of data pointed to by val.
1299 *
1300 * This function is intended to be used for things like firmware
1301 * download where a large block of data needs to be transferred to the
1302 * device. No formatting will be done on the data provided.
1303 *
1304 * If supported by the underlying bus the write will be scheduled
1305 * asynchronously, helping maximise I/O speed on higher speed buses
1306 * like SPI. regmap_async_complete() can be called to ensure that all
 1307 * asynchronous writes have been completed.
1308 *
1309 * A value of zero will be returned on success, a negative errno will
1310 * be returned in error cases.
1311 */
1312int regmap_raw_write_async(struct regmap *map, unsigned int reg,
1313 const void *val, size_t val_len)
1314{
1315 int ret;
1316
1317 if (val_len % map->format.val_bytes)
1318 return -EINVAL;
1319 if (reg % map->reg_stride)
1320 return -EINVAL;
1321
1322 map->lock(map->lock_arg);
1323
1324 ret = _regmap_raw_write(map, reg, val, val_len, true);
1325
1326 map->unlock(map->lock_arg);
1327
1328 return ret;
1329}
1330EXPORT_SYMBOL_GPL(regmap_raw_write_async);
1331
1167static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val, 1332static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
1168 unsigned int val_len) 1333 unsigned int val_len)
1169{ 1334{
@@ -1171,6 +1336,8 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
1171 u8 *u8 = map->work_buf; 1336 u8 *u8 = map->work_buf;
1172 int ret; 1337 int ret;
1173 1338
1339 BUG_ON(!map->bus);
1340
1174 range = _regmap_range_lookup(map, reg); 1341 range = _regmap_range_lookup(map, reg);
1175 if (range) { 1342 if (range) {
1176 ret = _regmap_select_page(map, &reg, range, 1343 ret = _regmap_select_page(map, &reg, range,
@@ -1202,10 +1369,29 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
1202 return ret; 1369 return ret;
1203} 1370}
1204 1371
1372static int _regmap_bus_read(void *context, unsigned int reg,
1373 unsigned int *val)
1374{
1375 int ret;
1376 struct regmap *map = context;
1377
1378 if (!map->format.parse_val)
1379 return -EINVAL;
1380
1381 ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes);
1382 if (ret == 0)
1383 *val = map->format.parse_val(map->work_buf);
1384
1385 return ret;
1386}
1387
1205static int _regmap_read(struct regmap *map, unsigned int reg, 1388static int _regmap_read(struct regmap *map, unsigned int reg,
1206 unsigned int *val) 1389 unsigned int *val)
1207{ 1390{
1208 int ret; 1391 int ret;
1392 void *context = _regmap_map_get_context(map);
1393
1394 BUG_ON(!map->reg_read);
1209 1395
1210 if (!map->cache_bypass) { 1396 if (!map->cache_bypass) {
1211 ret = regcache_read(map, reg, val); 1397 ret = regcache_read(map, reg, val);
@@ -1213,26 +1399,21 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
1213 return 0; 1399 return 0;
1214 } 1400 }
1215 1401
1216 if (!map->format.parse_val)
1217 return -EINVAL;
1218
1219 if (map->cache_only) 1402 if (map->cache_only)
1220 return -EBUSY; 1403 return -EBUSY;
1221 1404
1222 ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes); 1405 ret = map->reg_read(context, reg, val);
1223 if (ret == 0) { 1406 if (ret == 0) {
1224 *val = map->format.parse_val(map->work_buf);
1225
1226#ifdef LOG_DEVICE 1407#ifdef LOG_DEVICE
1227 if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0) 1408 if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
1228 dev_info(map->dev, "%x => %x\n", reg, *val); 1409 dev_info(map->dev, "%x => %x\n", reg, *val);
1229#endif 1410#endif
1230 1411
1231 trace_regmap_reg_read(map->dev, reg, *val); 1412 trace_regmap_reg_read(map->dev, reg, *val);
1232 }
1233 1413
1234 if (ret == 0 && !map->cache_bypass) 1414 if (!map->cache_bypass)
1235 regcache_write(map, reg, *val); 1415 regcache_write(map, reg, *val);
1416 }
1236 1417
1237 return ret; 1418 return ret;
1238} 1419}
@@ -1283,6 +1464,8 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
1283 unsigned int v; 1464 unsigned int v;
1284 int ret, i; 1465 int ret, i;
1285 1466
1467 if (!map->bus)
1468 return -EINVAL;
1286 if (val_len % map->format.val_bytes) 1469 if (val_len % map->format.val_bytes)
1287 return -EINVAL; 1470 return -EINVAL;
1288 if (reg % map->reg_stride) 1471 if (reg % map->reg_stride)
@@ -1334,6 +1517,8 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
1334 size_t val_bytes = map->format.val_bytes; 1517 size_t val_bytes = map->format.val_bytes;
1335 bool vol = regmap_volatile_range(map, reg, val_count); 1518 bool vol = regmap_volatile_range(map, reg, val_count);
1336 1519
1520 if (!map->bus)
1521 return -EINVAL;
1337 if (!map->format.parse_val) 1522 if (!map->format.parse_val)
1338 return -EINVAL; 1523 return -EINVAL;
1339 if (reg % map->reg_stride) 1524 if (reg % map->reg_stride)
@@ -1450,6 +1635,68 @@ int regmap_update_bits_check(struct regmap *map, unsigned int reg,
1450} 1635}
1451EXPORT_SYMBOL_GPL(regmap_update_bits_check); 1636EXPORT_SYMBOL_GPL(regmap_update_bits_check);
1452 1637
1638void regmap_async_complete_cb(struct regmap_async *async, int ret)
1639{
1640 struct regmap *map = async->map;
1641 bool wake;
1642
1643 spin_lock(&map->async_lock);
1644
1645 list_del(&async->list);
1646 wake = list_empty(&map->async_list);
1647
1648 if (ret != 0)
1649 map->async_ret = ret;
1650
1651 spin_unlock(&map->async_lock);
1652
1653 schedule_work(&async->cleanup);
1654
1655 if (wake)
1656 wake_up(&map->async_waitq);
1657}
1658EXPORT_SYMBOL_GPL(regmap_async_complete_cb);
1659
1660static int regmap_async_is_done(struct regmap *map)
1661{
1662 unsigned long flags;
1663 int ret;
1664
1665 spin_lock_irqsave(&map->async_lock, flags);
1666 ret = list_empty(&map->async_list);
1667 spin_unlock_irqrestore(&map->async_lock, flags);
1668
1669 return ret;
1670}
1671
1672/**
1673 * regmap_async_complete: Ensure all asynchronous I/O has completed.
1674 *
1675 * @map: Map to operate on.
1676 *
1677 * Blocks until any pending asynchronous I/O has completed. Returns
1678 * an error code for any failed I/O operations.
1679 */
1680int regmap_async_complete(struct regmap *map)
1681{
1682 unsigned long flags;
1683 int ret;
1684
 1685 /* Nothing to do if the bus has no async support */
1686 if (!map->bus->async_write)
1687 return 0;
1688
1689 wait_event(map->async_waitq, regmap_async_is_done(map));
1690
1691 spin_lock_irqsave(&map->async_lock, flags);
1692 ret = map->async_ret;
1693 map->async_ret = 0;
1694 spin_unlock_irqrestore(&map->async_lock, flags);
1695
1696 return ret;
1697}
1698EXPORT_SYMBOL_GPL(regmap_async_complete);
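
A sketch of the firmware-download pattern this async API targets. The chunk size and MY_FW_DATA_REG are invented, val_bytes is assumed to be 1 so any chunk length is valid, and the fw buffer must stay valid until regmap_async_complete() returns:

        static int my_download_fw(struct regmap *map, const u8 *fw, size_t len)
        {
                const size_t chunk = 256;
                size_t off;
                int ret = 0, err;

                for (off = 0; off < len; off += chunk) {
                        /* Queued on the bus if it supports async_write. */
                        ret = regmap_raw_write_async(map, MY_FW_DATA_REG,
                                                     fw + off,
                                                     min(chunk, len - off));
                        if (ret)
                                break;
                }

                /* Always wait for the writes already queued. */
                err = regmap_async_complete(map);
                return ret ?: err;
        }
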
1699
1453/** 1700/**
1454 * regmap_register_patch: Register and apply register updates to be applied 1701 * regmap_register_patch: Register and apply register updates to be applied
 1455 * on device initialisation 1702 * on device initialisation