author		Dmitry Torokhov <dmitry.torokhov@gmail.com>	2012-01-09 02:38:23 -0500
committer	Dmitry Torokhov <dmitry.torokhov@gmail.com>	2012-01-09 02:38:23 -0500
commit		da733563be5a9da26fe81d9f007262d00b846e22 (patch)
tree		db28291df94a2043af2123911984c5c173da4e6f /drivers/base
parent		6ccbcf2cb41131f8d56ef0723bf3f7c1f8486076 (diff)
parent		dab78d7924598ea4031663dd10db814e2e324928 (diff)
Merge branch 'next' into for-linus
Diffstat (limited to 'drivers/base')
35 files changed, 2793 insertions(+), 410 deletions(-)
diff --git a/drivers/base/base.h b/drivers/base/base.h
index a34dca0ad041..21c1b96c34c6 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -1,3 +1,4 @@
+#include <linux/notifier.h>
 
 /**
  * struct subsys_private - structure to hold the private to the driver core portions of the bus_type/class structure.
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 4f1df2e8fd74..b80d91cc8c3a 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -47,6 +47,18 @@ static ssize_t class_attr_store(struct kobject *kobj, struct attribute *attr,
 	return ret;
 }
 
+static const void *class_attr_namespace(struct kobject *kobj,
+					const struct attribute *attr)
+{
+	struct class_attribute *class_attr = to_class_attr(attr);
+	struct subsys_private *cp = to_subsys_private(kobj);
+	const void *ns = NULL;
+
+	if (class_attr->namespace)
+		ns = class_attr->namespace(cp->class, class_attr);
+	return ns;
+}
+
 static void class_release(struct kobject *kobj)
 {
 	struct subsys_private *cp = to_subsys_private(kobj);
@@ -72,8 +84,9 @@ static const struct kobj_ns_type_operations *class_child_ns_type(struct kobject
 }
 
 static const struct sysfs_ops class_sysfs_ops = {
-	.show	= class_attr_show,
-	.store	= class_attr_store,
+	.show	   = class_attr_show,
+	.store	   = class_attr_store,
+	.namespace = class_attr_namespace,
 };
 
 static struct kobj_type class_ktype = {
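
The new ->namespace callback in class_sysfs_ops lets sysfs show a class attribute only in the namespace it is tagged with. A minimal sketch of a namespace-aware class attribute (foo_show and foo_current_ns() are hypothetical names, not part of this patch):

    /* Sketch only, under assumed foo_* helpers. */
    static const void *foo_attr_namespace(struct class *class,
                                          const struct class_attribute *attr)
    {
            /* Return the tag of the namespace this attribute belongs to;
             * foo_current_ns() is a hypothetical helper. */
            return foo_current_ns();
    }

    static struct class_attribute foo_attr = {
            .attr      = { .name = "foo", .mode = 0444 },
            .show      = foo_show,
            .namespace = foo_attr_namespace,
    };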
diff --git a/drivers/base/core.c b/drivers/base/core.c
index bc8729d603a7..d8b3d89db043 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -22,6 +22,7 @@
 #include <linux/kallsyms.h>
 #include <linux/mutex.h>
 #include <linux/async.h>
+#include <linux/pm_runtime.h>
 
 #include "base.h"
 #include "power/power.h"
@@ -1742,6 +1743,8 @@ void device_shutdown(void)
 		 */
 		list_del_init(&dev->kobj.entry);
 		spin_unlock(&devices_kset->list_lock);
+		/* Disable all device's runtime power management */
+		pm_runtime_disable(dev);
 
 		if (dev->bus && dev->bus->shutdown) {
 			dev_dbg(dev, "shutdown\n");
@@ -1764,8 +1767,8 @@ void device_shutdown(void)
 
 #ifdef CONFIG_PRINTK
 
-static int __dev_printk(const char *level, const struct device *dev,
-			struct va_format *vaf)
+int __dev_printk(const char *level, const struct device *dev,
+		 struct va_format *vaf)
 {
 	if (!dev)
 		return printk("%s(NULL device *): %pV", level, vaf);
@@ -1773,6 +1776,7 @@ static int __dev_printk(const char *level, const struct device *dev,
 	return printk("%s%s %s: %pV",
 		      level, dev_driver_string(dev), dev_name(dev), vaf);
 }
+EXPORT_SYMBOL(__dev_printk);
 
 int dev_printk(const char *level, const struct device *dev,
 	       const char *fmt, ...)
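
Un-static'ing and exporting __dev_printk() lets code outside the driver core hand it a pre-built struct va_format. A sketch of a wrapper in the style of dev_printk() itself (foo_warn() is a made-up name):

    static int foo_warn(const struct device *dev, const char *fmt, ...)
    {
            struct va_format vaf;
            va_list args;
            int r;

            va_start(args, fmt);
            vaf.fmt = fmt;
            vaf.va = &args;
            r = __dev_printk(KERN_WARNING, dev, &vaf);
            va_end(args);

            return r;
    }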
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 6658da743c3a..142e3d600f14 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -147,6 +147,9 @@ probe_failed:
 		printk(KERN_WARNING
 		       "%s: probe of %s failed with error %d\n",
 		       drv->name, dev_name(dev), ret);
+	} else {
+		pr_debug("%s: probe of %s rejects match %d\n",
+			 drv->name, dev_name(dev), ret);
 	}
 	/*
 	 * Ignore errors returned by ->probe so that the next driver can try
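
The new else branch demotes the message for probes that merely decline a device (per the surrounding condition, returns of -ENODEV or -ENXIO) from KERN_WARNING to pr_debug(). Roughly, for a hypothetical driver:

    /* Sketch: foo_probe() and foo_chip_present() are hypothetical. */
    static int foo_probe(struct platform_device *pdev)
    {
            if (!foo_chip_present(pdev))
                    return -ENXIO;  /* now logged at debug level only */

            /* real initialization would go here */
            return 0;
    }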
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index f369e2795985..bb0025c510b3 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -4,6 +4,7 @@
  */
 #include <linux/slab.h>
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/dma-mapping.h>
 
 struct dma_coherent_mem {
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index 763d59c1eb65..6f3676f1559f 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -8,6 +8,7 @@
  */
 
 #include <linux/dma-mapping.h>
+#include <linux/export.h>
 #include <linux/gfp.h>
 
 /*
diff --git a/drivers/base/hypervisor.c b/drivers/base/hypervisor.c
index 6428cba3aadd..4f8b741f4615 100644
--- a/drivers/base/hypervisor.c
+++ b/drivers/base/hypervisor.c
@@ -10,6 +10,7 @@
 
 #include <linux/kobject.h>
 #include <linux/device.h>
+#include <linux/export.h>
 #include "base.h"
 
 struct kobject *hypervisor_kobj;
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 2840ed4668c1..8272d92d22c0 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -224,13 +224,48 @@ int memory_isolate_notify(unsigned long val, void *v)
 }
 
 /*
+ * The probe routines leave the pages reserved, just as the bootmem code does.
+ * Make sure they're still that way.
+ */
+static bool pages_correctly_reserved(unsigned long start_pfn,
+					unsigned long nr_pages)
+{
+	int i, j;
+	struct page *page;
+	unsigned long pfn = start_pfn;
+
+	/*
+	 * memmap between sections is not contiguous except with
+	 * SPARSEMEM_VMEMMAP. We lookup the page once per section
+	 * and assume memmap is contiguous within each section
+	 */
+	for (i = 0; i < sections_per_block; i++, pfn += PAGES_PER_SECTION) {
+		if (WARN_ON_ONCE(!pfn_valid(pfn)))
+			return false;
+		page = pfn_to_page(pfn);
+
+		for (j = 0; j < PAGES_PER_SECTION; j++) {
+			if (PageReserved(page + j))
+				continue;
+
+			printk(KERN_WARNING "section number %ld page number %d "
+			       "not reserved, was it already online?\n",
+			       pfn_to_section_nr(pfn), j);
+
+			return false;
+		}
+	}
+
+	return true;
+}
+
+/*
  * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
  * OK to have direct references to sparsemem variables in here.
  */
 static int
 memory_block_action(unsigned long phys_index, unsigned long action)
 {
-	int i;
 	unsigned long start_pfn, start_paddr;
 	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
 	struct page *first_page;
@@ -238,26 +273,13 @@ memory_block_action(unsigned long phys_index, unsigned long action)
 
 	first_page = pfn_to_page(phys_index << PFN_SECTION_SHIFT);
 
-	/*
-	 * The probe routines leave the pages reserved, just
-	 * as the bootmem code does. Make sure they're still
-	 * that way.
-	 */
-	if (action == MEM_ONLINE) {
-		for (i = 0; i < nr_pages; i++) {
-			if (PageReserved(first_page+i))
-				continue;
-
-			printk(KERN_WARNING "section number %ld page number %d "
-				"not reserved, was it already online?\n",
-				phys_index, i);
-			return -EBUSY;
-		}
-	}
-
 	switch (action) {
 	case MEM_ONLINE:
 		start_pfn = page_to_pfn(first_page);
+
+		if (!pages_correctly_reserved(start_pfn, nr_pages))
+			return -EBUSY;
+
 		ret = online_pages(start_pfn, nr_pages);
 		break;
 	case MEM_OFFLINE:
@@ -380,9 +402,13 @@ memory_probe_store(struct class *class, struct class_attribute *attr,
 	u64 phys_addr;
 	int nid;
 	int i, ret;
+	unsigned long pages_per_block = PAGES_PER_SECTION * sections_per_block;
 
 	phys_addr = simple_strtoull(buf, NULL, 0);
 
+	if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1))
+		return -EINVAL;
+
 	for (i = 0; i < sections_per_block; i++) {
 		nid = memory_add_physaddr_to_nid(phys_addr);
 		ret = add_memory(nid, phys_addr,
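
The new check rejects probe addresses that are not aligned to a whole memory block. Worked numbers, assuming 4 KiB pages (PAGE_SHIFT = 12), PAGES_PER_SECTION = 32768 and sections_per_block = 1, i.e. 128 MiB blocks:

    /* Sketch of the mask arithmetic with assumed example values. */
    unsigned long pages_per_block = 32768 * 1;
    u64 mask = ((u64)pages_per_block << 12) - 1;    /* 0x7ffffff */

    /* 0x40000000 & mask == 0 -> accepted, add_memory() runs */
    /* 0x40001000 & mask != 0 -> rejected with -EINVAL up front */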
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 793f796c4da3..5693ecee9a40 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -127,12 +127,13 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
 	       nid, K(node_page_state(nid, NR_WRITEBACK)),
 	       nid, K(node_page_state(nid, NR_FILE_PAGES)),
 	       nid, K(node_page_state(nid, NR_FILE_MAPPED)),
-	       nid, K(node_page_state(nid, NR_ANON_PAGES)
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	       nid, K(node_page_state(nid, NR_ANON_PAGES)
 		    + node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) *
-		    HPAGE_PMD_NR
+		    HPAGE_PMD_NR),
+#else
+	       nid, K(node_page_state(nid, NR_ANON_PAGES)),
 #endif
-		    ),
 	       nid, K(node_page_state(nid, NR_SHMEM)),
 	       nid, node_page_state(nid, NR_KERNEL_STACK) *
 			THREAD_SIZE / 1024,
@@ -143,13 +144,14 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
 	       nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE) +
 			node_page_state(nid, NR_SLAB_UNRECLAIMABLE)),
 	       nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE)),
-	       nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE))
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	       nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE))
 			, nid,
 			K(node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) *
-			HPAGE_PMD_NR)
+			HPAGE_PMD_NR));
+#else
+	       nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE)));
 #endif
-		);
 	n += hugetlb_report_node_meminfo(nid, buf + n);
 	return n;
 }
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 99a5272d7c2f..7a24895543e7 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -375,52 +375,64 @@ void platform_device_unregister(struct platform_device *pdev)
 EXPORT_SYMBOL_GPL(platform_device_unregister);
 
 /**
- * platform_device_register_resndata - add a platform-level device with
+ * platform_device_register_full - add a platform-level device with
  * resources and platform-specific data
  *
- * @parent: parent device for the device we're adding
- * @name: base name of the device we're adding
- * @id: instance id
- * @res: set of resources that needs to be allocated for the device
- * @num: number of resources
- * @data: platform specific data for this platform device
- * @size: size of platform specific data
+ * @pdevinfo: data used to create device
 *
 * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
 */
-struct platform_device *platform_device_register_resndata(
-		struct device *parent,
-		const char *name, int id,
-		const struct resource *res, unsigned int num,
-		const void *data, size_t size)
+struct platform_device *platform_device_register_full(
+		struct platform_device_info *pdevinfo)
 {
 	int ret = -ENOMEM;
 	struct platform_device *pdev;
 
-	pdev = platform_device_alloc(name, id);
+	pdev = platform_device_alloc(pdevinfo->name, pdevinfo->id);
 	if (!pdev)
-		goto err;
+		goto err_alloc;
 
-	pdev->dev.parent = parent;
+	pdev->dev.parent = pdevinfo->parent;
+
+	if (pdevinfo->dma_mask) {
+		/*
+		 * This memory isn't freed when the device is put,
+		 * I don't have a nice idea for that though. Conceptually
+		 * dma_mask in struct device should not be a pointer.
+		 * See http://thread.gmane.org/gmane.linux.kernel.pci/9081
+		 */
+		pdev->dev.dma_mask =
+			kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL);
+		if (!pdev->dev.dma_mask)
+			goto err;
+
+		*pdev->dev.dma_mask = pdevinfo->dma_mask;
+		pdev->dev.coherent_dma_mask = pdevinfo->dma_mask;
+	}
 
-	ret = platform_device_add_resources(pdev, res, num);
+	ret = platform_device_add_resources(pdev,
+			pdevinfo->res, pdevinfo->num_res);
 	if (ret)
 		goto err;
 
-	ret = platform_device_add_data(pdev, data, size);
+	ret = platform_device_add_data(pdev,
+			pdevinfo->data, pdevinfo->size_data);
 	if (ret)
 		goto err;
 
 	ret = platform_device_add(pdev);
 	if (ret) {
 err:
+		kfree(pdev->dev.dma_mask);
+
+err_alloc:
 		platform_device_put(pdev);
 		return ERR_PTR(ret);
 	}
 
 	return pdev;
 }
-EXPORT_SYMBOL_GPL(platform_device_register_resndata);
+EXPORT_SYMBOL_GPL(platform_device_register_full);
 
 static int platform_drv_probe(struct device *_dev)
 {
@@ -614,7 +626,7 @@ static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
 		return rc;
 
 	add_uevent_var(env, "MODALIAS=%s%s", PLATFORM_MODULE_PREFIX,
-		(pdev->id_entry) ? pdev->id_entry->name : pdev->name);
+		pdev->name);
 	return 0;
 }
 
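
The old argument list of platform_device_register_resndata() is folded into struct platform_device_info, which also carries the new dma_mask field. A caller now looks roughly like this (the "foo" device, addresses and platform data are illustrative):

    static struct resource foo_resources[] = {
            { .start = 0x10000000, .end = 0x10000fff, .flags = IORESOURCE_MEM },
            { .start = 42,         .end = 42,         .flags = IORESOURCE_IRQ },
    };

    struct platform_device_info foo_devinfo = {
            .name      = "foo",
            .id        = -1,
            .res       = foo_resources,
            .num_res   = ARRAY_SIZE(foo_resources),
            .data      = &foo_pdata,
            .size_data = sizeof(foo_pdata),
            .dma_mask  = DMA_BIT_MASK(32),  /* allocates dev.dma_mask */
    };

    struct platform_device *pdev = platform_device_register_full(&foo_devinfo);
    if (IS_ERR(pdev))
            return PTR_ERR(pdev);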
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 2639ae79a372..81676dd17900 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -1,4 +1,4 @@
-obj-$(CONFIG_PM)	+= sysfs.o generic_ops.o
+obj-$(CONFIG_PM)	+= sysfs.o generic_ops.o common.o qos.o
 obj-$(CONFIG_PM_SLEEP)	+= main.o wakeup.o
 obj-$(CONFIG_PM_RUNTIME)	+= runtime.o
 obj-$(CONFIG_PM_TRACE_RTC)	+= trace.o
@@ -6,4 +6,4 @@ obj-$(CONFIG_PM_OPP)	+= opp.o
 obj-$(CONFIG_PM_GENERIC_DOMAINS)	+= domain.o
 obj-$(CONFIG_HAVE_CLK)	+= clock_ops.o
 
-ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
\ No newline at end of file
+ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 2c18d584066d..428e55e012dc 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -10,18 +10,13 @@
 #include <linux/kernel.h>
 #include <linux/io.h>
 #include <linux/pm.h>
-#include <linux/pm_runtime.h>
+#include <linux/pm_clock.h>
 #include <linux/clk.h>
 #include <linux/slab.h>
 #include <linux/err.h>
 
 #ifdef CONFIG_PM
 
-struct pm_clk_data {
-	struct list_head clock_list;
-	spinlock_t lock;
-};
-
 enum pce_status {
 	PCE_STATUS_NONE = 0,
 	PCE_STATUS_ACQUIRED,
@@ -36,9 +31,20 @@ struct pm_clock_entry {
 	enum pce_status status;
 };
 
-static struct pm_clk_data *__to_pcd(struct device *dev)
+/**
+ * pm_clk_acquire - Acquire a device clock.
+ * @dev: Device whose clock is to be acquired.
+ * @ce: PM clock entry corresponding to the clock.
+ */
+static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce)
 {
-	return dev ? dev->power.subsys_data : NULL;
+	ce->clk = clk_get(dev, ce->con_id);
+	if (IS_ERR(ce->clk)) {
+		ce->status = PCE_STATUS_ERROR;
+	} else {
+		ce->status = PCE_STATUS_ACQUIRED;
+		dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id);
+	}
 }
 
 /**
@@ -51,10 +57,10 @@ static struct pm_clk_data *__to_pcd(struct device *dev)
  */
 int pm_clk_add(struct device *dev, const char *con_id)
 {
-	struct pm_clk_data *pcd = __to_pcd(dev);
+	struct pm_subsys_data *psd = dev_to_psd(dev);
 	struct pm_clock_entry *ce;
 
-	if (!pcd)
+	if (!psd)
 		return -EINVAL;
 
 	ce = kzalloc(sizeof(*ce), GFP_KERNEL);
@@ -73,26 +79,23 @@ int pm_clk_add(struct device *dev, const char *con_id)
 		}
 	}
 
-	spin_lock_irq(&pcd->lock);
-	list_add_tail(&ce->node, &pcd->clock_list);
-	spin_unlock_irq(&pcd->lock);
+	pm_clk_acquire(dev, ce);
+
+	spin_lock_irq(&psd->lock);
+	list_add_tail(&ce->node, &psd->clock_list);
+	spin_unlock_irq(&psd->lock);
 	return 0;
 }
 
 /**
  * __pm_clk_remove - Destroy PM clock entry.
  * @ce: PM clock entry to destroy.
- *
- * This routine must be called under the spinlock protecting the PM list of
- * clocks corresponding the the @ce's device.
  */
 static void __pm_clk_remove(struct pm_clock_entry *ce)
 {
 	if (!ce)
 		return;
 
-	list_del(&ce->node);
-
 	if (ce->status < PCE_STATUS_ERROR) {
 		if (ce->status == PCE_STATUS_ENABLED)
 			clk_disable(ce->clk);
@@ -101,9 +104,7 @@ static void __pm_clk_remove(struct pm_clock_entry *ce)
 		clk_put(ce->clk);
 	}
 
-	if (ce->con_id)
-		kfree(ce->con_id);
-
+	kfree(ce->con_id);
 	kfree(ce);
 }
 
@@ -117,50 +118,58 @@ static void __pm_clk_remove(struct pm_clock_entry *ce)
  */
 void pm_clk_remove(struct device *dev, const char *con_id)
 {
-	struct pm_clk_data *pcd = __to_pcd(dev);
+	struct pm_subsys_data *psd = dev_to_psd(dev);
 	struct pm_clock_entry *ce;
 
-	if (!pcd)
+	if (!psd)
 		return;
 
-	spin_lock_irq(&pcd->lock);
+	spin_lock_irq(&psd->lock);
 
-	list_for_each_entry(ce, &pcd->clock_list, node) {
-		if (!con_id && !ce->con_id) {
-			__pm_clk_remove(ce);
-			break;
-		} else if (!con_id || !ce->con_id) {
+	list_for_each_entry(ce, &psd->clock_list, node) {
+		if (!con_id && !ce->con_id)
+			goto remove;
+		else if (!con_id || !ce->con_id)
 			continue;
-		} else if (!strcmp(con_id, ce->con_id)) {
-			__pm_clk_remove(ce);
-			break;
-		}
+		else if (!strcmp(con_id, ce->con_id))
+			goto remove;
 	}
 
-	spin_unlock_irq(&pcd->lock);
+	spin_unlock_irq(&psd->lock);
+	return;
+
+ remove:
+	list_del(&ce->node);
+	spin_unlock_irq(&psd->lock);
+
+	__pm_clk_remove(ce);
 }
 
 /**
  * pm_clk_init - Initialize a device's list of power management clocks.
  * @dev: Device to initialize the list of PM clocks for.
  *
- * Allocate a struct pm_clk_data object, initialize its lock member and
- * make the @dev's power.subsys_data field point to it.
+ * Initialize the lock and clock_list members of the device's pm_subsys_data
+ * object.
  */
-int pm_clk_init(struct device *dev)
+void pm_clk_init(struct device *dev)
 {
-	struct pm_clk_data *pcd;
-
-	pcd = kzalloc(sizeof(*pcd), GFP_KERNEL);
-	if (!pcd) {
-		dev_err(dev, "Not enough memory for PM clock data.\n");
-		return -ENOMEM;
-	}
+	struct pm_subsys_data *psd = dev_to_psd(dev);
+	if (psd)
+		INIT_LIST_HEAD(&psd->clock_list);
+}
 
-	INIT_LIST_HEAD(&pcd->clock_list);
-	spin_lock_init(&pcd->lock);
-	dev->power.subsys_data = pcd;
-	return 0;
+/**
+ * pm_clk_create - Create and initialize a device's list of PM clocks.
+ * @dev: Device to create and initialize the list of PM clocks for.
+ *
+ * Allocate a struct pm_subsys_data object, initialize its lock and clock_list
+ * members and make the @dev's power.subsys_data field point to it.
+ */
+int pm_clk_create(struct device *dev)
+{
+	int ret = dev_pm_get_subsys_data(dev);
+	return ret < 0 ? ret : 0;
 }
 
 /**
@@ -168,27 +177,33 @@ int pm_clk_init(struct device *dev)
  * @dev: Device to destroy the list of PM clocks for.
  *
  * Clear the @dev's power.subsys_data field, remove the list of clock entries
- * from the struct pm_clk_data object pointed to by it before and free
+ * from the struct pm_subsys_data object pointed to by it before and free
  * that object.
  */
 void pm_clk_destroy(struct device *dev)
 {
-	struct pm_clk_data *pcd = __to_pcd(dev);
+	struct pm_subsys_data *psd = dev_to_psd(dev);
 	struct pm_clock_entry *ce, *c;
+	struct list_head list;
 
-	if (!pcd)
+	if (!psd)
 		return;
 
-	dev->power.subsys_data = NULL;
+	INIT_LIST_HEAD(&list);
 
-	spin_lock_irq(&pcd->lock);
+	spin_lock_irq(&psd->lock);
 
-	list_for_each_entry_safe_reverse(ce, c, &pcd->clock_list, node)
-		__pm_clk_remove(ce);
+	list_for_each_entry_safe_reverse(ce, c, &psd->clock_list, node)
+		list_move(&ce->node, &list);
+
+	spin_unlock_irq(&psd->lock);
 
-	spin_unlock_irq(&pcd->lock);
+	dev_pm_put_subsys_data(dev);
 
-	kfree(pcd);
+	list_for_each_entry_safe_reverse(ce, c, &list, node) {
+		list_del(&ce->node);
+		__pm_clk_remove(ce);
+	}
 }
 
 #endif /* CONFIG_PM */
@@ -196,50 +211,31 @@ void pm_clk_destroy(struct device *dev)
 
 #ifdef CONFIG_PM_RUNTIME
 
 /**
- * pm_clk_acquire - Acquire a device clock.
- * @dev: Device whose clock is to be acquired.
- * @con_id: Connection ID of the clock.
- */
-static void pm_clk_acquire(struct device *dev,
-			   struct pm_clock_entry *ce)
-{
-	ce->clk = clk_get(dev, ce->con_id);
-	if (IS_ERR(ce->clk)) {
-		ce->status = PCE_STATUS_ERROR;
-	} else {
-		ce->status = PCE_STATUS_ACQUIRED;
-		dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id);
-	}
-}
-
-/**
  * pm_clk_suspend - Disable clocks in a device's PM clock list.
  * @dev: Device to disable the clocks for.
  */
 int pm_clk_suspend(struct device *dev)
 {
-	struct pm_clk_data *pcd = __to_pcd(dev);
+	struct pm_subsys_data *psd = dev_to_psd(dev);
 	struct pm_clock_entry *ce;
 	unsigned long flags;
 
 	dev_dbg(dev, "%s()\n", __func__);
 
-	if (!pcd)
+	if (!psd)
 		return 0;
 
-	spin_lock_irqsave(&pcd->lock, flags);
-
-	list_for_each_entry_reverse(ce, &pcd->clock_list, node) {
-		if (ce->status == PCE_STATUS_NONE)
-			pm_clk_acquire(dev, ce);
+	spin_lock_irqsave(&psd->lock, flags);
 
+	list_for_each_entry_reverse(ce, &psd->clock_list, node) {
 		if (ce->status < PCE_STATUS_ERROR) {
-			clk_disable(ce->clk);
+			if (ce->status == PCE_STATUS_ENABLED)
+				clk_disable(ce->clk);
 			ce->status = PCE_STATUS_ACQUIRED;
 		}
 	}
 
-	spin_unlock_irqrestore(&pcd->lock, flags);
+	spin_unlock_irqrestore(&psd->lock, flags);
 
 	return 0;
 }
@@ -250,28 +246,25 @@ int pm_clk_suspend(struct device *dev)
  */
 int pm_clk_resume(struct device *dev)
 {
-	struct pm_clk_data *pcd = __to_pcd(dev);
+	struct pm_subsys_data *psd = dev_to_psd(dev);
 	struct pm_clock_entry *ce;
 	unsigned long flags;
 
 	dev_dbg(dev, "%s()\n", __func__);
 
-	if (!pcd)
+	if (!psd)
 		return 0;
 
-	spin_lock_irqsave(&pcd->lock, flags);
-
-	list_for_each_entry(ce, &pcd->clock_list, node) {
-		if (ce->status == PCE_STATUS_NONE)
-			pm_clk_acquire(dev, ce);
+	spin_lock_irqsave(&psd->lock, flags);
 
+	list_for_each_entry(ce, &psd->clock_list, node) {
 		if (ce->status < PCE_STATUS_ERROR) {
 			clk_enable(ce->clk);
 			ce->status = PCE_STATUS_ENABLED;
 		}
 	}
 
-	spin_unlock_irqrestore(&pcd->lock, flags);
+	spin_unlock_irqrestore(&psd->lock, flags);
 
 	return 0;
 }
@@ -309,7 +302,7 @@ static int pm_clk_notify(struct notifier_block *nb,
 		if (dev->pm_domain)
 			break;
 
-		error = pm_clk_init(dev);
+		error = pm_clk_create(dev);
 		if (error)
 			break;
 
@@ -344,22 +337,22 @@
  */
 int pm_clk_suspend(struct device *dev)
 {
-	struct pm_clk_data *pcd = __to_pcd(dev);
+	struct pm_subsys_data *psd = dev_to_psd(dev);
 	struct pm_clock_entry *ce;
 	unsigned long flags;
 
 	dev_dbg(dev, "%s()\n", __func__);
 
 	/* If there is no driver, the clocks are already disabled. */
-	if (!pcd || !dev->driver)
+	if (!psd || !dev->driver)
 		return 0;
 
-	spin_lock_irqsave(&pcd->lock, flags);
+	spin_lock_irqsave(&psd->lock, flags);
 
-	list_for_each_entry_reverse(ce, &pcd->clock_list, node)
+	list_for_each_entry_reverse(ce, &psd->clock_list, node)
 		clk_disable(ce->clk);
 
-	spin_unlock_irqrestore(&pcd->lock, flags);
+	spin_unlock_irqrestore(&psd->lock, flags);
 
 	return 0;
 }
@@ -370,22 +363,22 @@ int pm_clk_suspend(struct device *dev)
  */
 int pm_clk_resume(struct device *dev)
 {
-	struct pm_clk_data *pcd = __to_pcd(dev);
+	struct pm_subsys_data *psd = dev_to_psd(dev);
 	struct pm_clock_entry *ce;
 	unsigned long flags;
 
 	dev_dbg(dev, "%s()\n", __func__);
 
 	/* If there is no driver, the clocks should remain disabled. */
-	if (!pcd || !dev->driver)
+	if (!psd || !dev->driver)
 		return 0;
 
-	spin_lock_irqsave(&pcd->lock, flags);
+	spin_lock_irqsave(&psd->lock, flags);
 
-	list_for_each_entry(ce, &pcd->clock_list, node)
+	list_for_each_entry(ce, &psd->clock_list, node)
 		clk_enable(ce->clk);
 
-	spin_unlock_irqrestore(&pcd->lock, flags);
+	spin_unlock_irqrestore(&psd->lock, flags);
 
 	return 0;
 }
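
With pm_clk_create() doing the allocation through dev_pm_get_subsys_data() and pm_clk_add() acquiring the clock up front (instead of lazily in suspend/resume), a bus notifier would use the API roughly as follows (the foo_* names are hypothetical):

    static int foo_setup_clocks(struct device *dev)
    {
            int error;

            error = pm_clk_create(dev);     /* replaces pm_clk_init() as the allocating call */
            if (error)
                    return error;

            error = pm_clk_add(dev, NULL);  /* clk_get() happens here now */
            if (error)
                    pm_clk_destroy(dev);

            return error;
    }

    static void foo_teardown_clocks(struct device *dev)
    {
            pm_clk_destroy(dev);    /* drops the subsys_data reference, frees entries */
    }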
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
new file mode 100644
index 000000000000..4af7c1cbf909
--- /dev/null
+++ b/drivers/base/power/common.c
@@ -0,0 +1,86 @@
+/*
+ * drivers/base/power/common.c - Common device power management code.
+ *
+ * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/pm_clock.h>
+
+/**
+ * dev_pm_get_subsys_data - Create or refcount power.subsys_data for device.
+ * @dev: Device to handle.
+ *
+ * If power.subsys_data is NULL, point it to a new object, otherwise increment
+ * its reference counter.  Return 1 if a new object has been created, otherwise
+ * return 0 or error code.
+ */
+int dev_pm_get_subsys_data(struct device *dev)
+{
+	struct pm_subsys_data *psd;
+	int ret = 0;
+
+	psd = kzalloc(sizeof(*psd), GFP_KERNEL);
+	if (!psd)
+		return -ENOMEM;
+
+	spin_lock_irq(&dev->power.lock);
+
+	if (dev->power.subsys_data) {
+		dev->power.subsys_data->refcount++;
+	} else {
+		spin_lock_init(&psd->lock);
+		psd->refcount = 1;
+		dev->power.subsys_data = psd;
+		pm_clk_init(dev);
+		psd = NULL;
+		ret = 1;
+	}
+
+	spin_unlock_irq(&dev->power.lock);
+
+	/* kfree() verifies that its argument is nonzero. */
+	kfree(psd);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data);
+
+/**
+ * dev_pm_put_subsys_data - Drop reference to power.subsys_data.
+ * @dev: Device to handle.
+ *
+ * If the reference counter of power.subsys_data is zero after dropping the
+ * reference, power.subsys_data is removed.  Return 1 if that happens or 0
+ * otherwise.
+ */
+int dev_pm_put_subsys_data(struct device *dev)
+{
+	struct pm_subsys_data *psd;
+	int ret = 0;
+
+	spin_lock_irq(&dev->power.lock);
+
+	psd = dev_to_psd(dev);
+	if (!psd) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (--psd->refcount == 0) {
+		dev->power.subsys_data = NULL;
+		kfree(psd);
+		ret = 1;
+	}
+
+ out:
+	spin_unlock_irq(&dev->power.lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data);
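
The refcounting contract of the two new helpers, as a sketch (foo_attach()/foo_detach() are hypothetical subsystem hooks):

    static int foo_attach(struct device *dev)
    {
            /* 1 = newly allocated, 0 = reference taken, <0 = error */
            int ret = dev_pm_get_subsys_data(dev);

            if (ret < 0)
                    return ret;

            /* dev->power.subsys_data is now valid and pinned */
            return 0;
    }

    static void foo_detach(struct device *dev)
    {
            dev_pm_put_subsys_data(dev);    /* frees it on the last put */
    }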
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 1c374579407c..6790cf7eba5a 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -29,10 +29,20 @@ static struct generic_pm_domain *dev_to_genpd(struct device *dev)
 	return pd_to_genpd(dev->pm_domain);
 }
 
-static void genpd_sd_counter_dec(struct generic_pm_domain *genpd)
+static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
 {
-	if (!WARN_ON(genpd->sd_count == 0))
-		genpd->sd_count--;
+	bool ret = false;
+
+	if (!WARN_ON(atomic_read(&genpd->sd_count) == 0))
+		ret = !!atomic_dec_and_test(&genpd->sd_count);
+
+	return ret;
+}
+
+static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
+{
+	atomic_inc(&genpd->sd_count);
+	smp_mb__after_atomic_inc();
 }
 
 static void genpd_acquire_lock(struct generic_pm_domain *genpd)
@@ -71,81 +81,119 @@ static void genpd_set_active(struct generic_pm_domain *genpd)
 }
 
 /**
- * pm_genpd_poweron - Restore power to a given PM domain and its parents.
+ * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
  * @genpd: PM domain to power up.
  *
- * Restore power to @genpd and all of its parents so that it is possible to
+ * Restore power to @genpd and all of its masters so that it is possible to
  * resume a device belonging to it.
  */
-int pm_genpd_poweron(struct generic_pm_domain *genpd)
+int __pm_genpd_poweron(struct generic_pm_domain *genpd)
+	__releases(&genpd->lock) __acquires(&genpd->lock)
 {
-	struct generic_pm_domain *parent = genpd->parent;
+	struct gpd_link *link;
+	DEFINE_WAIT(wait);
 	int ret = 0;
 
-start:
-	if (parent) {
-		genpd_acquire_lock(parent);
-		mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
-	} else {
+	/* If the domain's master is being waited for, we have to wait too. */
+	for (;;) {
+		prepare_to_wait(&genpd->status_wait_queue, &wait,
+				TASK_UNINTERRUPTIBLE);
+		if (genpd->status != GPD_STATE_WAIT_MASTER)
+			break;
+		mutex_unlock(&genpd->lock);
+
+		schedule();
+
 		mutex_lock(&genpd->lock);
 	}
+	finish_wait(&genpd->status_wait_queue, &wait);
 
 	if (genpd->status == GPD_STATE_ACTIVE
 	    || (genpd->prepared_count > 0 && genpd->suspend_power_off))
-		goto out;
+		return 0;
 
 	if (genpd->status != GPD_STATE_POWER_OFF) {
 		genpd_set_active(genpd);
-		goto out;
+		return 0;
 	}
 
-	if (parent && parent->status != GPD_STATE_ACTIVE) {
+	/*
+	 * The list is guaranteed not to change while the loop below is being
+	 * executed, unless one of the masters' .power_on() callbacks fiddles
+	 * with it.
+	 */
+	list_for_each_entry(link, &genpd->slave_links, slave_node) {
+		genpd_sd_counter_inc(link->master);
+		genpd->status = GPD_STATE_WAIT_MASTER;
+
 		mutex_unlock(&genpd->lock);
-		genpd_release_lock(parent);
 
-		ret = pm_genpd_poweron(parent);
-		if (ret)
-			return ret;
+		ret = pm_genpd_poweron(link->master);
 
-		goto start;
+		mutex_lock(&genpd->lock);
+
+		/*
+		 * The "wait for parent" status is guaranteed not to change
+		 * while the master is powering on.
+		 */
+		genpd->status = GPD_STATE_POWER_OFF;
+		wake_up_all(&genpd->status_wait_queue);
+		if (ret) {
+			genpd_sd_counter_dec(link->master);
+			goto err;
+		}
 	}
 
 	if (genpd->power_on) {
 		ret = genpd->power_on(genpd);
 		if (ret)
-			goto out;
+			goto err;
 	}
 
 	genpd_set_active(genpd);
-	if (parent)
-		parent->sd_count++;
 
-out:
-	mutex_unlock(&genpd->lock);
-	if (parent)
-		genpd_release_lock(parent);
+	return 0;
+
+ err:
+	list_for_each_entry_continue_reverse(link, &genpd->slave_links, slave_node)
+		genpd_sd_counter_dec(link->master);
 
 	return ret;
 }
 
+/**
+ * pm_genpd_poweron - Restore power to a given PM domain and its masters.
+ * @genpd: PM domain to power up.
+ */
+int pm_genpd_poweron(struct generic_pm_domain *genpd)
+{
+	int ret;
+
+	mutex_lock(&genpd->lock);
+	ret = __pm_genpd_poweron(genpd);
+	mutex_unlock(&genpd->lock);
+	return ret;
+}
+
 #endif /* CONFIG_PM */
 
 #ifdef CONFIG_PM_RUNTIME
 
 /**
  * __pm_genpd_save_device - Save the pre-suspend state of a device.
- * @dle: Device list entry of the device to save the state of.
+ * @pdd: Domain data of the device to save the state of.
  * @genpd: PM domain the device belongs to.
  */
-static int __pm_genpd_save_device(struct dev_list_entry *dle,
+static int __pm_genpd_save_device(struct pm_domain_data *pdd,
 				  struct generic_pm_domain *genpd)
 	__releases(&genpd->lock) __acquires(&genpd->lock)
 {
-	struct device *dev = dle->dev;
+	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
+	struct device *dev = pdd->dev;
 	struct device_driver *drv = dev->driver;
 	int ret = 0;
 
-	if (dle->need_restore)
+	if (gpd_data->need_restore)
 		return 0;
 
 	mutex_unlock(&genpd->lock);
@@ -163,24 +211,25 @@ static int __pm_genpd_save_device(struct dev_list_entry *dle,
 	mutex_lock(&genpd->lock);
 
 	if (!ret)
-		dle->need_restore = true;
+		gpd_data->need_restore = true;
 
 	return ret;
 }
 
 /**
  * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
- * @dle: Device list entry of the device to restore the state of.
+ * @pdd: Domain data of the device to restore the state of.
  * @genpd: PM domain the device belongs to.
  */
-static void __pm_genpd_restore_device(struct dev_list_entry *dle,
+static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
 				      struct generic_pm_domain *genpd)
 	__releases(&genpd->lock) __acquires(&genpd->lock)
 {
-	struct device *dev = dle->dev;
+	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
+	struct device *dev = pdd->dev;
 	struct device_driver *drv = dev->driver;
 
-	if (!dle->need_restore)
+	if (!gpd_data->need_restore)
 		return;
 
 	mutex_unlock(&genpd->lock);
@@ -197,7 +246,7 @@ static void __pm_genpd_restore_device(struct dev_list_entry *dle,
 
 	mutex_lock(&genpd->lock);
 
-	dle->need_restore = false;
+	gpd_data->need_restore = false;
 }
 
 /**
@@ -211,7 +260,8 @@ static void __pm_genpd_restore_device(struct dev_list_entry *dle,
  */
 static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
 {
-	return genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
+	return genpd->status == GPD_STATE_WAIT_MASTER
+	    || genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
 }
 
 /**
@@ -238,8 +288,8 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
 static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
 	__releases(&genpd->lock) __acquires(&genpd->lock)
 {
-	struct generic_pm_domain *parent;
-	struct dev_list_entry *dle;
+	struct pm_domain_data *pdd;
+	struct gpd_link *link;
 	unsigned int not_suspended;
 	int ret = 0;
 
@@ -247,19 +297,22 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
 	/*
 	 * Do not try to power off the domain in the following situations:
 	 * (1) The domain is already in the "power off" state.
-	 * (2) System suspend is in progress.
+	 * (2) The domain is waiting for its master to power up.
 	 * (3) One of the domain's devices is being resumed right now.
+	 * (4) System suspend is in progress.
 	 */
-	if (genpd->status == GPD_STATE_POWER_OFF || genpd->prepared_count > 0
-	    || genpd->resume_count > 0)
+	if (genpd->status == GPD_STATE_POWER_OFF
+	    || genpd->status == GPD_STATE_WAIT_MASTER
+	    || genpd->resume_count > 0 || genpd->prepared_count > 0)
 		return 0;
 
-	if (genpd->sd_count > 0)
+	if (atomic_read(&genpd->sd_count) > 0)
 		return -EBUSY;
 
 	not_suspended = 0;
-	list_for_each_entry(dle, &genpd->dev_list, node)
-		if (dle->dev->driver && !pm_runtime_suspended(dle->dev))
+	list_for_each_entry(pdd, &genpd->dev_list, list_node)
+		if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
+		    || pdd->dev->power.irq_safe))
 			not_suspended++;
 
 	if (not_suspended > genpd->in_progress)
@@ -282,54 +335,50 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
 	genpd->status = GPD_STATE_BUSY;
 	genpd->poweroff_task = current;
 
-	list_for_each_entry_reverse(dle, &genpd->dev_list, node) {
-		ret = __pm_genpd_save_device(dle, genpd);
+	list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
+		ret = atomic_read(&genpd->sd_count) == 0 ?
+			__pm_genpd_save_device(pdd, genpd) : -EBUSY;
+
+		if (genpd_abort_poweroff(genpd))
+			goto out;
+
 		if (ret) {
 			genpd_set_active(genpd);
 			goto out;
 		}
 
-		if (genpd_abort_poweroff(genpd))
-			goto out;
-
 		if (genpd->status == GPD_STATE_REPEAT) {
 			genpd->poweroff_task = NULL;
 			goto start;
 		}
 	}
 
-	parent = genpd->parent;
-	if (parent) {
-		mutex_unlock(&genpd->lock);
-
-		genpd_acquire_lock(parent);
-		mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
-
-		if (genpd_abort_poweroff(genpd)) {
-			genpd_release_lock(parent);
+	if (genpd->power_off) {
+		if (atomic_read(&genpd->sd_count) > 0) {
+			ret = -EBUSY;
 			goto out;
 		}
-	}
 
-	if (genpd->power_off) {
+		/*
+		 * If sd_count > 0 at this point, one of the subdomains hasn't
+		 * managed to call pm_genpd_poweron() for the master yet after
+		 * incrementing it.  In that case pm_genpd_poweron() will wait
+		 * for us to drop the lock, so we can call .power_off() and let
+		 * the pm_genpd_poweron() restore power for us (this shouldn't
+		 * happen very often).
+		 */
 		ret = genpd->power_off(genpd);
 		if (ret == -EBUSY) {
 			genpd_set_active(genpd);
-			if (parent)
-				genpd_release_lock(parent);
-
 			goto out;
 		}
 	}
 
 	genpd->status = GPD_STATE_POWER_OFF;
 
-	if (parent) {
-		genpd_sd_counter_dec(parent);
-		if (parent->sd_count == 0)
-			genpd_queue_power_off_work(parent);
-
-		genpd_release_lock(parent);
+	list_for_each_entry(link, &genpd->slave_links, slave_node) {
+		genpd_sd_counter_dec(link->master);
+		genpd_queue_power_off_work(link->master);
 	}
 
 out:
@@ -371,12 +420,21 @@ static int pm_genpd_runtime_suspend(struct device *dev)
 	if (IS_ERR(genpd))
 		return -EINVAL;
 
+	might_sleep_if(!genpd->dev_irq_safe);
+
 	if (genpd->stop_device) {
 		int ret = genpd->stop_device(dev);
 		if (ret)
 			return ret;
 	}
 
+	/*
+	 * If power.irq_safe is set, this routine will be run with interrupts
+	 * off, so it can't use mutexes.
+	 */
+	if (dev->power.irq_safe)
+		return 0;
+
 	mutex_lock(&genpd->lock);
 	genpd->in_progress++;
 	pm_genpd_poweroff(genpd);
@@ -387,24 +445,6 @@ static int pm_genpd_runtime_suspend(struct device *dev)
 }
 
 /**
- * __pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
- * @dev: Device to resume.
- * @genpd: PM domain the device belongs to.
- */
-static void __pm_genpd_runtime_resume(struct device *dev,
-				      struct generic_pm_domain *genpd)
-{
-	struct dev_list_entry *dle;
-
-	list_for_each_entry(dle, &genpd->dev_list, node) {
-		if (dle->dev == dev) {
-			__pm_genpd_restore_device(dle, genpd);
-			break;
-		}
-	}
-}
-
-/**
  * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
  * @dev: Device to resume.
  *
@@ -424,11 +464,18 @@ static int pm_genpd_runtime_resume(struct device *dev)
 	if (IS_ERR(genpd))
 		return -EINVAL;
 
-	ret = pm_genpd_poweron(genpd);
-	if (ret)
-		return ret;
+	might_sleep_if(!genpd->dev_irq_safe);
+
+	/* If power.irq_safe, the PM domain is never powered off. */
+	if (dev->power.irq_safe)
+		goto out;
 
 	mutex_lock(&genpd->lock);
+	ret = __pm_genpd_poweron(genpd);
+	if (ret) {
+		mutex_unlock(&genpd->lock);
+		return ret;
+	}
 	genpd->status = GPD_STATE_BUSY;
 	genpd->resume_count++;
 	for (;;) {
@@ -448,12 +495,13 @@ static int pm_genpd_runtime_resume(struct device *dev)
 		mutex_lock(&genpd->lock);
 	}
 	finish_wait(&genpd->status_wait_queue, &wait);
-	__pm_genpd_runtime_resume(dev, genpd);
+	__pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd);
 	genpd->resume_count--;
 	genpd_set_active(genpd);
 	wake_up_all(&genpd->status_wait_queue);
 	mutex_unlock(&genpd->lock);
 
+ out:
 	if (genpd->start_device)
 		genpd->start_device(dev);
 
@@ -478,8 +526,6 @@ void pm_genpd_poweroff_unused(void)
 #else
 
 static inline void genpd_power_off_work_fn(struct work_struct *work) {}
-static inline void __pm_genpd_runtime_resume(struct device *dev,
-					     struct generic_pm_domain *genpd) {}
 
 #define pm_genpd_runtime_suspend	NULL
 #define pm_genpd_runtime_resume		NULL
@@ -489,11 +535,11 @@ static inline void __pm_genpd_runtime_resume(struct device *dev,
 #ifdef CONFIG_PM_SLEEP
 
 /**
- * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its parents.
+ * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
  * @genpd: PM domain to power off, if possible.
  *
  * Check if the given PM domain can be powered off (during system suspend or
- * hibernation) and do that if so. Also, in that case propagate to its parent.
+ * hibernation) and do that if so. Also, in that case propagate to its masters.
  *
 * This function is only called in "noirq" stages of system power transitions,
 * so it need not acquire locks (all of the "noirq" callbacks are executed
@@ -501,21 +547,23 @@ static inline void __pm_genpd_runtime_resume(struct device *dev,
 */
 static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
 {
-	struct generic_pm_domain *parent = genpd->parent;
+	struct gpd_link *link;
 
 	if (genpd->status == GPD_STATE_POWER_OFF)
 		return;
 
-	if (genpd->suspended_count != genpd->device_count || genpd->sd_count > 0)
+	if (genpd->suspended_count != genpd->device_count
+	    || atomic_read(&genpd->sd_count) > 0)
 		return;
 
 	if (genpd->power_off)
 		genpd->power_off(genpd);
 
 	genpd->status = GPD_STATE_POWER_OFF;
-	if (parent) {
-		genpd_sd_counter_dec(parent);
-		pm_genpd_sync_poweroff(parent);
+
+	list_for_each_entry(link, &genpd->slave_links, slave_node) {
+		genpd_sd_counter_dec(link->master);
+		pm_genpd_sync_poweroff(link->master);
 	}
 }
 
@@ -666,7 +714,7 @@ static int pm_genpd_suspend_noirq(struct device *dev)
 	if (ret)
 		return ret;
 
-	if (device_may_wakeup(dev)
+	if (dev->power.wakeup_path
 	    && genpd->active_wakeup && genpd->active_wakeup(dev))
 		return 0;
 
@@ -890,7 +938,7 @@ static int pm_genpd_dev_poweroff_noirq(struct device *dev)
 	if (ret)
 		return ret;
 
-	if (device_may_wakeup(dev)
+	if (dev->power.wakeup_path
 	    && genpd->active_wakeup && genpd->active_wakeup(dev))
895 | return 0; | 943 | return 0; |
896 | 944 | ||
@@ -1034,7 +1082,8 @@ static void pm_genpd_complete(struct device *dev) | |||
1034 | */ | 1082 | */ |
1035 | int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev) | 1083 | int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev) |
1036 | { | 1084 | { |
1037 | struct dev_list_entry *dle; | 1085 | struct generic_pm_domain_data *gpd_data; |
1086 | struct pm_domain_data *pdd; | ||
1038 | int ret = 0; | 1087 | int ret = 0; |
1039 | 1088 | ||
1040 | dev_dbg(dev, "%s()\n", __func__); | 1089 | dev_dbg(dev, "%s()\n", __func__); |
@@ -1054,26 +1103,26 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev) | |||
1054 | goto out; | 1103 | goto out; |
1055 | } | 1104 | } |
1056 | 1105 | ||
1057 | list_for_each_entry(dle, &genpd->dev_list, node) | 1106 | list_for_each_entry(pdd, &genpd->dev_list, list_node) |
1058 | if (dle->dev == dev) { | 1107 | if (pdd->dev == dev) { |
1059 | ret = -EINVAL; | 1108 | ret = -EINVAL; |
1060 | goto out; | 1109 | goto out; |
1061 | } | 1110 | } |
1062 | 1111 | ||
1063 | dle = kzalloc(sizeof(*dle), GFP_KERNEL); | 1112 | gpd_data = kzalloc(sizeof(*gpd_data), GFP_KERNEL); |
1064 | if (!dle) { | 1113 | if (!gpd_data) { |
1065 | ret = -ENOMEM; | 1114 | ret = -ENOMEM; |
1066 | goto out; | 1115 | goto out; |
1067 | } | 1116 | } |
1068 | 1117 | ||
1069 | dle->dev = dev; | ||
1070 | dle->need_restore = false; | ||
1071 | list_add_tail(&dle->node, &genpd->dev_list); | ||
1072 | genpd->device_count++; | 1118 | genpd->device_count++; |
1073 | 1119 | ||
1074 | spin_lock_irq(&dev->power.lock); | ||
1075 | dev->pm_domain = &genpd->domain; | 1120 | dev->pm_domain = &genpd->domain; |
1076 | spin_unlock_irq(&dev->power.lock); | 1121 | dev_pm_get_subsys_data(dev); |
1122 | dev->power.subsys_data->domain_data = &gpd_data->base; | ||
1123 | gpd_data->base.dev = dev; | ||
1124 | gpd_data->need_restore = false; | ||
1125 | list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); | ||
1077 | 1126 | ||
1078 | out: | 1127 | out: |
1079 | genpd_release_lock(genpd); | 1128 | genpd_release_lock(genpd); |
@@ -1089,7 +1138,7 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev) | |||
1089 | int pm_genpd_remove_device(struct generic_pm_domain *genpd, | 1138 | int pm_genpd_remove_device(struct generic_pm_domain *genpd, |
1090 | struct device *dev) | 1139 | struct device *dev) |
1091 | { | 1140 | { |
1092 | struct dev_list_entry *dle; | 1141 | struct pm_domain_data *pdd; |
1093 | int ret = -EINVAL; | 1142 | int ret = -EINVAL; |
1094 | 1143 | ||
1095 | dev_dbg(dev, "%s()\n", __func__); | 1144 | dev_dbg(dev, "%s()\n", __func__); |
@@ -1104,17 +1153,17 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd, | |||
1104 | goto out; | 1153 | goto out; |
1105 | } | 1154 | } |
1106 | 1155 | ||
1107 | list_for_each_entry(dle, &genpd->dev_list, node) { | 1156 | list_for_each_entry(pdd, &genpd->dev_list, list_node) { |
1108 | if (dle->dev != dev) | 1157 | if (pdd->dev != dev) |
1109 | continue; | 1158 | continue; |
1110 | 1159 | ||
1111 | spin_lock_irq(&dev->power.lock); | 1160 | list_del_init(&pdd->list_node); |
1161 | pdd->dev = NULL; | ||
1162 | dev_pm_put_subsys_data(dev); | ||
1112 | dev->pm_domain = NULL; | 1163 | dev->pm_domain = NULL; |
1113 | spin_unlock_irq(&dev->power.lock); | 1164 | kfree(to_gpd_data(pdd)); |
1114 | 1165 | ||
1115 | genpd->device_count--; | 1166 | genpd->device_count--; |
1116 | list_del(&dle->node); | ||
1117 | kfree(dle); | ||
1118 | 1167 | ||
1119 | ret = 0; | 1168 | ret = 0; |
1120 | break; | 1169 | break; |
@@ -1129,48 +1178,55 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd, | |||
1129 | /** | 1178 | /** |
1130 | * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain. | 1179 | * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain. |
1131 | * @genpd: Master PM domain to add the subdomain to. | 1180 | * @genpd: Master PM domain to add the subdomain to. |
1132 | * @new_subdomain: Subdomain to be added. | 1181 | * @subdomain: Subdomain to be added. |
1133 | */ | 1182 | */ |
1134 | int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, | 1183 | int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, |
1135 | struct generic_pm_domain *new_subdomain) | 1184 | struct generic_pm_domain *subdomain) |
1136 | { | 1185 | { |
1137 | struct generic_pm_domain *subdomain; | 1186 | struct gpd_link *link; |
1138 | int ret = 0; | 1187 | int ret = 0; |
1139 | 1188 | ||
1140 | if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(new_subdomain)) | 1189 | if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) |
1141 | return -EINVAL; | 1190 | return -EINVAL; |
1142 | 1191 | ||
1143 | start: | 1192 | start: |
1144 | genpd_acquire_lock(genpd); | 1193 | genpd_acquire_lock(genpd); |
1145 | mutex_lock_nested(&new_subdomain->lock, SINGLE_DEPTH_NESTING); | 1194 | mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); |
1146 | 1195 | ||
1147 | if (new_subdomain->status != GPD_STATE_POWER_OFF | 1196 | if (subdomain->status != GPD_STATE_POWER_OFF |
1148 | && new_subdomain->status != GPD_STATE_ACTIVE) { | 1197 | && subdomain->status != GPD_STATE_ACTIVE) { |
1149 | mutex_unlock(&new_subdomain->lock); | 1198 | mutex_unlock(&subdomain->lock); |
1150 | genpd_release_lock(genpd); | 1199 | genpd_release_lock(genpd); |
1151 | goto start; | 1200 | goto start; |
1152 | } | 1201 | } |
1153 | 1202 | ||
1154 | if (genpd->status == GPD_STATE_POWER_OFF | 1203 | if (genpd->status == GPD_STATE_POWER_OFF |
1155 | && new_subdomain->status != GPD_STATE_POWER_OFF) { | 1204 | && subdomain->status != GPD_STATE_POWER_OFF) { |
1156 | ret = -EINVAL; | 1205 | ret = -EINVAL; |
1157 | goto out; | 1206 | goto out; |
1158 | } | 1207 | } |
1159 | 1208 | ||
1160 | list_for_each_entry(subdomain, &genpd->sd_list, sd_node) { | 1209 | list_for_each_entry(link, &genpd->slave_links, slave_node) { |
1161 | if (subdomain == new_subdomain) { | 1210 | if (link->slave == subdomain && link->master == genpd) { |
1162 | ret = -EINVAL; | 1211 | ret = -EINVAL; |
1163 | goto out; | 1212 | goto out; |
1164 | } | 1213 | } |
1165 | } | 1214 | } |
1166 | 1215 | ||
1167 | list_add_tail(&new_subdomain->sd_node, &genpd->sd_list); | 1216 | link = kzalloc(sizeof(*link), GFP_KERNEL); |
1168 | new_subdomain->parent = genpd; | 1217 | if (!link) { |
1218 | ret = -ENOMEM; | ||
1219 | goto out; | ||
1220 | } | ||
1221 | link->master = genpd; | ||
1222 | list_add_tail(&link->master_node, &genpd->master_links); | ||
1223 | link->slave = subdomain; | ||
1224 | list_add_tail(&link->slave_node, &subdomain->slave_links); | ||
1169 | if (subdomain->status != GPD_STATE_POWER_OFF) | 1225 | if (subdomain->status != GPD_STATE_POWER_OFF) |
1170 | genpd->sd_count++; | 1226 | genpd_sd_counter_inc(genpd); |
1171 | 1227 | ||
1172 | out: | 1228 | out: |
1173 | mutex_unlock(&new_subdomain->lock); | 1229 | mutex_unlock(&subdomain->lock); |
1174 | genpd_release_lock(genpd); | 1230 | genpd_release_lock(genpd); |
1175 | 1231 | ||
1176 | return ret; | 1232 | return ret; |
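With the single parent pointer and sd_list replaced by gpd_link objects chained on master_links/slave_links, a domain can now have more than one master. A hedged sketch of building such a topology (domain names hypothetical, callback setup omitted):

	static struct generic_pm_domain domain_a, domain_b, shared;

	static int __init foo_domains_setup(void)
	{
		int ret;

		/*
		 * 'shared' becomes a slave of both masters; each call
		 * allocates one struct gpd_link and, if the subdomain is
		 * powered on, increments the master's sd_count.
		 */
		ret = pm_genpd_add_subdomain(&domain_a, &shared);
		if (ret)
			return ret;
		return pm_genpd_add_subdomain(&domain_b, &shared);
	}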
@@ -1179,22 +1235,22 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, | |||
1179 | /** | 1235 | /** |
1180 | * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain. | 1236 | * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain. |
1181 | * @genpd: Master PM domain to remove the subdomain from. | 1237 | * @genpd: Master PM domain to remove the subdomain from. |
1182 | * @target: Subdomain to be removed. | 1238 | * @subdomain: Subdomain to be removed. |
1183 | */ | 1239 | */ |
1184 | int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, | 1240 | int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, |
1185 | struct generic_pm_domain *target) | 1241 | struct generic_pm_domain *subdomain) |
1186 | { | 1242 | { |
1187 | struct generic_pm_domain *subdomain; | 1243 | struct gpd_link *link; |
1188 | int ret = -EINVAL; | 1244 | int ret = -EINVAL; |
1189 | 1245 | ||
1190 | if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(target)) | 1246 | if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) |
1191 | return -EINVAL; | 1247 | return -EINVAL; |
1192 | 1248 | ||
1193 | start: | 1249 | start: |
1194 | genpd_acquire_lock(genpd); | 1250 | genpd_acquire_lock(genpd); |
1195 | 1251 | ||
1196 | list_for_each_entry(subdomain, &genpd->sd_list, sd_node) { | 1252 | list_for_each_entry(link, &genpd->master_links, master_node) { |
1197 | if (subdomain != target) | 1253 | if (link->slave != subdomain) |
1198 | continue; | 1254 | continue; |
1199 | 1255 | ||
1200 | mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); | 1256 | mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); |
@@ -1206,8 +1262,9 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, | |||
1206 | goto start; | 1262 | goto start; |
1207 | } | 1263 | } |
1208 | 1264 | ||
1209 | list_del(&subdomain->sd_node); | 1265 | list_del(&link->master_node); |
1210 | subdomain->parent = NULL; | 1266 | list_del(&link->slave_node); |
1267 | kfree(link); | ||
1211 | if (subdomain->status != GPD_STATE_POWER_OFF) | 1268 | if (subdomain->status != GPD_STATE_POWER_OFF) |
1212 | genpd_sd_counter_dec(genpd); | 1269 | genpd_sd_counter_dec(genpd); |
1213 | 1270 | ||
@@ -1234,15 +1291,14 @@ void pm_genpd_init(struct generic_pm_domain *genpd, | |||
1234 | if (IS_ERR_OR_NULL(genpd)) | 1291 | if (IS_ERR_OR_NULL(genpd)) |
1235 | return; | 1292 | return; |
1236 | 1293 | ||
1237 | INIT_LIST_HEAD(&genpd->sd_node); | 1294 | INIT_LIST_HEAD(&genpd->master_links); |
1238 | genpd->parent = NULL; | 1295 | INIT_LIST_HEAD(&genpd->slave_links); |
1239 | INIT_LIST_HEAD(&genpd->dev_list); | 1296 | INIT_LIST_HEAD(&genpd->dev_list); |
1240 | INIT_LIST_HEAD(&genpd->sd_list); | ||
1241 | mutex_init(&genpd->lock); | 1297 | mutex_init(&genpd->lock); |
1242 | genpd->gov = gov; | 1298 | genpd->gov = gov; |
1243 | INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn); | 1299 | INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn); |
1244 | genpd->in_progress = 0; | 1300 | genpd->in_progress = 0; |
1245 | genpd->sd_count = 0; | 1301 | atomic_set(&genpd->sd_count, 0); |
1246 | genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE; | 1302 | genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE; |
1247 | init_waitqueue_head(&genpd->status_wait_queue); | 1303 | init_waitqueue_head(&genpd->status_wait_queue); |
1248 | genpd->poweroff_task = NULL; | 1304 | genpd->poweroff_task = NULL; |
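For reference, a minimal bring-up against the reworked initializer, sketched with hypothetical callbacks (the governor argument is left NULL and error handling is elided):

	static int foo_pd_power_on(struct generic_pm_domain *genpd)
	{
		return 0;	/* hypothetical: ungate the power rail */
	}

	static int foo_pd_power_off(struct generic_pm_domain *genpd)
	{
		return 0;	/* hypothetical: gate the power rail */
	}

	static struct generic_pm_domain foo_pd = {
		.power_on	= foo_pd_power_on,
		.power_off	= foo_pd_power_off,
	};

	/* Domain starts powered off; master/slave link lists start empty. */
	pm_genpd_init(&foo_pd, NULL, true);

	/*
	 * Attaching a device now allocates a generic_pm_domain_data and
	 * hangs it off dev->power.subsys_data->domain_data.
	 */
	pm_genpd_add_device(&foo_pd, &pdev->dev);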
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c index 9508df71274b..265a0ee3b49e 100644 --- a/drivers/base/power/generic_ops.c +++ b/drivers/base/power/generic_ops.c | |||
@@ -8,6 +8,7 @@ | |||
8 | 8 | ||
9 | #include <linux/pm.h> | 9 | #include <linux/pm.h> |
10 | #include <linux/pm_runtime.h> | 10 | #include <linux/pm_runtime.h> |
11 | #include <linux/export.h> | ||
11 | 12 | ||
12 | #ifdef CONFIG_PM_RUNTIME | 13 | #ifdef CONFIG_PM_RUNTIME |
13 | /** | 14 | /** |
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index a85459126bc6..c3d2dfcf438d 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c | |||
@@ -19,6 +19,7 @@ | |||
19 | 19 | ||
20 | #include <linux/device.h> | 20 | #include <linux/device.h> |
21 | #include <linux/kallsyms.h> | 21 | #include <linux/kallsyms.h> |
22 | #include <linux/export.h> | ||
22 | #include <linux/mutex.h> | 23 | #include <linux/mutex.h> |
23 | #include <linux/pm.h> | 24 | #include <linux/pm.h> |
24 | #include <linux/pm_runtime.h> | 25 | #include <linux/pm_runtime.h> |
@@ -46,6 +47,7 @@ LIST_HEAD(dpm_prepared_list); | |||
46 | LIST_HEAD(dpm_suspended_list); | 47 | LIST_HEAD(dpm_suspended_list); |
47 | LIST_HEAD(dpm_noirq_list); | 48 | LIST_HEAD(dpm_noirq_list); |
48 | 49 | ||
50 | struct suspend_stats suspend_stats; | ||
49 | static DEFINE_MUTEX(dpm_list_mtx); | 51 | static DEFINE_MUTEX(dpm_list_mtx); |
50 | static pm_message_t pm_transition; | 52 | static pm_message_t pm_transition; |
51 | 53 | ||
@@ -65,6 +67,7 @@ void device_pm_init(struct device *dev) | |||
65 | spin_lock_init(&dev->power.lock); | 67 | spin_lock_init(&dev->power.lock); |
66 | pm_runtime_init(dev); | 68 | pm_runtime_init(dev); |
67 | INIT_LIST_HEAD(&dev->power.entry); | 69 | INIT_LIST_HEAD(&dev->power.entry); |
70 | dev->power.power_state = PMSG_INVALID; | ||
68 | } | 71 | } |
69 | 72 | ||
70 | /** | 73 | /** |
@@ -96,6 +99,7 @@ void device_pm_add(struct device *dev) | |||
96 | dev_warn(dev, "parent %s should not be sleeping\n", | 99 | dev_warn(dev, "parent %s should not be sleeping\n", |
97 | dev_name(dev->parent)); | 100 | dev_name(dev->parent)); |
98 | list_add_tail(&dev->power.entry, &dpm_list); | 101 | list_add_tail(&dev->power.entry, &dpm_list); |
102 | dev_pm_qos_constraints_init(dev); | ||
99 | mutex_unlock(&dpm_list_mtx); | 103 | mutex_unlock(&dpm_list_mtx); |
100 | } | 104 | } |
101 | 105 | ||
@@ -109,6 +113,7 @@ void device_pm_remove(struct device *dev) | |||
109 | dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); | 113 | dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); |
110 | complete_all(&dev->power.completion); | 114 | complete_all(&dev->power.completion); |
111 | mutex_lock(&dpm_list_mtx); | 115 | mutex_lock(&dpm_list_mtx); |
116 | dev_pm_qos_constraints_destroy(dev); | ||
112 | list_del_init(&dev->power.entry); | 117 | list_del_init(&dev->power.entry); |
113 | mutex_unlock(&dpm_list_mtx); | 118 | mutex_unlock(&dpm_list_mtx); |
114 | device_wakeup_disable(dev); | 119 | device_wakeup_disable(dev); |
@@ -464,8 +469,12 @@ void dpm_resume_noirq(pm_message_t state) | |||
464 | mutex_unlock(&dpm_list_mtx); | 469 | mutex_unlock(&dpm_list_mtx); |
465 | 470 | ||
466 | error = device_resume_noirq(dev, state); | 471 | error = device_resume_noirq(dev, state); |
467 | if (error) | 472 | if (error) { |
473 | suspend_stats.failed_resume_noirq++; | ||
474 | dpm_save_failed_step(SUSPEND_RESUME_NOIRQ); | ||
475 | dpm_save_failed_dev(dev_name(dev)); | ||
468 | pm_dev_err(dev, state, " early", error); | 476 | pm_dev_err(dev, state, " early", error); |
477 | } | ||
469 | 478 | ||
470 | mutex_lock(&dpm_list_mtx); | 479 | mutex_lock(&dpm_list_mtx); |
471 | put_device(dev); | 480 | put_device(dev); |
@@ -626,8 +635,12 @@ void dpm_resume(pm_message_t state) | |||
626 | mutex_unlock(&dpm_list_mtx); | 635 | mutex_unlock(&dpm_list_mtx); |
627 | 636 | ||
628 | error = device_resume(dev, state, false); | 637 | error = device_resume(dev, state, false); |
629 | if (error) | 638 | if (error) { |
639 | suspend_stats.failed_resume++; | ||
640 | dpm_save_failed_step(SUSPEND_RESUME); | ||
641 | dpm_save_failed_dev(dev_name(dev)); | ||
630 | pm_dev_err(dev, state, "", error); | 642 | pm_dev_err(dev, state, "", error); |
643 | } | ||
631 | 644 | ||
632 | mutex_lock(&dpm_list_mtx); | 645 | mutex_lock(&dpm_list_mtx); |
633 | } | 646 | } |
@@ -802,6 +815,9 @@ int dpm_suspend_noirq(pm_message_t state) | |||
802 | mutex_lock(&dpm_list_mtx); | 815 | mutex_lock(&dpm_list_mtx); |
803 | if (error) { | 816 | if (error) { |
804 | pm_dev_err(dev, state, " late", error); | 817 | pm_dev_err(dev, state, " late", error); |
818 | suspend_stats.failed_suspend_noirq++; | ||
819 | dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ); | ||
820 | dpm_save_failed_dev(dev_name(dev)); | ||
805 | put_device(dev); | 821 | put_device(dev); |
806 | break; | 822 | break; |
807 | } | 823 | } |
@@ -902,7 +918,12 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) | |||
902 | } | 918 | } |
903 | 919 | ||
904 | End: | 920 | End: |
905 | dev->power.is_suspended = !error; | 921 | if (!error) { |
922 | dev->power.is_suspended = true; | ||
923 | if (dev->power.wakeup_path | ||
924 | && dev->parent && !dev->parent->power.ignore_children) | ||
925 | dev->parent->power.wakeup_path = true; | ||
926 | } | ||
906 | 927 | ||
907 | device_unlock(dev); | 928 | device_unlock(dev); |
908 | complete_all(&dev->power.completion); | 929 | complete_all(&dev->power.completion); |
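This hunk, together with the device_prepare() change further down, implements a bottom-up wakeup-path marking; condensed into two illustrative helpers (not part of the patch) for readability:

	static void foo_seed_wakeup_path(struct device *dev)
	{
		/* device_prepare(): seed from the user-visible setting. */
		dev->power.wakeup_path = device_may_wakeup(dev);
	}

	static void foo_propagate_wakeup_path(struct device *dev)
	{
		/*
		 * __device_suspend(): after a successful suspend a marked
		 * child taints its parent, so platform code (e.g. the genpd
		 * wakeup checks above) can see a wakeup device below it.
		 */
		if (dev->power.wakeup_path &&
		    dev->parent && !dev->parent->power.ignore_children)
			dev->parent->power.wakeup_path = true;
	}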
@@ -923,8 +944,10 @@ static void async_suspend(void *data, async_cookie_t cookie) | |||
923 | int error; | 944 | int error; |
924 | 945 | ||
925 | error = __device_suspend(dev, pm_transition, true); | 946 | error = __device_suspend(dev, pm_transition, true); |
926 | if (error) | 947 | if (error) { |
948 | dpm_save_failed_dev(dev_name(dev)); | ||
927 | pm_dev_err(dev, pm_transition, " async", error); | 949 | pm_dev_err(dev, pm_transition, " async", error); |
950 | } | ||
928 | 951 | ||
929 | put_device(dev); | 952 | put_device(dev); |
930 | } | 953 | } |
@@ -967,6 +990,7 @@ int dpm_suspend(pm_message_t state) | |||
967 | mutex_lock(&dpm_list_mtx); | 990 | mutex_lock(&dpm_list_mtx); |
968 | if (error) { | 991 | if (error) { |
969 | pm_dev_err(dev, state, "", error); | 992 | pm_dev_err(dev, state, "", error); |
993 | dpm_save_failed_dev(dev_name(dev)); | ||
970 | put_device(dev); | 994 | put_device(dev); |
971 | break; | 995 | break; |
972 | } | 996 | } |
@@ -980,7 +1004,10 @@ int dpm_suspend(pm_message_t state) | |||
980 | async_synchronize_full(); | 1004 | async_synchronize_full(); |
981 | if (!error) | 1005 | if (!error) |
982 | error = async_error; | 1006 | error = async_error; |
983 | if (!error) | 1007 | if (error) { |
1008 | suspend_stats.failed_suspend++; | ||
1009 | dpm_save_failed_step(SUSPEND_SUSPEND); | ||
1010 | } else | ||
984 | dpm_show_time(starttime, state, NULL); | 1011 | dpm_show_time(starttime, state, NULL); |
985 | return error; | 1012 | return error; |
986 | } | 1013 | } |
@@ -999,6 +1026,8 @@ static int device_prepare(struct device *dev, pm_message_t state) | |||
999 | 1026 | ||
1000 | device_lock(dev); | 1027 | device_lock(dev); |
1001 | 1028 | ||
1029 | dev->power.wakeup_path = device_may_wakeup(dev); | ||
1030 | |||
1002 | if (dev->pm_domain) { | 1031 | if (dev->pm_domain) { |
1003 | pm_dev_dbg(dev, state, "preparing power domain "); | 1032 | pm_dev_dbg(dev, state, "preparing power domain "); |
1004 | if (dev->pm_domain->ops.prepare) | 1033 | if (dev->pm_domain->ops.prepare) |
@@ -1088,7 +1117,10 @@ int dpm_suspend_start(pm_message_t state) | |||
1088 | int error; | 1117 | int error; |
1089 | 1118 | ||
1090 | error = dpm_prepare(state); | 1119 | error = dpm_prepare(state); |
1091 | if (!error) | 1120 | if (error) { |
1121 | suspend_stats.failed_prepare++; | ||
1122 | dpm_save_failed_step(SUSPEND_PREPARE); | ||
1123 | } else | ||
1092 | error = dpm_suspend(state); | 1124 | error = dpm_suspend(state); |
1093 | return error; | 1125 | return error; |
1094 | } | 1126 | } |
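The failure bookkeeping added throughout main.c feeds struct suspend_stats (declared in linux/suspend.h, assumed here); a hypothetical debug helper that reads back only the counters updated above:

	static void foo_dump_suspend_failures(void)
	{
		pr_info("failed: prepare=%d suspend=%d suspend_noirq=%d resume=%d resume_noirq=%d\n",
			suspend_stats.failed_prepare,
			suspend_stats.failed_suspend,
			suspend_stats.failed_suspend_noirq,
			suspend_stats.failed_resume,
			suspend_stats.failed_resume_noirq);
	}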
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c index b23de185cb04..95706fa24c73 100644 --- a/drivers/base/power/opp.c +++ b/drivers/base/power/opp.c | |||
@@ -73,6 +73,7 @@ struct opp { | |||
73 | * RCU usage: nodes are not modified in the list of device_opp, | 73 | * RCU usage: nodes are not modified in the list of device_opp, |
74 | * however addition is possible and is secured by dev_opp_list_lock | 74 | * however addition is possible and is secured by dev_opp_list_lock |
75 | * @dev: device pointer | 75 | * @dev: device pointer |
76 | * @head: notifier head used to notify of OPP availability changes. | ||
76 | * @opp_list: list of opps | 77 | * @opp_list: list of opps |
77 | * | 78 | * |
78 | * This is an internal data structure maintaining the link to opps attached to | 79 | * This is an internal data structure maintaining the link to opps attached to |
@@ -83,6 +84,7 @@ struct device_opp { | |||
83 | struct list_head node; | 84 | struct list_head node; |
84 | 85 | ||
85 | struct device *dev; | 86 | struct device *dev; |
87 | struct srcu_notifier_head head; | ||
86 | struct list_head opp_list; | 88 | struct list_head opp_list; |
87 | }; | 89 | }; |
88 | 90 | ||
@@ -404,6 +406,7 @@ int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) | |||
404 | } | 406 | } |
405 | 407 | ||
406 | dev_opp->dev = dev; | 408 | dev_opp->dev = dev; |
409 | srcu_init_notifier_head(&dev_opp->head); | ||
407 | INIT_LIST_HEAD(&dev_opp->opp_list); | 410 | INIT_LIST_HEAD(&dev_opp->opp_list); |
408 | 411 | ||
409 | /* Secure the device list modification */ | 412 | /* Secure the device list modification */ |
@@ -428,6 +431,11 @@ int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) | |||
428 | list_add_rcu(&new_opp->node, head); | 431 | list_add_rcu(&new_opp->node, head); |
429 | mutex_unlock(&dev_opp_list_lock); | 432 | mutex_unlock(&dev_opp_list_lock); |
430 | 433 | ||
434 | /* | ||
435 | * Notify listeners that the list of operable frequency/voltage | ||
436 | * pairs has changed. | ||
437 | */ | ||
438 | srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ADD, new_opp); | ||
431 | return 0; | 439 | return 0; |
432 | } | 440 | } |
433 | 441 | ||
@@ -504,6 +512,14 @@ static int opp_set_availability(struct device *dev, unsigned long freq, | |||
504 | mutex_unlock(&dev_opp_list_lock); | 512 | mutex_unlock(&dev_opp_list_lock); |
505 | synchronize_rcu(); | 513 | synchronize_rcu(); |
506 | 514 | ||
515 | /* Notify the change of the OPP availability */ | ||
516 | if (availability_req) | ||
517 | srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ENABLE, | ||
518 | new_opp); | ||
519 | else | ||
520 | srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_DISABLE, | ||
521 | new_opp); | ||
522 | |||
507 | /* clean up old opp */ | 523 | /* clean up old opp */ |
508 | new_opp = opp; | 524 | new_opp = opp; |
509 | goto out; | 525 | goto out; |
@@ -643,3 +659,17 @@ void opp_free_cpufreq_table(struct device *dev, | |||
643 | *table = NULL; | 659 | *table = NULL; |
644 | } | 660 | } |
645 | #endif /* CONFIG_CPU_FREQ */ | 661 | #endif /* CONFIG_CPU_FREQ */ |
662 | |||
663 | /** | ||
664 | * opp_get_notifier() - find the notifier head of the device's OPP list | ||
665 | * @dev: device pointer used to lookup device OPPs. | ||
666 | */ | ||
667 | struct srcu_notifier_head *opp_get_notifier(struct device *dev) | ||
668 | { | ||
669 | struct device_opp *dev_opp = find_device_opp(dev); | ||
670 | |||
671 | if (IS_ERR(dev_opp)) | ||
672 | return ERR_CAST(dev_opp); /* matching type */ | ||
673 | |||
674 | return &dev_opp->head; | ||
675 | } | ||
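The srcu notifier head makes OPP availability changes observable from, say, a cpufreq driver. A sketch of a consumer (handler and names hypothetical; the OPP_EVENT_* values are the ones raised above):

	static int foo_opp_event(struct notifier_block *nb,
				 unsigned long event, void *data)
	{
		switch (event) {
		case OPP_EVENT_ADD:	/* opp_add() registered a new OPP */
		case OPP_EVENT_ENABLE:	/* opp_enable() made one usable */
		case OPP_EVENT_DISABLE:	/* opp_disable() took one out of use */
			/* e.g. rebuild the cpufreq frequency table */
			break;
		}
		return NOTIFY_OK;
	}

	static struct notifier_block foo_opp_nb = {
		.notifier_call = foo_opp_event,
	};

	/* During setup, once the device has an OPP list: */
	struct srcu_notifier_head *nh = opp_get_notifier(dev);

	if (!IS_ERR(nh))
		srcu_notifier_chain_register(nh, &foo_opp_nb);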
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h index f2a25f18fde7..9bf62323aaf3 100644 --- a/drivers/base/power/power.h +++ b/drivers/base/power/power.h | |||
@@ -1,3 +1,5 @@ | |||
1 | #include <linux/pm_qos.h> | ||
2 | |||
1 | #ifdef CONFIG_PM_RUNTIME | 3 | #ifdef CONFIG_PM_RUNTIME |
2 | 4 | ||
3 | extern void pm_runtime_init(struct device *dev); | 5 | extern void pm_runtime_init(struct device *dev); |
@@ -35,15 +37,21 @@ extern void device_pm_move_last(struct device *); | |||
35 | static inline void device_pm_init(struct device *dev) | 37 | static inline void device_pm_init(struct device *dev) |
36 | { | 38 | { |
37 | spin_lock_init(&dev->power.lock); | 39 | spin_lock_init(&dev->power.lock); |
40 | dev->power.power_state = PMSG_INVALID; | ||
38 | pm_runtime_init(dev); | 41 | pm_runtime_init(dev); |
39 | } | 42 | } |
40 | 43 | ||
44 | static inline void device_pm_add(struct device *dev) | ||
45 | { | ||
46 | dev_pm_qos_constraints_init(dev); | ||
47 | } | ||
48 | |||
41 | static inline void device_pm_remove(struct device *dev) | 49 | static inline void device_pm_remove(struct device *dev) |
42 | { | 50 | { |
51 | dev_pm_qos_constraints_destroy(dev); | ||
43 | pm_runtime_remove(dev); | 52 | pm_runtime_remove(dev); |
44 | } | 53 | } |
45 | 54 | ||
46 | static inline void device_pm_add(struct device *dev) {} | ||
47 | static inline void device_pm_move_before(struct device *deva, | 55 | static inline void device_pm_move_before(struct device *deva, |
48 | struct device *devb) {} | 56 | struct device *devb) {} |
49 | static inline void device_pm_move_after(struct device *deva, | 57 | static inline void device_pm_move_after(struct device *deva, |
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c new file mode 100644 index 000000000000..86de6c50fc41 --- /dev/null +++ b/drivers/base/power/qos.c | |||
@@ -0,0 +1,414 @@ | |||
1 | /* | ||
2 | * Devices PM QoS constraints management | ||
3 | * | ||
4 | * Copyright (C) 2011 Texas Instruments, Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * | ||
11 | * This module exposes the interface to kernel space for specifying | ||
12 | * per-device PM QoS dependencies. It provides infrastructure for registration | ||
13 | * of: | ||
14 | * | ||
15 | * Dependents on a QoS value: register requests | ||
16 | * Watchers of QoS value: get notified when target QoS value changes | ||
17 | * | ||
18 | * This QoS design is best effort based. Dependents register their QoS needs. | ||
19 | * Watchers register to keep track of the current QoS needs of the system. | ||
20 | * Watchers can register different types of notification callbacks: | ||
21 | * . a per-device notification callback using the dev_pm_qos_*_notifier API. | ||
22 | * The notification chain data is stored in the per-device constraint | ||
23 | * data struct. | ||
24 | * . a system-wide notification callback using the dev_pm_qos_*_global_notifier | ||
25 | * API. The notification chain data is stored in a static variable. | ||
26 | * | ||
27 | * Note about the per-device constraint data struct allocation: | ||
28 | * . The per-device constraints data struct ptr is stored into the device | ||
29 | * dev_pm_info. | ||
30 | * . To minimize the data usage by the per-device constraints, the data struct | ||
31 | * is only allocated at the first call to dev_pm_qos_add_request. | ||
32 | * . The data is later freed when the device is removed from the system. | ||
33 | * . A global mutex protects the constraints users from the data being | ||
34 | * allocated and freed. | ||
35 | */ | ||
36 | |||
37 | #include <linux/pm_qos.h> | ||
38 | #include <linux/spinlock.h> | ||
39 | #include <linux/slab.h> | ||
40 | #include <linux/device.h> | ||
41 | #include <linux/mutex.h> | ||
42 | #include <linux/export.h> | ||
43 | |||
44 | |||
45 | static DEFINE_MUTEX(dev_pm_qos_mtx); | ||
46 | |||
47 | static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers); | ||
48 | |||
49 | /** | ||
50 | * dev_pm_qos_read_value - Get PM QoS constraint for a given device. | ||
51 | * @dev: Device to get the PM QoS constraint value for. | ||
52 | */ | ||
53 | s32 dev_pm_qos_read_value(struct device *dev) | ||
54 | { | ||
55 | struct pm_qos_constraints *c; | ||
56 | unsigned long flags; | ||
57 | s32 ret = 0; | ||
58 | |||
59 | spin_lock_irqsave(&dev->power.lock, flags); | ||
60 | |||
61 | c = dev->power.constraints; | ||
62 | if (c) | ||
63 | ret = pm_qos_read_value(c); | ||
64 | |||
65 | spin_unlock_irqrestore(&dev->power.lock, flags); | ||
66 | |||
67 | return ret; | ||
68 | } | ||
69 | |||
70 | /* | ||
71 | * apply_constraint | ||
72 | * @req: constraint request to apply | ||
73 | * @action: action to perform add/update/remove, of type enum pm_qos_req_action | ||
74 | * @value: defines the qos request | ||
75 | * | ||
76 | * Internal function to update the constraints list using the PM QoS core | ||
77 | * code and if needed call the per-device and the global notification | ||
78 | * callbacks | ||
79 | */ | ||
80 | static int apply_constraint(struct dev_pm_qos_request *req, | ||
81 | enum pm_qos_req_action action, int value) | ||
82 | { | ||
83 | int ret, curr_value; | ||
84 | |||
85 | ret = pm_qos_update_target(req->dev->power.constraints, | ||
86 | &req->node, action, value); | ||
87 | |||
88 | if (ret) { | ||
89 | /* Call the global callbacks if needed */ | ||
90 | curr_value = pm_qos_read_value(req->dev->power.constraints); | ||
91 | blocking_notifier_call_chain(&dev_pm_notifiers, | ||
92 | (unsigned long)curr_value, | ||
93 | req); | ||
94 | } | ||
95 | |||
96 | return ret; | ||
97 | } | ||
98 | |||
99 | /* | ||
100 | * dev_pm_qos_constraints_allocate | ||
101 | * @dev: device to allocate data for | ||
102 | * | ||
103 | * Called at the first call to add_request, for constraint data allocation | ||
104 | * Must be called with the dev_pm_qos_mtx mutex held | ||
105 | */ | ||
106 | static int dev_pm_qos_constraints_allocate(struct device *dev) | ||
107 | { | ||
108 | struct pm_qos_constraints *c; | ||
109 | struct blocking_notifier_head *n; | ||
110 | |||
111 | c = kzalloc(sizeof(*c), GFP_KERNEL); | ||
112 | if (!c) | ||
113 | return -ENOMEM; | ||
114 | |||
115 | n = kzalloc(sizeof(*n), GFP_KERNEL); | ||
116 | if (!n) { | ||
117 | kfree(c); | ||
118 | return -ENOMEM; | ||
119 | } | ||
120 | BLOCKING_INIT_NOTIFIER_HEAD(n); | ||
121 | |||
122 | plist_head_init(&c->list); | ||
123 | c->target_value = PM_QOS_DEV_LAT_DEFAULT_VALUE; | ||
124 | c->default_value = PM_QOS_DEV_LAT_DEFAULT_VALUE; | ||
125 | c->type = PM_QOS_MIN; | ||
126 | c->notifiers = n; | ||
127 | |||
128 | spin_lock_irq(&dev->power.lock); | ||
129 | dev->power.constraints = c; | ||
130 | spin_unlock_irq(&dev->power.lock); | ||
131 | |||
132 | return 0; | ||
133 | } | ||
134 | |||
135 | /** | ||
136 | * dev_pm_qos_constraints_init - Initialize device's PM QoS constraints pointer. | ||
137 | * @dev: target device | ||
138 | * | ||
139 | * Called from the device PM subsystem during device insertion under | ||
140 | * device_pm_lock(). | ||
141 | */ | ||
142 | void dev_pm_qos_constraints_init(struct device *dev) | ||
143 | { | ||
144 | mutex_lock(&dev_pm_qos_mtx); | ||
145 | dev->power.constraints = NULL; | ||
146 | dev->power.power_state = PMSG_ON; | ||
147 | mutex_unlock(&dev_pm_qos_mtx); | ||
148 | } | ||
149 | |||
150 | /** | ||
151 | * dev_pm_qos_constraints_destroy - Release a device's PM QoS constraints data. | ||
152 | * @dev: target device | ||
153 | * | ||
154 | * Called from the device PM subsystem on device removal under device_pm_lock(). | ||
155 | */ | ||
156 | void dev_pm_qos_constraints_destroy(struct device *dev) | ||
157 | { | ||
158 | struct dev_pm_qos_request *req, *tmp; | ||
159 | struct pm_qos_constraints *c; | ||
160 | |||
161 | mutex_lock(&dev_pm_qos_mtx); | ||
162 | |||
163 | dev->power.power_state = PMSG_INVALID; | ||
164 | c = dev->power.constraints; | ||
165 | if (!c) | ||
166 | goto out; | ||
167 | |||
168 | /* Flush the constraints list for the device */ | ||
169 | plist_for_each_entry_safe(req, tmp, &c->list, node) { | ||
170 | /* | ||
171 | * Update constraints list and call the notification | ||
172 | * callbacks if needed | ||
173 | */ | ||
174 | apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); | ||
175 | memset(req, 0, sizeof(*req)); | ||
176 | } | ||
177 | |||
178 | spin_lock_irq(&dev->power.lock); | ||
179 | dev->power.constraints = NULL; | ||
180 | spin_unlock_irq(&dev->power.lock); | ||
181 | |||
182 | kfree(c->notifiers); | ||
183 | kfree(c); | ||
184 | |||
185 | out: | ||
186 | mutex_unlock(&dev_pm_qos_mtx); | ||
187 | } | ||
188 | |||
189 | /** | ||
190 | * dev_pm_qos_add_request - inserts new qos request into the list | ||
191 | * @dev: target device for the constraint | ||
192 | * @req: pointer to a preallocated handle | ||
193 | * @value: defines the qos request | ||
194 | * | ||
195 | * This function inserts a new entry in the device constraints list of | ||
196 | * requested qos performance characteristics. It recomputes the aggregate | ||
197 | * QoS expectations of parameters and initializes the dev_pm_qos_request | ||
198 | * handle. Caller needs to save this handle for later use in updates and | ||
199 | * removal. | ||
200 | * | ||
201 | * Returns 1 if the aggregated constraint value has changed, | ||
202 | * 0 if the aggregated constraint value has not changed, | ||
203 | * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory | ||
204 | * to allocate for data structures, -ENODEV if the device has just been removed | ||
205 | * from the system. | ||
206 | */ | ||
207 | int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, | ||
208 | s32 value) | ||
209 | { | ||
210 | int ret = 0; | ||
211 | |||
212 | if (!dev || !req) /* guard against callers passing in null */ | ||
213 | return -EINVAL; | ||
214 | |||
215 | if (WARN(dev_pm_qos_request_active(req), | ||
216 | "%s() called for already added request\n", __func__)) | ||
217 | return -EINVAL; | ||
218 | |||
219 | req->dev = dev; | ||
220 | |||
221 | mutex_lock(&dev_pm_qos_mtx); | ||
222 | |||
223 | if (!dev->power.constraints) { | ||
224 | if (dev->power.power_state.event == PM_EVENT_INVALID) { | ||
225 | /* The device has been removed from the system. */ | ||
226 | req->dev = NULL; | ||
227 | ret = -ENODEV; | ||
228 | goto out; | ||
229 | } else { | ||
230 | /* | ||
231 | * Allocate the constraints data on the first call to | ||
232 | * add_request, i.e. only if the data is not already | ||
233 | * allocated and if the device has not been removed. | ||
234 | */ | ||
235 | ret = dev_pm_qos_constraints_allocate(dev); | ||
236 | } | ||
237 | } | ||
238 | |||
239 | if (!ret) | ||
240 | ret = apply_constraint(req, PM_QOS_ADD_REQ, value); | ||
241 | |||
242 | out: | ||
243 | mutex_unlock(&dev_pm_qos_mtx); | ||
244 | |||
245 | return ret; | ||
246 | } | ||
247 | EXPORT_SYMBOL_GPL(dev_pm_qos_add_request); | ||
248 | |||
249 | /** | ||
250 | * dev_pm_qos_update_request - modifies an existing qos request | ||
251 | * @req : handle to list element holding a dev_pm_qos request to use | ||
252 | * @new_value: defines the qos request | ||
253 | * | ||
254 | * Updates an existing dev PM qos request along with updating the | ||
255 | * target value. | ||
256 | * | ||
257 | * Attempts are made to make this code callable on hot code paths. | ||
258 | * | ||
259 | * Returns 1 if the aggregated constraint value has changed, | ||
260 | * 0 if the aggregated constraint value has not changed, | ||
261 | * -EINVAL in case of wrong parameters, -ENODEV if the device has been | ||
262 | * removed from the system | ||
263 | */ | ||
264 | int dev_pm_qos_update_request(struct dev_pm_qos_request *req, | ||
265 | s32 new_value) | ||
266 | { | ||
267 | int ret = 0; | ||
268 | |||
269 | if (!req) /* guard against callers passing in null */ | ||
270 | return -EINVAL; | ||
271 | |||
272 | if (WARN(!dev_pm_qos_request_active(req), | ||
273 | "%s() called for unknown object\n", __func__)) | ||
274 | return -EINVAL; | ||
275 | |||
276 | mutex_lock(&dev_pm_qos_mtx); | ||
277 | |||
278 | if (req->dev->power.constraints) { | ||
279 | if (new_value != req->node.prio) | ||
280 | ret = apply_constraint(req, PM_QOS_UPDATE_REQ, | ||
281 | new_value); | ||
282 | } else { | ||
283 | /* Return if the device has been removed */ | ||
284 | ret = -ENODEV; | ||
285 | } | ||
286 | |||
287 | mutex_unlock(&dev_pm_qos_mtx); | ||
288 | return ret; | ||
289 | } | ||
290 | EXPORT_SYMBOL_GPL(dev_pm_qos_update_request); | ||
291 | |||
292 | /** | ||
293 | * dev_pm_qos_remove_request - removes an existing qos request | ||
294 | * @req: handle to request list element | ||
295 | * | ||
296 | * Will remove pm qos request from the list of constraints and | ||
297 | * recompute the current target value. Call this on slow code paths. | ||
298 | * | ||
299 | * Returns 1 if the aggregated constraint value has changed, | ||
300 | * 0 if the aggregated constraint value has not changed, | ||
301 | * -EINVAL in case of wrong parameters, -ENODEV if the device has been | ||
302 | * removed from the system | ||
303 | */ | ||
304 | int dev_pm_qos_remove_request(struct dev_pm_qos_request *req) | ||
305 | { | ||
306 | int ret = 0; | ||
307 | |||
308 | if (!req) /* guard against callers passing in null */ | ||
309 | return -EINVAL; | ||
310 | |||
311 | if (WARN(!dev_pm_qos_request_active(req), | ||
312 | "%s() called for unknown object\n", __func__)) | ||
313 | return -EINVAL; | ||
314 | |||
315 | mutex_lock(&dev_pm_qos_mtx); | ||
316 | |||
317 | if (req->dev->power.constraints) { | ||
318 | ret = apply_constraint(req, PM_QOS_REMOVE_REQ, | ||
319 | PM_QOS_DEFAULT_VALUE); | ||
320 | memset(req, 0, sizeof(*req)); | ||
321 | } else { | ||
322 | /* Return if the device has been removed */ | ||
323 | ret = -ENODEV; | ||
324 | } | ||
325 | |||
326 | mutex_unlock(&dev_pm_qos_mtx); | ||
327 | return ret; | ||
328 | } | ||
329 | EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request); | ||
330 | |||
331 | /** | ||
332 | * dev_pm_qos_add_notifier - sets notification entry for changes to target value | ||
333 | * of per-device PM QoS constraints | ||
334 | * | ||
335 | * @dev: target device for the constraint | ||
336 | * @notifier: notifier block managed by caller. | ||
337 | * | ||
338 | * Will register the notifier into a notification chain that gets called | ||
339 | * upon changes to the target value for the device. | ||
340 | */ | ||
341 | int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier) | ||
342 | { | ||
343 | int retval = 0; | ||
344 | |||
345 | mutex_lock(&dev_pm_qos_mtx); | ||
346 | |||
347 | /* Silently return if the constraints object is not present. */ | ||
348 | if (dev->power.constraints) | ||
349 | retval = blocking_notifier_chain_register( | ||
350 | dev->power.constraints->notifiers, | ||
351 | notifier); | ||
352 | |||
353 | mutex_unlock(&dev_pm_qos_mtx); | ||
354 | return retval; | ||
355 | } | ||
356 | EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier); | ||
357 | |||
358 | /** | ||
359 | * dev_pm_qos_remove_notifier - deletes notification for changes to target value | ||
360 | * of per-device PM QoS constraints | ||
361 | * | ||
362 | * @dev: target device for the constraint | ||
363 | * @notifier: notifier block to be removed. | ||
364 | * | ||
365 | * Will remove the notifier from the notification chain that gets called | ||
366 | * upon changes to the target value. | ||
367 | */ | ||
368 | int dev_pm_qos_remove_notifier(struct device *dev, | ||
369 | struct notifier_block *notifier) | ||
370 | { | ||
371 | int retval = 0; | ||
372 | |||
373 | mutex_lock(&dev_pm_qos_mtx); | ||
374 | |||
375 | /* Silently return if the constraints object is not present. */ | ||
376 | if (dev->power.constraints) | ||
377 | retval = blocking_notifier_chain_unregister( | ||
378 | dev->power.constraints->notifiers, | ||
379 | notifier); | ||
380 | |||
381 | mutex_unlock(&dev_pm_qos_mtx); | ||
382 | return retval; | ||
383 | } | ||
384 | EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier); | ||
385 | |||
386 | /** | ||
387 | * dev_pm_qos_add_global_notifier - sets notification entry for changes to | ||
388 | * target value of the PM QoS constraints for any device | ||
389 | * | ||
390 | * @notifier: notifier block managed by caller. | ||
391 | * | ||
392 | * Will register the notifier into a notification chain that gets called | ||
393 | * upon changes to the target value for any device. | ||
394 | */ | ||
395 | int dev_pm_qos_add_global_notifier(struct notifier_block *notifier) | ||
396 | { | ||
397 | return blocking_notifier_chain_register(&dev_pm_notifiers, notifier); | ||
398 | } | ||
399 | EXPORT_SYMBOL_GPL(dev_pm_qos_add_global_notifier); | ||
400 | |||
401 | /** | ||
402 | * dev_pm_qos_remove_global_notifier - deletes notification for changes to | ||
403 | * target value of PM QoS constraints for any device | ||
404 | * | ||
405 | * @notifier: notifier block to be removed. | ||
406 | * | ||
407 | * Will remove the notifier from the notification chain that gets called | ||
408 | * upon changes to the target value for any device. | ||
409 | */ | ||
410 | int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier) | ||
411 | { | ||
412 | return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier); | ||
413 | } | ||
414 | EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier); | ||
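A request's full lifecycle against this interface, as a hedged sketch (names and the latency values are illustrative; return-value semantics are as documented above):

	static struct dev_pm_qos_request foo_lat_req;

	static int foo_constrain(struct device *dev)
	{
		int ret;

		/*
		 * The first add_request for a device allocates its
		 * constraints object under dev_pm_qos_mtx.
		 */
		ret = dev_pm_qos_add_request(dev, &foo_lat_req, 100);
		if (ret < 0)
			return ret;

		/* Tighten the constraint later; intended for hot paths. */
		dev_pm_qos_update_request(&foo_lat_req, 20);

		/* Drop it when done; the handle is zeroed on removal. */
		return dev_pm_qos_remove_request(&foo_lat_req);
	}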
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index acb3f83b8079..8c78443bca8f 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c | |||
@@ -8,7 +8,9 @@ | |||
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/sched.h> | 10 | #include <linux/sched.h> |
11 | #include <linux/export.h> | ||
11 | #include <linux/pm_runtime.h> | 12 | #include <linux/pm_runtime.h> |
13 | #include <trace/events/rpm.h> | ||
12 | #include "power.h" | 14 | #include "power.h" |
13 | 15 | ||
14 | static int rpm_resume(struct device *dev, int rpmflags); | 16 | static int rpm_resume(struct device *dev, int rpmflags); |
@@ -28,13 +30,10 @@ static int rpm_suspend(struct device *dev, int rpmflags); | |||
28 | void update_pm_runtime_accounting(struct device *dev) | 30 | void update_pm_runtime_accounting(struct device *dev) |
29 | { | 31 | { |
30 | unsigned long now = jiffies; | 32 | unsigned long now = jiffies; |
31 | int delta; | 33 | unsigned long delta; |
32 | 34 | ||
33 | delta = now - dev->power.accounting_timestamp; | 35 | delta = now - dev->power.accounting_timestamp; |
34 | 36 | ||
35 | if (delta < 0) | ||
36 | delta = 0; | ||
37 | |||
38 | dev->power.accounting_timestamp = now; | 37 | dev->power.accounting_timestamp = now; |
39 | 38 | ||
40 | if (dev->power.disable_depth > 0) | 39 | if (dev->power.disable_depth > 0) |
@@ -155,6 +154,31 @@ static int rpm_check_suspend_allowed(struct device *dev) | |||
155 | } | 154 | } |
156 | 155 | ||
157 | /** | 156 | /** |
157 | * __rpm_callback - Run a given runtime PM callback for a given device. | ||
158 | * @cb: Runtime PM callback to run. | ||
159 | * @dev: Device to run the callback for. | ||
160 | */ | ||
161 | static int __rpm_callback(int (*cb)(struct device *), struct device *dev) | ||
162 | __releases(&dev->power.lock) __acquires(&dev->power.lock) | ||
163 | { | ||
164 | int retval; | ||
165 | |||
166 | if (dev->power.irq_safe) | ||
167 | spin_unlock(&dev->power.lock); | ||
168 | else | ||
169 | spin_unlock_irq(&dev->power.lock); | ||
170 | |||
171 | retval = cb(dev); | ||
172 | |||
173 | if (dev->power.irq_safe) | ||
174 | spin_lock(&dev->power.lock); | ||
175 | else | ||
176 | spin_lock_irq(&dev->power.lock); | ||
177 | |||
178 | return retval; | ||
179 | } | ||
180 | |||
181 | /** | ||
158 | * rpm_idle - Notify device bus type if the device can be suspended. | 182 | * rpm_idle - Notify device bus type if the device can be suspended. |
159 | * @dev: Device to notify the bus type about. | 183 | * @dev: Device to notify the bus type about. |
160 | * @rpmflags: Flag bits. | 184 | * @rpmflags: Flag bits. |
@@ -171,6 +195,7 @@ static int rpm_idle(struct device *dev, int rpmflags) | |||
171 | int (*callback)(struct device *); | 195 | int (*callback)(struct device *); |
172 | int retval; | 196 | int retval; |
173 | 197 | ||
198 | trace_rpm_idle(dev, rpmflags); | ||
174 | retval = rpm_check_suspend_allowed(dev); | 199 | retval = rpm_check_suspend_allowed(dev); |
175 | if (retval < 0) | 200 | if (retval < 0) |
176 | ; /* Conditions are wrong. */ | 201 | ; /* Conditions are wrong. */ |
@@ -225,24 +250,14 @@ static int rpm_idle(struct device *dev, int rpmflags) | |||
225 | else | 250 | else |
226 | callback = NULL; | 251 | callback = NULL; |
227 | 252 | ||
228 | if (callback) { | 253 | if (callback) |
229 | if (dev->power.irq_safe) | 254 | __rpm_callback(callback, dev); |
230 | spin_unlock(&dev->power.lock); | ||
231 | else | ||
232 | spin_unlock_irq(&dev->power.lock); | ||
233 | |||
234 | callback(dev); | ||
235 | |||
236 | if (dev->power.irq_safe) | ||
237 | spin_lock(&dev->power.lock); | ||
238 | else | ||
239 | spin_lock_irq(&dev->power.lock); | ||
240 | } | ||
241 | 255 | ||
242 | dev->power.idle_notification = false; | 256 | dev->power.idle_notification = false; |
243 | wake_up_all(&dev->power.wait_queue); | 257 | wake_up_all(&dev->power.wait_queue); |
244 | 258 | ||
245 | out: | 259 | out: |
260 | trace_rpm_return_int(dev, _THIS_IP_, retval); | ||
246 | return retval; | 261 | return retval; |
247 | } | 262 | } |
248 | 263 | ||
@@ -252,22 +267,14 @@ static int rpm_idle(struct device *dev, int rpmflags) | |||
252 | * @dev: Device to run the callback for. | 267 | * @dev: Device to run the callback for. |
253 | */ | 268 | */ |
254 | static int rpm_callback(int (*cb)(struct device *), struct device *dev) | 269 | static int rpm_callback(int (*cb)(struct device *), struct device *dev) |
255 | __releases(&dev->power.lock) __acquires(&dev->power.lock) | ||
256 | { | 270 | { |
257 | int retval; | 271 | int retval; |
258 | 272 | ||
259 | if (!cb) | 273 | if (!cb) |
260 | return -ENOSYS; | 274 | return -ENOSYS; |
261 | 275 | ||
262 | if (dev->power.irq_safe) { | 276 | retval = __rpm_callback(cb, dev); |
263 | retval = cb(dev); | ||
264 | } else { | ||
265 | spin_unlock_irq(&dev->power.lock); | ||
266 | 277 | ||
267 | retval = cb(dev); | ||
268 | |||
269 | spin_lock_irq(&dev->power.lock); | ||
270 | } | ||
271 | dev->power.runtime_error = retval; | 278 | dev->power.runtime_error = retval; |
272 | return retval != -EACCES ? retval : -EIO; | 279 | return retval != -EACCES ? retval : -EIO; |
273 | } | 280 | } |
@@ -277,14 +284,19 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev) | |||
277 | * @dev: Device to suspend. | 284 | * @dev: Device to suspend. |
278 | * @rpmflags: Flag bits. | 285 | * @rpmflags: Flag bits. |
279 | * | 286 | * |
280 | * Check if the device's runtime PM status allows it to be suspended. If | 287 | * Check if the device's runtime PM status allows it to be suspended. |
281 | * another suspend has been started earlier, either return immediately or wait | 288 | * Cancel a pending idle notification, autosuspend or suspend. If |
282 | * for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags. Cancel a | 289 | * another suspend has been started earlier, either return immediately |
283 | * pending idle notification. If the RPM_ASYNC flag is set then queue a | 290 | * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC |
284 | * suspend request; otherwise run the ->runtime_suspend() callback directly. | 291 | * flags. If the RPM_ASYNC flag is set then queue a suspend request; |
285 | * If a deferred resume was requested while the callback was running then carry | 292 | * otherwise run the ->runtime_suspend() callback directly. If |
286 | * it out; otherwise send an idle notification for the device (if the suspend | 293 | * ->runtime_suspend() succeeds, carry out a deferred resume if one was |
287 | * failed) or for its parent (if the suspend succeeded). | 294 | * requested while the callback was running; otherwise send an idle |
295 | * notification for the device's parent (provided the parent does not | ||
296 | * ignore its children and dev->power.irq_safe is not set). | ||
297 | * If ->runtime_suspend() fails with -EAGAIN or -EBUSY, and if the RPM_AUTO | ||
298 | * flag is set and the next autosuspend-delay expiration time is in the | ||
299 | * future, schedule another autosuspend attempt. | ||
288 | * | 300 | * |
289 | * This function must be called under dev->power.lock with interrupts disabled. | 301 | * This function must be called under dev->power.lock with interrupts disabled. |
290 | */ | 302 | */ |
@@ -295,7 +307,7 @@ static int rpm_suspend(struct device *dev, int rpmflags) | |||
295 | struct device *parent = NULL; | 307 | struct device *parent = NULL; |
296 | int retval; | 308 | int retval; |
297 | 309 | ||
298 | dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags); | 310 | trace_rpm_suspend(dev, rpmflags); |
299 | 311 | ||
300 | repeat: | 312 | repeat: |
301 | retval = rpm_check_suspend_allowed(dev); | 313 | retval = rpm_check_suspend_allowed(dev); |
@@ -347,6 +359,15 @@ static int rpm_suspend(struct device *dev, int rpmflags) | |||
347 | goto out; | 359 | goto out; |
348 | } | 360 | } |
349 | 361 | ||
362 | if (dev->power.irq_safe) { | ||
363 | spin_unlock(&dev->power.lock); | ||
364 | |||
365 | cpu_relax(); | ||
366 | |||
367 | spin_lock(&dev->power.lock); | ||
368 | goto repeat; | ||
369 | } | ||
370 | |||
350 | /* Wait for the other suspend running in parallel with us. */ | 371 | /* Wait for the other suspend running in parallel with us. */ |
351 | for (;;) { | 372 | for (;;) { |
352 | prepare_to_wait(&dev->power.wait_queue, &wait, | 373 | prepare_to_wait(&dev->power.wait_queue, &wait, |
@@ -396,19 +417,31 @@ static int rpm_suspend(struct device *dev, int rpmflags) | |||
396 | if (retval) { | 417 | if (retval) { |
397 | __update_runtime_status(dev, RPM_ACTIVE); | 418 | __update_runtime_status(dev, RPM_ACTIVE); |
398 | dev->power.deferred_resume = false; | 419 | dev->power.deferred_resume = false; |
399 | if (retval == -EAGAIN || retval == -EBUSY) | 420 | if (retval == -EAGAIN || retval == -EBUSY) { |
400 | dev->power.runtime_error = 0; | 421 | dev->power.runtime_error = 0; |
401 | else | 422 | |
423 | /* | ||
424 | * If the callback routine failed an autosuspend, and | ||
425 | * if the last_busy time has been updated so that there | ||
426 | * is a new autosuspend expiration time, automatically | ||
427 | * reschedule another autosuspend. | ||
428 | */ | ||
429 | if ((rpmflags & RPM_AUTO) && | ||
430 | pm_runtime_autosuspend_expiration(dev) != 0) | ||
431 | goto repeat; | ||
432 | } else { | ||
402 | pm_runtime_cancel_pending(dev); | 433 | pm_runtime_cancel_pending(dev); |
403 | } else { | 434 | } |
435 | wake_up_all(&dev->power.wait_queue); | ||
436 | goto out; | ||
437 | } | ||
404 | no_callback: | 438 | no_callback: |
405 | __update_runtime_status(dev, RPM_SUSPENDED); | 439 | __update_runtime_status(dev, RPM_SUSPENDED); |
406 | pm_runtime_deactivate_timer(dev); | 440 | pm_runtime_deactivate_timer(dev); |
407 | 441 | ||
408 | if (dev->parent) { | 442 | if (dev->parent) { |
409 | parent = dev->parent; | 443 | parent = dev->parent; |
410 | atomic_add_unless(&parent->power.child_count, -1, 0); | 444 | atomic_add_unless(&parent->power.child_count, -1, 0); |
411 | } | ||
412 | } | 445 | } |
413 | wake_up_all(&dev->power.wait_queue); | 446 | wake_up_all(&dev->power.wait_queue); |
414 | 447 | ||
@@ -430,7 +463,7 @@ static int rpm_suspend(struct device *dev, int rpmflags) | |||
430 | } | 463 | } |
431 | 464 | ||
432 | out: | 465 | out: |
433 | dev_dbg(dev, "%s returns %d\n", __func__, retval); | 466 | trace_rpm_return_int(dev, _THIS_IP_, retval); |
434 | 467 | ||
435 | return retval; | 468 | return retval; |
436 | } | 469 | } |
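The rescheduling branch changes what a driver's ->runtime_suspend() can usefully do when the hardware is momentarily busy; a sketch with hypothetical driver helpers:

	static int foo_runtime_suspend(struct device *dev)
	{
		struct foo *f = dev_get_drvdata(dev);

		if (foo_hw_busy(f)) {		/* hypothetical */
			/*
			 * Refresh last_busy so a new expiration time exists;
			 * if this suspend was RPM_AUTO, rpm_suspend() now
			 * loops back and schedules another attempt instead
			 * of giving up.
			 */
			pm_runtime_mark_last_busy(dev);
			return -EBUSY;
		}

		foo_hw_power_down(f);		/* hypothetical */
		return 0;
	}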
@@ -459,7 +492,7 @@ static int rpm_resume(struct device *dev, int rpmflags) | |||
459 | struct device *parent = NULL; | 492 | struct device *parent = NULL; |
460 | int retval = 0; | 493 | int retval = 0; |
461 | 494 | ||
462 | dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags); | 495 | trace_rpm_resume(dev, rpmflags); |
463 | 496 | ||
464 | repeat: | 497 | repeat: |
465 | if (dev->power.runtime_error) | 498 | if (dev->power.runtime_error) |
@@ -496,6 +529,15 @@ static int rpm_resume(struct device *dev, int rpmflags) | |||
496 | goto out; | 529 | goto out; |
497 | } | 530 | } |
498 | 531 | ||
532 | if (dev->power.irq_safe) { | ||
533 | spin_unlock(&dev->power.lock); | ||
534 | |||
535 | cpu_relax(); | ||
536 | |||
537 | spin_lock(&dev->power.lock); | ||
538 | goto repeat; | ||
539 | } | ||
540 | |||
499 | /* Wait for the operation carried out in parallel with us. */ | 541 | /* Wait for the operation carried out in parallel with us. */ |
500 | for (;;) { | 542 | for (;;) { |
501 | prepare_to_wait(&dev->power.wait_queue, &wait, | 543 | prepare_to_wait(&dev->power.wait_queue, &wait, |
@@ -615,7 +657,7 @@ static int rpm_resume(struct device *dev, int rpmflags) | |||
615 | spin_lock_irq(&dev->power.lock); | 657 | spin_lock_irq(&dev->power.lock); |
616 | } | 658 | } |
617 | 659 | ||
618 | dev_dbg(dev, "%s returns %d\n", __func__, retval); | 660 | trace_rpm_return_int(dev, _THIS_IP_, retval); |
619 | 661 | ||
620 | return retval; | 662 | return retval; |
621 | } | 663 | } |
@@ -732,13 +774,16 @@ EXPORT_SYMBOL_GPL(pm_schedule_suspend); | |||
732 | * return immediately if it is larger than zero. Then carry out an idle | 774 | * return immediately if it is larger than zero. Then carry out an idle |
733 | * notification, either synchronous or asynchronous. | 775 | * notification, either synchronous or asynchronous. |
734 | * | 776 | * |
735 | * This routine may be called in atomic context if the RPM_ASYNC flag is set. | 777 | * This routine may be called in atomic context if the RPM_ASYNC flag is set, |
778 | * or if pm_runtime_irq_safe() has been called. | ||
736 | */ | 779 | */ |
737 | int __pm_runtime_idle(struct device *dev, int rpmflags) | 780 | int __pm_runtime_idle(struct device *dev, int rpmflags) |
738 | { | 781 | { |
739 | unsigned long flags; | 782 | unsigned long flags; |
740 | int retval; | 783 | int retval; |
741 | 784 | ||
785 | might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); | ||
786 | |||
742 | if (rpmflags & RPM_GET_PUT) { | 787 | if (rpmflags & RPM_GET_PUT) { |
743 | if (!atomic_dec_and_test(&dev->power.usage_count)) | 788 | if (!atomic_dec_and_test(&dev->power.usage_count)) |
744 | return 0; | 789 | return 0; |
@@ -761,13 +806,16 @@ EXPORT_SYMBOL_GPL(__pm_runtime_idle); | |||
761 | * return immediately if it is larger than zero. Then carry out a suspend, | 806 | * return immediately if it is larger than zero. Then carry out a suspend, |
762 | * either synchronous or asynchronous. | 807 | * either synchronous or asynchronous. |
763 | * | 808 | * |
764 | * This routine may be called in atomic context if the RPM_ASYNC flag is set. | 809 | * This routine may be called in atomic context if the RPM_ASYNC flag is set, |
810 | * or if pm_runtime_irq_safe() has been called. | ||
765 | */ | 811 | */ |
766 | int __pm_runtime_suspend(struct device *dev, int rpmflags) | 812 | int __pm_runtime_suspend(struct device *dev, int rpmflags) |
767 | { | 813 | { |
768 | unsigned long flags; | 814 | unsigned long flags; |
769 | int retval; | 815 | int retval; |
770 | 816 | ||
817 | might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); | ||
818 | |||
771 | if (rpmflags & RPM_GET_PUT) { | 819 | if (rpmflags & RPM_GET_PUT) { |
772 | if (!atomic_dec_and_test(&dev->power.usage_count)) | 820 | if (!atomic_dec_and_test(&dev->power.usage_count)) |
773 | return 0; | 821 | return 0; |
@@ -789,13 +837,16 @@ EXPORT_SYMBOL_GPL(__pm_runtime_suspend); | |||
789 | * If the RPM_GET_PUT flag is set, increment the device's usage count. Then | 837 | * If the RPM_GET_PUT flag is set, increment the device's usage count. Then |
790 | * carry out a resume, either synchronous or asynchronous. | 838 | * carry out a resume, either synchronous or asynchronous. |
791 | * | 839 | * |
792 | * This routine may be called in atomic context if the RPM_ASYNC flag is set. | 840 | * This routine may be called in atomic context if the RPM_ASYNC flag is set, |
841 | * or if pm_runtime_irq_safe() has been called. | ||
793 | */ | 842 | */ |
794 | int __pm_runtime_resume(struct device *dev, int rpmflags) | 843 | int __pm_runtime_resume(struct device *dev, int rpmflags) |
795 | { | 844 | { |
796 | unsigned long flags; | 845 | unsigned long flags; |
797 | int retval; | 846 | int retval; |
798 | 847 | ||
848 | might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); | ||
849 | |||
799 | if (rpmflags & RPM_GET_PUT) | 850 | if (rpmflags & RPM_GET_PUT) |
800 | atomic_inc(&dev->power.usage_count); | 851 | atomic_inc(&dev->power.usage_count); |
801 | 852 | ||
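The hunks above make synchronous runtime PM calls legal in atomic context once pm_runtime_irq_safe() has been called: rpm_resume() now busy-waits instead of sleeping, and the new might_sleep_if() checks only fire when neither RPM_ASYNC nor irq_safe applies. A minimal driver sketch of the resulting usage (the foo_* names are hypothetical):

	#include <linux/interrupt.h>
	#include <linux/pm_runtime.h>

	static int foo_probe(struct device *dev)
	{
		pm_runtime_irq_safe(dev);	/* callbacks may no longer sleep */
		pm_runtime_enable(dev);
		return 0;
	}

	static irqreturn_t foo_irq(int irq, void *data)
	{
		struct device *dev = data;

		pm_runtime_get_sync(dev);	/* spins via cpu_relax(), never sleeps */
		/* ... service the hardware ... */
		pm_runtime_put(dev);
		return IRQ_HANDLED;
	}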
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index 17b7934f31cb..adf41be0ea66 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c | |||
@@ -4,6 +4,7 @@ | |||
4 | 4 | ||
5 | #include <linux/device.h> | 5 | #include <linux/device.h> |
6 | #include <linux/string.h> | 6 | #include <linux/string.h> |
7 | #include <linux/export.h> | ||
7 | #include <linux/pm_runtime.h> | 8 | #include <linux/pm_runtime.h> |
8 | #include <linux/atomic.h> | 9 | #include <linux/atomic.h> |
9 | #include <linux/jiffies.h> | 10 | #include <linux/jiffies.h> |
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c index af10abecb99b..d94a1f5121cf 100644 --- a/drivers/base/power/trace.c +++ b/drivers/base/power/trace.c | |||
@@ -8,6 +8,7 @@ | |||
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/resume-trace.h> | 10 | #include <linux/resume-trace.h> |
11 | #include <linux/export.h> | ||
11 | #include <linux/rtc.h> | 12 | #include <linux/rtc.h> |
12 | 13 | ||
13 | #include <asm/rtc.h> | 14 | #include <asm/rtc.h> |
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 84f7c7d5a098..caf995fb774b 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/slab.h> | 10 | #include <linux/slab.h> |
11 | #include <linux/sched.h> | 11 | #include <linux/sched.h> |
12 | #include <linux/capability.h> | 12 | #include <linux/capability.h> |
13 | #include <linux/export.h> | ||
13 | #include <linux/suspend.h> | 14 | #include <linux/suspend.h> |
14 | #include <linux/seq_file.h> | 15 | #include <linux/seq_file.h> |
15 | #include <linux/debugfs.h> | 16 | #include <linux/debugfs.h> |
@@ -276,7 +277,9 @@ EXPORT_SYMBOL_GPL(device_set_wakeup_capable); | |||
276 | * | 277 | * |
277 | * By default, most devices should leave wakeup disabled. The exceptions are | 278 | * By default, most devices should leave wakeup disabled. The exceptions are |
278 | * devices that everyone expects to be wakeup sources: keyboards, power buttons, | 279 | * devices that everyone expects to be wakeup sources: keyboards, power buttons, |
279 | * possibly network interfaces, etc. | 280 | * possibly network interfaces, etc. Also, devices that don't generate their |
281 | * own wakeup requests but merely forward requests from one bus to another | ||
282 | * (like PCI bridges) should have wakeup enabled by default. | ||
280 | */ | 283 | */ |
281 | int device_init_wakeup(struct device *dev, bool enable) | 284 | int device_init_wakeup(struct device *dev, bool enable) |
282 | { | 285 | { |
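A sketch of the call the updated comment describes, for a device in the request-forwarding category (bridge_probe is a hypothetical name):

	#include <linux/pm_wakeup.h>

	static int bridge_probe(struct device *dev)
	{
		/* forwards wakeup requests from one bus to another,
		 * so wakeup should be enabled by default */
		return device_init_wakeup(dev, true);
	}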
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig index fabbf6cc5367..2fc6a66f39a4 100644 --- a/drivers/base/regmap/Kconfig +++ b/drivers/base/regmap/Kconfig | |||
@@ -4,6 +4,8 @@ | |||
4 | 4 | ||
5 | config REGMAP | 5 | config REGMAP |
6 | default y if (REGMAP_I2C || REGMAP_SPI) | 6 | default y if (REGMAP_I2C || REGMAP_SPI) |
7 | select LZO_COMPRESS | ||
8 | select LZO_DECOMPRESS | ||
7 | bool | 9 | bool |
8 | 10 | ||
9 | config REGMAP_I2C | 11 | config REGMAP_I2C |
diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile index f476f4571295..0573c8a9dacb 100644 --- a/drivers/base/regmap/Makefile +++ b/drivers/base/regmap/Makefile | |||
@@ -1,3 +1,4 @@ | |||
1 | obj-$(CONFIG_REGMAP) += regmap.o | 1 | obj-$(CONFIG_REGMAP) += regmap.o regcache.o regcache-indexed.o regcache-rbtree.o regcache-lzo.o |
2 | obj-$(CONFIG_DEBUG_FS) += regmap-debugfs.o | ||
2 | obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o | 3 | obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o |
3 | obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o | 4 | obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o |
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h new file mode 100644 index 000000000000..348ff02eb93e --- /dev/null +++ b/drivers/base/regmap/internal.h | |||
@@ -0,0 +1,128 @@ | |||
1 | /* | ||
2 | * Register map access API internal header | ||
3 | * | ||
4 | * Copyright 2011 Wolfson Microelectronics plc | ||
5 | * | ||
6 | * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | ||
12 | |||
13 | #ifndef _REGMAP_INTERNAL_H | ||
14 | #define _REGMAP_INTERNAL_H | ||
15 | |||
16 | #include <linux/regmap.h> | ||
17 | #include <linux/fs.h> | ||
18 | |||
19 | struct regmap; | ||
20 | struct regcache_ops; | ||
21 | |||
22 | struct regmap_format { | ||
23 | size_t buf_size; | ||
24 | size_t reg_bytes; | ||
25 | size_t val_bytes; | ||
26 | void (*format_write)(struct regmap *map, | ||
27 | unsigned int reg, unsigned int val); | ||
28 | void (*format_reg)(void *buf, unsigned int reg); | ||
29 | void (*format_val)(void *buf, unsigned int val); | ||
30 | unsigned int (*parse_val)(void *buf); | ||
31 | }; | ||
32 | |||
33 | struct regmap { | ||
34 | struct mutex lock; | ||
35 | |||
36 | struct device *dev; /* Device we do I/O on */ | ||
37 | void *work_buf; /* Scratch buffer used to format I/O */ | ||
38 | struct regmap_format format; /* Buffer format */ | ||
39 | const struct regmap_bus *bus; | ||
40 | |||
41 | #ifdef CONFIG_DEBUG_FS | ||
42 | struct dentry *debugfs; | ||
43 | #endif | ||
44 | |||
45 | unsigned int max_register; | ||
46 | bool (*writeable_reg)(struct device *dev, unsigned int reg); | ||
47 | bool (*readable_reg)(struct device *dev, unsigned int reg); | ||
48 | bool (*volatile_reg)(struct device *dev, unsigned int reg); | ||
49 | bool (*precious_reg)(struct device *dev, unsigned int reg); | ||
50 | |||
51 | u8 read_flag_mask; | ||
52 | u8 write_flag_mask; | ||
53 | |||
54 | /* regcache specific members */ | ||
55 | const struct regcache_ops *cache_ops; | ||
56 | enum regcache_type cache_type; | ||
57 | |||
58 | /* number of bytes in reg_defaults_raw */ | ||
59 | unsigned int cache_size_raw; | ||
60 | /* number of bytes per word in reg_defaults_raw */ | ||
61 | unsigned int cache_word_size; | ||
62 | /* number of entries in reg_defaults */ | ||
63 | unsigned int num_reg_defaults; | ||
64 | /* number of entries in reg_defaults_raw */ | ||
65 | unsigned int num_reg_defaults_raw; | ||
66 | |||
67 | /* if set, only the cache is modified, not the HW */ | ||
68 | unsigned int cache_only:1; | ||
69 | /* if set, only the HW is modified, not the cache */ | ||
70 | unsigned int cache_bypass:1; | ||
71 | /* if set, remember to free reg_defaults_raw */ | ||
72 | unsigned int cache_free:1; | ||
73 | |||
74 | struct reg_default *reg_defaults; | ||
75 | const void *reg_defaults_raw; | ||
76 | void *cache; | ||
77 | }; | ||
78 | |||
79 | struct regcache_ops { | ||
80 | const char *name; | ||
81 | enum regcache_type type; | ||
82 | int (*init)(struct regmap *map); | ||
83 | int (*exit)(struct regmap *map); | ||
84 | int (*read)(struct regmap *map, unsigned int reg, unsigned int *value); | ||
85 | int (*write)(struct regmap *map, unsigned int reg, unsigned int value); | ||
86 | int (*sync)(struct regmap *map); | ||
87 | }; | ||
88 | |||
89 | bool regmap_writeable(struct regmap *map, unsigned int reg); | ||
90 | bool regmap_readable(struct regmap *map, unsigned int reg); | ||
91 | bool regmap_volatile(struct regmap *map, unsigned int reg); | ||
92 | bool regmap_precious(struct regmap *map, unsigned int reg); | ||
93 | |||
94 | int _regmap_write(struct regmap *map, unsigned int reg, | ||
95 | unsigned int val); | ||
96 | |||
97 | #ifdef CONFIG_DEBUG_FS | ||
98 | extern void regmap_debugfs_initcall(void); | ||
99 | extern void regmap_debugfs_init(struct regmap *map); | ||
100 | extern void regmap_debugfs_exit(struct regmap *map); | ||
101 | #else | ||
102 | static inline void regmap_debugfs_initcall(void) { } | ||
103 | static inline void regmap_debugfs_init(struct regmap *map) { } | ||
104 | static inline void regmap_debugfs_exit(struct regmap *map) { } | ||
105 | #endif | ||
106 | |||
107 | /* regcache core declarations */ | ||
108 | int regcache_init(struct regmap *map); | ||
109 | void regcache_exit(struct regmap *map); | ||
110 | int regcache_read(struct regmap *map, | ||
111 | unsigned int reg, unsigned int *value); | ||
112 | int regcache_write(struct regmap *map, | ||
113 | unsigned int reg, unsigned int value); | ||
114 | int regcache_sync(struct regmap *map); | ||
115 | |||
116 | unsigned int regcache_get_val(const void *base, unsigned int idx, | ||
117 | unsigned int word_size); | ||
118 | bool regcache_set_val(void *base, unsigned int idx, | ||
119 | unsigned int val, unsigned int word_size); | ||
120 | int regcache_lookup_reg(struct regmap *map, unsigned int reg); | ||
121 | int regcache_insert_reg(struct regmap *map, unsigned int reg, | ||
122 | unsigned int val); | ||
123 | |||
124 | extern struct regcache_ops regcache_indexed_ops; | ||
125 | extern struct regcache_ops regcache_rbtree_ops; | ||
126 | extern struct regcache_ops regcache_lzo_ops; | ||
127 | |||
128 | #endif | ||
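For orientation, the cache-related fields above are normally populated from a driver's regmap_config. A sketch of the driver-facing side, assuming the regmap_config cache fields added by this series (the device values are invented):

	#include <linux/kernel.h>
	#include <linux/regmap.h>

	static const struct reg_default wm_reg_defaults[] = {
		{ .reg = 0x00, .def = 0x1234 },
		{ .reg = 0x01, .def = 0x0000 },
	};

	static const struct regmap_config wm_regmap_config = {
		.reg_bits = 8,
		.val_bits = 16,
		.max_register = 0xff,
		.reg_defaults = wm_reg_defaults,
		.num_reg_defaults = ARRAY_SIZE(wm_reg_defaults),
		.cache_type = REGCACHE_RBTREE,
	};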
diff --git a/drivers/base/regmap/regcache-indexed.c b/drivers/base/regmap/regcache-indexed.c new file mode 100644 index 000000000000..507731ad8ec1 --- /dev/null +++ b/drivers/base/regmap/regcache-indexed.c | |||
@@ -0,0 +1,64 @@ | |||
1 | /* | ||
2 | * Register cache access API - indexed caching support | ||
3 | * | ||
4 | * Copyright 2011 Wolfson Microelectronics plc | ||
5 | * | ||
6 | * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | ||
12 | |||
13 | #include <linux/slab.h> | ||
14 | |||
15 | #include "internal.h" | ||
16 | |||
17 | static int regcache_indexed_read(struct regmap *map, unsigned int reg, | ||
18 | unsigned int *value) | ||
19 | { | ||
20 | int ret; | ||
21 | |||
22 | ret = regcache_lookup_reg(map, reg); | ||
23 | if (ret >= 0) | ||
24 | *value = map->reg_defaults[ret].def; | ||
25 | |||
26 | return ret; | ||
27 | } | ||
28 | |||
29 | static int regcache_indexed_write(struct regmap *map, unsigned int reg, | ||
30 | unsigned int value) | ||
31 | { | ||
32 | int ret; | ||
33 | |||
34 | ret = regcache_lookup_reg(map, reg); | ||
35 | if (ret < 0) | ||
36 | return regcache_insert_reg(map, reg, value); | ||
37 | map->reg_defaults[ret].def = value; | ||
38 | return 0; | ||
39 | } | ||
40 | |||
41 | static int regcache_indexed_sync(struct regmap *map) | ||
42 | { | ||
43 | unsigned int i; | ||
44 | int ret; | ||
45 | |||
46 | for (i = 0; i < map->num_reg_defaults; i++) { | ||
47 | ret = _regmap_write(map, map->reg_defaults[i].reg, | ||
48 | map->reg_defaults[i].def); | ||
49 | if (ret < 0) | ||
50 | return ret; | ||
51 | dev_dbg(map->dev, "Synced register %#x, value %#x\n", | ||
52 | map->reg_defaults[i].reg, | ||
53 | map->reg_defaults[i].def); | ||
54 | } | ||
55 | return 0; | ||
56 | } | ||
57 | |||
58 | struct regcache_ops regcache_indexed_ops = { | ||
59 | .type = REGCACHE_INDEXED, | ||
60 | .name = "indexed", | ||
61 | .read = regcache_indexed_read, | ||
62 | .write = regcache_indexed_write, | ||
63 | .sync = regcache_indexed_sync | ||
64 | }; | ||
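The indexed cache leans entirely on the sorted reg_defaults array and the bsearch-based regcache_lookup_reg() declared in internal.h. A userspace rendering of that lookup pattern (all names here are illustrative, not kernel API):

	#include <stdio.h>
	#include <stdlib.h>

	struct reg_default {
		unsigned int reg;
		unsigned int def;
	};

	static int cmp(const void *a, const void *b)
	{
		return ((const struct reg_default *)a)->reg -
		       ((const struct reg_default *)b)->reg;
	}

	int main(void)
	{
		struct reg_default defs[] = { { 0, 0x10 }, { 2, 0x20 }, { 5, 0x30 } };
		struct reg_default key = { .reg = 2, .def = 0 };
		struct reg_default *r;

		r = bsearch(&key, defs, 3, sizeof(*defs), cmp);
		if (r)
			printf("reg 2 -> index %ld, default %#x\n",
			       (long)(r - defs), r->def);
		return 0;
	}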
diff --git a/drivers/base/regmap/regcache-lzo.c b/drivers/base/regmap/regcache-lzo.c new file mode 100644 index 000000000000..066aeece3626 --- /dev/null +++ b/drivers/base/regmap/regcache-lzo.c | |||
@@ -0,0 +1,361 @@ | |||
1 | /* | ||
2 | * Register cache access API - LZO caching support | ||
3 | * | ||
4 | * Copyright 2011 Wolfson Microelectronics plc | ||
5 | * | ||
6 | * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | ||
12 | |||
13 | #include <linux/slab.h> | ||
14 | #include <linux/lzo.h> | ||
15 | |||
16 | #include "internal.h" | ||
17 | |||
18 | struct regcache_lzo_ctx { | ||
19 | void *wmem; | ||
20 | void *dst; | ||
21 | const void *src; | ||
22 | size_t src_len; | ||
23 | size_t dst_len; | ||
24 | size_t decompressed_size; | ||
25 | unsigned long *sync_bmp; | ||
26 | int sync_bmp_nbits; | ||
27 | }; | ||
28 | |||
29 | #define LZO_BLOCK_NUM 8 | ||
30 | static int regcache_lzo_block_count(void) | ||
31 | { | ||
32 | return LZO_BLOCK_NUM; | ||
33 | } | ||
34 | |||
35 | static int regcache_lzo_prepare(struct regcache_lzo_ctx *lzo_ctx) | ||
36 | { | ||
37 | lzo_ctx->wmem = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL); | ||
38 | if (!lzo_ctx->wmem) | ||
39 | return -ENOMEM; | ||
40 | return 0; | ||
41 | } | ||
42 | |||
43 | static int regcache_lzo_compress(struct regcache_lzo_ctx *lzo_ctx) | ||
44 | { | ||
45 | size_t compress_size; | ||
46 | int ret; | ||
47 | |||
48 | ret = lzo1x_1_compress(lzo_ctx->src, lzo_ctx->src_len, | ||
49 | lzo_ctx->dst, &compress_size, lzo_ctx->wmem); | ||
50 | if (ret != LZO_E_OK || compress_size > lzo_ctx->dst_len) | ||
51 | return -EINVAL; | ||
52 | lzo_ctx->dst_len = compress_size; | ||
53 | return 0; | ||
54 | } | ||
55 | |||
56 | static int regcache_lzo_decompress(struct regcache_lzo_ctx *lzo_ctx) | ||
57 | { | ||
58 | size_t dst_len; | ||
59 | int ret; | ||
60 | |||
61 | dst_len = lzo_ctx->dst_len; | ||
62 | ret = lzo1x_decompress_safe(lzo_ctx->src, lzo_ctx->src_len, | ||
63 | lzo_ctx->dst, &dst_len); | ||
64 | if (ret != LZO_E_OK || dst_len != lzo_ctx->dst_len) | ||
65 | return -EINVAL; | ||
66 | return 0; | ||
67 | } | ||
68 | |||
69 | static int regcache_lzo_compress_cache_block(struct regmap *map, | ||
70 | struct regcache_lzo_ctx *lzo_ctx) | ||
71 | { | ||
72 | int ret; | ||
73 | |||
74 | lzo_ctx->dst_len = lzo1x_worst_compress(PAGE_SIZE); | ||
75 | lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL); | ||
76 | if (!lzo_ctx->dst) { | ||
77 | lzo_ctx->dst_len = 0; | ||
78 | return -ENOMEM; | ||
79 | } | ||
80 | |||
81 | ret = regcache_lzo_compress(lzo_ctx); | ||
82 | if (ret < 0) | ||
83 | return ret; | ||
84 | return 0; | ||
85 | } | ||
86 | |||
87 | static int regcache_lzo_decompress_cache_block(struct regmap *map, | ||
88 | struct regcache_lzo_ctx *lzo_ctx) | ||
89 | { | ||
90 | int ret; | ||
91 | |||
92 | lzo_ctx->dst_len = lzo_ctx->decompressed_size; | ||
93 | lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL); | ||
94 | if (!lzo_ctx->dst) { | ||
95 | lzo_ctx->dst_len = 0; | ||
96 | return -ENOMEM; | ||
97 | } | ||
98 | |||
99 | ret = regcache_lzo_decompress(lzo_ctx); | ||
100 | if (ret < 0) | ||
101 | return ret; | ||
102 | return 0; | ||
103 | } | ||
104 | |||
105 | static inline int regcache_lzo_get_blkindex(struct regmap *map, | ||
106 | unsigned int reg) | ||
107 | { | ||
108 | return (reg * map->cache_word_size) / | ||
109 | DIV_ROUND_UP(map->cache_size_raw, regcache_lzo_block_count()); | ||
110 | } | ||
111 | |||
112 | static inline int regcache_lzo_get_blkpos(struct regmap *map, | ||
113 | unsigned int reg) | ||
114 | { | ||
115 | return reg % (DIV_ROUND_UP(map->cache_size_raw, regcache_lzo_block_count()) / | ||
116 | map->cache_word_size); | ||
117 | } | ||
118 | |||
119 | static inline int regcache_lzo_get_blksize(struct regmap *map) | ||
120 | { | ||
121 | return DIV_ROUND_UP(map->cache_size_raw, regcache_lzo_block_count()); | ||
122 | } | ||
123 | |||
124 | static int regcache_lzo_init(struct regmap *map) | ||
125 | { | ||
126 | struct regcache_lzo_ctx **lzo_blocks; | ||
127 | size_t bmp_size; | ||
128 | int ret, i, blksize, blkcount; | ||
129 | const char *p, *end; | ||
130 | unsigned long *sync_bmp; | ||
131 | |||
132 | ret = 0; | ||
133 | |||
134 | blkcount = regcache_lzo_block_count(); | ||
135 | map->cache = kzalloc(blkcount * sizeof *lzo_blocks, | ||
136 | GFP_KERNEL); | ||
137 | if (!map->cache) | ||
138 | return -ENOMEM; | ||
139 | lzo_blocks = map->cache; | ||
140 | |||
141 | /* | ||
142 | * allocate a bitmap to be used when syncing the cache with | ||
143 | * the hardware. Each time a register is modified, the corresponding | ||
144 | * bit is set in the bitmap, so we know that we have to sync | ||
145 | * that register. | ||
146 | */ | ||
147 | bmp_size = map->num_reg_defaults_raw; | ||
148 | sync_bmp = kmalloc(BITS_TO_LONGS(bmp_size) * sizeof(long), | ||
149 | GFP_KERNEL); | ||
150 | if (!sync_bmp) { | ||
151 | ret = -ENOMEM; | ||
152 | goto err; | ||
153 | } | ||
154 | bitmap_zero(sync_bmp, bmp_size); | ||
155 | |||
156 | /* allocate the lzo blocks and initialize them */ | ||
157 | for (i = 0; i < blkcount; i++) { | ||
158 | lzo_blocks[i] = kzalloc(sizeof **lzo_blocks, | ||
159 | GFP_KERNEL); | ||
160 | if (!lzo_blocks[i]) { | ||
161 | kfree(sync_bmp); | ||
162 | ret = -ENOMEM; | ||
163 | goto err; | ||
164 | } | ||
165 | lzo_blocks[i]->sync_bmp = sync_bmp; | ||
166 | lzo_blocks[i]->sync_bmp_nbits = bmp_size; | ||
167 | /* alloc the working space for the compressed block */ | ||
168 | ret = regcache_lzo_prepare(lzo_blocks[i]); | ||
169 | if (ret < 0) | ||
170 | goto err; | ||
171 | } | ||
172 | |||
173 | blksize = regcache_lzo_get_blksize(map); | ||
174 | p = map->reg_defaults_raw; | ||
175 | end = map->reg_defaults_raw + map->cache_size_raw; | ||
176 | /* compress the register map and fill the lzo blocks */ | ||
177 | for (i = 0; i < blkcount; i++, p += blksize) { | ||
178 | lzo_blocks[i]->src = p; | ||
179 | if (p + blksize > end) | ||
180 | lzo_blocks[i]->src_len = end - p; | ||
181 | else | ||
182 | lzo_blocks[i]->src_len = blksize; | ||
183 | ret = regcache_lzo_compress_cache_block(map, | ||
184 | lzo_blocks[i]); | ||
185 | if (ret < 0) | ||
186 | goto err; | ||
187 | lzo_blocks[i]->decompressed_size = | ||
188 | lzo_blocks[i]->src_len; | ||
189 | } | ||
190 | |||
191 | return 0; | ||
192 | err: | ||
193 | regcache_exit(map); | ||
194 | return ret; | ||
195 | } | ||
196 | |||
197 | static int regcache_lzo_exit(struct regmap *map) | ||
198 | { | ||
199 | struct regcache_lzo_ctx **lzo_blocks; | ||
200 | int i, blkcount; | ||
201 | |||
202 | lzo_blocks = map->cache; | ||
203 | if (!lzo_blocks) | ||
204 | return 0; | ||
205 | |||
206 | blkcount = regcache_lzo_block_count(); | ||
207 | /* | ||
208 | * the pointer to the bitmap used for syncing the cache | ||
209 | * is shared amongst all lzo_blocks. Ensure it is freed | ||
210 | * only once. | ||
211 | */ | ||
212 | if (lzo_blocks[0]) | ||
213 | kfree(lzo_blocks[0]->sync_bmp); | ||
214 | for (i = 0; i < blkcount; i++) { | ||
215 | if (lzo_blocks[i]) { | ||
216 | kfree(lzo_blocks[i]->wmem); | ||
217 | kfree(lzo_blocks[i]->dst); | ||
218 | } | ||
219 | /* each lzo_block is a pointer returned by kmalloc or NULL */ | ||
220 | kfree(lzo_blocks[i]); | ||
221 | } | ||
222 | kfree(lzo_blocks); | ||
223 | map->cache = NULL; | ||
224 | return 0; | ||
225 | } | ||
226 | |||
227 | static int regcache_lzo_read(struct regmap *map, | ||
228 | unsigned int reg, unsigned int *value) | ||
229 | { | ||
230 | struct regcache_lzo_ctx *lzo_block, **lzo_blocks; | ||
231 | int ret, blkindex, blkpos; | ||
232 | size_t blksize, tmp_dst_len; | ||
233 | void *tmp_dst; | ||
234 | |||
235 | /* index of the compressed lzo block */ | ||
236 | blkindex = regcache_lzo_get_blkindex(map, reg); | ||
237 | /* register index within the decompressed block */ | ||
238 | blkpos = regcache_lzo_get_blkpos(map, reg); | ||
239 | /* size of the compressed block */ | ||
240 | blksize = regcache_lzo_get_blksize(map); | ||
241 | lzo_blocks = map->cache; | ||
242 | lzo_block = lzo_blocks[blkindex]; | ||
243 | |||
244 | /* save the pointer and length of the compressed block */ | ||
245 | tmp_dst = lzo_block->dst; | ||
246 | tmp_dst_len = lzo_block->dst_len; | ||
247 | |||
248 | /* prepare the source to be the compressed block */ | ||
249 | lzo_block->src = lzo_block->dst; | ||
250 | lzo_block->src_len = lzo_block->dst_len; | ||
251 | |||
252 | /* decompress the block */ | ||
253 | ret = regcache_lzo_decompress_cache_block(map, lzo_block); | ||
254 | if (ret >= 0) | ||
255 | /* fetch the value from the cache */ | ||
256 | *value = regcache_get_val(lzo_block->dst, blkpos, | ||
257 | map->cache_word_size); | ||
258 | |||
259 | kfree(lzo_block->dst); | ||
260 | /* restore the pointer and length of the compressed block */ | ||
261 | lzo_block->dst = tmp_dst; | ||
262 | lzo_block->dst_len = tmp_dst_len; | ||
263 | |||
264 | return ret; | ||
265 | } | ||
266 | |||
267 | static int regcache_lzo_write(struct regmap *map, | ||
268 | unsigned int reg, unsigned int value) | ||
269 | { | ||
270 | struct regcache_lzo_ctx *lzo_block, **lzo_blocks; | ||
271 | int ret, blkindex, blkpos; | ||
272 | size_t blksize, tmp_dst_len; | ||
273 | void *tmp_dst; | ||
274 | |||
275 | /* index of the compressed lzo block */ | ||
276 | blkindex = regcache_lzo_get_blkindex(map, reg); | ||
277 | /* register index within the decompressed block */ | ||
278 | blkpos = regcache_lzo_get_blkpos(map, reg); | ||
279 | /* size of the compressed block */ | ||
280 | blksize = regcache_lzo_get_blksize(map); | ||
281 | lzo_blocks = map->cache; | ||
282 | lzo_block = lzo_blocks[blkindex]; | ||
283 | |||
284 | /* save the pointer and length of the compressed block */ | ||
285 | tmp_dst = lzo_block->dst; | ||
286 | tmp_dst_len = lzo_block->dst_len; | ||
287 | |||
288 | /* prepare the source to be the compressed block */ | ||
289 | lzo_block->src = lzo_block->dst; | ||
290 | lzo_block->src_len = lzo_block->dst_len; | ||
291 | |||
292 | /* decompress the block */ | ||
293 | ret = regcache_lzo_decompress_cache_block(map, lzo_block); | ||
294 | if (ret < 0) { | ||
295 | kfree(lzo_block->dst); | ||
296 | goto out; | ||
297 | } | ||
298 | |||
299 | /* write the new value to the cache */ | ||
300 | if (regcache_set_val(lzo_block->dst, blkpos, value, | ||
301 | map->cache_word_size)) { | ||
302 | kfree(lzo_block->dst); | ||
303 | goto out; | ||
304 | } | ||
305 | |||
306 | /* prepare the source to be the decompressed block */ | ||
307 | lzo_block->src = lzo_block->dst; | ||
308 | lzo_block->src_len = lzo_block->dst_len; | ||
309 | |||
310 | /* compress the block */ | ||
311 | ret = regcache_lzo_compress_cache_block(map, lzo_block); | ||
312 | if (ret < 0) { | ||
313 | kfree(lzo_block->dst); | ||
314 | kfree(lzo_block->src); | ||
315 | goto out; | ||
316 | } | ||
317 | |||
318 | /* set the bit so we know we have to sync this register */ | ||
319 | set_bit(reg, lzo_block->sync_bmp); | ||
320 | kfree(tmp_dst); | ||
321 | kfree(lzo_block->src); | ||
322 | return 0; | ||
323 | out: | ||
324 | lzo_block->dst = tmp_dst; | ||
325 | lzo_block->dst_len = tmp_dst_len; | ||
326 | return ret; | ||
327 | } | ||
328 | |||
329 | static int regcache_lzo_sync(struct regmap *map) | ||
330 | { | ||
331 | struct regcache_lzo_ctx **lzo_blocks; | ||
332 | unsigned int val; | ||
333 | int i; | ||
334 | int ret; | ||
335 | |||
336 | lzo_blocks = map->cache; | ||
337 | for_each_set_bit(i, lzo_blocks[0]->sync_bmp, lzo_blocks[0]->sync_bmp_nbits) { | ||
338 | ret = regcache_read(map, i, &val); | ||
339 | if (ret) | ||
340 | return ret; | ||
341 | map->cache_bypass = 1; | ||
342 | ret = _regmap_write(map, i, val); | ||
343 | map->cache_bypass = 0; | ||
344 | if (ret) | ||
345 | return ret; | ||
346 | dev_dbg(map->dev, "Synced register %#x, value %#x\n", | ||
347 | i, val); | ||
348 | } | ||
349 | |||
350 | return 0; | ||
351 | } | ||
352 | |||
353 | struct regcache_ops regcache_lzo_ops = { | ||
354 | .type = REGCACHE_LZO, | ||
355 | .name = "lzo", | ||
356 | .init = regcache_lzo_init, | ||
357 | .exit = regcache_lzo_exit, | ||
358 | .read = regcache_lzo_read, | ||
359 | .write = regcache_lzo_write, | ||
360 | .sync = regcache_lzo_sync | ||
361 | }; | ||
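The block arithmetic in regcache_lzo_get_blkindex() and regcache_lzo_get_blkpos() is easiest to check with concrete numbers. A standalone demonstration (the sizes are invented, and DIV_ROUND_UP is reimplemented so this runs in userspace):

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned int cache_size_raw = 512, word_size = 2, blkcount = 8;
		unsigned int blksize = DIV_ROUND_UP(cache_size_raw, blkcount);
		unsigned int reg = 40;

		/* blksize = 64 bytes, i.e. 32 registers per block */
		printf("register %u -> block %u, position %u\n", reg,
		       (reg * word_size) / blksize,
		       reg % (blksize / word_size));
		/* prints: register 40 -> block 1, position 8 */
		return 0;
	}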
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c new file mode 100644 index 000000000000..e31498499b0f --- /dev/null +++ b/drivers/base/regmap/regcache-rbtree.c | |||
@@ -0,0 +1,345 @@ | |||
1 | /* | ||
2 | * Register cache access API - rbtree caching support | ||
3 | * | ||
4 | * Copyright 2011 Wolfson Microelectronics plc | ||
5 | * | ||
6 | * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | ||
12 | |||
13 | #include <linux/slab.h> | ||
14 | #include <linux/rbtree.h> | ||
15 | |||
16 | #include "internal.h" | ||
17 | |||
18 | static int regcache_rbtree_write(struct regmap *map, unsigned int reg, | ||
19 | unsigned int value); | ||
20 | |||
21 | struct regcache_rbtree_node { | ||
22 | /* the actual rbtree node holding this block */ | ||
23 | struct rb_node node; | ||
24 | /* base register handled by this block */ | ||
25 | unsigned int base_reg; | ||
26 | /* block of adjacent registers */ | ||
27 | void *block; | ||
28 | /* number of registers available in the block */ | ||
29 | unsigned int blklen; | ||
30 | } __attribute__ ((packed)); | ||
31 | |||
32 | struct regcache_rbtree_ctx { | ||
33 | struct rb_root root; | ||
34 | struct regcache_rbtree_node *cached_rbnode; | ||
35 | }; | ||
36 | |||
37 | static inline void regcache_rbtree_get_base_top_reg( | ||
38 | struct regcache_rbtree_node *rbnode, | ||
39 | unsigned int *base, unsigned int *top) | ||
40 | { | ||
41 | *base = rbnode->base_reg; | ||
42 | *top = rbnode->base_reg + rbnode->blklen - 1; | ||
43 | } | ||
44 | |||
45 | static unsigned int regcache_rbtree_get_register( | ||
46 | struct regcache_rbtree_node *rbnode, unsigned int idx, | ||
47 | unsigned int word_size) | ||
48 | { | ||
49 | return regcache_get_val(rbnode->block, idx, word_size); | ||
50 | } | ||
51 | |||
52 | static void regcache_rbtree_set_register(struct regcache_rbtree_node *rbnode, | ||
53 | unsigned int idx, unsigned int val, | ||
54 | unsigned int word_size) | ||
55 | { | ||
56 | regcache_set_val(rbnode->block, idx, val, word_size); | ||
57 | } | ||
58 | |||
59 | static struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map, | ||
60 | unsigned int reg) | ||
61 | { | ||
62 | struct regcache_rbtree_ctx *rbtree_ctx = map->cache; | ||
63 | struct rb_node *node; | ||
64 | struct regcache_rbtree_node *rbnode; | ||
65 | unsigned int base_reg, top_reg; | ||
66 | |||
67 | rbnode = rbtree_ctx->cached_rbnode; | ||
68 | if (rbnode) { | ||
69 | regcache_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg); | ||
70 | if (reg >= base_reg && reg <= top_reg) | ||
71 | return rbnode; | ||
72 | } | ||
73 | |||
74 | node = rbtree_ctx->root.rb_node; | ||
75 | while (node) { | ||
76 | rbnode = container_of(node, struct regcache_rbtree_node, node); | ||
77 | regcache_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg); | ||
78 | if (reg >= base_reg && reg <= top_reg) { | ||
79 | rbtree_ctx->cached_rbnode = rbnode; | ||
80 | return rbnode; | ||
81 | } else if (reg > top_reg) { | ||
82 | node = node->rb_right; | ||
83 | } else if (reg < base_reg) { | ||
84 | node = node->rb_left; | ||
85 | } | ||
86 | } | ||
87 | |||
88 | return NULL; | ||
89 | } | ||
90 | |||
91 | static int regcache_rbtree_insert(struct rb_root *root, | ||
92 | struct regcache_rbtree_node *rbnode) | ||
93 | { | ||
94 | struct rb_node **new, *parent; | ||
95 | struct regcache_rbtree_node *rbnode_tmp; | ||
96 | unsigned int base_reg_tmp, top_reg_tmp; | ||
97 | unsigned int base_reg; | ||
98 | |||
99 | parent = NULL; | ||
100 | new = &root->rb_node; | ||
101 | while (*new) { | ||
102 | rbnode_tmp = container_of(*new, struct regcache_rbtree_node, | ||
103 | node); | ||
104 | /* base and top registers of the current rbnode */ | ||
105 | regcache_rbtree_get_base_top_reg(rbnode_tmp, &base_reg_tmp, | ||
106 | &top_reg_tmp); | ||
107 | /* base register of the rbnode to be added */ | ||
108 | base_reg = rbnode->base_reg; | ||
109 | parent = *new; | ||
110 | /* if this register has already been inserted, just return */ | ||
111 | if (base_reg >= base_reg_tmp && | ||
112 | base_reg <= top_reg_tmp) | ||
113 | return 0; | ||
114 | else if (base_reg > top_reg_tmp) | ||
115 | new = &((*new)->rb_right); | ||
116 | else if (base_reg < base_reg_tmp) | ||
117 | new = &((*new)->rb_left); | ||
118 | } | ||
119 | |||
120 | /* insert the node into the rbtree */ | ||
121 | rb_link_node(&rbnode->node, parent, new); | ||
122 | rb_insert_color(&rbnode->node, root); | ||
123 | |||
124 | return 1; | ||
125 | } | ||
126 | |||
127 | static int regcache_rbtree_init(struct regmap *map) | ||
128 | { | ||
129 | struct regcache_rbtree_ctx *rbtree_ctx; | ||
130 | int i; | ||
131 | int ret; | ||
132 | |||
133 | map->cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL); | ||
134 | if (!map->cache) | ||
135 | return -ENOMEM; | ||
136 | |||
137 | rbtree_ctx = map->cache; | ||
138 | rbtree_ctx->root = RB_ROOT; | ||
139 | rbtree_ctx->cached_rbnode = NULL; | ||
140 | |||
141 | for (i = 0; i < map->num_reg_defaults; i++) { | ||
142 | ret = regcache_rbtree_write(map, | ||
143 | map->reg_defaults[i].reg, | ||
144 | map->reg_defaults[i].def); | ||
145 | if (ret) | ||
146 | goto err; | ||
147 | } | ||
148 | |||
149 | return 0; | ||
150 | |||
151 | err: | ||
152 | regcache_exit(map); | ||
153 | return ret; | ||
154 | } | ||
155 | |||
156 | static int regcache_rbtree_exit(struct regmap *map) | ||
157 | { | ||
158 | struct rb_node *next; | ||
159 | struct regcache_rbtree_ctx *rbtree_ctx; | ||
160 | struct regcache_rbtree_node *rbtree_node; | ||
161 | |||
162 | /* if we've already been called then just return */ | ||
163 | rbtree_ctx = map->cache; | ||
164 | if (!rbtree_ctx) | ||
165 | return 0; | ||
166 | |||
167 | /* free up the rbtree */ | ||
168 | next = rb_first(&rbtree_ctx->root); | ||
169 | while (next) { | ||
170 | rbtree_node = rb_entry(next, struct regcache_rbtree_node, node); | ||
171 | next = rb_next(&rbtree_node->node); | ||
172 | rb_erase(&rbtree_node->node, &rbtree_ctx->root); | ||
173 | kfree(rbtree_node->block); | ||
174 | kfree(rbtree_node); | ||
175 | } | ||
176 | |||
177 | /* release the resources */ | ||
178 | kfree(map->cache); | ||
179 | map->cache = NULL; | ||
180 | |||
181 | return 0; | ||
182 | } | ||
183 | |||
184 | static int regcache_rbtree_read(struct regmap *map, | ||
185 | unsigned int reg, unsigned int *value) | ||
186 | { | ||
187 | struct regcache_rbtree_node *rbnode; | ||
188 | unsigned int reg_tmp; | ||
189 | |||
190 | rbnode = regcache_rbtree_lookup(map, reg); | ||
191 | if (rbnode) { | ||
192 | reg_tmp = reg - rbnode->base_reg; | ||
193 | *value = regcache_rbtree_get_register(rbnode, reg_tmp, | ||
194 | map->cache_word_size); | ||
195 | } else { | ||
196 | return -ENOENT; | ||
197 | } | ||
198 | |||
199 | return 0; | ||
200 | } | ||
201 | |||
202 | |||
203 | static int regcache_rbtree_insert_to_block(struct regcache_rbtree_node *rbnode, | ||
204 | unsigned int pos, unsigned int reg, | ||
205 | unsigned int value, unsigned int word_size) | ||
206 | { | ||
207 | u8 *blk; | ||
208 | |||
209 | blk = krealloc(rbnode->block, | ||
210 | (rbnode->blklen + 1) * word_size, GFP_KERNEL); | ||
211 | if (!blk) | ||
212 | return -ENOMEM; | ||
213 | |||
214 | /* insert the register value in the correct place in the rbnode block */ | ||
215 | memmove(blk + (pos + 1) * word_size, | ||
216 | blk + pos * word_size, | ||
217 | (rbnode->blklen - pos) * word_size); | ||
218 | |||
219 | /* update the rbnode block, its size and the base register */ | ||
220 | rbnode->block = blk; | ||
221 | rbnode->blklen++; | ||
222 | if (!pos) | ||
223 | rbnode->base_reg = reg; | ||
224 | |||
225 | regcache_rbtree_set_register(rbnode, pos, value, word_size); | ||
226 | return 0; | ||
227 | } | ||
228 | |||
229 | static int regcache_rbtree_write(struct regmap *map, unsigned int reg, | ||
230 | unsigned int value) | ||
231 | { | ||
232 | struct regcache_rbtree_ctx *rbtree_ctx; | ||
233 | struct regcache_rbtree_node *rbnode, *rbnode_tmp; | ||
234 | struct rb_node *node; | ||
235 | unsigned int val; | ||
236 | unsigned int reg_tmp; | ||
237 | unsigned int pos; | ||
238 | int i; | ||
239 | int ret; | ||
240 | |||
241 | rbtree_ctx = map->cache; | ||
242 | /* if we can't locate it in the cached rbnode we'll have | ||
243 | * to traverse the rbtree looking for it. | ||
244 | */ | ||
245 | rbnode = regcache_rbtree_lookup(map, reg); | ||
246 | if (rbnode) { | ||
247 | reg_tmp = reg - rbnode->base_reg; | ||
248 | val = regcache_rbtree_get_register(rbnode, reg_tmp, | ||
249 | map->cache_word_size); | ||
250 | if (val == value) | ||
251 | return 0; | ||
252 | regcache_rbtree_set_register(rbnode, reg_tmp, value, | ||
253 | map->cache_word_size); | ||
254 | } else { | ||
255 | /* look for an adjacent register to the one we are about to add */ | ||
256 | for (node = rb_first(&rbtree_ctx->root); node; | ||
257 | node = rb_next(node)) { | ||
258 | rbnode_tmp = rb_entry(node, struct regcache_rbtree_node, node); | ||
259 | for (i = 0; i < rbnode_tmp->blklen; i++) { | ||
260 | reg_tmp = rbnode_tmp->base_reg + i; | ||
261 | if (abs(reg_tmp - reg) != 1) | ||
262 | continue; | ||
263 | /* decide where in the block to place our register */ | ||
264 | if (reg_tmp + 1 == reg) | ||
265 | pos = i + 1; | ||
266 | else | ||
267 | pos = i; | ||
268 | ret = regcache_rbtree_insert_to_block(rbnode_tmp, pos, | ||
269 | reg, value, | ||
270 | map->cache_word_size); | ||
271 | if (ret) | ||
272 | return ret; | ||
273 | rbtree_ctx->cached_rbnode = rbnode_tmp; | ||
274 | return 0; | ||
275 | } | ||
276 | } | ||
277 | /* we did not manage to find a place to insert it in an existing | ||
278 | * block so create a new rbnode with a single register in its block. | ||
279 | * This block will get populated further if any other adjacent | ||
280 | * registers get modified in the future. | ||
281 | */ | ||
282 | rbnode = kzalloc(sizeof *rbnode, GFP_KERNEL); | ||
283 | if (!rbnode) | ||
284 | return -ENOMEM; | ||
285 | rbnode->blklen = 1; | ||
286 | rbnode->base_reg = reg; | ||
287 | rbnode->block = kmalloc(rbnode->blklen * map->cache_word_size, | ||
288 | GFP_KERNEL); | ||
289 | if (!rbnode->block) { | ||
290 | kfree(rbnode); | ||
291 | return -ENOMEM; | ||
292 | } | ||
293 | regcache_rbtree_set_register(rbnode, 0, value, map->cache_word_size); | ||
294 | regcache_rbtree_insert(&rbtree_ctx->root, rbnode); | ||
295 | rbtree_ctx->cached_rbnode = rbnode; | ||
296 | } | ||
297 | |||
298 | return 0; | ||
299 | } | ||
300 | |||
301 | static int regcache_rbtree_sync(struct regmap *map) | ||
302 | { | ||
303 | struct regcache_rbtree_ctx *rbtree_ctx; | ||
304 | struct rb_node *node; | ||
305 | struct regcache_rbtree_node *rbnode; | ||
306 | unsigned int regtmp; | ||
307 | unsigned int val; | ||
308 | int ret; | ||
309 | int i; | ||
310 | |||
311 | rbtree_ctx = map->cache; | ||
312 | for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) { | ||
313 | rbnode = rb_entry(node, struct regcache_rbtree_node, node); | ||
314 | for (i = 0; i < rbnode->blklen; i++) { | ||
315 | regtmp = rbnode->base_reg + i; | ||
316 | val = regcache_rbtree_get_register(rbnode, i, | ||
317 | map->cache_word_size); | ||
318 | |||
319 | /* Is this the hardware default? If so skip. */ | ||
320 | ret = regcache_lookup_reg(map, regtmp); | ||
321 | if (ret >= 0 && val == map->reg_defaults[ret].def) | ||
322 | continue; | ||
323 | |||
324 | map->cache_bypass = 1; | ||
325 | ret = _regmap_write(map, regtmp, val); | ||
326 | map->cache_bypass = 0; | ||
327 | if (ret) | ||
328 | return ret; | ||
329 | dev_dbg(map->dev, "Synced register %#x, value %#x\n", | ||
330 | regtmp, val); | ||
331 | } | ||
332 | } | ||
333 | |||
334 | return 0; | ||
335 | } | ||
336 | |||
337 | struct regcache_ops regcache_rbtree_ops = { | ||
338 | .type = REGCACHE_RBTREE, | ||
339 | .name = "rbtree", | ||
340 | .init = regcache_rbtree_init, | ||
341 | .exit = regcache_rbtree_exit, | ||
342 | .read = regcache_rbtree_read, | ||
343 | .write = regcache_rbtree_write, | ||
344 | .sync = regcache_rbtree_sync | ||
345 | }; | ||
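To make the adjacency handling in regcache_rbtree_write() concrete, here is an illustrative write sequence against an empty cache (register numbers and values a..d are arbitrary):

	/* regcache_rbtree_write(map, 0x10, a): no node covers 0x10 and nothing
	 *   is adjacent, so a new single-register node is created (base 0x10,
	 *   blklen 1) and becomes the cached_rbnode.
	 * regcache_rbtree_write(map, 0x11, b): 0x11 is adjacent above, so the
	 *   existing block is krealloc()ed and b lands at pos 1 (blklen 2).
	 * regcache_rbtree_write(map, 0x0f, c): adjacent below; c is inserted
	 *   at pos 0 and base_reg becomes 0x0f (blklen 3).
	 * regcache_rbtree_write(map, 0x40, d): not adjacent to any cached
	 *   register, so a second single-register node is inserted into the
	 *   rbtree. */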
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c new file mode 100644 index 000000000000..666f6f5011dc --- /dev/null +++ b/drivers/base/regmap/regcache.c | |||
@@ -0,0 +1,402 @@ | |||
1 | /* | ||
2 | * Register cache access API | ||
3 | * | ||
4 | * Copyright 2011 Wolfson Microelectronics plc | ||
5 | * | ||
6 | * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | ||
12 | |||
13 | #include <linux/slab.h> | ||
14 | #include <linux/export.h> | ||
15 | #include <trace/events/regmap.h> | ||
16 | #include <linux/bsearch.h> | ||
17 | #include <linux/sort.h> | ||
18 | |||
19 | #include "internal.h" | ||
20 | |||
21 | static const struct regcache_ops *cache_types[] = { | ||
22 | ®cache_indexed_ops, | ||
23 | ®cache_rbtree_ops, | ||
24 | ®cache_lzo_ops, | ||
25 | }; | ||
26 | |||
27 | static int regcache_hw_init(struct regmap *map) | ||
28 | { | ||
29 | int i, j; | ||
30 | int ret; | ||
31 | int count; | ||
32 | unsigned int val; | ||
33 | void *tmp_buf; | ||
34 | |||
35 | if (!map->num_reg_defaults_raw) | ||
36 | return -EINVAL; | ||
37 | |||
38 | if (!map->reg_defaults_raw) { | ||
39 | dev_warn(map->dev, "No cache defaults, reading back from HW\n"); | ||
40 | tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL); | ||
41 | if (!tmp_buf) | ||
42 | return -ENOMEM; | ||
43 | ret = regmap_bulk_read(map, 0, tmp_buf, | ||
44 | map->num_reg_defaults_raw); | ||
45 | if (ret < 0) { | ||
46 | kfree(tmp_buf); | ||
47 | return ret; | ||
48 | } | ||
49 | map->reg_defaults_raw = tmp_buf; | ||
50 | map->cache_free = 1; | ||
51 | } | ||
52 | |||
53 | /* calculate the size of reg_defaults */ | ||
54 | for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++) { | ||
55 | val = regcache_get_val(map->reg_defaults_raw, | ||
56 | i, map->cache_word_size); | ||
57 | if (!val) | ||
58 | continue; | ||
59 | count++; | ||
60 | } | ||
61 | |||
62 | map->reg_defaults = kmalloc(count * sizeof(struct reg_default), | ||
63 | GFP_KERNEL); | ||
64 | if (!map->reg_defaults) | ||
65 | return -ENOMEM; | ||
66 | |||
67 | /* fill the reg_defaults */ | ||
68 | map->num_reg_defaults = count; | ||
69 | for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) { | ||
70 | val = regcache_get_val(map->reg_defaults_raw, | ||
71 | i, map->cache_word_size); | ||
72 | if (!val) | ||
73 | continue; | ||
74 | map->reg_defaults[j].reg = i; | ||
75 | map->reg_defaults[j].def = val; | ||
76 | j++; | ||
77 | } | ||
78 | |||
79 | return 0; | ||
80 | } | ||
81 | |||
82 | int regcache_init(struct regmap *map) | ||
83 | { | ||
84 | int ret; | ||
85 | int i; | ||
86 | void *tmp_buf; | ||
87 | |||
88 | if (map->cache_type == REGCACHE_NONE) { | ||
89 | map->cache_bypass = true; | ||
90 | return 0; | ||
91 | } | ||
92 | |||
93 | for (i = 0; i < ARRAY_SIZE(cache_types); i++) | ||
94 | if (cache_types[i]->type == map->cache_type) | ||
95 | break; | ||
96 | |||
97 | if (i == ARRAY_SIZE(cache_types)) { | ||
98 | dev_err(map->dev, "Could not match cache type: %d\n", | ||
99 | map->cache_type); | ||
100 | return -EINVAL; | ||
101 | } | ||
102 | |||
103 | map->cache = NULL; | ||
104 | map->cache_ops = cache_types[i]; | ||
105 | |||
106 | if (!map->cache_ops->read || | ||
107 | !map->cache_ops->write || | ||
108 | !map->cache_ops->name) | ||
109 | return -EINVAL; | ||
110 | |||
111 | /* We still need to ensure that the reg_defaults | ||
112 | * won't vanish from under us. We'll need to make | ||
113 | * a copy of it. | ||
114 | */ | ||
115 | if (map->reg_defaults) { | ||
116 | if (!map->num_reg_defaults) | ||
117 | return -EINVAL; | ||
118 | tmp_buf = kmemdup(map->reg_defaults, map->num_reg_defaults * | ||
119 | sizeof(struct reg_default), GFP_KERNEL); | ||
120 | if (!tmp_buf) | ||
121 | return -ENOMEM; | ||
122 | map->reg_defaults = tmp_buf; | ||
123 | } else if (map->num_reg_defaults_raw) { | ||
124 | /* Some devices such as PMICs don't have cache defaults, | ||
125 | * we cope with this by reading back the HW registers and | ||
126 | * crafting the cache defaults by hand. | ||
127 | */ | ||
128 | ret = regcache_hw_init(map); | ||
129 | if (ret < 0) | ||
130 | return ret; | ||
131 | } | ||
132 | |||
133 | if (!map->max_register) | ||
134 | map->max_register = map->num_reg_defaults_raw; | ||
135 | |||
136 | if (map->cache_ops->init) { | ||
137 | dev_dbg(map->dev, "Initializing %s cache\n", | ||
138 | map->cache_ops->name); | ||
139 | return map->cache_ops->init(map); | ||
140 | } | ||
141 | return 0; | ||
142 | } | ||
143 | |||
144 | void regcache_exit(struct regmap *map) | ||
145 | { | ||
146 | if (map->cache_type == REGCACHE_NONE) | ||
147 | return; | ||
148 | |||
149 | BUG_ON(!map->cache_ops); | ||
150 | |||
151 | kfree(map->reg_defaults); | ||
152 | if (map->cache_free) | ||
153 | kfree(map->reg_defaults_raw); | ||
154 | |||
155 | if (map->cache_ops->exit) { | ||
156 | dev_dbg(map->dev, "Destroying %s cache\n", | ||
157 | map->cache_ops->name); | ||
158 | map->cache_ops->exit(map); | ||
159 | } | ||
160 | } | ||
161 | |||
162 | /** | ||
163 | * regcache_read: Fetch the value of a given register from the cache. | ||
164 | * | ||
165 | * @map: map to read from. | ||
166 | * @reg: The register index. | ||
167 | * @value: The value to be returned. | ||
168 | * | ||
169 | * Return a negative value on failure, 0 on success. | ||
170 | */ | ||
171 | int regcache_read(struct regmap *map, | ||
172 | unsigned int reg, unsigned int *value) | ||
173 | { | ||
174 | if (map->cache_type == REGCACHE_NONE) | ||
175 | return -ENOSYS; | ||
176 | |||
177 | BUG_ON(!map->cache_ops); | ||
178 | |||
179 | if (!regmap_readable(map, reg)) | ||
180 | return -EIO; | ||
181 | |||
182 | if (!regmap_volatile(map, reg)) | ||
183 | return map->cache_ops->read(map, reg, value); | ||
184 | |||
185 | return -EINVAL; | ||
186 | } | ||
187 | EXPORT_SYMBOL_GPL(regcache_read); | ||
188 | |||
189 | /** | ||
190 | * regcache_write: Set the value of a given register in the cache. | ||
191 | * | ||
192 | * @map: map to write to. | ||
193 | * @reg: The register index. | ||
194 | * @value: The new register value. | ||
195 | * | ||
196 | * Return a negative value on failure, 0 on success. | ||
197 | */ | ||
198 | int regcache_write(struct regmap *map, | ||
199 | unsigned int reg, unsigned int value) | ||
200 | { | ||
201 | if (map->cache_type == REGCACHE_NONE) | ||
202 | return 0; | ||
203 | |||
204 | BUG_ON(!map->cache_ops); | ||
205 | |||
206 | if (!regmap_writeable(map, reg)) | ||
207 | return -EIO; | ||
208 | |||
209 | if (!regmap_volatile(map, reg)) | ||
210 | return map->cache_ops->write(map, reg, value); | ||
211 | |||
212 | return 0; | ||
213 | } | ||
214 | EXPORT_SYMBOL_GPL(regcache_write); | ||
215 | |||
216 | /** | ||
217 | * regcache_sync: Sync the register cache with the hardware. | ||
218 | * | ||
219 | * @map: map to sync. | ||
220 | * | ||
221 | * Any registers that should not be synced should be marked as | ||
222 | * volatile. In general, drivers can choose not to use the provided | ||
223 | * syncing functionality if they so wish. | ||
224 | * | ||
225 | * Return a negative value on failure, 0 on success. | ||
226 | */ | ||
227 | int regcache_sync(struct regmap *map) | ||
228 | { | ||
229 | int ret = 0; | ||
230 | unsigned int val; | ||
231 | unsigned int i; | ||
232 | const char *name; | ||
233 | unsigned int bypass; | ||
234 | |||
235 | BUG_ON(!map->cache_ops); | ||
236 | |||
237 | mutex_lock(&map->lock); | ||
238 | /* Remember the initial bypass state */ | ||
239 | bypass = map->cache_bypass; | ||
240 | dev_dbg(map->dev, "Syncing %s cache\n", | ||
241 | map->cache_ops->name); | ||
242 | name = map->cache_ops->name; | ||
243 | trace_regcache_sync(map->dev, name, "start"); | ||
244 | if (map->cache_ops->sync) { | ||
245 | ret = map->cache_ops->sync(map); | ||
246 | } else { | ||
247 | for (i = 0; i < map->num_reg_defaults; i++) { | ||
248 | ret = regcache_read(map, i, &val); | ||
249 | if (ret < 0) | ||
250 | goto out; | ||
251 | map->cache_bypass = 1; | ||
252 | ret = _regmap_write(map, i, val); | ||
253 | map->cache_bypass = 0; | ||
254 | if (ret < 0) | ||
255 | goto out; | ||
256 | dev_dbg(map->dev, "Synced register %#x, value %#x\n", | ||
257 | map->reg_defaults[i].reg, | ||
258 | map->reg_defaults[i].def); | ||
259 | } | ||
260 | |||
261 | } | ||
262 | out: | ||
263 | trace_regcache_sync(map->dev, name, "stop"); | ||
264 | /* Restore the bypass state */ | ||
265 | map->cache_bypass = bypass; | ||
266 | mutex_unlock(&map->lock); | ||
267 | |||
268 | return ret; | ||
269 | } | ||
270 | EXPORT_SYMBOL_GPL(regcache_sync); | ||
271 | |||
272 | /** | ||
273 | * regcache_cache_only: Put a register map into cache only mode | ||
274 | * | ||
275 | * @map: map to configure | ||
276 | * @enable: flag if writes should only update the cache, not the hardware | ||
277 | * | ||
278 | * When a register map is marked as cache only writes to the register | ||
279 | * map API will only update the register cache, they will not cause | ||
280 | * any hardware changes. This is useful for allowing portions of | ||
281 | * drivers to act as though the device were functioning as normal when | ||
282 | * it is disabled for power saving reasons. | ||
283 | */ | ||
284 | void regcache_cache_only(struct regmap *map, bool enable) | ||
285 | { | ||
286 | mutex_lock(&map->lock); | ||
287 | WARN_ON(map->cache_bypass && enable); | ||
288 | map->cache_only = enable; | ||
289 | mutex_unlock(&map->lock); | ||
290 | } | ||
291 | EXPORT_SYMBOL_GPL(regcache_cache_only); | ||
292 | |||
293 | /** | ||
294 | * regcache_cache_bypass: Put a register map into cache bypass mode | ||
295 | * | ||
296 | * @map: map to configure | ||
297 | * @enable: flag if writes should bypass the cache and go straight to the hardware | ||
298 | * | ||
299 | * When a register map is marked with the cache bypass option, writes | ||
300 | to the register map API will only update the hardware and not | ||
301 | * the cache directly. This is useful when syncing the cache back to | ||
302 | * the hardware. | ||
303 | */ | ||
304 | void regcache_cache_bypass(struct regmap *map, bool enable) | ||
305 | { | ||
306 | mutex_lock(&map->lock); | ||
307 | WARN_ON(map->cache_only && enable); | ||
308 | map->cache_bypass = enable; | ||
309 | mutex_unlock(&map->lock); | ||
310 | } | ||
311 | EXPORT_SYMBOL_GPL(regcache_cache_bypass); | ||
312 | |||
313 | bool regcache_set_val(void *base, unsigned int idx, | ||
314 | unsigned int val, unsigned int word_size) | ||
315 | { | ||
316 | switch (word_size) { | ||
317 | case 1: { | ||
318 | u8 *cache = base; | ||
319 | if (cache[idx] == val) | ||
320 | return true; | ||
321 | cache[idx] = val; | ||
322 | break; | ||
323 | } | ||
324 | case 2: { | ||
325 | u16 *cache = base; | ||
326 | if (cache[idx] == val) | ||
327 | return true; | ||
328 | cache[idx] = val; | ||
329 | break; | ||
330 | } | ||
331 | default: | ||
332 | BUG(); | ||
333 | } | ||
334 | /* unreachable */ | ||
335 | return false; | ||
336 | } | ||
337 | |||
338 | unsigned int regcache_get_val(const void *base, unsigned int idx, | ||
339 | unsigned int word_size) | ||
340 | { | ||
341 | if (!base) | ||
342 | return -EINVAL; | ||
343 | |||
344 | switch (word_size) { | ||
345 | case 1: { | ||
346 | const u8 *cache = base; | ||
347 | return cache[idx]; | ||
348 | } | ||
349 | case 2: { | ||
350 | const u16 *cache = base; | ||
351 | return cache[idx]; | ||
352 | } | ||
353 | default: | ||
354 | BUG(); | ||
355 | } | ||
356 | /* unreachable */ | ||
357 | return -1; | ||
358 | } | ||
359 | |||
360 | static int regcache_default_cmp(const void *a, const void *b) | ||
361 | { | ||
362 | const struct reg_default *_a = a; | ||
363 | const struct reg_default *_b = b; | ||
364 | |||
365 | return _a->reg - _b->reg; | ||
366 | } | ||
367 | |||
368 | int regcache_lookup_reg(struct regmap *map, unsigned int reg) | ||
369 | { | ||
370 | struct reg_default key; | ||
371 | struct reg_default *r; | ||
372 | |||
373 | key.reg = reg; | ||
374 | key.def = 0; | ||
375 | |||
376 | r = bsearch(&key, map->reg_defaults, map->num_reg_defaults, | ||
377 | sizeof(struct reg_default), regcache_default_cmp); | ||
378 | |||
379 | if (r) | ||
380 | return r - map->reg_defaults; | ||
381 | else | ||
382 | return -ENOENT; | ||
383 | } | ||
384 | |||
385 | int regcache_insert_reg(struct regmap *map, unsigned int reg, | ||
386 | unsigned int val) | ||
387 | { | ||
388 | void *tmp; | ||
389 | |||
390 | tmp = krealloc(map->reg_defaults, | ||
391 | (map->num_reg_defaults + 1) * sizeof(struct reg_default), | ||
392 | GFP_KERNEL); | ||
393 | if (!tmp) | ||
394 | return -ENOMEM; | ||
395 | map->reg_defaults = tmp; | ||
396 | map->num_reg_defaults++; | ||
397 | map->reg_defaults[map->num_reg_defaults - 1].reg = reg; | ||
398 | map->reg_defaults[map->num_reg_defaults - 1].def = val; | ||
399 | sort(map->reg_defaults, map->num_reg_defaults, | ||
400 | sizeof(struct reg_default), regcache_default_cmp, NULL); | ||
401 | return 0; | ||
402 | } | ||
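The cache-only and sync entry points above are designed to pair across a power transition. A sketch of the expected consumer pattern (struct mydev and the callbacks are hypothetical):

	#include <linux/device.h>
	#include <linux/regmap.h>

	struct mydev {
		struct regmap *regmap;
	};

	static int mydev_suspend(struct device *dev)
	{
		struct mydev *priv = dev_get_drvdata(dev);

		regcache_cache_only(priv->regmap, true);
		/* ... power the device down ... */
		return 0;
	}

	static int mydev_resume(struct device *dev)
	{
		struct mydev *priv = dev_get_drvdata(dev);

		/* ... power the device back up ... */
		regcache_cache_only(priv->regmap, false);
		return regcache_sync(priv->regmap);	/* replay cached writes */
	}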
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c new file mode 100644 index 000000000000..6f397476e27c --- /dev/null +++ b/drivers/base/regmap/regmap-debugfs.c | |||
@@ -0,0 +1,209 @@ | |||
1 | /* | ||
2 | * Register map access API - debugfs | ||
3 | * | ||
4 | * Copyright 2011 Wolfson Microelectronics plc | ||
5 | * | ||
6 | * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | ||
12 | |||
13 | #include <linux/slab.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/mutex.h> | ||
16 | #include <linux/debugfs.h> | ||
17 | #include <linux/uaccess.h> | ||
18 | |||
19 | #include "internal.h" | ||
20 | |||
21 | static struct dentry *regmap_debugfs_root; | ||
22 | |||
23 | /* Calculate the length of a fixed format */ | ||
24 | static size_t regmap_calc_reg_len(int max_val, char *buf, size_t buf_size) | ||
25 | { | ||
26 | snprintf(buf, buf_size, "%x", max_val); | ||
27 | return strlen(buf); | ||
28 | } | ||
29 | |||
30 | static int regmap_open_file(struct inode *inode, struct file *file) | ||
31 | { | ||
32 | file->private_data = inode->i_private; | ||
33 | return 0; | ||
34 | } | ||
35 | |||
36 | static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf, | ||
37 | size_t count, loff_t *ppos) | ||
38 | { | ||
39 | int reg_len, val_len, tot_len; | ||
40 | size_t buf_pos = 0; | ||
41 | loff_t p = 0; | ||
42 | ssize_t ret; | ||
43 | int i; | ||
44 | struct regmap *map = file->private_data; | ||
45 | char *buf; | ||
46 | unsigned int val; | ||
47 | |||
48 | if (*ppos < 0 || !count) | ||
49 | return -EINVAL; | ||
50 | |||
51 | buf = kmalloc(count, GFP_KERNEL); | ||
52 | if (!buf) | ||
53 | return -ENOMEM; | ||
54 | |||
55 | /* Calculate the length of a fixed format */ | ||
56 | reg_len = regmap_calc_reg_len(map->max_register, buf, count); | ||
57 | val_len = 2 * map->format.val_bytes; | ||
58 | tot_len = reg_len + val_len + 3; /* : \n */ | ||
59 | |||
60 | for (i = 0; i < map->max_register + 1; i++) { | ||
61 | if (!regmap_readable(map, i)) | ||
62 | continue; | ||
63 | |||
64 | if (regmap_precious(map, i)) | ||
65 | continue; | ||
66 | |||
67 | /* If we're in the region the user is trying to read */ | ||
68 | if (p >= *ppos) { | ||
69 | /* ...but not beyond it */ | ||
70 | if (buf_pos >= count - 1 - tot_len) | ||
71 | break; | ||
72 | |||
73 | /* Format the register */ | ||
74 | snprintf(buf + buf_pos, count - buf_pos, "%.*x: ", | ||
75 | reg_len, i); | ||
76 | buf_pos += reg_len + 2; | ||
77 | |||
78 | /* Format the value, write all X if we can't read */ | ||
79 | ret = regmap_read(map, i, &val); | ||
80 | if (ret == 0) | ||
81 | snprintf(buf + buf_pos, count - buf_pos, | ||
82 | "%.*x", val_len, val); | ||
83 | else | ||
84 | memset(buf + buf_pos, 'X', val_len); | ||
85 | buf_pos += 2 * map->format.val_bytes; | ||
86 | |||
87 | buf[buf_pos++] = '\n'; | ||
88 | } | ||
89 | p += tot_len; | ||
90 | } | ||
91 | |||
92 | ret = buf_pos; | ||
93 | |||
94 | if (copy_to_user(user_buf, buf, buf_pos)) { | ||
95 | ret = -EFAULT; | ||
96 | goto out; | ||
97 | } | ||
98 | |||
99 | *ppos += buf_pos; | ||
100 | |||
101 | out: | ||
102 | kfree(buf); | ||
103 | return ret; | ||
104 | } | ||
105 | |||
106 | static const struct file_operations regmap_map_fops = { | ||
107 | .open = regmap_open_file, | ||
108 | .read = regmap_map_read_file, | ||
109 | .llseek = default_llseek, | ||
110 | }; | ||
111 | |||
112 | static ssize_t regmap_access_read_file(struct file *file, | ||
113 | char __user *user_buf, size_t count, | ||
114 | loff_t *ppos) | ||
115 | { | ||
116 | int reg_len, tot_len; | ||
117 | size_t buf_pos = 0; | ||
118 | loff_t p = 0; | ||
119 | ssize_t ret; | ||
120 | int i; | ||
121 | struct regmap *map = file->private_data; | ||
122 | char *buf; | ||
123 | |||
124 | if (*ppos < 0 || !count) | ||
125 | return -EINVAL; | ||
126 | |||
127 | buf = kmalloc(count, GFP_KERNEL); | ||
128 | if (!buf) | ||
129 | return -ENOMEM; | ||
130 | |||
131 | /* Calculate the length of a fixed format */ | ||
132 | reg_len = regmap_calc_reg_len(map->max_register, buf, count); | ||
133 | tot_len = reg_len + 10; /* ': R W V P\n' */ | ||
134 | |||
135 | for (i = 0; i < map->max_register + 1; i++) { | ||
136 | /* Ignore registers which are neither readable nor writable */ | ||
137 | if (!regmap_readable(map, i) && !regmap_writeable(map, i)) | ||
138 | continue; | ||
139 | |||
140 | /* If we're in the region the user is trying to read */ | ||
141 | if (p >= *ppos) { | ||
142 | /* ...but not beyond it */ | ||
143 | if (buf_pos >= count - 1 - tot_len) | ||
144 | break; | ||
145 | |||
146 | /* Format the register */ | ||
147 | snprintf(buf + buf_pos, count - buf_pos, | ||
148 | "%.*x: %c %c %c %c\n", | ||
149 | reg_len, i, | ||
150 | regmap_readable(map, i) ? 'y' : 'n', | ||
151 | regmap_writeable(map, i) ? 'y' : 'n', | ||
152 | regmap_volatile(map, i) ? 'y' : 'n', | ||
153 | regmap_precious(map, i) ? 'y' : 'n'); | ||
154 | |||
155 | buf_pos += tot_len; | ||
156 | } | ||
157 | p += tot_len; | ||
158 | } | ||
159 | |||
160 | ret = buf_pos; | ||
161 | |||
162 | if (copy_to_user(user_buf, buf, buf_pos)) { | ||
163 | ret = -EFAULT; | ||
164 | goto out; | ||
165 | } | ||
166 | |||
167 | *ppos += buf_pos; | ||
168 | |||
169 | out: | ||
170 | kfree(buf); | ||
171 | return ret; | ||
172 | } | ||
173 | |||
174 | static const struct file_operations regmap_access_fops = { | ||
175 | .open = regmap_open_file, | ||
176 | .read = regmap_access_read_file, | ||
177 | .llseek = default_llseek, | ||
178 | }; | ||
179 | |||
180 | void regmap_debugfs_init(struct regmap *map) | ||
181 | { | ||
182 | map->debugfs = debugfs_create_dir(dev_name(map->dev), | ||
183 | regmap_debugfs_root); | ||
184 | if (!map->debugfs) { | ||
185 | dev_warn(map->dev, "Failed to create debugfs directory\n"); | ||
186 | return; | ||
187 | } | ||
188 | |||
189 | if (map->max_register) { | ||
190 | debugfs_create_file("registers", 0400, map->debugfs, | ||
191 | map, ®map_map_fops); | ||
192 | debugfs_create_file("access", 0400, map->debugfs, | ||
193 | map, ®map_access_fops); | ||
194 | } | ||
195 | } | ||
196 | |||
197 | void regmap_debugfs_exit(struct regmap *map) | ||
198 | { | ||
199 | debugfs_remove_recursive(map->debugfs); | ||
200 | } | ||
201 | |||
202 | void regmap_debugfs_initcall(void) | ||
203 | { | ||
204 | regmap_debugfs_root = debugfs_create_dir("regmap", NULL); | ||
205 | if (!regmap_debugfs_root) { | ||
206 | pr_warn("regmap: Failed to create debugfs root\n"); | ||
207 | return; | ||
208 | } | ||
209 | } | ||
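With debugfs mounted in the conventional place, the two files created above read like ordinary files: "registers" prints one "address: value" pair per line (all 'X's when a register can't be read) and "access" prints one "address: r w v p" line of y/n flags per register. A minimal userspace sketch, assuming the usual mount point /sys/kernel/debug and a hypothetical device name "dummy-dev":

	/* Hypothetical reader for the regmap debugfs files created above. */
	#include <stdio.h>
	#include <stdlib.h>

	static void dump(const char *path)
	{
		char line[128];
		FILE *f = fopen(path, "r");

		if (!f) {
			perror(path);	/* e.g. debugfs not mounted */
			return;
		}
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
	}

	int main(void)
	{
		dump("/sys/kernel/debug/regmap/dummy-dev/registers");
		dump("/sys/kernel/debug/regmap/dummy-dev/access");
		return EXIT_SUCCESS;
	}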
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c index c4f7a45cd2c3..38621ec87c05 100644 --- a/drivers/base/regmap/regmap-i2c.c +++ b/drivers/base/regmap/regmap-i2c.c | |||
@@ -90,11 +90,9 @@ static int regmap_i2c_read(struct device *dev, | |||
90 | } | 90 | } |
91 | 91 | ||
92 | static struct regmap_bus regmap_i2c = { | 92 | static struct regmap_bus regmap_i2c = { |
93 | .type = &i2c_bus_type, | ||
94 | .write = regmap_i2c_write, | 93 | .write = regmap_i2c_write, |
95 | .gather_write = regmap_i2c_gather_write, | 94 | .gather_write = regmap_i2c_gather_write, |
96 | .read = regmap_i2c_read, | 95 | .read = regmap_i2c_read, |
97 | .owner = THIS_MODULE, | ||
98 | }; | 96 | }; |
99 | 97 | ||
100 | /** | 98 | /** |
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c index f8396945d6ed..2560658de344 100644 --- a/drivers/base/regmap/regmap-spi.c +++ b/drivers/base/regmap/regmap-spi.c | |||
@@ -48,11 +48,9 @@ static int regmap_spi_read(struct device *dev, | |||
48 | } | 48 | } |
49 | 49 | ||
50 | static struct regmap_bus regmap_spi = { | 50 | static struct regmap_bus regmap_spi = { |
51 | .type = &spi_bus_type, | ||
52 | .write = regmap_spi_write, | 51 | .write = regmap_spi_write, |
53 | .gather_write = regmap_spi_gather_write, | 52 | .gather_write = regmap_spi_gather_write, |
54 | .read = regmap_spi_read, | 53 | .read = regmap_spi_read, |
55 | .owner = THIS_MODULE, | ||
56 | .read_flag_mask = 0x80, | 54 | .read_flag_mask = 0x80, |
57 | }; | 55 | }; |
58 | 56 | ||
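With .type and .owner gone from both buses above, a regmap_bus is reduced to its I/O callbacks plus an optional read_flag_mask. A minimal sketch of a hypothetical bus; the stub bodies stand in for a real transport:

	#include <linux/device.h>
	#include <linux/regmap.h>
	#include <linux/string.h>

	/* Sketch only: the foo_ names are assumed, the bodies are stubs. */
	static int foo_bus_write(struct device *dev, const void *data,
				 size_t count)
	{
		/* push 'count' bytes (formatted register then value) here */
		return 0;
	}

	static int foo_bus_read(struct device *dev, const void *reg_buf,
				size_t reg_size, void *val_buf, size_t val_size)
	{
		/* issue the read described by reg_buf and fill val_buf here */
		memset(val_buf, 0, val_size);
		return 0;
	}

	static struct regmap_bus foo_regmap_bus = {
		.write		= foo_bus_write,
		.read		= foo_bus_read,
		.read_flag_mask	= 0x80,	/* bus default; see the regmap.c change below */
	};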
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 0eef4da1ac61..bf441db1ee90 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c | |||
@@ -15,29 +15,54 @@ | |||
15 | #include <linux/mutex.h> | 15 | #include <linux/mutex.h> |
16 | #include <linux/err.h> | 16 | #include <linux/err.h> |
17 | 17 | ||
18 | #include <linux/regmap.h> | 18 | #define CREATE_TRACE_POINTS |
19 | 19 | #include <trace/events/regmap.h> | |
20 | struct regmap; | 20 | |
21 | 21 | #include "internal.h" | |
22 | struct regmap_format { | 22 | |
23 | size_t buf_size; | 23 | bool regmap_writeable(struct regmap *map, unsigned int reg) |
24 | size_t reg_bytes; | 24 | { |
25 | size_t val_bytes; | 25 | if (map->max_register && reg > map->max_register) |
26 | void (*format_write)(struct regmap *map, | 26 | return false; |
27 | unsigned int reg, unsigned int val); | 27 | |
28 | void (*format_reg)(void *buf, unsigned int reg); | 28 | if (map->writeable_reg) |
29 | void (*format_val)(void *buf, unsigned int val); | 29 | return map->writeable_reg(map->dev, reg); |
30 | unsigned int (*parse_val)(void *buf); | 30 | |
31 | }; | 31 | return true; |
32 | 32 | } | |
33 | struct regmap { | 33 | |
34 | struct mutex lock; | 34 | bool regmap_readable(struct regmap *map, unsigned int reg) |
35 | 35 | { | |
36 | struct device *dev; /* Device we do I/O on */ | 36 | if (map->max_register && reg > map->max_register) |
37 | void *work_buf; /* Scratch buffer used to format I/O */ | 37 | return false; |
38 | struct regmap_format format; /* Buffer format */ | 38 | |
39 | const struct regmap_bus *bus; | 39 | if (map->readable_reg) |
40 | }; | 40 | return map->readable_reg(map->dev, reg); |
41 | |||
42 | return true; | ||
43 | } | ||
44 | |||
45 | bool regmap_volatile(struct regmap *map, unsigned int reg) | ||
46 | { | ||
47 | if (map->max_register && reg > map->max_register) | ||
48 | return false; | ||
49 | |||
50 | if (map->volatile_reg) | ||
51 | return map->volatile_reg(map->dev, reg); | ||
52 | |||
53 | return true; | ||
54 | } | ||
55 | |||
56 | bool regmap_precious(struct regmap *map, unsigned int reg) | ||
57 | { | ||
58 | if (map->max_register && reg > map->max_register) | ||
59 | return false; | ||
60 | |||
61 | if (map->precious_reg) | ||
62 | return map->precious_reg(map->dev, reg); | ||
63 | |||
64 | return false; | ||
65 | } | ||
41 | 66 | ||
42 | static void regmap_format_4_12_write(struct regmap *map, | 67 | static void regmap_format_4_12_write(struct regmap *map, |
43 | unsigned int reg, unsigned int val) | 68 | unsigned int reg, unsigned int val) |
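The four helpers above fall back to permissive answers (regmap_precious() excepted) when the driver supplies no callback. A hedged sketch of a config that does supply them, for a hypothetical chip where 0x00 is a read-only ID register and 0x0f a read-to-clear status register:

	/* Hypothetical regmap_config exercising the helpers above. */
	static bool foo_writeable_reg(struct device *dev, unsigned int reg)
	{
		return reg != 0x00;	/* 0x00 is a read-only ID register */
	}

	static bool foo_volatile_reg(struct device *dev, unsigned int reg)
	{
		return reg == 0x0f;	/* status register bypasses the cache */
	}

	static bool foo_precious_reg(struct device *dev, unsigned int reg)
	{
		return reg == 0x0f;	/* reading it has side effects */
	}

	static const struct regmap_config foo_regmap_config = {
		.reg_bits	= 8,
		.val_bits	= 8,
		.max_register	= 0x0f,
		.writeable_reg	= foo_writeable_reg,
		.volatile_reg	= foo_volatile_reg,
		.precious_reg	= foo_precious_reg,
	};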
@@ -116,6 +141,25 @@ struct regmap *regmap_init(struct device *dev, | |||
116 | map->format.val_bytes = config->val_bits / 8; | 141 | map->format.val_bytes = config->val_bits / 8; |
117 | map->dev = dev; | 142 | map->dev = dev; |
118 | map->bus = bus; | 143 | map->bus = bus; |
144 | map->max_register = config->max_register; | ||
145 | map->writeable_reg = config->writeable_reg; | ||
146 | map->readable_reg = config->readable_reg; | ||
147 | map->volatile_reg = config->volatile_reg; | ||
148 | map->precious_reg = config->precious_reg; | ||
149 | map->cache_type = config->cache_type; | ||
150 | map->reg_defaults = config->reg_defaults; | ||
151 | map->num_reg_defaults = config->num_reg_defaults; | ||
152 | map->num_reg_defaults_raw = config->num_reg_defaults_raw; | ||
153 | map->reg_defaults_raw = config->reg_defaults_raw; | ||
154 | map->cache_size_raw = (config->val_bits / 8) * config->num_reg_defaults_raw; | ||
155 | map->cache_word_size = config->val_bits / 8; | ||
156 | |||
157 | if (config->read_flag_mask || config->write_flag_mask) { | ||
158 | map->read_flag_mask = config->read_flag_mask; | ||
159 | map->write_flag_mask = config->write_flag_mask; | ||
160 | } else { | ||
161 | map->read_flag_mask = bus->read_flag_mask; | ||
162 | } | ||
119 | 163 | ||
120 | switch (config->reg_bits) { | 164 | switch (config->reg_bits) { |
121 | case 4: | 165 | case 4: |
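Because the copy above only takes the bus's read_flag_mask when the config sets neither mask, a device-specific mask wins; an illustrative (assumed) config overriding it:

	/* Hypothetical config: both masks are set, so a bus default such as
	 * regmap_spi's 0x80 read_flag_mask is ignored in favour of these. */
	static const struct regmap_config bar_regmap_config = {
		.reg_bits	 = 8,
		.val_bits	 = 8,
		.read_flag_mask	 = 0x40,
		.write_flag_mask = 0x01,
	};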
@@ -168,13 +212,17 @@ struct regmap *regmap_init(struct device *dev, | |||
168 | map->work_buf = kmalloc(map->format.buf_size, GFP_KERNEL); | 212 | map->work_buf = kmalloc(map->format.buf_size, GFP_KERNEL); |
169 | if (map->work_buf == NULL) { | 213 | if (map->work_buf == NULL) { |
170 | ret = -ENOMEM; | 214 | ret = -ENOMEM; |
171 | goto err_bus; | 215 | goto err_map; |
172 | } | 216 | } |
173 | 217 | ||
218 | ret = regcache_init(map); | ||
219 | if (ret < 0) | ||
220 | goto err_map; | ||
221 | |||
222 | regmap_debugfs_init(map); | ||
223 | |||
174 | return map; | 224 | return map; |
175 | 225 | ||
176 | err_bus: | ||
177 | module_put(map->bus->owner); | ||
178 | err_map: | 226 | err_map: |
179 | kfree(map); | 227 | kfree(map); |
180 | err: | 228 | err: |
@@ -187,8 +235,9 @@ EXPORT_SYMBOL_GPL(regmap_init); | |||
187 | */ | 235 | */ |
188 | void regmap_exit(struct regmap *map) | 236 | void regmap_exit(struct regmap *map) |
189 | { | 237 | { |
238 | regcache_exit(map); | ||
239 | regmap_debugfs_exit(map); | ||
190 | kfree(map->work_buf); | 240 | kfree(map->work_buf); |
191 | module_put(map->bus->owner); | ||
192 | kfree(map); | 241 | kfree(map); |
193 | } | 242 | } |
194 | EXPORT_SYMBOL_GPL(regmap_exit); | 243 | EXPORT_SYMBOL_GPL(regmap_exit); |
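regmap_exit() now also tears down the cache and the debugfs entries set up in regmap_init(), so callers keep the same one-to-one pairing; a sketch against a hypothetical I2C driver, reusing foo_regmap_config from the earlier sketch:

	#include <linux/err.h>
	#include <linux/i2c.h>
	#include <linux/regmap.h>

	static int foo_probe(struct i2c_client *client,
			     const struct i2c_device_id *id)
	{
		struct regmap *map;

		map = regmap_init_i2c(client, &foo_regmap_config);
		if (IS_ERR(map))
			return PTR_ERR(map);	/* covers cache init failure too */

		i2c_set_clientdata(client, map);
		return 0;
	}

	static int foo_remove(struct i2c_client *client)
	{
		/* frees the cache, debugfs files and work buffer in one call */
		regmap_exit(i2c_get_clientdata(client));
		return 0;
	}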
@@ -196,19 +245,38 @@ EXPORT_SYMBOL_GPL(regmap_exit); | |||
196 | static int _regmap_raw_write(struct regmap *map, unsigned int reg, | 245 | static int _regmap_raw_write(struct regmap *map, unsigned int reg, |
197 | const void *val, size_t val_len) | 246 | const void *val, size_t val_len) |
198 | { | 247 | { |
248 | u8 *u8 = map->work_buf; | ||
199 | void *buf; | 249 | void *buf; |
200 | int ret = -ENOTSUPP; | 250 | int ret = -ENOTSUPP; |
201 | size_t len; | 251 | size_t len; |
252 | int i; | ||
253 | |||
254 | /* Check for unwritable registers before we start */ | ||
255 | if (map->writeable_reg) | ||
256 | for (i = 0; i < val_len / map->format.val_bytes; i++) | ||
257 | if (!map->writeable_reg(map->dev, reg + i)) | ||
258 | return -EINVAL; | ||
202 | 259 | ||
203 | map->format.format_reg(map->work_buf, reg); | 260 | map->format.format_reg(map->work_buf, reg); |
204 | 261 | ||
205 | /* Try to do a gather write if we can */ | 262 | u8[0] |= map->write_flag_mask; |
206 | if (map->bus->gather_write) | 263 | |
264 | trace_regmap_hw_write_start(map->dev, reg, | ||
265 | val_len / map->format.val_bytes); | ||
266 | |||
267 | /* If we're doing a single register write we can probably just | ||
268 | * send the work_buf directly, otherwise try to do a gather | ||
269 | * write. | ||
270 | */ | ||
271 | if (val == map->work_buf + map->format.reg_bytes) | ||
272 | ret = map->bus->write(map->dev, map->work_buf, | ||
273 | map->format.reg_bytes + val_len); | ||
274 | else if (map->bus->gather_write) | ||
207 | ret = map->bus->gather_write(map->dev, map->work_buf, | 275 | ret = map->bus->gather_write(map->dev, map->work_buf, |
208 | map->format.reg_bytes, | 276 | map->format.reg_bytes, |
209 | val, val_len); | 277 | val, val_len); |
210 | 278 | ||
211 | /* Otherwise fall back on linearising by hand. */ | 279 | /* If that didn't work fall back on linearising by hand. */ |
212 | if (ret == -ENOTSUPP) { | 280 | if (ret == -ENOTSUPP) { |
213 | len = map->format.reg_bytes + val_len; | 281 | len = map->format.reg_bytes + val_len; |
214 | buf = kmalloc(len, GFP_KERNEL); | 282 | buf = kmalloc(len, GFP_KERNEL); |
@@ -222,19 +290,39 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg, | |||
222 | kfree(buf); | 290 | kfree(buf); |
223 | } | 291 | } |
224 | 292 | ||
293 | trace_regmap_hw_write_done(map->dev, reg, | ||
294 | val_len / map->format.val_bytes); | ||
295 | |||
225 | return ret; | 296 | return ret; |
226 | } | 297 | } |
227 | 298 | ||
228 | static int _regmap_write(struct regmap *map, unsigned int reg, | 299 | int _regmap_write(struct regmap *map, unsigned int reg, |
229 | unsigned int val) | 300 | unsigned int val) |
230 | { | 301 | { |
302 | int ret; | ||
231 | BUG_ON(!map->format.format_write && !map->format.format_val); | 303 | BUG_ON(!map->format.format_write && !map->format.format_val); |
232 | 304 | ||
305 | if (!map->cache_bypass) { | ||
306 | ret = regcache_write(map, reg, val); | ||
307 | if (ret != 0) | ||
308 | return ret; | ||
309 | if (map->cache_only) | ||
310 | return 0; | ||
311 | } | ||
312 | |||
313 | trace_regmap_reg_write(map->dev, reg, val); | ||
314 | |||
233 | if (map->format.format_write) { | 315 | if (map->format.format_write) { |
234 | map->format.format_write(map, reg, val); | 316 | map->format.format_write(map, reg, val); |
235 | 317 | ||
236 | return map->bus->write(map->dev, map->work_buf, | 318 | trace_regmap_hw_write_start(map->dev, reg, 1); |
237 | map->format.buf_size); | 319 | |
320 | ret = map->bus->write(map->dev, map->work_buf, | ||
321 | map->format.buf_size); | ||
322 | |||
323 | trace_regmap_hw_write_done(map->dev, reg, 1); | ||
324 | |||
325 | return ret; | ||
238 | } else { | 326 | } else { |
239 | map->format.format_val(map->work_buf + map->format.reg_bytes, | 327 | map->format.format_val(map->work_buf + map->format.reg_bytes, |
240 | val); | 328 | val); |
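The cache_bypass/cache_only checks above give the usual suspend pattern its semantics: with cache_only set, writes land only in the cache and are replayed later. A hedged sketch using the regcache helpers from the same series (regcache_cache_only() and regcache_sync(); the power sequencing is assumed to happen elsewhere):

	#include <linux/regmap.h>

	static void foo_power_off(struct regmap *map)
	{
		regcache_cache_only(map, true);
		/* hardware may now be powered down; writes stay in the cache */
	}

	static int foo_power_on(struct regmap *map)
	{
		/* power the hardware back up first (not shown), then: */
		regcache_cache_only(map, false);
		return regcache_sync(map);	/* write back cached values */
	}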
@@ -289,6 +377,8 @@ int regmap_raw_write(struct regmap *map, unsigned int reg, | |||
289 | { | 377 | { |
290 | int ret; | 378 | int ret; |
291 | 379 | ||
380 | WARN_ON(map->cache_type != REGCACHE_NONE); | ||
381 | |||
292 | mutex_lock(&map->lock); | 382 | mutex_lock(&map->lock); |
293 | 383 | ||
294 | ret = _regmap_raw_write(map, reg, val, val_len); | 384 | ret = _regmap_raw_write(map, reg, val, val_len); |
@@ -308,20 +398,23 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val, | |||
308 | map->format.format_reg(map->work_buf, reg); | 398 | map->format.format_reg(map->work_buf, reg); |
309 | 399 | ||
310 | /* | 400 | /* |
311 | * Some buses flag reads by setting the high bits in the | 401 | * Some buses or devices flag reads by setting the high bits in the |
312 | * register address; since it's always the high bits for all | 402 | * register address; since it's always the high bits for all |
313 | * current formats we can do this here rather than in | 403 | * current formats we can do this here rather than in |
314 | * formatting. This may break if we get interesting formats. | 404 | * formatting. This may break if we get interesting formats. |
315 | */ | 405 | */ |
316 | if (map->bus->read_flag_mask) | 406 | u8[0] |= map->read_flag_mask; |
317 | u8[0] |= map->bus->read_flag_mask; | 407 | |
408 | trace_regmap_hw_read_start(map->dev, reg, | ||
409 | val_len / map->format.val_bytes); | ||
318 | 410 | ||
319 | ret = map->bus->read(map->dev, map->work_buf, map->format.reg_bytes, | 411 | ret = map->bus->read(map->dev, map->work_buf, map->format.reg_bytes, |
320 | val, val_len); | 412 | val, val_len); |
321 | if (ret != 0) | ||
322 | return ret; | ||
323 | 413 | ||
324 | return 0; | 414 | trace_regmap_hw_read_done(map->dev, reg, |
415 | val_len / map->format.val_bytes); | ||
416 | |||
417 | return ret; | ||
325 | } | 418 | } |
326 | 419 | ||
327 | static int _regmap_read(struct regmap *map, unsigned int reg, | 420 | static int _regmap_read(struct regmap *map, unsigned int reg, |
@@ -332,9 +425,20 @@ static int _regmap_read(struct regmap *map, unsigned int reg, | |||
332 | if (!map->format.parse_val) | 425 | if (!map->format.parse_val) |
333 | return -EINVAL; | 426 | return -EINVAL; |
334 | 427 | ||
428 | if (!map->cache_bypass) { | ||
429 | ret = regcache_read(map, reg, val); | ||
430 | if (ret == 0) | ||
431 | return 0; | ||
432 | } | ||
433 | |||
434 | if (map->cache_only) | ||
435 | return -EBUSY; | ||
436 | |||
335 | ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes); | 437 | ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes); |
336 | if (ret == 0) | 438 | if (ret == 0) { |
337 | *val = map->format.parse_val(map->work_buf); | 439 | *val = map->format.parse_val(map->work_buf); |
440 | trace_regmap_reg_read(map->dev, reg, *val); | ||
441 | } | ||
338 | 442 | ||
339 | return ret; | 443 | return ret; |
340 | } | 444 | } |
@@ -378,6 +482,14 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val, | |||
378 | size_t val_len) | 482 | size_t val_len) |
379 | { | 483 | { |
380 | int ret; | 484 | int ret; |
485 | int i; | ||
486 | bool vol = true; | ||
487 | |||
488 | for (i = 0; i < val_len / map->format.val_bytes; i++) | ||
489 | if (!regmap_volatile(map, reg + i)) | ||
490 | vol = false; | ||
491 | |||
492 | WARN_ON(!vol && map->cache_type != REGCACHE_NONE); | ||
381 | 493 | ||
382 | mutex_lock(&map->lock); | 494 | mutex_lock(&map->lock); |
383 | 495 | ||
@@ -405,16 +517,30 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val, | |||
405 | { | 517 | { |
406 | int ret, i; | 518 | int ret, i; |
407 | size_t val_bytes = map->format.val_bytes; | 519 | size_t val_bytes = map->format.val_bytes; |
520 | bool vol = true; | ||
408 | 521 | ||
409 | if (!map->format.parse_val) | 522 | if (!map->format.parse_val) |
410 | return -EINVAL; | 523 | return -EINVAL; |
411 | 524 | ||
412 | ret = regmap_raw_read(map, reg, val, val_bytes * val_count); | 525 | /* Is this a block of volatile registers? */ |
413 | if (ret != 0) | 526 | for (i = 0; i < val_count; i++) |
414 | return ret; | 527 | if (!regmap_volatile(map, reg + i)) |
528 | vol = false; | ||
529 | |||
530 | if (vol || map->cache_type == REGCACHE_NONE) { | ||
531 | ret = regmap_raw_read(map, reg, val, val_bytes * val_count); | ||
532 | if (ret != 0) | ||
533 | return ret; | ||
415 | 534 | ||
416 | for (i = 0; i < val_count * val_bytes; i += val_bytes) | 535 | for (i = 0; i < val_count * val_bytes; i += val_bytes) |
417 | map->format.parse_val(val + i); | 536 | map->format.parse_val(val + i); |
537 | } else { | ||
538 | for (i = 0; i < val_count; i++) { | ||
539 | ret = regmap_read(map, reg + i, val + (i * val_bytes)); | ||
540 | if (ret != 0) | ||
541 | return ret; | ||
542 | } | ||
543 | } | ||
418 | 544 | ||
419 | return 0; | 545 | return 0; |
420 | } | 546 | } |
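Either branch above leaves the caller's buffer holding parsed, native-endian values, so usage is unchanged; a sketch reading four hypothetical 16-bit registers starting at 0x10:

	static int foo_read_block(struct device *dev, struct regmap *map)
	{
		u16 vals[4];	/* assumes val_bits == 16 */
		int ret;

		ret = regmap_bulk_read(map, 0x10, vals, ARRAY_SIZE(vals));
		if (ret != 0)
			dev_err(dev, "bulk read failed: %d\n", ret);
		return ret;
	}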
@@ -453,3 +579,11 @@ out: | |||
453 | return ret; | 579 | return ret; |
454 | } | 580 | } |
455 | EXPORT_SYMBOL_GPL(regmap_update_bits); | 581 | EXPORT_SYMBOL_GPL(regmap_update_bits); |
582 | |||
583 | static int __init regmap_initcall(void) | ||
584 | { | ||
585 | regmap_debugfs_initcall(); | ||
586 | |||
587 | return 0; | ||
588 | } | ||
589 | postcore_initcall(regmap_initcall); | ||
diff --git a/drivers/base/transport_class.c b/drivers/base/transport_class.c index 84997efdb23d..f6c453c3816e 100644 --- a/drivers/base/transport_class.c +++ b/drivers/base/transport_class.c | |||
@@ -27,6 +27,7 @@ | |||
27 | * transport class is framed entirely in terms of generic devices to | 27 | * transport class is framed entirely in terms of generic devices to |
28 | * allow it to be used by any physical HBA in the system. | 28 | * allow it to be used by any physical HBA in the system. |
29 | */ | 29 | */ |
30 | #include <linux/export.h> | ||
30 | #include <linux/attribute_container.h> | 31 | #include <linux/attribute_container.h> |
31 | #include <linux/transport_class.h> | 32 | #include <linux/transport_class.h> |
32 | 33 | ||