diff options
| author | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2014-08-07 02:36:12 -0400 |
|---|---|---|
| committer | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2014-08-07 02:36:12 -0400 |
| commit | 5e2aa2ed08e2e280121dc7cf5609c87d464f12ef (patch) | |
| tree | ca7d7b1480285e3b617fecc5b41f0ce150a82c32 /drivers/base | |
| parent | f62d14a8072b9756db36ba394e2b267470a40240 (diff) | |
| parent | fc8104bc5a3f6f49d79f45f2706f79f77a9fb2ae (diff) | |
Merge branch 'next' into for-linus
Prepare first round of input updates for 3.17.
Diffstat (limited to 'drivers/base')
| -rw-r--r-- | drivers/base/Kconfig | 19 | ||||
| -rw-r--r-- | drivers/base/base.h | 3 | ||||
| -rw-r--r-- | drivers/base/dd.c | 26 | ||||
| -rw-r--r-- | drivers/base/devres.c | 97 | ||||
| -rw-r--r-- | drivers/base/dma-buf.c | 2 | ||||
| -rw-r--r-- | drivers/base/dma-coherent.c | 10 | ||||
| -rw-r--r-- | drivers/base/dma-contiguous.c | 88 | ||||
| -rw-r--r-- | drivers/base/dma-mapping.c | 6 | ||||
| -rw-r--r-- | drivers/base/memory.c | 12 | ||||
| -rw-r--r-- | drivers/base/platform.c | 21 | ||||
| -rw-r--r-- | drivers/base/power/domain.c | 2 | ||||
| -rw-r--r-- | drivers/base/power/main.c | 90 | ||||
| -rw-r--r-- | drivers/base/power/opp.c | 122 | ||||
| -rw-r--r-- | drivers/base/power/wakeup.c | 6 | ||||
| -rw-r--r-- | drivers/base/regmap/regcache-rbtree.c | 8 | ||||
| -rw-r--r-- | drivers/base/regmap/regmap-i2c.c | 104 | ||||
| -rw-r--r-- | drivers/base/regmap/regmap-irq.c | 9 | ||||
| -rw-r--r-- | drivers/base/regmap/regmap-mmio.c | 35 | ||||
| -rw-r--r-- | drivers/base/regmap/regmap.c | 83 | ||||
| -rw-r--r-- | drivers/base/syscore.c | 5 |
20 files changed, 537 insertions(+), 211 deletions(-)
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index 8fa8deab6449..23b8726962af 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig | |||
| @@ -1,10 +1,10 @@ | |||
| 1 | menu "Generic Driver Options" | 1 | menu "Generic Driver Options" |
| 2 | 2 | ||
| 3 | config UEVENT_HELPER_PATH | 3 | config UEVENT_HELPER |
| 4 | string "path to uevent helper" | 4 | bool "Support for uevent helper" |
| 5 | default "" | 5 | default y |
| 6 | help | 6 | help |
| 7 | Path to uevent helper program forked by the kernel for | 7 | The uevent helper program is forked by the kernel for |
| 8 | every uevent. | 8 | every uevent. |
| 9 | Before the switch to the netlink-based uevent source, this was | 9 | Before the switch to the netlink-based uevent source, this was |
| 10 | used to hook hotplug scripts into kernel device events. It | 10 | used to hook hotplug scripts into kernel device events. It |
| @@ -15,8 +15,13 @@ config UEVENT_HELPER_PATH | |||
| 15 | that it creates a high system load, or on smaller systems | 15 | that it creates a high system load, or on smaller systems |
| 16 | it is known to create out-of-memory situations during bootup. | 16 | it is known to create out-of-memory situations during bootup. |
| 17 | 17 | ||
| 18 | To disable user space helper program execution at early boot | 18 | config UEVENT_HELPER_PATH |
| 19 | time specify an empty string here. This setting can be altered | 19 | string "path to uevent helper" |
| 20 | depends on UEVENT_HELPER | ||
| 21 | default "" | ||
| 22 | help | ||
| 23 | To disable user space helper program execution at by default | ||
| 24 | specify an empty string here. This setting can still be altered | ||
| 20 | via /proc/sys/kernel/hotplug or via /sys/kernel/uevent_helper | 25 | via /proc/sys/kernel/hotplug or via /sys/kernel/uevent_helper |
| 21 | later at runtime. | 26 | later at runtime. |
| 22 | 27 | ||
| @@ -253,7 +258,7 @@ endchoice | |||
| 253 | 258 | ||
| 254 | config CMA_ALIGNMENT | 259 | config CMA_ALIGNMENT |
| 255 | int "Maximum PAGE_SIZE order of alignment for contiguous buffers" | 260 | int "Maximum PAGE_SIZE order of alignment for contiguous buffers" |
| 256 | range 4 9 | 261 | range 4 12 |
| 257 | default 8 | 262 | default 8 |
| 258 | help | 263 | help |
| 259 | DMA mapping framework by default aligns all buffers to the smallest | 264 | DMA mapping framework by default aligns all buffers to the smallest |
diff --git a/drivers/base/base.h b/drivers/base/base.h index 24f424249d9b..251c5d30f963 100644 --- a/drivers/base/base.h +++ b/drivers/base/base.h | |||
| @@ -63,8 +63,6 @@ struct driver_private { | |||
| 63 | * binding of drivers which were unable to get all the resources needed by | 63 | * binding of drivers which were unable to get all the resources needed by |
| 64 | * the device; typically because it depends on another driver getting | 64 | * the device; typically because it depends on another driver getting |
| 65 | * probed first. | 65 | * probed first. |
| 66 | * @driver_data - private pointer for driver specific info. Will turn into a | ||
| 67 | * list soon. | ||
| 68 | * @device - pointer back to the struct class that this structure is | 66 | * @device - pointer back to the struct class that this structure is |
| 69 | * associated with. | 67 | * associated with. |
| 70 | * | 68 | * |
| @@ -76,7 +74,6 @@ struct device_private { | |||
| 76 | struct klist_node knode_driver; | 74 | struct klist_node knode_driver; |
| 77 | struct klist_node knode_bus; | 75 | struct klist_node knode_bus; |
| 78 | struct list_head deferred_probe; | 76 | struct list_head deferred_probe; |
| 79 | void *driver_data; | ||
| 80 | struct device *device; | 77 | struct device *device; |
| 81 | }; | 78 | }; |
| 82 | #define to_device_private_parent(obj) \ | 79 | #define to_device_private_parent(obj) \ |
diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 62ec61e8f84a..e4ffbcf2f519 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c | |||
| @@ -587,29 +587,3 @@ void driver_detach(struct device_driver *drv) | |||
| 587 | put_device(dev); | 587 | put_device(dev); |
| 588 | } | 588 | } |
| 589 | } | 589 | } |
| 590 | |||
| 591 | /* | ||
| 592 | * These exports can't be _GPL due to .h files using this within them, and it | ||
| 593 | * might break something that was previously working... | ||
| 594 | */ | ||
| 595 | void *dev_get_drvdata(const struct device *dev) | ||
| 596 | { | ||
| 597 | if (dev && dev->p) | ||
| 598 | return dev->p->driver_data; | ||
| 599 | return NULL; | ||
| 600 | } | ||
| 601 | EXPORT_SYMBOL(dev_get_drvdata); | ||
| 602 | |||
| 603 | int dev_set_drvdata(struct device *dev, void *data) | ||
| 604 | { | ||
| 605 | int error; | ||
| 606 | |||
| 607 | if (!dev->p) { | ||
| 608 | error = device_private_init(dev); | ||
| 609 | if (error) | ||
| 610 | return error; | ||
| 611 | } | ||
| 612 | dev->p->driver_data = data; | ||
| 613 | return 0; | ||
| 614 | } | ||
| 615 | EXPORT_SYMBOL(dev_set_drvdata); | ||
diff --git a/drivers/base/devres.c b/drivers/base/devres.c index db4e264eecb6..52302946770f 100644 --- a/drivers/base/devres.c +++ b/drivers/base/devres.c | |||
| @@ -831,3 +831,100 @@ void devm_kfree(struct device *dev, void *p) | |||
| 831 | WARN_ON(rc); | 831 | WARN_ON(rc); |
| 832 | } | 832 | } |
| 833 | EXPORT_SYMBOL_GPL(devm_kfree); | 833 | EXPORT_SYMBOL_GPL(devm_kfree); |
| 834 | |||
| 835 | /** | ||
| 836 | * devm_kmemdup - Resource-managed kmemdup | ||
| 837 | * @dev: Device this memory belongs to | ||
| 838 | * @src: Memory region to duplicate | ||
| 839 | * @len: Memory region length | ||
| 840 | * @gfp: GFP mask to use | ||
| 841 | * | ||
| 842 | * Duplicate region of a memory using resource managed kmalloc | ||
| 843 | */ | ||
| 844 | void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp) | ||
| 845 | { | ||
| 846 | void *p; | ||
| 847 | |||
| 848 | p = devm_kmalloc(dev, len, gfp); | ||
| 849 | if (p) | ||
| 850 | memcpy(p, src, len); | ||
| 851 | |||
| 852 | return p; | ||
| 853 | } | ||
| 854 | EXPORT_SYMBOL_GPL(devm_kmemdup); | ||
| 855 | |||
| 856 | struct pages_devres { | ||
| 857 | unsigned long addr; | ||
| 858 | unsigned int order; | ||
| 859 | }; | ||
| 860 | |||
| 861 | static int devm_pages_match(struct device *dev, void *res, void *p) | ||
| 862 | { | ||
| 863 | struct pages_devres *devres = res; | ||
| 864 | struct pages_devres *target = p; | ||
| 865 | |||
| 866 | return devres->addr == target->addr; | ||
| 867 | } | ||
| 868 | |||
| 869 | static void devm_pages_release(struct device *dev, void *res) | ||
| 870 | { | ||
| 871 | struct pages_devres *devres = res; | ||
| 872 | |||
| 873 | free_pages(devres->addr, devres->order); | ||
| 874 | } | ||
| 875 | |||
| 876 | /** | ||
| 877 | * devm_get_free_pages - Resource-managed __get_free_pages | ||
| 878 | * @dev: Device to allocate memory for | ||
| 879 | * @gfp_mask: Allocation gfp flags | ||
| 880 | * @order: Allocation size is (1 << order) pages | ||
| 881 | * | ||
| 882 | * Managed get_free_pages. Memory allocated with this function is | ||
| 883 | * automatically freed on driver detach. | ||
| 884 | * | ||
| 885 | * RETURNS: | ||
| 886 | * Address of allocated memory on success, 0 on failure. | ||
| 887 | */ | ||
| 888 | |||
| 889 | unsigned long devm_get_free_pages(struct device *dev, | ||
| 890 | gfp_t gfp_mask, unsigned int order) | ||
| 891 | { | ||
| 892 | struct pages_devres *devres; | ||
| 893 | unsigned long addr; | ||
| 894 | |||
| 895 | addr = __get_free_pages(gfp_mask, order); | ||
| 896 | |||
| 897 | if (unlikely(!addr)) | ||
| 898 | return 0; | ||
| 899 | |||
| 900 | devres = devres_alloc(devm_pages_release, | ||
| 901 | sizeof(struct pages_devres), GFP_KERNEL); | ||
| 902 | if (unlikely(!devres)) { | ||
| 903 | free_pages(addr, order); | ||
| 904 | return 0; | ||
| 905 | } | ||
| 906 | |||
| 907 | devres->addr = addr; | ||
| 908 | devres->order = order; | ||
| 909 | |||
| 910 | devres_add(dev, devres); | ||
| 911 | return addr; | ||
| 912 | } | ||
| 913 | EXPORT_SYMBOL_GPL(devm_get_free_pages); | ||
| 914 | |||
| 915 | /** | ||
| 916 | * devm_free_pages - Resource-managed free_pages | ||
| 917 | * @dev: Device this memory belongs to | ||
| 918 | * @addr: Memory to free | ||
| 919 | * | ||
| 920 | * Free memory allocated with devm_get_free_pages(). Unlike free_pages, | ||
| 921 | * there is no need to supply the @order. | ||
| 922 | */ | ||
| 923 | void devm_free_pages(struct device *dev, unsigned long addr) | ||
| 924 | { | ||
| 925 | struct pages_devres devres = { .addr = addr }; | ||
| 926 | |||
| 927 | WARN_ON(devres_release(dev, devm_pages_release, devm_pages_match, | ||
| 928 | &devres)); | ||
| 929 | } | ||
| 930 | EXPORT_SYMBOL_GPL(devm_free_pages); | ||
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c index ea77701deda4..840c7fa80983 100644 --- a/drivers/base/dma-buf.c +++ b/drivers/base/dma-buf.c | |||
| @@ -491,7 +491,7 @@ EXPORT_SYMBOL_GPL(dma_buf_kunmap); | |||
| 491 | * dma-buf buffer. | 491 | * dma-buf buffer. |
| 492 | * | 492 | * |
| 493 | * This function adjusts the passed in vma so that it points at the file of the | 493 | * This function adjusts the passed in vma so that it points at the file of the |
| 494 | * dma_buf operation. It alsog adjusts the starting pgoff and does bounds | 494 | * dma_buf operation. It also adjusts the starting pgoff and does bounds |
| 495 | * checking on the size of the vma. Then it calls the exporters mmap function to | 495 | * checking on the size of the vma. Then it calls the exporters mmap function to |
| 496 | * set up the mapping. | 496 | * set up the mapping. |
| 497 | * | 497 | * |
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c index bc256b641027..7d6e84a51424 100644 --- a/drivers/base/dma-coherent.c +++ b/drivers/base/dma-coherent.c | |||
| @@ -10,13 +10,13 @@ | |||
| 10 | struct dma_coherent_mem { | 10 | struct dma_coherent_mem { |
| 11 | void *virt_base; | 11 | void *virt_base; |
| 12 | dma_addr_t device_base; | 12 | dma_addr_t device_base; |
| 13 | phys_addr_t pfn_base; | 13 | unsigned long pfn_base; |
| 14 | int size; | 14 | int size; |
| 15 | int flags; | 15 | int flags; |
| 16 | unsigned long *bitmap; | 16 | unsigned long *bitmap; |
| 17 | }; | 17 | }; |
| 18 | 18 | ||
| 19 | int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, | 19 | int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, |
| 20 | dma_addr_t device_addr, size_t size, int flags) | 20 | dma_addr_t device_addr, size_t size, int flags) |
| 21 | { | 21 | { |
| 22 | void __iomem *mem_base = NULL; | 22 | void __iomem *mem_base = NULL; |
| @@ -32,7 +32,7 @@ int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, | |||
| 32 | 32 | ||
| 33 | /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */ | 33 | /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */ |
| 34 | 34 | ||
| 35 | mem_base = ioremap(bus_addr, size); | 35 | mem_base = ioremap(phys_addr, size); |
| 36 | if (!mem_base) | 36 | if (!mem_base) |
| 37 | goto out; | 37 | goto out; |
| 38 | 38 | ||
| @@ -45,7 +45,7 @@ int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, | |||
| 45 | 45 | ||
| 46 | dev->dma_mem->virt_base = mem_base; | 46 | dev->dma_mem->virt_base = mem_base; |
| 47 | dev->dma_mem->device_base = device_addr; | 47 | dev->dma_mem->device_base = device_addr; |
| 48 | dev->dma_mem->pfn_base = PFN_DOWN(bus_addr); | 48 | dev->dma_mem->pfn_base = PFN_DOWN(phys_addr); |
| 49 | dev->dma_mem->size = pages; | 49 | dev->dma_mem->size = pages; |
| 50 | dev->dma_mem->flags = flags; | 50 | dev->dma_mem->flags = flags; |
| 51 | 51 | ||
| @@ -208,7 +208,7 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma, | |||
| 208 | 208 | ||
| 209 | *ret = -ENXIO; | 209 | *ret = -ENXIO; |
| 210 | if (off < count && user_count <= count - off) { | 210 | if (off < count && user_count <= count - off) { |
| 211 | unsigned pfn = mem->pfn_base + start + off; | 211 | unsigned long pfn = mem->pfn_base + start + off; |
| 212 | *ret = remap_pfn_range(vma, vma->vm_start, pfn, | 212 | *ret = remap_pfn_range(vma, vma->vm_start, pfn, |
| 213 | user_count << PAGE_SHIFT, | 213 | user_count << PAGE_SHIFT, |
| 214 | vma->vm_page_prot); | 214 | vma->vm_page_prot); |
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c index 165c2c299e57..6467c919c509 100644 --- a/drivers/base/dma-contiguous.c +++ b/drivers/base/dma-contiguous.c | |||
| @@ -37,6 +37,7 @@ struct cma { | |||
| 37 | unsigned long base_pfn; | 37 | unsigned long base_pfn; |
| 38 | unsigned long count; | 38 | unsigned long count; |
| 39 | unsigned long *bitmap; | 39 | unsigned long *bitmap; |
| 40 | struct mutex lock; | ||
| 40 | }; | 41 | }; |
| 41 | 42 | ||
| 42 | struct cma *dma_contiguous_default_area; | 43 | struct cma *dma_contiguous_default_area; |
| @@ -59,11 +60,22 @@ struct cma *dma_contiguous_default_area; | |||
| 59 | */ | 60 | */ |
| 60 | static const phys_addr_t size_bytes = CMA_SIZE_MBYTES * SZ_1M; | 61 | static const phys_addr_t size_bytes = CMA_SIZE_MBYTES * SZ_1M; |
| 61 | static phys_addr_t size_cmdline = -1; | 62 | static phys_addr_t size_cmdline = -1; |
| 63 | static phys_addr_t base_cmdline; | ||
| 64 | static phys_addr_t limit_cmdline; | ||
| 62 | 65 | ||
| 63 | static int __init early_cma(char *p) | 66 | static int __init early_cma(char *p) |
| 64 | { | 67 | { |
| 65 | pr_debug("%s(%s)\n", __func__, p); | 68 | pr_debug("%s(%s)\n", __func__, p); |
| 66 | size_cmdline = memparse(p, &p); | 69 | size_cmdline = memparse(p, &p); |
| 70 | if (*p != '@') | ||
| 71 | return 0; | ||
| 72 | base_cmdline = memparse(p + 1, &p); | ||
| 73 | if (*p != '-') { | ||
| 74 | limit_cmdline = base_cmdline + size_cmdline; | ||
| 75 | return 0; | ||
| 76 | } | ||
| 77 | limit_cmdline = memparse(p + 1, &p); | ||
| 78 | |||
| 67 | return 0; | 79 | return 0; |
| 68 | } | 80 | } |
| 69 | early_param("cma", early_cma); | 81 | early_param("cma", early_cma); |
| @@ -107,11 +119,18 @@ static inline __maybe_unused phys_addr_t cma_early_percent_memory(void) | |||
| 107 | void __init dma_contiguous_reserve(phys_addr_t limit) | 119 | void __init dma_contiguous_reserve(phys_addr_t limit) |
| 108 | { | 120 | { |
| 109 | phys_addr_t selected_size = 0; | 121 | phys_addr_t selected_size = 0; |
| 122 | phys_addr_t selected_base = 0; | ||
| 123 | phys_addr_t selected_limit = limit; | ||
| 124 | bool fixed = false; | ||
| 110 | 125 | ||
| 111 | pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit); | 126 | pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit); |
| 112 | 127 | ||
| 113 | if (size_cmdline != -1) { | 128 | if (size_cmdline != -1) { |
| 114 | selected_size = size_cmdline; | 129 | selected_size = size_cmdline; |
| 130 | selected_base = base_cmdline; | ||
| 131 | selected_limit = min_not_zero(limit_cmdline, limit); | ||
| 132 | if (base_cmdline + size_cmdline == limit_cmdline) | ||
| 133 | fixed = true; | ||
| 115 | } else { | 134 | } else { |
| 116 | #ifdef CONFIG_CMA_SIZE_SEL_MBYTES | 135 | #ifdef CONFIG_CMA_SIZE_SEL_MBYTES |
| 117 | selected_size = size_bytes; | 136 | selected_size = size_bytes; |
| @@ -128,10 +147,12 @@ void __init dma_contiguous_reserve(phys_addr_t limit) | |||
| 128 | pr_debug("%s: reserving %ld MiB for global area\n", __func__, | 147 | pr_debug("%s: reserving %ld MiB for global area\n", __func__, |
| 129 | (unsigned long)selected_size / SZ_1M); | 148 | (unsigned long)selected_size / SZ_1M); |
| 130 | 149 | ||
| 131 | dma_contiguous_reserve_area(selected_size, 0, limit, | 150 | dma_contiguous_reserve_area(selected_size, selected_base, |
| 132 | &dma_contiguous_default_area); | 151 | selected_limit, |
| 152 | &dma_contiguous_default_area, | ||
| 153 | fixed); | ||
| 133 | } | 154 | } |
| 134 | }; | 155 | } |
| 135 | 156 | ||
| 136 | static DEFINE_MUTEX(cma_mutex); | 157 | static DEFINE_MUTEX(cma_mutex); |
| 137 | 158 | ||
| @@ -155,13 +176,24 @@ static int __init cma_activate_area(struct cma *cma) | |||
| 155 | base_pfn = pfn; | 176 | base_pfn = pfn; |
| 156 | for (j = pageblock_nr_pages; j; --j, pfn++) { | 177 | for (j = pageblock_nr_pages; j; --j, pfn++) { |
| 157 | WARN_ON_ONCE(!pfn_valid(pfn)); | 178 | WARN_ON_ONCE(!pfn_valid(pfn)); |
| 179 | /* | ||
| 180 | * alloc_contig_range requires the pfn range | ||
| 181 | * specified to be in the same zone. Make this | ||
| 182 | * simple by forcing the entire CMA resv range | ||
| 183 | * to be in the same zone. | ||
| 184 | */ | ||
| 158 | if (page_zone(pfn_to_page(pfn)) != zone) | 185 | if (page_zone(pfn_to_page(pfn)) != zone) |
| 159 | return -EINVAL; | 186 | goto err; |
| 160 | } | 187 | } |
| 161 | init_cma_reserved_pageblock(pfn_to_page(base_pfn)); | 188 | init_cma_reserved_pageblock(pfn_to_page(base_pfn)); |
| 162 | } while (--i); | 189 | } while (--i); |
| 163 | 190 | ||
| 191 | mutex_init(&cma->lock); | ||
| 164 | return 0; | 192 | return 0; |
| 193 | |||
| 194 | err: | ||
| 195 | kfree(cma->bitmap); | ||
| 196 | return -EINVAL; | ||
| 165 | } | 197 | } |
| 166 | 198 | ||
| 167 | static struct cma cma_areas[MAX_CMA_AREAS]; | 199 | static struct cma cma_areas[MAX_CMA_AREAS]; |
| @@ -187,15 +219,20 @@ core_initcall(cma_init_reserved_areas); | |||
| 187 | * @base: Base address of the reserved area optional, use 0 for any | 219 | * @base: Base address of the reserved area optional, use 0 for any |
| 188 | * @limit: End address of the reserved memory (optional, 0 for any). | 220 | * @limit: End address of the reserved memory (optional, 0 for any). |
| 189 | * @res_cma: Pointer to store the created cma region. | 221 | * @res_cma: Pointer to store the created cma region. |
| 222 | * @fixed: hint about where to place the reserved area | ||
| 190 | * | 223 | * |
| 191 | * This function reserves memory from early allocator. It should be | 224 | * This function reserves memory from early allocator. It should be |
| 192 | * called by arch specific code once the early allocator (memblock or bootmem) | 225 | * called by arch specific code once the early allocator (memblock or bootmem) |
| 193 | * has been activated and all other subsystems have already allocated/reserved | 226 | * has been activated and all other subsystems have already allocated/reserved |
| 194 | * memory. This function allows to create custom reserved areas for specific | 227 | * memory. This function allows to create custom reserved areas for specific |
| 195 | * devices. | 228 | * devices. |
| 229 | * | ||
| 230 | * If @fixed is true, reserve contiguous area at exactly @base. If false, | ||
| 231 | * reserve in range from @base to @limit. | ||
| 196 | */ | 232 | */ |
| 197 | int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base, | 233 | int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base, |
| 198 | phys_addr_t limit, struct cma **res_cma) | 234 | phys_addr_t limit, struct cma **res_cma, |
| 235 | bool fixed) | ||
| 199 | { | 236 | { |
| 200 | struct cma *cma = &cma_areas[cma_area_count]; | 237 | struct cma *cma = &cma_areas[cma_area_count]; |
| 201 | phys_addr_t alignment; | 238 | phys_addr_t alignment; |
| @@ -221,18 +258,15 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base, | |||
| 221 | limit &= ~(alignment - 1); | 258 | limit &= ~(alignment - 1); |
| 222 | 259 | ||
| 223 | /* Reserve memory */ | 260 | /* Reserve memory */ |
| 224 | if (base) { | 261 | if (base && fixed) { |
| 225 | if (memblock_is_region_reserved(base, size) || | 262 | if (memblock_is_region_reserved(base, size) || |
| 226 | memblock_reserve(base, size) < 0) { | 263 | memblock_reserve(base, size) < 0) { |
| 227 | ret = -EBUSY; | 264 | ret = -EBUSY; |
| 228 | goto err; | 265 | goto err; |
| 229 | } | 266 | } |
| 230 | } else { | 267 | } else { |
| 231 | /* | 268 | phys_addr_t addr = memblock_alloc_range(size, alignment, base, |
| 232 | * Use __memblock_alloc_base() since | 269 | limit); |
| 233 | * memblock_alloc_base() panic()s. | ||
| 234 | */ | ||
| 235 | phys_addr_t addr = __memblock_alloc_base(size, alignment, limit); | ||
| 236 | if (!addr) { | 270 | if (!addr) { |
| 237 | ret = -ENOMEM; | 271 | ret = -ENOMEM; |
| 238 | goto err; | 272 | goto err; |
| @@ -261,6 +295,13 @@ err: | |||
| 261 | return ret; | 295 | return ret; |
| 262 | } | 296 | } |
| 263 | 297 | ||
| 298 | static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count) | ||
| 299 | { | ||
| 300 | mutex_lock(&cma->lock); | ||
| 301 | bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count); | ||
| 302 | mutex_unlock(&cma->lock); | ||
| 303 | } | ||
| 304 | |||
| 264 | /** | 305 | /** |
| 265 | * dma_alloc_from_contiguous() - allocate pages from contiguous area | 306 | * dma_alloc_from_contiguous() - allocate pages from contiguous area |
| 266 | * @dev: Pointer to device for which the allocation is performed. | 307 | * @dev: Pointer to device for which the allocation is performed. |
| @@ -269,7 +310,7 @@ err: | |||
| 269 | * | 310 | * |
| 270 | * This function allocates memory buffer for specified device. It uses | 311 | * This function allocates memory buffer for specified device. It uses |
| 271 | * device specific contiguous memory area if available or the default | 312 | * device specific contiguous memory area if available or the default |
| 272 | * global one. Requires architecture specific get_dev_cma_area() helper | 313 | * global one. Requires architecture specific dev_get_cma_area() helper |
| 273 | * function. | 314 | * function. |
| 274 | */ | 315 | */ |
| 275 | struct page *dma_alloc_from_contiguous(struct device *dev, int count, | 316 | struct page *dma_alloc_from_contiguous(struct device *dev, int count, |
| @@ -294,30 +335,41 @@ struct page *dma_alloc_from_contiguous(struct device *dev, int count, | |||
| 294 | 335 | ||
| 295 | mask = (1 << align) - 1; | 336 | mask = (1 << align) - 1; |
| 296 | 337 | ||
| 297 | mutex_lock(&cma_mutex); | ||
| 298 | 338 | ||
| 299 | for (;;) { | 339 | for (;;) { |
| 340 | mutex_lock(&cma->lock); | ||
| 300 | pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count, | 341 | pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count, |
| 301 | start, count, mask); | 342 | start, count, mask); |
| 302 | if (pageno >= cma->count) | 343 | if (pageno >= cma->count) { |
| 344 | mutex_unlock(&cma->lock); | ||
| 303 | break; | 345 | break; |
| 346 | } | ||
| 347 | bitmap_set(cma->bitmap, pageno, count); | ||
| 348 | /* | ||
| 349 | * It's safe to drop the lock here. We've marked this region for | ||
| 350 | * our exclusive use. If the migration fails we will take the | ||
| 351 | * lock again and unmark it. | ||
| 352 | */ | ||
| 353 | mutex_unlock(&cma->lock); | ||
| 304 | 354 | ||
| 305 | pfn = cma->base_pfn + pageno; | 355 | pfn = cma->base_pfn + pageno; |
| 356 | mutex_lock(&cma_mutex); | ||
| 306 | ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA); | 357 | ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA); |
| 358 | mutex_unlock(&cma_mutex); | ||
| 307 | if (ret == 0) { | 359 | if (ret == 0) { |
| 308 | bitmap_set(cma->bitmap, pageno, count); | ||
| 309 | page = pfn_to_page(pfn); | 360 | page = pfn_to_page(pfn); |
| 310 | break; | 361 | break; |
| 311 | } else if (ret != -EBUSY) { | 362 | } else if (ret != -EBUSY) { |
| 363 | clear_cma_bitmap(cma, pfn, count); | ||
| 312 | break; | 364 | break; |
| 313 | } | 365 | } |
| 366 | clear_cma_bitmap(cma, pfn, count); | ||
| 314 | pr_debug("%s(): memory range at %p is busy, retrying\n", | 367 | pr_debug("%s(): memory range at %p is busy, retrying\n", |
| 315 | __func__, pfn_to_page(pfn)); | 368 | __func__, pfn_to_page(pfn)); |
| 316 | /* try again with a bit different memory target */ | 369 | /* try again with a bit different memory target */ |
| 317 | start = pageno + mask + 1; | 370 | start = pageno + mask + 1; |
| 318 | } | 371 | } |
| 319 | 372 | ||
| 320 | mutex_unlock(&cma_mutex); | ||
| 321 | pr_debug("%s(): returned %p\n", __func__, page); | 373 | pr_debug("%s(): returned %p\n", __func__, page); |
| 322 | return page; | 374 | return page; |
| 323 | } | 375 | } |
| @@ -350,10 +402,8 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages, | |||
| 350 | 402 | ||
| 351 | VM_BUG_ON(pfn + count > cma->base_pfn + cma->count); | 403 | VM_BUG_ON(pfn + count > cma->base_pfn + cma->count); |
| 352 | 404 | ||
| 353 | mutex_lock(&cma_mutex); | ||
| 354 | bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count); | ||
| 355 | free_contig_range(pfn, count); | 405 | free_contig_range(pfn, count); |
| 356 | mutex_unlock(&cma_mutex); | 406 | clear_cma_bitmap(cma, pfn, count); |
| 357 | 407 | ||
| 358 | return true; | 408 | return true; |
| 359 | } | 409 | } |
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c index 0ce39a33b3c2..6cd08e145bfa 100644 --- a/drivers/base/dma-mapping.c +++ b/drivers/base/dma-mapping.c | |||
| @@ -175,7 +175,7 @@ static void dmam_coherent_decl_release(struct device *dev, void *res) | |||
| 175 | /** | 175 | /** |
| 176 | * dmam_declare_coherent_memory - Managed dma_declare_coherent_memory() | 176 | * dmam_declare_coherent_memory - Managed dma_declare_coherent_memory() |
| 177 | * @dev: Device to declare coherent memory for | 177 | * @dev: Device to declare coherent memory for |
| 178 | * @bus_addr: Bus address of coherent memory to be declared | 178 | * @phys_addr: Physical address of coherent memory to be declared |
| 179 | * @device_addr: Device address of coherent memory to be declared | 179 | * @device_addr: Device address of coherent memory to be declared |
| 180 | * @size: Size of coherent memory to be declared | 180 | * @size: Size of coherent memory to be declared |
| 181 | * @flags: Flags | 181 | * @flags: Flags |
| @@ -185,7 +185,7 @@ static void dmam_coherent_decl_release(struct device *dev, void *res) | |||
| 185 | * RETURNS: | 185 | * RETURNS: |
| 186 | * 0 on success, -errno on failure. | 186 | * 0 on success, -errno on failure. |
| 187 | */ | 187 | */ |
| 188 | int dmam_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, | 188 | int dmam_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, |
| 189 | dma_addr_t device_addr, size_t size, int flags) | 189 | dma_addr_t device_addr, size_t size, int flags) |
| 190 | { | 190 | { |
| 191 | void *res; | 191 | void *res; |
| @@ -195,7 +195,7 @@ int dmam_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, | |||
| 195 | if (!res) | 195 | if (!res) |
| 196 | return -ENOMEM; | 196 | return -ENOMEM; |
| 197 | 197 | ||
| 198 | rc = dma_declare_coherent_memory(dev, bus_addr, device_addr, size, | 198 | rc = dma_declare_coherent_memory(dev, phys_addr, device_addr, size, |
| 199 | flags); | 199 | flags); |
| 200 | if (rc == 0) | 200 | if (rc == 0) |
| 201 | devres_add(dev, res); | 201 | devres_add(dev, res); |
diff --git a/drivers/base/memory.c b/drivers/base/memory.c index bece691cb5d9..89f752dd8465 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c | |||
| @@ -118,16 +118,6 @@ static ssize_t show_mem_start_phys_index(struct device *dev, | |||
| 118 | return sprintf(buf, "%08lx\n", phys_index); | 118 | return sprintf(buf, "%08lx\n", phys_index); |
| 119 | } | 119 | } |
| 120 | 120 | ||
| 121 | static ssize_t show_mem_end_phys_index(struct device *dev, | ||
| 122 | struct device_attribute *attr, char *buf) | ||
| 123 | { | ||
| 124 | struct memory_block *mem = to_memory_block(dev); | ||
| 125 | unsigned long phys_index; | ||
| 126 | |||
| 127 | phys_index = mem->end_section_nr / sections_per_block; | ||
| 128 | return sprintf(buf, "%08lx\n", phys_index); | ||
| 129 | } | ||
| 130 | |||
| 131 | /* | 121 | /* |
| 132 | * Show whether the section of memory is likely to be hot-removable | 122 | * Show whether the section of memory is likely to be hot-removable |
| 133 | */ | 123 | */ |
| @@ -384,7 +374,6 @@ static ssize_t show_phys_device(struct device *dev, | |||
| 384 | } | 374 | } |
| 385 | 375 | ||
| 386 | static DEVICE_ATTR(phys_index, 0444, show_mem_start_phys_index, NULL); | 376 | static DEVICE_ATTR(phys_index, 0444, show_mem_start_phys_index, NULL); |
| 387 | static DEVICE_ATTR(end_phys_index, 0444, show_mem_end_phys_index, NULL); | ||
| 388 | static DEVICE_ATTR(state, 0644, show_mem_state, store_mem_state); | 377 | static DEVICE_ATTR(state, 0644, show_mem_state, store_mem_state); |
| 389 | static DEVICE_ATTR(phys_device, 0444, show_phys_device, NULL); | 378 | static DEVICE_ATTR(phys_device, 0444, show_phys_device, NULL); |
| 390 | static DEVICE_ATTR(removable, 0444, show_mem_removable, NULL); | 379 | static DEVICE_ATTR(removable, 0444, show_mem_removable, NULL); |
| @@ -529,7 +518,6 @@ struct memory_block *find_memory_block(struct mem_section *section) | |||
| 529 | 518 | ||
| 530 | static struct attribute *memory_memblk_attrs[] = { | 519 | static struct attribute *memory_memblk_attrs[] = { |
| 531 | &dev_attr_phys_index.attr, | 520 | &dev_attr_phys_index.attr, |
| 532 | &dev_attr_end_phys_index.attr, | ||
| 533 | &dev_attr_state.attr, | 521 | &dev_attr_state.attr, |
| 534 | &dev_attr_phys_device.attr, | 522 | &dev_attr_phys_device.attr, |
| 535 | &dev_attr_removable.attr, | 523 | &dev_attr_removable.attr, |
diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 5b47210889e0..eee48c49f5de 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c | |||
| @@ -89,8 +89,13 @@ int platform_get_irq(struct platform_device *dev, unsigned int num) | |||
| 89 | return dev->archdata.irqs[num]; | 89 | return dev->archdata.irqs[num]; |
| 90 | #else | 90 | #else |
| 91 | struct resource *r; | 91 | struct resource *r; |
| 92 | if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) | 92 | if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) { |
| 93 | return of_irq_get(dev->dev.of_node, num); | 93 | int ret; |
| 94 | |||
| 95 | ret = of_irq_get(dev->dev.of_node, num); | ||
| 96 | if (ret >= 0 || ret == -EPROBE_DEFER) | ||
| 97 | return ret; | ||
| 98 | } | ||
| 94 | 99 | ||
| 95 | r = platform_get_resource(dev, IORESOURCE_IRQ, num); | 100 | r = platform_get_resource(dev, IORESOURCE_IRQ, num); |
| 96 | 101 | ||
| @@ -131,9 +136,17 @@ EXPORT_SYMBOL_GPL(platform_get_resource_byname); | |||
| 131 | */ | 136 | */ |
| 132 | int platform_get_irq_byname(struct platform_device *dev, const char *name) | 137 | int platform_get_irq_byname(struct platform_device *dev, const char *name) |
| 133 | { | 138 | { |
| 134 | struct resource *r = platform_get_resource_byname(dev, IORESOURCE_IRQ, | 139 | struct resource *r; |
| 135 | name); | 140 | |
| 141 | if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) { | ||
| 142 | int ret; | ||
| 143 | |||
| 144 | ret = of_irq_get_byname(dev->dev.of_node, name); | ||
| 145 | if (ret >= 0 || ret == -EPROBE_DEFER) | ||
| 146 | return ret; | ||
| 147 | } | ||
| 136 | 148 | ||
| 149 | r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name); | ||
| 137 | return r ? r->start : -ENXIO; | 150 | return r ? r->start : -ENXIO; |
| 138 | } | 151 | } |
| 139 | EXPORT_SYMBOL_GPL(platform_get_irq_byname); | 152 | EXPORT_SYMBOL_GPL(platform_get_irq_byname); |
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index ae098a261fcd..eee55c1e5fde 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c | |||
| @@ -105,7 +105,7 @@ static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd) | |||
| 105 | static void genpd_sd_counter_inc(struct generic_pm_domain *genpd) | 105 | static void genpd_sd_counter_inc(struct generic_pm_domain *genpd) |
| 106 | { | 106 | { |
| 107 | atomic_inc(&genpd->sd_count); | 107 | atomic_inc(&genpd->sd_count); |
| 108 | smp_mb__after_atomic_inc(); | 108 | smp_mb__after_atomic(); |
| 109 | } | 109 | } |
| 110 | 110 | ||
| 111 | static void genpd_acquire_lock(struct generic_pm_domain *genpd) | 111 | static void genpd_acquire_lock(struct generic_pm_domain *genpd) |
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 86d5e4fb5b98..bf412961a934 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c | |||
| @@ -214,9 +214,6 @@ static void initcall_debug_report(struct device *dev, ktime_t calltime, | |||
| 214 | pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev), | 214 | pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev), |
| 215 | error, (unsigned long long)nsecs >> 10); | 215 | error, (unsigned long long)nsecs >> 10); |
| 216 | } | 216 | } |
| 217 | |||
| 218 | trace_device_pm_report_time(dev, info, nsecs, pm_verb(state.event), | ||
| 219 | error); | ||
| 220 | } | 217 | } |
| 221 | 218 | ||
| 222 | /** | 219 | /** |
| @@ -387,7 +384,9 @@ static int dpm_run_callback(pm_callback_t cb, struct device *dev, | |||
| 387 | calltime = initcall_debug_start(dev); | 384 | calltime = initcall_debug_start(dev); |
| 388 | 385 | ||
| 389 | pm_dev_dbg(dev, state, info); | 386 | pm_dev_dbg(dev, state, info); |
| 387 | trace_device_pm_callback_start(dev, info, state.event); | ||
| 390 | error = cb(dev); | 388 | error = cb(dev); |
| 389 | trace_device_pm_callback_end(dev, error); | ||
| 391 | suspend_report_result(cb, error); | 390 | suspend_report_result(cb, error); |
| 392 | 391 | ||
| 393 | initcall_debug_report(dev, calltime, error, state, info); | 392 | initcall_debug_report(dev, calltime, error, state, info); |
| @@ -479,7 +478,7 @@ static int device_resume_noirq(struct device *dev, pm_message_t state, bool asyn | |||
| 479 | TRACE_DEVICE(dev); | 478 | TRACE_DEVICE(dev); |
| 480 | TRACE_RESUME(0); | 479 | TRACE_RESUME(0); |
| 481 | 480 | ||
| 482 | if (dev->power.syscore) | 481 | if (dev->power.syscore || dev->power.direct_complete) |
| 483 | goto Out; | 482 | goto Out; |
| 484 | 483 | ||
| 485 | if (!dev->power.is_noirq_suspended) | 484 | if (!dev->power.is_noirq_suspended) |
| @@ -545,6 +544,7 @@ static void dpm_resume_noirq(pm_message_t state) | |||
| 545 | struct device *dev; | 544 | struct device *dev; |
| 546 | ktime_t starttime = ktime_get(); | 545 | ktime_t starttime = ktime_get(); |
| 547 | 546 | ||
| 547 | trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true); | ||
| 548 | mutex_lock(&dpm_list_mtx); | 548 | mutex_lock(&dpm_list_mtx); |
| 549 | pm_transition = state; | 549 | pm_transition = state; |
| 550 | 550 | ||
| @@ -587,6 +587,7 @@ static void dpm_resume_noirq(pm_message_t state) | |||
| 587 | dpm_show_time(starttime, state, "noirq"); | 587 | dpm_show_time(starttime, state, "noirq"); |
| 588 | resume_device_irqs(); | 588 | resume_device_irqs(); |
| 589 | cpuidle_resume(); | 589 | cpuidle_resume(); |
| 590 | trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false); | ||
| 590 | } | 591 | } |
| 591 | 592 | ||
| 592 | /** | 593 | /** |
| @@ -605,7 +606,7 @@ static int device_resume_early(struct device *dev, pm_message_t state, bool asyn | |||
| 605 | TRACE_DEVICE(dev); | 606 | TRACE_DEVICE(dev); |
| 606 | TRACE_RESUME(0); | 607 | TRACE_RESUME(0); |
| 607 | 608 | ||
| 608 | if (dev->power.syscore) | 609 | if (dev->power.syscore || dev->power.direct_complete) |
| 609 | goto Out; | 610 | goto Out; |
| 610 | 611 | ||
| 611 | if (!dev->power.is_late_suspended) | 612 | if (!dev->power.is_late_suspended) |
| @@ -664,6 +665,7 @@ static void dpm_resume_early(pm_message_t state) | |||
| 664 | struct device *dev; | 665 | struct device *dev; |
| 665 | ktime_t starttime = ktime_get(); | 666 | ktime_t starttime = ktime_get(); |
| 666 | 667 | ||
| 668 | trace_suspend_resume(TPS("dpm_resume_early"), state.event, true); | ||
| 667 | mutex_lock(&dpm_list_mtx); | 669 | mutex_lock(&dpm_list_mtx); |
| 668 | pm_transition = state; | 670 | pm_transition = state; |
| 669 | 671 | ||
| @@ -703,6 +705,7 @@ static void dpm_resume_early(pm_message_t state) | |||
| 703 | mutex_unlock(&dpm_list_mtx); | 705 | mutex_unlock(&dpm_list_mtx); |
| 704 | async_synchronize_full(); | 706 | async_synchronize_full(); |
| 705 | dpm_show_time(starttime, state, "early"); | 707 | dpm_show_time(starttime, state, "early"); |
| 708 | trace_suspend_resume(TPS("dpm_resume_early"), state.event, false); | ||
| 706 | } | 709 | } |
| 707 | 710 | ||
| 708 | /** | 711 | /** |
| @@ -735,6 +738,12 @@ static int device_resume(struct device *dev, pm_message_t state, bool async) | |||
| 735 | if (dev->power.syscore) | 738 | if (dev->power.syscore) |
| 736 | goto Complete; | 739 | goto Complete; |
| 737 | 740 | ||
| 741 | if (dev->power.direct_complete) { | ||
| 742 | /* Match the pm_runtime_disable() in __device_suspend(). */ | ||
| 743 | pm_runtime_enable(dev); | ||
| 744 | goto Complete; | ||
| 745 | } | ||
| 746 | |||
| 738 | dpm_wait(dev->parent, async); | 747 | dpm_wait(dev->parent, async); |
| 739 | dpm_watchdog_set(&wd, dev); | 748 | dpm_watchdog_set(&wd, dev); |
| 740 | device_lock(dev); | 749 | device_lock(dev); |
| @@ -828,6 +837,7 @@ void dpm_resume(pm_message_t state) | |||
| 828 | struct device *dev; | 837 | struct device *dev; |
| 829 | ktime_t starttime = ktime_get(); | 838 | ktime_t starttime = ktime_get(); |
| 830 | 839 | ||
| 840 | trace_suspend_resume(TPS("dpm_resume"), state.event, true); | ||
| 831 | might_sleep(); | 841 | might_sleep(); |
| 832 | 842 | ||
| 833 | mutex_lock(&dpm_list_mtx); | 843 | mutex_lock(&dpm_list_mtx); |
| @@ -869,6 +879,7 @@ void dpm_resume(pm_message_t state) | |||
| 869 | dpm_show_time(starttime, state, NULL); | 879 | dpm_show_time(starttime, state, NULL); |
| 870 | 880 | ||
| 871 | cpufreq_resume(); | 881 | cpufreq_resume(); |
| 882 | trace_suspend_resume(TPS("dpm_resume"), state.event, false); | ||
| 872 | } | 883 | } |
| 873 | 884 | ||
| 874 | /** | 885 | /** |
| @@ -907,7 +918,9 @@ static void device_complete(struct device *dev, pm_message_t state) | |||
| 907 | 918 | ||
| 908 | if (callback) { | 919 | if (callback) { |
| 909 | pm_dev_dbg(dev, state, info); | 920 | pm_dev_dbg(dev, state, info); |
| 921 | trace_device_pm_callback_start(dev, info, state.event); | ||
| 910 | callback(dev); | 922 | callback(dev); |
| 923 | trace_device_pm_callback_end(dev, 0); | ||
| 911 | } | 924 | } |
| 912 | 925 | ||
| 913 | device_unlock(dev); | 926 | device_unlock(dev); |
| @@ -926,6 +939,7 @@ void dpm_complete(pm_message_t state) | |||
| 926 | { | 939 | { |
| 927 | struct list_head list; | 940 | struct list_head list; |
| 928 | 941 | ||
| 942 | trace_suspend_resume(TPS("dpm_complete"), state.event, true); | ||
| 929 | might_sleep(); | 943 | might_sleep(); |
| 930 | 944 | ||
| 931 | INIT_LIST_HEAD(&list); | 945 | INIT_LIST_HEAD(&list); |
| @@ -945,6 +959,7 @@ void dpm_complete(pm_message_t state) | |||
| 945 | } | 959 | } |
| 946 | list_splice(&list, &dpm_list); | 960 | list_splice(&list, &dpm_list); |
| 947 | mutex_unlock(&dpm_list_mtx); | 961 | mutex_unlock(&dpm_list_mtx); |
| 962 | trace_suspend_resume(TPS("dpm_complete"), state.event, false); | ||
| 948 | } | 963 | } |
| 949 | 964 | ||
| 950 | /** | 965 | /** |
| @@ -1007,7 +1022,7 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a | |||
| 1007 | goto Complete; | 1022 | goto Complete; |
| 1008 | } | 1023 | } |
| 1009 | 1024 | ||
| 1010 | if (dev->power.syscore) | 1025 | if (dev->power.syscore || dev->power.direct_complete) |
| 1011 | goto Complete; | 1026 | goto Complete; |
| 1012 | 1027 | ||
| 1013 | dpm_wait_for_children(dev, async); | 1028 | dpm_wait_for_children(dev, async); |
| @@ -1080,6 +1095,7 @@ static int dpm_suspend_noirq(pm_message_t state) | |||
| 1080 | ktime_t starttime = ktime_get(); | 1095 | ktime_t starttime = ktime_get(); |
| 1081 | int error = 0; | 1096 | int error = 0; |
| 1082 | 1097 | ||
| 1098 | trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true); | ||
| 1083 | cpuidle_pause(); | 1099 | cpuidle_pause(); |
| 1084 | suspend_device_irqs(); | 1100 | suspend_device_irqs(); |
| 1085 | mutex_lock(&dpm_list_mtx); | 1101 | mutex_lock(&dpm_list_mtx); |
| @@ -1120,6 +1136,7 @@ static int dpm_suspend_noirq(pm_message_t state) | |||
| 1120 | } else { | 1136 | } else { |
| 1121 | dpm_show_time(starttime, state, "noirq"); | 1137 | dpm_show_time(starttime, state, "noirq"); |
| 1122 | } | 1138 | } |
| 1139 | trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false); | ||
| 1123 | return error; | 1140 | return error; |
| 1124 | } | 1141 | } |
| 1125 | 1142 | ||
| @@ -1146,7 +1163,7 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as | |||
| 1146 | goto Complete; | 1163 | goto Complete; |
| 1147 | } | 1164 | } |
| 1148 | 1165 | ||
| 1149 | if (dev->power.syscore) | 1166 | if (dev->power.syscore || dev->power.direct_complete) |
| 1150 | goto Complete; | 1167 | goto Complete; |
| 1151 | 1168 | ||
| 1152 | dpm_wait_for_children(dev, async); | 1169 | dpm_wait_for_children(dev, async); |
| @@ -1216,6 +1233,7 @@ static int dpm_suspend_late(pm_message_t state) | |||
| 1216 | ktime_t starttime = ktime_get(); | 1233 | ktime_t starttime = ktime_get(); |
| 1217 | int error = 0; | 1234 | int error = 0; |
| 1218 | 1235 | ||
| 1236 | trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true); | ||
| 1219 | mutex_lock(&dpm_list_mtx); | 1237 | mutex_lock(&dpm_list_mtx); |
| 1220 | pm_transition = state; | 1238 | pm_transition = state; |
| 1221 | async_error = 0; | 1239 | async_error = 0; |
| @@ -1251,6 +1269,7 @@ static int dpm_suspend_late(pm_message_t state) | |||
| 1251 | } else { | 1269 | } else { |
| 1252 | dpm_show_time(starttime, state, "late"); | 1270 | dpm_show_time(starttime, state, "late"); |
| 1253 | } | 1271 | } |
| 1272 | trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false); | ||
| 1254 | return error; | 1273 | return error; |
| 1255 | } | 1274 | } |
| 1256 | 1275 | ||
| @@ -1289,7 +1308,9 @@ static int legacy_suspend(struct device *dev, pm_message_t state, | |||
| 1289 | 1308 | ||
| 1290 | calltime = initcall_debug_start(dev); | 1309 | calltime = initcall_debug_start(dev); |
| 1291 | 1310 | ||
| 1311 | trace_device_pm_callback_start(dev, info, state.event); | ||
| 1292 | error = cb(dev, state); | 1312 | error = cb(dev, state); |
| 1313 | trace_device_pm_callback_end(dev, error); | ||
| 1293 | suspend_report_result(cb, error); | 1314 | suspend_report_result(cb, error); |
| 1294 | 1315 | ||
| 1295 | initcall_debug_report(dev, calltime, error, state, info); | 1316 | initcall_debug_report(dev, calltime, error, state, info); |
| @@ -1332,6 +1353,17 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) | |||
| 1332 | if (dev->power.syscore) | 1353 | if (dev->power.syscore) |
| 1333 | goto Complete; | 1354 | goto Complete; |
| 1334 | 1355 | ||
| 1356 | if (dev->power.direct_complete) { | ||
| 1357 | if (pm_runtime_status_suspended(dev)) { | ||
| 1358 | pm_runtime_disable(dev); | ||
| 1359 | if (pm_runtime_suspended_if_enabled(dev)) | ||
| 1360 | goto Complete; | ||
| 1361 | |||
| 1362 | pm_runtime_enable(dev); | ||
| 1363 | } | ||
| 1364 | dev->power.direct_complete = false; | ||
| 1365 | } | ||
| 1366 | |||
| 1335 | dpm_watchdog_set(&wd, dev); | 1367 | dpm_watchdog_set(&wd, dev); |
| 1336 | device_lock(dev); | 1368 | device_lock(dev); |
| 1337 | 1369 | ||
| @@ -1382,10 +1414,19 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) | |||
| 1382 | 1414 | ||
| 1383 | End: | 1415 | End: |
| 1384 | if (!error) { | 1416 | if (!error) { |
| 1417 | struct device *parent = dev->parent; | ||
| 1418 | |||
| 1385 | dev->power.is_suspended = true; | 1419 | dev->power.is_suspended = true; |
| 1386 | if (dev->power.wakeup_path | 1420 | if (parent) { |
| 1387 | && dev->parent && !dev->parent->power.ignore_children) | 1421 | spin_lock_irq(&parent->power.lock); |
| 1388 | dev->parent->power.wakeup_path = true; | 1422 | |
| 1423 | dev->parent->power.direct_complete = false; | ||
| 1424 | if (dev->power.wakeup_path | ||
| 1425 | && !dev->parent->power.ignore_children) | ||
| 1426 | dev->parent->power.wakeup_path = true; | ||
| 1427 | |||
| 1428 | spin_unlock_irq(&parent->power.lock); | ||
| 1429 | } | ||
| 1389 | } | 1430 | } |
| 1390 | 1431 | ||
| 1391 | device_unlock(dev); | 1432 | device_unlock(dev); |
| @@ -1435,6 +1476,7 @@ int dpm_suspend(pm_message_t state) | |||
| 1435 | ktime_t starttime = ktime_get(); | 1476 | ktime_t starttime = ktime_get(); |
| 1436 | int error = 0; | 1477 | int error = 0; |
| 1437 | 1478 | ||
| 1479 | trace_suspend_resume(TPS("dpm_suspend"), state.event, true); | ||
| 1438 | might_sleep(); | 1480 | might_sleep(); |
| 1439 | 1481 | ||
| 1440 | cpufreq_suspend(); | 1482 | cpufreq_suspend(); |
| @@ -1472,6 +1514,7 @@ int dpm_suspend(pm_message_t state) | |||
| 1472 | dpm_save_failed_step(SUSPEND_SUSPEND); | 1514 | dpm_save_failed_step(SUSPEND_SUSPEND); |
| 1473 | } else | 1515 | } else |
| 1474 | dpm_show_time(starttime, state, NULL); | 1516 | dpm_show_time(starttime, state, NULL); |
| 1517 | trace_suspend_resume(TPS("dpm_suspend"), state.event, false); | ||
| 1475 | return error; | 1518 | return error; |
| 1476 | } | 1519 | } |
| 1477 | 1520 | ||
| @@ -1487,7 +1530,7 @@ static int device_prepare(struct device *dev, pm_message_t state) | |||
| 1487 | { | 1530 | { |
| 1488 | int (*callback)(struct device *) = NULL; | 1531 | int (*callback)(struct device *) = NULL; |
| 1489 | char *info = NULL; | 1532 | char *info = NULL; |
| 1490 | int error = 0; | 1533 | int ret = 0; |
| 1491 | 1534 | ||
| 1492 | if (dev->power.syscore) | 1535 | if (dev->power.syscore) |
| 1493 | return 0; | 1536 | return 0; |
| @@ -1524,16 +1567,29 @@ static int device_prepare(struct device *dev, pm_message_t state) | |||
| 1524 | } | 1567 | } |
| 1525 | 1568 | ||
| 1526 | if (callback) { | 1569 | if (callback) { |
| 1527 | error = callback(dev); | 1570 | trace_device_pm_callback_start(dev, info, state.event); |
| 1528 | suspend_report_result(callback, error); | 1571 | ret = callback(dev); |
| 1572 | trace_device_pm_callback_end(dev, ret); | ||
| 1529 | } | 1573 | } |
| 1530 | 1574 | ||
| 1531 | device_unlock(dev); | 1575 | device_unlock(dev); |
| 1532 | 1576 | ||
| 1533 | if (error) | 1577 | if (ret < 0) { |
| 1578 | suspend_report_result(callback, ret); | ||
| 1534 | pm_runtime_put(dev); | 1579 | pm_runtime_put(dev); |
| 1535 | 1580 | return ret; | |
| 1536 | return error; | 1581 | } |
| 1582 | /* | ||
| 1583 | * A positive return value from ->prepare() means "this device appears | ||
| 1584 | * to be runtime-suspended and its state is fine, so if it really is | ||
| 1585 | * runtime-suspended, you can leave it in that state provided that you | ||
| 1586 | * will do the same thing with all of its descendants". This only | ||
| 1587 | * applies to suspend transitions, however. | ||
| 1588 | */ | ||
| 1589 | spin_lock_irq(&dev->power.lock); | ||
| 1590 | dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND; | ||
| 1591 | spin_unlock_irq(&dev->power.lock); | ||
| 1592 | return 0; | ||
| 1537 | } | 1593 | } |
| 1538 | 1594 | ||
| 1539 | /** | 1595 | /** |
| @@ -1546,6 +1602,7 @@ int dpm_prepare(pm_message_t state) | |||
| 1546 | { | 1602 | { |
| 1547 | int error = 0; | 1603 | int error = 0; |
| 1548 | 1604 | ||
| 1605 | trace_suspend_resume(TPS("dpm_prepare"), state.event, true); | ||
| 1549 | might_sleep(); | 1606 | might_sleep(); |
| 1550 | 1607 | ||
| 1551 | mutex_lock(&dpm_list_mtx); | 1608 | mutex_lock(&dpm_list_mtx); |
| @@ -1576,6 +1633,7 @@ int dpm_prepare(pm_message_t state) | |||
| 1576 | put_device(dev); | 1633 | put_device(dev); |
| 1577 | } | 1634 | } |
| 1578 | mutex_unlock(&dpm_list_mtx); | 1635 | mutex_unlock(&dpm_list_mtx); |
| 1636 | trace_suspend_resume(TPS("dpm_prepare"), state.event, false); | ||
| 1579 | return error; | 1637 | return error; |
| 1580 | } | 1638 | } |
| 1581 | 1639 | ||
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c index 25538675d59e..89ced955fafa 100644 --- a/drivers/base/power/opp.c +++ b/drivers/base/power/opp.c | |||
| @@ -15,7 +15,6 @@ | |||
| 15 | #include <linux/errno.h> | 15 | #include <linux/errno.h> |
| 16 | #include <linux/err.h> | 16 | #include <linux/err.h> |
| 17 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
| 18 | #include <linux/cpufreq.h> | ||
| 19 | #include <linux/device.h> | 18 | #include <linux/device.h> |
| 20 | #include <linux/list.h> | 19 | #include <linux/list.h> |
| 21 | #include <linux/rculist.h> | 20 | #include <linux/rculist.h> |
| @@ -394,6 +393,13 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor); | |||
| 394 | * to keep the integrity of the internal data structures. Callers should ensure | 393 | * to keep the integrity of the internal data structures. Callers should ensure |
| 395 | * that this function is *NOT* called under RCU protection or in contexts where | 394 | * that this function is *NOT* called under RCU protection or in contexts where |
| 396 | * mutex cannot be locked. | 395 | * mutex cannot be locked. |
| 396 | * | ||
| 397 | * Return: | ||
| 398 | * 0: On success OR | ||
| 399 | * Duplicate OPPs (both freq and volt are same) and opp->available | ||
| 400 | * -EEXIST: Freq are same and volt are different OR | ||
| 401 | * Duplicate OPPs (both freq and volt are same) and !opp->available | ||
| 402 | * -ENOMEM: Memory allocation failure | ||
| 397 | */ | 403 | */ |
| 398 | int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) | 404 | int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) |
| 399 | { | 405 | { |
| @@ -443,15 +449,31 @@ int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) | |||
| 443 | new_opp->u_volt = u_volt; | 449 | new_opp->u_volt = u_volt; |
| 444 | new_opp->available = true; | 450 | new_opp->available = true; |
| 445 | 451 | ||
| 446 | /* Insert new OPP in order of increasing frequency */ | 452 | /* |
| 453 | * Insert new OPP in order of increasing frequency | ||
| 454 | * and discard if already present | ||
| 455 | */ | ||
| 447 | head = &dev_opp->opp_list; | 456 | head = &dev_opp->opp_list; |
| 448 | list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) { | 457 | list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) { |
| 449 | if (new_opp->rate < opp->rate) | 458 | if (new_opp->rate <= opp->rate) |
| 450 | break; | 459 | break; |
| 451 | else | 460 | else |
| 452 | head = &opp->node; | 461 | head = &opp->node; |
| 453 | } | 462 | } |
| 454 | 463 | ||
| 464 | /* Duplicate OPPs ? */ | ||
| 465 | if (new_opp->rate == opp->rate) { | ||
| 466 | int ret = opp->available && new_opp->u_volt == opp->u_volt ? | ||
| 467 | 0 : -EEXIST; | ||
| 468 | |||
| 469 | dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n", | ||
| 470 | __func__, opp->rate, opp->u_volt, opp->available, | ||
| 471 | new_opp->rate, new_opp->u_volt, new_opp->available); | ||
| 472 | mutex_unlock(&dev_opp_list_lock); | ||
| 473 | kfree(new_opp); | ||
| 474 | return ret; | ||
| 475 | } | ||
| 476 | |||
| 455 | list_add_rcu(&new_opp->node, head); | 477 | list_add_rcu(&new_opp->node, head); |
| 456 | mutex_unlock(&dev_opp_list_lock); | 478 | mutex_unlock(&dev_opp_list_lock); |
| 457 | 479 | ||
| @@ -596,96 +618,6 @@ int dev_pm_opp_disable(struct device *dev, unsigned long freq) | |||
| 596 | } | 618 | } |
| 597 | EXPORT_SYMBOL_GPL(dev_pm_opp_disable); | 619 | EXPORT_SYMBOL_GPL(dev_pm_opp_disable); |
| 598 | 620 | ||
| 599 | #ifdef CONFIG_CPU_FREQ | ||
| 600 | /** | ||
| 601 | * dev_pm_opp_init_cpufreq_table() - create a cpufreq table for a device | ||
| 602 | * @dev: device for which we do this operation | ||
| 603 | * @table: Cpufreq table returned back to caller | ||
| 604 | * | ||
| 605 | * Generate a cpufreq table for a provided device- this assumes that the | ||
| 606 | * opp list is already initialized and ready for usage. | ||
| 607 | * | ||
| 608 | * This function allocates required memory for the cpufreq table. It is | ||
| 609 | * expected that the caller does the required maintenance such as freeing | ||
| 610 | * the table as required. | ||
| 611 | * | ||
| 612 | * Returns -EINVAL for bad pointers, -ENODEV if the device is not found, -ENOMEM | ||
| 613 | * if no memory available for the operation (table is not populated), returns 0 | ||
| 614 | * if successful and table is populated. | ||
| 615 | * | ||
| 616 | * WARNING: It is important for the callers to ensure refreshing their copy of | ||
| 617 | * the table if any of the mentioned functions have been invoked in the interim. | ||
| 618 | * | ||
| 619 | * Locking: The internal device_opp and opp structures are RCU protected. | ||
| 620 | * To simplify the logic, we pretend we are updater and hold relevant mutex here | ||
| 621 | * Callers should ensure that this function is *NOT* called under RCU protection | ||
| 622 | * or in contexts where mutex locking cannot be used. | ||
| 623 | */ | ||
| 624 | int dev_pm_opp_init_cpufreq_table(struct device *dev, | ||
| 625 | struct cpufreq_frequency_table **table) | ||
| 626 | { | ||
| 627 | struct device_opp *dev_opp; | ||
| 628 | struct dev_pm_opp *opp; | ||
| 629 | struct cpufreq_frequency_table *freq_table; | ||
| 630 | int i = 0; | ||
| 631 | |||
| 632 | /* Pretend as if I am an updater */ | ||
| 633 | mutex_lock(&dev_opp_list_lock); | ||
| 634 | |||
| 635 | dev_opp = find_device_opp(dev); | ||
| 636 | if (IS_ERR(dev_opp)) { | ||
| 637 | int r = PTR_ERR(dev_opp); | ||
| 638 | mutex_unlock(&dev_opp_list_lock); | ||
| 639 | dev_err(dev, "%s: Device OPP not found (%d)\n", __func__, r); | ||
| 640 | return r; | ||
| 641 | } | ||
| 642 | |||
| 643 | freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) * | ||
| 644 | (dev_pm_opp_get_opp_count(dev) + 1), GFP_KERNEL); | ||
| 645 | if (!freq_table) { | ||
| 646 | mutex_unlock(&dev_opp_list_lock); | ||
| 647 | dev_warn(dev, "%s: Unable to allocate frequency table\n", | ||
| 648 | __func__); | ||
| 649 | return -ENOMEM; | ||
| 650 | } | ||
| 651 | |||
| 652 | list_for_each_entry(opp, &dev_opp->opp_list, node) { | ||
| 653 | if (opp->available) { | ||
| 654 | freq_table[i].driver_data = i; | ||
| 655 | freq_table[i].frequency = opp->rate / 1000; | ||
| 656 | i++; | ||
| 657 | } | ||
| 658 | } | ||
| 659 | mutex_unlock(&dev_opp_list_lock); | ||
| 660 | |||
| 661 | freq_table[i].driver_data = i; | ||
| 662 | freq_table[i].frequency = CPUFREQ_TABLE_END; | ||
| 663 | |||
| 664 | *table = &freq_table[0]; | ||
| 665 | |||
| 666 | return 0; | ||
| 667 | } | ||
| 668 | EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table); | ||
| 669 | |||
| 670 | /** | ||
| 671 | * dev_pm_opp_free_cpufreq_table() - free the cpufreq table | ||
| 672 | * @dev: device for which we do this operation | ||
| 673 | * @table: table to free | ||
| 674 | * | ||
| 675 | * Free up the table allocated by dev_pm_opp_init_cpufreq_table | ||
| 676 | */ | ||
| 677 | void dev_pm_opp_free_cpufreq_table(struct device *dev, | ||
| 678 | struct cpufreq_frequency_table **table) | ||
| 679 | { | ||
| 680 | if (!table) | ||
| 681 | return; | ||
| 682 | |||
| 683 | kfree(*table); | ||
| 684 | *table = NULL; | ||
| 685 | } | ||
| 686 | EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table); | ||
| 687 | #endif /* CONFIG_CPU_FREQ */ | ||
| 688 | |||
| 689 | /** | 621 | /** |
| 690 | * dev_pm_opp_get_notifier() - find notifier_head of the device with opp | 622 | * dev_pm_opp_get_notifier() - find notifier_head of the device with opp |
| 691 | * @dev: device pointer used to lookup device OPPs. | 623 | * @dev: device pointer used to lookup device OPPs. |
| @@ -734,11 +666,9 @@ int of_init_opp_table(struct device *dev) | |||
| 734 | unsigned long freq = be32_to_cpup(val++) * 1000; | 666 | unsigned long freq = be32_to_cpup(val++) * 1000; |
| 735 | unsigned long volt = be32_to_cpup(val++); | 667 | unsigned long volt = be32_to_cpup(val++); |
| 736 | 668 | ||
| 737 | if (dev_pm_opp_add(dev, freq, volt)) { | 669 | if (dev_pm_opp_add(dev, freq, volt)) |
| 738 | dev_warn(dev, "%s: Failed to add OPP %ld\n", | 670 | dev_warn(dev, "%s: Failed to add OPP %ld\n", |
| 739 | __func__, freq); | 671 | __func__, freq); |
| 740 | continue; | ||
| 741 | } | ||
| 742 | nr -= 2; | 672 | nr -= 2; |
| 743 | } | 673 | } |
| 744 | 674 | ||
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 2d56f4113ae7..eb1bd2ecad8b 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c | |||
| @@ -318,10 +318,16 @@ int device_init_wakeup(struct device *dev, bool enable) | |||
| 318 | { | 318 | { |
| 319 | int ret = 0; | 319 | int ret = 0; |
| 320 | 320 | ||
| 321 | if (!dev) | ||
| 322 | return -EINVAL; | ||
| 323 | |||
| 321 | if (enable) { | 324 | if (enable) { |
| 322 | device_set_wakeup_capable(dev, true); | 325 | device_set_wakeup_capable(dev, true); |
| 323 | ret = device_wakeup_enable(dev); | 326 | ret = device_wakeup_enable(dev); |
| 324 | } else { | 327 | } else { |
| 328 | if (dev->power.can_wakeup) | ||
| 329 | device_wakeup_disable(dev); | ||
| 330 | |||
| 325 | device_set_wakeup_capable(dev, false); | 331 | device_set_wakeup_capable(dev, false); |
| 326 | } | 332 | } |
| 327 | 333 | ||
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c index 930cad4e5df8..6a7e4fa12854 100644 --- a/drivers/base/regmap/regcache-rbtree.c +++ b/drivers/base/regmap/regcache-rbtree.c | |||
| @@ -23,16 +23,16 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg, | |||
| 23 | static int regcache_rbtree_exit(struct regmap *map); | 23 | static int regcache_rbtree_exit(struct regmap *map); |
| 24 | 24 | ||
| 25 | struct regcache_rbtree_node { | 25 | struct regcache_rbtree_node { |
| 26 | /* the actual rbtree node holding this block */ | ||
| 27 | struct rb_node node; | ||
| 28 | /* base register handled by this block */ | ||
| 29 | unsigned int base_reg; | ||
| 30 | /* block of adjacent registers */ | 26 | /* block of adjacent registers */ |
| 31 | void *block; | 27 | void *block; |
| 32 | /* Which registers are present */ | 28 | /* Which registers are present */ |
| 33 | long *cache_present; | 29 | long *cache_present; |
| 30 | /* base register handled by this block */ | ||
| 31 | unsigned int base_reg; | ||
| 34 | /* number of registers available in the block */ | 32 | /* number of registers available in the block */ |
| 35 | unsigned int blklen; | 33 | unsigned int blklen; |
| 34 | /* the actual rbtree node holding this block */ | ||
| 35 | struct rb_node node; | ||
| 36 | } __attribute__ ((packed)); | 36 | } __attribute__ ((packed)); |
| 37 | 37 | ||
| 38 | struct regcache_rbtree_ctx { | 38 | struct regcache_rbtree_ctx { |
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c index ebd189529760..ca193d1ef47c 100644 --- a/drivers/base/regmap/regmap-i2c.c +++ b/drivers/base/regmap/regmap-i2c.c | |||
| @@ -14,6 +14,79 @@ | |||
| 14 | #include <linux/i2c.h> | 14 | #include <linux/i2c.h> |
| 15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
| 16 | 16 | ||
| 17 | |||
| 18 | static int regmap_smbus_byte_reg_read(void *context, unsigned int reg, | ||
| 19 | unsigned int *val) | ||
| 20 | { | ||
| 21 | struct device *dev = context; | ||
| 22 | struct i2c_client *i2c = to_i2c_client(dev); | ||
| 23 | int ret; | ||
| 24 | |||
| 25 | if (reg > 0xff) | ||
| 26 | return -EINVAL; | ||
| 27 | |||
| 28 | ret = i2c_smbus_read_byte_data(i2c, reg); | ||
| 29 | if (ret < 0) | ||
| 30 | return ret; | ||
| 31 | |||
| 32 | *val = ret; | ||
| 33 | |||
| 34 | return 0; | ||
| 35 | } | ||
| 36 | |||
| 37 | static int regmap_smbus_byte_reg_write(void *context, unsigned int reg, | ||
| 38 | unsigned int val) | ||
| 39 | { | ||
| 40 | struct device *dev = context; | ||
| 41 | struct i2c_client *i2c = to_i2c_client(dev); | ||
| 42 | |||
| 43 | if (val > 0xff || reg > 0xff) | ||
| 44 | return -EINVAL; | ||
| 45 | |||
| 46 | return i2c_smbus_write_byte_data(i2c, reg, val); | ||
| 47 | } | ||
| 48 | |||
| 49 | static struct regmap_bus regmap_smbus_byte = { | ||
| 50 | .reg_write = regmap_smbus_byte_reg_write, | ||
| 51 | .reg_read = regmap_smbus_byte_reg_read, | ||
| 52 | }; | ||
| 53 | |||
| 54 | static int regmap_smbus_word_reg_read(void *context, unsigned int reg, | ||
| 55 | unsigned int *val) | ||
| 56 | { | ||
| 57 | struct device *dev = context; | ||
| 58 | struct i2c_client *i2c = to_i2c_client(dev); | ||
| 59 | int ret; | ||
| 60 | |||
| 61 | if (reg > 0xff) | ||
| 62 | return -EINVAL; | ||
| 63 | |||
| 64 | ret = i2c_smbus_read_word_data(i2c, reg); | ||
| 65 | if (ret < 0) | ||
| 66 | return ret; | ||
| 67 | |||
| 68 | *val = ret; | ||
| 69 | |||
| 70 | return 0; | ||
| 71 | } | ||
| 72 | |||
| 73 | static int regmap_smbus_word_reg_write(void *context, unsigned int reg, | ||
| 74 | unsigned int val) | ||
| 75 | { | ||
| 76 | struct device *dev = context; | ||
| 77 | struct i2c_client *i2c = to_i2c_client(dev); | ||
| 78 | |||
| 79 | if (val > 0xffff || reg > 0xff) | ||
| 80 | return -EINVAL; | ||
| 81 | |||
| 82 | return i2c_smbus_write_word_data(i2c, reg, val); | ||
| 83 | } | ||
| 84 | |||
| 85 | static struct regmap_bus regmap_smbus_word = { | ||
| 86 | .reg_write = regmap_smbus_word_reg_write, | ||
| 87 | .reg_read = regmap_smbus_word_reg_read, | ||
| 88 | }; | ||
| 89 | |||
| 17 | static int regmap_i2c_write(void *context, const void *data, size_t count) | 90 | static int regmap_i2c_write(void *context, const void *data, size_t count) |
| 18 | { | 91 | { |
| 19 | struct device *dev = context; | 92 | struct device *dev = context; |
| @@ -97,6 +170,23 @@ static struct regmap_bus regmap_i2c = { | |||
| 97 | .read = regmap_i2c_read, | 170 | .read = regmap_i2c_read, |
| 98 | }; | 171 | }; |
| 99 | 172 | ||
| 173 | static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c, | ||
| 174 | const struct regmap_config *config) | ||
| 175 | { | ||
| 176 | if (i2c_check_functionality(i2c->adapter, I2C_FUNC_I2C)) | ||
| 177 | return ®map_i2c; | ||
| 178 | else if (config->val_bits == 16 && config->reg_bits == 8 && | ||
| 179 | i2c_check_functionality(i2c->adapter, | ||
| 180 | I2C_FUNC_SMBUS_WORD_DATA)) | ||
| 181 | return ®map_smbus_word; | ||
| 182 | else if (config->val_bits == 8 && config->reg_bits == 8 && | ||
| 183 | i2c_check_functionality(i2c->adapter, | ||
| 184 | I2C_FUNC_SMBUS_BYTE_DATA)) | ||
| 185 | return ®map_smbus_byte; | ||
| 186 | |||
| 187 | return ERR_PTR(-ENOTSUPP); | ||
| 188 | } | ||
| 189 | |||
| 100 | /** | 190 | /** |
| 101 | * regmap_init_i2c(): Initialise register map | 191 | * regmap_init_i2c(): Initialise register map |
| 102 | * | 192 | * |
| @@ -109,7 +199,12 @@ static struct regmap_bus regmap_i2c = { | |||
| 109 | struct regmap *regmap_init_i2c(struct i2c_client *i2c, | 199 | struct regmap *regmap_init_i2c(struct i2c_client *i2c, |
| 110 | const struct regmap_config *config) | 200 | const struct regmap_config *config) |
| 111 | { | 201 | { |
| 112 | return regmap_init(&i2c->dev, ®map_i2c, &i2c->dev, config); | 202 | const struct regmap_bus *bus = regmap_get_i2c_bus(i2c, config); |
| 203 | |||
| 204 | if (IS_ERR(bus)) | ||
| 205 | return ERR_CAST(bus); | ||
| 206 | |||
| 207 | return regmap_init(&i2c->dev, bus, &i2c->dev, config); | ||
| 113 | } | 208 | } |
| 114 | EXPORT_SYMBOL_GPL(regmap_init_i2c); | 209 | EXPORT_SYMBOL_GPL(regmap_init_i2c); |
| 115 | 210 | ||
| @@ -126,7 +221,12 @@ EXPORT_SYMBOL_GPL(regmap_init_i2c); | |||
| 126 | struct regmap *devm_regmap_init_i2c(struct i2c_client *i2c, | 221 | struct regmap *devm_regmap_init_i2c(struct i2c_client *i2c, |
| 127 | const struct regmap_config *config) | 222 | const struct regmap_config *config) |
| 128 | { | 223 | { |
| 129 | return devm_regmap_init(&i2c->dev, ®map_i2c, &i2c->dev, config); | 224 | const struct regmap_bus *bus = regmap_get_i2c_bus(i2c, config); |
| 225 | |||
| 226 | if (IS_ERR(bus)) | ||
| 227 | return ERR_CAST(bus); | ||
| 228 | |||
| 229 | return devm_regmap_init(&i2c->dev, bus, &i2c->dev, config); | ||
| 130 | } | 230 | } |
| 131 | EXPORT_SYMBOL_GPL(devm_regmap_init_i2c); | 231 | EXPORT_SYMBOL_GPL(devm_regmap_init_i2c); |
| 132 | 232 | ||
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c index edf88f20cbce..6299a50a5960 100644 --- a/drivers/base/regmap/regmap-irq.c +++ b/drivers/base/regmap/regmap-irq.c | |||
| @@ -10,13 +10,13 @@ | |||
| 10 | * published by the Free Software Foundation. | 10 | * published by the Free Software Foundation. |
| 11 | */ | 11 | */ |
| 12 | 12 | ||
| 13 | #include <linux/export.h> | ||
| 14 | #include <linux/device.h> | 13 | #include <linux/device.h> |
| 15 | #include <linux/regmap.h> | 14 | #include <linux/export.h> |
| 16 | #include <linux/irq.h> | ||
| 17 | #include <linux/interrupt.h> | 15 | #include <linux/interrupt.h> |
| 16 | #include <linux/irq.h> | ||
| 18 | #include <linux/irqdomain.h> | 17 | #include <linux/irqdomain.h> |
| 19 | #include <linux/pm_runtime.h> | 18 | #include <linux/pm_runtime.h> |
| 19 | #include <linux/regmap.h> | ||
| 20 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
| 21 | 21 | ||
| 22 | #include "internal.h" | 22 | #include "internal.h" |
| @@ -347,6 +347,9 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, | |||
| 347 | int ret = -ENOMEM; | 347 | int ret = -ENOMEM; |
| 348 | u32 reg; | 348 | u32 reg; |
| 349 | 349 | ||
| 350 | if (chip->num_regs <= 0) | ||
| 351 | return -EINVAL; | ||
| 352 | |||
| 350 | for (i = 0; i < chip->num_irqs; i++) { | 353 | for (i = 0; i < chip->num_irqs; i++) { |
| 351 | if (chip->irqs[i].reg_offset % map->reg_stride) | 354 | if (chip->irqs[i].reg_offset % map->reg_stride) |
| 352 | return -EINVAL; | 355 | return -EINVAL; |
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c index 1e03e7f8bacb..04a329a377e9 100644 --- a/drivers/base/regmap/regmap-mmio.c +++ b/drivers/base/regmap/regmap-mmio.c | |||
| @@ -61,9 +61,28 @@ static int regmap_mmio_regbits_check(size_t reg_bits) | |||
| 61 | } | 61 | } |
| 62 | } | 62 | } |
| 63 | 63 | ||
| 64 | static inline void regmap_mmio_count_check(size_t count) | 64 | static inline void regmap_mmio_count_check(size_t count, u32 offset) |
| 65 | { | 65 | { |
| 66 | BUG_ON(count % 2 != 0); | 66 | BUG_ON(count <= offset); |
| 67 | } | ||
| 68 | |||
| 69 | static inline unsigned int | ||
| 70 | regmap_mmio_get_offset(const void *reg, size_t reg_size) | ||
| 71 | { | ||
| 72 | switch (reg_size) { | ||
| 73 | case 1: | ||
| 74 | return *(u8 *)reg; | ||
| 75 | case 2: | ||
| 76 | return *(u16 *)reg; | ||
| 77 | case 4: | ||
| 78 | return *(u32 *)reg; | ||
| 79 | #ifdef CONFIG_64BIT | ||
| 80 | case 8: | ||
| 81 | return *(u64 *)reg; | ||
| 82 | #endif | ||
| 83 | default: | ||
| 84 | BUG(); | ||
| 85 | } | ||
| 67 | } | 86 | } |
| 68 | 87 | ||
| 69 | static int regmap_mmio_gather_write(void *context, | 88 | static int regmap_mmio_gather_write(void *context, |
| @@ -71,7 +90,7 @@ static int regmap_mmio_gather_write(void *context, | |||
| 71 | const void *val, size_t val_size) | 90 | const void *val, size_t val_size) |
| 72 | { | 91 | { |
| 73 | struct regmap_mmio_context *ctx = context; | 92 | struct regmap_mmio_context *ctx = context; |
| 74 | u32 offset; | 93 | unsigned int offset; |
| 75 | int ret; | 94 | int ret; |
| 76 | 95 | ||
| 77 | regmap_mmio_regsize_check(reg_size); | 96 | regmap_mmio_regsize_check(reg_size); |
| @@ -82,7 +101,7 @@ static int regmap_mmio_gather_write(void *context, | |||
| 82 | return ret; | 101 | return ret; |
| 83 | } | 102 | } |
| 84 | 103 | ||
| 85 | offset = *(u32 *)reg; | 104 | offset = regmap_mmio_get_offset(reg, reg_size); |
| 86 | 105 | ||
| 87 | while (val_size) { | 106 | while (val_size) { |
| 88 | switch (ctx->val_bytes) { | 107 | switch (ctx->val_bytes) { |
| @@ -118,9 +137,9 @@ static int regmap_mmio_gather_write(void *context, | |||
| 118 | static int regmap_mmio_write(void *context, const void *data, size_t count) | 137 | static int regmap_mmio_write(void *context, const void *data, size_t count) |
| 119 | { | 138 | { |
| 120 | struct regmap_mmio_context *ctx = context; | 139 | struct regmap_mmio_context *ctx = context; |
| 121 | u32 offset = ctx->reg_bytes + ctx->pad_bytes; | 140 | unsigned int offset = ctx->reg_bytes + ctx->pad_bytes; |
| 122 | 141 | ||
| 123 | regmap_mmio_count_check(count); | 142 | regmap_mmio_count_check(count, offset); |
| 124 | 143 | ||
| 125 | return regmap_mmio_gather_write(context, data, ctx->reg_bytes, | 144 | return regmap_mmio_gather_write(context, data, ctx->reg_bytes, |
| 126 | data + offset, count - offset); | 145 | data + offset, count - offset); |
| @@ -131,7 +150,7 @@ static int regmap_mmio_read(void *context, | |||
| 131 | void *val, size_t val_size) | 150 | void *val, size_t val_size) |
| 132 | { | 151 | { |
| 133 | struct regmap_mmio_context *ctx = context; | 152 | struct regmap_mmio_context *ctx = context; |
| 134 | u32 offset; | 153 | unsigned int offset; |
| 135 | int ret; | 154 | int ret; |
| 136 | 155 | ||
| 137 | regmap_mmio_regsize_check(reg_size); | 156 | regmap_mmio_regsize_check(reg_size); |
| @@ -142,7 +161,7 @@ static int regmap_mmio_read(void *context, | |||
| 142 | return ret; | 161 | return ret; |
| 143 | } | 162 | } |
| 144 | 163 | ||
| 145 | offset = *(u32 *)reg; | 164 | offset = regmap_mmio_get_offset(reg, reg_size); |
| 146 | 165 | ||
| 147 | while (val_size) { | 166 | while (val_size) { |
| 148 | switch (ctx->val_bytes) { | 167 | switch (ctx->val_bytes) { |
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 63e30ef096e2..74d8c0672cf6 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c | |||
| @@ -35,10 +35,14 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg, | |||
| 35 | unsigned int mask, unsigned int val, | 35 | unsigned int mask, unsigned int val, |
| 36 | bool *change); | 36 | bool *change); |
| 37 | 37 | ||
| 38 | static int _regmap_bus_reg_read(void *context, unsigned int reg, | ||
| 39 | unsigned int *val); | ||
| 38 | static int _regmap_bus_read(void *context, unsigned int reg, | 40 | static int _regmap_bus_read(void *context, unsigned int reg, |
| 39 | unsigned int *val); | 41 | unsigned int *val); |
| 40 | static int _regmap_bus_formatted_write(void *context, unsigned int reg, | 42 | static int _regmap_bus_formatted_write(void *context, unsigned int reg, |
| 41 | unsigned int val); | 43 | unsigned int val); |
| 44 | static int _regmap_bus_reg_write(void *context, unsigned int reg, | ||
| 45 | unsigned int val); | ||
| 42 | static int _regmap_bus_raw_write(void *context, unsigned int reg, | 46 | static int _regmap_bus_raw_write(void *context, unsigned int reg, |
| 43 | unsigned int val); | 47 | unsigned int val); |
| 44 | 48 | ||
| @@ -192,6 +196,13 @@ static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift) | |||
| 192 | b[0] = cpu_to_be16(val << shift); | 196 | b[0] = cpu_to_be16(val << shift); |
| 193 | } | 197 | } |
| 194 | 198 | ||
| 199 | static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift) | ||
| 200 | { | ||
| 201 | __le16 *b = buf; | ||
| 202 | |||
| 203 | b[0] = cpu_to_le16(val << shift); | ||
| 204 | } | ||
| 205 | |||
| 195 | static void regmap_format_16_native(void *buf, unsigned int val, | 206 | static void regmap_format_16_native(void *buf, unsigned int val, |
| 196 | unsigned int shift) | 207 | unsigned int shift) |
| 197 | { | 208 | { |
| @@ -216,6 +227,13 @@ static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift) | |||
| 216 | b[0] = cpu_to_be32(val << shift); | 227 | b[0] = cpu_to_be32(val << shift); |
| 217 | } | 228 | } |
| 218 | 229 | ||
| 230 | static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift) | ||
| 231 | { | ||
| 232 | __le32 *b = buf; | ||
| 233 | |||
| 234 | b[0] = cpu_to_le32(val << shift); | ||
| 235 | } | ||
| 236 | |||
| 219 | static void regmap_format_32_native(void *buf, unsigned int val, | 237 | static void regmap_format_32_native(void *buf, unsigned int val, |
| 220 | unsigned int shift) | 238 | unsigned int shift) |
| 221 | { | 239 | { |
| @@ -240,6 +258,13 @@ static unsigned int regmap_parse_16_be(const void *buf) | |||
| 240 | return be16_to_cpu(b[0]); | 258 | return be16_to_cpu(b[0]); |
| 241 | } | 259 | } |
| 242 | 260 | ||
| 261 | static unsigned int regmap_parse_16_le(const void *buf) | ||
| 262 | { | ||
| 263 | const __le16 *b = buf; | ||
| 264 | |||
| 265 | return le16_to_cpu(b[0]); | ||
| 266 | } | ||
| 267 | |||
| 243 | static void regmap_parse_16_be_inplace(void *buf) | 268 | static void regmap_parse_16_be_inplace(void *buf) |
| 244 | { | 269 | { |
| 245 | __be16 *b = buf; | 270 | __be16 *b = buf; |
| @@ -247,6 +272,13 @@ static void regmap_parse_16_be_inplace(void *buf) | |||
| 247 | b[0] = be16_to_cpu(b[0]); | 272 | b[0] = be16_to_cpu(b[0]); |
| 248 | } | 273 | } |
| 249 | 274 | ||
| 275 | static void regmap_parse_16_le_inplace(void *buf) | ||
| 276 | { | ||
| 277 | __le16 *b = buf; | ||
| 278 | |||
| 279 | b[0] = le16_to_cpu(b[0]); | ||
| 280 | } | ||
| 281 | |||
| 250 | static unsigned int regmap_parse_16_native(const void *buf) | 282 | static unsigned int regmap_parse_16_native(const void *buf) |
| 251 | { | 283 | { |
| 252 | return *(u16 *)buf; | 284 | return *(u16 *)buf; |
| @@ -269,6 +301,13 @@ static unsigned int regmap_parse_32_be(const void *buf) | |||
| 269 | return be32_to_cpu(b[0]); | 301 | return be32_to_cpu(b[0]); |
| 270 | } | 302 | } |
| 271 | 303 | ||
| 304 | static unsigned int regmap_parse_32_le(const void *buf) | ||
| 305 | { | ||
| 306 | const __le32 *b = buf; | ||
| 307 | |||
| 308 | return le32_to_cpu(b[0]); | ||
| 309 | } | ||
| 310 | |||
| 272 | static void regmap_parse_32_be_inplace(void *buf) | 311 | static void regmap_parse_32_be_inplace(void *buf) |
| 273 | { | 312 | { |
| 274 | __be32 *b = buf; | 313 | __be32 *b = buf; |
| @@ -276,6 +315,13 @@ static void regmap_parse_32_be_inplace(void *buf) | |||
| 276 | b[0] = be32_to_cpu(b[0]); | 315 | b[0] = be32_to_cpu(b[0]); |
| 277 | } | 316 | } |
| 278 | 317 | ||
| 318 | static void regmap_parse_32_le_inplace(void *buf) | ||
| 319 | { | ||
| 320 | __le32 *b = buf; | ||
| 321 | |||
| 322 | b[0] = le32_to_cpu(b[0]); | ||
| 323 | } | ||
| 324 | |||
| 279 | static unsigned int regmap_parse_32_native(const void *buf) | 325 | static unsigned int regmap_parse_32_native(const void *buf) |
| 280 | { | 326 | { |
| 281 | return *(u32 *)buf; | 327 | return *(u32 *)buf; |
| @@ -495,6 +541,12 @@ struct regmap *regmap_init(struct device *dev, | |||
| 495 | 541 | ||
| 496 | map->defer_caching = false; | 542 | map->defer_caching = false; |
| 497 | goto skip_format_initialization; | 543 | goto skip_format_initialization; |
| 544 | } else if (!bus->read || !bus->write) { | ||
| 545 | map->reg_read = _regmap_bus_reg_read; | ||
| 546 | map->reg_write = _regmap_bus_reg_write; | ||
| 547 | |||
| 548 | map->defer_caching = false; | ||
| 549 | goto skip_format_initialization; | ||
| 498 | } else { | 550 | } else { |
| 499 | map->reg_read = _regmap_bus_read; | 551 | map->reg_read = _regmap_bus_read; |
| 500 | } | 552 | } |
| @@ -608,6 +660,11 @@ struct regmap *regmap_init(struct device *dev, | |||
| 608 | map->format.parse_val = regmap_parse_16_be; | 660 | map->format.parse_val = regmap_parse_16_be; |
| 609 | map->format.parse_inplace = regmap_parse_16_be_inplace; | 661 | map->format.parse_inplace = regmap_parse_16_be_inplace; |
| 610 | break; | 662 | break; |
| 663 | case REGMAP_ENDIAN_LITTLE: | ||
| 664 | map->format.format_val = regmap_format_16_le; | ||
| 665 | map->format.parse_val = regmap_parse_16_le; | ||
| 666 | map->format.parse_inplace = regmap_parse_16_le_inplace; | ||
| 667 | break; | ||
| 611 | case REGMAP_ENDIAN_NATIVE: | 668 | case REGMAP_ENDIAN_NATIVE: |
| 612 | map->format.format_val = regmap_format_16_native; | 669 | map->format.format_val = regmap_format_16_native; |
| 613 | map->format.parse_val = regmap_parse_16_native; | 670 | map->format.parse_val = regmap_parse_16_native; |
| @@ -629,6 +686,11 @@ struct regmap *regmap_init(struct device *dev, | |||
| 629 | map->format.parse_val = regmap_parse_32_be; | 686 | map->format.parse_val = regmap_parse_32_be; |
| 630 | map->format.parse_inplace = regmap_parse_32_be_inplace; | 687 | map->format.parse_inplace = regmap_parse_32_be_inplace; |
| 631 | break; | 688 | break; |
| 689 | case REGMAP_ENDIAN_LITTLE: | ||
| 690 | map->format.format_val = regmap_format_32_le; | ||
| 691 | map->format.parse_val = regmap_parse_32_le; | ||
| 692 | map->format.parse_inplace = regmap_parse_32_le_inplace; | ||
| 693 | break; | ||
| 632 | case REGMAP_ENDIAN_NATIVE: | 694 | case REGMAP_ENDIAN_NATIVE: |
| 633 | map->format.format_val = regmap_format_32_native; | 695 | map->format.format_val = regmap_format_32_native; |
| 634 | map->format.parse_val = regmap_parse_32_native; | 696 | map->format.parse_val = regmap_parse_32_native; |
| @@ -1284,6 +1346,14 @@ static int _regmap_bus_formatted_write(void *context, unsigned int reg, | |||
| 1284 | return ret; | 1346 | return ret; |
| 1285 | } | 1347 | } |
| 1286 | 1348 | ||
| 1349 | static int _regmap_bus_reg_write(void *context, unsigned int reg, | ||
| 1350 | unsigned int val) | ||
| 1351 | { | ||
| 1352 | struct regmap *map = context; | ||
| 1353 | |||
| 1354 | return map->bus->reg_write(map->bus_context, reg, val); | ||
| 1355 | } | ||
| 1356 | |||
| 1287 | static int _regmap_bus_raw_write(void *context, unsigned int reg, | 1357 | static int _regmap_bus_raw_write(void *context, unsigned int reg, |
| 1288 | unsigned int val) | 1358 | unsigned int val) |
| 1289 | { | 1359 | { |
| @@ -1615,6 +1685,9 @@ static int _regmap_raw_multi_reg_write(struct regmap *map, | |||
| 1615 | size_t pair_size = reg_bytes + pad_bytes + val_bytes; | 1685 | size_t pair_size = reg_bytes + pad_bytes + val_bytes; |
| 1616 | size_t len = pair_size * num_regs; | 1686 | size_t len = pair_size * num_regs; |
| 1617 | 1687 | ||
| 1688 | if (!len) | ||
| 1689 | return -EINVAL; | ||
| 1690 | |||
| 1618 | buf = kzalloc(len, GFP_KERNEL); | 1691 | buf = kzalloc(len, GFP_KERNEL); |
| 1619 | if (!buf) | 1692 | if (!buf) |
| 1620 | return -ENOMEM; | 1693 | return -ENOMEM; |
| @@ -1662,7 +1735,7 @@ static int _regmap_range_multi_paged_reg_write(struct regmap *map, | |||
| 1662 | int ret; | 1735 | int ret; |
| 1663 | int i, n; | 1736 | int i, n; |
| 1664 | struct reg_default *base; | 1737 | struct reg_default *base; |
| 1665 | unsigned int this_page; | 1738 | unsigned int this_page = 0; |
| 1666 | /* | 1739 | /* |
| 1667 | * the set of registers are not neccessarily in order, but | 1740 | * the set of registers are not neccessarily in order, but |
| 1668 | * since the order of write must be preserved this algorithm | 1741 | * since the order of write must be preserved this algorithm |
| @@ -1925,6 +1998,14 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val, | |||
| 1925 | return ret; | 1998 | return ret; |
| 1926 | } | 1999 | } |
| 1927 | 2000 | ||
| 2001 | static int _regmap_bus_reg_read(void *context, unsigned int reg, | ||
| 2002 | unsigned int *val) | ||
| 2003 | { | ||
| 2004 | struct regmap *map = context; | ||
| 2005 | |||
| 2006 | return map->bus->reg_read(map->bus_context, reg, val); | ||
| 2007 | } | ||
| 2008 | |||
| 1928 | static int _regmap_bus_read(void *context, unsigned int reg, | 2009 | static int _regmap_bus_read(void *context, unsigned int reg, |
| 1929 | unsigned int *val) | 2010 | unsigned int *val) |
| 1930 | { | 2011 | { |
diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c index e8d11b6630ee..dbb8350ea8dc 100644 --- a/drivers/base/syscore.c +++ b/drivers/base/syscore.c | |||
| @@ -10,6 +10,7 @@ | |||
| 10 | #include <linux/mutex.h> | 10 | #include <linux/mutex.h> |
| 11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
| 12 | #include <linux/interrupt.h> | 12 | #include <linux/interrupt.h> |
| 13 | #include <trace/events/power.h> | ||
| 13 | 14 | ||
| 14 | static LIST_HEAD(syscore_ops_list); | 15 | static LIST_HEAD(syscore_ops_list); |
| 15 | static DEFINE_MUTEX(syscore_ops_lock); | 16 | static DEFINE_MUTEX(syscore_ops_lock); |
| @@ -49,6 +50,7 @@ int syscore_suspend(void) | |||
| 49 | struct syscore_ops *ops; | 50 | struct syscore_ops *ops; |
| 50 | int ret = 0; | 51 | int ret = 0; |
| 51 | 52 | ||
| 53 | trace_suspend_resume(TPS("syscore_suspend"), 0, true); | ||
| 52 | pr_debug("Checking wakeup interrupts\n"); | 54 | pr_debug("Checking wakeup interrupts\n"); |
| 53 | 55 | ||
| 54 | /* Return error code if there are any wakeup interrupts pending. */ | 56 | /* Return error code if there are any wakeup interrupts pending. */ |
| @@ -70,6 +72,7 @@ int syscore_suspend(void) | |||
| 70 | "Interrupts enabled after %pF\n", ops->suspend); | 72 | "Interrupts enabled after %pF\n", ops->suspend); |
| 71 | } | 73 | } |
| 72 | 74 | ||
| 75 | trace_suspend_resume(TPS("syscore_suspend"), 0, false); | ||
| 73 | return 0; | 76 | return 0; |
| 74 | 77 | ||
| 75 | err_out: | 78 | err_out: |
| @@ -92,6 +95,7 @@ void syscore_resume(void) | |||
| 92 | { | 95 | { |
| 93 | struct syscore_ops *ops; | 96 | struct syscore_ops *ops; |
| 94 | 97 | ||
| 98 | trace_suspend_resume(TPS("syscore_resume"), 0, true); | ||
| 95 | WARN_ONCE(!irqs_disabled(), | 99 | WARN_ONCE(!irqs_disabled(), |
| 96 | "Interrupts enabled before system core resume.\n"); | 100 | "Interrupts enabled before system core resume.\n"); |
| 97 | 101 | ||
| @@ -103,6 +107,7 @@ void syscore_resume(void) | |||
| 103 | WARN_ONCE(!irqs_disabled(), | 107 | WARN_ONCE(!irqs_disabled(), |
| 104 | "Interrupts enabled after %pF\n", ops->resume); | 108 | "Interrupts enabled after %pF\n", ops->resume); |
| 105 | } | 109 | } |
| 110 | trace_suspend_resume(TPS("syscore_resume"), 0, false); | ||
| 106 | } | 111 | } |
| 107 | EXPORT_SYMBOL_GPL(syscore_resume); | 112 | EXPORT_SYMBOL_GPL(syscore_resume); |
| 108 | #endif /* CONFIG_PM_SLEEP */ | 113 | #endif /* CONFIG_PM_SLEEP */ |
