Diffstat (limited to 'drivers/base')
 drivers/base/Kconfig                  |  19
 drivers/base/base.h                   |   3
 drivers/base/dd.c                     |  26
 drivers/base/devres.c                 |  97
 drivers/base/dma-buf.c                |   2
 drivers/base/dma-coherent.c           |  10
 drivers/base/dma-contiguous.c         |  76
 drivers/base/dma-mapping.c            |   6
 drivers/base/memory.c                 |  12
 drivers/base/platform.c               |   7
 drivers/base/power/domain.c           |   2
 drivers/base/power/main.c             |  66
 drivers/base/power/opp.c              | 122
 drivers/base/power/wakeup.c           |   6
 drivers/base/regmap/regcache-rbtree.c |   8
 drivers/base/regmap/regmap-i2c.c      | 104
 drivers/base/regmap/regmap-irq.c      |   9
 drivers/base/regmap/regmap-mmio.c     |  35
 drivers/base/regmap/regmap.c          |  83
 19 files changed, 486 insertions(+), 207 deletions(-)
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 8fa8deab6449..23b8726962af 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -1,10 +1,10 @@
 menu "Generic Driver Options"
 
-config UEVENT_HELPER_PATH
-	string "path to uevent helper"
-	default ""
+config UEVENT_HELPER
+	bool "Support for uevent helper"
+	default y
 	help
-	  Path to uevent helper program forked by the kernel for
+	  The uevent helper program is forked by the kernel for
 	  every uevent.
 	  Before the switch to the netlink-based uevent source, this was
 	  used to hook hotplug scripts into kernel device events. It
@@ -15,8 +15,13 @@ config UEVENT_HELPER_PATH
 	  that it creates a high system load, or on smaller systems
 	  it is known to create out-of-memory situations during bootup.
 
-	  To disable user space helper program execution at early boot
-	  time specify an empty string here. This setting can be altered
+config UEVENT_HELPER_PATH
+	string "path to uevent helper"
+	depends on UEVENT_HELPER
+	default ""
+	help
+	  To disable user space helper program execution at by default
+	  specify an empty string here. This setting can still be altered
 	  via /proc/sys/kernel/hotplug or via /sys/kernel/uevent_helper
 	  later at runtime.
 
@@ -253,7 +258,7 @@ endchoice
 
 config CMA_ALIGNMENT
 	int "Maximum PAGE_SIZE order of alignment for contiguous buffers"
-	range 4 9
+	range 4 12
 	default 8
 	help
 	  DMA mapping framework by default aligns all buffers to the smallest
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 24f424249d9b..251c5d30f963 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -63,8 +63,6 @@ struct driver_private {
  * binding of drivers which were unable to get all the resources needed by
  * the device; typically because it depends on another driver getting
  * probed first.
- * @driver_data - private pointer for driver specific info. Will turn into a
- * list soon.
  * @device - pointer back to the struct class that this structure is
  * associated with.
  *
@@ -76,7 +74,6 @@ struct device_private {
 	struct klist_node knode_driver;
 	struct klist_node knode_bus;
 	struct list_head deferred_probe;
-	void *driver_data;
 	struct device *device;
 };
 #define to_device_private_parent(obj) \
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 62ec61e8f84a..e4ffbcf2f519 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -587,29 +587,3 @@ void driver_detach(struct device_driver *drv)
 		put_device(dev);
 	}
 }
-
-/*
- * These exports can't be _GPL due to .h files using this within them, and it
- * might break something that was previously working...
- */
-void *dev_get_drvdata(const struct device *dev)
-{
-	if (dev && dev->p)
-		return dev->p->driver_data;
-	return NULL;
-}
-EXPORT_SYMBOL(dev_get_drvdata);
-
-int dev_set_drvdata(struct device *dev, void *data)
-{
-	int error;
-
-	if (!dev->p) {
-		error = device_private_init(dev);
-		if (error)
-			return error;
-	}
-	dev->p->driver_data = data;
-	return 0;
-}
-EXPORT_SYMBOL(dev_set_drvdata);
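The two exports removed here survive elsewhere: with driver_data gone from struct device_private, the accessors collapse to trivial inline wrappers. A minimal sketch of the equivalent form, assuming driver_data becomes a direct member of struct device (which is how later <linux/device.h> versions expose it):

/* Sketch, assuming dev->driver_data is a direct struct device member
 * (as in later <linux/device.h>); not the exact patched code. */
static inline void *dev_get_drvdata(const struct device *dev)
{
	return dev->driver_data;
}

static inline void dev_set_drvdata(struct device *dev, void *data)
{
	dev->driver_data = data;
}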
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index db4e264eecb6..52302946770f 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -831,3 +831,100 @@ void devm_kfree(struct device *dev, void *p)
 	WARN_ON(rc);
 }
 EXPORT_SYMBOL_GPL(devm_kfree);
+
+/**
+ * devm_kmemdup - Resource-managed kmemdup
+ * @dev: Device this memory belongs to
+ * @src: Memory region to duplicate
+ * @len: Memory region length
+ * @gfp: GFP mask to use
+ *
+ * Duplicate region of a memory using resource managed kmalloc
+ */
+void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp)
+{
+	void *p;
+
+	p = devm_kmalloc(dev, len, gfp);
+	if (p)
+		memcpy(p, src, len);
+
+	return p;
+}
+EXPORT_SYMBOL_GPL(devm_kmemdup);
+
+struct pages_devres {
+	unsigned long addr;
+	unsigned int order;
+};
+
+static int devm_pages_match(struct device *dev, void *res, void *p)
+{
+	struct pages_devres *devres = res;
+	struct pages_devres *target = p;
+
+	return devres->addr == target->addr;
+}
+
+static void devm_pages_release(struct device *dev, void *res)
+{
+	struct pages_devres *devres = res;
+
+	free_pages(devres->addr, devres->order);
+}
+
+/**
+ * devm_get_free_pages - Resource-managed __get_free_pages
+ * @dev: Device to allocate memory for
+ * @gfp_mask: Allocation gfp flags
+ * @order: Allocation size is (1 << order) pages
+ *
+ * Managed get_free_pages. Memory allocated with this function is
+ * automatically freed on driver detach.
+ *
+ * RETURNS:
+ * Address of allocated memory on success, 0 on failure.
+ */
+
+unsigned long devm_get_free_pages(struct device *dev,
+				  gfp_t gfp_mask, unsigned int order)
+{
+	struct pages_devres *devres;
+	unsigned long addr;
+
+	addr = __get_free_pages(gfp_mask, order);
+
+	if (unlikely(!addr))
+		return 0;
+
+	devres = devres_alloc(devm_pages_release,
+			      sizeof(struct pages_devres), GFP_KERNEL);
+	if (unlikely(!devres)) {
+		free_pages(addr, order);
+		return 0;
+	}
+
+	devres->addr = addr;
+	devres->order = order;
+
+	devres_add(dev, devres);
+	return addr;
+}
+EXPORT_SYMBOL_GPL(devm_get_free_pages);
+
+/**
+ * devm_free_pages - Resource-managed free_pages
+ * @dev: Device this memory belongs to
+ * @addr: Memory to free
+ *
+ * Free memory allocated with devm_get_free_pages(). Unlike free_pages,
+ * there is no need to supply the @order.
+ */
+void devm_free_pages(struct device *dev, unsigned long addr)
+{
+	struct pages_devres devres = { .addr = addr };
+
+	WARN_ON(devres_release(dev, devm_pages_release, devm_pages_match,
+			       &devres));
+}
+EXPORT_SYMBOL_GPL(devm_free_pages);
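A hypothetical caller of the new pair, to illustrate the devres contract (driver and function names invented; only devm_get_free_pages()/devm_free_pages() come from the patch):

#include <linux/platform_device.h>

/* Hypothetical probe: the allocation is tied to the device and is freed
 * automatically on driver detach, so no explicit error-path or remove()
 * cleanup is needed. */
static int foo_probe(struct platform_device *pdev)
{
	unsigned long buf;

	/* order 2 == (1 << 2) == 4 pages */
	buf = devm_get_free_pages(&pdev->dev, GFP_KERNEL, 2);
	if (!buf)
		return -ENOMEM;

	/* devm_free_pages(&pdev->dev, buf) only if an early release is wanted */
	return 0;
}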
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
index ea77701deda4..840c7fa80983 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/base/dma-buf.c
@@ -491,7 +491,7 @@ EXPORT_SYMBOL_GPL(dma_buf_kunmap);
  * dma-buf buffer.
  *
  * This function adjusts the passed in vma so that it points at the file of the
- * dma_buf operation. It alsog adjusts the starting pgoff and does bounds
+ * dma_buf operation. It also adjusts the starting pgoff and does bounds
  * checking on the size of the vma. Then it calls the exporters mmap function to
  * set up the mapping.
  *
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index bc256b641027..7d6e84a51424 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -10,13 +10,13 @@
 struct dma_coherent_mem {
 	void *virt_base;
 	dma_addr_t device_base;
-	phys_addr_t pfn_base;
+	unsigned long pfn_base;
 	int size;
 	int flags;
 	unsigned long *bitmap;
 };
 
-int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
 				dma_addr_t device_addr, size_t size, int flags)
 {
 	void __iomem *mem_base = NULL;
@@ -32,7 +32,7 @@ int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
 
 	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
 
-	mem_base = ioremap(bus_addr, size);
+	mem_base = ioremap(phys_addr, size);
 	if (!mem_base)
 		goto out;
 
@@ -45,7 +45,7 @@ int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
 
 	dev->dma_mem->virt_base = mem_base;
 	dev->dma_mem->device_base = device_addr;
-	dev->dma_mem->pfn_base = PFN_DOWN(bus_addr);
+	dev->dma_mem->pfn_base = PFN_DOWN(phys_addr);
 	dev->dma_mem->size = pages;
 	dev->dma_mem->flags = flags;
 
@@ -208,7 +208,7 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
 
 	*ret = -ENXIO;
 	if (off < count && user_count <= count - off) {
-		unsigned pfn = mem->pfn_base + start + off;
+		unsigned long pfn = mem->pfn_base + start + off;
 		*ret = remap_pfn_range(vma, vma->vm_start, pfn,
 				       user_count << PAGE_SHIFT,
 				       vma->vm_page_prot);
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 165c2c299e57..83969f8c5727 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -37,6 +37,7 @@ struct cma {
 	unsigned long base_pfn;
 	unsigned long count;
 	unsigned long *bitmap;
+	struct mutex lock;
 };
 
 struct cma *dma_contiguous_default_area;
@@ -59,11 +60,22 @@ struct cma *dma_contiguous_default_area;
  */
 static const phys_addr_t size_bytes = CMA_SIZE_MBYTES * SZ_1M;
 static phys_addr_t size_cmdline = -1;
+static phys_addr_t base_cmdline;
+static phys_addr_t limit_cmdline;
 
 static int __init early_cma(char *p)
 {
 	pr_debug("%s(%s)\n", __func__, p);
 	size_cmdline = memparse(p, &p);
+	if (*p != '@')
+		return 0;
+	base_cmdline = memparse(p + 1, &p);
+	if (*p != '-') {
+		limit_cmdline = base_cmdline + size_cmdline;
+		return 0;
+	}
+	limit_cmdline = memparse(p + 1, &p);
+
 	return 0;
 }
 early_param("cma", early_cma);
@@ -107,11 +119,18 @@ static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
 void __init dma_contiguous_reserve(phys_addr_t limit)
 {
 	phys_addr_t selected_size = 0;
+	phys_addr_t selected_base = 0;
+	phys_addr_t selected_limit = limit;
+	bool fixed = false;
 
 	pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);
 
 	if (size_cmdline != -1) {
 		selected_size = size_cmdline;
+		selected_base = base_cmdline;
+		selected_limit = min_not_zero(limit_cmdline, limit);
+		if (base_cmdline + size_cmdline == limit_cmdline)
+			fixed = true;
 	} else {
 #ifdef CONFIG_CMA_SIZE_SEL_MBYTES
 		selected_size = size_bytes;
@@ -128,10 +147,12 @@ void __init dma_contiguous_reserve(phys_addr_t limit)
 		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
 			 (unsigned long)selected_size / SZ_1M);
 
-		dma_contiguous_reserve_area(selected_size, 0, limit,
-					    &dma_contiguous_default_area);
+		dma_contiguous_reserve_area(selected_size, selected_base,
+					    selected_limit,
+					    &dma_contiguous_default_area,
+					    fixed);
 	}
-};
+}
 
 static DEFINE_MUTEX(cma_mutex);
 
@@ -161,6 +182,7 @@ static int __init cma_activate_area(struct cma *cma)
 		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
 	} while (--i);
 
+	mutex_init(&cma->lock);
 	return 0;
 }
 
@@ -187,15 +209,20 @@ core_initcall(cma_init_reserved_areas);
  * @base: Base address of the reserved area optional, use 0 for any
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @res_cma: Pointer to store the created cma region.
+ * @fixed: hint about where to place the reserved area
 *
 * This function reserves memory from early allocator. It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows to create custom reserved areas for specific
 * devices.
+ *
+ * If @fixed is true, reserve contiguous area at exactly @base. If false,
+ * reserve in range from @base to @limit.
 */
 int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
-				       phys_addr_t limit, struct cma **res_cma)
+				       phys_addr_t limit, struct cma **res_cma,
+				       bool fixed)
 {
 	struct cma *cma = &cma_areas[cma_area_count];
 	phys_addr_t alignment;
@@ -221,18 +248,15 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
 	limit &= ~(alignment - 1);
 
 	/* Reserve memory */
-	if (base) {
+	if (base && fixed) {
 		if (memblock_is_region_reserved(base, size) ||
 		    memblock_reserve(base, size) < 0) {
 			ret = -EBUSY;
 			goto err;
 		}
 	} else {
-		/*
-		 * Use __memblock_alloc_base() since
-		 * memblock_alloc_base() panic()s.
-		 */
-		phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
+		phys_addr_t addr = memblock_alloc_range(size, alignment, base,
+							limit);
 		if (!addr) {
 			ret = -ENOMEM;
 			goto err;
@@ -261,6 +285,13 @@ err:
 	return ret;
 }
 
+static void clear_cma_bitmap(struct cma *cma, unsigned long pfn, int count)
+{
+	mutex_lock(&cma->lock);
+	bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
+	mutex_unlock(&cma->lock);
+}
+
 /**
  * dma_alloc_from_contiguous() - allocate pages from contiguous area
  * @dev: Pointer to device for which the allocation is performed.
@@ -269,7 +300,7 @@ err:
  *
  * This function allocates memory buffer for specified device. It uses
  * device specific contiguous memory area if available or the default
- * global one. Requires architecture specific get_dev_cma_area() helper
+ * global one. Requires architecture specific dev_get_cma_area() helper
 * function.
 */
 struct page *dma_alloc_from_contiguous(struct device *dev, int count,
@@ -294,30 +325,41 @@ struct page *dma_alloc_from_contiguous(struct device *dev, int count,
 
 	mask = (1 << align) - 1;
 
-	mutex_lock(&cma_mutex);
 
 	for (;;) {
+		mutex_lock(&cma->lock);
 		pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
 						    start, count, mask);
-		if (pageno >= cma->count)
+		if (pageno >= cma->count) {
+			mutex_unlock(&cma->lock);
 			break;
+		}
+		bitmap_set(cma->bitmap, pageno, count);
+		/*
+		 * It's safe to drop the lock here. We've marked this region for
+		 * our exclusive use. If the migration fails we will take the
+		 * lock again and unmark it.
+		 */
+		mutex_unlock(&cma->lock);
 
 		pfn = cma->base_pfn + pageno;
+		mutex_lock(&cma_mutex);
 		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
+		mutex_unlock(&cma_mutex);
 		if (ret == 0) {
-			bitmap_set(cma->bitmap, pageno, count);
 			page = pfn_to_page(pfn);
 			break;
 		} else if (ret != -EBUSY) {
+			clear_cma_bitmap(cma, pfn, count);
 			break;
 		}
+		clear_cma_bitmap(cma, pfn, count);
 		pr_debug("%s(): memory range at %p is busy, retrying\n",
 			 __func__, pfn_to_page(pfn));
 		/* try again with a bit different memory target */
 		start = pageno + mask + 1;
 	}
 
-	mutex_unlock(&cma_mutex);
 	pr_debug("%s(): returned %p\n", __func__, page);
 	return page;
 }
@@ -350,10 +392,8 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
 
 	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
 
-	mutex_lock(&cma_mutex);
-	bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
 	free_contig_range(pfn, count);
-	mutex_unlock(&cma_mutex);
+	clear_cma_bitmap(cma, pfn, count);
 
 	return true;
 }
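Reading early_cma() together with dma_contiguous_reserve(), the extended cma= parameter now accepts three forms; fixed placement is selected when base + size equals the stated limit:

    cma=64M                          64 MiB anywhere below the architecture limit
    cma=64M@0x40000000               64 MiB placed exactly at 0x40000000 (base + size == limit)
    cma=64M@0x40000000-0x80000000    64 MiB somewhere between 0x40000000 and 0x80000000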
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index 0ce39a33b3c2..6cd08e145bfa 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -175,7 +175,7 @@ static void dmam_coherent_decl_release(struct device *dev, void *res)
 /**
  * dmam_declare_coherent_memory - Managed dma_declare_coherent_memory()
  * @dev: Device to declare coherent memory for
- * @bus_addr: Bus address of coherent memory to be declared
+ * @phys_addr: Physical address of coherent memory to be declared
  * @device_addr: Device address of coherent memory to be declared
  * @size: Size of coherent memory to be declared
  * @flags: Flags
@@ -185,7 +185,7 @@ static void dmam_coherent_decl_release(struct device *dev, void *res)
  * RETURNS:
  * 0 on success, -errno on failure.
  */
-int dmam_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+int dmam_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
 				 dma_addr_t device_addr, size_t size, int flags)
 {
 	void *res;
@@ -195,7 +195,7 @@ int dmam_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
 	if (!res)
 		return -ENOMEM;
 
-	rc = dma_declare_coherent_memory(dev, bus_addr, device_addr, size,
+	rc = dma_declare_coherent_memory(dev, phys_addr, device_addr, size,
 					 flags);
 	if (rc == 0)
 		devres_add(dev, res);
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index bece691cb5d9..89f752dd8465 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -118,16 +118,6 @@ static ssize_t show_mem_start_phys_index(struct device *dev,
 	return sprintf(buf, "%08lx\n", phys_index);
 }
 
-static ssize_t show_mem_end_phys_index(struct device *dev,
-			struct device_attribute *attr, char *buf)
-{
-	struct memory_block *mem = to_memory_block(dev);
-	unsigned long phys_index;
-
-	phys_index = mem->end_section_nr / sections_per_block;
-	return sprintf(buf, "%08lx\n", phys_index);
-}
-
 /*
  * Show whether the section of memory is likely to be hot-removable
  */
@@ -384,7 +374,6 @@ static ssize_t show_phys_device(struct device *dev,
 }
 
 static DEVICE_ATTR(phys_index, 0444, show_mem_start_phys_index, NULL);
-static DEVICE_ATTR(end_phys_index, 0444, show_mem_end_phys_index, NULL);
 static DEVICE_ATTR(state, 0644, show_mem_state, store_mem_state);
 static DEVICE_ATTR(phys_device, 0444, show_phys_device, NULL);
 static DEVICE_ATTR(removable, 0444, show_mem_removable, NULL);
@@ -529,7 +518,6 @@ struct memory_block *find_memory_block(struct mem_section *section)
 
 static struct attribute *memory_memblk_attrs[] = {
 	&dev_attr_phys_index.attr,
-	&dev_attr_end_phys_index.attr,
 	&dev_attr_state.attr,
 	&dev_attr_phys_device.attr,
 	&dev_attr_removable.attr,
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 5b47210889e0..9e9227e1762d 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -131,9 +131,12 @@ EXPORT_SYMBOL_GPL(platform_get_resource_byname);
 */
 int platform_get_irq_byname(struct platform_device *dev, const char *name)
 {
-	struct resource *r = platform_get_resource_byname(dev, IORESOURCE_IRQ,
-							  name);
+	struct resource *r;
+
+	if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node)
+		return of_irq_get_byname(dev->dev.of_node, name);
 
+	r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
 	return r ? r->start : -ENXIO;
 }
 EXPORT_SYMBOL_GPL(platform_get_irq_byname);
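Callers of platform_get_irq_byname() are unchanged; on a DT system the name is now matched against the device node's interrupt-names property instead of a pre-built IORESOURCE_IRQ resource. A hypothetical user (driver, handler, and IRQ name invented):

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t bar_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

/* Hypothetical probe: "wakeup" is looked up via of_irq_get_byname()
 * when CONFIG_OF_IRQ is enabled and the device has an of_node. */
static int bar_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq_byname(pdev, "wakeup");

	if (irq < 0)
		return irq;	/* may be -EPROBE_DEFER on DT systems */

	return devm_request_irq(&pdev->dev, irq, bar_handler, 0,
				dev_name(&pdev->dev), NULL);
}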
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index ae098a261fcd..eee55c1e5fde 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -105,7 +105,7 @@ static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
 static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
 {
 	atomic_inc(&genpd->sd_count);
-	smp_mb__after_atomic_inc();
+	smp_mb__after_atomic();
 }
 
 static void genpd_acquire_lock(struct generic_pm_domain *genpd)
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 86d5e4fb5b98..343ffad59377 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -479,7 +479,7 @@ static int device_resume_noirq(struct device *dev, pm_message_t state, bool asyn
 	TRACE_DEVICE(dev);
 	TRACE_RESUME(0);
 
-	if (dev->power.syscore)
+	if (dev->power.syscore || dev->power.direct_complete)
 		goto Out;
 
 	if (!dev->power.is_noirq_suspended)
@@ -605,7 +605,7 @@ static int device_resume_early(struct device *dev, pm_message_t state, bool asyn
 	TRACE_DEVICE(dev);
 	TRACE_RESUME(0);
 
-	if (dev->power.syscore)
+	if (dev->power.syscore || dev->power.direct_complete)
 		goto Out;
 
 	if (!dev->power.is_late_suspended)
@@ -735,6 +735,12 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
 	if (dev->power.syscore)
 		goto Complete;
 
+	if (dev->power.direct_complete) {
+		/* Match the pm_runtime_disable() in __device_suspend(). */
+		pm_runtime_enable(dev);
+		goto Complete;
+	}
+
 	dpm_wait(dev->parent, async);
 	dpm_watchdog_set(&wd, dev);
 	device_lock(dev);
@@ -1007,7 +1013,7 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
 		goto Complete;
 	}
 
-	if (dev->power.syscore)
+	if (dev->power.syscore || dev->power.direct_complete)
 		goto Complete;
 
 	dpm_wait_for_children(dev, async);
@@ -1146,7 +1152,7 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as
 		goto Complete;
 	}
 
-	if (dev->power.syscore)
+	if (dev->power.syscore || dev->power.direct_complete)
 		goto Complete;
 
 	dpm_wait_for_children(dev, async);
@@ -1332,6 +1338,17 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 	if (dev->power.syscore)
 		goto Complete;
 
+	if (dev->power.direct_complete) {
+		if (pm_runtime_status_suspended(dev)) {
+			pm_runtime_disable(dev);
+			if (pm_runtime_suspended_if_enabled(dev))
+				goto Complete;
+
+			pm_runtime_enable(dev);
+		}
+		dev->power.direct_complete = false;
+	}
+
 	dpm_watchdog_set(&wd, dev);
 	device_lock(dev);
 
@@ -1382,10 +1399,19 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
 
  End:
 	if (!error) {
+		struct device *parent = dev->parent;
+
 		dev->power.is_suspended = true;
-		if (dev->power.wakeup_path
-		    && dev->parent && !dev->parent->power.ignore_children)
-			dev->parent->power.wakeup_path = true;
+		if (parent) {
+			spin_lock_irq(&parent->power.lock);
+
+			dev->parent->power.direct_complete = false;
+			if (dev->power.wakeup_path
+			    && !dev->parent->power.ignore_children)
+				dev->parent->power.wakeup_path = true;
+
+			spin_unlock_irq(&parent->power.lock);
+		}
 	}
 
 	device_unlock(dev);
@@ -1487,7 +1513,7 @@ static int device_prepare(struct device *dev, pm_message_t state)
 {
 	int (*callback)(struct device *) = NULL;
 	char *info = NULL;
-	int error = 0;
+	int ret = 0;
 
 	if (dev->power.syscore)
 		return 0;
@@ -1523,17 +1549,27 @@ static int device_prepare(struct device *dev, pm_message_t state)
 			callback = dev->driver->pm->prepare;
 	}
 
-	if (callback) {
-		error = callback(dev);
-		suspend_report_result(callback, error);
-	}
+	if (callback)
+		ret = callback(dev);
 
 	device_unlock(dev);
 
-	if (error)
+	if (ret < 0) {
+		suspend_report_result(callback, ret);
 		pm_runtime_put(dev);
-
-	return error;
+		return ret;
+	}
+	/*
+	 * A positive return value from ->prepare() means "this device appears
+	 * to be runtime-suspended and its state is fine, so if it really is
+	 * runtime-suspended, you can leave it in that state provided that you
+	 * will do the same thing with all of its descendants". This only
+	 * applies to suspend transitions, however.
+	 */
+	spin_lock_irq(&dev->power.lock);
+	dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND;
+	spin_unlock_irq(&dev->power.lock);
+	return 0;
 }
 
 /**
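A driver opts into the new direct_complete path by returning a positive value from its ->prepare() callback when its runtime-suspended state is already adequate for system sleep. A minimal sketch, with all names invented:

#include <linux/pm_runtime.h>

/* Sketch of a driver opting in to direct_complete: a positive return
 * lets the PM core skip the suspend/resume callbacks for a device
 * that is already runtime-suspended. */
static int baz_prepare(struct device *dev)
{
	/* 1 if the runtime-suspended state is fine for system suspend */
	return pm_runtime_status_suspended(dev);
}

static const struct dev_pm_ops baz_pm_ops = {
	.prepare = baz_prepare,
};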
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 25538675d59e..89ced955fafa 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -15,7 +15,6 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/slab.h>
-#include <linux/cpufreq.h>
 #include <linux/device.h>
 #include <linux/list.h>
 #include <linux/rculist.h>
@@ -394,6 +393,13 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
+ *
+ * Return:
+ * 0:		On success OR
+ *		Duplicate OPPs (both freq and volt are same) and opp->available
+ * -EEXIST:	Freq are same and volt are different OR
+ *		Duplicate OPPs (both freq and volt are same) and !opp->available
+ * -ENOMEM:	Memory allocation failure
 */
 int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
 {
@@ -443,15 +449,31 @@ int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
 	new_opp->u_volt = u_volt;
 	new_opp->available = true;
 
-	/* Insert new OPP in order of increasing frequency */
+	/*
+	 * Insert new OPP in order of increasing frequency
+	 * and discard if already present
+	 */
 	head = &dev_opp->opp_list;
 	list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
-		if (new_opp->rate < opp->rate)
+		if (new_opp->rate <= opp->rate)
 			break;
 		else
 			head = &opp->node;
 	}
 
+	/* Duplicate OPPs ? */
+	if (new_opp->rate == opp->rate) {
+		int ret = opp->available && new_opp->u_volt == opp->u_volt ?
+			0 : -EEXIST;
+
+		dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
+			 __func__, opp->rate, opp->u_volt, opp->available,
+			 new_opp->rate, new_opp->u_volt, new_opp->available);
+		mutex_unlock(&dev_opp_list_lock);
+		kfree(new_opp);
+		return ret;
+	}
+
 	list_add_rcu(&new_opp->node, head);
 	mutex_unlock(&dev_opp_list_lock);
 
@@ -596,96 +618,6 @@ int dev_pm_opp_disable(struct device *dev, unsigned long freq)
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
 
-#ifdef CONFIG_CPU_FREQ
-/**
- * dev_pm_opp_init_cpufreq_table() - create a cpufreq table for a device
- * @dev: device for which we do this operation
- * @table: Cpufreq table returned back to caller
- *
- * Generate a cpufreq table for a provided device- this assumes that the
- * opp list is already initialized and ready for usage.
- *
- * This function allocates required memory for the cpufreq table. It is
- * expected that the caller does the required maintenance such as freeing
- * the table as required.
- *
- * Returns -EINVAL for bad pointers, -ENODEV if the device is not found, -ENOMEM
- * if no memory available for the operation (table is not populated), returns 0
- * if successful and table is populated.
- *
- * WARNING: It is important for the callers to ensure refreshing their copy of
- * the table if any of the mentioned functions have been invoked in the interim.
- *
- * Locking: The internal device_opp and opp structures are RCU protected.
- * To simplify the logic, we pretend we are updater and hold relevant mutex here
- * Callers should ensure that this function is *NOT* called under RCU protection
- * or in contexts where mutex locking cannot be used.
- */
-int dev_pm_opp_init_cpufreq_table(struct device *dev,
-			    struct cpufreq_frequency_table **table)
-{
-	struct device_opp *dev_opp;
-	struct dev_pm_opp *opp;
-	struct cpufreq_frequency_table *freq_table;
-	int i = 0;
-
-	/* Pretend as if I am an updater */
-	mutex_lock(&dev_opp_list_lock);
-
-	dev_opp = find_device_opp(dev);
-	if (IS_ERR(dev_opp)) {
-		int r = PTR_ERR(dev_opp);
-		mutex_unlock(&dev_opp_list_lock);
-		dev_err(dev, "%s: Device OPP not found (%d)\n", __func__, r);
-		return r;
-	}
-
-	freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
-			     (dev_pm_opp_get_opp_count(dev) + 1), GFP_KERNEL);
-	if (!freq_table) {
-		mutex_unlock(&dev_opp_list_lock);
-		dev_warn(dev, "%s: Unable to allocate frequency table\n",
-			 __func__);
-		return -ENOMEM;
-	}
-
-	list_for_each_entry(opp, &dev_opp->opp_list, node) {
-		if (opp->available) {
-			freq_table[i].driver_data = i;
-			freq_table[i].frequency = opp->rate / 1000;
-			i++;
-		}
-	}
-	mutex_unlock(&dev_opp_list_lock);
-
-	freq_table[i].driver_data = i;
-	freq_table[i].frequency = CPUFREQ_TABLE_END;
-
-	*table = &freq_table[0];
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table);
-
-/**
- * dev_pm_opp_free_cpufreq_table() - free the cpufreq table
- * @dev: device for which we do this operation
- * @table: table to free
- *
- * Free up the table allocated by dev_pm_opp_init_cpufreq_table
- */
-void dev_pm_opp_free_cpufreq_table(struct device *dev,
-				   struct cpufreq_frequency_table **table)
-{
-	if (!table)
-		return;
-
-	kfree(*table);
-	*table = NULL;
-}
-EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
-#endif	/* CONFIG_CPU_FREQ */
-
 /**
  * dev_pm_opp_get_notifier() - find notifier_head of the device with opp
 * @dev: device pointer used to lookup device OPPs.
@@ -734,11 +666,9 @@ int of_init_opp_table(struct device *dev)
 		unsigned long freq = be32_to_cpup(val++) * 1000;
 		unsigned long volt = be32_to_cpup(val++);
 
-		if (dev_pm_opp_add(dev, freq, volt)) {
+		if (dev_pm_opp_add(dev, freq, volt))
 			dev_warn(dev, "%s: Failed to add OPP %ld\n",
 				 __func__, freq);
-			continue;
-		}
 		nr -= 2;
 	}
 
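Under the new return contract an exact re-registration counts as success, which is what lets the of_init_opp_table() loop above drop its continue. A hypothetical call site (frequency and voltage values invented):

#include <linux/pm_opp.h>

static int register_nominal_opp(struct device *dev)
{
	/* Re-adding an identical OPP now returns 0; a same-frequency OPP
	 * at a different voltage fails with -EEXIST. */
	int ret = dev_pm_opp_add(dev, 1000000000, 1100000); /* 1 GHz, 1.1 V */

	if (ret == -EEXIST)
		dev_warn(dev, "conflicting 1 GHz OPP already registered\n");
	return ret;
}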
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 2d56f4113ae7..eb1bd2ecad8b 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -318,10 +318,16 @@ int device_init_wakeup(struct device *dev, bool enable)
 {
 	int ret = 0;
 
+	if (!dev)
+		return -EINVAL;
+
 	if (enable) {
 		device_set_wakeup_capable(dev, true);
 		ret = device_wakeup_enable(dev);
 	} else {
+		if (dev->power.can_wakeup)
+			device_wakeup_disable(dev);
+
 		device_set_wakeup_capable(dev, false);
 	}
 
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index 930cad4e5df8..6a7e4fa12854 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -23,16 +23,16 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
 static int regcache_rbtree_exit(struct regmap *map);
 
 struct regcache_rbtree_node {
-	/* the actual rbtree node holding this block */
-	struct rb_node node;
-	/* base register handled by this block */
-	unsigned int base_reg;
 	/* block of adjacent registers */
 	void *block;
 	/* Which registers are present */
 	long *cache_present;
+	/* base register handled by this block */
+	unsigned int base_reg;
 	/* number of registers available in the block */
 	unsigned int blklen;
+	/* the actual rbtree node holding this block */
+	struct rb_node node;
 } __attribute__ ((packed));
 
 struct regcache_rbtree_ctx {
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
index ebd189529760..ca193d1ef47c 100644
--- a/drivers/base/regmap/regmap-i2c.c
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -14,6 +14,79 @@
 #include <linux/i2c.h>
 #include <linux/module.h>
 
+
+static int regmap_smbus_byte_reg_read(void *context, unsigned int reg,
+				      unsigned int *val)
+{
+	struct device *dev = context;
+	struct i2c_client *i2c = to_i2c_client(dev);
+	int ret;
+
+	if (reg > 0xff)
+		return -EINVAL;
+
+	ret = i2c_smbus_read_byte_data(i2c, reg);
+	if (ret < 0)
+		return ret;
+
+	*val = ret;
+
+	return 0;
+}
+
+static int regmap_smbus_byte_reg_write(void *context, unsigned int reg,
+				       unsigned int val)
+{
+	struct device *dev = context;
+	struct i2c_client *i2c = to_i2c_client(dev);
+
+	if (val > 0xff || reg > 0xff)
+		return -EINVAL;
+
+	return i2c_smbus_write_byte_data(i2c, reg, val);
+}
+
+static struct regmap_bus regmap_smbus_byte = {
+	.reg_write = regmap_smbus_byte_reg_write,
+	.reg_read = regmap_smbus_byte_reg_read,
+};
+
+static int regmap_smbus_word_reg_read(void *context, unsigned int reg,
+				      unsigned int *val)
+{
+	struct device *dev = context;
+	struct i2c_client *i2c = to_i2c_client(dev);
+	int ret;
+
+	if (reg > 0xff)
+		return -EINVAL;
+
+	ret = i2c_smbus_read_word_data(i2c, reg);
+	if (ret < 0)
+		return ret;
+
+	*val = ret;
+
+	return 0;
+}
+
+static int regmap_smbus_word_reg_write(void *context, unsigned int reg,
+				       unsigned int val)
+{
+	struct device *dev = context;
+	struct i2c_client *i2c = to_i2c_client(dev);
+
+	if (val > 0xffff || reg > 0xff)
+		return -EINVAL;
+
+	return i2c_smbus_write_word_data(i2c, reg, val);
+}
+
+static struct regmap_bus regmap_smbus_word = {
+	.reg_write = regmap_smbus_word_reg_write,
+	.reg_read = regmap_smbus_word_reg_read,
+};
+
 static int regmap_i2c_write(void *context, const void *data, size_t count)
 {
 	struct device *dev = context;
@@ -97,6 +170,23 @@ static struct regmap_bus regmap_i2c = {
 	.read = regmap_i2c_read,
 };
 
+static const struct regmap_bus *regmap_get_i2c_bus(struct i2c_client *i2c,
+					const struct regmap_config *config)
+{
+	if (i2c_check_functionality(i2c->adapter, I2C_FUNC_I2C))
+		return &regmap_i2c;
+	else if (config->val_bits == 16 && config->reg_bits == 8 &&
+		 i2c_check_functionality(i2c->adapter,
+					 I2C_FUNC_SMBUS_WORD_DATA))
+		return &regmap_smbus_word;
+	else if (config->val_bits == 8 && config->reg_bits == 8 &&
+		 i2c_check_functionality(i2c->adapter,
+					 I2C_FUNC_SMBUS_BYTE_DATA))
+		return &regmap_smbus_byte;
+
+	return ERR_PTR(-ENOTSUPP);
+}
+
 /**
  * regmap_init_i2c(): Initialise register map
 *
@@ -109,7 +199,12 @@ static struct regmap_bus regmap_i2c = {
 struct regmap *regmap_init_i2c(struct i2c_client *i2c,
 			       const struct regmap_config *config)
 {
-	return regmap_init(&i2c->dev, &regmap_i2c, &i2c->dev, config);
+	const struct regmap_bus *bus = regmap_get_i2c_bus(i2c, config);
+
+	if (IS_ERR(bus))
+		return ERR_CAST(bus);
+
+	return regmap_init(&i2c->dev, bus, &i2c->dev, config);
 }
 EXPORT_SYMBOL_GPL(regmap_init_i2c);
 
@@ -126,7 +221,12 @@ EXPORT_SYMBOL_GPL(regmap_init_i2c);
 struct regmap *devm_regmap_init_i2c(struct i2c_client *i2c,
 				    const struct regmap_config *config)
 {
-	return devm_regmap_init(&i2c->dev, &regmap_i2c, &i2c->dev, config);
+	const struct regmap_bus *bus = regmap_get_i2c_bus(i2c, config);
+
+	if (IS_ERR(bus))
+		return ERR_CAST(bus);
+
+	return devm_regmap_init(&i2c->dev, bus, &i2c->dev, config);
 }
 EXPORT_SYMBOL_GPL(devm_regmap_init_i2c);
 
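Call sites stay the same; the bus is chosen per the adapter's functionality bits. A hypothetical client whose geometry (8-bit registers, 16-bit values) allows the SMBus-word fallback (driver names invented):

#include <linux/i2c.h>
#include <linux/regmap.h>

/* Hypothetical client config: with reg_bits == 8 and val_bits == 16 an
 * SMBus-only adapter is served via i2c_smbus_{read,write}_word_data(). */
static const struct regmap_config qux_regmap_config = {
	.reg_bits = 8,
	.val_bits = 16,
};

static int qux_probe(struct i2c_client *client,
		     const struct i2c_device_id *id)
{
	struct regmap *map = devm_regmap_init_i2c(client, &qux_regmap_config);

	return PTR_ERR_OR_ZERO(map);
}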
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index edf88f20cbce..6299a50a5960 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -10,13 +10,13 @@
 * published by the Free Software Foundation.
 */
 
-#include <linux/export.h>
 #include <linux/device.h>
-#include <linux/regmap.h>
-#include <linux/irq.h>
+#include <linux/export.h>
 #include <linux/interrupt.h>
+#include <linux/irq.h>
 #include <linux/irqdomain.h>
 #include <linux/pm_runtime.h>
+#include <linux/regmap.h>
 #include <linux/slab.h>
 
 #include "internal.h"
@@ -347,6 +347,9 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
 	int ret = -ENOMEM;
 	u32 reg;
 
+	if (chip->num_regs <= 0)
+		return -EINVAL;
+
 	for (i = 0; i < chip->num_irqs; i++) {
 		if (chip->irqs[i].reg_offset % map->reg_stride)
 			return -EINVAL;
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c
index 1e03e7f8bacb..04a329a377e9 100644
--- a/drivers/base/regmap/regmap-mmio.c
+++ b/drivers/base/regmap/regmap-mmio.c
@@ -61,9 +61,28 @@ static int regmap_mmio_regbits_check(size_t reg_bits)
 	}
 }
 
-static inline void regmap_mmio_count_check(size_t count)
+static inline void regmap_mmio_count_check(size_t count, u32 offset)
 {
-	BUG_ON(count % 2 != 0);
+	BUG_ON(count <= offset);
+}
+
+static inline unsigned int
+regmap_mmio_get_offset(const void *reg, size_t reg_size)
+{
+	switch (reg_size) {
+	case 1:
+		return *(u8 *)reg;
+	case 2:
+		return *(u16 *)reg;
+	case 4:
+		return *(u32 *)reg;
+#ifdef CONFIG_64BIT
+	case 8:
+		return *(u64 *)reg;
+#endif
+	default:
+		BUG();
+	}
 }
 
 static int regmap_mmio_gather_write(void *context,
@@ -71,7 +90,7 @@ static int regmap_mmio_gather_write(void *context,
 				    const void *val, size_t val_size)
 {
 	struct regmap_mmio_context *ctx = context;
-	u32 offset;
+	unsigned int offset;
 	int ret;
 
 	regmap_mmio_regsize_check(reg_size);
@@ -82,7 +101,7 @@ static int regmap_mmio_gather_write(void *context,
 		return ret;
 	}
 
-	offset = *(u32 *)reg;
+	offset = regmap_mmio_get_offset(reg, reg_size);
 
 	while (val_size) {
 		switch (ctx->val_bytes) {
@@ -118,9 +137,9 @@ static int regmap_mmio_gather_write(void *context,
 static int regmap_mmio_write(void *context, const void *data, size_t count)
 {
 	struct regmap_mmio_context *ctx = context;
-	u32 offset = ctx->reg_bytes + ctx->pad_bytes;
+	unsigned int offset = ctx->reg_bytes + ctx->pad_bytes;
 
-	regmap_mmio_count_check(count);
+	regmap_mmio_count_check(count, offset);
 
 	return regmap_mmio_gather_write(context, data, ctx->reg_bytes,
 					data + offset, count - offset);
@@ -131,7 +150,7 @@ static int regmap_mmio_read(void *context,
 			    void *val, size_t val_size)
 {
 	struct regmap_mmio_context *ctx = context;
-	u32 offset;
+	unsigned int offset;
 	int ret;
 
 	regmap_mmio_regsize_check(reg_size);
@@ -142,7 +161,7 @@ static int regmap_mmio_read(void *context,
 		return ret;
 	}
 
-	offset = *(u32 *)reg;
+	offset = regmap_mmio_get_offset(reg, reg_size);
 
 	while (val_size) {
 		switch (ctx->val_bytes) {
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 63e30ef096e2..74d8c0672cf6 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -35,10 +35,14 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
 			       unsigned int mask, unsigned int val,
 			       bool *change);
 
+static int _regmap_bus_reg_read(void *context, unsigned int reg,
+				unsigned int *val);
 static int _regmap_bus_read(void *context, unsigned int reg,
 			    unsigned int *val);
 static int _regmap_bus_formatted_write(void *context, unsigned int reg,
 					unsigned int val);
+static int _regmap_bus_reg_write(void *context, unsigned int reg,
+				 unsigned int val);
 static int _regmap_bus_raw_write(void *context, unsigned int reg,
 				 unsigned int val);
 
@@ -192,6 +196,13 @@ static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
 	b[0] = cpu_to_be16(val << shift);
 }
 
+static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
+{
+	__le16 *b = buf;
+
+	b[0] = cpu_to_le16(val << shift);
+}
+
 static void regmap_format_16_native(void *buf, unsigned int val,
 				    unsigned int shift)
 {
@@ -216,6 +227,13 @@ static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
 	b[0] = cpu_to_be32(val << shift);
 }
 
+static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
+{
+	__le32 *b = buf;
+
+	b[0] = cpu_to_le32(val << shift);
+}
+
 static void regmap_format_32_native(void *buf, unsigned int val,
 				    unsigned int shift)
 {
@@ -240,6 +258,13 @@ static unsigned int regmap_parse_16_be(const void *buf)
 	return be16_to_cpu(b[0]);
 }
 
+static unsigned int regmap_parse_16_le(const void *buf)
+{
+	const __le16 *b = buf;
+
+	return le16_to_cpu(b[0]);
+}
+
 static void regmap_parse_16_be_inplace(void *buf)
 {
 	__be16 *b = buf;
@@ -247,6 +272,13 @@ static void regmap_parse_16_be_inplace(void *buf)
 	b[0] = be16_to_cpu(b[0]);
 }
 
+static void regmap_parse_16_le_inplace(void *buf)
+{
+	__le16 *b = buf;
+
+	b[0] = le16_to_cpu(b[0]);
+}
+
 static unsigned int regmap_parse_16_native(const void *buf)
 {
 	return *(u16 *)buf;
@@ -269,6 +301,13 @@ static unsigned int regmap_parse_32_be(const void *buf)
 	return be32_to_cpu(b[0]);
 }
 
+static unsigned int regmap_parse_32_le(const void *buf)
+{
+	const __le32 *b = buf;
+
+	return le32_to_cpu(b[0]);
+}
+
 static void regmap_parse_32_be_inplace(void *buf)
 {
 	__be32 *b = buf;
@@ -276,6 +315,13 @@ static void regmap_parse_32_be_inplace(void *buf)
 	b[0] = be32_to_cpu(b[0]);
 }
 
+static void regmap_parse_32_le_inplace(void *buf)
+{
+	__le32 *b = buf;
+
+	b[0] = le32_to_cpu(b[0]);
+}
+
 static unsigned int regmap_parse_32_native(const void *buf)
 {
 	return *(u32 *)buf;
@@ -495,6 +541,12 @@ struct regmap *regmap_init(struct device *dev,
 
 		map->defer_caching = false;
 		goto skip_format_initialization;
+	} else if (!bus->read || !bus->write) {
+		map->reg_read = _regmap_bus_reg_read;
+		map->reg_write = _regmap_bus_reg_write;
+
+		map->defer_caching = false;
+		goto skip_format_initialization;
 	} else {
 		map->reg_read = _regmap_bus_read;
 	}
@@ -608,6 +660,11 @@ struct regmap *regmap_init(struct device *dev,
 			map->format.parse_val = regmap_parse_16_be;
 			map->format.parse_inplace = regmap_parse_16_be_inplace;
 			break;
+		case REGMAP_ENDIAN_LITTLE:
+			map->format.format_val = regmap_format_16_le;
+			map->format.parse_val = regmap_parse_16_le;
+			map->format.parse_inplace = regmap_parse_16_le_inplace;
+			break;
 		case REGMAP_ENDIAN_NATIVE:
 			map->format.format_val = regmap_format_16_native;
 			map->format.parse_val = regmap_parse_16_native;
@@ -629,6 +686,11 @@ struct regmap *regmap_init(struct device *dev,
 			map->format.parse_val = regmap_parse_32_be;
 			map->format.parse_inplace = regmap_parse_32_be_inplace;
 			break;
+		case REGMAP_ENDIAN_LITTLE:
+			map->format.format_val = regmap_format_32_le;
+			map->format.parse_val = regmap_parse_32_le;
+			map->format.parse_inplace = regmap_parse_32_le_inplace;
+			break;
 		case REGMAP_ENDIAN_NATIVE:
 			map->format.format_val = regmap_format_32_native;
 			map->format.parse_val = regmap_parse_32_native;
@@ -1284,6 +1346,14 @@ static int _regmap_bus_formatted_write(void *context, unsigned int reg,
 	return ret;
 }
 
+static int _regmap_bus_reg_write(void *context, unsigned int reg,
+				 unsigned int val)
+{
+	struct regmap *map = context;
+
+	return map->bus->reg_write(map->bus_context, reg, val);
+}
+
 static int _regmap_bus_raw_write(void *context, unsigned int reg,
 				 unsigned int val)
 {
@@ -1615,6 +1685,9 @@ static int _regmap_raw_multi_reg_write(struct regmap *map,
 	size_t pair_size = reg_bytes + pad_bytes + val_bytes;
 	size_t len = pair_size * num_regs;
 
+	if (!len)
+		return -EINVAL;
+
 	buf = kzalloc(len, GFP_KERNEL);
 	if (!buf)
 		return -ENOMEM;
@@ -1662,7 +1735,7 @@ static int _regmap_range_multi_paged_reg_write(struct regmap *map,
 	int ret;
 	int i, n;
 	struct reg_default *base;
-	unsigned int this_page;
+	unsigned int this_page = 0;
 	/*
 	 * the set of registers are not neccessarily in order, but
 	 * since the order of write must be preserved this algorithm
@@ -1925,6 +1998,14 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
 	return ret;
 }
 
+static int _regmap_bus_reg_read(void *context, unsigned int reg,
+				unsigned int *val)
+{
+	struct regmap *map = context;
+
+	return map->bus->reg_read(map->bus_context, reg, val);
+}
+
 static int _regmap_bus_read(void *context, unsigned int reg,
 			    unsigned int *val)
 {
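Combined with the reg_read/reg_write dispatch added to regmap_init() above, a bus exposing only register-level accessors can now back a regmap with no byte-stream read/write at all. A minimal sketch (bus name and stub bodies invented):

#include <linux/regmap.h>

/* Sketch of a register-level regmap_bus: with .reg_read/.reg_write set
 * and no .read/.write, regmap_init() routes accesses through
 * _regmap_bus_reg_read()/_regmap_bus_reg_write() and skips the
 * formatting layer entirely. */
static int quux_reg_read(void *context, unsigned int reg, unsigned int *val)
{
	/* hardware-specific single-register read would go here */
	*val = 0;
	return 0;
}

static int quux_reg_write(void *context, unsigned int reg, unsigned int val)
{
	/* hardware-specific single-register write would go here */
	return 0;
}

static struct regmap_bus quux_bus = {
	.reg_read = quux_reg_read,
	.reg_write = quux_reg_write,
};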