Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/Kconfig                    7
-rw-r--r--  drivers/base/attribute_container.c      2
-rw-r--r--  drivers/base/bus.c                     14
-rw-r--r--  drivers/base/core.c                    10
-rw-r--r--  drivers/base/devres.c                   4
-rw-r--r--  drivers/base/dma-contiguous.c          24
-rw-r--r--  drivers/base/dma-mapping.c              4
-rw-r--r--  drivers/base/firmware_class.c         314
-rw-r--r--  drivers/base/memory.c                  42
-rw-r--r--  drivers/base/node.c                    86
-rw-r--r--  drivers/base/platform.c                37
-rw-r--r--  drivers/base/power/clock_ops.c          6
-rw-r--r--  drivers/base/power/domain.c            16
-rw-r--r--  drivers/base/power/opp.c               44
-rw-r--r--  drivers/base/power/power.h              6
-rw-r--r--  drivers/base/power/qos.c              323
-rw-r--r--  drivers/base/power/sysfs.c             94
-rw-r--r--  drivers/base/regmap/internal.h         24
-rw-r--r--  drivers/base/regmap/regmap-debugfs.c  148
-rw-r--r--  drivers/base/regmap/regmap-irq.c       19
-rw-r--r--  drivers/base/regmap/regmap.c          269
21 files changed, 1093 insertions(+), 400 deletions(-)
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index b34b5cda5ae1..c8b453939da2 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -57,7 +57,7 @@ config DEVTMPFS_MOUNT
 	  on the rootfs is completely empty.
 
 config STANDALONE
-	bool "Select only drivers that don't need compile-time external firmware" if EXPERIMENTAL
+	bool "Select only drivers that don't need compile-time external firmware"
 	default y
 	help
 	  Select this option if you don't have magic firmware for drivers that
@@ -185,7 +185,6 @@ config DMA_SHARED_BUFFER
 	bool
 	default n
 	select ANON_INODES
-	depends on EXPERIMENTAL
 	help
 	  This option enables the framework for buffer-sharing between
 	  multiple drivers. A buffer is associated with a file using driver
@@ -193,8 +192,8 @@ config DMA_SHARED_BUFFER
 	  driver.
 
 config CMA
-	bool "Contiguous Memory Allocator (EXPERIMENTAL)"
-	depends on HAVE_DMA_CONTIGUOUS && HAVE_MEMBLOCK && EXPERIMENTAL
+	bool "Contiguous Memory Allocator"
+	depends on HAVE_DMA_CONTIGUOUS && HAVE_MEMBLOCK
 	select MIGRATION
 	select MEMORY_ISOLATION
 	help
diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c
index 8fc200b2e2c0..d78b204e65c1 100644
--- a/drivers/base/attribute_container.c
+++ b/drivers/base/attribute_container.c
@@ -158,7 +158,7 @@ attribute_container_add_device(struct device *dev,
 
 		ic = kzalloc(sizeof(*ic), GFP_KERNEL);
 		if (!ic) {
-			dev_printk(KERN_ERR, dev, "failed to allocate class container\n");
+			dev_err(dev, "failed to allocate class container\n");
 			continue;
 		}
 
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 181ed2660b33..24eb07868344 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -164,8 +164,6 @@ static const struct kset_uevent_ops bus_uevent_ops = {
 
 static struct kset *bus_kset;
 
-
-#ifdef CONFIG_HOTPLUG
 /* Manually detach a device from its associated driver. */
 static ssize_t driver_unbind(struct device_driver *drv,
 			     const char *buf, size_t count)
@@ -252,7 +250,6 @@ static ssize_t store_drivers_probe(struct bus_type *bus,
 		return -EINVAL;
 	return count;
 }
-#endif
 
 static struct device *next_device(struct klist_iter *i)
 {
@@ -618,11 +615,6 @@ static void driver_remove_attrs(struct bus_type *bus,
 	}
 }
 
-#ifdef CONFIG_HOTPLUG
-/*
- * Thanks to drivers making their tables __devinit, we can't allow manual
- * bind and unbind from userspace unless CONFIG_HOTPLUG is enabled.
- */
 static int __must_check add_bind_files(struct device_driver *drv)
 {
 	int ret;
@@ -666,12 +658,6 @@ static void remove_probe_files(struct bus_type *bus)
 	bus_remove_file(bus, &bus_attr_drivers_autoprobe);
 	bus_remove_file(bus, &bus_attr_drivers_probe);
 }
-#else
-static inline int add_bind_files(struct device_driver *drv) { return 0; }
-static inline void remove_bind_files(struct device_driver *drv) {}
-static inline int add_probe_files(struct bus_type *bus) { return 0; }
-static inline void remove_probe_files(struct bus_type *bus) {}
-#endif
 
 static ssize_t driver_uevent_store(struct device_driver *drv,
 				   const char *buf, size_t count)
diff --git a/drivers/base/core.c b/drivers/base/core.c
index c8ae1f6b01b9..a235085e343c 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1201,7 +1201,6 @@ void device_del(struct device *dev)
 	if (dev->bus)
 		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
 					     BUS_NOTIFY_DEL_DEVICE, dev);
-	device_pm_remove(dev);
 	dpm_sysfs_remove(dev);
 	if (parent)
 		klist_del(&dev->p->knode_parent);
@@ -1226,6 +1225,7 @@ void device_del(struct device *dev)
 	device_remove_file(dev, &uevent_attr);
 	device_remove_attrs(dev);
 	bus_remove_device(dev);
+	device_pm_remove(dev);
 	driver_deferred_probe_del(dev);
 
 	/* Notify the platform of the removal, in case they
@@ -1420,7 +1420,7 @@ struct root_device {
 	struct module *owner;
 };
 
-inline struct root_device *to_root_device(struct device *d)
+static inline struct root_device *to_root_device(struct device *d)
 {
 	return container_of(d, struct root_device, dev);
 }
@@ -1861,10 +1861,12 @@ void device_shutdown(void)
 		pm_runtime_barrier(dev);
 
 		if (dev->bus && dev->bus->shutdown) {
-			dev_dbg(dev, "shutdown\n");
+			if (initcall_debug)
+				dev_info(dev, "shutdown\n");
 			dev->bus->shutdown(dev);
 		} else if (dev->driver && dev->driver->shutdown) {
-			dev_dbg(dev, "shutdown\n");
+			if (initcall_debug)
+				dev_info(dev, "shutdown\n");
 			dev->driver->shutdown(dev);
 		}
 
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 8731979d668a..668390664764 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -50,8 +50,8 @@ static void devres_log(struct device *dev, struct devres_node *node,
 		       const char *op)
 {
 	if (unlikely(log_devres))
-		dev_printk(KERN_ERR, dev, "DEVRES %3s %p %s (%lu bytes)\n",
-			   op, node, node->name, (unsigned long)node->size);
+		dev_err(dev, "DEVRES %3s %p %s (%lu bytes)\n",
+			op, node, node->name, (unsigned long)node->size);
 }
 #else /* CONFIG_DEBUG_DEVRES */
 #define set_node_dbginfo(node, n, s) do {} while (0)
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 612afcc5a938..0ca54421ce97 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -57,8 +57,8 @@ struct cma *dma_contiguous_default_area;
  * Users, who want to set the size of global CMA area for their system
  * should use cma= kernel parameter.
  */
-static const unsigned long size_bytes = CMA_SIZE_MBYTES * SZ_1M;
-static long size_cmdline = -1;
+static const phys_addr_t size_bytes = CMA_SIZE_MBYTES * SZ_1M;
+static phys_addr_t size_cmdline = -1;
 
 static int __init early_cma(char *p)
 {
@@ -70,7 +70,7 @@ early_param("cma", early_cma);
 
 #ifdef CONFIG_CMA_SIZE_PERCENTAGE
 
-static unsigned long __init __maybe_unused cma_early_percent_memory(void)
+static phys_addr_t __init __maybe_unused cma_early_percent_memory(void)
 {
 	struct memblock_region *reg;
 	unsigned long total_pages = 0;
@@ -88,7 +88,7 @@ static unsigned long __init __maybe_unused cma_early_percent_memory(void)
 
 #else
 
-static inline __maybe_unused unsigned long cma_early_percent_memory(void)
+static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
 {
 	return 0;
 }
@@ -106,7 +106,7 @@ static inline __maybe_unused unsigned long cma_early_percent_memory(void)
  */
 void __init dma_contiguous_reserve(phys_addr_t limit)
 {
-	unsigned long selected_size = 0;
+	phys_addr_t selected_size = 0;
 
 	pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);
 
@@ -126,7 +126,7 @@ void __init dma_contiguous_reserve(phys_addr_t limit)
 
 	if (selected_size) {
 		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
-			 selected_size / SZ_1M);
+			 (unsigned long)selected_size / SZ_1M);
 
 		dma_declare_contiguous(NULL, selected_size, 0, limit);
 	}
@@ -227,11 +227,11 @@ core_initcall(cma_init_reserved_areas);
 * called by board specific code when early allocator (memblock or bootmem)
 * is still activate.
 */
-int __init dma_declare_contiguous(struct device *dev, unsigned long size,
+int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
 				  phys_addr_t base, phys_addr_t limit)
 {
 	struct cma_reserved *r = &cma_reserved[cma_reserved_count];
-	unsigned long alignment;
+	phys_addr_t alignment;
 
 	pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
 		 (unsigned long)size, (unsigned long)base,
@@ -268,10 +268,6 @@ int __init dma_declare_contiguous(struct device *dev, unsigned long size,
 	if (!addr) {
 		base = -ENOMEM;
 		goto err;
-	} else if (addr + size > ~(unsigned long)0) {
-		memblock_free(addr, size);
-		base = -EINVAL;
-		goto err;
 	} else {
 		base = addr;
 	}
@@ -285,14 +281,14 @@ int __init dma_declare_contiguous(struct device *dev, unsigned long size,
 	r->size = size;
 	r->dev = dev;
 	cma_reserved_count++;
-	pr_info("CMA: reserved %ld MiB at %08lx\n", size / SZ_1M,
+	pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
 		(unsigned long)base);
 
 	/* Architecture specific contiguous memory fixup. */
 	dma_contiguous_early_fixup(base, size);
 	return 0;
 err:
-	pr_err("CMA: failed to reserve %ld MiB\n", size / SZ_1M);
+	pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
 	return base;
 }
 
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index 3fbedc75e7c5..0ce39a33b3c2 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -218,6 +218,8 @@ void dmam_release_declared_memory(struct device *dev)
 }
 EXPORT_SYMBOL(dmam_release_declared_memory);
 
+#endif
+
 /*
  * Create scatter-list for the already allocated DMA buffer.
  */
@@ -236,8 +238,6 @@ int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
 }
 EXPORT_SYMBOL(dma_common_get_sgtable);
 
-#endif
-
 /*
  * Create userspace mapping for the DMA-coherent memory.
  */
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 81541452887b..d81460309182 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -36,68 +36,6 @@ MODULE_AUTHOR("Manuel Estrada Sainz");
 MODULE_DESCRIPTION("Multi purpose firmware loading support");
 MODULE_LICENSE("GPL");
 
-static const char *fw_path[] = {
-	"/lib/firmware/updates/" UTS_RELEASE,
-	"/lib/firmware/updates",
-	"/lib/firmware/" UTS_RELEASE,
-	"/lib/firmware"
-};
-
-/* Don't inline this: 'struct kstat' is biggish */
-static noinline long fw_file_size(struct file *file)
-{
-	struct kstat st;
-	if (vfs_getattr(file->f_path.mnt, file->f_path.dentry, &st))
-		return -1;
-	if (!S_ISREG(st.mode))
-		return -1;
-	if (st.size != (long)st.size)
-		return -1;
-	return st.size;
-}
-
-static bool fw_read_file_contents(struct file *file, struct firmware *fw)
-{
-	long size;
-	char *buf;
-
-	size = fw_file_size(file);
-	if (size < 0)
-		return false;
-	buf = vmalloc(size);
-	if (!buf)
-		return false;
-	if (kernel_read(file, 0, buf, size) != size) {
-		vfree(buf);
-		return false;
-	}
-	fw->data = buf;
-	fw->size = size;
-	return true;
-}
-
-static bool fw_get_filesystem_firmware(struct firmware *fw, const char *name)
-{
-	int i;
-	bool success = false;
-	char *path = __getname();
-
-	for (i = 0; i < ARRAY_SIZE(fw_path); i++) {
-		struct file *file;
-		snprintf(path, PATH_MAX, "%s/%s", fw_path[i], name);
-
-		file = filp_open(path, O_RDONLY, 0);
-		if (IS_ERR(file))
-			continue;
-		success = fw_read_file_contents(file, fw);
-		fput(file);
-		if (success)
-			break;
-	}
-	__putname(path);
-	return success;
-}
-
 /* Builtin firmware support */
 
 #ifdef CONFIG_FW_LOADER
@@ -150,6 +88,11 @@ enum {
 	FW_STATUS_ABORT,
 };
 
+enum fw_buf_fmt {
+	VMALLOC_BUF,	/* used in direct loading */
+	PAGE_BUF,	/* used in loading via userspace */
+};
+
 static int loading_timeout = 60;	/* In seconds */
 
 static inline long firmware_loading_timeout(void)
@@ -173,8 +116,6 @@ struct firmware_cache {
 	spinlock_t name_lock;
 	struct list_head fw_names;
 
-	wait_queue_head_t wait_queue;
-	int cnt;
 	struct delayed_work work;
 
 	struct notifier_block pm_notify;
@@ -187,6 +128,7 @@ struct firmware_buf {
 	struct completion completion;
 	struct firmware_cache *fwc;
 	unsigned long status;
+	enum fw_buf_fmt fmt;
 	void *data;
 	size_t size;
 	struct page **pages;
@@ -201,7 +143,7 @@ struct fw_cache_entry {
 };
 
 struct firmware_priv {
-	struct timer_list timeout;
+	struct delayed_work timeout_work;
 	bool nowait;
 	struct device dev;
 	struct firmware_buf *buf;
@@ -240,6 +182,7 @@ static struct firmware_buf *__allocate_fw_buf(const char *fw_name,
 	strcpy(buf->fw_id, fw_name);
 	buf->fwc = fwc;
 	init_completion(&buf->completion);
+	buf->fmt = VMALLOC_BUF;
 
 	pr_debug("%s: fw-%s buf=%p\n", __func__, fw_name, buf);
 
@@ -303,20 +246,104 @@ static void __fw_free_buf(struct kref *ref)
 		 __func__, buf->fw_id, buf, buf->data,
 		 (unsigned int)buf->size);
 
-	spin_lock(&fwc->lock);
 	list_del(&buf->list);
 	spin_unlock(&fwc->lock);
 
-	vunmap(buf->data);
-	for (i = 0; i < buf->nr_pages; i++)
-		__free_page(buf->pages[i]);
-	kfree(buf->pages);
+
+	if (buf->fmt == PAGE_BUF) {
+		vunmap(buf->data);
+		for (i = 0; i < buf->nr_pages; i++)
+			__free_page(buf->pages[i]);
+		kfree(buf->pages);
+	} else
+		vfree(buf->data);
 	kfree(buf);
 }
 
 static void fw_free_buf(struct firmware_buf *buf)
 {
-	kref_put(&buf->ref, __fw_free_buf);
+	struct firmware_cache *fwc = buf->fwc;
+	spin_lock(&fwc->lock);
+	if (!kref_put(&buf->ref, __fw_free_buf))
+		spin_unlock(&fwc->lock);
+}
+
+/* direct firmware loading support */
+static char fw_path_para[256];
+static const char * const fw_path[] = {
+	fw_path_para,
+	"/lib/firmware/updates/" UTS_RELEASE,
+	"/lib/firmware/updates",
+	"/lib/firmware/" UTS_RELEASE,
+	"/lib/firmware"
+};
+
+/*
+ * Typical usage is that passing 'firmware_class.path=$CUSTOMIZED_PATH'
+ * from kernel command line because firmware_class is generally built in
+ * kernel instead of module.
+ */
+module_param_string(path, fw_path_para, sizeof(fw_path_para), 0644);
+MODULE_PARM_DESC(path, "customized firmware image search path with a higher priority than default path");
+
+/* Don't inline this: 'struct kstat' is biggish */
+static noinline_for_stack long fw_file_size(struct file *file)
+{
+	struct kstat st;
+	if (vfs_getattr(file->f_path.mnt, file->f_path.dentry, &st))
+		return -1;
+	if (!S_ISREG(st.mode))
+		return -1;
+	if (st.size != (long)st.size)
+		return -1;
+	return st.size;
+}
+
+static bool fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf)
+{
+	long size;
+	char *buf;
+
+	size = fw_file_size(file);
+	if (size < 0)
+		return false;
+	buf = vmalloc(size);
+	if (!buf)
+		return false;
+	if (kernel_read(file, 0, buf, size) != size) {
+		vfree(buf);
+		return false;
+	}
+	fw_buf->data = buf;
+	fw_buf->size = size;
+	return true;
+}
+
+static bool fw_get_filesystem_firmware(struct firmware_buf *buf)
+{
+	int i;
+	bool success = false;
+	char *path = __getname();
+
+	for (i = 0; i < ARRAY_SIZE(fw_path); i++) {
+		struct file *file;
+
+		/* skip the unset customized path */
+		if (!fw_path[i][0])
+			continue;
+
+		snprintf(path, PATH_MAX, "%s/%s", fw_path[i], buf->fw_id);
+
+		file = filp_open(path, O_RDONLY, 0);
+		if (IS_ERR(file))
+			continue;
+		success = fw_read_file_contents(file, buf);
+		fput(file);
+		if (success)
+			break;
+	}
+	__putname(path);
+	return success;
 }
 
 static struct firmware_priv *to_firmware_priv(struct device *dev)
@@ -423,6 +450,21 @@ static void firmware_free_data(const struct firmware *fw)
 #ifndef PAGE_KERNEL_RO
 #define PAGE_KERNEL_RO PAGE_KERNEL
 #endif
+
+/* one pages buffer should be mapped/unmapped only once */
+static int fw_map_pages_buf(struct firmware_buf *buf)
+{
+	if (buf->fmt != PAGE_BUF)
+		return 0;
+
+	if (buf->data)
+		vunmap(buf->data);
+	buf->data = vmap(buf->pages, buf->nr_pages, 0, PAGE_KERNEL_RO);
+	if (!buf->data)
+		return -ENOMEM;
+	return 0;
+}
+
 /**
  * firmware_loading_store - set value in the 'loading' control file
  * @dev: device pointer
@@ -467,6 +509,14 @@ static ssize_t firmware_loading_store(struct device *dev,
 		if (test_bit(FW_STATUS_LOADING, &fw_buf->status)) {
 			set_bit(FW_STATUS_DONE, &fw_buf->status);
 			clear_bit(FW_STATUS_LOADING, &fw_buf->status);
+
+			/*
+			 * Several loading requests may be pending on
+			 * one same firmware buf, so let all requests
+			 * see the mapped 'buf->data' once the loading
+			 * is completed.
+			 * */
+			fw_map_pages_buf(fw_buf);
 			complete_all(&fw_buf->completion);
 			break;
 		}
@@ -634,11 +684,18 @@ static struct bin_attribute firmware_attr_data = {
 	.write = firmware_data_write,
 };
 
-static void firmware_class_timeout(u_long data)
+static void firmware_class_timeout_work(struct work_struct *work)
 {
-	struct firmware_priv *fw_priv = (struct firmware_priv *) data;
+	struct firmware_priv *fw_priv = container_of(work,
+			struct firmware_priv, timeout_work.work);
 
+	mutex_lock(&fw_lock);
+	if (test_bit(FW_STATUS_DONE, &(fw_priv->buf->status))) {
+		mutex_unlock(&fw_lock);
+		return;
+	}
 	fw_load_abort(fw_priv);
+	mutex_unlock(&fw_lock);
 }
 
 static struct firmware_priv *
@@ -657,8 +714,8 @@ fw_create_instance(struct firmware *firmware, const char *fw_name,
 
 	fw_priv->nowait = nowait;
 	fw_priv->fw = firmware;
-	setup_timer(&fw_priv->timeout,
-		    firmware_class_timeout, (u_long) fw_priv);
+	INIT_DELAYED_WORK(&fw_priv->timeout_work,
+		firmware_class_timeout_work);
 
 	f_dev = &fw_priv->dev;
 
@@ -670,15 +727,6 @@ exit:
 	return fw_priv;
 }
 
-/* one pages buffer is mapped/unmapped only once */
-static int fw_map_pages_buf(struct firmware_buf *buf)
-{
-	buf->data = vmap(buf->pages, buf->nr_pages, 0, PAGE_KERNEL_RO);
-	if (!buf->data)
-		return -ENOMEM;
-	return 0;
-}
-
 /* store the pages buffer info firmware from buf */
 static void fw_set_page_data(struct firmware_buf *buf, struct firmware *fw)
 {
@@ -778,11 +826,6 @@ _request_firmware_prepare(const struct firmware **firmware_p, const char *name,
 		return NULL;
 	}
 
-	if (fw_get_filesystem_firmware(firmware, name)) {
-		dev_dbg(device, "firmware: direct-loading firmware %s\n", name);
-		return NULL;
-	}
-
 	ret = fw_lookup_and_allocate_buf(name, &fw_cache, &buf);
 	if (!ret)
 		fw_priv = fw_create_instance(firmware, name, device,
@@ -832,6 +875,23 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
 	struct device *f_dev = &fw_priv->dev;
 	struct firmware_buf *buf = fw_priv->buf;
 	struct firmware_cache *fwc = &fw_cache;
+	int direct_load = 0;
+
+	/* try direct loading from fs first */
+	if (fw_get_filesystem_firmware(buf)) {
+		dev_dbg(f_dev->parent, "firmware: direct-loading"
+			" firmware %s\n", buf->fw_id);
+
+		mutex_lock(&fw_lock);
+		set_bit(FW_STATUS_DONE, &buf->status);
+		mutex_unlock(&fw_lock);
+		complete_all(&buf->completion);
+		direct_load = 1;
+		goto handle_fw;
+	}
+
+	/* fall back on userspace loading */
+	buf->fmt = PAGE_BUF;
 
 	dev_set_uevent_suppress(f_dev, true);
 
@@ -860,16 +920,16 @@
 		dev_set_uevent_suppress(f_dev, false);
 		dev_dbg(f_dev, "firmware: requesting %s\n", buf->fw_id);
 		if (timeout != MAX_SCHEDULE_TIMEOUT)
-			mod_timer(&fw_priv->timeout,
-				  round_jiffies_up(jiffies + timeout));
+			schedule_delayed_work(&fw_priv->timeout_work, timeout);
 
 		kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD);
 	}
 
 	wait_for_completion(&buf->completion);
 
-	del_timer_sync(&fw_priv->timeout);
+	cancel_delayed_work_sync(&fw_priv->timeout_work);
 
+handle_fw:
 	mutex_lock(&fw_lock);
 	if (!buf->size || test_bit(FW_STATUS_ABORT, &buf->status))
 		retval = -ENOENT;
@@ -884,9 +944,6 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
 	if (!retval && f_dev->parent)
 		fw_add_devm_name(f_dev->parent, buf->fw_id);
 
-	if (!retval)
-		retval = fw_map_pages_buf(buf);
-
 	/*
 	 * After caching firmware image is started, let it piggyback
 	 * on request firmware.
@@ -902,6 +959,9 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
 	fw_priv->buf = NULL;
 	mutex_unlock(&fw_lock);
 
+	if (direct_load)
+		goto err_put_dev;
+
 	device_remove_file(f_dev, &dev_attr_loading);
 err_del_bin_attr:
 	device_remove_bin_file(f_dev, &firmware_attr_data);
@@ -928,6 +988,9 @@ err_put_dev:
 * firmware image for this or any other device.
 *
 * Caller must hold the reference count of @device.
+*
+* The function can be called safely inside device's suspend and
+* resume callback.
 **/
 int
 request_firmware(const struct firmware **firmware_p, const char *name,
@@ -1129,6 +1192,8 @@ int uncache_firmware(const char *fw_name)
 }
 
 #ifdef CONFIG_PM_SLEEP
+static ASYNC_DOMAIN_EXCLUSIVE(fw_cache_domain);
+
 static struct fw_cache_entry *alloc_fw_cache_entry(const char *name)
 {
 	struct fw_cache_entry *fce;
@@ -1142,17 +1207,27 @@ exit:
 	return fce;
 }
 
-static int fw_cache_piggyback_on_request(const char *name)
+static int __fw_entry_found(const char *name)
 {
 	struct firmware_cache *fwc = &fw_cache;
 	struct fw_cache_entry *fce;
-	int ret = 0;
 
-	spin_lock(&fwc->name_lock);
 	list_for_each_entry(fce, &fwc->fw_names, list) {
 		if (!strcmp(fce->name, name))
-			goto found;
+			return 1;
 	}
+	return 0;
+}
+
+static int fw_cache_piggyback_on_request(const char *name)
+{
+	struct firmware_cache *fwc = &fw_cache;
+	struct fw_cache_entry *fce;
+	int ret = 0;
+
+	spin_lock(&fwc->name_lock);
+	if (__fw_entry_found(name))
+		goto found;
 
 	fce = alloc_fw_cache_entry(name);
 	if (fce) {
@@ -1185,12 +1260,6 @@ static void __async_dev_cache_fw_image(void *fw_entry,
 
 		free_fw_cache_entry(fce);
 	}
-
-	spin_lock(&fwc->name_lock);
-	fwc->cnt--;
-	spin_unlock(&fwc->name_lock);
-
-	wake_up(&fwc->wait_queue);
 }
 
 /* called with dev->devres_lock held */
@@ -1229,11 +1298,19 @@ static void dev_cache_fw_image(struct device *dev, void *data)
 		list_del(&fce->list);
 
 		spin_lock(&fwc->name_lock);
-		fwc->cnt++;
-		list_add(&fce->list, &fwc->fw_names);
+		/* only one cache entry for one firmware */
+		if (!__fw_entry_found(fce->name)) {
+			list_add(&fce->list, &fwc->fw_names);
+		} else {
+			free_fw_cache_entry(fce);
+			fce = NULL;
+		}
 		spin_unlock(&fwc->name_lock);
 
-		async_schedule(__async_dev_cache_fw_image, (void *)fce);
+		if (fce)
+			async_schedule_domain(__async_dev_cache_fw_image,
+					      (void *)fce,
+					      &fw_cache_domain);
 	}
 }
 
@@ -1275,6 +1352,9 @@ static void device_cache_fw_images(void)
 
 	pr_debug("%s\n", __func__);
 
+	/* cancel uncache work */
+	cancel_delayed_work_sync(&fwc->work);
+
 	/*
 	 * use small loading timeout for caching devices' firmware
 	 * because all these firmware images have been loaded
@@ -1292,21 +1372,7 @@
 	mutex_unlock(&fw_lock);
 
 	/* wait for completion of caching firmware for all devices */
-	spin_lock(&fwc->name_lock);
-	for (;;) {
-		prepare_to_wait(&fwc->wait_queue, &wait,
-				TASK_UNINTERRUPTIBLE);
-		if (!fwc->cnt)
-			break;
-
-		spin_unlock(&fwc->name_lock);
-
-		schedule();
-
-		spin_lock(&fwc->name_lock);
-	}
-	spin_unlock(&fwc->name_lock);
-	finish_wait(&fwc->wait_queue, &wait);
+	async_synchronize_full_domain(&fw_cache_domain);
 
 	loading_timeout = old_timeout;
 }
@@ -1394,9 +1460,7 @@ static void __init fw_cache_init(void)
 #ifdef CONFIG_PM_SLEEP
 	spin_lock_init(&fw_cache.name_lock);
 	INIT_LIST_HEAD(&fw_cache.fw_names);
-	fw_cache.cnt = 0;
 
-	init_waitqueue_head(&fw_cache.wait_queue);
 	INIT_DELAYED_WORK(&fw_cache.work,
 			  device_uncache_fw_images_work);
 
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 86c88216a503..987604d56c83 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -70,6 +70,13 @@ void unregister_memory_isolate_notifier(struct notifier_block *nb)
 }
 EXPORT_SYMBOL(unregister_memory_isolate_notifier);
 
+static void memory_block_release(struct device *dev)
+{
+	struct memory_block *mem = container_of(dev, struct memory_block, dev);
+
+	kfree(mem);
+}
+
 /*
  * register_memory - Setup a sysfs device for a memory block
  */
@@ -80,6 +87,7 @@ int register_memory(struct memory_block *memory)
 
 	memory->dev.bus = &memory_subsys;
 	memory->dev.id = memory->start_section_nr / sections_per_block;
+	memory->dev.release = memory_block_release;
 
 	error = device_register(&memory->dev);
 	return error;
@@ -246,7 +254,7 @@ static bool pages_correctly_reserved(unsigned long start_pfn,
 * OK to have direct references to sparsemem variables in here.
 */
 static int
-memory_block_action(unsigned long phys_index, unsigned long action)
+memory_block_action(unsigned long phys_index, unsigned long action, int online_type)
 {
 	unsigned long start_pfn;
 	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
@@ -261,7 +269,7 @@ memory_block_action(unsigned long phys_index, unsigned long action)
 		if (!pages_correctly_reserved(start_pfn, nr_pages))
 			return -EBUSY;
 
-		ret = online_pages(start_pfn, nr_pages);
+		ret = online_pages(start_pfn, nr_pages, online_type);
 		break;
 	case MEM_OFFLINE:
 		ret = offline_pages(start_pfn, nr_pages);
@@ -276,7 +284,8 @@ memory_block_action(unsigned long phys_index, unsigned long action)
 }
 
 static int __memory_block_change_state(struct memory_block *mem,
-		unsigned long to_state, unsigned long from_state_req)
+		unsigned long to_state, unsigned long from_state_req,
+		int online_type)
 {
 	int ret = 0;
 
@@ -288,7 +297,7 @@ static int __memory_block_change_state(struct memory_block *mem,
 	if (to_state == MEM_OFFLINE)
 		mem->state = MEM_GOING_OFFLINE;
 
-	ret = memory_block_action(mem->start_section_nr, to_state);
+	ret = memory_block_action(mem->start_section_nr, to_state, online_type);
 
 	if (ret) {
 		mem->state = from_state_req;
@@ -311,12 +320,14 @@ out:
 }
 
 static int memory_block_change_state(struct memory_block *mem,
-		unsigned long to_state, unsigned long from_state_req)
+		unsigned long to_state, unsigned long from_state_req,
+		int online_type)
 {
 	int ret;
 
 	mutex_lock(&mem->state_mutex);
-	ret = __memory_block_change_state(mem, to_state, from_state_req);
+	ret = __memory_block_change_state(mem, to_state, from_state_req,
+					  online_type);
 	mutex_unlock(&mem->state_mutex);
 
 	return ret;
@@ -330,10 +341,18 @@ store_mem_state(struct device *dev,
 
 	mem = container_of(dev, struct memory_block, dev);
 
-	if (!strncmp(buf, "online", min((int)count, 6)))
-		ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
-	else if(!strncmp(buf, "offline", min((int)count, 7)))
-		ret = memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
+	if (!strncmp(buf, "online_kernel", min_t(int, count, 13)))
+		ret = memory_block_change_state(mem, MEM_ONLINE,
+						MEM_OFFLINE, ONLINE_KERNEL);
+	else if (!strncmp(buf, "online_movable", min_t(int, count, 14)))
+		ret = memory_block_change_state(mem, MEM_ONLINE,
+						MEM_OFFLINE, ONLINE_MOVABLE);
+	else if (!strncmp(buf, "online", min_t(int, count, 6)))
+		ret = memory_block_change_state(mem, MEM_ONLINE,
+						MEM_OFFLINE, ONLINE_KEEP);
+	else if(!strncmp(buf, "offline", min_t(int, count, 7)))
+		ret = memory_block_change_state(mem, MEM_OFFLINE,
+						MEM_ONLINE, -1);
 
 	if (ret)
 		return ret;
@@ -635,7 +654,6 @@ int remove_memory_block(unsigned long node_id, struct mem_section *section,
 		mem_remove_simple_file(mem, phys_device);
 		mem_remove_simple_file(mem, removable);
 		unregister_memory(mem);
-		kfree(mem);
 	} else
 		kobject_put(&mem->dev.kobj);
 
@@ -669,7 +687,7 @@ int offline_memory_block(struct memory_block *mem)
 
 	mutex_lock(&mem->state_mutex);
 	if (mem->state != MEM_OFFLINE)
-		ret = __memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
+		ret = __memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE, -1);
 	mutex_unlock(&mem->state_mutex);
 
 	return ret;
diff --git a/drivers/base/node.c b/drivers/base/node.c
index af1a177216f1..fac124a7e1c5 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -227,7 +227,7 @@ static node_registration_func_t __hugetlb_unregister_node;
 static inline bool hugetlb_register_node(struct node *node)
 {
 	if (__hugetlb_register_node &&
-			node_state(node->dev.id, N_HIGH_MEMORY)) {
+			node_state(node->dev.id, N_MEMORY)) {
 		__hugetlb_register_node(node);
 		return true;
 	}
@@ -252,6 +252,24 @@ static inline void hugetlb_register_node(struct node *node) {}
 static inline void hugetlb_unregister_node(struct node *node) {}
 #endif
 
+static void node_device_release(struct device *dev)
+{
+	struct node *node = to_node(dev);
+
+#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HUGETLBFS)
+	/*
+	 * We schedule the work only when a memory section is
+	 * onlined/offlined on this node. When we come here,
+	 * all the memory on this node has been offlined,
+	 * so we won't enqueue new work to this work.
+	 *
+	 * The work is using node->node_work, so we should
+	 * flush work before freeing the memory.
+	 */
+	flush_work(&node->node_work);
+#endif
+	kfree(node);
+}
 
 /*
  * register_node - Setup a sysfs device for a node.
@@ -259,12 +277,13 @@ static inline void hugetlb_unregister_node(struct node *node) {}
 *
 * Initialize and register the node device.
 */
-int register_node(struct node *node, int num, struct node *parent)
+static int register_node(struct node *node, int num, struct node *parent)
 {
 	int error;
 
 	node->dev.id = num;
 	node->dev.bus = &node_subsys;
+	node->dev.release = node_device_release;
 	error = device_register(&node->dev);
 
 	if (!error){
@@ -306,7 +325,7 @@ void unregister_node(struct node *node)
 	device_unregister(&node->dev);
 }
 
-struct node node_devices[MAX_NUMNODES];
+struct node *node_devices[MAX_NUMNODES];
 
 /*
  * register cpu under node
@@ -323,15 +342,15 @@ int register_cpu_under_node(unsigned int cpu, unsigned int nid)
 	if (!obj)
 		return 0;
 
-	ret = sysfs_create_link(&node_devices[nid].dev.kobj,
+	ret = sysfs_create_link(&node_devices[nid]->dev.kobj,
 				&obj->kobj,
 				kobject_name(&obj->kobj));
 	if (ret)
 		return ret;
 
 	return sysfs_create_link(&obj->kobj,
-				 &node_devices[nid].dev.kobj,
-				 kobject_name(&node_devices[nid].dev.kobj));
+				 &node_devices[nid]->dev.kobj,
+				 kobject_name(&node_devices[nid]->dev.kobj));
 }
 
 int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
@@ -345,10 +364,10 @@ int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
 	if (!obj)
 		return 0;
 
-	sysfs_remove_link(&node_devices[nid].dev.kobj,
+	sysfs_remove_link(&node_devices[nid]->dev.kobj,
 			  kobject_name(&obj->kobj));
 	sysfs_remove_link(&obj->kobj,
-			  kobject_name(&node_devices[nid].dev.kobj));
+			  kobject_name(&node_devices[nid]->dev.kobj));
 
 	return 0;
 }
@@ -390,15 +409,15 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, int nid)
 			continue;
 		if (page_nid != nid)
 			continue;
-		ret = sysfs_create_link_nowarn(&node_devices[nid].dev.kobj,
+		ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
 					&mem_blk->dev.kobj,
 					kobject_name(&mem_blk->dev.kobj));
 		if (ret)
 			return ret;
 
 		return sysfs_create_link_nowarn(&mem_blk->dev.kobj,
-				&node_devices[nid].dev.kobj,
-				kobject_name(&node_devices[nid].dev.kobj));
+				&node_devices[nid]->dev.kobj,
+				kobject_name(&node_devices[nid]->dev.kobj));
 	}
 	/* mem section does not span the specified node */
 	return 0;
@@ -431,10 +450,10 @@ int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
 			continue;
 		if (node_test_and_set(nid, *unlinked_nodes))
 			continue;
-		sysfs_remove_link(&node_devices[nid].dev.kobj,
+		sysfs_remove_link(&node_devices[nid]->dev.kobj,
 			 kobject_name(&mem_blk->dev.kobj));
 		sysfs_remove_link(&mem_blk->dev.kobj,
-			 kobject_name(&node_devices[nid].dev.kobj));
+			 kobject_name(&node_devices[nid]->dev.kobj));
 	}
 	NODEMASK_FREE(unlinked_nodes);
 	return 0;
@@ -500,7 +519,7 @@ static void node_hugetlb_work(struct work_struct *work)
 
 static void init_node_hugetlb_work(int nid)
 {
-	INIT_WORK(&node_devices[nid].node_work, node_hugetlb_work);
+	INIT_WORK(&node_devices[nid]->node_work, node_hugetlb_work);
 }
 
 static int node_memory_callback(struct notifier_block *self,
@@ -517,7 +536,7 @@ static int node_memory_callback(struct notifier_block *self,
 		 * when transitioning to/from memoryless state.
 		 */
 		if (nid != NUMA_NO_NODE)
-			schedule_work(&node_devices[nid].node_work);
+			schedule_work(&node_devices[nid]->node_work);
 		break;
 
 	case MEM_GOING_ONLINE:
@@ -558,9 +577,13 @@ int register_one_node(int nid)
 		struct node *parent = NULL;
 
 		if (p_node != nid)
-			parent = &node_devices[p_node];
+			parent = node_devices[p_node];
+
+		node_devices[nid] = kzalloc(sizeof(struct node), GFP_KERNEL);
+		if (!node_devices[nid])
+			return -ENOMEM;
 
-		error = register_node(&node_devices[nid], nid, parent);
+		error = register_node(node_devices[nid], nid, parent);
 
 		/* link cpu under this node */
 		for_each_present_cpu(cpu) {
@@ -581,7 +604,8 @@
 
 void unregister_one_node(int nid)
 {
-	unregister_node(&node_devices[nid]);
+	unregister_node(node_devices[nid]);
+	node_devices[nid] = NULL;
 }
 
 /*
@@ -614,23 +638,29 @@ static ssize_t show_node_state(struct device *dev,
 	{ __ATTR(name, 0444, show_node_state, NULL), state }
 
 static struct node_attr node_state_attr[] = {
-	_NODE_ATTR(possible, N_POSSIBLE),
-	_NODE_ATTR(online, N_ONLINE),
-	_NODE_ATTR(has_normal_memory, N_NORMAL_MEMORY),
-	_NODE_ATTR(has_cpu, N_CPU),
+	[N_POSSIBLE] = _NODE_ATTR(possible, N_POSSIBLE),
+	[N_ONLINE] = _NODE_ATTR(online, N_ONLINE),
+	[N_NORMAL_MEMORY] = _NODE_ATTR(has_normal_memory, N_NORMAL_MEMORY),
 #ifdef CONFIG_HIGHMEM
-	_NODE_ATTR(has_high_memory, N_HIGH_MEMORY),
+	[N_HIGH_MEMORY] = _NODE_ATTR(has_high_memory, N_HIGH_MEMORY),
 #endif
+#ifdef CONFIG_MOVABLE_NODE
+	[N_MEMORY] = _NODE_ATTR(has_memory, N_MEMORY),
+#endif
+	[N_CPU] = _NODE_ATTR(has_cpu, N_CPU),
 };
 
 static struct attribute *node_state_attrs[] = {
-	&node_state_attr[0].attr.attr,
-	&node_state_attr[1].attr.attr,
-	&node_state_attr[2].attr.attr,
-	&node_state_attr[3].attr.attr,
+	&node_state_attr[N_POSSIBLE].attr.attr,
+	&node_state_attr[N_ONLINE].attr.attr,
+	&node_state_attr[N_NORMAL_MEMORY].attr.attr,
 #ifdef CONFIG_HIGHMEM
-	&node_state_attr[4].attr.attr,
+	&node_state_attr[N_HIGH_MEMORY].attr.attr,
+#endif
+#ifdef CONFIG_MOVABLE_NODE
+	&node_state_attr[N_MEMORY].attr.attr,
 #endif
+	&node_state_attr[N_CPU].attr.attr,
 	NULL
 };
 
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 8727e9c5eea4..c0b8df38402b 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -21,6 +21,7 @@
 #include <linux/slab.h>
 #include <linux/pm_runtime.h>
 #include <linux/idr.h>
+#include <linux/acpi.h>
 
 #include "base.h"
 #include "power/power.h"
@@ -44,7 +45,7 @@ EXPORT_SYMBOL_GPL(platform_bus);
 * be setup before the platform_notifier is called. So if a user needs to
 * manipulate any relevant information in the pdev_archdata they can do:
 *
-*	platform_devic_alloc()
+*	platform_device_alloc()
 *	... manipulate ...
 *	platform_device_add()
 *
@@ -83,9 +84,16 @@ EXPORT_SYMBOL_GPL(platform_get_resource);
 */
 int platform_get_irq(struct platform_device *dev, unsigned int num)
 {
+#ifdef CONFIG_SPARC
+	/* sparc does not have irqs represented as IORESOURCE_IRQ resources */
+	if (!dev || num >= dev->archdata.num_irqs)
+		return -ENXIO;
+	return dev->archdata.irqs[num];
+#else
 	struct resource *r = platform_get_resource(dev, IORESOURCE_IRQ, num);
 
 	return r ? r->start : -ENXIO;
+#endif
 }
 EXPORT_SYMBOL_GPL(platform_get_irq);
 
@@ -115,7 +123,7 @@ struct resource *platform_get_resource_byname(struct platform_device *dev,
 EXPORT_SYMBOL_GPL(platform_get_resource_byname);
 
 /**
- * platform_get_irq - get an IRQ for a device
+ * platform_get_irq_byname - get an IRQ for a device by name
 * @dev: platform device
 * @name: IRQ name
 */
@@ -429,6 +437,7 @@ struct platform_device *platform_device_register_full(
 		goto err_alloc;
 
 	pdev->dev.parent = pdevinfo->parent;
+	ACPI_HANDLE_SET(&pdev->dev, pdevinfo->acpi_node.handle);
 
 	if (pdevinfo->dma_mask) {
 		/*
@@ -459,6 +468,7 @@
 	ret = platform_device_add(pdev);
 	if (ret) {
 err:
+		ACPI_HANDLE_SET(&pdev->dev, NULL);
 		kfree(pdev->dev.dma_mask);
 
 err_alloc:
@@ -474,8 +484,16 @@ static int platform_drv_probe(struct device *_dev)
 {
 	struct platform_driver *drv = to_platform_driver(_dev->driver);
 	struct platform_device *dev = to_platform_device(_dev);
+	int ret;
+
+	if (ACPI_HANDLE(_dev))
+		acpi_dev_pm_attach(_dev, true);
 
-	return drv->probe(dev);
+	ret = drv->probe(dev);
+	if (ret && ACPI_HANDLE(_dev))
+		acpi_dev_pm_detach(_dev, true);
+
+	return ret;
 }
 
 static int platform_drv_probe_fail(struct device *_dev)
@@ -487,8 +505,13 @@ static int platform_drv_remove(struct device *_dev)
 {
 	struct platform_driver *drv = to_platform_driver(_dev->driver);
 	struct platform_device *dev = to_platform_device(_dev);
+	int ret;
 
-	return drv->remove(dev);
+	ret = drv->remove(dev);
+	if (ACPI_HANDLE(_dev))
+		acpi_dev_pm_detach(_dev, true);
+
+	return ret;
 }
 
 static void platform_drv_shutdown(struct device *_dev)
@@ -497,6 +520,8 @@ static void platform_drv_shutdown(struct device *_dev)
 	struct platform_device *dev = to_platform_device(_dev);
 
 	drv->shutdown(dev);
+	if (ACPI_HANDLE(_dev))
+		acpi_dev_pm_detach(_dev, true);
 }
 
 /**
@@ -702,6 +727,10 @@ static int platform_match(struct device *dev, struct device_driver *drv)
 	if (of_driver_match_device(dev, drv))
 		return 1;
 
+	/* Then try ACPI style match */
+	if (acpi_driver_match_device(dev, drv))
+		return 1;
+
 	/* Then try to match against the id table */
 	if (pdrv->id_table)
 		return platform_match_id(pdrv->id_table, pdev) != NULL;
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index eb78e9640c4a..9d8fde709390 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -99,7 +99,7 @@ static void __pm_clk_remove(struct pm_clock_entry *ce)
 
 	if (ce->status < PCE_STATUS_ERROR) {
 		if (ce->status == PCE_STATUS_ENABLED)
-			clk_disable(ce->clk);
+			clk_disable_unprepare(ce->clk);
 
 		if (ce->status >= PCE_STATUS_ACQUIRED)
 			clk_put(ce->clk);
@@ -396,7 +396,7 @@ static void enable_clock(struct device *dev, const char *con_id)
 
 	clk = clk_get(dev, con_id);
 	if (!IS_ERR(clk)) {
-		clk_enable(clk);
+		clk_prepare_enable(clk);
 		clk_put(clk);
 		dev_info(dev, "Runtime PM disabled, clock forced on.\n");
 	}
@@ -413,7 +413,7 @@ static void disable_clock(struct device *dev, const char *con_id)
 
 	clk = clk_get(dev, con_id);
 	if (!IS_ERR(clk)) {
-		clk_disable(clk);
+		clk_disable_unprepare(clk);
 		clk_put(clk);
 		dev_info(dev, "Runtime PM disabled, clock forced off.\n");
 	}
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index c22b869245d9..acc3a8ded29d 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -470,10 +470,19 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
 		return -EBUSY;
 
 	not_suspended = 0;
-	list_for_each_entry(pdd, &genpd->dev_list, list_node)
+	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+		enum pm_qos_flags_status stat;
+
+		stat = dev_pm_qos_flags(pdd->dev,
+					PM_QOS_FLAG_NO_POWER_OFF
+						| PM_QOS_FLAG_REMOTE_WAKEUP);
+		if (stat > PM_QOS_FLAGS_NONE)
+			return -EBUSY;
+
 		if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
 		    || pdd->dev->power.irq_safe))
 			not_suspended++;
+	}
 
 	if (not_suspended > genpd->in_progress)
 		return -EBUSY;
@@ -1862,7 +1871,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
 	cpuidle_drv = cpuidle_driver_ref();
 	if (!cpuidle_drv) {
 		ret = -ENODEV;
-		goto out;
+		goto err_drv;
 	}
 	if (cpuidle_drv->state_count <= state) {
 		ret = -EINVAL;
@@ -1884,6 +1893,9 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
 
  err:
 	cpuidle_driver_unref();
+
+ err_drv:
+	kfree(cpu_data);
 	goto out;
 }
 
1889 1901
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index d9468642fc41..50b2831e027d 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -23,6 +23,7 @@
23#include <linux/rcupdate.h> 23#include <linux/rcupdate.h>
24#include <linux/opp.h> 24#include <linux/opp.h>
25#include <linux/of.h> 25#include <linux/of.h>
26#include <linux/export.h>
26 27
27/* 28/*
28 * Internal data structure organization with the OPP layer library is as 29 * Internal data structure organization with the OPP layer library is as
@@ -65,6 +66,7 @@ struct opp {
65 unsigned long u_volt; 66 unsigned long u_volt;
66 67
67 struct device_opp *dev_opp; 68 struct device_opp *dev_opp;
69 struct rcu_head head;
68}; 70};
69 71
70/** 72/**
@@ -160,6 +162,7 @@ unsigned long opp_get_voltage(struct opp *opp)
160 162
161 return v; 163 return v;
162} 164}
165EXPORT_SYMBOL(opp_get_voltage);
163 166
164/** 167/**
165 * opp_get_freq() - Gets the frequency corresponding to an available opp 168 * opp_get_freq() - Gets the frequency corresponding to an available opp
@@ -189,6 +192,7 @@ unsigned long opp_get_freq(struct opp *opp)
189 192
190 return f; 193 return f;
191} 194}
195EXPORT_SYMBOL(opp_get_freq);
192 196
193/** 197/**
194 * opp_get_opp_count() - Get number of opps available in the opp list 198 * opp_get_opp_count() - Get number of opps available in the opp list
@@ -221,6 +225,7 @@ int opp_get_opp_count(struct device *dev)
221 225
222 return count; 226 return count;
223} 227}
228EXPORT_SYMBOL(opp_get_opp_count);
224 229
225/** 230/**
226 * opp_find_freq_exact() - search for an exact frequency 231 * opp_find_freq_exact() - search for an exact frequency
@@ -230,7 +235,10 @@ int opp_get_opp_count(struct device *dev)
230 * 235 *
231 * Searches for exact match in the opp list and returns pointer to the matching 236 * Searches for exact match in the opp list and returns pointer to the matching
232 * opp if found, else returns ERR_PTR in case of error and should be handled 237 * opp if found, else returns ERR_PTR in case of error and should be handled
233 * using IS_ERR. 238 * using IS_ERR. Error return values can be:
239 * EINVAL: for bad pointer
240 * ERANGE: no match found for search
241 * ENODEV: if device not found in list of registered devices
234 * 242 *
235 * Note: available is a modifier for the search. If available=true, then the 243 * Note: available is a modifier for the search. If available=true, then the
236 * match is for exact matching frequency and is available in the stored OPP 244 * match is for exact matching frequency and is available in the stored OPP
@@ -249,7 +257,7 @@ struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
249 bool available) 257 bool available)
250{ 258{
251 struct device_opp *dev_opp; 259 struct device_opp *dev_opp;
252 struct opp *temp_opp, *opp = ERR_PTR(-ENODEV); 260 struct opp *temp_opp, *opp = ERR_PTR(-ERANGE);
253 261
254 dev_opp = find_device_opp(dev); 262 dev_opp = find_device_opp(dev);
255 if (IS_ERR(dev_opp)) { 263 if (IS_ERR(dev_opp)) {
@@ -268,6 +276,7 @@ struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
268 276
269 return opp; 277 return opp;
270} 278}
279EXPORT_SYMBOL(opp_find_freq_exact);
271 280
272/** 281/**
273 * opp_find_freq_ceil() - Search for a rounded ceil freq 282 * opp_find_freq_ceil() - Search for a rounded ceil freq
@@ -278,7 +287,11 @@ struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
278 * for a device. 287 * for a device.
279 * 288 *
280 * Returns matching *opp and refreshes *freq accordingly, else returns 289 * Returns matching *opp and refreshes *freq accordingly, else returns
281 * ERR_PTR in case of error and should be handled using IS_ERR. 290 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
291 * values can be:
292 * EINVAL: for bad pointer
293 * ERANGE: no match found for search
294 * ENODEV: if device not found in list of registered devices
282 * 295 *
283 * Locking: This function must be called under rcu_read_lock(). opp is a rcu 296 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
284 * protected pointer. The reason for the same is that the opp pointer which is 297 * protected pointer. The reason for the same is that the opp pointer which is
@@ -289,7 +302,7 @@ struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
289struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq) 302struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
290{ 303{
291 struct device_opp *dev_opp; 304 struct device_opp *dev_opp;
292 struct opp *temp_opp, *opp = ERR_PTR(-ENODEV); 305 struct opp *temp_opp, *opp = ERR_PTR(-ERANGE);
293 306
294 if (!dev || !freq) { 307 if (!dev || !freq) {
295 dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); 308 dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
@@ -298,7 +311,7 @@ struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
298 311
299 dev_opp = find_device_opp(dev); 312 dev_opp = find_device_opp(dev);
300 if (IS_ERR(dev_opp)) 313 if (IS_ERR(dev_opp))
301 return opp; 314 return ERR_CAST(dev_opp);
302 315
303 list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { 316 list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
304 if (temp_opp->available && temp_opp->rate >= *freq) { 317 if (temp_opp->available && temp_opp->rate >= *freq) {
@@ -310,6 +323,7 @@ struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
310 323
311 return opp; 324 return opp;
312} 325}
326EXPORT_SYMBOL(opp_find_freq_ceil);
313 327
314/** 328/**
315 * opp_find_freq_floor() - Search for a rounded floor freq 329 * opp_find_freq_floor() - Search for a rounded floor freq
@@ -320,7 +334,11 @@ struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
320 * for a device. 334 * for a device.
321 * 335 *
322 * Returns matching *opp and refreshes *freq accordingly, else returns 336 * Returns matching *opp and refreshes *freq accordingly, else returns
323 * ERR_PTR in case of error and should be handled using IS_ERR. 337 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
338 * values can be:
339 * EINVAL: for bad pointer
340 * ERANGE: no match found for search
341 * ENODEV: if device not found in list of registered devices
324 * 342 *
325 * Locking: This function must be called under rcu_read_lock(). opp is a rcu 343 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
326 * protected pointer. The reason for the same is that the opp pointer which is 344 * protected pointer. The reason for the same is that the opp pointer which is
@@ -331,7 +349,7 @@ struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
331struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq) 349struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)
332{ 350{
333 struct device_opp *dev_opp; 351 struct device_opp *dev_opp;
334 struct opp *temp_opp, *opp = ERR_PTR(-ENODEV); 352 struct opp *temp_opp, *opp = ERR_PTR(-ERANGE);
335 353
336 if (!dev || !freq) { 354 if (!dev || !freq) {
337 dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); 355 dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
@@ -340,7 +358,7 @@ struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)
340 358
341 dev_opp = find_device_opp(dev); 359 dev_opp = find_device_opp(dev);
342 if (IS_ERR(dev_opp)) 360 if (IS_ERR(dev_opp))
343 return opp; 361 return ERR_CAST(dev_opp);
344 362
345 list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { 363 list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
346 if (temp_opp->available) { 364 if (temp_opp->available) {
@@ -356,6 +374,7 @@ struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)
356 374
357 return opp; 375 return opp;
358} 376}
377EXPORT_SYMBOL(opp_find_freq_floor);
359 378
360/** 379/**
361 * opp_add() - Add an OPP table from a table definitions 380 * opp_add() - Add an OPP table from a table definitions
@@ -512,7 +531,7 @@ static int opp_set_availability(struct device *dev, unsigned long freq,
512 531
513 list_replace_rcu(&opp->node, &new_opp->node); 532 list_replace_rcu(&opp->node, &new_opp->node);
514 mutex_unlock(&dev_opp_list_lock); 533 mutex_unlock(&dev_opp_list_lock);
515 synchronize_rcu(); 534 kfree_rcu(opp, head);
516 535
517 /* Notify the change of the OPP availability */ 536 /* Notify the change of the OPP availability */
518 if (availability_req) 537 if (availability_req)
@@ -522,13 +541,10 @@ static int opp_set_availability(struct device *dev, unsigned long freq,
522 srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_DISABLE, 541 srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_DISABLE,
523 new_opp); 542 new_opp);
524 543
525 /* clean up old opp */ 544 return 0;
526 new_opp = opp;
527 goto out;
528 545
529unlock: 546unlock:
530 mutex_unlock(&dev_opp_list_lock); 547 mutex_unlock(&dev_opp_list_lock);
531out:
532 kfree(new_opp); 548 kfree(new_opp);
533 return r; 549 return r;
534} 550}
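
Replacing the synchronize_rcu()/kfree() pair with kfree_rcu() means opp_set_availability() no longer blocks for a full grace period; the only cost is the rcu_head added to struct opp above. The pattern in generic form, a sketch with stand-in names:

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	struct list_head node;
	int data;
	struct rcu_head head;	/* storage used by kfree_rcu() */
};

/* Sketch: publish 'new' in place of 'old', free 'old' after a grace period. */
static void foo_replace(struct foo *old, struct foo *new)
{
	list_replace_rcu(&old->node, &new->node);
	/* No synchronize_rcu(): the free is deferred, not the caller */
	kfree_rcu(old, head);
}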
@@ -552,6 +568,7 @@ int opp_enable(struct device *dev, unsigned long freq)
552{ 568{
553 return opp_set_availability(dev, freq, true); 569 return opp_set_availability(dev, freq, true);
554} 570}
571EXPORT_SYMBOL(opp_enable);
555 572
556/** 573/**
557 * opp_disable() - Disable a specific OPP 574 * opp_disable() - Disable a specific OPP
@@ -573,6 +590,7 @@ int opp_disable(struct device *dev, unsigned long freq)
573{ 590{
574 return opp_set_availability(dev, freq, false); 591 return opp_set_availability(dev, freq, false);
575} 592}
593EXPORT_SYMBOL(opp_disable);
576 594
577#ifdef CONFIG_CPU_FREQ 595#ifdef CONFIG_CPU_FREQ
578/** 596/**
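
Taken together, the opp.c changes let modular cpufreq/devfreq code call the lookup helpers and tell the failure modes apart (-EINVAL, -ENODEV, -ERANGE). A sketch of a typical caller under the documented locking rules (foo_volt_for_freq is illustrative):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/opp.h>
#include <linux/rcupdate.h>

/*
 * Sketch: look up the lowest OPP at or above *freq. The opp pointer
 * is only valid inside the RCU read-side critical section.
 */
static int foo_volt_for_freq(struct device *dev, unsigned long *freq,
			     unsigned long *u_volt)
{
	struct opp *opp;

	rcu_read_lock();
	opp = opp_find_freq_ceil(dev, freq);
	if (IS_ERR(opp)) {
		rcu_read_unlock();
		/* -EINVAL: bad args, -ENODEV: no table, -ERANGE: no match */
		return PTR_ERR(opp);
	}
	*u_volt = opp_get_voltage(opp);
	rcu_read_unlock();

	return 0;
}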
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index 0dbfdf4419af..b16686a0a5a2 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -93,8 +93,10 @@ extern void dpm_sysfs_remove(struct device *dev);
93extern void rpm_sysfs_remove(struct device *dev); 93extern void rpm_sysfs_remove(struct device *dev);
94extern int wakeup_sysfs_add(struct device *dev); 94extern int wakeup_sysfs_add(struct device *dev);
95extern void wakeup_sysfs_remove(struct device *dev); 95extern void wakeup_sysfs_remove(struct device *dev);
96extern int pm_qos_sysfs_add(struct device *dev); 96extern int pm_qos_sysfs_add_latency(struct device *dev);
97extern void pm_qos_sysfs_remove(struct device *dev); 97extern void pm_qos_sysfs_remove_latency(struct device *dev);
98extern int pm_qos_sysfs_add_flags(struct device *dev);
99extern void pm_qos_sysfs_remove_flags(struct device *dev);
98 100
99#else /* CONFIG_PM */ 101#else /* CONFIG_PM */
100 102
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 74a67e0019a2..ff46387f5308 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -40,6 +40,7 @@
40#include <linux/device.h> 40#include <linux/device.h>
41#include <linux/mutex.h> 41#include <linux/mutex.h>
42#include <linux/export.h> 42#include <linux/export.h>
43#include <linux/pm_runtime.h>
43 44
44#include "power.h" 45#include "power.h"
45 46
@@ -48,6 +49,50 @@ static DEFINE_MUTEX(dev_pm_qos_mtx);
48static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers); 49static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
49 50
50/** 51/**
52 * __dev_pm_qos_flags - Check PM QoS flags for a given device.
53 * @dev: Device to check the PM QoS flags for.
54 * @mask: Flags to check against.
55 *
56 * This routine must be called with dev->power.lock held.
57 */
58enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
59{
60 struct dev_pm_qos *qos = dev->power.qos;
61 struct pm_qos_flags *pqf;
62 s32 val;
63
64 if (!qos)
65 return PM_QOS_FLAGS_UNDEFINED;
66
67 pqf = &qos->flags;
68 if (list_empty(&pqf->list))
69 return PM_QOS_FLAGS_UNDEFINED;
70
71 val = pqf->effective_flags & mask;
72 if (val)
73 return (val == mask) ? PM_QOS_FLAGS_ALL : PM_QOS_FLAGS_SOME;
74
75 return PM_QOS_FLAGS_NONE;
76}
77
78/**
79 * dev_pm_qos_flags - Check PM QoS flags for a given device (locked).
80 * @dev: Device to check the PM QoS flags for.
81 * @mask: Flags to check against.
82 */
83enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask)
84{
85 unsigned long irqflags;
86 enum pm_qos_flags_status ret;
87
88 spin_lock_irqsave(&dev->power.lock, irqflags);
89 ret = __dev_pm_qos_flags(dev, mask);
90 spin_unlock_irqrestore(&dev->power.lock, irqflags);
91
92 return ret;
93}
94
95/**
51 * __dev_pm_qos_read_value - Get PM QoS constraint for a given device. 96 * __dev_pm_qos_read_value - Get PM QoS constraint for a given device.
52 * @dev: Device to get the PM QoS constraint value for. 97 * @dev: Device to get the PM QoS constraint value for.
53 * 98 *
@@ -55,9 +100,7 @@ static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
55 */ 100 */
56s32 __dev_pm_qos_read_value(struct device *dev) 101s32 __dev_pm_qos_read_value(struct device *dev)
57{ 102{
58 struct pm_qos_constraints *c = dev->power.constraints; 103 return dev->power.qos ? pm_qos_read_value(&dev->power.qos->latency) : 0;
59
60 return c ? pm_qos_read_value(c) : 0;
61} 104}
62 105
63/** 106/**
@@ -76,30 +119,39 @@ s32 dev_pm_qos_read_value(struct device *dev)
76 return ret; 119 return ret;
77} 120}
78 121
79/* 122/**
80 * apply_constraint 123 * apply_constraint - Add/modify/remove device PM QoS request.
81 * @req: constraint request to apply 124 * @req: Constraint request to apply
82 * @action: action to perform add/update/remove, of type enum pm_qos_req_action 125 * @action: Action to perform (add/update/remove).
83 * @value: defines the qos request 126 * @value: Value to assign to the QoS request.
84 * 127 *
85 * Internal function to update the constraints list using the PM QoS core 128 * Internal function to update the constraints list using the PM QoS core
86 * code and if needed call the per-device and the global notification 129 * code and if needed call the per-device and the global notification
87 * callbacks 130 * callbacks
88 */ 131 */
89static int apply_constraint(struct dev_pm_qos_request *req, 132static int apply_constraint(struct dev_pm_qos_request *req,
90 enum pm_qos_req_action action, int value) 133 enum pm_qos_req_action action, s32 value)
91{ 134{
92 int ret, curr_value; 135 struct dev_pm_qos *qos = req->dev->power.qos;
93 136 int ret;
94 ret = pm_qos_update_target(req->dev->power.constraints,
95 &req->node, action, value);
96 137
97 if (ret) { 138 switch(req->type) {
98 /* Call the global callbacks if needed */ 139 case DEV_PM_QOS_LATENCY:
99 curr_value = pm_qos_read_value(req->dev->power.constraints); 140 ret = pm_qos_update_target(&qos->latency, &req->data.pnode,
100 blocking_notifier_call_chain(&dev_pm_notifiers, 141 action, value);
101 (unsigned long)curr_value, 142 if (ret) {
102 req); 143 value = pm_qos_read_value(&qos->latency);
144 blocking_notifier_call_chain(&dev_pm_notifiers,
145 (unsigned long)value,
146 req);
147 }
148 break;
149 case DEV_PM_QOS_FLAGS:
150 ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
151 action, value);
152 break;
153 default:
154 ret = -EINVAL;
103 } 155 }
104 156
105 return ret; 157 return ret;
@@ -114,28 +166,32 @@ static int apply_constraint(struct dev_pm_qos_request *req,
114 */ 166 */
115static int dev_pm_qos_constraints_allocate(struct device *dev) 167static int dev_pm_qos_constraints_allocate(struct device *dev)
116{ 168{
169 struct dev_pm_qos *qos;
117 struct pm_qos_constraints *c; 170 struct pm_qos_constraints *c;
118 struct blocking_notifier_head *n; 171 struct blocking_notifier_head *n;
119 172
120 c = kzalloc(sizeof(*c), GFP_KERNEL); 173 qos = kzalloc(sizeof(*qos), GFP_KERNEL);
121 if (!c) 174 if (!qos)
122 return -ENOMEM; 175 return -ENOMEM;
123 176
124 n = kzalloc(sizeof(*n), GFP_KERNEL); 177 n = kzalloc(sizeof(*n), GFP_KERNEL);
125 if (!n) { 178 if (!n) {
126 kfree(c); 179 kfree(qos);
127 return -ENOMEM; 180 return -ENOMEM;
128 } 181 }
129 BLOCKING_INIT_NOTIFIER_HEAD(n); 182 BLOCKING_INIT_NOTIFIER_HEAD(n);
130 183
184 c = &qos->latency;
131 plist_head_init(&c->list); 185 plist_head_init(&c->list);
132 c->target_value = PM_QOS_DEV_LAT_DEFAULT_VALUE; 186 c->target_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
133 c->default_value = PM_QOS_DEV_LAT_DEFAULT_VALUE; 187 c->default_value = PM_QOS_DEV_LAT_DEFAULT_VALUE;
134 c->type = PM_QOS_MIN; 188 c->type = PM_QOS_MIN;
135 c->notifiers = n; 189 c->notifiers = n;
136 190
191 INIT_LIST_HEAD(&qos->flags.list);
192
137 spin_lock_irq(&dev->power.lock); 193 spin_lock_irq(&dev->power.lock);
138 dev->power.constraints = c; 194 dev->power.qos = qos;
139 spin_unlock_irq(&dev->power.lock); 195 spin_unlock_irq(&dev->power.lock);
140 196
141 return 0; 197 return 0;
@@ -151,7 +207,7 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
151void dev_pm_qos_constraints_init(struct device *dev) 207void dev_pm_qos_constraints_init(struct device *dev)
152{ 208{
153 mutex_lock(&dev_pm_qos_mtx); 209 mutex_lock(&dev_pm_qos_mtx);
154 dev->power.constraints = NULL; 210 dev->power.qos = NULL;
155 dev->power.power_state = PMSG_ON; 211 dev->power.power_state = PMSG_ON;
156 mutex_unlock(&dev_pm_qos_mtx); 212 mutex_unlock(&dev_pm_qos_mtx);
157} 213}
@@ -164,24 +220,28 @@ void dev_pm_qos_constraints_init(struct device *dev)
164 */ 220 */
165void dev_pm_qos_constraints_destroy(struct device *dev) 221void dev_pm_qos_constraints_destroy(struct device *dev)
166{ 222{
223 struct dev_pm_qos *qos;
167 struct dev_pm_qos_request *req, *tmp; 224 struct dev_pm_qos_request *req, *tmp;
168 struct pm_qos_constraints *c; 225 struct pm_qos_constraints *c;
226 struct pm_qos_flags *f;
169 227
170 /* 228 /*
171 * If the device's PM QoS resume latency limit has been exposed to user 229 * If the device's PM QoS resume latency limit or PM QoS flags have been
172 * space, it has to be hidden at this point. 230 * exposed to user space, they have to be hidden at this point.
173 */ 231 */
174 dev_pm_qos_hide_latency_limit(dev); 232 dev_pm_qos_hide_latency_limit(dev);
233 dev_pm_qos_hide_flags(dev);
175 234
176 mutex_lock(&dev_pm_qos_mtx); 235 mutex_lock(&dev_pm_qos_mtx);
177 236
178 dev->power.power_state = PMSG_INVALID; 237 dev->power.power_state = PMSG_INVALID;
179 c = dev->power.constraints; 238 qos = dev->power.qos;
180 if (!c) 239 if (!qos)
181 goto out; 240 goto out;
182 241
183 /* Flush the constraints list for the device */ 242 /* Flush the constraints lists for the device. */
184 plist_for_each_entry_safe(req, tmp, &c->list, node) { 243 c = &qos->latency;
244 plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
185 /* 245 /*
186 * Update constraints list and call the notification 246 * Update constraints list and call the notification
187 * callbacks if needed 247 * callbacks if needed
@@ -189,13 +249,18 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
189 apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); 249 apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
190 memset(req, 0, sizeof(*req)); 250 memset(req, 0, sizeof(*req));
191 } 251 }
252 f = &qos->flags;
253 list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
254 apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
255 memset(req, 0, sizeof(*req));
256 }
192 257
193 spin_lock_irq(&dev->power.lock); 258 spin_lock_irq(&dev->power.lock);
194 dev->power.constraints = NULL; 259 dev->power.qos = NULL;
195 spin_unlock_irq(&dev->power.lock); 260 spin_unlock_irq(&dev->power.lock);
196 261
197 kfree(c->notifiers); 262 kfree(c->notifiers);
198 kfree(c); 263 kfree(qos);
199 264
200 out: 265 out:
201 mutex_unlock(&dev_pm_qos_mtx); 266 mutex_unlock(&dev_pm_qos_mtx);
@@ -205,6 +270,7 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
205 * dev_pm_qos_add_request - inserts new qos request into the list 270 * dev_pm_qos_add_request - inserts new qos request into the list
206 * @dev: target device for the constraint 271 * @dev: target device for the constraint
207 * @req: pointer to a preallocated handle 272 * @req: pointer to a preallocated handle
273 * @type: type of the request
208 * @value: defines the qos request 274 * @value: defines the qos request
209 * 275 *
210 * This function inserts a new entry in the device constraints list of 276 * This function inserts a new entry in the device constraints list of
@@ -218,9 +284,12 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
218 * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory 284 * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory
219 * to allocate for data structures, -ENODEV if the device has just been removed 285 * to allocate for data structures, -ENODEV if the device has just been removed
220 * from the system. 286 * from the system.
287 *
288 * Callers should ensure that the target device is not RPM_SUSPENDED before
289 * using this function for requests of type DEV_PM_QOS_FLAGS.
221 */ 290 */
222int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, 291int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
223 s32 value) 292 enum dev_pm_qos_req_type type, s32 value)
224{ 293{
225 int ret = 0; 294 int ret = 0;
226 295
@@ -235,7 +304,7 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
235 304
236 mutex_lock(&dev_pm_qos_mtx); 305 mutex_lock(&dev_pm_qos_mtx);
237 306
238 if (!dev->power.constraints) { 307 if (!dev->power.qos) {
239 if (dev->power.power_state.event == PM_EVENT_INVALID) { 308 if (dev->power.power_state.event == PM_EVENT_INVALID) {
240 /* The device has been removed from the system. */ 309 /* The device has been removed from the system. */
241 req->dev = NULL; 310 req->dev = NULL;
@@ -251,8 +320,10 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
251 } 320 }
252 } 321 }
253 322
254 if (!ret) 323 if (!ret) {
324 req->type = type;
255 ret = apply_constraint(req, PM_QOS_ADD_REQ, value); 325 ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
326 }
256 327
257 out: 328 out:
258 mutex_unlock(&dev_pm_qos_mtx); 329 mutex_unlock(&dev_pm_qos_mtx);
@@ -262,6 +333,37 @@ int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
262EXPORT_SYMBOL_GPL(dev_pm_qos_add_request); 333EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
263 334
264/** 335/**
336 * __dev_pm_qos_update_request - Modify an existing device PM QoS request.
337 * @req : PM QoS request to modify.
338 * @new_value: New value to request.
339 */
340static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
341 s32 new_value)
342{
343 s32 curr_value;
344 int ret = 0;
345
346 if (!req->dev->power.qos)
347 return -ENODEV;
348
349 switch(req->type) {
350 case DEV_PM_QOS_LATENCY:
351 curr_value = req->data.pnode.prio;
352 break;
353 case DEV_PM_QOS_FLAGS:
354 curr_value = req->data.flr.flags;
355 break;
356 default:
357 return -EINVAL;
358 }
359
360 if (curr_value != new_value)
361 ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value);
362
363 return ret;
364}
365
366/**
265 * dev_pm_qos_update_request - modifies an existing qos request 367 * dev_pm_qos_update_request - modifies an existing qos request
266 * @req : handle to list element holding a dev_pm_qos request to use 368 * @req : handle to list element holding a dev_pm_qos request to use
267 * @new_value: defines the qos request 369 * @new_value: defines the qos request
@@ -275,11 +377,13 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
275 * 0 if the aggregated constraint value has not changed, 377 * 0 if the aggregated constraint value has not changed,
276 * -EINVAL in case of wrong parameters, -ENODEV if the device has been 378 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
277 * removed from the system 379 * removed from the system
380 *
381 * Callers should ensure that the target device is not RPM_SUSPENDED before
382 * using this function for requests of type DEV_PM_QOS_FLAGS.
278 */ 383 */
279int dev_pm_qos_update_request(struct dev_pm_qos_request *req, 384int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
280 s32 new_value)
281{ 385{
282 int ret = 0; 386 int ret;
283 387
284 if (!req) /*guard against callers passing in null */ 388 if (!req) /*guard against callers passing in null */
285 return -EINVAL; 389 return -EINVAL;
@@ -289,17 +393,9 @@ int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
289 return -EINVAL; 393 return -EINVAL;
290 394
291 mutex_lock(&dev_pm_qos_mtx); 395 mutex_lock(&dev_pm_qos_mtx);
292 396 ret = __dev_pm_qos_update_request(req, new_value);
293 if (req->dev->power.constraints) {
294 if (new_value != req->node.prio)
295 ret = apply_constraint(req, PM_QOS_UPDATE_REQ,
296 new_value);
297 } else {
298 /* Return if the device has been removed */
299 ret = -ENODEV;
300 }
301
302 mutex_unlock(&dev_pm_qos_mtx); 397 mutex_unlock(&dev_pm_qos_mtx);
398
303 return ret; 399 return ret;
304} 400}
305EXPORT_SYMBOL_GPL(dev_pm_qos_update_request); 401EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
@@ -315,6 +411,9 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);
315 * 0 if the aggregated constraint value has not changed, 411 * 0 if the aggregated constraint value has not changed,
316 * -EINVAL in case of wrong parameters, -ENODEV if the device has been 412 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
317 * removed from the system 413 * removed from the system
414 *
415 * Callers should ensure that the target device is not RPM_SUSPENDED before
416 * using this function for requests of type DEV_PM_QOS_FLAGS.
318 */ 417 */
319int dev_pm_qos_remove_request(struct dev_pm_qos_request *req) 418int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
320{ 419{
@@ -329,7 +428,7 @@ int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
329 428
330 mutex_lock(&dev_pm_qos_mtx); 429 mutex_lock(&dev_pm_qos_mtx);
331 430
332 if (req->dev->power.constraints) { 431 if (req->dev->power.qos) {
333 ret = apply_constraint(req, PM_QOS_REMOVE_REQ, 432 ret = apply_constraint(req, PM_QOS_REMOVE_REQ,
334 PM_QOS_DEFAULT_VALUE); 433 PM_QOS_DEFAULT_VALUE);
335 memset(req, 0, sizeof(*req)); 434 memset(req, 0, sizeof(*req));
@@ -362,13 +461,13 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier)
362 461
363 mutex_lock(&dev_pm_qos_mtx); 462 mutex_lock(&dev_pm_qos_mtx);
364 463
365 if (!dev->power.constraints) 464 if (!dev->power.qos)
366 ret = dev->power.power_state.event != PM_EVENT_INVALID ? 465 ret = dev->power.power_state.event != PM_EVENT_INVALID ?
367 dev_pm_qos_constraints_allocate(dev) : -ENODEV; 466 dev_pm_qos_constraints_allocate(dev) : -ENODEV;
368 467
369 if (!ret) 468 if (!ret)
370 ret = blocking_notifier_chain_register( 469 ret = blocking_notifier_chain_register(
371 dev->power.constraints->notifiers, notifier); 470 dev->power.qos->latency.notifiers, notifier);
372 471
373 mutex_unlock(&dev_pm_qos_mtx); 472 mutex_unlock(&dev_pm_qos_mtx);
374 return ret; 473 return ret;
@@ -393,9 +492,9 @@ int dev_pm_qos_remove_notifier(struct device *dev,
393 mutex_lock(&dev_pm_qos_mtx); 492 mutex_lock(&dev_pm_qos_mtx);
394 493
395 /* Silently return if the constraints object is not present. */ 494 /* Silently return if the constraints object is not present. */
396 if (dev->power.constraints) 495 if (dev->power.qos)
397 retval = blocking_notifier_chain_unregister( 496 retval = blocking_notifier_chain_unregister(
398 dev->power.constraints->notifiers, 497 dev->power.qos->latency.notifiers,
399 notifier); 498 notifier);
400 499
401 mutex_unlock(&dev_pm_qos_mtx); 500 mutex_unlock(&dev_pm_qos_mtx);
@@ -449,9 +548,10 @@ int dev_pm_qos_add_ancestor_request(struct device *dev,
449 ancestor = ancestor->parent; 548 ancestor = ancestor->parent;
450 549
451 if (ancestor) 550 if (ancestor)
452 error = dev_pm_qos_add_request(ancestor, req, value); 551 error = dev_pm_qos_add_request(ancestor, req,
552 DEV_PM_QOS_LATENCY, value);
453 553
454 if (error) 554 if (error < 0)
455 req->dev = NULL; 555 req->dev = NULL;
456 556
457 return error; 557 return error;
@@ -459,10 +559,19 @@ int dev_pm_qos_add_ancestor_request(struct device *dev,
459EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request); 559EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
460 560
461#ifdef CONFIG_PM_RUNTIME 561#ifdef CONFIG_PM_RUNTIME
462static void __dev_pm_qos_drop_user_request(struct device *dev) 562static void __dev_pm_qos_drop_user_request(struct device *dev,
563 enum dev_pm_qos_req_type type)
463{ 564{
464 dev_pm_qos_remove_request(dev->power.pq_req); 565 switch(type) {
465 dev->power.pq_req = NULL; 566 case DEV_PM_QOS_LATENCY:
567 dev_pm_qos_remove_request(dev->power.qos->latency_req);
568 dev->power.qos->latency_req = NULL;
569 break;
570 case DEV_PM_QOS_FLAGS:
571 dev_pm_qos_remove_request(dev->power.qos->flags_req);
572 dev->power.qos->flags_req = NULL;
573 break;
574 }
466} 575}
467 576
468/** 577/**
@@ -478,21 +587,21 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
478 if (!device_is_registered(dev) || value < 0) 587 if (!device_is_registered(dev) || value < 0)
479 return -EINVAL; 588 return -EINVAL;
480 589
481 if (dev->power.pq_req) 590 if (dev->power.qos && dev->power.qos->latency_req)
482 return -EEXIST; 591 return -EEXIST;
483 592
484 req = kzalloc(sizeof(*req), GFP_KERNEL); 593 req = kzalloc(sizeof(*req), GFP_KERNEL);
485 if (!req) 594 if (!req)
486 return -ENOMEM; 595 return -ENOMEM;
487 596
488 ret = dev_pm_qos_add_request(dev, req, value); 597 ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY, value);
489 if (ret < 0) 598 if (ret < 0)
490 return ret; 599 return ret;
491 600
492 dev->power.pq_req = req; 601 dev->power.qos->latency_req = req;
493 ret = pm_qos_sysfs_add(dev); 602 ret = pm_qos_sysfs_add_latency(dev);
494 if (ret) 603 if (ret)
495 __dev_pm_qos_drop_user_request(dev); 604 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
496 605
497 return ret; 606 return ret;
498} 607}
@@ -504,10 +613,92 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
504 */ 613 */
505void dev_pm_qos_hide_latency_limit(struct device *dev) 614void dev_pm_qos_hide_latency_limit(struct device *dev)
506{ 615{
507 if (dev->power.pq_req) { 616 if (dev->power.qos && dev->power.qos->latency_req) {
508 pm_qos_sysfs_remove(dev); 617 pm_qos_sysfs_remove_latency(dev);
509 __dev_pm_qos_drop_user_request(dev); 618 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
510 } 619 }
511} 620}
512EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit); 621EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
622
623/**
624 * dev_pm_qos_expose_flags - Expose PM QoS flags of a device to user space.
625 * @dev: Device whose PM QoS flags are to be exposed to user space.
626 * @val: Initial values of the flags.
627 */
628int dev_pm_qos_expose_flags(struct device *dev, s32 val)
629{
630 struct dev_pm_qos_request *req;
631 int ret;
632
633 if (!device_is_registered(dev))
634 return -EINVAL;
635
636 if (dev->power.qos && dev->power.qos->flags_req)
637 return -EEXIST;
638
639 req = kzalloc(sizeof(*req), GFP_KERNEL);
640 if (!req)
641 return -ENOMEM;
642
643 pm_runtime_get_sync(dev);
644 ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
645 if (ret < 0)
646 goto fail;
647
648 dev->power.qos->flags_req = req;
649 ret = pm_qos_sysfs_add_flags(dev);
650 if (ret)
651 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
652
653fail:
654 pm_runtime_put(dev);
655 return ret;
656}
657EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
658
659/**
660 * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
661 * @dev: Device whose PM QoS flags are to be hidden from user space.
662 */
663void dev_pm_qos_hide_flags(struct device *dev)
664{
665 if (dev->power.qos && dev->power.qos->flags_req) {
666 pm_qos_sysfs_remove_flags(dev);
667 pm_runtime_get_sync(dev);
668 __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
669 pm_runtime_put(dev);
670 }
671}
672EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);
673
674/**
675 * dev_pm_qos_update_flags - Update PM QoS flags request owned by user space.
676 * @dev: Device to update the PM QoS flags request for.
677 * @mask: Flags to set/clear.
678 * @set: Whether to set or clear the flags (true means set).
679 */
680int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
681{
682 s32 value;
683 int ret;
684
685 if (!dev->power.qos || !dev->power.qos->flags_req)
686 return -EINVAL;
687
688 pm_runtime_get_sync(dev);
689 mutex_lock(&dev_pm_qos_mtx);
690
691 value = dev_pm_qos_requested_flags(dev);
692 if (set)
693 value |= mask;
694 else
695 value &= ~mask;
696
697 ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);
698
699 mutex_unlock(&dev_pm_qos_mtx);
700 pm_runtime_put(dev);
701
702 return ret;
703}
513#endif /* CONFIG_PM_RUNTIME */ 704#endif /* CONFIG_PM_RUNTIME */
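
With the new type argument, one request API covers both constraint classes: latency users pass DEV_PM_QOS_LATENCY, flags users pass DEV_PM_QOS_FLAGS and, per the comments added above, must keep the device out of RPM_SUSPENDED while the request is added or updated. A sketch (foo_req and foo_pin_power are hypothetical):

#include <linux/device.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>

static struct dev_pm_qos_request foo_req;

/* Sketch: ask the PM domain never to power the device off. */
static int foo_pin_power(struct device *dev)
{
	int ret;

	pm_runtime_get_sync(dev);	/* keep dev out of RPM_SUSPENDED */
	ret = dev_pm_qos_add_request(dev, &foo_req, DEV_PM_QOS_FLAGS,
				     PM_QOS_FLAG_NO_POWER_OFF);
	pm_runtime_put(dev);

	return ret < 0 ? ret : 0;	/* 1 just means the value changed */
}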
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index b91dc6f1e914..50d16e3cb0a9 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -221,7 +221,7 @@ static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show,
221static ssize_t pm_qos_latency_show(struct device *dev, 221static ssize_t pm_qos_latency_show(struct device *dev,
222 struct device_attribute *attr, char *buf) 222 struct device_attribute *attr, char *buf)
223{ 223{
224 return sprintf(buf, "%d\n", dev->power.pq_req->node.prio); 224 return sprintf(buf, "%d\n", dev_pm_qos_requested_latency(dev));
225} 225}
226 226
227static ssize_t pm_qos_latency_store(struct device *dev, 227static ssize_t pm_qos_latency_store(struct device *dev,
@@ -237,12 +237,66 @@ static ssize_t pm_qos_latency_store(struct device *dev,
237 if (value < 0) 237 if (value < 0)
238 return -EINVAL; 238 return -EINVAL;
239 239
240 ret = dev_pm_qos_update_request(dev->power.pq_req, value); 240 ret = dev_pm_qos_update_request(dev->power.qos->latency_req, value);
241 return ret < 0 ? ret : n; 241 return ret < 0 ? ret : n;
242} 242}
243 243
244static DEVICE_ATTR(pm_qos_resume_latency_us, 0644, 244static DEVICE_ATTR(pm_qos_resume_latency_us, 0644,
245 pm_qos_latency_show, pm_qos_latency_store); 245 pm_qos_latency_show, pm_qos_latency_store);
246
247static ssize_t pm_qos_no_power_off_show(struct device *dev,
248 struct device_attribute *attr,
249 char *buf)
250{
251 return sprintf(buf, "%d\n", !!(dev_pm_qos_requested_flags(dev)
252 & PM_QOS_FLAG_NO_POWER_OFF));
253}
254
255static ssize_t pm_qos_no_power_off_store(struct device *dev,
256 struct device_attribute *attr,
257 const char *buf, size_t n)
258{
259 int ret;
260
261 if (kstrtoint(buf, 0, &ret))
262 return -EINVAL;
263
264 if (ret != 0 && ret != 1)
265 return -EINVAL;
266
267 ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, ret);
268 return ret < 0 ? ret : n;
269}
270
271static DEVICE_ATTR(pm_qos_no_power_off, 0644,
272 pm_qos_no_power_off_show, pm_qos_no_power_off_store);
273
274static ssize_t pm_qos_remote_wakeup_show(struct device *dev,
275 struct device_attribute *attr,
276 char *buf)
277{
278 return sprintf(buf, "%d\n", !!(dev_pm_qos_requested_flags(dev)
279 & PM_QOS_FLAG_REMOTE_WAKEUP));
280}
281
282static ssize_t pm_qos_remote_wakeup_store(struct device *dev,
283 struct device_attribute *attr,
284 const char *buf, size_t n)
285{
286 int ret;
287
288 if (kstrtoint(buf, 0, &ret))
289 return -EINVAL;
290
291 if (ret != 0 && ret != 1)
292 return -EINVAL;
293
294 ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_REMOTE_WAKEUP, ret);
295 return ret < 0 ? ret : n;
296}
297
298static DEVICE_ATTR(pm_qos_remote_wakeup, 0644,
299 pm_qos_remote_wakeup_show, pm_qos_remote_wakeup_store);
246#endif /* CONFIG_PM_RUNTIME */ 300#endif /* CONFIG_PM_RUNTIME */
247 301
248#ifdef CONFIG_PM_SLEEP 302#ifdef CONFIG_PM_SLEEP
@@ -564,15 +618,27 @@ static struct attribute_group pm_runtime_attr_group = {
564 .attrs = runtime_attrs, 618 .attrs = runtime_attrs,
565}; 619};
566 620
567static struct attribute *pm_qos_attrs[] = { 621static struct attribute *pm_qos_latency_attrs[] = {
568#ifdef CONFIG_PM_RUNTIME 622#ifdef CONFIG_PM_RUNTIME
569 &dev_attr_pm_qos_resume_latency_us.attr, 623 &dev_attr_pm_qos_resume_latency_us.attr,
570#endif /* CONFIG_PM_RUNTIME */ 624#endif /* CONFIG_PM_RUNTIME */
571 NULL, 625 NULL,
572}; 626};
573static struct attribute_group pm_qos_attr_group = { 627static struct attribute_group pm_qos_latency_attr_group = {
574 .name = power_group_name, 628 .name = power_group_name,
575 .attrs = pm_qos_attrs, 629 .attrs = pm_qos_latency_attrs,
630};
631
632static struct attribute *pm_qos_flags_attrs[] = {
633#ifdef CONFIG_PM_RUNTIME
634 &dev_attr_pm_qos_no_power_off.attr,
635 &dev_attr_pm_qos_remote_wakeup.attr,
636#endif /* CONFIG_PM_RUNTIME */
637 NULL,
638};
639static struct attribute_group pm_qos_flags_attr_group = {
640 .name = power_group_name,
641 .attrs = pm_qos_flags_attrs,
576}; 642};
577 643
578int dpm_sysfs_add(struct device *dev) 644int dpm_sysfs_add(struct device *dev)
@@ -615,14 +681,24 @@ void wakeup_sysfs_remove(struct device *dev)
615 sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); 681 sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
616} 682}
617 683
618int pm_qos_sysfs_add(struct device *dev) 684int pm_qos_sysfs_add_latency(struct device *dev)
685{
686 return sysfs_merge_group(&dev->kobj, &pm_qos_latency_attr_group);
687}
688
689void pm_qos_sysfs_remove_latency(struct device *dev)
690{
691 sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_attr_group);
692}
693
694int pm_qos_sysfs_add_flags(struct device *dev)
619{ 695{
620 return sysfs_merge_group(&dev->kobj, &pm_qos_attr_group); 696 return sysfs_merge_group(&dev->kobj, &pm_qos_flags_attr_group);
621} 697}
622 698
623void pm_qos_sysfs_remove(struct device *dev) 699void pm_qos_sysfs_remove_flags(struct device *dev)
624{ 700{
625 sysfs_unmerge_group(&dev->kobj, &pm_qos_attr_group); 701 sysfs_unmerge_group(&dev->kobj, &pm_qos_flags_attr_group);
626} 702}
627 703
628void rpm_sysfs_remove(struct device *dev) 704void rpm_sysfs_remove(struct device *dev)
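
Drivers now opt in to the two attribute groups separately: dev_pm_qos_expose_latency_limit() merges the latency group (pm_qos_resume_latency_us), while dev_pm_qos_expose_flags() merges the flags group (pm_qos_no_power_off, pm_qos_remote_wakeup). A sketch of a probe path wiring up both, with illustrative names and error handling:

#include <linux/device.h>
#include <linux/pm_qos.h>

/* Sketch: expose both PM QoS knobs under <device>/power/ at probe time. */
static int foo_expose_pm_qos(struct device *dev)
{
	int ret;

	/* creates power/pm_qos_resume_latency_us (0: no constraint yet) */
	ret = dev_pm_qos_expose_latency_limit(dev, 0);
	if (ret < 0)
		return ret;

	/* creates power/pm_qos_no_power_off and power/pm_qos_remote_wakeup */
	ret = dev_pm_qos_expose_flags(dev, 0);
	if (ret < 0)
		dev_pm_qos_hide_latency_limit(dev);

	return ret;
}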
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 80f9ab9c3aa4..401d1919635a 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -15,10 +15,18 @@
15 15
16#include <linux/regmap.h> 16#include <linux/regmap.h>
17#include <linux/fs.h> 17#include <linux/fs.h>
18#include <linux/list.h>
18 19
19struct regmap; 20struct regmap;
20struct regcache_ops; 21struct regcache_ops;
21 22
23struct regmap_debugfs_off_cache {
24 struct list_head list;
25 off_t min;
26 off_t max;
27 unsigned int base_reg;
28};
29
22struct regmap_format { 30struct regmap_format {
23 size_t buf_size; 31 size_t buf_size;
24 size_t reg_bytes; 32 size_t reg_bytes;
@@ -31,14 +39,12 @@ struct regmap_format {
31 unsigned int (*parse_val)(void *buf); 39 unsigned int (*parse_val)(void *buf);
32}; 40};
33 41
34typedef void (*regmap_lock)(struct regmap *map);
35typedef void (*regmap_unlock)(struct regmap *map);
36
37struct regmap { 42struct regmap {
38 struct mutex mutex; 43 struct mutex mutex;
39 spinlock_t spinlock; 44 spinlock_t spinlock;
40 regmap_lock lock; 45 regmap_lock lock;
41 regmap_unlock unlock; 46 regmap_unlock unlock;
47 void *lock_arg; /* This is passed to lock/unlock functions */
42 48
43 struct device *dev; /* Device we do I/O on */ 49 struct device *dev; /* Device we do I/O on */
44 void *work_buf; /* Scratch buffer used to format I/O */ 50 void *work_buf; /* Scratch buffer used to format I/O */
@@ -50,6 +56,12 @@ struct regmap {
50#ifdef CONFIG_DEBUG_FS 56#ifdef CONFIG_DEBUG_FS
51 struct dentry *debugfs; 57 struct dentry *debugfs;
52 const char *debugfs_name; 58 const char *debugfs_name;
59
60 unsigned int debugfs_reg_len;
61 unsigned int debugfs_val_len;
62 unsigned int debugfs_tot_len;
63
64 struct list_head debugfs_off_cache;
53#endif 65#endif
54 66
55 unsigned int max_register; 67 unsigned int max_register;
@@ -57,6 +69,10 @@ struct regmap {
57 bool (*readable_reg)(struct device *dev, unsigned int reg); 69 bool (*readable_reg)(struct device *dev, unsigned int reg);
58 bool (*volatile_reg)(struct device *dev, unsigned int reg); 70 bool (*volatile_reg)(struct device *dev, unsigned int reg);
59 bool (*precious_reg)(struct device *dev, unsigned int reg); 71 bool (*precious_reg)(struct device *dev, unsigned int reg);
72 const struct regmap_access_table *wr_table;
73 const struct regmap_access_table *rd_table;
74 const struct regmap_access_table *volatile_table;
75 const struct regmap_access_table *precious_table;
60 76
61 u8 read_flag_mask; 77 u8 read_flag_mask;
62 u8 write_flag_mask; 78 u8 write_flag_mask;
@@ -120,6 +136,8 @@ int _regmap_write(struct regmap *map, unsigned int reg,
120 136
121struct regmap_range_node { 137struct regmap_range_node {
122 struct rb_node node; 138 struct rb_node node;
139 const char *name;
140 struct regmap *map;
123 141
124 unsigned int range_min; 142 unsigned int range_min;
125 unsigned int range_max; 143 unsigned int range_max;
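
The four *_table pointers give regmap a data-driven alternative to the writeable_reg()/readable_reg()/volatile_reg()/precious_reg() callbacks; regmap.c (below) consults them via _regmap_check_range_table() when no callback is set. A sketch of a config using them, with made-up register addresses:

#include <linux/regmap.h>

/* Hypothetical chip: registers 0x00-0x3f exist, 0x10-0x13 are volatile. */
static const struct regmap_range foo_rw_ranges[] = {
	{ .range_min = 0x00, .range_max = 0x3f },
};

static const struct regmap_access_table foo_rw_table = {
	.yes_ranges   = foo_rw_ranges,
	.n_yes_ranges = ARRAY_SIZE(foo_rw_ranges),
};

static const struct regmap_range foo_volatile_ranges[] = {
	{ .range_min = 0x10, .range_max = 0x13 },
};

static const struct regmap_access_table foo_volatile_table = {
	.yes_ranges   = foo_volatile_ranges,
	.n_yes_ranges = ARRAY_SIZE(foo_volatile_ranges),
};

static const struct regmap_config foo_regmap_config = {
	.reg_bits	= 8,
	.val_bits	= 8,
	.max_register	= 0x3f,
	.wr_table	= &foo_rw_table,
	.rd_table	= &foo_rw_table,
	.volatile_table	= &foo_volatile_table,
};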
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index bb1ff175b962..07aad786f817 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -56,17 +56,74 @@ static const struct file_operations regmap_name_fops = {
56 .llseek = default_llseek, 56 .llseek = default_llseek,
57}; 57};
58 58
59static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf, 59/*
60 size_t count, loff_t *ppos) 60 * Work out where the start offset maps into register numbers, bearing
61 * in mind that we suppress hidden registers.
62 */
63static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
64 unsigned int base,
65 loff_t from,
66 loff_t *pos)
61{ 67{
62 int reg_len, val_len, tot_len; 68 struct regmap_debugfs_off_cache *c = NULL;
63 size_t buf_pos = 0;
64 loff_t p = 0; 69 loff_t p = 0;
70 unsigned int i, ret;
71
72 /*
73 * If we don't have a cache, build one so we don't have to do a
74 * linear scan each time.
75 */
76 if (list_empty(&map->debugfs_off_cache)) {
77 for (i = base; i <= map->max_register; i += map->reg_stride) {
78 /* Skip unprinted registers, closing off cache entry */
79 if (!regmap_readable(map, i) ||
80 regmap_precious(map, i)) {
81 if (c) {
82 c->max = p - 1;
83 list_add_tail(&c->list,
84 &map->debugfs_off_cache);
85 c = NULL;
86 }
87
88 continue;
89 }
90
91 /* No cache entry? Start a new one */
92 if (!c) {
93 c = kzalloc(sizeof(*c), GFP_KERNEL);
94 if (!c)
95 break;
96 c->min = p;
97 c->base_reg = i;
98 }
99
100 p += map->debugfs_tot_len;
101 }
102 }
103
104 /* Find the relevant block */
105 list_for_each_entry(c, &map->debugfs_off_cache, list) {
106 if (*pos >= c->min && *pos <= c->max) {
107 *pos = c->min;
108 return c->base_reg;
109 }
110
111 ret = c->max;
112 }
113
114 return ret;
115}
116
117static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
118 unsigned int to, char __user *user_buf,
119 size_t count, loff_t *ppos)
120{
121 size_t buf_pos = 0;
122 loff_t p = *ppos;
65 ssize_t ret; 123 ssize_t ret;
66 int i; 124 int i;
67 struct regmap *map = file->private_data;
68 char *buf; 125 char *buf;
69 unsigned int val; 126 unsigned int val, start_reg;
70 127
71 if (*ppos < 0 || !count) 128 if (*ppos < 0 || !count)
72 return -EINVAL; 129 return -EINVAL;
@@ -76,11 +133,18 @@ static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf,
76 return -ENOMEM; 133 return -ENOMEM;
77 134
78 /* Calculate the length of a fixed format */ 135 /* Calculate the length of a fixed format */
79 reg_len = regmap_calc_reg_len(map->max_register, buf, count); 136 if (!map->debugfs_tot_len) {
80 val_len = 2 * map->format.val_bytes; 137 map->debugfs_reg_len = regmap_calc_reg_len(map->max_register,
81 tot_len = reg_len + val_len + 3; /* : \n */ 138 buf, count);
139 map->debugfs_val_len = 2 * map->format.val_bytes;
140 map->debugfs_tot_len = map->debugfs_reg_len +
141 map->debugfs_val_len + 3; /* : \n */
142 }
82 143
83 for (i = 0; i <= map->max_register; i += map->reg_stride) { 144 /* Work out which register we're starting at */
145 start_reg = regmap_debugfs_get_dump_start(map, from, *ppos, &p);
146
147 for (i = start_reg; i <= to; i += map->reg_stride) {
84 if (!regmap_readable(map, i)) 148 if (!regmap_readable(map, i))
85 continue; 149 continue;
86 150
@@ -90,26 +154,27 @@ static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf,
90 /* If we're in the region the user is trying to read */ 154 /* If we're in the region the user is trying to read */
91 if (p >= *ppos) { 155 if (p >= *ppos) {
92 /* ...but not beyond it */ 156 /* ...but not beyond it */
93 if (buf_pos >= count - 1 - tot_len) 157 if (buf_pos + 1 + map->debugfs_tot_len >= count)
94 break; 158 break;
95 159
96 /* Format the register */ 160 /* Format the register */
97 snprintf(buf + buf_pos, count - buf_pos, "%.*x: ", 161 snprintf(buf + buf_pos, count - buf_pos, "%.*x: ",
98 reg_len, i); 162 map->debugfs_reg_len, i - from);
99 buf_pos += reg_len + 2; 163 buf_pos += map->debugfs_reg_len + 2;
100 164
101 /* Format the value, write all X if we can't read */ 165 /* Format the value, write all X if we can't read */
102 ret = regmap_read(map, i, &val); 166 ret = regmap_read(map, i, &val);
103 if (ret == 0) 167 if (ret == 0)
104 snprintf(buf + buf_pos, count - buf_pos, 168 snprintf(buf + buf_pos, count - buf_pos,
105 "%.*x", val_len, val); 169 "%.*x", map->debugfs_val_len, val);
106 else 170 else
107 memset(buf + buf_pos, 'X', val_len); 171 memset(buf + buf_pos, 'X',
172 map->debugfs_val_len);
108 buf_pos += 2 * map->format.val_bytes; 173 buf_pos += 2 * map->format.val_bytes;
109 174
110 buf[buf_pos++] = '\n'; 175 buf[buf_pos++] = '\n';
111 } 176 }
112 p += tot_len; 177 p += map->debugfs_tot_len;
113 } 178 }
114 179
115 ret = buf_pos; 180 ret = buf_pos;
@@ -126,6 +191,15 @@ out:
126 return ret; 191 return ret;
127} 192}
128 193
194static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf,
195 size_t count, loff_t *ppos)
196{
197 struct regmap *map = file->private_data;
198
199 return regmap_read_debugfs(map, 0, map->max_register, user_buf,
200 count, ppos);
201}
202
129#undef REGMAP_ALLOW_WRITE_DEBUGFS 203#undef REGMAP_ALLOW_WRITE_DEBUGFS
130#ifdef REGMAP_ALLOW_WRITE_DEBUGFS 204#ifdef REGMAP_ALLOW_WRITE_DEBUGFS
131/* 205/*
@@ -174,6 +248,22 @@ static const struct file_operations regmap_map_fops = {
174 .llseek = default_llseek, 248 .llseek = default_llseek,
175}; 249};
176 250
251static ssize_t regmap_range_read_file(struct file *file, char __user *user_buf,
252 size_t count, loff_t *ppos)
253{
254 struct regmap_range_node *range = file->private_data;
255 struct regmap *map = range->map;
256
257 return regmap_read_debugfs(map, range->range_min, range->range_max,
258 user_buf, count, ppos);
259}
260
261static const struct file_operations regmap_range_fops = {
262 .open = simple_open,
263 .read = regmap_range_read_file,
264 .llseek = default_llseek,
265};
266
177static ssize_t regmap_access_read_file(struct file *file, 267static ssize_t regmap_access_read_file(struct file *file,
178 char __user *user_buf, size_t count, 268 char __user *user_buf, size_t count,
179 loff_t *ppos) 269 loff_t *ppos)
@@ -244,6 +334,11 @@ static const struct file_operations regmap_access_fops = {
244 334
245void regmap_debugfs_init(struct regmap *map, const char *name) 335void regmap_debugfs_init(struct regmap *map, const char *name)
246{ 336{
337 struct rb_node *next;
338 struct regmap_range_node *range_node;
339
340 INIT_LIST_HEAD(&map->debugfs_off_cache);
341
247 if (name) { 342 if (name) {
248 map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s", 343 map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
249 dev_name(map->dev), name); 344 dev_name(map->dev), name);
@@ -276,11 +371,32 @@ void regmap_debugfs_init(struct regmap *map, const char *name)
276 debugfs_create_bool("cache_bypass", 0400, map->debugfs, 371 debugfs_create_bool("cache_bypass", 0400, map->debugfs,
277 &map->cache_bypass); 372 &map->cache_bypass);
278 } 373 }
374
375 next = rb_first(&map->range_tree);
376 while (next) {
377 range_node = rb_entry(next, struct regmap_range_node, node);
378
379 if (range_node->name)
380 debugfs_create_file(range_node->name, 0400,
381 map->debugfs, range_node,
382 &regmap_range_fops);
383
384 next = rb_next(&range_node->node);
385 }
279} 386}
280 387
281void regmap_debugfs_exit(struct regmap *map) 388void regmap_debugfs_exit(struct regmap *map)
282{ 389{
390 struct regmap_debugfs_off_cache *c;
391
283 debugfs_remove_recursive(map->debugfs); 392 debugfs_remove_recursive(map->debugfs);
393 while (!list_empty(&map->debugfs_off_cache)) {
394 c = list_first_entry(&map->debugfs_off_cache,
395 struct regmap_debugfs_off_cache,
396 list);
397 list_del(&c->list);
398 kfree(c);
399 }
284 kfree(map->debugfs_name); 400 kfree(map->debugfs_name);
285} 401}
286 402
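
Two user-visible effects here: the offset cache replaces a linear register scan on every read of the debugfs file with a one-time walk, and every named register range now gets its own debugfs file dumping registers relative to range_min (the "i - from" change above). A sketch of a range definition that would get such a file; all values are illustrative, and the ranges hook up through the num_ranges field renamed from n_ranges in the regmap.c hunks below:

#include <linux/regmap.h>

/* Hypothetical paged register window; gets a "page0" debugfs file. */
static const struct regmap_range_cfg foo_ranges[] = {
	{
		.name		= "page0",
		.range_min	= 0x100,	/* virtual range */
		.range_max	= 0x1ff,
		.selector_reg	= 0x00,		/* page select register */
		.selector_mask	= 0x0f,
		.selector_shift	= 0,
		.window_start	= 0x10,		/* physical data window */
		.window_len	= 0x10,
	},
};

static const struct regmap_config foo_config = {
	.reg_bits	= 16,
	.val_bits	= 8,
	.max_register	= 0x1ff,
	.ranges		= foo_ranges,
	.num_ranges	= ARRAY_SIZE(foo_ranges),
};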
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 5b6b1d8e6cc0..5972ad958544 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -458,3 +458,22 @@ int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
458 return irq_create_mapping(data->domain, irq); 458 return irq_create_mapping(data->domain, irq);
459} 459}
460EXPORT_SYMBOL_GPL(regmap_irq_get_virq); 460EXPORT_SYMBOL_GPL(regmap_irq_get_virq);
461
462/**
463 * regmap_irq_get_domain(): Retrieve the irq_domain for the chip
464 *
465 * Useful for drivers to request their own IRQs and for integration
466 * with subsystems. For ease of integration NULL is accepted as a
467 * domain, allowing devices to just call this even if no domain is
468 * allocated.
469 *
470 * @data: regmap_irq controller to operate on.
471 */
472struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data)
473{
474 if (data)
475 return data->domain;
476 else
477 return NULL;
478}
479EXPORT_SYMBOL_GPL(regmap_irq_get_domain);
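
A sketch of the accessor in use, mapping a chip-local IRQ number to a Linux virq; 'data' is assumed to come from regmap_add_irq_chip(), and foo_request_chip_irq is illustrative:

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/regmap.h>

static int foo_request_chip_irq(struct device *dev,
				struct regmap_irq_chip_data *data,
				unsigned int hwirq, irq_handler_t thread_fn)
{
	struct irq_domain *domain = regmap_irq_get_domain(data);
	int virq;

	if (!domain)		/* NULL 'data' is tolerated, see above */
		return -ENODEV;

	virq = irq_create_mapping(domain, hwirq);
	if (!virq)
		return -EINVAL;

	return devm_request_threaded_irq(dev, virq, NULL, thread_fn,
					 IRQF_ONESHOT, dev_name(dev), NULL);
}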
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 52069d29ff12..42d5cb0f503f 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -34,6 +34,36 @@ static int _regmap_update_bits(struct regmap *map, unsigned int reg,
34 unsigned int mask, unsigned int val, 34 unsigned int mask, unsigned int val,
35 bool *change); 35 bool *change);
36 36
37bool regmap_reg_in_ranges(unsigned int reg,
38 const struct regmap_range *ranges,
39 unsigned int nranges)
40{
41 const struct regmap_range *r;
42 int i;
43
44 for (i = 0, r = ranges; i < nranges; i++, r++)
45 if (regmap_reg_in_range(reg, r))
46 return true;
47 return false;
48}
49EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);
50
51static bool _regmap_check_range_table(struct regmap *map,
52 unsigned int reg,
53 const struct regmap_access_table *table)
54{
55 /* Check "no ranges" first */
56 if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
57 return false;
58
59 /* In case zero "yes ranges" are supplied, any reg is OK */
60 if (!table->n_yes_ranges)
61 return true;
62
63 return regmap_reg_in_ranges(reg, table->yes_ranges,
64 table->n_yes_ranges);
65}
66
37bool regmap_writeable(struct regmap *map, unsigned int reg) 67bool regmap_writeable(struct regmap *map, unsigned int reg)
38{ 68{
39 if (map->max_register && reg > map->max_register) 69 if (map->max_register && reg > map->max_register)
@@ -42,6 +72,9 @@ bool regmap_writeable(struct regmap *map, unsigned int reg)
42 if (map->writeable_reg) 72 if (map->writeable_reg)
43 return map->writeable_reg(map->dev, reg); 73 return map->writeable_reg(map->dev, reg);
44 74
75 if (map->wr_table)
76 return _regmap_check_range_table(map, reg, map->wr_table);
77
45 return true; 78 return true;
46} 79}
47 80
@@ -56,6 +89,9 @@ bool regmap_readable(struct regmap *map, unsigned int reg)
56 if (map->readable_reg) 89 if (map->readable_reg)
57 return map->readable_reg(map->dev, reg); 90 return map->readable_reg(map->dev, reg);
58 91
92 if (map->rd_table)
93 return _regmap_check_range_table(map, reg, map->rd_table);
94
59 return true; 95 return true;
60} 96}
61 97
@@ -67,6 +103,9 @@ bool regmap_volatile(struct regmap *map, unsigned int reg)
67 if (map->volatile_reg) 103 if (map->volatile_reg)
68 return map->volatile_reg(map->dev, reg); 104 return map->volatile_reg(map->dev, reg);
69 105
106 if (map->volatile_table)
107 return _regmap_check_range_table(map, reg, map->volatile_table);
108
70 return true; 109 return true;
71} 110}
72 111
@@ -78,11 +117,14 @@ bool regmap_precious(struct regmap *map, unsigned int reg)
78 if (map->precious_reg) 117 if (map->precious_reg)
79 return map->precious_reg(map->dev, reg); 118 return map->precious_reg(map->dev, reg);
80 119
120 if (map->precious_table)
121 return _regmap_check_range_table(map, reg, map->precious_table);
122
81 return false; 123 return false;
82} 124}
83 125
84static bool regmap_volatile_range(struct regmap *map, unsigned int reg, 126static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
85 unsigned int num) 127 size_t num)
86{ 128{
87 unsigned int i; 129 unsigned int i;
88 130
@@ -214,23 +256,27 @@ static unsigned int regmap_parse_32_native(void *buf)
214 return *(u32 *)buf; 256 return *(u32 *)buf;
215} 257}
216 258
217static void regmap_lock_mutex(struct regmap *map) 259static void regmap_lock_mutex(void *__map)
218{ 260{
261 struct regmap *map = __map;
219 mutex_lock(&map->mutex); 262 mutex_lock(&map->mutex);
220} 263}
221 264
222static void regmap_unlock_mutex(struct regmap *map) 265static void regmap_unlock_mutex(void *__map)
223{ 266{
267 struct regmap *map = __map;
224 mutex_unlock(&map->mutex); 268 mutex_unlock(&map->mutex);
225} 269}
226 270
227static void regmap_lock_spinlock(struct regmap *map) 271static void regmap_lock_spinlock(void *__map)
228{ 272{
273 struct regmap *map = __map;
229 spin_lock(&map->spinlock); 274 spin_lock(&map->spinlock);
230} 275}
231 276
232static void regmap_unlock_spinlock(struct regmap *map) 277static void regmap_unlock_spinlock(void *__map)
233{ 278{
279 struct regmap *map = __map;
234 spin_unlock(&map->spinlock); 280 spin_unlock(&map->spinlock);
235} 281}
236 282
@@ -335,14 +381,21 @@ struct regmap *regmap_init(struct device *dev,
335 goto err; 381 goto err;
336 } 382 }
337 383
338 if (bus->fast_io) { 384 if (config->lock && config->unlock) {
339 spin_lock_init(&map->spinlock); 385 map->lock = config->lock;
340 map->lock = regmap_lock_spinlock; 386 map->unlock = config->unlock;
341 map->unlock = regmap_unlock_spinlock; 387 map->lock_arg = config->lock_arg;
342 } else { 388 } else {
343 mutex_init(&map->mutex); 389 if (bus->fast_io) {
344 map->lock = regmap_lock_mutex; 390 spin_lock_init(&map->spinlock);
345 map->unlock = regmap_unlock_mutex; 391 map->lock = regmap_lock_spinlock;
392 map->unlock = regmap_unlock_spinlock;
393 } else {
394 mutex_init(&map->mutex);
395 map->lock = regmap_lock_mutex;
396 map->unlock = regmap_unlock_mutex;
397 }
398 map->lock_arg = map;
346 } 399 }
347 map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8); 400 map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
348 map->format.pad_bytes = config->pad_bits / 8; 401 map->format.pad_bytes = config->pad_bits / 8;
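
This hunk lets the config override regmap's internal locking entirely: when both lock and unlock callbacks are supplied they are invoked with lock_arg, so a driver can serialize regmap I/O against its own critical sections; otherwise the old fast_io spinlock/mutex selection still applies, with the map itself as lock_arg. A sketch under that assumption (the shared mutex and names are hypothetical):

#include <linux/mutex.h>
#include <linux/regmap.h>

static DEFINE_MUTEX(foo_io_lock);	/* shared with non-regmap I/O paths */

static void foo_regmap_lock(void *arg)
{
	mutex_lock(arg);		/* arg is config->lock_arg below */
}

static void foo_regmap_unlock(void *arg)
{
	mutex_unlock(arg);
}

static const struct regmap_config foo_locked_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.lock	  = foo_regmap_lock,
	.unlock	  = foo_regmap_unlock,
	.lock_arg = &foo_io_lock,
};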
@@ -359,6 +412,10 @@ struct regmap *regmap_init(struct device *dev,
359 map->bus = bus; 412 map->bus = bus;
360 map->bus_context = bus_context; 413 map->bus_context = bus_context;
361 map->max_register = config->max_register; 414 map->max_register = config->max_register;
415 map->wr_table = config->wr_table;
416 map->rd_table = config->rd_table;
417 map->volatile_table = config->volatile_table;
418 map->precious_table = config->precious_table;
362 map->writeable_reg = config->writeable_reg; 419 map->writeable_reg = config->writeable_reg;
363 map->readable_reg = config->readable_reg; 420 map->readable_reg = config->readable_reg;
364 map->volatile_reg = config->volatile_reg; 421 map->volatile_reg = config->volatile_reg;
@@ -519,20 +576,38 @@ struct regmap *regmap_init(struct device *dev,
519 } 576 }
520 577
521 map->range_tree = RB_ROOT; 578 map->range_tree = RB_ROOT;
522 for (i = 0; i < config->n_ranges; i++) { 579 for (i = 0; i < config->num_ranges; i++) {
523 const struct regmap_range_cfg *range_cfg = &config->ranges[i]; 580 const struct regmap_range_cfg *range_cfg = &config->ranges[i];
524 struct regmap_range_node *new; 581 struct regmap_range_node *new;
525 582
526 /* Sanity check */ 583 /* Sanity check */
527 if (range_cfg->range_max < range_cfg->range_min || 584 if (range_cfg->range_max < range_cfg->range_min) {
528 range_cfg->range_max > map->max_register || 585 dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
529 range_cfg->selector_reg > map->max_register || 586 range_cfg->range_max, range_cfg->range_min);
530 range_cfg->window_len == 0) 587 goto err_range;
588 }
589
590 if (range_cfg->range_max > map->max_register) {
591 dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
592 range_cfg->range_max, map->max_register);
593 goto err_range;
594 }
595
596 if (range_cfg->selector_reg > map->max_register) {
597 dev_err(map->dev,
598 "Invalid range %d: selector out of map\n", i);
599 goto err_range;
600 }
601
602 if (range_cfg->window_len == 0) {
603 dev_err(map->dev, "Invalid range %d: window_len 0\n",
604 i);
531 goto err_range; 605 goto err_range;
606 }
532 607
533 /* Make sure that this register range has no selector 608 /* Make sure that this register range has no selector
534 or data window within its boundary */ 609 or data window within its boundary */
535 for (j = 0; j < config->n_ranges; j++) { 610 for (j = 0; j < config->num_ranges; j++) {
536 unsigned sel_reg = config->ranges[j].selector_reg; 611 unsigned sel_reg = config->ranges[j].selector_reg;
537 unsigned win_min = config->ranges[j].window_start; 612 unsigned win_min = config->ranges[j].window_start;
538 unsigned win_max = win_min + 613 unsigned win_max = win_min +
@@ -540,11 +615,17 @@ struct regmap *regmap_init(struct device *dev,
540 615
541 if (range_cfg->range_min <= sel_reg && 616 if (range_cfg->range_min <= sel_reg &&
542 sel_reg <= range_cfg->range_max) { 617 sel_reg <= range_cfg->range_max) {
618 dev_err(map->dev,
619 "Range %d: selector for %d in window\n",
620 i, j);
543 goto err_range; 621 goto err_range;
544 } 622 }
545 623
546 if (!(win_max < range_cfg->range_min || 624 if (!(win_max < range_cfg->range_min ||
547 win_min > range_cfg->range_max)) { 625 win_min > range_cfg->range_max)) {
626 dev_err(map->dev,
627 "Range %d: window for %d in window\n",
628 i, j);
548 goto err_range; 629 goto err_range;
549 } 630 }
550 } 631 }
@@ -555,6 +636,8 @@ struct regmap *regmap_init(struct device *dev,
555 goto err_range; 636 goto err_range;
556 } 637 }
557 638
639 new->map = map;
640 new->name = range_cfg->name;
558 new->range_min = range_cfg->range_min; 641 new->range_min = range_cfg->range_min;
559 new->range_max = range_cfg->range_max; 642 new->range_max = range_cfg->range_max;
560 new->selector_reg = range_cfg->selector_reg; 643 new->selector_reg = range_cfg->selector_reg;
@@ -564,6 +647,7 @@ struct regmap *regmap_init(struct device *dev,
564 new->window_len = range_cfg->window_len; 647 new->window_len = range_cfg->window_len;
565 648
566 if (_regmap_range_add(map, new) == false) { 649 if (_regmap_range_add(map, new) == false) {
650 dev_err(map->dev, "Failed to add range %d\n", i);
567 kfree(new); 651 kfree(new);
568 goto err_range; 652 goto err_range;
569 } 653 }
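Each entry validated above describes one indirectly addressed region: the virtual register span, the selector field that picks a page, and the data window the pages appear through. A sketch of a configuration that passes all four checks, with every number invented:

#include <linux/regmap.h>

/* 256 virtual registers (0x100-0x1ff) accessed 16 at a time through
 * a window at 0x10-0x1f; the page index is written to bits [3:0] of
 * the selector register at 0x0f. */
static const struct regmap_range_cfg foo_ranges[] = {
        {
                .name = "pages",
                .range_min = 0x100,
                .range_max = 0x1ff,
                .selector_reg = 0x0f,
                .selector_mask = 0x0f,
                .selector_shift = 0,
                .window_start = 0x10,
                .window_len = 16,
        },
};

The regmap_config would reference this with .ranges = foo_ranges and .num_ranges = ARRAY_SIZE(foo_ranges), using the num_ranges spelling this patch introduces.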
@@ -579,7 +663,7 @@ struct regmap *regmap_init(struct device *dev,
579 } 663 }
580 664
581 ret = regcache_init(map, config); 665 ret = regcache_init(map, config);
582 if (ret < 0) 666 if (ret != 0)
583 goto err_range; 667 goto err_range;
584 668
585 regmap_debugfs_init(map, config->name); 669 regmap_debugfs_init(map, config->name);
@@ -738,59 +822,57 @@ struct regmap *dev_get_regmap(struct device *dev, const char *name)
738EXPORT_SYMBOL_GPL(dev_get_regmap); 822EXPORT_SYMBOL_GPL(dev_get_regmap);
739 823
740static int _regmap_select_page(struct regmap *map, unsigned int *reg, 824static int _regmap_select_page(struct regmap *map, unsigned int *reg,
825 struct regmap_range_node *range,
741 unsigned int val_num) 826 unsigned int val_num)
742{ 827{
743 struct regmap_range_node *range;
744 void *orig_work_buf; 828 void *orig_work_buf;
745 unsigned int win_offset; 829 unsigned int win_offset;
746 unsigned int win_page; 830 unsigned int win_page;
747 bool page_chg; 831 bool page_chg;
748 int ret; 832 int ret;
749 833
750 range = _regmap_range_lookup(map, *reg); 834 win_offset = (*reg - range->range_min) % range->window_len;
751 if (range) { 835 win_page = (*reg - range->range_min) / range->window_len;
752 win_offset = (*reg - range->range_min) % range->window_len;
753 win_page = (*reg - range->range_min) / range->window_len;
754
755 if (val_num > 1) {
756 /* Bulk write shouldn't cross range boundary */
757 if (*reg + val_num - 1 > range->range_max)
758 return -EINVAL;
759 836
760 /* ... or single page boundary */ 837 if (val_num > 1) {
761 if (val_num > range->window_len - win_offset) 838 /* Bulk write shouldn't cross range boundary */
762 return -EINVAL; 839 if (*reg + val_num - 1 > range->range_max)
763 } 840 return -EINVAL;
764 841
765 /* It is possible to have the selector register inside the data window. 842 /* ... or single page boundary */
766 In that case, the selector register is located on every page and 843 if (val_num > range->window_len - win_offset)
767 it needs no page switching when accessed alone. */ 844 return -EINVAL;
768 if (val_num > 1 || 845 }
769 range->window_start + win_offset != range->selector_reg) {
770 /* Use separate work_buf during page switching */
771 orig_work_buf = map->work_buf;
772 map->work_buf = map->selector_work_buf;
773 846
774 ret = _regmap_update_bits(map, range->selector_reg, 847 /* It is possible to have the selector register inside the data window.
775 range->selector_mask, 848 In that case, the selector register is located on every page and
776 win_page << range->selector_shift, 849 it needs no page switching when accessed alone. */
777 &page_chg); 850 if (val_num > 1 ||
851 range->window_start + win_offset != range->selector_reg) {
852 /* Use separate work_buf during page switching */
853 orig_work_buf = map->work_buf;
854 map->work_buf = map->selector_work_buf;
778 855
779 map->work_buf = orig_work_buf; 856 ret = _regmap_update_bits(map, range->selector_reg,
857 range->selector_mask,
858 win_page << range->selector_shift,
859 &page_chg);
780 860
781 if (ret < 0) 861 map->work_buf = orig_work_buf;
782 return ret;
783 }
784 862
785 *reg = range->window_start + win_offset; 863 if (ret != 0)
864 return ret;
786 } 865 }
787 866
867 *reg = range->window_start + win_offset;
868
788 return 0; 869 return 0;
789} 870}
790 871
791static int _regmap_raw_write(struct regmap *map, unsigned int reg, 872static int _regmap_raw_write(struct regmap *map, unsigned int reg,
792 const void *val, size_t val_len) 873 const void *val, size_t val_len)
793{ 874{
875 struct regmap_range_node *range;
794 u8 *u8 = map->work_buf; 876 u8 *u8 = map->work_buf;
795 void *buf; 877 void *buf;
796 int ret = -ENOTSUPP; 878 int ret = -ENOTSUPP;
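To make the reworked page selection concrete, a worked example using the same invented numbers as the range sketch earlier (range_min 0x100, window_start 0x10, window_len 16):

/* Accessing virtual register 0x123:
 *
 *      win_offset = (0x123 - 0x100) % 16 = 3
 *      win_page   = (0x123 - 0x100) / 16 = 2
 *
 * _regmap_select_page() writes page 2 into the selector field using
 * the spare selector_work_buf, then rewrites *reg to
 * window_start + win_offset = 0x13, where the bus access happens. */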
@@ -814,7 +896,7 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
814 ival); 896 ival);
815 if (ret) { 897 if (ret) {
816 dev_err(map->dev, 898 dev_err(map->dev,
817 "Error in caching of register: %u ret: %d\n", 899 "Error in caching of register: %x ret: %d\n",
818 reg + i, ret); 900 reg + i, ret);
819 return ret; 901 return ret;
820 } 902 }
@@ -825,9 +907,35 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
825 } 907 }
826 } 908 }
827 909
828 ret = _regmap_select_page(map, &reg, val_len / map->format.val_bytes); 910 range = _regmap_range_lookup(map, reg);
829 if (ret < 0) 911 if (range) {
830 return ret; 912 int val_num = val_len / map->format.val_bytes;
913 int win_offset = (reg - range->range_min) % range->window_len;
914 int win_residue = range->window_len - win_offset;
915
916 /* If the write goes beyond the end of the window, split it */
917 while (val_num > win_residue) {
918 dev_dbg(map->dev, "Writing window %d/%zu\n",
919 win_residue, val_len / map->format.val_bytes);
920 ret = _regmap_raw_write(map, reg, val, win_residue *
921 map->format.val_bytes);
922 if (ret != 0)
923 return ret;
924
925 reg += win_residue;
926 val_num -= win_residue;
927 val += win_residue * map->format.val_bytes;
928 val_len -= win_residue * map->format.val_bytes;
929
930 win_offset = (reg - range->range_min) %
931 range->window_len;
932 win_residue = range->window_len - win_offset;
933 }
934
935 ret = _regmap_select_page(map, &reg, range, val_num);
936 if (ret != 0)
937 return ret;
938 }
831 939
832 map->format.format_reg(map->work_buf, reg, map->reg_shift); 940 map->format.format_reg(map->work_buf, reg, map->reg_shift);
833 941
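The loop added above splits a raw write that would run past the end of the data window, recursing once per chunk so each chunk gets its own page selection. A worked example, again assuming window_len = 16:

/* A 40-register write starting 10 registers into a page:
 *
 *      win_offset = 10, win_residue =  6  ->  write  6, 34 left
 *      win_offset =  0, win_residue = 16  ->  write 16, 18 left
 *      win_offset =  0, win_residue = 16  ->  write 16,  2 left
 *      2 <= 16, loop exits                ->  select page, write 2
 *
 * Four bus transactions in total, each confined to one page. */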
@@ -876,6 +984,7 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
876int _regmap_write(struct regmap *map, unsigned int reg, 984int _regmap_write(struct regmap *map, unsigned int reg,
877 unsigned int val) 985 unsigned int val)
878{ 986{
987 struct regmap_range_node *range;
879 int ret; 988 int ret;
880 BUG_ON(!map->format.format_write && !map->format.format_val); 989 BUG_ON(!map->format.format_write && !map->format.format_val);
881 990
@@ -897,9 +1006,12 @@ int _regmap_write(struct regmap *map, unsigned int reg,
897 trace_regmap_reg_write(map->dev, reg, val); 1006 trace_regmap_reg_write(map->dev, reg, val);
898 1007
899 if (map->format.format_write) { 1008 if (map->format.format_write) {
900 ret = _regmap_select_page(map, &reg, 1); 1009 range = _regmap_range_lookup(map, reg);
901 if (ret < 0) 1010 if (range) {
902 return ret; 1011 ret = _regmap_select_page(map, &reg, range, 1);
1012 if (ret != 0)
1013 return ret;
1014 }
903 1015
904 map->format.format_write(map, reg, val); 1016 map->format.format_write(map, reg, val);
905 1017
@@ -939,11 +1051,11 @@ int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
939 if (reg % map->reg_stride) 1051 if (reg % map->reg_stride)
940 return -EINVAL; 1052 return -EINVAL;
941 1053
942 map->lock(map); 1054 map->lock(map->lock_arg);
943 1055
944 ret = _regmap_write(map, reg, val); 1056 ret = _regmap_write(map, reg, val);
945 1057
946 map->unlock(map); 1058 map->unlock(map->lock_arg);
947 1059
948 return ret; 1060 return ret;
949} 1061}
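The remaining hunks are the mechanical half of the locking change: every entry point now passes map->lock_arg instead of the map. Since regmap_init() above sets lock_arg to the map itself, behaviour is unchanged; the indirection just makes the lock context swappable per map:

/* With map->lock_arg == map, as set up in this patch,
 *
 *      map->lock(map->lock_arg);
 *
 * takes map->mutex or map->spinlock exactly as map->lock(map) did. */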
@@ -975,11 +1087,11 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
975 if (reg % map->reg_stride) 1087 if (reg % map->reg_stride)
976 return -EINVAL; 1088 return -EINVAL;
977 1089
978 map->lock(map); 1090 map->lock(map->lock_arg);
979 1091
980 ret = _regmap_raw_write(map, reg, val, val_len); 1092 ret = _regmap_raw_write(map, reg, val, val_len);
981 1093
982 map->unlock(map); 1094 map->unlock(map->lock_arg);
983 1095
984 return ret; 1096 return ret;
985} 1097}
@@ -1011,7 +1123,7 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
1011 if (reg % map->reg_stride) 1123 if (reg % map->reg_stride)
1012 return -EINVAL; 1124 return -EINVAL;
1013 1125
1014 map->lock(map); 1126 map->lock(map->lock_arg);
1015 1127
1016 /* No formatting is required if val_bytes is 1 */ 1128 /* No formatting is required if val_bytes is 1 */
1017 if (val_bytes == 1) { 1129 if (val_bytes == 1) {
@@ -1047,7 +1159,7 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
1047 kfree(wval); 1159 kfree(wval);
1048 1160
1049out: 1161out:
1050 map->unlock(map); 1162 map->unlock(map->lock_arg);
1051 return ret; 1163 return ret;
1052} 1164}
1053EXPORT_SYMBOL_GPL(regmap_bulk_write); 1165EXPORT_SYMBOL_GPL(regmap_bulk_write);
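A hedged usage sketch for the bulk path, assuming a probe-style function with map and dev in scope and an invented register layout: 8-bit values take the val_bytes == 1 shortcut above, so the caller's buffer is passed through unformatted, while wider values are endian-formatted into a scratch buffer first.

static const u8 foo_coeffs[] = { 0x01, 0x02, 0x03, 0x04 };
int ret;

ret = regmap_bulk_write(map, 0x20, foo_coeffs, ARRAY_SIZE(foo_coeffs));
if (ret != 0)
        dev_err(dev, "coefficient write failed: %d\n", ret);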
@@ -1055,12 +1167,17 @@ EXPORT_SYMBOL_GPL(regmap_bulk_write);
1055static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val, 1167static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
1056 unsigned int val_len) 1168 unsigned int val_len)
1057{ 1169{
1170 struct regmap_range_node *range;
1058 u8 *u8 = map->work_buf; 1171 u8 *u8 = map->work_buf;
1059 int ret; 1172 int ret;
1060 1173
1061 ret = _regmap_select_page(map, &reg, val_len / map->format.val_bytes); 1174 range = _regmap_range_lookup(map, reg);
1062 if (ret < 0) 1175 if (range) {
1063 return ret; 1176 ret = _regmap_select_page(map, &reg, range,
1177 val_len / map->format.val_bytes);
1178 if (ret != 0)
1179 return ret;
1180 }
1064 1181
1065 map->format.format_reg(map->work_buf, reg, map->reg_shift); 1182 map->format.format_reg(map->work_buf, reg, map->reg_shift);
1066 1183
@@ -1137,11 +1254,11 @@ int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
1137 if (reg % map->reg_stride) 1254 if (reg % map->reg_stride)
1138 return -EINVAL; 1255 return -EINVAL;
1139 1256
1140 map->lock(map); 1257 map->lock(map->lock_arg);
1141 1258
1142 ret = _regmap_read(map, reg, val); 1259 ret = _regmap_read(map, reg, val);
1143 1260
1144 map->unlock(map); 1261 map->unlock(map->lock_arg);
1145 1262
1146 return ret; 1263 return ret;
1147} 1264}
@@ -1171,7 +1288,7 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
1171 if (reg % map->reg_stride) 1288 if (reg % map->reg_stride)
1172 return -EINVAL; 1289 return -EINVAL;
1173 1290
1174 map->lock(map); 1291 map->lock(map->lock_arg);
1175 1292
1176 if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass || 1293 if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
1177 map->cache_type == REGCACHE_NONE) { 1294 map->cache_type == REGCACHE_NONE) {
@@ -1193,7 +1310,7 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
1193 } 1310 }
1194 1311
1195 out: 1312 out:
1196 map->unlock(map); 1313 map->unlock(map->lock_arg);
1197 1314
1198 return ret; 1315 return ret;
1199} 1316}
@@ -1300,9 +1417,9 @@ int regmap_update_bits(struct regmap *map, unsigned int reg,
1300 bool change; 1417 bool change;
1301 int ret; 1418 int ret;
1302 1419
1303 map->lock(map); 1420 map->lock(map->lock_arg);
1304 ret = _regmap_update_bits(map, reg, mask, val, &change); 1421 ret = _regmap_update_bits(map, reg, mask, val, &change);
1305 map->unlock(map); 1422 map->unlock(map->lock_arg);
1306 1423
1307 return ret; 1424 return ret;
1308} 1425}
@@ -1326,9 +1443,9 @@ int regmap_update_bits_check(struct regmap *map, unsigned int reg,
1326{ 1443{
1327 int ret; 1444 int ret;
1328 1445
1329 map->lock(map); 1446 map->lock(map->lock_arg);
1330 ret = _regmap_update_bits(map, reg, mask, val, change); 1447 ret = _regmap_update_bits(map, reg, mask, val, change);
1331 map->unlock(map); 1448 map->unlock(map->lock_arg);
1332 return ret; 1449 return ret;
1333} 1450}
1334EXPORT_SYMBOL_GPL(regmap_update_bits_check); 1451EXPORT_SYMBOL_GPL(regmap_update_bits_check);
@@ -1357,7 +1474,7 @@ int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
1357 if (map->patch) 1474 if (map->patch)
1358 return -EBUSY; 1475 return -EBUSY;
1359 1476
1360 map->lock(map); 1477 map->lock(map->lock_arg);
1361 1478
1362 bypass = map->cache_bypass; 1479 bypass = map->cache_bypass;
1363 1480
@@ -1385,7 +1502,7 @@ int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
1385out: 1502out:
1386 map->cache_bypass = bypass; 1503 map->cache_bypass = bypass;
1387 1504
1388 map->unlock(map); 1505 map->unlock(map->lock_arg);
1389 1506
1390 return ret; 1507 return ret;
1391} 1508}
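Finally, a usage sketch for the patch interface converted above, with invented registers and values: the writes are applied once at registration (with the register cache bypassed) and remembered, so regcache_sync() can replay them after the device loses state.

static const struct reg_default foo_errata[] = {
        { 0x71, 0x0004 },
        { 0x72, 0x0018 },
};

ret = regmap_register_patch(map, foo_errata, ARRAY_SIZE(foo_errata));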