path: root/drivers/base
Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/Kconfig                  |    3
-rw-r--r--  drivers/base/Makefile                 |    1
-rw-r--r--  drivers/base/base.h                   |    6
-rw-r--r--  drivers/base/bus.c                    |    6
-rw-r--r--  drivers/base/core.c                   |    7
-rw-r--r--  drivers/base/cpu.c                    |   11
-rw-r--r--  drivers/base/dd.c                     |  148
-rw-r--r--  drivers/base/dma-buf.c                |  165
-rw-r--r--  drivers/base/driver.c                 |   65
-rw-r--r--  drivers/base/firmware_class.c         |  208
-rw-r--r--  drivers/base/memory.c                 |    2
-rw-r--r--  drivers/base/platform.c               |    2
-rw-r--r--  drivers/base/power/clock_ops.c        |    1
-rw-r--r--  drivers/base/power/common.c           |    1
-rw-r--r--  drivers/base/power/domain.c           |  253
-rw-r--r--  drivers/base/power/generic_ops.c      |  157
-rw-r--r--  drivers/base/power/main.c             |  247
-rw-r--r--  drivers/base/power/opp.c              |    1
-rw-r--r--  drivers/base/power/power.h            |    4
-rw-r--r--  drivers/base/power/qos.c              |   61
-rw-r--r--  drivers/base/power/runtime.c          |    3
-rw-r--r--  drivers/base/power/sysfs.c            |   47
-rw-r--r--  drivers/base/power/wakeup.c           |   85
-rw-r--r--  drivers/base/regmap/internal.h        |   14
-rw-r--r--  drivers/base/regmap/regcache-lzo.c    |   17
-rw-r--r--  drivers/base/regmap/regcache-rbtree.c |   38
-rw-r--r--  drivers/base/regmap/regcache.c        |  104
-rw-r--r--  drivers/base/regmap/regmap-debugfs.c  |   89
-rw-r--r--  drivers/base/regmap/regmap-i2c.c      |   17
-rw-r--r--  drivers/base/regmap/regmap-irq.c      |    1
-rw-r--r--  drivers/base/regmap/regmap-spi.c      |   17
-rw-r--r--  drivers/base/regmap/regmap.c          |  307
-rw-r--r--  drivers/base/soc.c                    |  183
33 files changed, 1860 insertions(+), 411 deletions(-)
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 7be9f79018e9..9aa618acfe97 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -176,6 +176,9 @@ config GENERIC_CPU_DEVICES
 	bool
 	default n
 
+config SOC_BUS
+	bool
+
 source "drivers/base/regmap/Kconfig"
 
 config DMA_SHARED_BUFFER
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 610f9997a403..b6d1b9c4200c 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_MODULES) += module.o
 endif
 obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor.o
 obj-$(CONFIG_REGMAP)	+= regmap/
+obj-$(CONFIG_SOC_BUS) += soc.o
 
 ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
 
diff --git a/drivers/base/base.h b/drivers/base/base.h
index b858dfd9a37c..6ee17bb391a9 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -59,6 +59,10 @@ struct driver_private {
  * @knode_parent - node in sibling list
  * @knode_driver - node in driver list
  * @knode_bus - node in bus list
+ * @deferred_probe - entry in deferred_probe_list which is used to retry the
+ *	binding of drivers which were unable to get all the resources needed by
+ *	the device; typically because it depends on another driver getting
+ *	probed first.
  * @driver_data - private pointer for driver specific info.  Will turn into a
  *	list soon.
  * @device - pointer back to the struct class that this structure is
@@ -71,6 +75,7 @@ struct device_private {
 	struct klist_node knode_parent;
 	struct klist_node knode_driver;
 	struct klist_node knode_bus;
+	struct list_head deferred_probe;
 	void *driver_data;
 	struct device *device;
 };
@@ -105,6 +110,7 @@ extern void bus_remove_driver(struct device_driver *drv);
 
 extern void driver_detach(struct device_driver *drv);
 extern int driver_probe_device(struct device_driver *drv, struct device *dev);
+extern void driver_deferred_probe_del(struct device *dev);
 static inline int driver_match_device(struct device_driver *drv,
 				      struct device *dev)
 {
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 40fb12288ce2..26a06b801b5b 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -1194,13 +1194,15 @@ EXPORT_SYMBOL_GPL(subsys_interface_register);
 
 void subsys_interface_unregister(struct subsys_interface *sif)
 {
-	struct bus_type *subsys = sif->subsys;
+	struct bus_type *subsys;
 	struct subsys_dev_iter iter;
 	struct device *dev;
 
-	if (!sif)
+	if (!sif || !sif->subsys)
 		return;
 
+	subsys = sif->subsys;
+
 	mutex_lock(&subsys->p->mutex);
 	list_del_init(&sif->node);
 	if (sif->remove_dev) {
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 74dda4f697f9..e28ce9898af4 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -18,6 +18,8 @@
 #include <linux/string.h>
 #include <linux/kdev_t.h>
 #include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/genhd.h>
 #include <linux/kallsyms.h>
 #include <linux/mutex.h>
@@ -267,6 +269,9 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj,
 	if (dev->driver)
 		add_uevent_var(env, "DRIVER=%s", dev->driver->name);
 
+	/* Add common DT information about the device */
+	of_device_uevent(dev, env);
+
 	/* have the bus specific function add its stuff */
 	if (dev->bus && dev->bus->uevent) {
 		retval = dev->bus->uevent(dev, env);
@@ -921,6 +926,7 @@ int device_private_init(struct device *dev)
 	dev->p->device = dev;
 	klist_init(&dev->p->klist_children, klist_children_get,
 		   klist_children_put);
+	INIT_LIST_HEAD(&dev->p->deferred_probe);
 	return 0;
 }
 
@@ -1188,6 +1194,7 @@ void device_del(struct device *dev)
 	device_remove_file(dev, &uevent_attr);
 	device_remove_attrs(dev);
 	bus_remove_device(dev);
+	driver_deferred_probe_del(dev);
 
 	/*
 	 * Some platform devices are driven without driver attached
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 4dabf5077c48..adf937bf4091 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -11,6 +11,7 @@
 #include <linux/device.h>
 #include <linux/node.h>
 #include <linux/gfp.h>
+#include <linux/slab.h>
 #include <linux/percpu.h>
 
 #include "base.h"
@@ -244,6 +245,9 @@ int __cpuinit register_cpu(struct cpu *cpu, int num)
 	cpu->dev.id = num;
 	cpu->dev.bus = &cpu_subsys;
 	cpu->dev.release = cpu_device_release;
+#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
+	cpu->dev.bus->uevent = arch_cpu_uevent;
+#endif
 	error = device_register(&cpu->dev);
 	if (!error && cpu->hotpluggable)
 		register_cpu_control(cpu);
@@ -268,6 +272,10 @@ struct device *get_cpu_device(unsigned cpu)
 }
 EXPORT_SYMBOL_GPL(get_cpu_device);
 
+#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
+static DEVICE_ATTR(modalias, 0444, arch_print_cpu_modalias, NULL);
+#endif
+
 static struct attribute *cpu_root_attrs[] = {
 #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
 	&dev_attr_probe.attr,
@@ -278,6 +286,9 @@ static struct attribute *cpu_root_attrs[] = {
 	&cpu_attrs[2].attr.attr,
 	&dev_attr_kernel_max.attr,
 	&dev_attr_offline.attr,
+#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
+	&dev_attr_modalias.attr,
+#endif
 	NULL
 };
 
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 142e3d600f14..1b1cbb571d38 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -28,6 +28,141 @@
 #include "base.h"
 #include "power/power.h"
 
+/*
+ * Deferred Probe infrastructure.
+ *
+ * Sometimes driver probe order matters, but the kernel doesn't always have
+ * dependency information which means some drivers will get probed before a
+ * resource it depends on is available.  For example, an SDHCI driver may
+ * first need a GPIO line from an i2c GPIO controller before it can be
+ * initialized.  If a required resource is not available yet, a driver can
+ * request probing to be deferred by returning -EPROBE_DEFER from its probe hook
+ *
+ * Deferred probe maintains two lists of devices, a pending list and an active
+ * list.  A driver returning -EPROBE_DEFER causes the device to be added to the
+ * pending list.  A successful driver probe will trigger moving all devices
+ * from the pending to the active list so that the workqueue will eventually
+ * retry them.
+ *
+ * The deferred_probe_mutex must be held any time the deferred_probe_*_list
+ * of the (struct device*)->p->deferred_probe pointers are manipulated
+ */
+static DEFINE_MUTEX(deferred_probe_mutex);
+static LIST_HEAD(deferred_probe_pending_list);
+static LIST_HEAD(deferred_probe_active_list);
+static struct workqueue_struct *deferred_wq;
+
+/**
+ * deferred_probe_work_func() - Retry probing devices in the active list.
+ */
+static void deferred_probe_work_func(struct work_struct *work)
+{
+	struct device *dev;
+	struct device_private *private;
+	/*
+	 * This block processes every device in the deferred 'active' list.
+	 * Each device is removed from the active list and passed to
+	 * bus_probe_device() to re-attempt the probe.  The loop continues
+	 * until every device in the active list is removed and retried.
+	 *
+	 * Note: Once the device is removed from the list and the mutex is
+	 * released, it is possible for the device get freed by another thread
+	 * and cause a illegal pointer dereference.  This code uses
+	 * get/put_device() to ensure the device structure cannot disappear
+	 * from under our feet.
+	 */
+	mutex_lock(&deferred_probe_mutex);
+	while (!list_empty(&deferred_probe_active_list)) {
+		private = list_first_entry(&deferred_probe_active_list,
+					typeof(*dev->p), deferred_probe);
+		dev = private->device;
+		list_del_init(&private->deferred_probe);
+
+		get_device(dev);
+
+		/*
+		 * Drop the mutex while probing each device; the probe path may
+		 * manipulate the deferred list
+		 */
+		mutex_unlock(&deferred_probe_mutex);
+		dev_dbg(dev, "Retrying from deferred list\n");
+		bus_probe_device(dev);
+		mutex_lock(&deferred_probe_mutex);
+
+		put_device(dev);
+	}
+	mutex_unlock(&deferred_probe_mutex);
+}
+static DECLARE_WORK(deferred_probe_work, deferred_probe_work_func);
+
+static void driver_deferred_probe_add(struct device *dev)
+{
+	mutex_lock(&deferred_probe_mutex);
+	if (list_empty(&dev->p->deferred_probe)) {
+		dev_dbg(dev, "Added to deferred list\n");
+		list_add(&dev->p->deferred_probe, &deferred_probe_pending_list);
+	}
+	mutex_unlock(&deferred_probe_mutex);
+}
+
+void driver_deferred_probe_del(struct device *dev)
+{
+	mutex_lock(&deferred_probe_mutex);
+	if (!list_empty(&dev->p->deferred_probe)) {
+		dev_dbg(dev, "Removed from deferred list\n");
+		list_del_init(&dev->p->deferred_probe);
+	}
+	mutex_unlock(&deferred_probe_mutex);
+}
+
+static bool driver_deferred_probe_enable = false;
+/**
+ * driver_deferred_probe_trigger() - Kick off re-probing deferred devices
+ *
+ * This functions moves all devices from the pending list to the active
+ * list and schedules the deferred probe workqueue to process them.  It
+ * should be called anytime a driver is successfully bound to a device.
+ */
+static void driver_deferred_probe_trigger(void)
+{
+	if (!driver_deferred_probe_enable)
+		return;
+
+	/*
+	 * A successful probe means that all the devices in the pending list
+	 * should be triggered to be reprobed.  Move all the deferred devices
+	 * into the active list so they can be retried by the workqueue
+	 */
+	mutex_lock(&deferred_probe_mutex);
+	list_splice_tail_init(&deferred_probe_pending_list,
+			      &deferred_probe_active_list);
+	mutex_unlock(&deferred_probe_mutex);
+
+	/*
+	 * Kick the re-probe thread.  It may already be scheduled, but it is
+	 * safe to kick it again.
+	 */
+	queue_work(deferred_wq, &deferred_probe_work);
+}
+
+/**
+ * deferred_probe_initcall() - Enable probing of deferred devices
+ *
+ * We don't want to get in the way when the bulk of drivers are getting probed.
+ * Instead, this initcall makes sure that deferred probing is delayed until
+ * late_initcall time.
+ */
+static int deferred_probe_initcall(void)
+{
+	deferred_wq = create_singlethread_workqueue("deferwq");
+	if (WARN_ON(!deferred_wq))
+		return -ENOMEM;
+
+	driver_deferred_probe_enable = true;
+	driver_deferred_probe_trigger();
+	return 0;
+}
+late_initcall(deferred_probe_initcall);
 
 static void driver_bound(struct device *dev)
 {
@@ -42,6 +177,13 @@ static void driver_bound(struct device *dev)
 
 	klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);
 
+	/*
+	 * Make sure the device is no longer in one of the deferred lists and
+	 * kick off retrying all pending devices
+	 */
+	driver_deferred_probe_del(dev);
+	driver_deferred_probe_trigger();
+
 	if (dev->bus)
 		blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
 					     BUS_NOTIFY_BOUND_DRIVER, dev);
@@ -142,7 +284,11 @@ probe_failed:
 	driver_sysfs_remove(dev);
 	dev->driver = NULL;
 
-	if (ret != -ENODEV && ret != -ENXIO) {
+	if (ret == -EPROBE_DEFER) {
+		/* Driver requested deferred probing */
+		dev_info(dev, "Driver %s requests probe deferral\n", drv->name);
+		driver_deferred_probe_add(dev);
+	} else if (ret != -ENODEV && ret != -ENXIO) {
 		/* driver matched but the probe failed */
 		printk(KERN_WARNING
 		       "%s: probe of %s failed with error %d\n",
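
For context, a minimal sketch of how a driver would use the deferred probe support added above. This is a hypothetical consumer, not part of this patch, and the unconditional -EPROBE_DEFER on clk_get() failure is a simplification:

	/* Hypothetical example only -- not part of this patch. */
	#include <linux/module.h>
	#include <linux/platform_device.h>
	#include <linux/err.h>
	#include <linux/clk.h>

	static int example_probe(struct platform_device *pdev)
	{
		struct clk *clk;

		/* The clock may be registered by a driver that has not probed yet. */
		clk = clk_get(&pdev->dev, NULL);
		if (IS_ERR(clk))
			return -EPROBE_DEFER;	/* device goes on the pending list */

		clk_prepare_enable(clk);
		return 0;
	}

	static struct platform_driver example_driver = {
		.probe	= example_probe,
		.driver	= {
			.name = "example",
		},
	};
	module_platform_driver(example_driver);
	MODULE_LICENSE("GPL");

Once another driver binds successfully, driver_deferred_probe_trigger() moves this device to the active list and the "deferwq" workqueue retries the probe.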
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
index e38ad243b4bb..07cbbc6fddb4 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/base/dma-buf.c
@@ -71,7 +71,7 @@ static inline int is_dma_buf_file(struct file *file)
  * ops, or error in allocating struct dma_buf, will return negative error.
  *
  */
-struct dma_buf *dma_buf_export(void *priv, struct dma_buf_ops *ops,
+struct dma_buf *dma_buf_export(void *priv, const struct dma_buf_ops *ops,
 				size_t size, int flags)
 {
 	struct dma_buf *dmabuf;
@@ -80,7 +80,9 @@ struct dma_buf *dma_buf_export(void *priv, struct dma_buf_ops *ops,
 	if (WARN_ON(!priv || !ops
 		    || !ops->map_dma_buf
 		    || !ops->unmap_dma_buf
-		    || !ops->release)) {
+		    || !ops->release
+		    || !ops->kmap_atomic
+		    || !ops->kmap)) {
 		return ERR_PTR(-EINVAL);
 	}
 
@@ -107,17 +109,18 @@ EXPORT_SYMBOL_GPL(dma_buf_export);
 /**
  * dma_buf_fd - returns a file descriptor for the given dma_buf
  * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
+ * @flags:	[in]	flags to give to fd
  *
  * On success, returns an associated 'fd'. Else, returns error.
  */
-int dma_buf_fd(struct dma_buf *dmabuf)
+int dma_buf_fd(struct dma_buf *dmabuf, int flags)
 {
 	int error, fd;
 
 	if (!dmabuf || !dmabuf->file)
 		return -EINVAL;
 
-	error = get_unused_fd();
+	error = get_unused_fd_flags(flags);
 	if (error < 0)
 		return error;
 	fd = error;
@@ -185,17 +188,18 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
 	struct dma_buf_attachment *attach;
 	int ret;
 
-	if (WARN_ON(!dmabuf || !dev || !dmabuf->ops))
+	if (WARN_ON(!dmabuf || !dev))
 		return ERR_PTR(-EINVAL);
 
 	attach = kzalloc(sizeof(struct dma_buf_attachment), GFP_KERNEL);
 	if (attach == NULL)
-		goto err_alloc;
-
-	mutex_lock(&dmabuf->lock);
+		return ERR_PTR(-ENOMEM);
 
 	attach->dev = dev;
 	attach->dmabuf = dmabuf;
+
+	mutex_lock(&dmabuf->lock);
+
 	if (dmabuf->ops->attach) {
 		ret = dmabuf->ops->attach(dmabuf, dev, attach);
 		if (ret)
@@ -206,8 +210,6 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
 	mutex_unlock(&dmabuf->lock);
 	return attach;
 
-err_alloc:
-	return ERR_PTR(-ENOMEM);
 err_attach:
 	kfree(attach);
 	mutex_unlock(&dmabuf->lock);
@@ -224,7 +226,7 @@ EXPORT_SYMBOL_GPL(dma_buf_attach);
  */
 void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
 {
-	if (WARN_ON(!dmabuf || !attach || !dmabuf->ops))
+	if (WARN_ON(!dmabuf || !attach))
 		return;
 
 	mutex_lock(&dmabuf->lock);
@@ -255,13 +257,10 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
 
 	might_sleep();
 
-	if (WARN_ON(!attach || !attach->dmabuf || !attach->dmabuf->ops))
+	if (WARN_ON(!attach || !attach->dmabuf))
 		return ERR_PTR(-EINVAL);
 
-	mutex_lock(&attach->dmabuf->lock);
-	if (attach->dmabuf->ops->map_dma_buf)
-		sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
-	mutex_unlock(&attach->dmabuf->lock);
+	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
 
 	return sg_table;
 }
@@ -273,19 +272,137 @@ EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
  * dma_buf_ops.
  * @attach:	[in]	attachment to unmap buffer from
  * @sg_table:	[in]	scatterlist info of the buffer to unmap
+ * @direction:	[in]	direction of DMA transfer
  *
  */
 void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
-				struct sg_table *sg_table)
+				struct sg_table *sg_table,
+				enum dma_data_direction direction)
 {
-	if (WARN_ON(!attach || !attach->dmabuf || !sg_table
-		    || !attach->dmabuf->ops))
+	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
 		return;
 
-	mutex_lock(&attach->dmabuf->lock);
-	if (attach->dmabuf->ops->unmap_dma_buf)
-		attach->dmabuf->ops->unmap_dma_buf(attach, sg_table);
-	mutex_unlock(&attach->dmabuf->lock);
-
+	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
+						direction);
 }
 EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
+
+
+/**
+ * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
+ * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
+ * preparations. Coherency is only guaranteed in the specified range for the
+ * specified access direction.
+ * @dma_buf:	[in]	buffer to prepare cpu access for.
+ * @start:	[in]	start of range for cpu access.
+ * @len:	[in]	length of range for cpu access.
+ * @direction:	[in]	length of range for cpu access.
+ *
+ * Can return negative error values, returns 0 on success.
+ */
+int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
+			     enum dma_data_direction direction)
+{
+	int ret = 0;
+
+	if (WARN_ON(!dmabuf))
+		return -EINVAL;
+
+	if (dmabuf->ops->begin_cpu_access)
+		ret = dmabuf->ops->begin_cpu_access(dmabuf, start, len, direction);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
+
+/**
+ * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
+ * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
+ * actions. Coherency is only guaranteed in the specified range for the
+ * specified access direction.
+ * @dma_buf:	[in]	buffer to complete cpu access for.
+ * @start:	[in]	start of range for cpu access.
+ * @len:	[in]	length of range for cpu access.
+ * @direction:	[in]	length of range for cpu access.
+ *
+ * This call must always succeed.
+ */
+void dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
+			    enum dma_data_direction direction)
+{
+	WARN_ON(!dmabuf);
+
+	if (dmabuf->ops->end_cpu_access)
+		dmabuf->ops->end_cpu_access(dmabuf, start, len, direction);
+}
+EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
+
+/**
+ * dma_buf_kmap_atomic - Map a page of the buffer object into kernel address
+ * space. The same restrictions as for kmap_atomic and friends apply.
+ * @dma_buf:	[in]	buffer to map page from.
+ * @page_num:	[in]	page in PAGE_SIZE units to map.
+ *
+ * This call must always succeed, any necessary preparations that might fail
+ * need to be done in begin_cpu_access.
+ */
+void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num)
+{
+	WARN_ON(!dmabuf);
+
+	return dmabuf->ops->kmap_atomic(dmabuf, page_num);
+}
+EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic);
+
+/**
+ * dma_buf_kunmap_atomic - Unmap a page obtained by dma_buf_kmap_atomic.
+ * @dma_buf:	[in]	buffer to unmap page from.
+ * @page_num:	[in]	page in PAGE_SIZE units to unmap.
+ * @vaddr:	[in]	kernel space pointer obtained from dma_buf_kmap_atomic.
+ *
+ * This call must always succeed.
+ */
+void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num,
+			   void *vaddr)
+{
+	WARN_ON(!dmabuf);
+
+	if (dmabuf->ops->kunmap_atomic)
+		dmabuf->ops->kunmap_atomic(dmabuf, page_num, vaddr);
+}
+EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic);
+
+/**
+ * dma_buf_kmap - Map a page of the buffer object into kernel address space. The
+ * same restrictions as for kmap and friends apply.
+ * @dma_buf:	[in]	buffer to map page from.
+ * @page_num:	[in]	page in PAGE_SIZE units to map.
+ *
+ * This call must always succeed, any necessary preparations that might fail
+ * need to be done in begin_cpu_access.
+ */
+void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
+{
+	WARN_ON(!dmabuf);
+
+	return dmabuf->ops->kmap(dmabuf, page_num);
+}
+EXPORT_SYMBOL_GPL(dma_buf_kmap);
+
+/**
+ * dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap.
+ * @dma_buf:	[in]	buffer to unmap page from.
+ * @page_num:	[in]	page in PAGE_SIZE units to unmap.
+ * @vaddr:	[in]	kernel space pointer obtained from dma_buf_kmap.
+ *
+ * This call must always succeed.
+ */
+void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
+		    void *vaddr)
+{
+	WARN_ON(!dmabuf);
+
+	if (dmabuf->ops->kunmap)
+		dmabuf->ops->kunmap(dmabuf, page_num, vaddr);
+}
+EXPORT_SYMBOL_GPL(dma_buf_kunmap);
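
A hedged sketch of how an importer might use the CPU-access helpers introduced above; the buffer, range and direction are illustrative, and exporter-specific details are omitted:

	/* Illustrative only -- not part of this patch. */
	#include <linux/dma-buf.h>
	#include <linux/dma-direction.h>
	#include <linux/mm.h>

	static int example_read_first_page(struct dma_buf *buf)
	{
		void *vaddr;
		int ret;

		ret = dma_buf_begin_cpu_access(buf, 0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (ret)
			return ret;

		vaddr = dma_buf_kmap(buf, 0);		/* page 0 of the buffer */
		/* ... read the data at vaddr ... */
		dma_buf_kunmap(buf, 0, vaddr);

		dma_buf_end_cpu_access(buf, 0, PAGE_SIZE, DMA_FROM_DEVICE);
		return 0;
	}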
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index b631f7c59453..3ec3896c83a6 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -123,64 +123,6 @@ void driver_remove_file(struct device_driver *drv,
 }
 EXPORT_SYMBOL_GPL(driver_remove_file);
 
-/**
- * driver_add_kobj - add a kobject below the specified driver
- * @drv: requesting device driver
- * @kobj: kobject to add below this driver
- * @fmt: format string that names the kobject
- *
- * You really don't want to do this, this is only here due to one looney
- * iseries driver, go poke those developers if you are annoyed about
- * this...
- */
-int driver_add_kobj(struct device_driver *drv, struct kobject *kobj,
-		    const char *fmt, ...)
-{
-	va_list args;
-	char *name;
-	int ret;
-
-	va_start(args, fmt);
-	name = kvasprintf(GFP_KERNEL, fmt, args);
-	va_end(args);
-
-	if (!name)
-		return -ENOMEM;
-
-	ret = kobject_add(kobj, &drv->p->kobj, "%s", name);
-	kfree(name);
-	return ret;
-}
-EXPORT_SYMBOL_GPL(driver_add_kobj);
-
-/**
- * get_driver - increment driver reference count.
- * @drv: driver.
- */
-struct device_driver *get_driver(struct device_driver *drv)
-{
-	if (drv) {
-		struct driver_private *priv;
-		struct kobject *kobj;
-
-		kobj = kobject_get(&drv->p->kobj);
-		priv = to_driver(kobj);
-		return priv->driver;
-	}
-	return NULL;
-}
-EXPORT_SYMBOL_GPL(get_driver);
-
-/**
- * put_driver - decrement driver's refcount.
- * @drv: driver.
- */
-void put_driver(struct device_driver *drv)
-{
-	kobject_put(&drv->p->kobj);
-}
-EXPORT_SYMBOL_GPL(put_driver);
-
 static int driver_add_groups(struct device_driver *drv,
 			     const struct attribute_group **groups)
 {
@@ -234,7 +176,6 @@ int driver_register(struct device_driver *drv)
 
 	other = driver_find(drv->name, drv->bus);
 	if (other) {
-		put_driver(other);
 		printk(KERN_ERR "Error: Driver '%s' is already registered, "
 			"aborting...\n", drv->name);
 		return -EBUSY;
@@ -275,7 +216,9 @@ EXPORT_SYMBOL_GPL(driver_unregister);
  * Call kset_find_obj() to iterate over list of drivers on
  * a bus to find driver by name. Return driver if found.
  *
- * Note that kset_find_obj increments driver's reference count.
+ * This routine provides no locking to prevent the driver it returns
+ * from being unregistered or unloaded while the caller is using it.
+ * The caller is responsible for preventing this.
  */
 struct device_driver *driver_find(const char *name, struct bus_type *bus)
 {
@@ -283,6 +226,8 @@ struct device_driver *driver_find(const char *name, struct bus_type *bus)
 	struct driver_private *priv;
 
 	if (k) {
+		/* Drop reference added by kset_find_obj() */
+		kobject_put(k);
 		priv = to_driver(k);
 		return priv->driver;
 	}
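
With this change, callers of driver_find() no longer own a reference and there is no put_driver() to call. A hedged sketch of the resulting calling convention (the driver and bus names are illustrative, not from this patch):

	/* Illustrative only -- not part of this patch. */
	#include <linux/device.h>
	#include <linux/platform_device.h>

	static bool example_driver_is_registered(const char *name)
	{
		struct device_driver *drv;

		/*
		 * driver_find() no longer takes a reference; the caller must
		 * ensure the driver cannot be unregistered while drv is in use.
		 */
		drv = driver_find(name, &platform_bus_type);
		return drv != NULL;
	}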
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 6c9387d646ec..5401814c874d 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -16,10 +16,11 @@
 #include <linux/interrupt.h>
 #include <linux/bitops.h>
 #include <linux/mutex.h>
-#include <linux/kthread.h>
+#include <linux/workqueue.h>
 #include <linux/highmem.h>
 #include <linux/firmware.h>
 #include <linux/slab.h>
+#include <linux/sched.h>
 
 #define to_dev(obj) container_of(obj, struct device, kobj)
 
@@ -81,6 +82,11 @@ enum {
 
 static int loading_timeout = 60;	/* In seconds */
 
+static inline long firmware_loading_timeout(void)
+{
+	return loading_timeout > 0 ? loading_timeout * HZ : MAX_SCHEDULE_TIMEOUT;
+}
+
 /* fw_lock could be moved to 'struct firmware_priv' but since it is just
  * guarding for corner cases a global lock should be OK */
 static DEFINE_MUTEX(fw_lock);
@@ -440,13 +446,11 @@ fw_create_instance(struct firmware *firmware, const char *fw_name,
 {
 	struct firmware_priv *fw_priv;
 	struct device *f_dev;
-	int error;
 
 	fw_priv = kzalloc(sizeof(*fw_priv) + strlen(fw_name) + 1 , GFP_KERNEL);
 	if (!fw_priv) {
 		dev_err(device, "%s: kmalloc failed\n", __func__);
-		error = -ENOMEM;
-		goto err_out;
+		return ERR_PTR(-ENOMEM);
 	}
 
 	fw_priv->fw = firmware;
@@ -463,98 +467,80 @@ fw_create_instance(struct firmware *firmware, const char *fw_name,
 	f_dev->parent = device;
 	f_dev->class = &firmware_class;
 
-	dev_set_uevent_suppress(f_dev, true);
-
-	/* Need to pin this module until class device is destroyed */
-	__module_get(THIS_MODULE);
-
-	error = device_add(f_dev);
-	if (error) {
-		dev_err(device, "%s: device_register failed\n", __func__);
-		goto err_put_dev;
-	}
-
-	error = device_create_bin_file(f_dev, &firmware_attr_data);
-	if (error) {
-		dev_err(device, "%s: sysfs_create_bin_file failed\n", __func__);
-		goto err_del_dev;
-	}
-
-	error = device_create_file(f_dev, &dev_attr_loading);
-	if (error) {
-		dev_err(device, "%s: device_create_file failed\n", __func__);
-		goto err_del_bin_attr;
-	}
-
-	if (uevent)
-		dev_set_uevent_suppress(f_dev, false);
-
 	return fw_priv;
-
-err_del_bin_attr:
-	device_remove_bin_file(f_dev, &firmware_attr_data);
-err_del_dev:
-	device_del(f_dev);
-err_put_dev:
-	put_device(f_dev);
-err_out:
-	return ERR_PTR(error);
 }
 
-static void fw_destroy_instance(struct firmware_priv *fw_priv)
-{
-	struct device *f_dev = &fw_priv->dev;
-
-	device_remove_file(f_dev, &dev_attr_loading);
-	device_remove_bin_file(f_dev, &firmware_attr_data);
-	device_unregister(f_dev);
-}
-
-static int _request_firmware(const struct firmware **firmware_p,
-			     const char *name, struct device *device,
-			     bool uevent, bool nowait)
+static struct firmware_priv *
+_request_firmware_prepare(const struct firmware **firmware_p, const char *name,
+			  struct device *device, bool uevent, bool nowait)
 {
-	struct firmware_priv *fw_priv;
 	struct firmware *firmware;
-	int retval = 0;
+	struct firmware_priv *fw_priv;
 
 	if (!firmware_p)
-		return -EINVAL;
+		return ERR_PTR(-EINVAL);
 
 	*firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
 	if (!firmware) {
 		dev_err(device, "%s: kmalloc(struct firmware) failed\n",
 			__func__);
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 	}
 
 	if (fw_get_builtin_firmware(firmware, name)) {
 		dev_dbg(device, "firmware: using built-in firmware %s\n", name);
-		return 0;
+		return NULL;
+	}
+
+	fw_priv = fw_create_instance(firmware, name, device, uevent, nowait);
+	if (IS_ERR(fw_priv)) {
+		release_firmware(firmware);
+		*firmware_p = NULL;
 	}
+	return fw_priv;
+}
 
-	read_lock_usermodehelper();
+static void _request_firmware_cleanup(const struct firmware **firmware_p)
+{
+	release_firmware(*firmware_p);
+	*firmware_p = NULL;
+}
 
-	if (WARN_ON(usermodehelper_is_disabled())) {
-		dev_err(device, "firmware: %s will not be loaded\n", name);
-		retval = -EBUSY;
-		goto out;
+static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
+				  long timeout)
+{
+	int retval = 0;
+	struct device *f_dev = &fw_priv->dev;
+
+	dev_set_uevent_suppress(f_dev, true);
+
+	/* Need to pin this module until class device is destroyed */
+	__module_get(THIS_MODULE);
+
+	retval = device_add(f_dev);
+	if (retval) {
+		dev_err(f_dev, "%s: device_register failed\n", __func__);
+		goto err_put_dev;
 	}
 
-	if (uevent)
-		dev_dbg(device, "firmware: requesting %s\n", name);
+	retval = device_create_bin_file(f_dev, &firmware_attr_data);
+	if (retval) {
+		dev_err(f_dev, "%s: sysfs_create_bin_file failed\n", __func__);
+		goto err_del_dev;
+	}
 
-	fw_priv = fw_create_instance(firmware, name, device, uevent, nowait);
-	if (IS_ERR(fw_priv)) {
-		retval = PTR_ERR(fw_priv);
-		goto out;
+	retval = device_create_file(f_dev, &dev_attr_loading);
+	if (retval) {
+		dev_err(f_dev, "%s: device_create_file failed\n", __func__);
+		goto err_del_bin_attr;
 	}
 
 	if (uevent) {
-		if (loading_timeout > 0)
+		dev_set_uevent_suppress(f_dev, false);
+		dev_dbg(f_dev, "firmware: requesting %s\n", fw_priv->fw_id);
+		if (timeout != MAX_SCHEDULE_TIMEOUT)
 			mod_timer(&fw_priv->timeout,
-				  round_jiffies_up(jiffies +
-						   loading_timeout * HZ));
+				  round_jiffies_up(jiffies + timeout));
 
 		kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD);
 	}
@@ -570,16 +556,13 @@ static int _request_firmware(const struct firmware **firmware_p,
 	fw_priv->fw = NULL;
 	mutex_unlock(&fw_lock);
 
-	fw_destroy_instance(fw_priv);
-
-out:
-	read_unlock_usermodehelper();
-
-	if (retval) {
-		release_firmware(firmware);
-		*firmware_p = NULL;
-	}
-
+	device_remove_file(f_dev, &dev_attr_loading);
+err_del_bin_attr:
+	device_remove_bin_file(f_dev, &firmware_attr_data);
+err_del_dev:
+	device_del(f_dev);
+err_put_dev:
+	put_device(f_dev);
 	return retval;
 }
 
@@ -602,7 +585,26 @@ int
 request_firmware(const struct firmware **firmware_p, const char *name,
 		 struct device *device)
 {
-	return _request_firmware(firmware_p, name, device, true, false);
+	struct firmware_priv *fw_priv;
+	int ret;
+
+	fw_priv = _request_firmware_prepare(firmware_p, name, device, true,
+					    false);
+	if (IS_ERR_OR_NULL(fw_priv))
+		return PTR_RET(fw_priv);
+
+	ret = usermodehelper_read_trylock();
+	if (WARN_ON(ret)) {
+		dev_err(device, "firmware: %s will not be loaded\n", name);
+	} else {
+		ret = _request_firmware_load(fw_priv, true,
+					firmware_loading_timeout());
+		usermodehelper_read_unlock();
+	}
+	if (ret)
+		_request_firmware_cleanup(firmware_p);
+
+	return ret;
 }
 
 /**
@@ -629,25 +631,39 @@ struct firmware_work {
 	bool uevent;
 };
 
-static int request_firmware_work_func(void *arg)
+static void request_firmware_work_func(struct work_struct *work)
 {
-	struct firmware_work *fw_work = arg;
+	struct firmware_work *fw_work;
 	const struct firmware *fw;
+	struct firmware_priv *fw_priv;
+	long timeout;
 	int ret;
 
-	if (!arg) {
-		WARN_ON(1);
-		return 0;
+	fw_work = container_of(work, struct firmware_work, work);
+	fw_priv = _request_firmware_prepare(&fw, fw_work->name, fw_work->device,
+					    fw_work->uevent, true);
+	if (IS_ERR_OR_NULL(fw_priv)) {
+		ret = PTR_RET(fw_priv);
+		goto out;
+	}
+
+	timeout = usermodehelper_read_lock_wait(firmware_loading_timeout());
+	if (timeout) {
+		ret = _request_firmware_load(fw_priv, fw_work->uevent, timeout);
+		usermodehelper_read_unlock();
+	} else {
+		dev_dbg(fw_work->device, "firmware: %s loading timed out\n",
+			fw_work->name);
+		ret = -EAGAIN;
 	}
+	if (ret)
+		_request_firmware_cleanup(&fw);
 
-	ret = _request_firmware(&fw, fw_work->name, fw_work->device,
-				fw_work->uevent, true);
+ out:
 	fw_work->cont(fw, fw_work->context);
 
 	module_put(fw_work->module);
 	kfree(fw_work);
-
-	return ret;
 }
 
 /**
@@ -673,7 +689,6 @@ request_firmware_nowait(
 	const char *name, struct device *device, gfp_t gfp, void *context,
 	void (*cont)(const struct firmware *fw, void *context))
 {
-	struct task_struct *task;
 	struct firmware_work *fw_work;
 
 	fw_work = kzalloc(sizeof (struct firmware_work), gfp);
@@ -692,15 +707,8 @@ request_firmware_nowait(
 		return -EFAULT;
 	}
 
-	task = kthread_run(request_firmware_work_func, fw_work,
-			   "firmware/%s", name);
-	if (IS_ERR(task)) {
-		fw_work->cont(NULL, fw_work->context);
-		module_put(fw_work->module);
-		kfree(fw_work);
-		return PTR_ERR(task);
-	}
-
+	INIT_WORK(&fw_work->work, request_firmware_work_func);
+	schedule_work(&fw_work->work);
 	return 0;
 }
 
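
The asynchronous interface is unchanged for callers; the callback now simply runs from a workqueue item instead of a dedicated "firmware/..." kthread. A hedged usage sketch (device and firmware file names are illustrative):

	/* Illustrative only -- not part of this patch. */
	#include <linux/firmware.h>
	#include <linux/device.h>

	static void example_fw_ready(const struct firmware *fw, void *context)
	{
		if (!fw)
			return;		/* load failed or timed out */
		/* ... program fw->data, fw->size into the hardware ... */
		release_firmware(fw);
	}

	static int example_load(struct device *dev)
	{
		return request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
					       "example.bin", dev, GFP_KERNEL,
					       dev, example_fw_ready);
	}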
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 9e60dbe9fd94..7dda4f790f00 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -466,7 +466,7 @@ store_hard_offline_page(struct device *dev,
 	if (strict_strtoull(buf, 0, &pfn) < 0)
 		return -EINVAL;
 	pfn >>= PAGE_SHIFT;
-	ret = __memory_failure(pfn, 0, 0);
+	ret = memory_failure(pfn, 0, 0);
 	return ret ? ret : count;
 }
 
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index f0c605e99ade..a1a722502587 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -621,7 +621,7 @@ static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
 	int rc;
 
 	/* Some devices have extra OF data and an OF-style MODALIAS */
-	rc = of_device_uevent(dev,env);
+	rc = of_device_uevent_modalias(dev,env);
 	if (rc != -ENODEV)
 		return rc;
 
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 428e55e012dc..869d7ff2227f 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -8,6 +8,7 @@
 
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/device.h>
 #include <linux/io.h>
 #include <linux/pm.h>
 #include <linux/pm_clock.h>
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index 4af7c1cbf909..a14085cc613f 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -8,6 +8,7 @@
 
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/device.h>
 #include <linux/export.h>
 #include <linux/slab.h>
 #include <linux/pm_clock.h>
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 978bbf7ac6af..73ce9fbe9839 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -366,7 +366,7 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
 	not_suspended = 0;
 	list_for_each_entry(pdd, &genpd->dev_list, list_node)
 		if (pdd->dev->driver && (!pm_runtime_suspended(pdd->dev)
-		    || pdd->dev->power.irq_safe))
+		    || pdd->dev->power.irq_safe || to_gpd_data(pdd)->always_on))
 			not_suspended++;
 
 	if (not_suspended > genpd->in_progress)
@@ -503,6 +503,9 @@ static int pm_genpd_runtime_suspend(struct device *dev)
 
 	might_sleep_if(!genpd->dev_irq_safe);
 
+	if (dev_gpd_data(dev)->always_on)
+		return -EBUSY;
+
 	stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
 	if (stop_ok && !stop_ok(dev))
 		return -EBUSY;
@@ -764,8 +767,10 @@ static int pm_genpd_prepare(struct device *dev)
 
 	genpd_acquire_lock(genpd);
 
-	if (genpd->prepared_count++ == 0)
+	if (genpd->prepared_count++ == 0) {
+		genpd->suspended_count = 0;
 		genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
+	}
 
 	genpd_release_lock(genpd);
 
@@ -820,17 +825,16 @@ static int pm_genpd_suspend(struct device *dev)
 }
 
 /**
- * pm_genpd_suspend_noirq - Late suspend of a device from an I/O PM domain.
+ * pm_genpd_suspend_late - Late suspend of a device from an I/O PM domain.
  * @dev: Device to suspend.
  *
  * Carry out a late suspend of a device under the assumption that its
  * pm_domain field points to the domain member of an object of type
  * struct generic_pm_domain representing a PM domain consisting of I/O devices.
  */
-static int pm_genpd_suspend_noirq(struct device *dev)
+static int pm_genpd_suspend_late(struct device *dev)
 {
 	struct generic_pm_domain *genpd;
-	int ret;
 
 	dev_dbg(dev, "%s()\n", __func__);
 
@@ -838,14 +842,28 @@ static int pm_genpd_suspend_noirq(struct device *dev)
 	if (IS_ERR(genpd))
 		return -EINVAL;
 
-	if (genpd->suspend_power_off)
-		return 0;
+	return genpd->suspend_power_off ? 0 : genpd_suspend_late(genpd, dev);
+}
 
-	ret = genpd_suspend_late(genpd, dev);
-	if (ret)
-		return ret;
+/**
+ * pm_genpd_suspend_noirq - Completion of suspend of device in an I/O PM domain.
+ * @dev: Device to suspend.
+ *
+ * Stop the device and remove power from the domain if all devices in it have
+ * been stopped.
+ */
+static int pm_genpd_suspend_noirq(struct device *dev)
+{
+	struct generic_pm_domain *genpd;
 
-	if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
+	dev_dbg(dev, "%s()\n", __func__);
+
+	genpd = dev_to_genpd(dev);
+	if (IS_ERR(genpd))
+		return -EINVAL;
+
+	if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on
+	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
 		return 0;
 
 	genpd_stop_dev(genpd, dev);
@@ -862,13 +880,10 @@ static int pm_genpd_suspend_noirq(struct device *dev)
 }
 
 /**
- * pm_genpd_resume_noirq - Early resume of a device from an I/O power domain.
+ * pm_genpd_resume_noirq - Start of resume of device in an I/O PM domain.
  * @dev: Device to resume.
  *
- * Carry out an early resume of a device under the assumption that its
- * pm_domain field points to the domain member of an object of type
- * struct generic_pm_domain representing a power domain consisting of I/O
- * devices.
+ * Restore power to the device's PM domain, if necessary, and start the device.
  */
 static int pm_genpd_resume_noirq(struct device *dev)
 {
@@ -880,7 +895,8 @@ static int pm_genpd_resume_noirq(struct device *dev)
 	if (IS_ERR(genpd))
 		return -EINVAL;
 
-	if (genpd->suspend_power_off)
+	if (genpd->suspend_power_off || dev_gpd_data(dev)->always_on
+	    || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev)))
 		return 0;
 
 	/*
@@ -890,13 +906,34 @@ static int pm_genpd_resume_noirq(struct device *dev)
 	 */
 	pm_genpd_poweron(genpd);
 	genpd->suspended_count--;
-	genpd_start_dev(genpd, dev);
 
-	return genpd_resume_early(genpd, dev);
+	return genpd_start_dev(genpd, dev);
 }
 
 /**
- * pm_genpd_resume - Resume a device belonging to an I/O power domain.
+ * pm_genpd_resume_early - Early resume of a device in an I/O PM domain.
+ * @dev: Device to resume.
+ *
+ * Carry out an early resume of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a power domain consisting of I/O
+ * devices.
+ */
+static int pm_genpd_resume_early(struct device *dev)
+{
+	struct generic_pm_domain *genpd;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	genpd = dev_to_genpd(dev);
+	if (IS_ERR(genpd))
+		return -EINVAL;
+
+	return genpd->suspend_power_off ? 0 : genpd_resume_early(genpd, dev);
+}
+
+/**
+ * pm_genpd_resume - Resume of device in an I/O PM domain.
  * @dev: Device to resume.
  *
  * Resume a device under the assumption that its pm_domain field points to the
@@ -917,7 +954,7 @@ static int pm_genpd_resume(struct device *dev)
 }
 
 /**
- * pm_genpd_freeze - Freeze a device belonging to an I/O power domain.
+ * pm_genpd_freeze - Freezing a device in an I/O PM domain.
  * @dev: Device to freeze.
  *
  * Freeze a device under the assumption that its pm_domain field points to the
@@ -938,7 +975,29 @@ static int pm_genpd_freeze(struct device *dev)
 }
 
 /**
- * pm_genpd_freeze_noirq - Late freeze of a device from an I/O power domain.
+ * pm_genpd_freeze_late - Late freeze of a device in an I/O PM domain.
+ * @dev: Device to freeze.
+ *
+ * Carry out a late freeze of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a power domain consisting of I/O
+ * devices.
+ */
+static int pm_genpd_freeze_late(struct device *dev)
+{
+	struct generic_pm_domain *genpd;
+
+	dev_dbg(dev, "%s()\n", __func__);
+
+	genpd = dev_to_genpd(dev);
+	if (IS_ERR(genpd))
+		return -EINVAL;
+
+	return genpd->suspend_power_off ? 0 : genpd_freeze_late(genpd, dev);
+}
+
+/**
+ * pm_genpd_freeze_noirq - Completion of freezing a device in an I/O PM domain.
  * @dev: Device to freeze.
  *
  * Carry out a late freeze of a device under the assumption that its
@@ -949,7 +1008,6 @@ static int pm_genpd_freeze(struct device *dev)
 static int pm_genpd_freeze_noirq(struct device *dev)
 {
 	struct generic_pm_domain *genpd;
-	int ret;
 
 	dev_dbg(dev, "%s()\n", __func__);
 
@@ -957,20 +1015,33 @@ static int pm_genpd_freeze_noirq(struct device *dev)
 	if (IS_ERR(genpd))
 		return -EINVAL;
 
-	if (genpd->suspend_power_off)
-		return 0;
+	return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ?
+		0 : genpd_stop_dev(genpd, dev);
+}
 
-	ret = genpd_freeze_late(genpd, dev);
-	if (ret)
-		return ret;
+/**
+ * pm_genpd_thaw_noirq - Early thaw of device in an I/O PM domain.
+ * @dev: Device to thaw.
+ *
+ * Start the device, unless power has been removed from the domain already
+ * before the system transition.
+ */
+static int pm_genpd_thaw_noirq(struct device *dev)
+{
+	struct generic_pm_domain *genpd;
 
-	genpd_stop_dev(genpd, dev);
+	dev_dbg(dev, "%s()\n", __func__);
 
-	return 0;
+	genpd = dev_to_genpd(dev);
+	if (IS_ERR(genpd))
+		return -EINVAL;
+
+	return genpd->suspend_power_off || dev_gpd_data(dev)->always_on ?
+		0 : genpd_start_dev(genpd, dev);
 }
 
 /**
- * pm_genpd_thaw_noirq - Early thaw of a device from an I/O power domain.
+ * pm_genpd_thaw_early - Early thaw of device in an I/O PM domain.
  * @dev: Device to thaw.
  *
  * Carry out an early thaw of a device under the assumption that its
@@ -978,7 +1049,7 @@ static int pm_genpd_freeze_noirq(struct device *dev)
  * struct generic_pm_domain representing a power domain consisting of I/O
  * devices.
  */
-static int pm_genpd_thaw_noirq(struct device *dev)
+static int pm_genpd_thaw_early(struct device *dev)
 {
 	struct generic_pm_domain *genpd;
 
@@ -988,12 +1059,7 @@ static int pm_genpd_thaw_noirq(struct device *dev)
 	if (IS_ERR(genpd))
 		return -EINVAL;
 
-	if (genpd->suspend_power_off)
-		return 0;
-
-	genpd_start_dev(genpd, dev);
-
-	return genpd_thaw_early(genpd, dev);
+	return genpd->suspend_power_off ? 0 : genpd_thaw_early(genpd, dev);
 }
 
 /**
@@ -1018,13 +1084,11 @@ static int pm_genpd_thaw(struct device *dev)
 }
 
 /**
- * pm_genpd_restore_noirq - Early restore of a device from an I/O power domain.
+ * pm_genpd_restore_noirq - Start of restore of device in an I/O PM domain.
  * @dev: Device to resume.
  *
- * Carry out an early restore of a device under the assumption that its
- * pm_domain field points to the domain member of an object of type
- * struct generic_pm_domain representing a power domain consisting of I/O
- * devices.
+ * Make sure the domain will be in the same power state as before the
+ * hibernation the system is resuming from and start the device if necessary.
  */
 static int pm_genpd_restore_noirq(struct device *dev)
 {
@@ -1040,23 +1104,35 @@ static int pm_genpd_restore_noirq(struct device *dev)
 	 * Since all of the "noirq" callbacks are executed sequentially, it is
 	 * guaranteed that this function will never run twice in parallel for
 	 * the same PM domain, so it is not necessary to use locking here.
+	 *
+	 * At this point suspended_count == 0 means we are being run for the
+	 * first time for the given domain in the present cycle.
 	 */
-	genpd->status = GPD_STATE_POWER_OFF;
-	if (genpd->suspend_power_off) {
+	if (genpd->suspended_count++ == 0) {
 		/*
-		 * The boot kernel might put the domain into the power on state,
-		 * so make sure it really is powered off.
+		 * The boot kernel might put the domain into arbitrary state,
+		 * so make it appear as powered off to pm_genpd_poweron(), so
+		 * that it tries to power it on in case it was really off.
 		 */
-		if (genpd->power_off)
-			genpd->power_off(genpd);
-		return 0;
+		genpd->status = GPD_STATE_POWER_OFF;
+		if (genpd->suspend_power_off) {
+			/*
+			 * If the domain was off before the hibernation, make
+			 * sure it will be off going forward.
+			 */
+			if (genpd->power_off)
+				genpd->power_off(genpd);
+
+			return 0;
+		}
 	}
 
+	if (genpd->suspend_power_off)
+		return 0;
+
 	pm_genpd_poweron(genpd);
-	genpd->suspended_count--;
-	genpd_start_dev(genpd, dev);
 
-	return genpd_resume_early(genpd, dev);
+	return dev_gpd_data(dev)->always_on ? 0 : genpd_start_dev(genpd, dev);
 }
 
 /**
@@ -1099,11 +1175,15 @@ static void pm_genpd_complete(struct device *dev)
 
 #define pm_genpd_prepare		NULL
 #define pm_genpd_suspend		NULL
+#define pm_genpd_suspend_late		NULL
 #define pm_genpd_suspend_noirq		NULL
+#define pm_genpd_resume_early		NULL
 #define pm_genpd_resume_noirq		NULL
 #define pm_genpd_resume			NULL
 #define pm_genpd_freeze			NULL
+#define pm_genpd_freeze_late		NULL
 #define pm_genpd_freeze_noirq		NULL
+#define pm_genpd_thaw_early		NULL
 #define pm_genpd_thaw_noirq		NULL
 #define pm_genpd_thaw			NULL
 #define pm_genpd_restore_noirq		NULL
@@ -1171,6 +1251,38 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1171} 1251}
1172 1252
1173/** 1253/**
1254 * __pm_genpd_of_add_device - Add a device to an I/O PM domain.
1255 * @genpd_node: Device tree node pointer representing the PM domain to which
1256 * the device is to be added.
1257 * @dev: Device to be added.
1258 * @td: Set of PM QoS timing parameters to attach to the device.
1259 */
1260int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev,
1261 struct gpd_timing_data *td)
1262{
1263 struct generic_pm_domain *genpd = NULL, *gpd;
1264
1265 dev_dbg(dev, "%s()\n", __func__);
1266
1267 if (IS_ERR_OR_NULL(genpd_node) || IS_ERR_OR_NULL(dev))
1268 return -EINVAL;
1269
1270 mutex_lock(&gpd_list_lock);
1271 list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
1272 if (gpd->of_node == genpd_node) {
1273 genpd = gpd;
1274 break;
1275 }
1276 }
1277 mutex_unlock(&gpd_list_lock);
1278
1279 if (!genpd)
1280 return -EINVAL;
1281
1282 return __pm_genpd_add_device(genpd, dev, td);
1283}
1284
1285/**
1174 * pm_genpd_remove_device - Remove a device from an I/O PM domain. 1286 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
1175 * @genpd: PM domain to remove the device from. 1287 * @genpd: PM domain to remove the device from.
1176 * @dev: Device to be removed. 1288 * @dev: Device to be removed.
@@ -1216,6 +1328,26 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
1216} 1328}
1217 1329
1218/** 1330/**
1331 * pm_genpd_dev_always_on - Set/unset the "always on" flag for a given device.
1332 * @dev: Device to set/unset the flag for.
1333 * @val: The new value of the device's "always on" flag.
1334 */
1335void pm_genpd_dev_always_on(struct device *dev, bool val)
1336{
1337 struct pm_subsys_data *psd;
1338 unsigned long flags;
1339
1340 spin_lock_irqsave(&dev->power.lock, flags);
1341
1342 psd = dev_to_psd(dev);
1343 if (psd && psd->domain_data)
1344 to_gpd_data(psd->domain_data)->always_on = val;
1345
1346 spin_unlock_irqrestore(&dev->power.lock, flags);
1347}
1348EXPORT_SYMBOL_GPL(pm_genpd_dev_always_on);
1349
1350/**
1219 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain. 1351 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1220 * @genpd: Master PM domain to add the subdomain to. 1352 * @genpd: Master PM domain to add the subdomain to.
1221 * @subdomain: Subdomain to be added. 1353 * @subdomain: Subdomain to be added.
@@ -1450,7 +1582,7 @@ static int pm_genpd_default_suspend_late(struct device *dev)
1450{ 1582{
1451 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late; 1583 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late;
1452 1584
1453 return cb ? cb(dev) : pm_generic_suspend_noirq(dev); 1585 return cb ? cb(dev) : pm_generic_suspend_late(dev);
1454} 1586}
1455 1587
1456/** 1588/**
@@ -1461,7 +1593,7 @@ static int pm_genpd_default_resume_early(struct device *dev)
1461{ 1593{
1462 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early; 1594 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early;
1463 1595
1464 return cb ? cb(dev) : pm_generic_resume_noirq(dev); 1596 return cb ? cb(dev) : pm_generic_resume_early(dev);
1465} 1597}
1466 1598
1467/** 1599/**
@@ -1494,7 +1626,7 @@ static int pm_genpd_default_freeze_late(struct device *dev)
1494{ 1626{
1495 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late; 1627 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late;
1496 1628
1497 return cb ? cb(dev) : pm_generic_freeze_noirq(dev); 1629 return cb ? cb(dev) : pm_generic_freeze_late(dev);
1498} 1630}
1499 1631
1500/** 1632/**
@@ -1505,7 +1637,7 @@ static int pm_genpd_default_thaw_early(struct device *dev)
1505{ 1637{
1506 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early; 1638 int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early;
1507 1639
1508 return cb ? cb(dev) : pm_generic_thaw_noirq(dev); 1640 return cb ? cb(dev) : pm_generic_thaw_early(dev);
1509} 1641}
1510 1642
1511/** 1643/**
@@ -1557,23 +1689,28 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
1557 genpd->poweroff_task = NULL; 1689 genpd->poweroff_task = NULL;
1558 genpd->resume_count = 0; 1690 genpd->resume_count = 0;
1559 genpd->device_count = 0; 1691 genpd->device_count = 0;
1560 genpd->suspended_count = 0;
1561 genpd->max_off_time_ns = -1; 1692 genpd->max_off_time_ns = -1;
1562 genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend; 1693 genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
1563 genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume; 1694 genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
1564 genpd->domain.ops.runtime_idle = pm_generic_runtime_idle; 1695 genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
1565 genpd->domain.ops.prepare = pm_genpd_prepare; 1696 genpd->domain.ops.prepare = pm_genpd_prepare;
1566 genpd->domain.ops.suspend = pm_genpd_suspend; 1697 genpd->domain.ops.suspend = pm_genpd_suspend;
1698 genpd->domain.ops.suspend_late = pm_genpd_suspend_late;
1567 genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq; 1699 genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
1568 genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq; 1700 genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
1701 genpd->domain.ops.resume_early = pm_genpd_resume_early;
1569 genpd->domain.ops.resume = pm_genpd_resume; 1702 genpd->domain.ops.resume = pm_genpd_resume;
1570 genpd->domain.ops.freeze = pm_genpd_freeze; 1703 genpd->domain.ops.freeze = pm_genpd_freeze;
1704 genpd->domain.ops.freeze_late = pm_genpd_freeze_late;
1571 genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq; 1705 genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
1572 genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq; 1706 genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
1707 genpd->domain.ops.thaw_early = pm_genpd_thaw_early;
1573 genpd->domain.ops.thaw = pm_genpd_thaw; 1708 genpd->domain.ops.thaw = pm_genpd_thaw;
1574 genpd->domain.ops.poweroff = pm_genpd_suspend; 1709 genpd->domain.ops.poweroff = pm_genpd_suspend;
1710 genpd->domain.ops.poweroff_late = pm_genpd_suspend_late;
1575 genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq; 1711 genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
1576 genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq; 1712 genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
1713 genpd->domain.ops.restore_early = pm_genpd_resume_early;
1577 genpd->domain.ops.restore = pm_genpd_resume; 1714 genpd->domain.ops.restore = pm_genpd_resume;
1578 genpd->domain.ops.complete = pm_genpd_complete; 1715 genpd->domain.ops.complete = pm_genpd_complete;
1579 genpd->dev_ops.save_state = pm_genpd_default_save_state; 1716 genpd->dev_ops.save_state = pm_genpd_default_save_state;
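
The two helpers added above, __pm_genpd_of_add_device() and pm_genpd_dev_always_on(), are aimed at platform code. A minimal, non-authoritative sketch of their intended use follows; the device-tree path and the device are invented for illustration, and the declarations are assumed to live in <linux/pm_domain.h> alongside the other genpd helpers:

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>

/* Attach a device to the PM domain described by a DT node and keep it
 * powered across system suspend (e.g. an always-running timer block). */
static int example_attach_to_domain(struct platform_device *pdev)
{
	struct device_node *np;
	int ret;

	np = of_find_node_by_path("/pm-domains/a3rv");	/* hypothetical node */
	if (!np)
		return -ENODEV;

	ret = __pm_genpd_of_add_device(np, &pdev->dev, NULL);
	of_node_put(np);
	if (ret)
		return ret;

	pm_genpd_dev_always_on(&pdev->dev, true);
	return 0;
}
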
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index 10bdd793f0bd..d03d290f31c2 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -92,59 +92,28 @@ int pm_generic_prepare(struct device *dev)
92} 92}
93 93
94/** 94/**
95 * __pm_generic_call - Generic suspend/freeze/poweroff/thaw subsystem callback. 95 * pm_generic_suspend_noirq - Generic suspend_noirq callback for subsystems.
96 * @dev: Device to handle. 96 * @dev: Device to suspend.
97 * @event: PM transition of the system under way.
98 * @bool: Whether or not this is the "noirq" stage.
99 *
100 * Execute the PM callback corresponding to @event provided by the driver of
101 * @dev, if defined, and return its error code. Return 0 if the callback is
102 * not present.
103 */ 97 */
104static int __pm_generic_call(struct device *dev, int event, bool noirq) 98int pm_generic_suspend_noirq(struct device *dev)
105{ 99{
106 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 100 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
107 int (*callback)(struct device *);
108
109 if (!pm)
110 return 0;
111
112 switch (event) {
113 case PM_EVENT_SUSPEND:
114 callback = noirq ? pm->suspend_noirq : pm->suspend;
115 break;
116 case PM_EVENT_FREEZE:
117 callback = noirq ? pm->freeze_noirq : pm->freeze;
118 break;
119 case PM_EVENT_HIBERNATE:
120 callback = noirq ? pm->poweroff_noirq : pm->poweroff;
121 break;
122 case PM_EVENT_RESUME:
123 callback = noirq ? pm->resume_noirq : pm->resume;
124 break;
125 case PM_EVENT_THAW:
126 callback = noirq ? pm->thaw_noirq : pm->thaw;
127 break;
128 case PM_EVENT_RESTORE:
129 callback = noirq ? pm->restore_noirq : pm->restore;
130 break;
131 default:
132 callback = NULL;
133 break;
134 }
135 101
136 return callback ? callback(dev) : 0; 102 return pm && pm->suspend_noirq ? pm->suspend_noirq(dev) : 0;
137} 103}
104EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);
138 105
139/** 106/**
140 * pm_generic_suspend_noirq - Generic suspend_noirq callback for subsystems. 107 * pm_generic_suspend_late - Generic suspend_late callback for subsystems.
141 * @dev: Device to suspend. 108 * @dev: Device to suspend.
142 */ 109 */
143int pm_generic_suspend_noirq(struct device *dev) 110int pm_generic_suspend_late(struct device *dev)
144{ 111{
145 return __pm_generic_call(dev, PM_EVENT_SUSPEND, true); 112 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
113
114 return pm && pm->suspend_late ? pm->suspend_late(dev) : 0;
146} 115}
147EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq); 116EXPORT_SYMBOL_GPL(pm_generic_suspend_late);
148 117
149/** 118/**
150 * pm_generic_suspend - Generic suspend callback for subsystems. 119 * pm_generic_suspend - Generic suspend callback for subsystems.
@@ -152,7 +121,9 @@ EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);
152 */ 121 */
153int pm_generic_suspend(struct device *dev) 122int pm_generic_suspend(struct device *dev)
154{ 123{
155 return __pm_generic_call(dev, PM_EVENT_SUSPEND, false); 124 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
125
126 return pm && pm->suspend ? pm->suspend(dev) : 0;
156} 127}
157EXPORT_SYMBOL_GPL(pm_generic_suspend); 128EXPORT_SYMBOL_GPL(pm_generic_suspend);
158 129
@@ -162,17 +133,33 @@ EXPORT_SYMBOL_GPL(pm_generic_suspend);
162 */ 133 */
163int pm_generic_freeze_noirq(struct device *dev) 134int pm_generic_freeze_noirq(struct device *dev)
164{ 135{
165 return __pm_generic_call(dev, PM_EVENT_FREEZE, true); 136 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
137
138 return pm && pm->freeze_noirq ? pm->freeze_noirq(dev) : 0;
166} 139}
167EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq); 140EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq);
168 141
169/** 142/**
143 * pm_generic_freeze_late - Generic freeze_late callback for subsystems.
144 * @dev: Device to freeze.
145 */
146int pm_generic_freeze_late(struct device *dev)
147{
148 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
149
150 return pm && pm->freeze_late ? pm->freeze_late(dev) : 0;
151}
152EXPORT_SYMBOL_GPL(pm_generic_freeze_late);
153
154/**
170 * pm_generic_freeze - Generic freeze callback for subsystems. 155 * pm_generic_freeze - Generic freeze callback for subsystems.
171 * @dev: Device to freeze. 156 * @dev: Device to freeze.
172 */ 157 */
173int pm_generic_freeze(struct device *dev) 158int pm_generic_freeze(struct device *dev)
174{ 159{
175 return __pm_generic_call(dev, PM_EVENT_FREEZE, false); 160 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
161
162 return pm && pm->freeze ? pm->freeze(dev) : 0;
176} 163}
177EXPORT_SYMBOL_GPL(pm_generic_freeze); 164EXPORT_SYMBOL_GPL(pm_generic_freeze);
178 165
@@ -182,17 +169,33 @@ EXPORT_SYMBOL_GPL(pm_generic_freeze);
182 */ 169 */
183int pm_generic_poweroff_noirq(struct device *dev) 170int pm_generic_poweroff_noirq(struct device *dev)
184{ 171{
185 return __pm_generic_call(dev, PM_EVENT_HIBERNATE, true); 172 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
173
174 return pm && pm->poweroff_noirq ? pm->poweroff_noirq(dev) : 0;
186} 175}
187EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq); 176EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq);
188 177
189/** 178/**
179 * pm_generic_poweroff_late - Generic poweroff_late callback for subsystems.
180 * @dev: Device to handle.
181 */
182int pm_generic_poweroff_late(struct device *dev)
183{
184 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
185
186 return pm && pm->poweroff_late ? pm->poweroff_late(dev) : 0;
187}
188EXPORT_SYMBOL_GPL(pm_generic_poweroff_late);
189
190/**
190 * pm_generic_poweroff - Generic poweroff callback for subsystems. 191 * pm_generic_poweroff - Generic poweroff callback for subsystems.
191 * @dev: Device to handle. 192 * @dev: Device to handle.
192 */ 193 */
193int pm_generic_poweroff(struct device *dev) 194int pm_generic_poweroff(struct device *dev)
194{ 195{
195 return __pm_generic_call(dev, PM_EVENT_HIBERNATE, false); 196 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
197
198 return pm && pm->poweroff ? pm->poweroff(dev) : 0;
196} 199}
197EXPORT_SYMBOL_GPL(pm_generic_poweroff); 200EXPORT_SYMBOL_GPL(pm_generic_poweroff);
198 201
@@ -202,17 +205,33 @@ EXPORT_SYMBOL_GPL(pm_generic_poweroff);
202 */ 205 */
203int pm_generic_thaw_noirq(struct device *dev) 206int pm_generic_thaw_noirq(struct device *dev)
204{ 207{
205 return __pm_generic_call(dev, PM_EVENT_THAW, true); 208 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
209
210 return pm && pm->thaw_noirq ? pm->thaw_noirq(dev) : 0;
206} 211}
207EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq); 212EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);
208 213
209/** 214/**
215 * pm_generic_thaw_early - Generic thaw_early callback for subsystems.
216 * @dev: Device to thaw.
217 */
218int pm_generic_thaw_early(struct device *dev)
219{
220 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
221
222 return pm && pm->thaw_early ? pm->thaw_early(dev) : 0;
223}
224EXPORT_SYMBOL_GPL(pm_generic_thaw_early);
225
226/**
210 * pm_generic_thaw - Generic thaw callback for subsystems. 227 * pm_generic_thaw - Generic thaw callback for subsystems.
211 * @dev: Device to thaw. 228 * @dev: Device to thaw.
212 */ 229 */
213int pm_generic_thaw(struct device *dev) 230int pm_generic_thaw(struct device *dev)
214{ 231{
215 return __pm_generic_call(dev, PM_EVENT_THAW, false); 232 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
233
234 return pm && pm->thaw ? pm->thaw(dev) : 0;
216} 235}
217EXPORT_SYMBOL_GPL(pm_generic_thaw); 236EXPORT_SYMBOL_GPL(pm_generic_thaw);
218 237
@@ -222,17 +241,33 @@ EXPORT_SYMBOL_GPL(pm_generic_thaw);
222 */ 241 */
223int pm_generic_resume_noirq(struct device *dev) 242int pm_generic_resume_noirq(struct device *dev)
224{ 243{
225 return __pm_generic_call(dev, PM_EVENT_RESUME, true); 244 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
245
246 return pm && pm->resume_noirq ? pm->resume_noirq(dev) : 0;
226} 247}
227EXPORT_SYMBOL_GPL(pm_generic_resume_noirq); 248EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
228 249
229/** 250/**
251 * pm_generic_resume_early - Generic resume_early callback for subsystems.
252 * @dev: Device to resume.
253 */
254int pm_generic_resume_early(struct device *dev)
255{
256 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
257
258 return pm && pm->resume_early ? pm->resume_early(dev) : 0;
259}
260EXPORT_SYMBOL_GPL(pm_generic_resume_early);
261
262/**
230 * pm_generic_resume - Generic resume callback for subsystems. 263 * pm_generic_resume - Generic resume callback for subsystems.
231 * @dev: Device to resume. 264 * @dev: Device to resume.
232 */ 265 */
233int pm_generic_resume(struct device *dev) 266int pm_generic_resume(struct device *dev)
234{ 267{
235 return __pm_generic_call(dev, PM_EVENT_RESUME, false); 268 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
269
270 return pm && pm->resume ? pm->resume(dev) : 0;
236} 271}
237EXPORT_SYMBOL_GPL(pm_generic_resume); 272EXPORT_SYMBOL_GPL(pm_generic_resume);
238 273
@@ -242,17 +277,33 @@ EXPORT_SYMBOL_GPL(pm_generic_resume);
242 */ 277 */
243int pm_generic_restore_noirq(struct device *dev) 278int pm_generic_restore_noirq(struct device *dev)
244{ 279{
245 return __pm_generic_call(dev, PM_EVENT_RESTORE, true); 280 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
281
282 return pm && pm->restore_noirq ? pm->restore_noirq(dev) : 0;
246} 283}
247EXPORT_SYMBOL_GPL(pm_generic_restore_noirq); 284EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
248 285
249/** 286/**
287 * pm_generic_restore_early - Generic restore_early callback for subsystems.
288 * @dev: Device to resume.
289 */
290int pm_generic_restore_early(struct device *dev)
291{
292 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
293
294 return pm && pm->restore_early ? pm->restore_early(dev) : 0;
295}
296EXPORT_SYMBOL_GPL(pm_generic_restore_early);
297
298/**
250 * pm_generic_restore - Generic restore callback for subsystems. 299 * pm_generic_restore - Generic restore callback for subsystems.
251 * @dev: Device to restore. 300 * @dev: Device to restore.
252 */ 301 */
253int pm_generic_restore(struct device *dev) 302int pm_generic_restore(struct device *dev)
254{ 303{
255 return __pm_generic_call(dev, PM_EVENT_RESTORE, false); 304 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
305
306 return pm && pm->restore ? pm->restore(dev) : 0;
256} 307}
257EXPORT_SYMBOL_GPL(pm_generic_restore); 308EXPORT_SYMBOL_GPL(pm_generic_restore);
258 309
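
With __pm_generic_call() gone, each phase now has its own open-coded helper, including the new *_late and *_early variants. As a sketch (the ops name is hypothetical), a subsystem could route every phase through them; each helper forwards to the matching driver callback if one exists and otherwise returns 0:

#include <linux/pm.h>

static const struct dev_pm_ops example_subsys_pm_ops = {
	.suspend	= pm_generic_suspend,
	.suspend_late	= pm_generic_suspend_late,
	.suspend_noirq	= pm_generic_suspend_noirq,
	.resume_noirq	= pm_generic_resume_noirq,
	.resume_early	= pm_generic_resume_early,
	.resume		= pm_generic_resume,
	.freeze		= pm_generic_freeze,
	.freeze_late	= pm_generic_freeze_late,
	.freeze_noirq	= pm_generic_freeze_noirq,
	.thaw_noirq	= pm_generic_thaw_noirq,
	.thaw_early	= pm_generic_thaw_early,
	.thaw		= pm_generic_thaw,
	.poweroff	= pm_generic_poweroff,
	.poweroff_late	= pm_generic_poweroff_late,
	.poweroff_noirq	= pm_generic_poweroff_noirq,
	.restore_noirq	= pm_generic_restore_noirq,
	.restore_early	= pm_generic_restore_early,
	.restore	= pm_generic_restore,
};
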
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index e2cc3d2e0ecc..b462c0e341cb 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -47,6 +47,7 @@ typedef int (*pm_callback_t)(struct device *);
47LIST_HEAD(dpm_list); 47LIST_HEAD(dpm_list);
48LIST_HEAD(dpm_prepared_list); 48LIST_HEAD(dpm_prepared_list);
49LIST_HEAD(dpm_suspended_list); 49LIST_HEAD(dpm_suspended_list);
50LIST_HEAD(dpm_late_early_list);
50LIST_HEAD(dpm_noirq_list); 51LIST_HEAD(dpm_noirq_list);
51 52
52struct suspend_stats suspend_stats; 53struct suspend_stats suspend_stats;
@@ -246,6 +247,40 @@ static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
246} 247}
247 248
248/** 249/**
250 * pm_late_early_op - Return the PM operation appropriate for given PM event.
251 * @ops: PM operations to choose from.
252 * @state: PM transition of the system being carried out.
253 *
254 * Runtime PM is disabled for @dev while this function is being executed.
255 */
256static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
257 pm_message_t state)
258{
259 switch (state.event) {
260#ifdef CONFIG_SUSPEND
261 case PM_EVENT_SUSPEND:
262 return ops->suspend_late;
263 case PM_EVENT_RESUME:
264 return ops->resume_early;
265#endif /* CONFIG_SUSPEND */
266#ifdef CONFIG_HIBERNATE_CALLBACKS
267 case PM_EVENT_FREEZE:
268 case PM_EVENT_QUIESCE:
269 return ops->freeze_late;
270 case PM_EVENT_HIBERNATE:
271 return ops->poweroff_late;
272 case PM_EVENT_THAW:
273 case PM_EVENT_RECOVER:
274 return ops->thaw_early;
275 case PM_EVENT_RESTORE:
276 return ops->restore_early;
277#endif /* CONFIG_HIBERNATE_CALLBACKS */
278 }
279
280 return NULL;
281}
282
283/**
249 * pm_noirq_op - Return the PM operation appropriate for given PM event. 284 * pm_noirq_op - Return the PM operation appropriate for given PM event.
250 * @ops: PM operations to choose from. 285 * @ops: PM operations to choose from.
251 * @state: PM transition of the system being carried out. 286 * @state: PM transition of the system being carried out.
@@ -374,21 +409,21 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
374 TRACE_RESUME(0); 409 TRACE_RESUME(0);
375 410
376 if (dev->pm_domain) { 411 if (dev->pm_domain) {
377 info = "EARLY power domain "; 412 info = "noirq power domain ";
378 callback = pm_noirq_op(&dev->pm_domain->ops, state); 413 callback = pm_noirq_op(&dev->pm_domain->ops, state);
379 } else if (dev->type && dev->type->pm) { 414 } else if (dev->type && dev->type->pm) {
380 info = "EARLY type "; 415 info = "noirq type ";
381 callback = pm_noirq_op(dev->type->pm, state); 416 callback = pm_noirq_op(dev->type->pm, state);
382 } else if (dev->class && dev->class->pm) { 417 } else if (dev->class && dev->class->pm) {
383 info = "EARLY class "; 418 info = "noirq class ";
384 callback = pm_noirq_op(dev->class->pm, state); 419 callback = pm_noirq_op(dev->class->pm, state);
385 } else if (dev->bus && dev->bus->pm) { 420 } else if (dev->bus && dev->bus->pm) {
386 info = "EARLY bus "; 421 info = "noirq bus ";
387 callback = pm_noirq_op(dev->bus->pm, state); 422 callback = pm_noirq_op(dev->bus->pm, state);
388 } 423 }
389 424
390 if (!callback && dev->driver && dev->driver->pm) { 425 if (!callback && dev->driver && dev->driver->pm) {
391 info = "EARLY driver "; 426 info = "noirq driver ";
392 callback = pm_noirq_op(dev->driver->pm, state); 427 callback = pm_noirq_op(dev->driver->pm, state);
393 } 428 }
394 429
@@ -399,13 +434,13 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
399} 434}
400 435
401/** 436/**
402 * dpm_resume_noirq - Execute "early resume" callbacks for non-sysdev devices. 437 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
403 * @state: PM transition of the system being carried out. 438 * @state: PM transition of the system being carried out.
404 * 439 *
405 * Call the "noirq" resume handlers for all devices marked as DPM_OFF_IRQ and 440 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
406 * enable device drivers to receive interrupts. 441 * enable device drivers to receive interrupts.
407 */ 442 */
408void dpm_resume_noirq(pm_message_t state) 443static void dpm_resume_noirq(pm_message_t state)
409{ 444{
410 ktime_t starttime = ktime_get(); 445 ktime_t starttime = ktime_get();
411 446
@@ -415,7 +450,7 @@ void dpm_resume_noirq(pm_message_t state)
415 int error; 450 int error;
416 451
417 get_device(dev); 452 get_device(dev);
418 list_move_tail(&dev->power.entry, &dpm_suspended_list); 453 list_move_tail(&dev->power.entry, &dpm_late_early_list);
419 mutex_unlock(&dpm_list_mtx); 454 mutex_unlock(&dpm_list_mtx);
420 455
421 error = device_resume_noirq(dev, state); 456 error = device_resume_noirq(dev, state);
@@ -423,6 +458,80 @@ void dpm_resume_noirq(pm_message_t state)
423 suspend_stats.failed_resume_noirq++; 458 suspend_stats.failed_resume_noirq++;
424 dpm_save_failed_step(SUSPEND_RESUME_NOIRQ); 459 dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
425 dpm_save_failed_dev(dev_name(dev)); 460 dpm_save_failed_dev(dev_name(dev));
461 pm_dev_err(dev, state, " noirq", error);
462 }
463
464 mutex_lock(&dpm_list_mtx);
465 put_device(dev);
466 }
467 mutex_unlock(&dpm_list_mtx);
468 dpm_show_time(starttime, state, "noirq");
469 resume_device_irqs();
470}
471
472/**
473 * device_resume_early - Execute an "early resume" callback for given device.
474 * @dev: Device to handle.
475 * @state: PM transition of the system being carried out.
476 *
477 * Runtime PM is disabled for @dev while this function is being executed.
478 */
479static int device_resume_early(struct device *dev, pm_message_t state)
480{
481 pm_callback_t callback = NULL;
482 char *info = NULL;
483 int error = 0;
484
485 TRACE_DEVICE(dev);
486 TRACE_RESUME(0);
487
488 if (dev->pm_domain) {
489 info = "early power domain ";
490 callback = pm_late_early_op(&dev->pm_domain->ops, state);
491 } else if (dev->type && dev->type->pm) {
492 info = "early type ";
493 callback = pm_late_early_op(dev->type->pm, state);
494 } else if (dev->class && dev->class->pm) {
495 info = "early class ";
496 callback = pm_late_early_op(dev->class->pm, state);
497 } else if (dev->bus && dev->bus->pm) {
498 info = "early bus ";
499 callback = pm_late_early_op(dev->bus->pm, state);
500 }
501
502 if (!callback && dev->driver && dev->driver->pm) {
503 info = "early driver ";
504 callback = pm_late_early_op(dev->driver->pm, state);
505 }
506
507 error = dpm_run_callback(callback, dev, state, info);
508
509 TRACE_RESUME(error);
510 return error;
511}
512
513/**
514 * dpm_resume_early - Execute "early resume" callbacks for all devices.
515 * @state: PM transition of the system being carried out.
516 */
517static void dpm_resume_early(pm_message_t state)
518{
519 ktime_t starttime = ktime_get();
520
521 mutex_lock(&dpm_list_mtx);
522 while (!list_empty(&dpm_late_early_list)) {
523 struct device *dev = to_device(dpm_late_early_list.next);
524 int error;
525
526 get_device(dev);
527 list_move_tail(&dev->power.entry, &dpm_suspended_list);
528 mutex_unlock(&dpm_list_mtx);
529
530 error = device_resume_early(dev, state);
531 if (error) {
532 suspend_stats.failed_resume_early++;
533 dpm_save_failed_step(SUSPEND_RESUME_EARLY);
534 dpm_save_failed_dev(dev_name(dev));
426 pm_dev_err(dev, state, " early", error); 535 pm_dev_err(dev, state, " early", error);
427 } 536 }
428 537
@@ -431,9 +540,18 @@ void dpm_resume_noirq(pm_message_t state)
431 } 540 }
432 mutex_unlock(&dpm_list_mtx); 541 mutex_unlock(&dpm_list_mtx);
433 dpm_show_time(starttime, state, "early"); 542 dpm_show_time(starttime, state, "early");
434 resume_device_irqs();
435} 543}
436EXPORT_SYMBOL_GPL(dpm_resume_noirq); 544
545/**
546 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
547 * @state: PM transition of the system being carried out.
548 */
549void dpm_resume_start(pm_message_t state)
550{
551 dpm_resume_noirq(state);
552 dpm_resume_early(state);
553}
554EXPORT_SYMBOL_GPL(dpm_resume_start);
437 555
438/** 556/**
439 * device_resume - Execute "resume" callbacks for given device. 557 * device_resume - Execute "resume" callbacks for given device.
@@ -716,21 +834,21 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
716 char *info = NULL; 834 char *info = NULL;
717 835
718 if (dev->pm_domain) { 836 if (dev->pm_domain) {
719 info = "LATE power domain "; 837 info = "noirq power domain ";
720 callback = pm_noirq_op(&dev->pm_domain->ops, state); 838 callback = pm_noirq_op(&dev->pm_domain->ops, state);
721 } else if (dev->type && dev->type->pm) { 839 } else if (dev->type && dev->type->pm) {
722 info = "LATE type "; 840 info = "noirq type ";
723 callback = pm_noirq_op(dev->type->pm, state); 841 callback = pm_noirq_op(dev->type->pm, state);
724 } else if (dev->class && dev->class->pm) { 842 } else if (dev->class && dev->class->pm) {
725 info = "LATE class "; 843 info = "noirq class ";
726 callback = pm_noirq_op(dev->class->pm, state); 844 callback = pm_noirq_op(dev->class->pm, state);
727 } else if (dev->bus && dev->bus->pm) { 845 } else if (dev->bus && dev->bus->pm) {
728 info = "LATE bus "; 846 info = "noirq bus ";
729 callback = pm_noirq_op(dev->bus->pm, state); 847 callback = pm_noirq_op(dev->bus->pm, state);
730 } 848 }
731 849
732 if (!callback && dev->driver && dev->driver->pm) { 850 if (!callback && dev->driver && dev->driver->pm) {
733 info = "LATE driver "; 851 info = "noirq driver ";
734 callback = pm_noirq_op(dev->driver->pm, state); 852 callback = pm_noirq_op(dev->driver->pm, state);
735 } 853 }
736 854
@@ -738,21 +856,21 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
738} 856}
739 857
740/** 858/**
741 * dpm_suspend_noirq - Execute "late suspend" callbacks for non-sysdev devices. 859 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
742 * @state: PM transition of the system being carried out. 860 * @state: PM transition of the system being carried out.
743 * 861 *
744 * Prevent device drivers from receiving interrupts and call the "noirq" suspend 862 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
745 * handlers for all non-sysdev devices. 863 * handlers for all non-sysdev devices.
746 */ 864 */
747int dpm_suspend_noirq(pm_message_t state) 865static int dpm_suspend_noirq(pm_message_t state)
748{ 866{
749 ktime_t starttime = ktime_get(); 867 ktime_t starttime = ktime_get();
750 int error = 0; 868 int error = 0;
751 869
752 suspend_device_irqs(); 870 suspend_device_irqs();
753 mutex_lock(&dpm_list_mtx); 871 mutex_lock(&dpm_list_mtx);
754 while (!list_empty(&dpm_suspended_list)) { 872 while (!list_empty(&dpm_late_early_list)) {
755 struct device *dev = to_device(dpm_suspended_list.prev); 873 struct device *dev = to_device(dpm_late_early_list.prev);
756 874
757 get_device(dev); 875 get_device(dev);
758 mutex_unlock(&dpm_list_mtx); 876 mutex_unlock(&dpm_list_mtx);
@@ -761,7 +879,7 @@ int dpm_suspend_noirq(pm_message_t state)
761 879
762 mutex_lock(&dpm_list_mtx); 880 mutex_lock(&dpm_list_mtx);
763 if (error) { 881 if (error) {
764 pm_dev_err(dev, state, " late", error); 882 pm_dev_err(dev, state, " noirq", error);
765 suspend_stats.failed_suspend_noirq++; 883 suspend_stats.failed_suspend_noirq++;
766 dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ); 884 dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
767 dpm_save_failed_dev(dev_name(dev)); 885 dpm_save_failed_dev(dev_name(dev));
@@ -776,10 +894,95 @@ int dpm_suspend_noirq(pm_message_t state)
776 if (error) 894 if (error)
777 dpm_resume_noirq(resume_event(state)); 895 dpm_resume_noirq(resume_event(state));
778 else 896 else
897 dpm_show_time(starttime, state, "noirq");
898 return error;
899}
900
901/**
902 * device_suspend_late - Execute a "late suspend" callback for given device.
903 * @dev: Device to handle.
904 * @state: PM transition of the system being carried out.
905 *
906 * Runtime PM is disabled for @dev while this function is being executed.
907 */
908static int device_suspend_late(struct device *dev, pm_message_t state)
909{
910 pm_callback_t callback = NULL;
911 char *info = NULL;
912
913 if (dev->pm_domain) {
914 info = "late power domain ";
915 callback = pm_late_early_op(&dev->pm_domain->ops, state);
916 } else if (dev->type && dev->type->pm) {
917 info = "late type ";
918 callback = pm_late_early_op(dev->type->pm, state);
919 } else if (dev->class && dev->class->pm) {
920 info = "late class ";
921 callback = pm_late_early_op(dev->class->pm, state);
922 } else if (dev->bus && dev->bus->pm) {
923 info = "late bus ";
924 callback = pm_late_early_op(dev->bus->pm, state);
925 }
926
927 if (!callback && dev->driver && dev->driver->pm) {
928 info = "late driver ";
929 callback = pm_late_early_op(dev->driver->pm, state);
930 }
931
932 return dpm_run_callback(callback, dev, state, info);
933}
934
935/**
936 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
937 * @state: PM transition of the system being carried out.
938 */
939static int dpm_suspend_late(pm_message_t state)
940{
941 ktime_t starttime = ktime_get();
942 int error = 0;
943
944 mutex_lock(&dpm_list_mtx);
945 while (!list_empty(&dpm_suspended_list)) {
946 struct device *dev = to_device(dpm_suspended_list.prev);
947
948 get_device(dev);
949 mutex_unlock(&dpm_list_mtx);
950
951 error = device_suspend_late(dev, state);
952
953 mutex_lock(&dpm_list_mtx);
954 if (error) {
955 pm_dev_err(dev, state, " late", error);
956 suspend_stats.failed_suspend_late++;
957 dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
958 dpm_save_failed_dev(dev_name(dev));
959 put_device(dev);
960 break;
961 }
962 if (!list_empty(&dev->power.entry))
963 list_move(&dev->power.entry, &dpm_late_early_list);
964 put_device(dev);
965 }
966 mutex_unlock(&dpm_list_mtx);
967 if (error)
968 dpm_resume_early(resume_event(state));
969 else
779 dpm_show_time(starttime, state, "late"); 970 dpm_show_time(starttime, state, "late");
971
780 return error; 972 return error;
781} 973}
782EXPORT_SYMBOL_GPL(dpm_suspend_noirq); 974
975/**
976 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
977 * @state: PM transition of the system being carried out.
978 */
979int dpm_suspend_end(pm_message_t state)
980{
981 int error = dpm_suspend_late(state);
982
983 return error ? : dpm_suspend_noirq(state);
984}
985EXPORT_SYMBOL_GPL(dpm_suspend_end);
783 986
784/** 987/**
785 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device. 988 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
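
The "late" and "early" phases gain their own device list and callbacks here, and the exported entry points change from dpm_suspend_noirq()/dpm_resume_noirq() to the combined dpm_suspend_end()/dpm_resume_start(). A rough sketch of how a caller pairs them up around the platform low-power entry (the real suspend core also handles wakeup checks, syscore ops and error unwinding):

#include <linux/pm.h>

static int example_enter_sleep_state(void)
{
	int error;

	error = dpm_suspend_end(PMSG_SUSPEND);	/* "late" phase, then "noirq" */
	if (error)
		return error;

	/* ... disable non-boot CPUs and enter the platform sleep state ... */

	dpm_resume_start(PMSG_RESUME);		/* "noirq" phase, then "early" */
	return 0;
}
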
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 95706fa24c73..ac993eafec82 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -17,6 +17,7 @@
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/cpufreq.h> 19#include <linux/cpufreq.h>
20#include <linux/device.h>
20#include <linux/list.h> 21#include <linux/list.h>
21#include <linux/rculist.h> 22#include <linux/rculist.h>
22#include <linux/rcupdate.h> 23#include <linux/rcupdate.h>
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index 9bf62323aaf3..eeb4bff9505c 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -71,6 +71,8 @@ extern void dpm_sysfs_remove(struct device *dev);
71extern void rpm_sysfs_remove(struct device *dev); 71extern void rpm_sysfs_remove(struct device *dev);
72extern int wakeup_sysfs_add(struct device *dev); 72extern int wakeup_sysfs_add(struct device *dev);
73extern void wakeup_sysfs_remove(struct device *dev); 73extern void wakeup_sysfs_remove(struct device *dev);
74extern int pm_qos_sysfs_add(struct device *dev);
75extern void pm_qos_sysfs_remove(struct device *dev);
74 76
75#else /* CONFIG_PM */ 77#else /* CONFIG_PM */
76 78
@@ -79,5 +81,7 @@ static inline void dpm_sysfs_remove(struct device *dev) {}
79static inline void rpm_sysfs_remove(struct device *dev) {} 81static inline void rpm_sysfs_remove(struct device *dev) {}
80static inline int wakeup_sysfs_add(struct device *dev) { return 0; } 82static inline int wakeup_sysfs_add(struct device *dev) { return 0; }
81static inline void wakeup_sysfs_remove(struct device *dev) {} 83static inline void wakeup_sysfs_remove(struct device *dev) {}
84static inline int pm_qos_sysfs_add(struct device *dev) { return 0; }
85static inline void pm_qos_sysfs_remove(struct device *dev) {}
82 86
83#endif 87#endif
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index c5d358837461..71855570922d 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -41,6 +41,7 @@
41#include <linux/mutex.h> 41#include <linux/mutex.h>
42#include <linux/export.h> 42#include <linux/export.h>
43 43
44#include "power.h"
44 45
45static DEFINE_MUTEX(dev_pm_qos_mtx); 46static DEFINE_MUTEX(dev_pm_qos_mtx);
46 47
@@ -166,6 +167,12 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
166 struct dev_pm_qos_request *req, *tmp; 167 struct dev_pm_qos_request *req, *tmp;
167 struct pm_qos_constraints *c; 168 struct pm_qos_constraints *c;
168 169
170 /*
171 * If the device's PM QoS resume latency limit has been exposed to user
172 * space, it has to be hidden at this point.
173 */
174 dev_pm_qos_hide_latency_limit(dev);
175
169 mutex_lock(&dev_pm_qos_mtx); 176 mutex_lock(&dev_pm_qos_mtx);
170 177
171 dev->power.power_state = PMSG_INVALID; 178 dev->power.power_state = PMSG_INVALID;
@@ -445,3 +452,57 @@ int dev_pm_qos_add_ancestor_request(struct device *dev,
445 return error; 452 return error;
446} 453}
447EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request); 454EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
455
456#ifdef CONFIG_PM_RUNTIME
457static void __dev_pm_qos_drop_user_request(struct device *dev)
458{
459 dev_pm_qos_remove_request(dev->power.pq_req);
460 dev->power.pq_req = 0;
461}
462
463/**
464 * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.
465 * @dev: Device whose PM QoS latency limit is to be exposed to user space.
466 * @value: Initial value of the latency limit.
467 */
468int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
469{
470 struct dev_pm_qos_request *req;
471 int ret;
472
473 if (!device_is_registered(dev) || value < 0)
474 return -EINVAL;
475
476 if (dev->power.pq_req)
477 return -EEXIST;
478
479 req = kzalloc(sizeof(*req), GFP_KERNEL);
480 if (!req)
481 return -ENOMEM;
482
483 ret = dev_pm_qos_add_request(dev, req, value);
484 if (ret < 0)
485 return ret;
486
487 dev->power.pq_req = req;
488 ret = pm_qos_sysfs_add(dev);
489 if (ret)
490 __dev_pm_qos_drop_user_request(dev);
491
492 return ret;
493}
494EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
495
496/**
497 * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
498 * @dev: Device whose PM QoS latency limit is to be hidden from user space.
499 */
500void dev_pm_qos_hide_latency_limit(struct device *dev)
501{
502 if (dev->power.pq_req) {
503 pm_qos_sysfs_remove(dev);
504 __dev_pm_qos_drop_user_request(dev);
505 }
506}
507EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
508#endif /* CONFIG_PM_RUNTIME */
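
dev_pm_qos_expose_latency_limit() and dev_pm_qos_hide_latency_limit() let a driver hand control of its resume-latency constraint to user space. A hypothetical driver (the 100 us initial value is made up) might use them as below; the exposed limit then appears as the pm_qos_resume_latency_us attribute added to the device's power group in the sysfs.c hunk further down:

#include <linux/platform_device.h>
#include <linux/pm_qos.h>

static int example_probe(struct platform_device *pdev)
{
	int ret;

	ret = dev_pm_qos_expose_latency_limit(&pdev->dev, 100);
	if (ret)
		dev_warn(&pdev->dev, "cannot expose PM QoS latency limit: %d\n", ret);
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	/* also done implicitly by dev_pm_qos_constraints_destroy() on removal */
	dev_pm_qos_hide_latency_limit(&pdev->dev);
	return 0;
}
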
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 541f821d4ea6..bd0f3949bcf9 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -532,6 +532,8 @@ static int rpm_suspend(struct device *dev, int rpmflags)
532 dev->power.suspend_time = ktime_set(0, 0); 532 dev->power.suspend_time = ktime_set(0, 0);
533 dev->power.max_time_suspended_ns = -1; 533 dev->power.max_time_suspended_ns = -1;
534 dev->power.deferred_resume = false; 534 dev->power.deferred_resume = false;
535 wake_up_all(&dev->power.wait_queue);
536
535 if (retval == -EAGAIN || retval == -EBUSY) { 537 if (retval == -EAGAIN || retval == -EBUSY) {
536 dev->power.runtime_error = 0; 538 dev->power.runtime_error = 0;
537 539
@@ -547,7 +549,6 @@ static int rpm_suspend(struct device *dev, int rpmflags)
547 } else { 549 } else {
548 pm_runtime_cancel_pending(dev); 550 pm_runtime_cancel_pending(dev);
549 } 551 }
550 wake_up_all(&dev->power.wait_queue);
551 goto out; 552 goto out;
552} 553}
553 554
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index adf41be0ea66..95c12f6cb5b9 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -5,6 +5,7 @@
5#include <linux/device.h> 5#include <linux/device.h>
6#include <linux/string.h> 6#include <linux/string.h>
7#include <linux/export.h> 7#include <linux/export.h>
8#include <linux/pm_qos.h>
8#include <linux/pm_runtime.h> 9#include <linux/pm_runtime.h>
9#include <linux/atomic.h> 10#include <linux/atomic.h>
10#include <linux/jiffies.h> 11#include <linux/jiffies.h>
@@ -217,6 +218,31 @@ static ssize_t autosuspend_delay_ms_store(struct device *dev,
217static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show, 218static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show,
218 autosuspend_delay_ms_store); 219 autosuspend_delay_ms_store);
219 220
221static ssize_t pm_qos_latency_show(struct device *dev,
222 struct device_attribute *attr, char *buf)
223{
224 return sprintf(buf, "%d\n", dev->power.pq_req->node.prio);
225}
226
227static ssize_t pm_qos_latency_store(struct device *dev,
228 struct device_attribute *attr,
229 const char *buf, size_t n)
230{
231 s32 value;
232 int ret;
233
234 if (kstrtos32(buf, 0, &value))
235 return -EINVAL;
236
237 if (value < 0)
238 return -EINVAL;
239
240 ret = dev_pm_qos_update_request(dev->power.pq_req, value);
241 return ret < 0 ? ret : n;
242}
243
244static DEVICE_ATTR(pm_qos_resume_latency_us, 0644,
245 pm_qos_latency_show, pm_qos_latency_store);
220#endif /* CONFIG_PM_RUNTIME */ 246#endif /* CONFIG_PM_RUNTIME */
221 247
222#ifdef CONFIG_PM_SLEEP 248#ifdef CONFIG_PM_SLEEP
@@ -490,6 +516,17 @@ static struct attribute_group pm_runtime_attr_group = {
490 .attrs = runtime_attrs, 516 .attrs = runtime_attrs,
491}; 517};
492 518
519static struct attribute *pm_qos_attrs[] = {
520#ifdef CONFIG_PM_RUNTIME
521 &dev_attr_pm_qos_resume_latency_us.attr,
522#endif /* CONFIG_PM_RUNTIME */
523 NULL,
524};
525static struct attribute_group pm_qos_attr_group = {
526 .name = power_group_name,
527 .attrs = pm_qos_attrs,
528};
529
493int dpm_sysfs_add(struct device *dev) 530int dpm_sysfs_add(struct device *dev)
494{ 531{
495 int rc; 532 int rc;
@@ -530,6 +567,16 @@ void wakeup_sysfs_remove(struct device *dev)
530 sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); 567 sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
531} 568}
532 569
570int pm_qos_sysfs_add(struct device *dev)
571{
572 return sysfs_merge_group(&dev->kobj, &pm_qos_attr_group);
573}
574
575void pm_qos_sysfs_remove(struct device *dev)
576{
577 sysfs_unmerge_group(&dev->kobj, &pm_qos_attr_group);
578}
579
533void rpm_sysfs_remove(struct device *dev) 580void rpm_sysfs_remove(struct device *dev)
534{ 581{
535 sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group); 582 sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index caf995fb774b..2a3e581b8dcd 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -53,6 +53,23 @@ static void pm_wakeup_timer_fn(unsigned long data);
53static LIST_HEAD(wakeup_sources); 53static LIST_HEAD(wakeup_sources);
54 54
55/** 55/**
56 * wakeup_source_prepare - Prepare a new wakeup source for initialization.
57 * @ws: Wakeup source to prepare.
58 * @name: Pointer to the name of the new wakeup source.
59 *
60 * Callers must ensure that the @name string won't be freed while @ws is still
61 * in use.
62 */
63void wakeup_source_prepare(struct wakeup_source *ws, const char *name)
64{
65 if (ws) {
66 memset(ws, 0, sizeof(*ws));
67 ws->name = name;
68 }
69}
70EXPORT_SYMBOL_GPL(wakeup_source_prepare);
71
72/**
56 * wakeup_source_create - Create a struct wakeup_source object. 73 * wakeup_source_create - Create a struct wakeup_source object.
57 * @name: Name of the new wakeup source. 74 * @name: Name of the new wakeup source.
58 */ 75 */
@@ -60,37 +77,44 @@ struct wakeup_source *wakeup_source_create(const char *name)
60{ 77{
61 struct wakeup_source *ws; 78 struct wakeup_source *ws;
62 79
63 ws = kzalloc(sizeof(*ws), GFP_KERNEL); 80 ws = kmalloc(sizeof(*ws), GFP_KERNEL);
64 if (!ws) 81 if (!ws)
65 return NULL; 82 return NULL;
66 83
67 spin_lock_init(&ws->lock); 84 wakeup_source_prepare(ws, name ? kstrdup(name, GFP_KERNEL) : NULL);
68 if (name)
69 ws->name = kstrdup(name, GFP_KERNEL);
70
71 return ws; 85 return ws;
72} 86}
73EXPORT_SYMBOL_GPL(wakeup_source_create); 87EXPORT_SYMBOL_GPL(wakeup_source_create);
74 88
75/** 89/**
90 * wakeup_source_drop - Prepare a struct wakeup_source object for destruction.
91 * @ws: Wakeup source to prepare for destruction.
92 *
93 * Callers must ensure that __pm_stay_awake() or __pm_wakeup_event() will never
94 * be run in parallel with this function for the same wakeup source object.
95 */
96void wakeup_source_drop(struct wakeup_source *ws)
97{
98 if (!ws)
99 return;
100
101 del_timer_sync(&ws->timer);
102 __pm_relax(ws);
103}
104EXPORT_SYMBOL_GPL(wakeup_source_drop);
105
106/**
76 * wakeup_source_destroy - Destroy a struct wakeup_source object. 107 * wakeup_source_destroy - Destroy a struct wakeup_source object.
77 * @ws: Wakeup source to destroy. 108 * @ws: Wakeup source to destroy.
109 *
110 * Use only for wakeup source objects created with wakeup_source_create().
78 */ 111 */
79void wakeup_source_destroy(struct wakeup_source *ws) 112void wakeup_source_destroy(struct wakeup_source *ws)
80{ 113{
81 if (!ws) 114 if (!ws)
82 return; 115 return;
83 116
84 spin_lock_irq(&ws->lock); 117 wakeup_source_drop(ws);
85 while (ws->active) {
86 spin_unlock_irq(&ws->lock);
87
88 schedule_timeout_interruptible(msecs_to_jiffies(TIMEOUT));
89
90 spin_lock_irq(&ws->lock);
91 }
92 spin_unlock_irq(&ws->lock);
93
94 kfree(ws->name); 118 kfree(ws->name);
95 kfree(ws); 119 kfree(ws);
96} 120}
@@ -105,6 +129,7 @@ void wakeup_source_add(struct wakeup_source *ws)
105 if (WARN_ON(!ws)) 129 if (WARN_ON(!ws))
106 return; 130 return;
107 131
132 spin_lock_init(&ws->lock);
108 setup_timer(&ws->timer, pm_wakeup_timer_fn, (unsigned long)ws); 133 setup_timer(&ws->timer, pm_wakeup_timer_fn, (unsigned long)ws);
109 ws->active = false; 134 ws->active = false;
110 135
@@ -152,8 +177,10 @@ EXPORT_SYMBOL_GPL(wakeup_source_register);
152 */ 177 */
153void wakeup_source_unregister(struct wakeup_source *ws) 178void wakeup_source_unregister(struct wakeup_source *ws)
154{ 179{
155 wakeup_source_remove(ws); 180 if (ws) {
156 wakeup_source_destroy(ws); 181 wakeup_source_remove(ws);
182 wakeup_source_destroy(ws);
183 }
157} 184}
158EXPORT_SYMBOL_GPL(wakeup_source_unregister); 185EXPORT_SYMBOL_GPL(wakeup_source_unregister);
159 186
@@ -349,7 +376,6 @@ static void wakeup_source_activate(struct wakeup_source *ws)
349{ 376{
350 ws->active = true; 377 ws->active = true;
351 ws->active_count++; 378 ws->active_count++;
352 ws->timer_expires = jiffies;
353 ws->last_time = ktime_get(); 379 ws->last_time = ktime_get();
354 380
355 /* Increment the counter of events in progress. */ 381 /* Increment the counter of events in progress. */
@@ -370,9 +396,14 @@ void __pm_stay_awake(struct wakeup_source *ws)
370 return; 396 return;
371 397
372 spin_lock_irqsave(&ws->lock, flags); 398 spin_lock_irqsave(&ws->lock, flags);
399
373 ws->event_count++; 400 ws->event_count++;
374 if (!ws->active) 401 if (!ws->active)
375 wakeup_source_activate(ws); 402 wakeup_source_activate(ws);
403
404 del_timer(&ws->timer);
405 ws->timer_expires = 0;
406
376 spin_unlock_irqrestore(&ws->lock, flags); 407 spin_unlock_irqrestore(&ws->lock, flags);
377} 408}
378EXPORT_SYMBOL_GPL(__pm_stay_awake); 409EXPORT_SYMBOL_GPL(__pm_stay_awake);
@@ -438,6 +469,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
438 ws->max_time = duration; 469 ws->max_time = duration;
439 470
440 del_timer(&ws->timer); 471 del_timer(&ws->timer);
472 ws->timer_expires = 0;
441 473
442 /* 474 /*
443 * Increment the counter of registered wakeup events and decrement the 475 * Increment the counter of registered wakeup events and decrement the
@@ -492,11 +524,22 @@ EXPORT_SYMBOL_GPL(pm_relax);
492 * pm_wakeup_timer_fn - Delayed finalization of a wakeup event. 524 * pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
493 * @data: Address of the wakeup source object associated with the event source. 525 * @data: Address of the wakeup source object associated with the event source.
494 * 526 *
495 * Call __pm_relax() for the wakeup source whose address is stored in @data. 527 * Call wakeup_source_deactivate() for the wakeup source whose address is stored
528 * in @data if it is currently active and its timer has not been canceled and
529 * the expiration time of the timer is not in the future.
496 */ 530 */
497static void pm_wakeup_timer_fn(unsigned long data) 531static void pm_wakeup_timer_fn(unsigned long data)
498{ 532{
499 __pm_relax((struct wakeup_source *)data); 533 struct wakeup_source *ws = (struct wakeup_source *)data;
534 unsigned long flags;
535
536 spin_lock_irqsave(&ws->lock, flags);
537
538 if (ws->active && ws->timer_expires
539 && time_after_eq(jiffies, ws->timer_expires))
540 wakeup_source_deactivate(ws);
541
542 spin_unlock_irqrestore(&ws->lock, flags);
500} 543}
501 544
502/** 545/**
@@ -534,7 +577,7 @@ void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec)
534 if (!expires) 577 if (!expires)
535 expires = 1; 578 expires = 1;
536 579
537 if (time_after(expires, ws->timer_expires)) { 580 if (!ws->timer_expires || time_after(expires, ws->timer_expires)) {
538 mod_timer(&ws->timer, expires); 581 mod_timer(&ws->timer, expires);
539 ws->timer_expires = expires; 582 ws->timer_expires = expires;
540 } 583 }
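
wakeup_source_prepare() and wakeup_source_drop() split initialization and teardown out of the create/destroy pair, which makes statically allocated wakeup sources practical. A minimal sketch (the names are invented; the name string must stay valid while the object is in use):

#include <linux/pm_wakeup.h>

static struct wakeup_source example_ws;

static void example_wakeup_init(void)
{
	wakeup_source_prepare(&example_ws, "example_events");
	wakeup_source_add(&example_ws);
}

static void example_wakeup_exit(void)
{
	wakeup_source_remove(&example_ws);
	wakeup_source_drop(&example_ws);
}
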
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 1a02b7537c8b..fcafc5b2e651 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -22,6 +22,7 @@ struct regcache_ops;
22struct regmap_format { 22struct regmap_format {
23 size_t buf_size; 23 size_t buf_size;
24 size_t reg_bytes; 24 size_t reg_bytes;
25 size_t pad_bytes;
25 size_t val_bytes; 26 size_t val_bytes;
26 void (*format_write)(struct regmap *map, 27 void (*format_write)(struct regmap *map,
27 unsigned int reg, unsigned int val); 28 unsigned int reg, unsigned int val);
@@ -65,16 +66,19 @@ struct regmap {
65 unsigned int num_reg_defaults_raw; 66 unsigned int num_reg_defaults_raw;
66 67
67 /* if set, only the cache is modified not the HW */ 68 /* if set, only the cache is modified not the HW */
68 unsigned int cache_only:1; 69 u32 cache_only;
69 /* if set, only the HW is modified not the cache */ 70 /* if set, only the HW is modified not the cache */
70 unsigned int cache_bypass:1; 71 u32 cache_bypass;
71 /* if set, remember to free reg_defaults_raw */ 72 /* if set, remember to free reg_defaults_raw */
72 unsigned int cache_free:1; 73 bool cache_free;
73 74
74 struct reg_default *reg_defaults; 75 struct reg_default *reg_defaults;
75 const void *reg_defaults_raw; 76 const void *reg_defaults_raw;
76 void *cache; 77 void *cache;
77 bool cache_dirty; 78 u32 cache_dirty;
79
80 struct reg_default *patch;
81 int patch_regs;
78}; 82};
79 83
80struct regcache_ops { 84struct regcache_ops {
@@ -84,7 +88,7 @@ struct regcache_ops {
84 int (*exit)(struct regmap *map); 88 int (*exit)(struct regmap *map);
85 int (*read)(struct regmap *map, unsigned int reg, unsigned int *value); 89 int (*read)(struct regmap *map, unsigned int reg, unsigned int *value);
86 int (*write)(struct regmap *map, unsigned int reg, unsigned int value); 90 int (*write)(struct regmap *map, unsigned int reg, unsigned int value);
87 int (*sync)(struct regmap *map); 91 int (*sync)(struct regmap *map, unsigned int min, unsigned int max);
88}; 92};
89 93
90bool regmap_writeable(struct regmap *map, unsigned int reg); 94bool regmap_writeable(struct regmap *map, unsigned int reg);
diff --git a/drivers/base/regmap/regcache-lzo.c b/drivers/base/regmap/regcache-lzo.c
index b7d16143edeb..483b06d4a380 100644
--- a/drivers/base/regmap/regcache-lzo.c
+++ b/drivers/base/regmap/regcache-lzo.c
@@ -11,6 +11,7 @@
11 */ 11 */
12 12
13#include <linux/slab.h> 13#include <linux/slab.h>
14#include <linux/device.h>
14#include <linux/lzo.h> 15#include <linux/lzo.h>
15 16
16#include "internal.h" 17#include "internal.h"
@@ -331,7 +332,8 @@ out:
331 return ret; 332 return ret;
332} 333}
333 334
334static int regcache_lzo_sync(struct regmap *map) 335static int regcache_lzo_sync(struct regmap *map, unsigned int min,
336 unsigned int max)
335{ 337{
336 struct regcache_lzo_ctx **lzo_blocks; 338 struct regcache_lzo_ctx **lzo_blocks;
337 unsigned int val; 339 unsigned int val;
@@ -339,10 +341,21 @@ static int regcache_lzo_sync(struct regmap *map)
339 int ret; 341 int ret;
340 342
341 lzo_blocks = map->cache; 343 lzo_blocks = map->cache;
342 for_each_set_bit(i, lzo_blocks[0]->sync_bmp, lzo_blocks[0]->sync_bmp_nbits) { 344 i = min;
345 for_each_set_bit_from(i, lzo_blocks[0]->sync_bmp,
346 lzo_blocks[0]->sync_bmp_nbits) {
347 if (i > max)
348 continue;
349
343 ret = regcache_read(map, i, &val); 350 ret = regcache_read(map, i, &val);
344 if (ret) 351 if (ret)
345 return ret; 352 return ret;
353
354 /* Is this the hardware default? If so skip. */
355 ret = regcache_lookup_reg(map, i);
356 if (ret > 0 && val == map->reg_defaults[ret].def)
357 continue;
358
346 map->cache_bypass = 1; 359 map->cache_bypass = 1;
347 ret = _regmap_write(map, i, val); 360 ret = _regmap_write(map, i, val);
348 map->cache_bypass = 0; 361 map->cache_bypass = 0;
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index 32620c4f1683..92b779ee002b 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -11,6 +11,7 @@
11 */ 11 */
12 12
13#include <linux/slab.h> 13#include <linux/slab.h>
14#include <linux/device.h>
14#include <linux/debugfs.h> 15#include <linux/debugfs.h>
15#include <linux/rbtree.h> 16#include <linux/rbtree.h>
16#include <linux/seq_file.h> 17#include <linux/seq_file.h>
@@ -137,6 +138,7 @@ static int rbtree_show(struct seq_file *s, void *ignored)
137 unsigned int base, top; 138 unsigned int base, top;
138 int nodes = 0; 139 int nodes = 0;
139 int registers = 0; 140 int registers = 0;
141 int average;
140 142
141 mutex_lock(&map->lock); 143 mutex_lock(&map->lock);
142 144
@@ -151,8 +153,13 @@ static int rbtree_show(struct seq_file *s, void *ignored)
151 registers += top - base + 1; 153 registers += top - base + 1;
152 } 154 }
153 155
156 if (nodes)
157 average = registers / nodes;
158 else
159 average = 0;
160
154 seq_printf(s, "%d nodes, %d registers, average %d registers\n", 161 seq_printf(s, "%d nodes, %d registers, average %d registers\n",
155 nodes, registers, registers / nodes); 162 nodes, registers, average);
156 163
157 mutex_unlock(&map->lock); 164 mutex_unlock(&map->lock);
158 165
@@ -357,7 +364,8 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
357 return 0; 364 return 0;
358} 365}
359 366
360static int regcache_rbtree_sync(struct regmap *map) 367static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
368 unsigned int max)
361{ 369{
362 struct regcache_rbtree_ctx *rbtree_ctx; 370 struct regcache_rbtree_ctx *rbtree_ctx;
363 struct rb_node *node; 371 struct rb_node *node;
@@ -365,19 +373,37 @@ static int regcache_rbtree_sync(struct regmap *map)
365 unsigned int regtmp; 373 unsigned int regtmp;
366 unsigned int val; 374 unsigned int val;
367 int ret; 375 int ret;
368 int i; 376 int i, base, end;
369 377
370 rbtree_ctx = map->cache; 378 rbtree_ctx = map->cache;
371 for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) { 379 for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
372 rbnode = rb_entry(node, struct regcache_rbtree_node, node); 380 rbnode = rb_entry(node, struct regcache_rbtree_node, node);
373 for (i = 0; i < rbnode->blklen; i++) { 381
382 if (rbnode->base_reg < min)
383 continue;
384 if (rbnode->base_reg > max)
385 break;
386 if (rbnode->base_reg + rbnode->blklen < min)
387 continue;
388
389 if (min > rbnode->base_reg)
390 base = min - rbnode->base_reg;
391 else
392 base = 0;
393
394 if (max < rbnode->base_reg + rbnode->blklen)
395 end = rbnode->base_reg + rbnode->blklen - max;
396 else
397 end = rbnode->blklen;
398
399 for (i = base; i < end; i++) {
374 regtmp = rbnode->base_reg + i; 400 regtmp = rbnode->base_reg + i;
375 val = regcache_rbtree_get_register(rbnode, i, 401 val = regcache_rbtree_get_register(rbnode, i,
376 map->cache_word_size); 402 map->cache_word_size);
377 403
378 /* Is this the hardware default? If so skip. */ 404 /* Is this the hardware default? If so skip. */
379 ret = regcache_lookup_reg(map, i); 405 ret = regcache_lookup_reg(map, regtmp);
380 if (ret > 0 && val == map->reg_defaults[ret].def) 406 if (ret >= 0 && val == map->reg_defaults[ret].def)
381 continue; 407 continue;
382 408
383 map->cache_bypass = 1; 409 map->cache_bypass = 1;
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index d1daa5e9fadf..74b69095def6 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -12,6 +12,7 @@
12 12
13#include <linux/slab.h> 13#include <linux/slab.h>
14#include <linux/export.h> 14#include <linux/export.h>
15#include <linux/device.h>
15#include <trace/events/regmap.h> 16#include <trace/events/regmap.h>
16#include <linux/bsearch.h> 17#include <linux/bsearch.h>
17#include <linux/sort.h> 18#include <linux/sort.h>
@@ -35,12 +36,17 @@ static int regcache_hw_init(struct regmap *map)
35 return -EINVAL; 36 return -EINVAL;
36 37
37 if (!map->reg_defaults_raw) { 38 if (!map->reg_defaults_raw) {
39 u32 cache_bypass = map->cache_bypass;
38 dev_warn(map->dev, "No cache defaults, reading back from HW\n"); 40 dev_warn(map->dev, "No cache defaults, reading back from HW\n");
41
 42 /* Bypass the cache access till data is read from HW */
43 map->cache_bypass = 1;
39 tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL); 44 tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
40 if (!tmp_buf) 45 if (!tmp_buf)
41 return -EINVAL; 46 return -EINVAL;
42 ret = regmap_bulk_read(map, 0, tmp_buf, 47 ret = regmap_bulk_read(map, 0, tmp_buf,
43 map->num_reg_defaults_raw); 48 map->num_reg_defaults_raw);
49 map->cache_bypass = cache_bypass;
44 if (ret < 0) { 50 if (ret < 0) {
45 kfree(tmp_buf); 51 kfree(tmp_buf);
46 return ret; 52 return ret;
@@ -211,7 +217,6 @@ int regcache_read(struct regmap *map,
211 217
212 return -EINVAL; 218 return -EINVAL;
213} 219}
214EXPORT_SYMBOL_GPL(regcache_read);
215 220
216/** 221/**
217 * regcache_write: Set the value of a given register in the cache. 222 * regcache_write: Set the value of a given register in the cache.
@@ -238,7 +243,6 @@ int regcache_write(struct regmap *map,
238 243
239 return 0; 244 return 0;
240} 245}
241EXPORT_SYMBOL_GPL(regcache_write);
242 246
243/** 247/**
244 * regcache_sync: Sync the register cache with the hardware. 248 * regcache_sync: Sync the register cache with the hardware.
@@ -254,12 +258,11 @@ EXPORT_SYMBOL_GPL(regcache_write);
254int regcache_sync(struct regmap *map) 258int regcache_sync(struct regmap *map)
255{ 259{
256 int ret = 0; 260 int ret = 0;
257 unsigned int val;
258 unsigned int i; 261 unsigned int i;
259 const char *name; 262 const char *name;
260 unsigned int bypass; 263 unsigned int bypass;
261 264
262 BUG_ON(!map->cache_ops); 265 BUG_ON(!map->cache_ops || !map->cache_ops->sync);
263 266
264 mutex_lock(&map->lock); 267 mutex_lock(&map->lock);
265 /* Remember the initial bypass state */ 268 /* Remember the initial bypass state */
@@ -268,26 +271,27 @@ int regcache_sync(struct regmap *map)
268 map->cache_ops->name); 271 map->cache_ops->name);
269 name = map->cache_ops->name; 272 name = map->cache_ops->name;
270 trace_regcache_sync(map->dev, name, "start"); 273 trace_regcache_sync(map->dev, name, "start");
274
271 if (!map->cache_dirty) 275 if (!map->cache_dirty)
272 goto out; 276 goto out;
273 if (map->cache_ops->sync) {
274 ret = map->cache_ops->sync(map);
275 } else {
276 for (i = 0; i < map->num_reg_defaults; i++) {
277 ret = regcache_read(map, i, &val);
278 if (ret < 0)
279 goto out;
280 map->cache_bypass = 1;
281 ret = _regmap_write(map, i, val);
282 map->cache_bypass = 0;
283 if (ret < 0)
284 goto out;
285 dev_dbg(map->dev, "Synced register %#x, value %#x\n",
286 map->reg_defaults[i].reg,
287 map->reg_defaults[i].def);
288 }
289 277
278 /* Apply any patch first */
279 map->cache_bypass = 1;
280 for (i = 0; i < map->patch_regs; i++) {
281 ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def);
282 if (ret != 0) {
283 dev_err(map->dev, "Failed to write %x = %x: %d\n",
284 map->patch[i].reg, map->patch[i].def, ret);
285 goto out;
286 }
290 } 287 }
288 map->cache_bypass = 0;
289
290 ret = map->cache_ops->sync(map, 0, map->max_register);
291
292 if (ret == 0)
293 map->cache_dirty = false;
294
291out: 295out:
292 trace_regcache_sync(map->dev, name, "stop"); 296 trace_regcache_sync(map->dev, name, "stop");
293 /* Restore the bypass state */ 297 /* Restore the bypass state */
@@ -299,6 +303,52 @@ out:
299EXPORT_SYMBOL_GPL(regcache_sync); 303EXPORT_SYMBOL_GPL(regcache_sync);
300 304
301/** 305/**
306 * regcache_sync_region: Sync part of the register cache with the hardware.
307 *
308 * @map: map to sync.
309 * @min: first register to sync
310 * @max: last register to sync
311 *
312 * Write all non-default register values in the specified region to
313 * the hardware.
314 *
315 * Return a negative value on failure, 0 on success.
316 */
317int regcache_sync_region(struct regmap *map, unsigned int min,
318 unsigned int max)
319{
320 int ret = 0;
321 const char *name;
322 unsigned int bypass;
323
324 BUG_ON(!map->cache_ops || !map->cache_ops->sync);
325
326 mutex_lock(&map->lock);
327
328 /* Remember the initial bypass state */
329 bypass = map->cache_bypass;
330
331 name = map->cache_ops->name;
332 dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max);
333
334 trace_regcache_sync(map->dev, name, "start region");
335
336 if (!map->cache_dirty)
337 goto out;
338
339 ret = map->cache_ops->sync(map, min, max);
340
341out:
342 trace_regcache_sync(map->dev, name, "stop region");
343 /* Restore the bypass state */
344 map->cache_bypass = bypass;
345 mutex_unlock(&map->lock);
346
347 return ret;
348}
349EXPORT_SYMBOL_GPL(regcache_sync_region);
350
351/**
302 * regcache_cache_only: Put a register map into cache only mode 352 * regcache_cache_only: Put a register map into cache only mode
303 * 353 *
304 * @map: map to configure 354 * @map: map to configure
@@ -315,6 +365,7 @@ void regcache_cache_only(struct regmap *map, bool enable)
315 mutex_lock(&map->lock); 365 mutex_lock(&map->lock);
316 WARN_ON(map->cache_bypass && enable); 366 WARN_ON(map->cache_bypass && enable);
317 map->cache_only = enable; 367 map->cache_only = enable;
368 trace_regmap_cache_only(map->dev, enable);
318 mutex_unlock(&map->lock); 369 mutex_unlock(&map->lock);
319} 370}
320EXPORT_SYMBOL_GPL(regcache_cache_only); 371EXPORT_SYMBOL_GPL(regcache_cache_only);
@@ -352,6 +403,7 @@ void regcache_cache_bypass(struct regmap *map, bool enable)
352 mutex_lock(&map->lock); 403 mutex_lock(&map->lock);
353 WARN_ON(map->cache_only && enable); 404 WARN_ON(map->cache_only && enable);
354 map->cache_bypass = enable; 405 map->cache_bypass = enable;
406 trace_regmap_cache_bypass(map->dev, enable);
355 mutex_unlock(&map->lock); 407 mutex_unlock(&map->lock);
356} 408}
357EXPORT_SYMBOL_GPL(regcache_cache_bypass); 409EXPORT_SYMBOL_GPL(regcache_cache_bypass);
@@ -374,10 +426,16 @@ bool regcache_set_val(void *base, unsigned int idx,
374 cache[idx] = val; 426 cache[idx] = val;
375 break; 427 break;
376 } 428 }
429 case 4: {
430 u32 *cache = base;
431 if (cache[idx] == val)
432 return true;
433 cache[idx] = val;
434 break;
435 }
377 default: 436 default:
378 BUG(); 437 BUG();
379 } 438 }
380 /* unreachable */
381 return false; 439 return false;
382} 440}
383 441
@@ -396,6 +454,10 @@ unsigned int regcache_get_val(const void *base, unsigned int idx,
396 const u16 *cache = base; 454 const u16 *cache = base;
397 return cache[idx]; 455 return cache[idx];
398 } 456 }
457 case 4: {
458 const u32 *cache = base;
459 return cache[idx];
460 }
399 default: 461 default:
400 BUG(); 462 BUG();
401 } 463 }
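
As a usage sketch (not part of the patch), a driver could call the new regcache_sync_region() to restore just one block of cached registers after a partial power-down instead of syncing the whole map. The my_codec structure, register range and resume helper below are hypothetical:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regmap.h>

struct my_codec {
	struct device *dev;
	struct regmap *regmap;
};

#define MY_CODEC_DAC_FIRST_REG	0x30
#define MY_CODEC_DAC_LAST_REG	0x3f

static int my_codec_restore_dac(struct my_codec *codec)
{
	int ret;

	/* Leave cache-only mode now that the block is powered again */
	regcache_cache_only(codec->regmap, false);

	/* Write back only the cached DAC registers that differ from defaults */
	ret = regcache_sync_region(codec->regmap, MY_CODEC_DAC_FIRST_REG,
				   MY_CODEC_DAC_LAST_REG);
	if (ret != 0)
		dev_err(codec->dev, "DAC register sync failed: %d\n", ret);

	return ret;
}
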
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 6f397476e27c..251eb70f83e7 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -11,10 +11,10 @@
11 */ 11 */
12 12
13#include <linux/slab.h> 13#include <linux/slab.h>
14#include <linux/module.h>
15#include <linux/mutex.h> 14#include <linux/mutex.h>
16#include <linux/debugfs.h> 15#include <linux/debugfs.h>
17#include <linux/uaccess.h> 16#include <linux/uaccess.h>
17#include <linux/device.h>
18 18
19#include "internal.h" 19#include "internal.h"
20 20
@@ -27,12 +27,35 @@ static size_t regmap_calc_reg_len(int max_val, char *buf, size_t buf_size)
27 return strlen(buf); 27 return strlen(buf);
28} 28}
29 29
30static int regmap_open_file(struct inode *inode, struct file *file) 30static ssize_t regmap_name_read_file(struct file *file,
31 char __user *user_buf, size_t count,
32 loff_t *ppos)
31{ 33{
32 file->private_data = inode->i_private; 34 struct regmap *map = file->private_data;
33 return 0; 35 int ret;
36 char *buf;
37
38 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
39 if (!buf)
40 return -ENOMEM;
41
42 ret = snprintf(buf, PAGE_SIZE, "%s\n", map->dev->driver->name);
43 if (ret < 0) {
44 kfree(buf);
45 return ret;
46 }
47
48 ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
49 kfree(buf);
50 return ret;
34} 51}
35 52
53static const struct file_operations regmap_name_fops = {
54 .open = simple_open,
55 .read = regmap_name_read_file,
56 .llseek = default_llseek,
57};
58
36static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf, 59static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf,
37 size_t count, loff_t *ppos) 60 size_t count, loff_t *ppos)
38{ 61{
@@ -103,9 +126,51 @@ out:
103 return ret; 126 return ret;
104} 127}
105 128
129#undef REGMAP_ALLOW_WRITE_DEBUGFS
130#ifdef REGMAP_ALLOW_WRITE_DEBUGFS
131/*
132 * This can be dangerous especially when we have clients such as
133 * PMICs, therefore don't provide any real compile time configuration option
134 * for this feature, people who want to use this will need to modify
135 * the source code directly.
136 */
137static ssize_t regmap_map_write_file(struct file *file,
138 const char __user *user_buf,
139 size_t count, loff_t *ppos)
140{
141 char buf[32];
142 size_t buf_size;
143 char *start = buf;
144 unsigned long reg, value;
145 struct regmap *map = file->private_data;
146
147 buf_size = min(count, (sizeof(buf)-1));
148 if (copy_from_user(buf, user_buf, buf_size))
149 return -EFAULT;
150 buf[buf_size] = 0;
151
152 while (*start == ' ')
153 start++;
154 reg = simple_strtoul(start, &start, 16);
155 while (*start == ' ')
156 start++;
157 if (strict_strtoul(start, 16, &value))
158 return -EINVAL;
159
160 /* Userspace has been fiddling around behind the kernel's back */
161 add_taint(TAINT_USER);
162
163 regmap_write(map, reg, value);
164 return buf_size;
165}
166#else
167#define regmap_map_write_file NULL
168#endif
169
106static const struct file_operations regmap_map_fops = { 170static const struct file_operations regmap_map_fops = {
107 .open = regmap_open_file, 171 .open = simple_open,
108 .read = regmap_map_read_file, 172 .read = regmap_map_read_file,
173 .write = regmap_map_write_file,
109 .llseek = default_llseek, 174 .llseek = default_llseek,
110}; 175};
111 176
@@ -172,7 +237,7 @@ out:
172} 237}
173 238
174static const struct file_operations regmap_access_fops = { 239static const struct file_operations regmap_access_fops = {
175 .open = regmap_open_file, 240 .open = simple_open,
176 .read = regmap_access_read_file, 241 .read = regmap_access_read_file,
177 .llseek = default_llseek, 242 .llseek = default_llseek,
178}; 243};
@@ -186,12 +251,24 @@ void regmap_debugfs_init(struct regmap *map)
186 return; 251 return;
187 } 252 }
188 253
254 debugfs_create_file("name", 0400, map->debugfs,
255 map, &regmap_name_fops);
256
189 if (map->max_register) { 257 if (map->max_register) {
190 debugfs_create_file("registers", 0400, map->debugfs, 258 debugfs_create_file("registers", 0400, map->debugfs,
191 map, &regmap_map_fops); 259 map, &regmap_map_fops);
192 debugfs_create_file("access", 0400, map->debugfs, 260 debugfs_create_file("access", 0400, map->debugfs,
193 map, &regmap_access_fops); 261 map, &regmap_access_fops);
194 } 262 }
263
264 if (map->cache_type) {
265 debugfs_create_bool("cache_only", 0400, map->debugfs,
266 &map->cache_only);
267 debugfs_create_bool("cache_dirty", 0400, map->debugfs,
268 &map->cache_dirty);
269 debugfs_create_bool("cache_bypass", 0400, map->debugfs,
270 &map->cache_bypass);
271 }
195} 272}
196 273
197void regmap_debugfs_exit(struct regmap *map) 274void regmap_debugfs_exit(struct regmap *map)
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
index 38621ec87c05..9a3a8c564389 100644
--- a/drivers/base/regmap/regmap-i2c.c
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -111,4 +111,21 @@ struct regmap *regmap_init_i2c(struct i2c_client *i2c,
111} 111}
112EXPORT_SYMBOL_GPL(regmap_init_i2c); 112EXPORT_SYMBOL_GPL(regmap_init_i2c);
113 113
114/**
115 * devm_regmap_init_i2c(): Initialise managed register map
116 *
117 * @i2c: Device that will be interacted with
118 * @config: Configuration for register map
119 *
120 * The return value will be an ERR_PTR() on error or a valid pointer
121 * to a struct regmap. The regmap will be automatically freed by the
122 * device management code.
123 */
124struct regmap *devm_regmap_init_i2c(struct i2c_client *i2c,
125 const struct regmap_config *config)
126{
127 return devm_regmap_init(&i2c->dev, &regmap_i2c, config);
128}
129EXPORT_SYMBOL_GPL(devm_regmap_init_i2c);
130
114MODULE_LICENSE("GPL"); 131MODULE_LICENSE("GPL");
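
As a usage sketch assuming the interface added above, an I2C driver's probe can switch to the managed devm_regmap_init_i2c() and drop its regmap_exit() calls; the SPI variant added further down is used the same way. The chip name and regmap_config values here are illustrative only:

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/regmap.h>

static const struct regmap_config my_chip_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = 0x7f,
	.cache_type = REGCACHE_RBTREE,
};

static int my_chip_i2c_probe(struct i2c_client *i2c,
			     const struct i2c_device_id *id)
{
	struct regmap *map;

	/* Managed: the regmap is freed automatically on driver detach,
	 * so no regmap_exit() is needed in remove() or error paths. */
	map = devm_regmap_init_i2c(i2c, &my_chip_regmap_config);
	if (IS_ERR(map))
		return PTR_ERR(map);

	i2c_set_clientdata(i2c, map);
	return 0;
}
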
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 428836fc5835..1befaa7a31cb 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -11,6 +11,7 @@
11 */ 11 */
12 12
13#include <linux/export.h> 13#include <linux/export.h>
14#include <linux/device.h>
14#include <linux/regmap.h> 15#include <linux/regmap.h>
15#include <linux/irq.h> 16#include <linux/irq.h>
16#include <linux/interrupt.h> 17#include <linux/interrupt.h>
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
index 2560658de344..7c0c35a39c33 100644
--- a/drivers/base/regmap/regmap-spi.c
+++ b/drivers/base/regmap/regmap-spi.c
@@ -70,4 +70,21 @@ struct regmap *regmap_init_spi(struct spi_device *spi,
70} 70}
71EXPORT_SYMBOL_GPL(regmap_init_spi); 71EXPORT_SYMBOL_GPL(regmap_init_spi);
72 72
73/**
 74 * devm_regmap_init_spi(): Initialise managed register map
75 *
76 * @spi: Device that will be interacted with
77 * @config: Configuration for register map
78 *
79 * The return value will be an ERR_PTR() on error or a valid pointer
80 * to a struct regmap. The map will be automatically freed by the
81 * device management code.
82 */
83struct regmap *devm_regmap_init_spi(struct spi_device *spi,
84 const struct regmap_config *config)
85{
86 return devm_regmap_init(&spi->dev, &regmap_spi, config);
87}
88EXPORT_SYMBOL_GPL(devm_regmap_init_spi);
89
73MODULE_LICENSE("GPL"); 90MODULE_LICENSE("GPL");
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 65558034318f..7a3f535e481c 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -10,8 +10,9 @@
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 */ 11 */
12 12
13#include <linux/device.h>
13#include <linux/slab.h> 14#include <linux/slab.h>
14#include <linux/module.h> 15#include <linux/export.h>
15#include <linux/mutex.h> 16#include <linux/mutex.h>
16#include <linux/err.h> 17#include <linux/err.h>
17 18
@@ -36,6 +37,9 @@ bool regmap_readable(struct regmap *map, unsigned int reg)
36 if (map->max_register && reg > map->max_register) 37 if (map->max_register && reg > map->max_register)
37 return false; 38 return false;
38 39
40 if (map->format.format_write)
41 return false;
42
39 if (map->readable_reg) 43 if (map->readable_reg)
40 return map->readable_reg(map->dev, reg); 44 return map->readable_reg(map->dev, reg);
41 45
@@ -44,7 +48,7 @@ bool regmap_readable(struct regmap *map, unsigned int reg)
44 48
45bool regmap_volatile(struct regmap *map, unsigned int reg) 49bool regmap_volatile(struct regmap *map, unsigned int reg)
46{ 50{
47 if (map->max_register && reg > map->max_register) 51 if (!regmap_readable(map, reg))
48 return false; 52 return false;
49 53
50 if (map->volatile_reg) 54 if (map->volatile_reg)
@@ -55,7 +59,7 @@ bool regmap_volatile(struct regmap *map, unsigned int reg)
55 59
56bool regmap_precious(struct regmap *map, unsigned int reg) 60bool regmap_precious(struct regmap *map, unsigned int reg)
57{ 61{
58 if (map->max_register && reg > map->max_register) 62 if (!regmap_readable(map, reg))
59 return false; 63 return false;
60 64
61 if (map->precious_reg) 65 if (map->precious_reg)
@@ -76,6 +80,14 @@ static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
76 return true; 80 return true;
77} 81}
78 82
83static void regmap_format_2_6_write(struct regmap *map,
84 unsigned int reg, unsigned int val)
85{
86 u8 *out = map->work_buf;
87
88 *out = (reg << 6) | val;
89}
90
79static void regmap_format_4_12_write(struct regmap *map, 91static void regmap_format_4_12_write(struct regmap *map,
80 unsigned int reg, unsigned int val) 92 unsigned int reg, unsigned int val)
81{ 93{
@@ -114,6 +126,13 @@ static void regmap_format_16(void *buf, unsigned int val)
114 b[0] = cpu_to_be16(val); 126 b[0] = cpu_to_be16(val);
115} 127}
116 128
129static void regmap_format_32(void *buf, unsigned int val)
130{
131 __be32 *b = buf;
132
133 b[0] = cpu_to_be32(val);
134}
135
117static unsigned int regmap_parse_8(void *buf) 136static unsigned int regmap_parse_8(void *buf)
118{ 137{
119 u8 *b = buf; 138 u8 *b = buf;
@@ -130,6 +149,15 @@ static unsigned int regmap_parse_16(void *buf)
130 return b[0]; 149 return b[0];
131} 150}
132 151
152static unsigned int regmap_parse_32(void *buf)
153{
154 __be32 *b = buf;
155
156 b[0] = be32_to_cpu(b[0]);
157
158 return b[0];
159}
160
133/** 161/**
134 * regmap_init(): Initialise register map 162 * regmap_init(): Initialise register map
135 * 163 *
@@ -159,8 +187,10 @@ struct regmap *regmap_init(struct device *dev,
159 187
160 mutex_init(&map->lock); 188 mutex_init(&map->lock);
161 map->format.buf_size = (config->reg_bits + config->val_bits) / 8; 189 map->format.buf_size = (config->reg_bits + config->val_bits) / 8;
162 map->format.reg_bytes = config->reg_bits / 8; 190 map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
163 map->format.val_bytes = config->val_bits / 8; 191 map->format.pad_bytes = config->pad_bits / 8;
192 map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
193 map->format.buf_size += map->format.pad_bytes;
164 map->dev = dev; 194 map->dev = dev;
165 map->bus = bus; 195 map->bus = bus;
166 map->max_register = config->max_register; 196 map->max_register = config->max_register;
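
The pad handling computed above lets a map describe buses that need dummy bits between the formatted register address and the value. A hypothetical configuration, assuming the matching pad_bits field added to struct regmap_config by the corresponding include/linux/regmap.h change, might look like this (all field values are illustrative):

#include <linux/regmap.h>

/*
 * Hypothetical device with a 16-bit register address followed by 8 dummy
 * bits before each 16-bit value; pad_bits tells the core to leave room
 * for that padding when formatting transfers.
 */
static const struct regmap_config my_padded_regmap_config = {
	.reg_bits = 16,
	.pad_bits = 8,
	.val_bits = 16,
	.max_register = 0x1ff,
};
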
@@ -178,6 +208,16 @@ struct regmap *regmap_init(struct device *dev,
178 } 208 }
179 209
180 switch (config->reg_bits) { 210 switch (config->reg_bits) {
211 case 2:
212 switch (config->val_bits) {
213 case 6:
214 map->format.format_write = regmap_format_2_6_write;
215 break;
216 default:
217 goto err_map;
218 }
219 break;
220
181 case 4: 221 case 4:
182 switch (config->val_bits) { 222 switch (config->val_bits) {
183 case 12: 223 case 12:
@@ -216,6 +256,10 @@ struct regmap *regmap_init(struct device *dev,
216 map->format.format_reg = regmap_format_16; 256 map->format.format_reg = regmap_format_16;
217 break; 257 break;
218 258
259 case 32:
260 map->format.format_reg = regmap_format_32;
261 break;
262
219 default: 263 default:
220 goto err_map; 264 goto err_map;
221 } 265 }
@@ -229,13 +273,17 @@ struct regmap *regmap_init(struct device *dev,
229 map->format.format_val = regmap_format_16; 273 map->format.format_val = regmap_format_16;
230 map->format.parse_val = regmap_parse_16; 274 map->format.parse_val = regmap_parse_16;
231 break; 275 break;
276 case 32:
277 map->format.format_val = regmap_format_32;
278 map->format.parse_val = regmap_parse_32;
279 break;
232 } 280 }
233 281
234 if (!map->format.format_write && 282 if (!map->format.format_write &&
235 !(map->format.format_reg && map->format.format_val)) 283 !(map->format.format_reg && map->format.format_val))
236 goto err_map; 284 goto err_map;
237 285
238 map->work_buf = kmalloc(map->format.buf_size, GFP_KERNEL); 286 map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
239 if (map->work_buf == NULL) { 287 if (map->work_buf == NULL) {
240 ret = -ENOMEM; 288 ret = -ENOMEM;
241 goto err_map; 289 goto err_map;
@@ -258,6 +306,45 @@ err:
258} 306}
259EXPORT_SYMBOL_GPL(regmap_init); 307EXPORT_SYMBOL_GPL(regmap_init);
260 308
309static void devm_regmap_release(struct device *dev, void *res)
310{
311 regmap_exit(*(struct regmap **)res);
312}
313
314/**
315 * devm_regmap_init(): Initialise managed register map
316 *
317 * @dev: Device that will be interacted with
318 * @bus: Bus-specific callbacks to use with device
319 * @config: Configuration for register map
320 *
321 * The return value will be an ERR_PTR() on error or a valid pointer
322 * to a struct regmap. This function should generally not be called
323 * directly, it should be called by bus-specific init functions. The
324 * map will be automatically freed by the device management code.
325 */
326struct regmap *devm_regmap_init(struct device *dev,
327 const struct regmap_bus *bus,
328 const struct regmap_config *config)
329{
330 struct regmap **ptr, *regmap;
331
332 ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
333 if (!ptr)
334 return ERR_PTR(-ENOMEM);
335
336 regmap = regmap_init(dev, bus, config);
337 if (!IS_ERR(regmap)) {
338 *ptr = regmap;
339 devres_add(dev, ptr);
340 } else {
341 devres_free(ptr);
342 }
343
344 return regmap;
345}
346EXPORT_SYMBOL_GPL(devm_regmap_init);
347
261/** 348/**
262 * regmap_reinit_cache(): Reinitialise the current register cache 349 * regmap_reinit_cache(): Reinitialise the current register cache
263 * 350 *
@@ -276,6 +363,7 @@ int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
276 mutex_lock(&map->lock); 363 mutex_lock(&map->lock);
277 364
278 regcache_exit(map); 365 regcache_exit(map);
366 regmap_debugfs_exit(map);
279 367
280 map->max_register = config->max_register; 368 map->max_register = config->max_register;
281 map->writeable_reg = config->writeable_reg; 369 map->writeable_reg = config->writeable_reg;
@@ -284,6 +372,8 @@ int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
284 map->precious_reg = config->precious_reg; 372 map->precious_reg = config->precious_reg;
285 map->cache_type = config->cache_type; 373 map->cache_type = config->cache_type;
286 374
375 regmap_debugfs_init(map);
376
287 map->cache_bypass = false; 377 map->cache_bypass = false;
288 map->cache_only = false; 378 map->cache_only = false;
289 379
@@ -321,6 +411,26 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
321 if (!map->writeable_reg(map->dev, reg + i)) 411 if (!map->writeable_reg(map->dev, reg + i))
322 return -EINVAL; 412 return -EINVAL;
323 413
414 if (!map->cache_bypass && map->format.parse_val) {
415 unsigned int ival;
416 int val_bytes = map->format.val_bytes;
417 for (i = 0; i < val_len / val_bytes; i++) {
418 memcpy(map->work_buf, val + (i * val_bytes), val_bytes);
419 ival = map->format.parse_val(map->work_buf);
420 ret = regcache_write(map, reg + i, ival);
421 if (ret) {
422 dev_err(map->dev,
423 "Error in caching of register: %u ret: %d\n",
424 reg + i, ret);
425 return ret;
426 }
427 }
428 if (map->cache_only) {
429 map->cache_dirty = true;
430 return 0;
431 }
432 }
433
324 map->format.format_reg(map->work_buf, reg); 434 map->format.format_reg(map->work_buf, reg);
325 435
326 u8[0] |= map->write_flag_mask; 436 u8[0] |= map->write_flag_mask;
@@ -332,23 +442,28 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
332 * send the work_buf directly, otherwise try to do a gather 442 * send the work_buf directly, otherwise try to do a gather
333 * write. 443 * write.
334 */ 444 */
335 if (val == map->work_buf + map->format.reg_bytes) 445 if (val == (map->work_buf + map->format.pad_bytes +
446 map->format.reg_bytes))
336 ret = map->bus->write(map->dev, map->work_buf, 447 ret = map->bus->write(map->dev, map->work_buf,
337 map->format.reg_bytes + val_len); 448 map->format.reg_bytes +
449 map->format.pad_bytes +
450 val_len);
338 else if (map->bus->gather_write) 451 else if (map->bus->gather_write)
339 ret = map->bus->gather_write(map->dev, map->work_buf, 452 ret = map->bus->gather_write(map->dev, map->work_buf,
340 map->format.reg_bytes, 453 map->format.reg_bytes +
454 map->format.pad_bytes,
341 val, val_len); 455 val, val_len);
342 456
343 /* If that didn't work fall back on linearising by hand. */ 457 /* If that didn't work fall back on linearising by hand. */
344 if (ret == -ENOTSUPP) { 458 if (ret == -ENOTSUPP) {
345 len = map->format.reg_bytes + val_len; 459 len = map->format.reg_bytes + map->format.pad_bytes + val_len;
346 buf = kmalloc(len, GFP_KERNEL); 460 buf = kzalloc(len, GFP_KERNEL);
347 if (!buf) 461 if (!buf)
348 return -ENOMEM; 462 return -ENOMEM;
349 463
350 memcpy(buf, map->work_buf, map->format.reg_bytes); 464 memcpy(buf, map->work_buf, map->format.reg_bytes);
351 memcpy(buf + map->format.reg_bytes, val, val_len); 465 memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
466 val, val_len);
352 ret = map->bus->write(map->dev, buf, len); 467 ret = map->bus->write(map->dev, buf, len);
353 468
354 kfree(buf); 469 kfree(buf);
@@ -366,7 +481,7 @@ int _regmap_write(struct regmap *map, unsigned int reg,
366 int ret; 481 int ret;
367 BUG_ON(!map->format.format_write && !map->format.format_val); 482 BUG_ON(!map->format.format_write && !map->format.format_val);
368 483
369 if (!map->cache_bypass) { 484 if (!map->cache_bypass && map->format.format_write) {
370 ret = regcache_write(map, reg, val); 485 ret = regcache_write(map, reg, val);
371 if (ret != 0) 486 if (ret != 0)
372 return ret; 487 return ret;
@@ -390,10 +505,12 @@ int _regmap_write(struct regmap *map, unsigned int reg,
390 505
391 return ret; 506 return ret;
392 } else { 507 } else {
393 map->format.format_val(map->work_buf + map->format.reg_bytes, 508 map->format.format_val(map->work_buf + map->format.reg_bytes
394 val); 509 + map->format.pad_bytes, val);
395 return _regmap_raw_write(map, reg, 510 return _regmap_raw_write(map, reg,
396 map->work_buf + map->format.reg_bytes, 511 map->work_buf +
512 map->format.reg_bytes +
513 map->format.pad_bytes,
397 map->format.val_bytes); 514 map->format.val_bytes);
398 } 515 }
399} 516}
@@ -441,12 +558,8 @@ EXPORT_SYMBOL_GPL(regmap_write);
441int regmap_raw_write(struct regmap *map, unsigned int reg, 558int regmap_raw_write(struct regmap *map, unsigned int reg,
442 const void *val, size_t val_len) 559 const void *val, size_t val_len)
443{ 560{
444 size_t val_count = val_len / map->format.val_bytes;
445 int ret; 561 int ret;
446 562
447 WARN_ON(!regmap_volatile_range(map, reg, val_count) &&
448 map->cache_type != REGCACHE_NONE);
449
450 mutex_lock(&map->lock); 563 mutex_lock(&map->lock);
451 564
452 ret = _regmap_raw_write(map, reg, val, val_len); 565 ret = _regmap_raw_write(map, reg, val, val_len);
@@ -457,6 +570,56 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
457} 570}
458EXPORT_SYMBOL_GPL(regmap_raw_write); 571EXPORT_SYMBOL_GPL(regmap_raw_write);
459 572
573/*
574 * regmap_bulk_write(): Write multiple registers to the device
575 *
576 * @map: Register map to write to
577 * @reg: First register to be written to
578 * @val: Block of data to be written, in native register size for device
579 * @val_count: Number of registers to write
580 *
581 * This function is intended to be used for writing a large block of
582 * data to the device either in a single transfer or multiple transfers.
583 *
584 * A value of zero will be returned on success, a negative errno will
585 * be returned in error cases.
586 */
587int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
588 size_t val_count)
589{
590 int ret = 0, i;
591 size_t val_bytes = map->format.val_bytes;
592 void *wval;
593
594 if (!map->format.parse_val)
595 return -EINVAL;
596
597 mutex_lock(&map->lock);
598
599 /* No formatting is required if val_bytes is 1 */
600 if (val_bytes == 1) {
601 wval = (void *)val;
602 } else {
603 wval = kmemdup(val, val_count * val_bytes, GFP_KERNEL);
604 if (!wval) {
605 ret = -ENOMEM;
606 dev_err(map->dev, "Error in memory allocation\n");
607 goto out;
608 }
609 for (i = 0; i < val_count * val_bytes; i += val_bytes)
610 map->format.parse_val(wval + i);
611 }
612 ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
613
614 if (val_bytes != 1)
615 kfree(wval);
616
617out:
618 mutex_unlock(&map->lock);
619 return ret;
620}
621EXPORT_SYMBOL_GPL(regmap_bulk_write);
622
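
A short sketch, not from the patch, of how a driver might call the new regmap_bulk_write(); the register base, value width and data are invented for illustration:

#include <linux/kernel.h>
#include <linux/regmap.h>

#define MY_CHIP_COEFF_BASE	0x40

static int my_chip_load_coeffs(struct regmap *map)
{
	/* Values are in native register size (16 bits here) and CPU byte
	 * order; regmap_bulk_write() formats them for the bus. */
	u16 coeffs[4] = { 0x0123, 0x4567, 0x89ab, 0xcdef };

	return regmap_bulk_write(map, MY_CHIP_COEFF_BASE, coeffs,
				 ARRAY_SIZE(coeffs));
}
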
460static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val, 623static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
461 unsigned int val_len) 624 unsigned int val_len)
462{ 625{
@@ -476,7 +639,8 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
476 trace_regmap_hw_read_start(map->dev, reg, 639 trace_regmap_hw_read_start(map->dev, reg,
477 val_len / map->format.val_bytes); 640 val_len / map->format.val_bytes);
478 641
479 ret = map->bus->read(map->dev, map->work_buf, map->format.reg_bytes, 642 ret = map->bus->read(map->dev, map->work_buf,
643 map->format.reg_bytes + map->format.pad_bytes,
480 val, val_len); 644 val, val_len);
481 645
482 trace_regmap_hw_read_done(map->dev, reg, 646 trace_regmap_hw_read_done(map->dev, reg,
@@ -549,16 +713,32 @@ EXPORT_SYMBOL_GPL(regmap_read);
549int regmap_raw_read(struct regmap *map, unsigned int reg, void *val, 713int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
550 size_t val_len) 714 size_t val_len)
551{ 715{
552 size_t val_count = val_len / map->format.val_bytes; 716 size_t val_bytes = map->format.val_bytes;
553 int ret; 717 size_t val_count = val_len / val_bytes;
554 718 unsigned int v;
555 WARN_ON(!regmap_volatile_range(map, reg, val_count) && 719 int ret, i;
556 map->cache_type != REGCACHE_NONE);
557 720
558 mutex_lock(&map->lock); 721 mutex_lock(&map->lock);
559 722
560 ret = _regmap_raw_read(map, reg, val, val_len); 723 if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
724 map->cache_type == REGCACHE_NONE) {
725 /* Physical block read if there's no cache involved */
726 ret = _regmap_raw_read(map, reg, val, val_len);
561 727
728 } else {
729 /* Otherwise go word by word for the cache; should be low
730 * cost as we expect to hit the cache.
731 */
732 for (i = 0; i < val_count; i++) {
733 ret = _regmap_read(map, reg + i, &v);
734 if (ret != 0)
735 goto out;
736
737 map->format.format_val(val + (i * val_bytes), v);
738 }
739 }
740
741 out:
562 mutex_unlock(&map->lock); 742 mutex_unlock(&map->lock);
563 743
564 return ret; 744 return ret;
@@ -672,6 +852,79 @@ int regmap_update_bits_check(struct regmap *map, unsigned int reg,
672} 852}
673EXPORT_SYMBOL_GPL(regmap_update_bits_check); 853EXPORT_SYMBOL_GPL(regmap_update_bits_check);
674 854
855/**
856 * regmap_register_patch: Register and apply register updates to be applied
857 * on device initialisation
858 *
859 * @map: Register map to apply updates to.
860 * @regs: Values to update.
861 * @num_regs: Number of entries in regs.
862 *
863 * Register a set of register updates to be applied to the device
864 * whenever the device registers are synchronised with the cache and
865 * apply them immediately. Typically this is used to apply
866 * corrections to be applied to the device defaults on startup, such
867 * corrections to the device defaults on startup, such
868 */
869int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
870 int num_regs)
871{
872 int i, ret;
873 bool bypass;
874
875 /* If needed the implementation can be extended to support this */
876 if (map->patch)
877 return -EBUSY;
878
879 mutex_lock(&map->lock);
880
881 bypass = map->cache_bypass;
882
883 map->cache_bypass = true;
884
885 /* Write out first; it's useful to apply even if we fail later. */
886 for (i = 0; i < num_regs; i++) {
887 ret = _regmap_write(map, regs[i].reg, regs[i].def);
888 if (ret != 0) {
889 dev_err(map->dev, "Failed to write %x = %x: %d\n",
890 regs[i].reg, regs[i].def, ret);
891 goto out;
892 }
893 }
894
895 map->patch = kcalloc(num_regs, sizeof(struct reg_default), GFP_KERNEL);
896 if (map->patch != NULL) {
897 memcpy(map->patch, regs,
898 num_regs * sizeof(struct reg_default));
899 map->patch_regs = num_regs;
900 } else {
901 ret = -ENOMEM;
902 }
903
904out:
905 map->cache_bypass = bypass;
906
907 mutex_unlock(&map->lock);
908
909 return ret;
910}
911EXPORT_SYMBOL_GPL(regmap_register_patch);
912
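
A hedged sketch of registering an errata patch through the new regmap_register_patch(); the register addresses and values are made up, and as the code above shows the patch is written immediately and re-applied by regcache_sync():

#include <linux/kernel.h>
#include <linux/regmap.h>

/* Made-up fix-ups for undocumented registers, as a vendor might supply */
static const struct reg_default my_chip_errata[] = {
	{ .reg = 0x7a, .def = 0x0001 },
	{ .reg = 0x7b, .def = 0x8000 },
};

static int my_chip_apply_errata(struct regmap *map)
{
	return regmap_register_patch(map, my_chip_errata,
				     ARRAY_SIZE(my_chip_errata));
}
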
913/*
914 * regmap_get_val_bytes(): Report the size of a register value
915 *
916 * Report the size of a register value, mainly intended for use by
917 * generic infrastructure built on top of regmap.
918 */
919int regmap_get_val_bytes(struct regmap *map)
920{
921 if (map->format.format_write)
922 return -EINVAL;
923
924 return map->format.val_bytes;
925}
926EXPORT_SYMBOL_GPL(regmap_get_val_bytes);
927
675static int __init regmap_initcall(void) 928static int __init regmap_initcall(void)
676{ 929{
677 regmap_debugfs_initcall(); 930 regmap_debugfs_initcall();
diff --git a/drivers/base/soc.c b/drivers/base/soc.c
new file mode 100644
index 000000000000..05f150382da8
--- /dev/null
+++ b/drivers/base/soc.c
@@ -0,0 +1,183 @@
1/*
2 * Copyright (C) ST-Ericsson SA 2011
3 *
4 * Author: Lee Jones <lee.jones@linaro.org> for ST-Ericsson.
5 * License terms: GNU General Public License (GPL), version 2
6 */
7
8#include <linux/sysfs.h>
9#include <linux/module.h>
10#include <linux/init.h>
11#include <linux/stat.h>
12#include <linux/slab.h>
13#include <linux/idr.h>
14#include <linux/spinlock.h>
15#include <linux/sys_soc.h>
16#include <linux/err.h>
17
18static DEFINE_IDR(soc_ida);
19static DEFINE_SPINLOCK(soc_lock);
20
21static ssize_t soc_info_get(struct device *dev,
22 struct device_attribute *attr,
23 char *buf);
24
25struct soc_device {
26 struct device dev;
27 struct soc_device_attribute *attr;
28 int soc_dev_num;
29};
30
31static struct bus_type soc_bus_type = {
32 .name = "soc",
33};
34
35static DEVICE_ATTR(machine, S_IRUGO, soc_info_get, NULL);
36static DEVICE_ATTR(family, S_IRUGO, soc_info_get, NULL);
37static DEVICE_ATTR(soc_id, S_IRUGO, soc_info_get, NULL);
38static DEVICE_ATTR(revision, S_IRUGO, soc_info_get, NULL);
39
40struct device *soc_device_to_device(struct soc_device *soc_dev)
41{
42 return &soc_dev->dev;
43}
44
45static mode_t soc_attribute_mode(struct kobject *kobj,
46 struct attribute *attr,
47 int index)
48{
49 struct device *dev = container_of(kobj, struct device, kobj);
50 struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
51
52 if ((attr == &dev_attr_machine.attr)
53 && (soc_dev->attr->machine != NULL))
54 return attr->mode;
55 if ((attr == &dev_attr_family.attr)
56 && (soc_dev->attr->family != NULL))
57 return attr->mode;
58 if ((attr == &dev_attr_revision.attr)
59 && (soc_dev->attr->revision != NULL))
60 return attr->mode;
61 if ((attr == &dev_attr_soc_id.attr)
62 && (soc_dev->attr->soc_id != NULL))
63 return attr->mode;
64
65 /* Unknown or unfilled attribute. */
66 return 0;
67}
68
69static ssize_t soc_info_get(struct device *dev,
70 struct device_attribute *attr,
71 char *buf)
72{
73 struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
74
75 if (attr == &dev_attr_machine)
76 return sprintf(buf, "%s\n", soc_dev->attr->machine);
77 if (attr == &dev_attr_family)
78 return sprintf(buf, "%s\n", soc_dev->attr->family);
79 if (attr == &dev_attr_revision)
80 return sprintf(buf, "%s\n", soc_dev->attr->revision);
81 if (attr == &dev_attr_soc_id)
82 return sprintf(buf, "%s\n", soc_dev->attr->soc_id);
83
84 return -EINVAL;
85
86}
87
88static struct attribute *soc_attr[] = {
89 &dev_attr_machine.attr,
90 &dev_attr_family.attr,
91 &dev_attr_soc_id.attr,
92 &dev_attr_revision.attr,
93 NULL,
94};
95
96static const struct attribute_group soc_attr_group = {
97 .attrs = soc_attr,
98 .is_visible = soc_attribute_mode,
99};
100
101static const struct attribute_group *soc_attr_groups[] = {
102 &soc_attr_group,
103 NULL,
104};
105
106static void soc_release(struct device *dev)
107{
108 struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
109
110 kfree(soc_dev);
111}
112
113struct soc_device *soc_device_register(struct soc_device_attribute *soc_dev_attr)
114{
115 struct soc_device *soc_dev;
116 int ret;
117
118 soc_dev = kzalloc(sizeof(*soc_dev), GFP_KERNEL);
119 if (!soc_dev) {
120 ret = -ENOMEM;
121 goto out1;
122 }
123
124 /* Fetch a unique (reclaimable) SOC ID. */
125 do {
126 if (!ida_pre_get(&soc_ida, GFP_KERNEL)) {
127 ret = -ENOMEM;
128 goto out2;
129 }
130
131 spin_lock(&soc_lock);
132 ret = ida_get_new(&soc_ida, &soc_dev->soc_dev_num);
133 spin_unlock(&soc_lock);
134
135 } while (ret == -EAGAIN);
136
137 if (ret)
138 goto out2;
139
140 soc_dev->attr = soc_dev_attr;
141 soc_dev->dev.bus = &soc_bus_type;
142 soc_dev->dev.groups = soc_attr_groups;
143 soc_dev->dev.release = soc_release;
144
145 dev_set_name(&soc_dev->dev, "soc%d", soc_dev->soc_dev_num);
146
147 ret = device_register(&soc_dev->dev);
148 if (ret)
149 goto out3;
150
151 return soc_dev;
152
153out3:
154 ida_remove(&soc_ida, soc_dev->soc_dev_num);
155out2:
156 kfree(soc_dev);
157out1:
158 return ERR_PTR(ret);
159}
160
161/* Ensure soc_dev->attr is freed prior to calling soc_device_unregister. */
162void soc_device_unregister(struct soc_device *soc_dev)
163{
164 ida_remove(&soc_ida, soc_dev->soc_dev_num);
165
166 device_unregister(&soc_dev->dev);
167}
168
169static int __init soc_bus_register(void)
170{
171 spin_lock_init(&soc_lock);
172
173 return bus_register(&soc_bus_type);
174}
175core_initcall(soc_bus_register);
176
177static void __exit soc_bus_unregister(void)
178{
179 ida_destroy(&soc_ida);
180
181 bus_unregister(&soc_bus_type);
182}
183module_exit(soc_bus_unregister);
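
As a usage sketch for the new SoC bus, platform code could export its identification strings roughly as follows; the strings and the init function are hypothetical, and the attribute structure must stay valid while the device is registered:

#include <linux/err.h>
#include <linux/init.h>
#include <linux/sys_soc.h>

static struct soc_device_attribute my_soc_attr = {
	.machine  = "My Reference Board",
	.family   = "MySoC",
	.soc_id   = "mysoc1234",
	.revision = "1.0",
};

static int __init my_soc_info_init(void)
{
	struct soc_device *soc_dev;

	soc_dev = soc_device_register(&my_soc_attr);
	if (IS_ERR(soc_dev))
		return PTR_ERR(soc_dev);

	/* On-chip devices may now be parented to soc_device_to_device(soc_dev) */
	return 0;
}
device_initcall(my_soc_info_init);
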