Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/Kconfig              |    2
-rw-r--r--  drivers/base/Makefile             |    2
-rw-r--r--  drivers/base/devtmpfs.c           |  338
-rw-r--r--  drivers/base/iommu.c              |  124
-rw-r--r--  drivers/base/memory.c             |    1
-rw-r--r--  drivers/base/platform.c           |   23
-rw-r--r--  drivers/base/power/Makefile       |    1
-rw-r--r--  drivers/base/power/clock_ops.c    |  252
-rw-r--r--  drivers/base/power/domain.c       | 1273
-rw-r--r--  drivers/base/power/generic_ops.c  |   98
-rw-r--r--  drivers/base/power/main.c         |   87
-rw-r--r--  drivers/base/power/opp.c          |   17
-rw-r--r--  drivers/base/power/runtime.c      |   89
-rw-r--r--  drivers/base/power/sysfs.c        |    6
-rw-r--r--  drivers/base/power/trace.c        |    2
-rw-r--r--  drivers/base/regmap/Kconfig       |   13
-rw-r--r--  drivers/base/regmap/Makefile      |    3
-rw-r--r--  drivers/base/regmap/regmap-i2c.c  |  115
-rw-r--r--  drivers/base/regmap/regmap-spi.c  |   72
-rw-r--r--  drivers/base/regmap/regmap.c      |  455
-rw-r--r--  drivers/base/syscore.c            |    8
21 files changed, 2526 insertions, 455 deletions
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index fa64fa04feec..21cf46f45245 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -172,4 +172,6 @@ config SYS_HYPERVISOR
 	bool
 	default n
 
+source "drivers/base/regmap/Kconfig"
+
 endmenu
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 4c5701c15f53..99a375ad2cc9 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -13,11 +13,11 @@ obj-$(CONFIG_FW_LOADER) += firmware_class.o
 obj-$(CONFIG_NUMA)	+= node.o
 obj-$(CONFIG_MEMORY_HOTPLUG_SPARSE) += memory.o
 obj-$(CONFIG_SMP)	+= topology.o
-obj-$(CONFIG_IOMMU_API) += iommu.o
 ifeq ($(CONFIG_SYSFS),y)
 obj-$(CONFIG_MODULES)	+= module.o
 endif
 obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor.o
+obj-$(CONFIG_REGMAP)	+= regmap/
 
 ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
 
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 82bbb5967aa9..b89fffc1d777 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -21,12 +21,11 @@
 #include <linux/fs.h>
 #include <linux/shmem_fs.h>
 #include <linux/ramfs.h>
-#include <linux/cred.h>
 #include <linux/sched.h>
-#include <linux/init_task.h>
 #include <linux/slab.h>
+#include <linux/kthread.h>
 
-static struct vfsmount *dev_mnt;
+static struct task_struct *thread;
 
 #if defined CONFIG_DEVTMPFS_MOUNT
 static int mount_dev = 1;
@@ -34,7 +33,16 @@ static int mount_dev = 1;
 static int mount_dev;
 #endif
 
-static DEFINE_MUTEX(dirlock);
+static DEFINE_SPINLOCK(req_lock);
+
+static struct req {
+	struct req *next;
+	struct completion done;
+	int err;
+	const char *name;
+	mode_t mode;	/* 0 => delete */
+	struct device *dev;
+} *requests;
 
 static int __init mount_param(char *str)
 {
@@ -68,131 +76,152 @@ static inline int is_blockdev(struct device *dev)
68static inline int is_blockdev(struct device *dev) { return 0; } 76static inline int is_blockdev(struct device *dev) { return 0; }
69#endif 77#endif
70 78
79int devtmpfs_create_node(struct device *dev)
80{
81 const char *tmp = NULL;
82 struct req req;
83
84 if (!thread)
85 return 0;
86
87 req.mode = 0;
88 req.name = device_get_devnode(dev, &req.mode, &tmp);
89 if (!req.name)
90 return -ENOMEM;
91
92 if (req.mode == 0)
93 req.mode = 0600;
94 if (is_blockdev(dev))
95 req.mode |= S_IFBLK;
96 else
97 req.mode |= S_IFCHR;
98
99 req.dev = dev;
100
101 init_completion(&req.done);
102
103 spin_lock(&req_lock);
104 req.next = requests;
105 requests = &req;
106 spin_unlock(&req_lock);
107
108 wake_up_process(thread);
109 wait_for_completion(&req.done);
110
111 kfree(tmp);
112
113 return req.err;
114}
115
116int devtmpfs_delete_node(struct device *dev)
117{
118 const char *tmp = NULL;
119 struct req req;
120
121 if (!thread)
122 return 0;
123
124 req.name = device_get_devnode(dev, NULL, &tmp);
125 if (!req.name)
126 return -ENOMEM;
127
128 req.mode = 0;
129 req.dev = dev;
130
131 init_completion(&req.done);
132
133 spin_lock(&req_lock);
134 req.next = requests;
135 requests = &req;
136 spin_unlock(&req_lock);
137
138 wake_up_process(thread);
139 wait_for_completion(&req.done);
140
141 kfree(tmp);
142 return req.err;
143}
144
71static int dev_mkdir(const char *name, mode_t mode) 145static int dev_mkdir(const char *name, mode_t mode)
72{ 146{
73 struct nameidata nd;
74 struct dentry *dentry; 147 struct dentry *dentry;
148 struct path path;
75 int err; 149 int err;
76 150
77 err = vfs_path_lookup(dev_mnt->mnt_root, dev_mnt, 151 dentry = kern_path_create(AT_FDCWD, name, &path, 1);
78 name, LOOKUP_PARENT, &nd); 152 if (IS_ERR(dentry))
79 if (err) 153 return PTR_ERR(dentry);
80 return err; 154
81 155 err = vfs_mkdir(path.dentry->d_inode, dentry, mode);
82 dentry = lookup_create(&nd, 1); 156 if (!err)
83 if (!IS_ERR(dentry)) { 157 /* mark as kernel-created inode */
84 err = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode); 158 dentry->d_inode->i_private = &thread;
85 if (!err) 159 dput(dentry);
86 /* mark as kernel-created inode */ 160 mutex_unlock(&path.dentry->d_inode->i_mutex);
87 dentry->d_inode->i_private = &dev_mnt; 161 path_put(&path);
88 dput(dentry);
89 } else {
90 err = PTR_ERR(dentry);
91 }
92
93 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
94 path_put(&nd.path);
95 return err; 162 return err;
96} 163}
97 164
98static int create_path(const char *nodepath) 165static int create_path(const char *nodepath)
99{ 166{
167 char *path;
168 char *s;
100 int err; 169 int err;
101 170
102 mutex_lock(&dirlock); 171 /* parent directories do not exist, create them */
103 err = dev_mkdir(nodepath, 0755); 172 path = kstrdup(nodepath, GFP_KERNEL);
104 if (err == -ENOENT) { 173 if (!path)
105 char *path; 174 return -ENOMEM;
106 char *s; 175
107 176 s = path;
108 /* parent directories do not exist, create them */ 177 for (;;) {
109 path = kstrdup(nodepath, GFP_KERNEL); 178 s = strchr(s, '/');
110 if (!path) { 179 if (!s)
111 err = -ENOMEM; 180 break;
112 goto out; 181 s[0] = '\0';
113 } 182 err = dev_mkdir(path, 0755);
114 s = path; 183 if (err && err != -EEXIST)
115 for (;;) { 184 break;
116 s = strchr(s, '/'); 185 s[0] = '/';
117 if (!s) 186 s++;
118 break;
119 s[0] = '\0';
120 err = dev_mkdir(path, 0755);
121 if (err && err != -EEXIST)
122 break;
123 s[0] = '/';
124 s++;
125 }
126 kfree(path);
127 } 187 }
128out: 188 kfree(path);
129 mutex_unlock(&dirlock);
130 return err; 189 return err;
131} 190}
132 191
133int devtmpfs_create_node(struct device *dev) 192static int handle_create(const char *nodename, mode_t mode, struct device *dev)
134{ 193{
135 const char *tmp = NULL;
136 const char *nodename;
137 const struct cred *curr_cred;
138 mode_t mode = 0;
139 struct nameidata nd;
140 struct dentry *dentry; 194 struct dentry *dentry;
195 struct path path;
141 int err; 196 int err;
142 197
143 if (!dev_mnt) 198 dentry = kern_path_create(AT_FDCWD, nodename, &path, 0);
144 return 0; 199 if (dentry == ERR_PTR(-ENOENT)) {
145
146 nodename = device_get_devnode(dev, &mode, &tmp);
147 if (!nodename)
148 return -ENOMEM;
149
150 if (mode == 0)
151 mode = 0600;
152 if (is_blockdev(dev))
153 mode |= S_IFBLK;
154 else
155 mode |= S_IFCHR;
156
157 curr_cred = override_creds(&init_cred);
158
159 err = vfs_path_lookup(dev_mnt->mnt_root, dev_mnt,
160 nodename, LOOKUP_PARENT, &nd);
161 if (err == -ENOENT) {
162 create_path(nodename); 200 create_path(nodename);
163 err = vfs_path_lookup(dev_mnt->mnt_root, dev_mnt, 201 dentry = kern_path_create(AT_FDCWD, nodename, &path, 0);
164 nodename, LOOKUP_PARENT, &nd);
165 } 202 }
166 if (err) 203 if (IS_ERR(dentry))
167 goto out; 204 return PTR_ERR(dentry);
168 205
169 dentry = lookup_create(&nd, 0); 206 err = vfs_mknod(path.dentry->d_inode,
170 if (!IS_ERR(dentry)) { 207 dentry, mode, dev->devt);
171 err = vfs_mknod(nd.path.dentry->d_inode, 208 if (!err) {
172 dentry, mode, dev->devt); 209 struct iattr newattrs;
173 if (!err) { 210
174 struct iattr newattrs; 211 /* fixup possibly umasked mode */
175 212 newattrs.ia_mode = mode;
176 /* fixup possibly umasked mode */ 213 newattrs.ia_valid = ATTR_MODE;
177 newattrs.ia_mode = mode; 214 mutex_lock(&dentry->d_inode->i_mutex);
178 newattrs.ia_valid = ATTR_MODE; 215 notify_change(dentry, &newattrs);
179 mutex_lock(&dentry->d_inode->i_mutex); 216 mutex_unlock(&dentry->d_inode->i_mutex);
180 notify_change(dentry, &newattrs); 217
181 mutex_unlock(&dentry->d_inode->i_mutex); 218 /* mark as kernel-created inode */
182 219 dentry->d_inode->i_private = &thread;
183 /* mark as kernel-created inode */
184 dentry->d_inode->i_private = &dev_mnt;
185 }
186 dput(dentry);
187 } else {
188 err = PTR_ERR(dentry);
189 } 220 }
221 dput(dentry);
190 222
191 mutex_unlock(&nd.path.dentry->d_inode->i_mutex); 223 mutex_unlock(&path.dentry->d_inode->i_mutex);
192 path_put(&nd.path); 224 path_put(&path);
193out:
194 kfree(tmp);
195 revert_creds(curr_cred);
196 return err; 225 return err;
197} 226}
198 227
@@ -202,8 +231,7 @@ static int dev_rmdir(const char *name)
 	struct dentry *dentry;
 	int err;
 
-	err = vfs_path_lookup(dev_mnt->mnt_root, dev_mnt,
-			      name, LOOKUP_PARENT, &nd);
+	err = kern_path_parent(name, &nd);
 	if (err)
 		return err;
 
@@ -211,7 +239,7 @@ static int dev_rmdir(const char *name)
 	dentry = lookup_one_len(nd.last.name, nd.path.dentry, nd.last.len);
 	if (!IS_ERR(dentry)) {
 		if (dentry->d_inode) {
-			if (dentry->d_inode->i_private == &dev_mnt)
+			if (dentry->d_inode->i_private == &thread)
 				err = vfs_rmdir(nd.path.dentry->d_inode,
 						dentry);
 			else
@@ -238,7 +266,6 @@ static int delete_path(const char *nodepath)
 	if (!path)
 		return -ENOMEM;
 
-	mutex_lock(&dirlock);
 	for (;;) {
 		char *base;
 
@@ -250,7 +277,6 @@ static int delete_path(const char *nodepath)
 		if (err)
 			break;
 	}
-	mutex_unlock(&dirlock);
 
 	kfree(path);
 	return err;
@@ -259,7 +285,7 @@ static int delete_path(const char *nodepath)
 static int dev_mynode(struct device *dev, struct inode *inode, struct kstat *stat)
 {
 	/* did we create it */
-	if (inode->i_private != &dev_mnt)
+	if (inode->i_private != &thread)
 		return 0;
 
 	/* does the dev_t match */
@@ -277,29 +303,17 @@ static int dev_mynode(struct device *dev, struct inode *inode, struct kstat *sta
 	return 1;
 }
 
-int devtmpfs_delete_node(struct device *dev)
+static int handle_remove(const char *nodename, struct device *dev)
 {
-	const char *tmp = NULL;
-	const char *nodename;
-	const struct cred *curr_cred;
 	struct nameidata nd;
 	struct dentry *dentry;
 	struct kstat stat;
 	int deleted = 1;
 	int err;
 
-	if (!dev_mnt)
-		return 0;
-
-	nodename = device_get_devnode(dev, NULL, &tmp);
-	if (!nodename)
-		return -ENOMEM;
-
-	curr_cred = override_creds(&init_cred);
-	err = vfs_path_lookup(dev_mnt->mnt_root, dev_mnt,
-			      nodename, LOOKUP_PARENT, &nd);
+	err = kern_path_parent(nodename, &nd);
 	if (err)
-		goto out;
+		return err;
 
 	mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
 	dentry = lookup_one_len(nd.last.name, nd.path.dentry, nd.last.len);
@@ -337,9 +351,6 @@ int devtmpfs_delete_node(struct device *dev)
 	path_put(&nd.path);
 	if (deleted && strchr(nodename, '/'))
 		delete_path(nodename);
-out:
-	kfree(tmp);
-	revert_creds(curr_cred);
 	return err;
 }
 
@@ -354,7 +365,7 @@ int devtmpfs_mount(const char *mntdir)
 	if (!mount_dev)
 		return 0;
 
-	if (!dev_mnt)
+	if (!thread)
 		return 0;
 
 	err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
@@ -365,31 +376,80 @@ int devtmpfs_mount(const char *mntdir)
 	return err;
 }
 
+static __initdata DECLARE_COMPLETION(setup_done);
+
+static int handle(const char *name, mode_t mode, struct device *dev)
+{
+	if (mode)
+		return handle_create(name, mode, dev);
+	else
+		return handle_remove(name, dev);
+}
+
+static int devtmpfsd(void *p)
+{
+	char options[] = "mode=0755";
+	int *err = p;
+	*err = sys_unshare(CLONE_NEWNS);
+	if (*err)
+		goto out;
+	*err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
+	if (*err)
+		goto out;
+	sys_chdir("/..");	/* will traverse into overmounted root */
+	sys_chroot(".");
+	complete(&setup_done);
+	while (1) {
+		spin_lock(&req_lock);
+		while (requests) {
+			struct req *req = requests;
+			requests = NULL;
+			spin_unlock(&req_lock);
+			while (req) {
+				struct req *next = req->next;
+				req->err = handle(req->name, req->mode, req->dev);
+				complete(&req->done);
+				req = next;
+			}
+			spin_lock(&req_lock);
+		}
+		set_current_state(TASK_INTERRUPTIBLE);
+		spin_unlock(&req_lock);
+		schedule();
+		__set_current_state(TASK_RUNNING);
+	}
+	return 0;
+out:
+	complete(&setup_done);
+	return *err;
+}
+
 /*
  * Create devtmpfs instance, driver-core devices will add their device
  * nodes here.
  */
 int __init devtmpfs_init(void)
 {
-	int err;
-	struct vfsmount *mnt;
-	char options[] = "mode=0755";
-
-	err = register_filesystem(&dev_fs_type);
+	int err = register_filesystem(&dev_fs_type);
 	if (err) {
 		printk(KERN_ERR "devtmpfs: unable to register devtmpfs "
 			"type %i\n", err);
 		return err;
 	}
 
-	mnt = kern_mount_data(&dev_fs_type, options);
-	if (IS_ERR(mnt)) {
-		err = PTR_ERR(mnt);
+	thread = kthread_run(devtmpfsd, &err, "kdevtmpfs");
+	if (!IS_ERR(thread)) {
+		wait_for_completion(&setup_done);
+	} else {
+		err = PTR_ERR(thread);
+		thread = NULL;
+	}
+
+	if (err) {
 		printk(KERN_ERR "devtmpfs: unable to create devtmpfs %i\n", err);
 		unregister_filesystem(&dev_fs_type);
 		return err;
 	}
-	dev_mnt = mnt;
 
 	printk(KERN_INFO "devtmpfs: initialized\n");
 	return 0;
diff --git a/drivers/base/iommu.c b/drivers/base/iommu.c
deleted file mode 100644
index 6e6b6a11b3ce..000000000000
--- a/drivers/base/iommu.c
+++ /dev/null
@@ -1,124 +0,0 @@
1/*
2 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
3 * Author: Joerg Roedel <joerg.roedel@amd.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#include <linux/bug.h>
20#include <linux/types.h>
21#include <linux/module.h>
22#include <linux/slab.h>
23#include <linux/errno.h>
24#include <linux/iommu.h>
25
26static struct iommu_ops *iommu_ops;
27
28void register_iommu(struct iommu_ops *ops)
29{
30 if (iommu_ops)
31 BUG();
32
33 iommu_ops = ops;
34}
35
36bool iommu_found(void)
37{
38 return iommu_ops != NULL;
39}
40EXPORT_SYMBOL_GPL(iommu_found);
41
42struct iommu_domain *iommu_domain_alloc(void)
43{
44 struct iommu_domain *domain;
45 int ret;
46
47 domain = kmalloc(sizeof(*domain), GFP_KERNEL);
48 if (!domain)
49 return NULL;
50
51 ret = iommu_ops->domain_init(domain);
52 if (ret)
53 goto out_free;
54
55 return domain;
56
57out_free:
58 kfree(domain);
59
60 return NULL;
61}
62EXPORT_SYMBOL_GPL(iommu_domain_alloc);
63
64void iommu_domain_free(struct iommu_domain *domain)
65{
66 iommu_ops->domain_destroy(domain);
67 kfree(domain);
68}
69EXPORT_SYMBOL_GPL(iommu_domain_free);
70
71int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
72{
73 return iommu_ops->attach_dev(domain, dev);
74}
75EXPORT_SYMBOL_GPL(iommu_attach_device);
76
77void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
78{
79 iommu_ops->detach_dev(domain, dev);
80}
81EXPORT_SYMBOL_GPL(iommu_detach_device);
82
83phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
84 unsigned long iova)
85{
86 return iommu_ops->iova_to_phys(domain, iova);
87}
88EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
89
90int iommu_domain_has_cap(struct iommu_domain *domain,
91 unsigned long cap)
92{
93 return iommu_ops->domain_has_cap(domain, cap);
94}
95EXPORT_SYMBOL_GPL(iommu_domain_has_cap);
96
97int iommu_map(struct iommu_domain *domain, unsigned long iova,
98 phys_addr_t paddr, int gfp_order, int prot)
99{
100 unsigned long invalid_mask;
101 size_t size;
102
103 size = 0x1000UL << gfp_order;
104 invalid_mask = size - 1;
105
106 BUG_ON((iova | paddr) & invalid_mask);
107
108 return iommu_ops->map(domain, iova, paddr, gfp_order, prot);
109}
110EXPORT_SYMBOL_GPL(iommu_map);
111
112int iommu_unmap(struct iommu_domain *domain, unsigned long iova, int gfp_order)
113{
114 unsigned long invalid_mask;
115 size_t size;
116
117 size = 0x1000UL << gfp_order;
118 invalid_mask = size - 1;
119
120 BUG_ON(iova & invalid_mask);
121
122 return iommu_ops->unmap(domain, iova, gfp_order);
123}
124EXPORT_SYMBOL_GPL(iommu_unmap);
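The file above is deleted from drivers/base; the iommu_* API it implemented is unchanged at this point in the series. For context, a minimal sketch of how a caller used that API follows; the wrapper function and error handling are illustrative, only the iommu_* calls and their signatures come from the removed code.

#include <linux/device.h>
#include <linux/iommu.h>

/* Illustrative caller: 'dev', 'iova' and 'paddr' are assumed to be
 * provided by the calling driver. */
static int example_map_one_page(struct device *dev,
				unsigned long iova, phys_addr_t paddr)
{
	struct iommu_domain *domain;
	int ret;

	if (!iommu_found())		/* no IOMMU driver registered */
		return -ENODEV;

	domain = iommu_domain_alloc();
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, dev);
	if (ret)
		goto out_free;

	/* gfp_order 0 maps a single page; note the alignment BUG_ON above */
	ret = iommu_map(domain, iova, paddr, 0, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto out_detach;

	return 0;

out_detach:
	iommu_detach_device(domain, dev);
out_free:
	iommu_domain_free(domain);
	return ret;
}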
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 9f9b2359f718..45d7c8fc73bd 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -30,7 +30,6 @@
 static DEFINE_MUTEX(mem_sysfs_mutex);
 
 #define MEMORY_CLASS_NAME	"memory"
-#define MIN_MEMORY_BLOCK_SIZE	(1 << SECTION_SIZE_BITS)
 
 static int sections_per_block;
 
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 1c291af637b3..0cad9c7f6bb5 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -32,6 +32,25 @@ struct device platform_bus = {
 EXPORT_SYMBOL_GPL(platform_bus);
 
 /**
+ * arch_setup_pdev_archdata - Allow manipulation of archdata before its used
+ * @dev: platform device
+ *
+ * This is called before platform_device_add() such that any pdev_archdata may
+ * be setup before the platform_notifier is called.  So if a user needs to
+ * manipulate any relevant information in the pdev_archdata they can do:
+ *
+ * 	platform_devic_alloc()
+ * 	... manipulate ...
+ * 	platform_device_add()
+ *
+ * And if they don't care they can just call platform_device_register() and
+ * everything will just work out.
+ */
+void __weak arch_setup_pdev_archdata(struct platform_device *pdev)
+{
+}
+
+/**
  * platform_get_resource - get a resource for a device
  * @dev: platform device
  * @type: resource type
@@ -173,6 +192,7 @@ struct platform_device *platform_device_alloc(const char *name, int id)
 		pa->pdev.id = id;
 		device_initialize(&pa->pdev.dev);
 		pa->pdev.dev.release = platform_device_release;
+		arch_setup_pdev_archdata(&pa->pdev);
 	}
 
 	return pa ? &pa->pdev : NULL;
@@ -334,6 +354,7 @@ EXPORT_SYMBOL_GPL(platform_device_del);
 int platform_device_register(struct platform_device *pdev)
 {
 	device_initialize(&pdev->dev);
+	arch_setup_pdev_archdata(pdev);
 	return platform_device_add(pdev);
 }
 EXPORT_SYMBOL_GPL(platform_device_register);
@@ -367,7 +388,7 @@ EXPORT_SYMBOL_GPL(platform_device_unregister);
  *
  * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
  */
-struct platform_device *__init_or_module platform_device_register_resndata(
+struct platform_device *platform_device_register_resndata(
 		struct device *parent,
 		const char *name, int id,
 		const struct resource *res, unsigned int num,
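The new arch_setup_pdev_archdata() hook is declared __weak, so an architecture can override it to seed pdev_archdata before platform_device_add() runs, exactly as the kernel-doc above describes. A hedged sketch of both sides follows; the override body and the "example-dev" device are hypothetical, only the hook's prototype and the alloc/manipulate/add flow come from the patch.

#include <linux/init.h>
#include <linux/platform_device.h>

/* Hypothetical architecture-side override of the __weak hook added above. */
void arch_setup_pdev_archdata(struct platform_device *pdev)
{
	/* e.g. prime pdev->archdata with an arch-specific default here */
}

/* Caller-side flow: the hook runs inside platform_device_alloc() (and
 * platform_device_register()), before platform_device_add(), leaving room
 * to adjust archdata in between. */
static int __init example_register(void)
{
	struct platform_device *pdev;
	int ret;

	pdev = platform_device_alloc("example-dev", -1);
	if (!pdev)
		return -ENOMEM;

	/* ... manipulate pdev->archdata here if the architecture expects it ... */

	ret = platform_device_add(pdev);
	if (ret)
		platform_device_put(pdev);
	return ret;
}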
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 3647e114d0e7..2639ae79a372 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -3,6 +3,7 @@ obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o
 obj-$(CONFIG_PM_RUNTIME)	+= runtime.o
 obj-$(CONFIG_PM_TRACE_RTC)	+= trace.o
 obj-$(CONFIG_PM_OPP)	+= opp.o
+obj-$(CONFIG_PM_GENERIC_DOMAINS)	+= domain.o
 obj-$(CONFIG_HAVE_CLK)	+= clock_ops.o
 
 ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
\ No newline at end of file
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index c0dd09df7be8..a846b2f95cfb 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -15,9 +15,9 @@
 #include <linux/slab.h>
 #include <linux/err.h>
 
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
 
-struct pm_runtime_clk_data {
+struct pm_clk_data {
 	struct list_head clock_list;
 	struct mutex lock;
 };
@@ -36,25 +36,25 @@ struct pm_clock_entry {
36 enum pce_status status; 36 enum pce_status status;
37}; 37};
38 38
39static struct pm_runtime_clk_data *__to_prd(struct device *dev) 39static struct pm_clk_data *__to_pcd(struct device *dev)
40{ 40{
41 return dev ? dev->power.subsys_data : NULL; 41 return dev ? dev->power.subsys_data : NULL;
42} 42}
43 43
44/** 44/**
45 * pm_runtime_clk_add - Start using a device clock for runtime PM. 45 * pm_clk_add - Start using a device clock for power management.
46 * @dev: Device whose clock is going to be used for runtime PM. 46 * @dev: Device whose clock is going to be used for power management.
47 * @con_id: Connection ID of the clock. 47 * @con_id: Connection ID of the clock.
48 * 48 *
49 * Add the clock represented by @con_id to the list of clocks used for 49 * Add the clock represented by @con_id to the list of clocks used for
50 * the runtime PM of @dev. 50 * the power management of @dev.
51 */ 51 */
52int pm_runtime_clk_add(struct device *dev, const char *con_id) 52int pm_clk_add(struct device *dev, const char *con_id)
53{ 53{
54 struct pm_runtime_clk_data *prd = __to_prd(dev); 54 struct pm_clk_data *pcd = __to_pcd(dev);
55 struct pm_clock_entry *ce; 55 struct pm_clock_entry *ce;
56 56
57 if (!prd) 57 if (!pcd)
58 return -EINVAL; 58 return -EINVAL;
59 59
60 ce = kzalloc(sizeof(*ce), GFP_KERNEL); 60 ce = kzalloc(sizeof(*ce), GFP_KERNEL);
@@ -73,20 +73,20 @@ int pm_runtime_clk_add(struct device *dev, const char *con_id)
73 } 73 }
74 } 74 }
75 75
76 mutex_lock(&prd->lock); 76 mutex_lock(&pcd->lock);
77 list_add_tail(&ce->node, &prd->clock_list); 77 list_add_tail(&ce->node, &pcd->clock_list);
78 mutex_unlock(&prd->lock); 78 mutex_unlock(&pcd->lock);
79 return 0; 79 return 0;
80} 80}
81 81
82/** 82/**
83 * __pm_runtime_clk_remove - Destroy runtime PM clock entry. 83 * __pm_clk_remove - Destroy PM clock entry.
84 * @ce: Runtime PM clock entry to destroy. 84 * @ce: PM clock entry to destroy.
85 * 85 *
86 * This routine must be called under the mutex protecting the runtime PM list 86 * This routine must be called under the mutex protecting the PM list of clocks
87 * of clocks corresponding the the @ce's device. 87 * corresponding the the @ce's device.
88 */ 88 */
89static void __pm_runtime_clk_remove(struct pm_clock_entry *ce) 89static void __pm_clk_remove(struct pm_clock_entry *ce)
90{ 90{
91 if (!ce) 91 if (!ce)
92 return; 92 return;
@@ -108,95 +108,99 @@ static void __pm_runtime_clk_remove(struct pm_clock_entry *ce)
108} 108}
109 109
110/** 110/**
111 * pm_runtime_clk_remove - Stop using a device clock for runtime PM. 111 * pm_clk_remove - Stop using a device clock for power management.
112 * @dev: Device whose clock should not be used for runtime PM any more. 112 * @dev: Device whose clock should not be used for PM any more.
113 * @con_id: Connection ID of the clock. 113 * @con_id: Connection ID of the clock.
114 * 114 *
115 * Remove the clock represented by @con_id from the list of clocks used for 115 * Remove the clock represented by @con_id from the list of clocks used for
116 * the runtime PM of @dev. 116 * the power management of @dev.
117 */ 117 */
118void pm_runtime_clk_remove(struct device *dev, const char *con_id) 118void pm_clk_remove(struct device *dev, const char *con_id)
119{ 119{
120 struct pm_runtime_clk_data *prd = __to_prd(dev); 120 struct pm_clk_data *pcd = __to_pcd(dev);
121 struct pm_clock_entry *ce; 121 struct pm_clock_entry *ce;
122 122
123 if (!prd) 123 if (!pcd)
124 return; 124 return;
125 125
126 mutex_lock(&prd->lock); 126 mutex_lock(&pcd->lock);
127 127
128 list_for_each_entry(ce, &prd->clock_list, node) { 128 list_for_each_entry(ce, &pcd->clock_list, node) {
129 if (!con_id && !ce->con_id) { 129 if (!con_id && !ce->con_id) {
130 __pm_runtime_clk_remove(ce); 130 __pm_clk_remove(ce);
131 break; 131 break;
132 } else if (!con_id || !ce->con_id) { 132 } else if (!con_id || !ce->con_id) {
133 continue; 133 continue;
134 } else if (!strcmp(con_id, ce->con_id)) { 134 } else if (!strcmp(con_id, ce->con_id)) {
135 __pm_runtime_clk_remove(ce); 135 __pm_clk_remove(ce);
136 break; 136 break;
137 } 137 }
138 } 138 }
139 139
140 mutex_unlock(&prd->lock); 140 mutex_unlock(&pcd->lock);
141} 141}
142 142
143/** 143/**
144 * pm_runtime_clk_init - Initialize a device's list of runtime PM clocks. 144 * pm_clk_init - Initialize a device's list of power management clocks.
145 * @dev: Device to initialize the list of runtime PM clocks for. 145 * @dev: Device to initialize the list of PM clocks for.
146 * 146 *
147 * Allocate a struct pm_runtime_clk_data object, initialize its lock member and 147 * Allocate a struct pm_clk_data object, initialize its lock member and
148 * make the @dev's power.subsys_data field point to it. 148 * make the @dev's power.subsys_data field point to it.
149 */ 149 */
150int pm_runtime_clk_init(struct device *dev) 150int pm_clk_init(struct device *dev)
151{ 151{
152 struct pm_runtime_clk_data *prd; 152 struct pm_clk_data *pcd;
153 153
154 prd = kzalloc(sizeof(*prd), GFP_KERNEL); 154 pcd = kzalloc(sizeof(*pcd), GFP_KERNEL);
155 if (!prd) { 155 if (!pcd) {
156 dev_err(dev, "Not enough memory fo runtime PM data.\n"); 156 dev_err(dev, "Not enough memory for PM clock data.\n");
157 return -ENOMEM; 157 return -ENOMEM;
158 } 158 }
159 159
160 INIT_LIST_HEAD(&prd->clock_list); 160 INIT_LIST_HEAD(&pcd->clock_list);
161 mutex_init(&prd->lock); 161 mutex_init(&pcd->lock);
162 dev->power.subsys_data = prd; 162 dev->power.subsys_data = pcd;
163 return 0; 163 return 0;
164} 164}
165 165
166/** 166/**
167 * pm_runtime_clk_destroy - Destroy a device's list of runtime PM clocks. 167 * pm_clk_destroy - Destroy a device's list of power management clocks.
168 * @dev: Device to destroy the list of runtime PM clocks for. 168 * @dev: Device to destroy the list of PM clocks for.
169 * 169 *
170 * Clear the @dev's power.subsys_data field, remove the list of clock entries 170 * Clear the @dev's power.subsys_data field, remove the list of clock entries
171 * from the struct pm_runtime_clk_data object pointed to by it before and free 171 * from the struct pm_clk_data object pointed to by it before and free
172 * that object. 172 * that object.
173 */ 173 */
174void pm_runtime_clk_destroy(struct device *dev) 174void pm_clk_destroy(struct device *dev)
175{ 175{
176 struct pm_runtime_clk_data *prd = __to_prd(dev); 176 struct pm_clk_data *pcd = __to_pcd(dev);
177 struct pm_clock_entry *ce, *c; 177 struct pm_clock_entry *ce, *c;
178 178
179 if (!prd) 179 if (!pcd)
180 return; 180 return;
181 181
182 dev->power.subsys_data = NULL; 182 dev->power.subsys_data = NULL;
183 183
184 mutex_lock(&prd->lock); 184 mutex_lock(&pcd->lock);
185 185
186 list_for_each_entry_safe_reverse(ce, c, &prd->clock_list, node) 186 list_for_each_entry_safe_reverse(ce, c, &pcd->clock_list, node)
187 __pm_runtime_clk_remove(ce); 187 __pm_clk_remove(ce);
188 188
189 mutex_unlock(&prd->lock); 189 mutex_unlock(&pcd->lock);
190 190
191 kfree(prd); 191 kfree(pcd);
192} 192}
193 193
194#endif /* CONFIG_PM */
195
196#ifdef CONFIG_PM_RUNTIME
197
194/** 198/**
195 * pm_runtime_clk_acquire - Acquire a device clock. 199 * pm_clk_acquire - Acquire a device clock.
196 * @dev: Device whose clock is to be acquired. 200 * @dev: Device whose clock is to be acquired.
197 * @con_id: Connection ID of the clock. 201 * @con_id: Connection ID of the clock.
198 */ 202 */
199static void pm_runtime_clk_acquire(struct device *dev, 203static void pm_clk_acquire(struct device *dev,
200 struct pm_clock_entry *ce) 204 struct pm_clock_entry *ce)
201{ 205{
202 ce->clk = clk_get(dev, ce->con_id); 206 ce->clk = clk_get(dev, ce->con_id);
@@ -209,24 +213,24 @@ static void pm_runtime_clk_acquire(struct device *dev,
209} 213}
210 214
211/** 215/**
212 * pm_runtime_clk_suspend - Disable clocks in a device's runtime PM clock list. 216 * pm_clk_suspend - Disable clocks in a device's PM clock list.
213 * @dev: Device to disable the clocks for. 217 * @dev: Device to disable the clocks for.
214 */ 218 */
215int pm_runtime_clk_suspend(struct device *dev) 219int pm_clk_suspend(struct device *dev)
216{ 220{
217 struct pm_runtime_clk_data *prd = __to_prd(dev); 221 struct pm_clk_data *pcd = __to_pcd(dev);
218 struct pm_clock_entry *ce; 222 struct pm_clock_entry *ce;
219 223
220 dev_dbg(dev, "%s()\n", __func__); 224 dev_dbg(dev, "%s()\n", __func__);
221 225
222 if (!prd) 226 if (!pcd)
223 return 0; 227 return 0;
224 228
225 mutex_lock(&prd->lock); 229 mutex_lock(&pcd->lock);
226 230
227 list_for_each_entry_reverse(ce, &prd->clock_list, node) { 231 list_for_each_entry_reverse(ce, &pcd->clock_list, node) {
228 if (ce->status == PCE_STATUS_NONE) 232 if (ce->status == PCE_STATUS_NONE)
229 pm_runtime_clk_acquire(dev, ce); 233 pm_clk_acquire(dev, ce);
230 234
231 if (ce->status < PCE_STATUS_ERROR) { 235 if (ce->status < PCE_STATUS_ERROR) {
232 clk_disable(ce->clk); 236 clk_disable(ce->clk);
@@ -234,30 +238,30 @@ int pm_runtime_clk_suspend(struct device *dev)
234 } 238 }
235 } 239 }
236 240
237 mutex_unlock(&prd->lock); 241 mutex_unlock(&pcd->lock);
238 242
239 return 0; 243 return 0;
240} 244}
241 245
242/** 246/**
243 * pm_runtime_clk_resume - Enable clocks in a device's runtime PM clock list. 247 * pm_clk_resume - Enable clocks in a device's PM clock list.
244 * @dev: Device to enable the clocks for. 248 * @dev: Device to enable the clocks for.
245 */ 249 */
246int pm_runtime_clk_resume(struct device *dev) 250int pm_clk_resume(struct device *dev)
247{ 251{
248 struct pm_runtime_clk_data *prd = __to_prd(dev); 252 struct pm_clk_data *pcd = __to_pcd(dev);
249 struct pm_clock_entry *ce; 253 struct pm_clock_entry *ce;
250 254
251 dev_dbg(dev, "%s()\n", __func__); 255 dev_dbg(dev, "%s()\n", __func__);
252 256
253 if (!prd) 257 if (!pcd)
254 return 0; 258 return 0;
255 259
256 mutex_lock(&prd->lock); 260 mutex_lock(&pcd->lock);
257 261
258 list_for_each_entry(ce, &prd->clock_list, node) { 262 list_for_each_entry(ce, &pcd->clock_list, node) {
259 if (ce->status == PCE_STATUS_NONE) 263 if (ce->status == PCE_STATUS_NONE)
260 pm_runtime_clk_acquire(dev, ce); 264 pm_clk_acquire(dev, ce);
261 265
262 if (ce->status < PCE_STATUS_ERROR) { 266 if (ce->status < PCE_STATUS_ERROR) {
263 clk_enable(ce->clk); 267 clk_enable(ce->clk);
@@ -265,33 +269,33 @@ int pm_runtime_clk_resume(struct device *dev)
265 } 269 }
266 } 270 }
267 271
268 mutex_unlock(&prd->lock); 272 mutex_unlock(&pcd->lock);
269 273
270 return 0; 274 return 0;
271} 275}
272 276
273/** 277/**
274 * pm_runtime_clk_notify - Notify routine for device addition and removal. 278 * pm_clk_notify - Notify routine for device addition and removal.
275 * @nb: Notifier block object this function is a member of. 279 * @nb: Notifier block object this function is a member of.
276 * @action: Operation being carried out by the caller. 280 * @action: Operation being carried out by the caller.
277 * @data: Device the routine is being run for. 281 * @data: Device the routine is being run for.
278 * 282 *
279 * For this function to work, @nb must be a member of an object of type 283 * For this function to work, @nb must be a member of an object of type
280 * struct pm_clk_notifier_block containing all of the requisite data. 284 * struct pm_clk_notifier_block containing all of the requisite data.
281 * Specifically, the pwr_domain member of that object is copied to the device's 285 * Specifically, the pm_domain member of that object is copied to the device's
282 * pwr_domain field and its con_ids member is used to populate the device's list 286 * pm_domain field and its con_ids member is used to populate the device's list
283 * of runtime PM clocks, depending on @action. 287 * of PM clocks, depending on @action.
284 * 288 *
285 * If the device's pwr_domain field is already populated with a value different 289 * If the device's pm_domain field is already populated with a value different
286 * from the one stored in the struct pm_clk_notifier_block object, the function 290 * from the one stored in the struct pm_clk_notifier_block object, the function
287 * does nothing. 291 * does nothing.
288 */ 292 */
289static int pm_runtime_clk_notify(struct notifier_block *nb, 293static int pm_clk_notify(struct notifier_block *nb,
290 unsigned long action, void *data) 294 unsigned long action, void *data)
291{ 295{
292 struct pm_clk_notifier_block *clknb; 296 struct pm_clk_notifier_block *clknb;
293 struct device *dev = data; 297 struct device *dev = data;
294 char *con_id; 298 char **con_id;
295 int error; 299 int error;
296 300
297 dev_dbg(dev, "%s() %ld\n", __func__, action); 301 dev_dbg(dev, "%s() %ld\n", __func__, action);
@@ -300,28 +304,28 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
300 304
301 switch (action) { 305 switch (action) {
302 case BUS_NOTIFY_ADD_DEVICE: 306 case BUS_NOTIFY_ADD_DEVICE:
303 if (dev->pwr_domain) 307 if (dev->pm_domain)
304 break; 308 break;
305 309
306 error = pm_runtime_clk_init(dev); 310 error = pm_clk_init(dev);
307 if (error) 311 if (error)
308 break; 312 break;
309 313
310 dev->pwr_domain = clknb->pwr_domain; 314 dev->pm_domain = clknb->pm_domain;
311 if (clknb->con_ids[0]) { 315 if (clknb->con_ids[0]) {
312 for (con_id = clknb->con_ids[0]; *con_id; con_id++) 316 for (con_id = clknb->con_ids; *con_id; con_id++)
313 pm_runtime_clk_add(dev, con_id); 317 pm_clk_add(dev, *con_id);
314 } else { 318 } else {
315 pm_runtime_clk_add(dev, NULL); 319 pm_clk_add(dev, NULL);
316 } 320 }
317 321
318 break; 322 break;
319 case BUS_NOTIFY_DEL_DEVICE: 323 case BUS_NOTIFY_DEL_DEVICE:
320 if (dev->pwr_domain != clknb->pwr_domain) 324 if (dev->pm_domain != clknb->pm_domain)
321 break; 325 break;
322 326
323 dev->pwr_domain = NULL; 327 dev->pm_domain = NULL;
324 pm_runtime_clk_destroy(dev); 328 pm_clk_destroy(dev);
325 break; 329 break;
326 } 330 }
327 331
@@ -330,6 +334,60 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
330 334
331#else /* !CONFIG_PM_RUNTIME */ 335#else /* !CONFIG_PM_RUNTIME */
332 336
337#ifdef CONFIG_PM
338
339/**
340 * pm_clk_suspend - Disable clocks in a device's PM clock list.
341 * @dev: Device to disable the clocks for.
342 */
343int pm_clk_suspend(struct device *dev)
344{
345 struct pm_clk_data *pcd = __to_pcd(dev);
346 struct pm_clock_entry *ce;
347
348 dev_dbg(dev, "%s()\n", __func__);
349
350 /* If there is no driver, the clocks are already disabled. */
351 if (!pcd || !dev->driver)
352 return 0;
353
354 mutex_lock(&pcd->lock);
355
356 list_for_each_entry_reverse(ce, &pcd->clock_list, node)
357 clk_disable(ce->clk);
358
359 mutex_unlock(&pcd->lock);
360
361 return 0;
362}
363
364/**
365 * pm_clk_resume - Enable clocks in a device's PM clock list.
366 * @dev: Device to enable the clocks for.
367 */
368int pm_clk_resume(struct device *dev)
369{
370 struct pm_clk_data *pcd = __to_pcd(dev);
371 struct pm_clock_entry *ce;
372
373 dev_dbg(dev, "%s()\n", __func__);
374
375 /* If there is no driver, the clocks should remain disabled. */
376 if (!pcd || !dev->driver)
377 return 0;
378
379 mutex_lock(&pcd->lock);
380
381 list_for_each_entry(ce, &pcd->clock_list, node)
382 clk_enable(ce->clk);
383
384 mutex_unlock(&pcd->lock);
385
386 return 0;
387}
388
389#endif /* CONFIG_PM */
390
333/** 391/**
334 * enable_clock - Enable a device clock. 392 * enable_clock - Enable a device clock.
335 * @dev: Device whose clock is to be enabled. 393 * @dev: Device whose clock is to be enabled.
@@ -365,7 +423,7 @@ static void disable_clock(struct device *dev, const char *con_id)
 }
 
 /**
- * pm_runtime_clk_notify - Notify routine for device addition and removal.
+ * pm_clk_notify - Notify routine for device addition and removal.
  * @nb: Notifier block object this function is a member of.
  * @action: Operation being carried out by the caller.
  * @data: Device the routine is being run for.
@@ -375,30 +433,30 @@ static void disable_clock(struct device *dev, const char *con_id)
375 * Specifically, the con_ids member of that object is used to enable or disable 433 * Specifically, the con_ids member of that object is used to enable or disable
376 * the device's clocks, depending on @action. 434 * the device's clocks, depending on @action.
377 */ 435 */
378static int pm_runtime_clk_notify(struct notifier_block *nb, 436static int pm_clk_notify(struct notifier_block *nb,
379 unsigned long action, void *data) 437 unsigned long action, void *data)
380{ 438{
381 struct pm_clk_notifier_block *clknb; 439 struct pm_clk_notifier_block *clknb;
382 struct device *dev = data; 440 struct device *dev = data;
383 char *con_id; 441 char **con_id;
384 442
385 dev_dbg(dev, "%s() %ld\n", __func__, action); 443 dev_dbg(dev, "%s() %ld\n", __func__, action);
386 444
387 clknb = container_of(nb, struct pm_clk_notifier_block, nb); 445 clknb = container_of(nb, struct pm_clk_notifier_block, nb);
388 446
389 switch (action) { 447 switch (action) {
390 case BUS_NOTIFY_ADD_DEVICE: 448 case BUS_NOTIFY_BIND_DRIVER:
391 if (clknb->con_ids[0]) { 449 if (clknb->con_ids[0]) {
392 for (con_id = clknb->con_ids[0]; *con_id; con_id++) 450 for (con_id = clknb->con_ids; *con_id; con_id++)
393 enable_clock(dev, con_id); 451 enable_clock(dev, *con_id);
394 } else { 452 } else {
395 enable_clock(dev, NULL); 453 enable_clock(dev, NULL);
396 } 454 }
397 break; 455 break;
398 case BUS_NOTIFY_DEL_DEVICE: 456 case BUS_NOTIFY_UNBOUND_DRIVER:
399 if (clknb->con_ids[0]) { 457 if (clknb->con_ids[0]) {
400 for (con_id = clknb->con_ids[0]; *con_id; con_id++) 458 for (con_id = clknb->con_ids; *con_id; con_id++)
401 disable_clock(dev, con_id); 459 disable_clock(dev, *con_id);
402 } else { 460 } else {
403 disable_clock(dev, NULL); 461 disable_clock(dev, NULL);
404 } 462 }
@@ -411,21 +469,21 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
 #endif /* !CONFIG_PM_RUNTIME */
 
 /**
- * pm_runtime_clk_add_notifier - Add bus type notifier for runtime PM clocks.
+ * pm_clk_add_notifier - Add bus type notifier for power management clocks.
  * @bus: Bus type to add the notifier to.
  * @clknb: Notifier to be added to the given bus type.
  *
  * The nb member of @clknb is not expected to be initialized and its
- * notifier_call member will be replaced with pm_runtime_clk_notify(). However,
+ * notifier_call member will be replaced with pm_clk_notify(). However,
  * the remaining members of @clknb should be populated prior to calling this
  * routine.
  */
-void pm_runtime_clk_add_notifier(struct bus_type *bus,
+void pm_clk_add_notifier(struct bus_type *bus,
 				 struct pm_clk_notifier_block *clknb)
 {
 	if (!bus || !clknb)
 		return;
 
-	clknb->nb.notifier_call = pm_runtime_clk_notify;
+	clknb->nb.notifier_call = pm_clk_notify;
 	bus_register_notifier(bus, &clknb->nb);
 }
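With the rename from pm_runtime_clk_* to pm_clk_*, bus or platform code wires the clock handling up by registering a struct pm_clk_notifier_block through pm_clk_add_notifier(); on device addition the notifier assigns the pm_domain and populates the device's clock list from con_ids, as shown in pm_clk_notify() above. A minimal sketch of such a consumer follows; the platform_bus_type target, the "fck"/"ick" connection IDs and all example_* names are assumptions for illustration, and the header providing the pm_clk_* declarations at this point in the series is assumed to be pm_runtime.h.

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>	/* pm_clk_* declarations, location assumed */

/* PM domain whose runtime callbacks gate the listed clocks. */
static struct dev_pm_domain example_pm_domain = {
	.ops = {
		.runtime_suspend = pm_clk_suspend,
		.runtime_resume  = pm_clk_resume,
	},
};

/* NULL-terminated list of clock connection IDs to manage per device. */
static struct pm_clk_notifier_block example_clk_nb = {
	.pm_domain = &example_pm_domain,
	.con_ids = { "fck", "ick", NULL },
};

static int __init example_pm_init(void)
{
	pm_clk_add_notifier(&platform_bus_type, &example_clk_nb);
	return 0;
}
core_initcall(example_pm_init);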
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
new file mode 100644
index 000000000000..be8714aa9dd6
--- /dev/null
+++ b/drivers/base/power/domain.c
@@ -0,0 +1,1273 @@
1/*
2 * drivers/base/power/domain.c - Common code related to device power domains.
3 *
4 * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
5 *
6 * This file is released under the GPLv2.
7 */
8
9#include <linux/init.h>
10#include <linux/kernel.h>
11#include <linux/io.h>
12#include <linux/pm_runtime.h>
13#include <linux/pm_domain.h>
14#include <linux/slab.h>
15#include <linux/err.h>
16#include <linux/sched.h>
17#include <linux/suspend.h>
18
19static LIST_HEAD(gpd_list);
20static DEFINE_MUTEX(gpd_list_lock);
21
22#ifdef CONFIG_PM
23
24static struct generic_pm_domain *dev_to_genpd(struct device *dev)
25{
26 if (IS_ERR_OR_NULL(dev->pm_domain))
27 return ERR_PTR(-EINVAL);
28
29 return pd_to_genpd(dev->pm_domain);
30}
31
32static void genpd_sd_counter_dec(struct generic_pm_domain *genpd)
33{
34 if (!WARN_ON(genpd->sd_count == 0))
35 genpd->sd_count--;
36}
37
38static void genpd_acquire_lock(struct generic_pm_domain *genpd)
39{
40 DEFINE_WAIT(wait);
41
42 mutex_lock(&genpd->lock);
43 /*
44 * Wait for the domain to transition into either the active,
45 * or the power off state.
46 */
47 for (;;) {
48 prepare_to_wait(&genpd->status_wait_queue, &wait,
49 TASK_UNINTERRUPTIBLE);
50 if (genpd->status == GPD_STATE_ACTIVE
51 || genpd->status == GPD_STATE_POWER_OFF)
52 break;
53 mutex_unlock(&genpd->lock);
54
55 schedule();
56
57 mutex_lock(&genpd->lock);
58 }
59 finish_wait(&genpd->status_wait_queue, &wait);
60}
61
62static void genpd_release_lock(struct generic_pm_domain *genpd)
63{
64 mutex_unlock(&genpd->lock);
65}
66
67static void genpd_set_active(struct generic_pm_domain *genpd)
68{
69 if (genpd->resume_count == 0)
70 genpd->status = GPD_STATE_ACTIVE;
71}
72
73/**
74 * pm_genpd_poweron - Restore power to a given PM domain and its parents.
75 * @genpd: PM domain to power up.
76 *
77 * Restore power to @genpd and all of its parents so that it is possible to
78 * resume a device belonging to it.
79 */
80int pm_genpd_poweron(struct generic_pm_domain *genpd)
81{
82 struct generic_pm_domain *parent = genpd->parent;
83 DEFINE_WAIT(wait);
84 int ret = 0;
85
86 start:
87 if (parent) {
88 genpd_acquire_lock(parent);
89 mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
90 } else {
91 mutex_lock(&genpd->lock);
92 }
93
94 if (genpd->status == GPD_STATE_ACTIVE
95 || (genpd->prepared_count > 0 && genpd->suspend_power_off))
96 goto out;
97
98 if (genpd->status != GPD_STATE_POWER_OFF) {
99 genpd_set_active(genpd);
100 goto out;
101 }
102
103 if (parent && parent->status != GPD_STATE_ACTIVE) {
104 mutex_unlock(&genpd->lock);
105 genpd_release_lock(parent);
106
107 ret = pm_genpd_poweron(parent);
108 if (ret)
109 return ret;
110
111 goto start;
112 }
113
114 if (genpd->power_on) {
115 int ret = genpd->power_on(genpd);
116 if (ret)
117 goto out;
118 }
119
120 genpd_set_active(genpd);
121 if (parent)
122 parent->sd_count++;
123
124 out:
125 mutex_unlock(&genpd->lock);
126 if (parent)
127 genpd_release_lock(parent);
128
129 return ret;
130}
131
132#endif /* CONFIG_PM */
133
134#ifdef CONFIG_PM_RUNTIME
135
136/**
137 * __pm_genpd_save_device - Save the pre-suspend state of a device.
138 * @dle: Device list entry of the device to save the state of.
139 * @genpd: PM domain the device belongs to.
140 */
141static int __pm_genpd_save_device(struct dev_list_entry *dle,
142 struct generic_pm_domain *genpd)
143 __releases(&genpd->lock) __acquires(&genpd->lock)
144{
145 struct device *dev = dle->dev;
146 struct device_driver *drv = dev->driver;
147 int ret = 0;
148
149 if (dle->need_restore)
150 return 0;
151
152 mutex_unlock(&genpd->lock);
153
154 if (drv && drv->pm && drv->pm->runtime_suspend) {
155 if (genpd->start_device)
156 genpd->start_device(dev);
157
158 ret = drv->pm->runtime_suspend(dev);
159
160 if (genpd->stop_device)
161 genpd->stop_device(dev);
162 }
163
164 mutex_lock(&genpd->lock);
165
166 if (!ret)
167 dle->need_restore = true;
168
169 return ret;
170}
171
172/**
173 * __pm_genpd_restore_device - Restore the pre-suspend state of a device.
174 * @dle: Device list entry of the device to restore the state of.
175 * @genpd: PM domain the device belongs to.
176 */
177static void __pm_genpd_restore_device(struct dev_list_entry *dle,
178 struct generic_pm_domain *genpd)
179 __releases(&genpd->lock) __acquires(&genpd->lock)
180{
181 struct device *dev = dle->dev;
182 struct device_driver *drv = dev->driver;
183
184 if (!dle->need_restore)
185 return;
186
187 mutex_unlock(&genpd->lock);
188
189 if (drv && drv->pm && drv->pm->runtime_resume) {
190 if (genpd->start_device)
191 genpd->start_device(dev);
192
193 drv->pm->runtime_resume(dev);
194
195 if (genpd->stop_device)
196 genpd->stop_device(dev);
197 }
198
199 mutex_lock(&genpd->lock);
200
201 dle->need_restore = false;
202}
203
204/**
205 * genpd_abort_poweroff - Check if a PM domain power off should be aborted.
206 * @genpd: PM domain to check.
207 *
208 * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during
209 * a "power off" operation, which means that a "power on" has occured in the
210 * meantime, or if its resume_count field is different from zero, which means
211 * that one of its devices has been resumed in the meantime.
212 */
213static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
214{
215 return genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0;
216}
217
218/**
219 * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
220 * @genpd: PM domait to power off.
221 *
222 * Queue up the execution of pm_genpd_poweroff() unless it's already been done
223 * before.
224 */
225void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
226{
227 if (!work_pending(&genpd->power_off_work))
228 queue_work(pm_wq, &genpd->power_off_work);
229}
230
231/**
232 * pm_genpd_poweroff - Remove power from a given PM domain.
233 * @genpd: PM domain to power down.
234 *
235 * If all of the @genpd's devices have been suspended and all of its subdomains
236 * have been powered down, run the runtime suspend callbacks provided by all of
237 * the @genpd's devices' drivers and remove power from @genpd.
238 */
239static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
240 __releases(&genpd->lock) __acquires(&genpd->lock)
241{
242 struct generic_pm_domain *parent;
243 struct dev_list_entry *dle;
244 unsigned int not_suspended;
245 int ret = 0;
246
247 start:
248 /*
249 * Do not try to power off the domain in the following situations:
250 * (1) The domain is already in the "power off" state.
251 * (2) System suspend is in progress.
252 * (3) One of the domain's devices is being resumed right now.
253 */
254 if (genpd->status == GPD_STATE_POWER_OFF || genpd->prepared_count > 0
255 || genpd->resume_count > 0)
256 return 0;
257
258 if (genpd->sd_count > 0)
259 return -EBUSY;
260
261 not_suspended = 0;
262 list_for_each_entry(dle, &genpd->dev_list, node)
263 if (dle->dev->driver && !pm_runtime_suspended(dle->dev))
264 not_suspended++;
265
266 if (not_suspended > genpd->in_progress)
267 return -EBUSY;
268
269 if (genpd->poweroff_task) {
270 /*
271 * Another instance of pm_genpd_poweroff() is executing
272 * callbacks, so tell it to start over and return.
273 */
274 genpd->status = GPD_STATE_REPEAT;
275 return 0;
276 }
277
278 if (genpd->gov && genpd->gov->power_down_ok) {
279 if (!genpd->gov->power_down_ok(&genpd->domain))
280 return -EAGAIN;
281 }
282
283 genpd->status = GPD_STATE_BUSY;
284 genpd->poweroff_task = current;
285
286 list_for_each_entry_reverse(dle, &genpd->dev_list, node) {
287 ret = __pm_genpd_save_device(dle, genpd);
288 if (ret) {
289 genpd_set_active(genpd);
290 goto out;
291 }
292
293 if (genpd_abort_poweroff(genpd))
294 goto out;
295
296 if (genpd->status == GPD_STATE_REPEAT) {
297 genpd->poweroff_task = NULL;
298 goto start;
299 }
300 }
301
302 parent = genpd->parent;
303 if (parent) {
304 mutex_unlock(&genpd->lock);
305
306 genpd_acquire_lock(parent);
307 mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
308
309 if (genpd_abort_poweroff(genpd)) {
310 genpd_release_lock(parent);
311 goto out;
312 }
313 }
314
315 if (genpd->power_off) {
316 ret = genpd->power_off(genpd);
317 if (ret == -EBUSY) {
318 genpd_set_active(genpd);
319 if (parent)
320 genpd_release_lock(parent);
321
322 goto out;
323 }
324 }
325
326 genpd->status = GPD_STATE_POWER_OFF;
327
328 if (parent) {
329 genpd_sd_counter_dec(parent);
330 if (parent->sd_count == 0)
331 genpd_queue_power_off_work(parent);
332
333 genpd_release_lock(parent);
334 }
335
336 out:
337 genpd->poweroff_task = NULL;
338 wake_up_all(&genpd->status_wait_queue);
339 return ret;
340}
341
342/**
343 * genpd_power_off_work_fn - Power off PM domain whose subdomain count is 0.
344 * @work: Work structure used for scheduling the execution of this function.
345 */
346static void genpd_power_off_work_fn(struct work_struct *work)
347{
348 struct generic_pm_domain *genpd;
349
350 genpd = container_of(work, struct generic_pm_domain, power_off_work);
351
352 genpd_acquire_lock(genpd);
353 pm_genpd_poweroff(genpd);
354 genpd_release_lock(genpd);
355}
356
357/**
358 * pm_genpd_runtime_suspend - Suspend a device belonging to I/O PM domain.
359 * @dev: Device to suspend.
360 *
361 * Carry out a runtime suspend of a device under the assumption that its
362 * pm_domain field points to the domain member of an object of type
363 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
364 */
365static int pm_genpd_runtime_suspend(struct device *dev)
366{
367 struct generic_pm_domain *genpd;
368
369 dev_dbg(dev, "%s()\n", __func__);
370
371 genpd = dev_to_genpd(dev);
372 if (IS_ERR(genpd))
373 return -EINVAL;
374
375 if (genpd->stop_device) {
376 int ret = genpd->stop_device(dev);
377 if (ret)
378 return ret;
379 }
380
381 mutex_lock(&genpd->lock);
382 genpd->in_progress++;
383 pm_genpd_poweroff(genpd);
384 genpd->in_progress--;
385 mutex_unlock(&genpd->lock);
386
387 return 0;
388}
389
390/**
391 * __pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
392 * @dev: Device to resume.
393 * @genpd: PM domain the device belongs to.
394 */
395static void __pm_genpd_runtime_resume(struct device *dev,
396 struct generic_pm_domain *genpd)
397{
398 struct dev_list_entry *dle;
399
400 list_for_each_entry(dle, &genpd->dev_list, node) {
401 if (dle->dev == dev) {
402 __pm_genpd_restore_device(dle, genpd);
403 break;
404 }
405 }
406}
407
408/**
409 * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
410 * @dev: Device to resume.
411 *
412 * Carry out a runtime resume of a device under the assumption that its
413 * pm_domain field points to the domain member of an object of type
414 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
415 */
416static int pm_genpd_runtime_resume(struct device *dev)
417{
418 struct generic_pm_domain *genpd;
419 DEFINE_WAIT(wait);
420 int ret;
421
422 dev_dbg(dev, "%s()\n", __func__);
423
424 genpd = dev_to_genpd(dev);
425 if (IS_ERR(genpd))
426 return -EINVAL;
427
428 ret = pm_genpd_poweron(genpd);
429 if (ret)
430 return ret;
431
432 mutex_lock(&genpd->lock);
433 genpd->status = GPD_STATE_BUSY;
434 genpd->resume_count++;
435 for (;;) {
436 prepare_to_wait(&genpd->status_wait_queue, &wait,
437 TASK_UNINTERRUPTIBLE);
438 /*
439 * If current is the powering off task, we have been called
440 * reentrantly from one of the device callbacks, so we should
441 * not wait.
442 */
443 if (!genpd->poweroff_task || genpd->poweroff_task == current)
444 break;
445 mutex_unlock(&genpd->lock);
446
447 schedule();
448
449 mutex_lock(&genpd->lock);
450 }
451 finish_wait(&genpd->status_wait_queue, &wait);
452 __pm_genpd_runtime_resume(dev, genpd);
453 genpd->resume_count--;
454 genpd_set_active(genpd);
455 wake_up_all(&genpd->status_wait_queue);
456 mutex_unlock(&genpd->lock);
457
458 if (genpd->start_device)
459 genpd->start_device(dev);
460
461 return 0;
462}
463
464#else
465
466static inline void genpd_power_off_work_fn(struct work_struct *work) {}
467static inline void __pm_genpd_runtime_resume(struct device *dev,
468 struct generic_pm_domain *genpd) {}
469
470#define pm_genpd_runtime_suspend NULL
471#define pm_genpd_runtime_resume NULL
472
473#endif /* CONFIG_PM_RUNTIME */
474
475#ifdef CONFIG_PM_SLEEP
476
477/**
478 * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its parents.
479 * @genpd: PM domain to power off, if possible.
480 *
481 * Check if the given PM domain can be powered off (during system suspend or
482 * hibernation) and do that if so. Also, in that case propagate to its parent.
483 *
484 * This function is only called in "noirq" stages of system power transitions,
485 * so it need not acquire locks (all of the "noirq" callbacks are executed
486 * sequentially, so it is guaranteed that it will never run twice in parallel).
487 */
488static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
489{
490 struct generic_pm_domain *parent = genpd->parent;
491
492 if (genpd->status == GPD_STATE_POWER_OFF)
493 return;
494
495 if (genpd->suspended_count != genpd->device_count || genpd->sd_count > 0)
496 return;
497
498 if (genpd->power_off)
499 genpd->power_off(genpd);
500
501 genpd->status = GPD_STATE_POWER_OFF;
502 if (parent) {
503 genpd_sd_counter_dec(parent);
504 pm_genpd_sync_poweroff(parent);
505 }
506}
507
508/**
509 * resume_needed - Check whether to resume a device before system suspend.
510 * @dev: Device to check.
511 * @genpd: PM domain the device belongs to.
512 *
513 * There are two cases in which a device that can wake up the system from sleep
514 * states should be resumed by pm_genpd_prepare(): (1) if the device is enabled
515 * to wake up the system and it has to remain active for this purpose while the
516 * system is in the sleep state and (2) if the device is not enabled to wake up
517 * the system from sleep states and it generally doesn't generate wakeup signals
518 * by itself (those signals are generated on its behalf by other parts of the
519 * system). In the latter case it may be necessary to reconfigure the device's
520 * wakeup settings during system suspend, because it may have been set up to
521 * signal remote wakeup from the system's working state as needed by runtime PM.
522 * Return 'true' in either of the above cases.
523 */
524static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
525{
526 bool active_wakeup;
527
528 if (!device_can_wakeup(dev))
529 return false;
530
531 active_wakeup = genpd->active_wakeup && genpd->active_wakeup(dev);
532 return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
533}
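
The active_wakeup() callback consulted here is platform-provided. An illustrative sketch (not part of this file; the compatible string is a made-up example) of a callback that keeps in-domain wakeup sources powered:

#include <linux/device.h>
#include <linux/of.h>

static bool foo_pd_active_wakeup(struct device *dev)
{
	/*
	 * Return true for devices whose wakeup logic lives inside the domain
	 * (they must stay powered to wake the system); false for devices that
	 * are woken on their behalf by an always-on block outside the domain.
	 */
	return dev->of_node &&
	       of_device_is_compatible(dev->of_node, "vendor,foo-uart");
}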
534
535/**
536 * pm_genpd_prepare - Start power transition of a device in a PM domain.
537 * @dev: Device to start the transition of.
538 *
539 * Start a power transition of a device (during a system-wide power transition)
540 * under the assumption that its pm_domain field points to the domain member of
541 * an object of type struct generic_pm_domain representing a PM domain
542 * consisting of I/O devices.
543 */
544static int pm_genpd_prepare(struct device *dev)
545{
546 struct generic_pm_domain *genpd;
547 int ret;
548
549 dev_dbg(dev, "%s()\n", __func__);
550
551 genpd = dev_to_genpd(dev);
552 if (IS_ERR(genpd))
553 return -EINVAL;
554
555 /*
556 * If a wakeup request is pending for the device, it should be woken up
557 * at this point and a system wakeup event should be reported if it's
558 * set up to wake up the system from sleep states.
559 */
560 pm_runtime_get_noresume(dev);
561 if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
562 pm_wakeup_event(dev, 0);
563
564 if (pm_wakeup_pending()) {
565 pm_runtime_put_sync(dev);
566 return -EBUSY;
567 }
568
569 if (resume_needed(dev, genpd))
570 pm_runtime_resume(dev);
571
572 genpd_acquire_lock(genpd);
573
574 if (genpd->prepared_count++ == 0)
575 genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF;
576
577 genpd_release_lock(genpd);
578
579 if (genpd->suspend_power_off) {
580 pm_runtime_put_noidle(dev);
581 return 0;
582 }
583
584 /*
585 * The PM domain must be in the GPD_STATE_ACTIVE state at this point,
586 * so pm_genpd_poweron() will return immediately, but if the device
587 * is suspended (e.g. it's been stopped by .stop_device()), we need
588 * to make it operational.
589 */
590 pm_runtime_resume(dev);
591 __pm_runtime_disable(dev, false);
592
593 ret = pm_generic_prepare(dev);
594 if (ret) {
595 mutex_lock(&genpd->lock);
596
597 if (--genpd->prepared_count == 0)
598 genpd->suspend_power_off = false;
599
600 mutex_unlock(&genpd->lock);
601 pm_runtime_enable(dev);
602 }
603
604 pm_runtime_put_sync(dev);
605 return ret;
606}
607
608/**
609 * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
610 * @dev: Device to suspend.
611 *
612 * Suspend a device under the assumption that its pm_domain field points to the
613 * domain member of an object of type struct generic_pm_domain representing
614 * a PM domain consisting of I/O devices.
615 */
616static int pm_genpd_suspend(struct device *dev)
617{
618 struct generic_pm_domain *genpd;
619
620 dev_dbg(dev, "%s()\n", __func__);
621
622 genpd = dev_to_genpd(dev);
623 if (IS_ERR(genpd))
624 return -EINVAL;
625
626 return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
627}
628
629/**
630 * pm_genpd_suspend_noirq - Late suspend of a device from an I/O PM domain.
631 * @dev: Device to suspend.
632 *
633 * Carry out a late suspend of a device under the assumption that its
634 * pm_domain field points to the domain member of an object of type
635 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
636 */
637static int pm_genpd_suspend_noirq(struct device *dev)
638{
639 struct generic_pm_domain *genpd;
640 int ret;
641
642 dev_dbg(dev, "%s()\n", __func__);
643
644 genpd = dev_to_genpd(dev);
645 if (IS_ERR(genpd))
646 return -EINVAL;
647
648 if (genpd->suspend_power_off)
649 return 0;
650
651 ret = pm_generic_suspend_noirq(dev);
652 if (ret)
653 return ret;
654
655 if (device_may_wakeup(dev)
656 && genpd->active_wakeup && genpd->active_wakeup(dev))
657 return 0;
658
659 if (genpd->stop_device)
660 genpd->stop_device(dev);
661
662 /*
663 * Since all of the "noirq" callbacks are executed sequentially, it is
664 * guaranteed that this function will never run twice in parallel for
665 * the same PM domain, so it is not necessary to use locking here.
666 */
667 genpd->suspended_count++;
668 pm_genpd_sync_poweroff(genpd);
669
670 return 0;
671}
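
The stop_device() and start_device() hooks used above are also supplied by the platform and typically just gate and ungate the device's clocks. A simplified sketch (not part of this file; a real platform would cache the struct clk instead of looking it up on every transition):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int foo_pd_stop_device(struct device *dev)
{
	struct clk *clk = clk_get(dev, NULL);

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	clk_disable(clk);
	clk_put(clk);
	return 0;
}

static int foo_pd_start_device(struct device *dev)
{
	struct clk *clk = clk_get(dev, NULL);
	int ret;

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_enable(clk);
	clk_put(clk);
	return ret;
}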
672
673/**
674 * pm_genpd_resume_noirq - Early resume of a device from an I/O power domain.
675 * @dev: Device to resume.
676 *
677 * Carry out an early resume of a device under the assumption that its
678 * pm_domain field points to the domain member of an object of type
679 * struct generic_pm_domain representing a power domain consisting of I/O
680 * devices.
681 */
682static int pm_genpd_resume_noirq(struct device *dev)
683{
684 struct generic_pm_domain *genpd;
685
686 dev_dbg(dev, "%s()\n", __func__);
687
688 genpd = dev_to_genpd(dev);
689 if (IS_ERR(genpd))
690 return -EINVAL;
691
692 if (genpd->suspend_power_off)
693 return 0;
694
695 /*
696 * Since all of the "noirq" callbacks are executed sequentially, it is
697 * guaranteed that this function will never run twice in parallel for
698 * the same PM domain, so it is not necessary to use locking here.
699 */
700 pm_genpd_poweron(genpd);
701 genpd->suspended_count--;
702 if (genpd->start_device)
703 genpd->start_device(dev);
704
705 return pm_generic_resume_noirq(dev);
706}
707
708/**
709 * pm_genpd_resume - Resume a device belonging to an I/O power domain.
710 * @dev: Device to resume.
711 *
712 * Resume a device under the assumption that its pm_domain field points to the
713 * domain member of an object of type struct generic_pm_domain representing
714 * a power domain consisting of I/O devices.
715 */
716static int pm_genpd_resume(struct device *dev)
717{
718 struct generic_pm_domain *genpd;
719
720 dev_dbg(dev, "%s()\n", __func__);
721
722 genpd = dev_to_genpd(dev);
723 if (IS_ERR(genpd))
724 return -EINVAL;
725
726 return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
727}
728
729/**
730 * pm_genpd_freeze - Freeze a device belonging to an I/O power domain.
731 * @dev: Device to freeze.
732 *
733 * Freeze a device under the assumption that its pm_domain field points to the
734 * domain member of an object of type struct generic_pm_domain representing
735 * a power domain consisting of I/O devices.
736 */
737static int pm_genpd_freeze(struct device *dev)
738{
739 struct generic_pm_domain *genpd;
740
741 dev_dbg(dev, "%s()\n", __func__);
742
743 genpd = dev_to_genpd(dev);
744 if (IS_ERR(genpd))
745 return -EINVAL;
746
747 return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
748}
749
750/**
751 * pm_genpd_freeze_noirq - Late freeze of a device from an I/O power domain.
752 * @dev: Device to freeze.
753 *
754 * Carry out a late freeze of a device under the assumption that its
755 * pm_domain field points to the domain member of an object of type
756 * struct generic_pm_domain representing a power domain consisting of I/O
757 * devices.
758 */
759static int pm_genpd_freeze_noirq(struct device *dev)
760{
761 struct generic_pm_domain *genpd;
762 int ret;
763
764 dev_dbg(dev, "%s()\n", __func__);
765
766 genpd = dev_to_genpd(dev);
767 if (IS_ERR(genpd))
768 return -EINVAL;
769
770 if (genpd->suspend_power_off)
771 return 0;
772
773 ret = pm_generic_freeze_noirq(dev);
774 if (ret)
775 return ret;
776
777 if (genpd->stop_device)
778 genpd->stop_device(dev);
779
780 return 0;
781}
782
783/**
784 * pm_genpd_thaw_noirq - Early thaw of a device from an I/O power domain.
785 * @dev: Device to thaw.
786 *
787 * Carry out an early thaw of a device under the assumption that its
788 * pm_domain field points to the domain member of an object of type
789 * struct generic_pm_domain representing a power domain consisting of I/O
790 * devices.
791 */
792static int pm_genpd_thaw_noirq(struct device *dev)
793{
794 struct generic_pm_domain *genpd;
795
796 dev_dbg(dev, "%s()\n", __func__);
797
798 genpd = dev_to_genpd(dev);
799 if (IS_ERR(genpd))
800 return -EINVAL;
801
802 if (genpd->suspend_power_off)
803 return 0;
804
805 if (genpd->start_device)
806 genpd->start_device(dev);
807
808 return pm_generic_thaw_noirq(dev);
809}
810
811/**
812 * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
813 * @dev: Device to thaw.
814 *
815 * Thaw a device under the assumption that its pm_domain field points to the
816 * domain member of an object of type struct generic_pm_domain representing
817 * a power domain consisting of I/O devices.
818 */
819static int pm_genpd_thaw(struct device *dev)
820{
821 struct generic_pm_domain *genpd;
822
823 dev_dbg(dev, "%s()\n", __func__);
824
825 genpd = dev_to_genpd(dev);
826 if (IS_ERR(genpd))
827 return -EINVAL;
828
829 return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
830}
831
832/**
833 * pm_genpd_dev_poweroff - Power off a device belonging to an I/O PM domain.
834 * @dev: Device to suspend.
835 *
836 * Power off a device under the assumption that its pm_domain field points to
837 * the domain member of an object of type struct generic_pm_domain representing
838 * a PM domain consisting of I/O devices.
839 */
840static int pm_genpd_dev_poweroff(struct device *dev)
841{
842 struct generic_pm_domain *genpd;
843
844 dev_dbg(dev, "%s()\n", __func__);
845
846 genpd = dev_to_genpd(dev);
847 if (IS_ERR(genpd))
848 return -EINVAL;
849
850 return genpd->suspend_power_off ? 0 : pm_generic_poweroff(dev);
851}
852
853/**
854 * pm_genpd_dev_poweroff_noirq - Late power off of a device from a PM domain.
855 * @dev: Device to suspend.
856 *
857 * Carry out a late powering off of a device under the assumption that its
858 * pm_domain field points to the domain member of an object of type
859 * struct generic_pm_domain representing a PM domain consisting of I/O devices.
860 */
861static int pm_genpd_dev_poweroff_noirq(struct device *dev)
862{
863 struct generic_pm_domain *genpd;
864 int ret;
865
866 dev_dbg(dev, "%s()\n", __func__);
867
868 genpd = dev_to_genpd(dev);
869 if (IS_ERR(genpd))
870 return -EINVAL;
871
872 if (genpd->suspend_power_off)
873 return 0;
874
875 ret = pm_generic_poweroff_noirq(dev);
876 if (ret)
877 return ret;
878
879 if (device_may_wakeup(dev)
880 && genpd->active_wakeup && genpd->active_wakeup(dev))
881 return 0;
882
883 if (genpd->stop_device)
884 genpd->stop_device(dev);
885
886 /*
887 * Since all of the "noirq" callbacks are executed sequentially, it is
888 * guaranteed that this function will never run twice in parallel for
889 * the same PM domain, so it is not necessary to use locking here.
890 */
891 genpd->suspended_count++;
892 pm_genpd_sync_poweroff(genpd);
893
894 return 0;
895}
896
897/**
898 * pm_genpd_restore_noirq - Early restore of a device from an I/O power domain.
899 * @dev: Device to resume.
900 *
901 * Carry out an early restore of a device under the assumption that its
902 * pm_domain field points to the domain member of an object of type
903 * struct generic_pm_domain representing a power domain consisting of I/O
904 * devices.
905 */
906static int pm_genpd_restore_noirq(struct device *dev)
907{
908 struct generic_pm_domain *genpd;
909
910 dev_dbg(dev, "%s()\n", __func__);
911
912 genpd = dev_to_genpd(dev);
913 if (IS_ERR(genpd))
914 return -EINVAL;
915
916 /*
917 * Since all of the "noirq" callbacks are executed sequentially, it is
918 * guaranteed that this function will never run twice in parallel for
919 * the same PM domain, so it is not necessary to use locking here.
920 */
921 genpd->status = GPD_STATE_POWER_OFF;
922 if (genpd->suspend_power_off) {
923 /*
924 * The boot kernel might put the domain into the power on state,
925 * so make sure it really is powered off.
926 */
927 if (genpd->power_off)
928 genpd->power_off(genpd);
929 return 0;
930 }
931
932 pm_genpd_poweron(genpd);
933 genpd->suspended_count--;
934 if (genpd->start_device)
935 genpd->start_device(dev);
936
937 return pm_generic_restore_noirq(dev);
938}
939
940/**
941 * pm_genpd_restore - Restore a device belonging to an I/O power domain.
942 * @dev: Device to resume.
943 *
944 * Restore a device under the assumption that its pm_domain field points to the
945 * domain member of an object of type struct generic_pm_domain representing
946 * a power domain consisting of I/O devices.
947 */
948static int pm_genpd_restore(struct device *dev)
949{
950 struct generic_pm_domain *genpd;
951
952 dev_dbg(dev, "%s()\n", __func__);
953
954 genpd = dev_to_genpd(dev);
955 if (IS_ERR(genpd))
956 return -EINVAL;
957
958 return genpd->suspend_power_off ? 0 : pm_generic_restore(dev);
959}
960
961/**
962 * pm_genpd_complete - Complete power transition of a device in a power domain.
963 * @dev: Device to complete the transition of.
964 *
965 * Complete a power transition of a device (during a system-wide power
966 * transition) under the assumption that its pm_domain field points to the
967 * domain member of an object of type struct generic_pm_domain representing
968 * a power domain consisting of I/O devices.
969 */
970static void pm_genpd_complete(struct device *dev)
971{
972 struct generic_pm_domain *genpd;
973 bool run_complete;
974
975 dev_dbg(dev, "%s()\n", __func__);
976
977 genpd = dev_to_genpd(dev);
978 if (IS_ERR(genpd))
979 return;
980
981 mutex_lock(&genpd->lock);
982
983 run_complete = !genpd->suspend_power_off;
984 if (--genpd->prepared_count == 0)
985 genpd->suspend_power_off = false;
986
987 mutex_unlock(&genpd->lock);
988
989 if (run_complete) {
990 pm_generic_complete(dev);
991 pm_runtime_set_active(dev);
992 pm_runtime_enable(dev);
993 pm_runtime_idle(dev);
994 }
995}
996
997#else
998
999#define pm_genpd_prepare NULL
1000#define pm_genpd_suspend NULL
1001#define pm_genpd_suspend_noirq NULL
1002#define pm_genpd_resume_noirq NULL
1003#define pm_genpd_resume NULL
1004#define pm_genpd_freeze NULL
1005#define pm_genpd_freeze_noirq NULL
1006#define pm_genpd_thaw_noirq NULL
1007#define pm_genpd_thaw NULL
1008#define pm_genpd_dev_poweroff_noirq NULL
1009#define pm_genpd_dev_poweroff NULL
1010#define pm_genpd_restore_noirq NULL
1011#define pm_genpd_restore NULL
1012#define pm_genpd_complete NULL
1013
1014#endif /* CONFIG_PM_SLEEP */
1015
1016/**
1017 * pm_genpd_add_device - Add a device to an I/O PM domain.
1018 * @genpd: PM domain to add the device to.
1019 * @dev: Device to be added.
1020 */
1021int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
1022{
1023 struct dev_list_entry *dle;
1024 int ret = 0;
1025
1026 dev_dbg(dev, "%s()\n", __func__);
1027
1028 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1029 return -EINVAL;
1030
1031 genpd_acquire_lock(genpd);
1032
1033 if (genpd->status == GPD_STATE_POWER_OFF) {
1034 ret = -EINVAL;
1035 goto out;
1036 }
1037
1038 if (genpd->prepared_count > 0) {
1039 ret = -EAGAIN;
1040 goto out;
1041 }
1042
1043 list_for_each_entry(dle, &genpd->dev_list, node)
1044 if (dle->dev == dev) {
1045 ret = -EINVAL;
1046 goto out;
1047 }
1048
1049 dle = kzalloc(sizeof(*dle), GFP_KERNEL);
1050 if (!dle) {
1051 ret = -ENOMEM;
1052 goto out;
1053 }
1054
1055 dle->dev = dev;
1056 dle->need_restore = false;
1057 list_add_tail(&dle->node, &genpd->dev_list);
1058 genpd->device_count++;
1059
1060 spin_lock_irq(&dev->power.lock);
1061 dev->pm_domain = &genpd->domain;
1062 spin_unlock_irq(&dev->power.lock);
1063
1064 out:
1065 genpd_release_lock(genpd);
1066
1067 return ret;
1068}
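
Platform code would normally call this right after registering the device and while the domain is still powered (the function refuses to add a device to a powered-off or already-prepared domain). An illustrative sketch (not part of this file; foo_pd and the device name are assumptions):

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>

extern struct generic_pm_domain foo_pd;	/* assumed to be defined elsewhere by the platform */

static int __init foo_register_lcd(void)
{
	struct platform_device *pdev;
	int ret;

	pdev = platform_device_register_simple("foo-lcd", -1, NULL, 0);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	ret = pm_genpd_add_device(&foo_pd, &pdev->dev);
	if (ret)
		dev_warn(&pdev->dev, "failed to add to PM domain: %d\n", ret);

	return ret;
}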
1069
1070/**
1071 * pm_genpd_remove_device - Remove a device from an I/O PM domain.
1072 * @genpd: PM domain to remove the device from.
1073 * @dev: Device to be removed.
1074 */
1075int pm_genpd_remove_device(struct generic_pm_domain *genpd,
1076 struct device *dev)
1077{
1078 struct dev_list_entry *dle;
1079 int ret = -EINVAL;
1080
1081 dev_dbg(dev, "%s()\n", __func__);
1082
1083 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(dev))
1084 return -EINVAL;
1085
1086 genpd_acquire_lock(genpd);
1087
1088 if (genpd->prepared_count > 0) {
1089 ret = -EAGAIN;
1090 goto out;
1091 }
1092
1093 list_for_each_entry(dle, &genpd->dev_list, node) {
1094 if (dle->dev != dev)
1095 continue;
1096
1097 spin_lock_irq(&dev->power.lock);
1098 dev->pm_domain = NULL;
1099 spin_unlock_irq(&dev->power.lock);
1100
1101 genpd->device_count--;
1102 list_del(&dle->node);
1103 kfree(dle);
1104
1105 ret = 0;
1106 break;
1107 }
1108
1109 out:
1110 genpd_release_lock(genpd);
1111
1112 return ret;
1113}
1114
1115/**
1116 * pm_genpd_add_subdomain - Add a subdomain to an I/O PM domain.
1117 * @genpd: Master PM domain to add the subdomain to.
1118 * @new_subdomain: Subdomain to be added.
1119 */
1120int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
1121 struct generic_pm_domain *new_subdomain)
1122{
1123 struct generic_pm_domain *subdomain;
1124 int ret = 0;
1125
1126 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(new_subdomain))
1127 return -EINVAL;
1128
1129 start:
1130 genpd_acquire_lock(genpd);
1131 mutex_lock_nested(&new_subdomain->lock, SINGLE_DEPTH_NESTING);
1132
1133 if (new_subdomain->status != GPD_STATE_POWER_OFF
1134 && new_subdomain->status != GPD_STATE_ACTIVE) {
1135 mutex_unlock(&new_subdomain->lock);
1136 genpd_release_lock(genpd);
1137 goto start;
1138 }
1139
1140 if (genpd->status == GPD_STATE_POWER_OFF
1141 && new_subdomain->status != GPD_STATE_POWER_OFF) {
1142 ret = -EINVAL;
1143 goto out;
1144 }
1145
1146 list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
1147 if (subdomain == new_subdomain) {
1148 ret = -EINVAL;
1149 goto out;
1150 }
1151 }
1152
1153 list_add_tail(&new_subdomain->sd_node, &genpd->sd_list);
1154 new_subdomain->parent = genpd;
 1155	if (new_subdomain->status != GPD_STATE_POWER_OFF
1156 genpd->sd_count++;
1157
1158 out:
1159 mutex_unlock(&new_subdomain->lock);
1160 genpd_release_lock(genpd);
1161
1162 return ret;
1163}
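
Subdomains let a platform model nested power areas, e.g. a graphics island inside a larger core domain: the subdomain can only be powered while its parent is on, and an active subdomain keeps the parent's sd_count non-zero. An illustrative sketch (not part of this file; both domains are hypothetical):

#include <linux/kernel.h>
#include <linux/pm_domain.h>

static struct generic_pm_domain foo_core_pd, foo_gfx_pd;	/* callbacks omitted for brevity */

static void __init foo_setup_domain_hierarchy(void)
{
	pm_genpd_init(&foo_core_pd, NULL, false);
	pm_genpd_init(&foo_gfx_pd, NULL, false);

	/* The graphics island may only be powered while the core domain is on. */
	if (pm_genpd_add_subdomain(&foo_core_pd, &foo_gfx_pd))
		pr_warn("failed to link foo_gfx_pd under foo_core_pd\n");
}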
1164
1165/**
1166 * pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
1167 * @genpd: Master PM domain to remove the subdomain from.
1168 * @target: Subdomain to be removed.
1169 */
1170int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
1171 struct generic_pm_domain *target)
1172{
1173 struct generic_pm_domain *subdomain;
1174 int ret = -EINVAL;
1175
1176 if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(target))
1177 return -EINVAL;
1178
1179 start:
1180 genpd_acquire_lock(genpd);
1181
1182 list_for_each_entry(subdomain, &genpd->sd_list, sd_node) {
1183 if (subdomain != target)
1184 continue;
1185
1186 mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
1187
1188 if (subdomain->status != GPD_STATE_POWER_OFF
1189 && subdomain->status != GPD_STATE_ACTIVE) {
1190 mutex_unlock(&subdomain->lock);
1191 genpd_release_lock(genpd);
1192 goto start;
1193 }
1194
1195 list_del(&subdomain->sd_node);
1196 subdomain->parent = NULL;
1197 if (subdomain->status != GPD_STATE_POWER_OFF)
1198 genpd_sd_counter_dec(genpd);
1199
1200 mutex_unlock(&subdomain->lock);
1201
1202 ret = 0;
1203 break;
1204 }
1205
1206 genpd_release_lock(genpd);
1207
1208 return ret;
1209}
1210
1211/**
1212 * pm_genpd_init - Initialize a generic I/O PM domain object.
1213 * @genpd: PM domain object to initialize.
1214 * @gov: PM domain governor to associate with the domain (may be NULL).
 1215 * @is_off: Initial power state of the domain's status field (true means the domain starts powered off).
1216 */
1217void pm_genpd_init(struct generic_pm_domain *genpd,
1218 struct dev_power_governor *gov, bool is_off)
1219{
1220 if (IS_ERR_OR_NULL(genpd))
1221 return;
1222
1223 INIT_LIST_HEAD(&genpd->sd_node);
1224 genpd->parent = NULL;
1225 INIT_LIST_HEAD(&genpd->dev_list);
1226 INIT_LIST_HEAD(&genpd->sd_list);
1227 mutex_init(&genpd->lock);
1228 genpd->gov = gov;
1229 INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
1230 genpd->in_progress = 0;
1231 genpd->sd_count = 0;
1232 genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
1233 init_waitqueue_head(&genpd->status_wait_queue);
1234 genpd->poweroff_task = NULL;
1235 genpd->resume_count = 0;
1236 genpd->device_count = 0;
1237 genpd->suspended_count = 0;
1238 genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
1239 genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
1240 genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
1241 genpd->domain.ops.prepare = pm_genpd_prepare;
1242 genpd->domain.ops.suspend = pm_genpd_suspend;
1243 genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
1244 genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
1245 genpd->domain.ops.resume = pm_genpd_resume;
1246 genpd->domain.ops.freeze = pm_genpd_freeze;
1247 genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
1248 genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
1249 genpd->domain.ops.thaw = pm_genpd_thaw;
1250 genpd->domain.ops.poweroff = pm_genpd_dev_poweroff;
1251 genpd->domain.ops.poweroff_noirq = pm_genpd_dev_poweroff_noirq;
1252 genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
1253 genpd->domain.ops.restore = pm_genpd_restore;
1254 genpd->domain.ops.complete = pm_genpd_complete;
1255 mutex_lock(&gpd_list_lock);
1256 list_add(&genpd->gpd_list_node, &gpd_list);
1257 mutex_unlock(&gpd_list_lock);
1258}
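
Taken together, a platform describes a domain by filling in at least power_on()/power_off() (plus, optionally, start_device()/stop_device() and active_wakeup()) and then calling pm_genpd_init(). A minimal sketch under those assumptions (not part of this file; the register and names are hypothetical):

#include <linux/io.h>
#include <linux/pm_domain.h>

static void __iomem *foo_pwr_reg;	/* assumed to be ioremap()ed by earlier platform code */

static int foo_pd_power_on(struct generic_pm_domain *domain)
{
	writel(1, foo_pwr_reg);		/* close the hypothetical power switch */
	return 0;
}

static int foo_pd_power_off(struct generic_pm_domain *domain)
{
	writel(0, foo_pwr_reg);		/* open the hypothetical power switch */
	return 0;
}

static struct generic_pm_domain foo_pd = {
	.power_on	= foo_pd_power_on,
	.power_off	= foo_pd_power_off,
};

static void __init foo_pd_setup(void)
{
	pm_genpd_init(&foo_pd, NULL, false);	/* false: the domain starts out powered on */
}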
1259
1260/**
1261 * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
1262 */
1263void pm_genpd_poweroff_unused(void)
1264{
1265 struct generic_pm_domain *genpd;
1266
1267 mutex_lock(&gpd_list_lock);
1268
1269 list_for_each_entry(genpd, &gpd_list, gpd_list_node)
1270 genpd_queue_power_off_work(genpd);
1271
1272 mutex_unlock(&gpd_list_lock);
1273}
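
Nothing calls pm_genpd_poweroff_unused() automatically; a platform that wants unused domains switched off once built-in drivers have probed would typically do so from a late initcall, along these lines (illustrative, not part of this file):

#include <linux/init.h>
#include <linux/pm_domain.h>

static int __init foo_pd_poweroff_unused(void)
{
	pm_genpd_poweroff_unused();
	return 0;
}
late_initcall(foo_pd_poweroff_unused);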
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index cb3bb368681c..9508df71274b 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -94,12 +94,13 @@ int pm_generic_prepare(struct device *dev)
94 * __pm_generic_call - Generic suspend/freeze/poweroff/thaw subsystem callback. 94 * __pm_generic_call - Generic suspend/freeze/poweroff/thaw subsystem callback.
95 * @dev: Device to handle. 95 * @dev: Device to handle.
96 * @event: PM transition of the system under way. 96 * @event: PM transition of the system under way.
 97 * @noirq: Whether or not this is the "noirq" stage.
97 * 98 *
98 * If the device has not been suspended at run time, execute the 99 * If the device has not been suspended at run time, execute the
99 * suspend/freeze/poweroff/thaw callback provided by its driver, if defined, and 100 * suspend/freeze/poweroff/thaw callback provided by its driver, if defined, and
100 * return its error code. Otherwise, return zero. 101 * return its error code. Otherwise, return zero.
101 */ 102 */
102static int __pm_generic_call(struct device *dev, int event) 103static int __pm_generic_call(struct device *dev, int event, bool noirq)
103{ 104{
104 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 105 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
105 int (*callback)(struct device *); 106 int (*callback)(struct device *);
@@ -109,16 +110,16 @@ static int __pm_generic_call(struct device *dev, int event)
109 110
110 switch (event) { 111 switch (event) {
111 case PM_EVENT_SUSPEND: 112 case PM_EVENT_SUSPEND:
112 callback = pm->suspend; 113 callback = noirq ? pm->suspend_noirq : pm->suspend;
113 break; 114 break;
114 case PM_EVENT_FREEZE: 115 case PM_EVENT_FREEZE:
115 callback = pm->freeze; 116 callback = noirq ? pm->freeze_noirq : pm->freeze;
116 break; 117 break;
117 case PM_EVENT_HIBERNATE: 118 case PM_EVENT_HIBERNATE:
118 callback = pm->poweroff; 119 callback = noirq ? pm->poweroff_noirq : pm->poweroff;
119 break; 120 break;
120 case PM_EVENT_THAW: 121 case PM_EVENT_THAW:
121 callback = pm->thaw; 122 callback = noirq ? pm->thaw_noirq : pm->thaw;
122 break; 123 break;
123 default: 124 default:
124 callback = NULL; 125 callback = NULL;
@@ -129,42 +130,82 @@ static int __pm_generic_call(struct device *dev, int event)
129} 130}
130 131
131/** 132/**
133 * pm_generic_suspend_noirq - Generic suspend_noirq callback for subsystems.
134 * @dev: Device to suspend.
135 */
136int pm_generic_suspend_noirq(struct device *dev)
137{
138 return __pm_generic_call(dev, PM_EVENT_SUSPEND, true);
139}
140EXPORT_SYMBOL_GPL(pm_generic_suspend_noirq);
141
142/**
132 * pm_generic_suspend - Generic suspend callback for subsystems. 143 * pm_generic_suspend - Generic suspend callback for subsystems.
133 * @dev: Device to suspend. 144 * @dev: Device to suspend.
134 */ 145 */
135int pm_generic_suspend(struct device *dev) 146int pm_generic_suspend(struct device *dev)
136{ 147{
137 return __pm_generic_call(dev, PM_EVENT_SUSPEND); 148 return __pm_generic_call(dev, PM_EVENT_SUSPEND, false);
138} 149}
139EXPORT_SYMBOL_GPL(pm_generic_suspend); 150EXPORT_SYMBOL_GPL(pm_generic_suspend);
140 151
141/** 152/**
153 * pm_generic_freeze_noirq - Generic freeze_noirq callback for subsystems.
154 * @dev: Device to freeze.
155 */
156int pm_generic_freeze_noirq(struct device *dev)
157{
158 return __pm_generic_call(dev, PM_EVENT_FREEZE, true);
159}
160EXPORT_SYMBOL_GPL(pm_generic_freeze_noirq);
161
162/**
142 * pm_generic_freeze - Generic freeze callback for subsystems. 163 * pm_generic_freeze - Generic freeze callback for subsystems.
143 * @dev: Device to freeze. 164 * @dev: Device to freeze.
144 */ 165 */
145int pm_generic_freeze(struct device *dev) 166int pm_generic_freeze(struct device *dev)
146{ 167{
147 return __pm_generic_call(dev, PM_EVENT_FREEZE); 168 return __pm_generic_call(dev, PM_EVENT_FREEZE, false);
148} 169}
149EXPORT_SYMBOL_GPL(pm_generic_freeze); 170EXPORT_SYMBOL_GPL(pm_generic_freeze);
150 171
151/** 172/**
173 * pm_generic_poweroff_noirq - Generic poweroff_noirq callback for subsystems.
174 * @dev: Device to handle.
175 */
176int pm_generic_poweroff_noirq(struct device *dev)
177{
178 return __pm_generic_call(dev, PM_EVENT_HIBERNATE, true);
179}
180EXPORT_SYMBOL_GPL(pm_generic_poweroff_noirq);
181
182/**
152 * pm_generic_poweroff - Generic poweroff callback for subsystems. 183 * pm_generic_poweroff - Generic poweroff callback for subsystems.
153 * @dev: Device to handle. 184 * @dev: Device to handle.
154 */ 185 */
155int pm_generic_poweroff(struct device *dev) 186int pm_generic_poweroff(struct device *dev)
156{ 187{
157 return __pm_generic_call(dev, PM_EVENT_HIBERNATE); 188 return __pm_generic_call(dev, PM_EVENT_HIBERNATE, false);
158} 189}
159EXPORT_SYMBOL_GPL(pm_generic_poweroff); 190EXPORT_SYMBOL_GPL(pm_generic_poweroff);
160 191
161/** 192/**
193 * pm_generic_thaw_noirq - Generic thaw_noirq callback for subsystems.
194 * @dev: Device to thaw.
195 */
196int pm_generic_thaw_noirq(struct device *dev)
197{
198 return __pm_generic_call(dev, PM_EVENT_THAW, true);
199}
200EXPORT_SYMBOL_GPL(pm_generic_thaw_noirq);
201
202/**
162 * pm_generic_thaw - Generic thaw callback for subsystems. 203 * pm_generic_thaw - Generic thaw callback for subsystems.
163 * @dev: Device to thaw. 204 * @dev: Device to thaw.
164 */ 205 */
165int pm_generic_thaw(struct device *dev) 206int pm_generic_thaw(struct device *dev)
166{ 207{
167 return __pm_generic_call(dev, PM_EVENT_THAW); 208 return __pm_generic_call(dev, PM_EVENT_THAW, false);
168} 209}
169EXPORT_SYMBOL_GPL(pm_generic_thaw); 210EXPORT_SYMBOL_GPL(pm_generic_thaw);
170 211
@@ -172,12 +213,13 @@ EXPORT_SYMBOL_GPL(pm_generic_thaw);
172 * __pm_generic_resume - Generic resume/restore callback for subsystems. 213 * __pm_generic_resume - Generic resume/restore callback for subsystems.
173 * @dev: Device to handle. 214 * @dev: Device to handle.
174 * @event: PM transition of the system under way. 215 * @event: PM transition of the system under way.
 216 * @noirq: Whether or not this is the "noirq" stage.
175 * 217 *
176 * Execute the resume/restore callback provided by the @dev's driver, if 218
177 * defined. If it returns 0, change the device's runtime PM status to 'active'. 219 * defined. If it returns 0, change the device's runtime PM status to 'active'.
178 * Return the callback's error code. 220 * Return the callback's error code.
179 */ 221 */
180static int __pm_generic_resume(struct device *dev, int event) 222static int __pm_generic_resume(struct device *dev, int event, bool noirq)
181{ 223{
182 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; 224 const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
183 int (*callback)(struct device *); 225 int (*callback)(struct device *);
@@ -188,10 +230,10 @@ static int __pm_generic_resume(struct device *dev, int event)
188 230
189 switch (event) { 231 switch (event) {
190 case PM_EVENT_RESUME: 232 case PM_EVENT_RESUME:
191 callback = pm->resume; 233 callback = noirq ? pm->resume_noirq : pm->resume;
192 break; 234 break;
193 case PM_EVENT_RESTORE: 235 case PM_EVENT_RESTORE:
194 callback = pm->restore; 236 callback = noirq ? pm->restore_noirq : pm->restore;
195 break; 237 break;
196 default: 238 default:
197 callback = NULL; 239 callback = NULL;
@@ -202,7 +244,7 @@ static int __pm_generic_resume(struct device *dev, int event)
202 return 0; 244 return 0;
203 245
204 ret = callback(dev); 246 ret = callback(dev);
205 if (!ret && pm_runtime_enabled(dev)) { 247 if (!ret && !noirq && pm_runtime_enabled(dev)) {
206 pm_runtime_disable(dev); 248 pm_runtime_disable(dev);
207 pm_runtime_set_active(dev); 249 pm_runtime_set_active(dev);
208 pm_runtime_enable(dev); 250 pm_runtime_enable(dev);
@@ -212,22 +254,42 @@ static int __pm_generic_resume(struct device *dev, int event)
212} 254}
213 255
214/** 256/**
257 * pm_generic_resume_noirq - Generic resume_noirq callback for subsystems.
258 * @dev: Device to resume.
259 */
260int pm_generic_resume_noirq(struct device *dev)
261{
262 return __pm_generic_resume(dev, PM_EVENT_RESUME, true);
263}
264EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
265
266/**
215 * pm_generic_resume - Generic resume callback for subsystems. 267 * pm_generic_resume - Generic resume callback for subsystems.
216 * @dev: Device to resume. 268 * @dev: Device to resume.
217 */ 269 */
218int pm_generic_resume(struct device *dev) 270int pm_generic_resume(struct device *dev)
219{ 271{
220 return __pm_generic_resume(dev, PM_EVENT_RESUME); 272 return __pm_generic_resume(dev, PM_EVENT_RESUME, false);
221} 273}
222EXPORT_SYMBOL_GPL(pm_generic_resume); 274EXPORT_SYMBOL_GPL(pm_generic_resume);
223 275
224/** 276/**
277 * pm_generic_restore_noirq - Generic restore_noirq callback for subsystems.
278 * @dev: Device to restore.
279 */
280int pm_generic_restore_noirq(struct device *dev)
281{
282 return __pm_generic_resume(dev, PM_EVENT_RESTORE, true);
283}
284EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
285
286/**
225 * pm_generic_restore - Generic restore callback for subsystems. 287 * pm_generic_restore - Generic restore callback for subsystems.
226 * @dev: Device to restore. 288 * @dev: Device to restore.
227 */ 289 */
228int pm_generic_restore(struct device *dev) 290int pm_generic_restore(struct device *dev)
229{ 291{
230 return __pm_generic_resume(dev, PM_EVENT_RESTORE); 292 return __pm_generic_resume(dev, PM_EVENT_RESTORE, false);
231} 293}
232EXPORT_SYMBOL_GPL(pm_generic_restore); 294EXPORT_SYMBOL_GPL(pm_generic_restore);
233 295
@@ -256,11 +318,17 @@ struct dev_pm_ops generic_subsys_pm_ops = {
256#ifdef CONFIG_PM_SLEEP 318#ifdef CONFIG_PM_SLEEP
257 .prepare = pm_generic_prepare, 319 .prepare = pm_generic_prepare,
258 .suspend = pm_generic_suspend, 320 .suspend = pm_generic_suspend,
321 .suspend_noirq = pm_generic_suspend_noirq,
259 .resume = pm_generic_resume, 322 .resume = pm_generic_resume,
323 .resume_noirq = pm_generic_resume_noirq,
260 .freeze = pm_generic_freeze, 324 .freeze = pm_generic_freeze,
325 .freeze_noirq = pm_generic_freeze_noirq,
261 .thaw = pm_generic_thaw, 326 .thaw = pm_generic_thaw,
327 .thaw_noirq = pm_generic_thaw_noirq,
262 .poweroff = pm_generic_poweroff, 328 .poweroff = pm_generic_poweroff,
329 .poweroff_noirq = pm_generic_poweroff_noirq,
263 .restore = pm_generic_restore, 330 .restore = pm_generic_restore,
331 .restore_noirq = pm_generic_restore_noirq,
264 .complete = pm_generic_complete, 332 .complete = pm_generic_complete,
265#endif 333#endif
266#ifdef CONFIG_PM_RUNTIME 334#ifdef CONFIG_PM_RUNTIME
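
With the noirq variants exported, a subsystem that previously wired up only the thread-stage generic helpers can cover the noirq phases the same way. An illustrative sketch of a hypothetical bus type (not part of this file):

#include <linux/device.h>
#include <linux/pm.h>

static const struct dev_pm_ops foo_bus_pm_ops = {
	.suspend	= pm_generic_suspend,
	.suspend_noirq	= pm_generic_suspend_noirq,
	.resume		= pm_generic_resume,
	.resume_noirq	= pm_generic_resume_noirq,
	.freeze		= pm_generic_freeze,
	.freeze_noirq	= pm_generic_freeze_noirq,
	.thaw		= pm_generic_thaw,
	.thaw_noirq	= pm_generic_thaw_noirq,
	.poweroff	= pm_generic_poweroff,
	.poweroff_noirq	= pm_generic_poweroff_noirq,
	.restore	= pm_generic_restore,
	.restore_noirq	= pm_generic_restore_noirq,
};

struct bus_type foo_bus_type = {
	.name	= "foo",
	.pm	= &foo_bus_pm_ops,
};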
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index aa6320207745..a85459126bc6 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -57,7 +57,8 @@ static int async_error;
57 */ 57 */
58void device_pm_init(struct device *dev) 58void device_pm_init(struct device *dev)
59{ 59{
60 dev->power.in_suspend = false; 60 dev->power.is_prepared = false;
61 dev->power.is_suspended = false;
61 init_completion(&dev->power.completion); 62 init_completion(&dev->power.completion);
62 complete_all(&dev->power.completion); 63 complete_all(&dev->power.completion);
63 dev->power.wakeup = NULL; 64 dev->power.wakeup = NULL;
@@ -91,7 +92,7 @@ void device_pm_add(struct device *dev)
91 pr_debug("PM: Adding info for %s:%s\n", 92 pr_debug("PM: Adding info for %s:%s\n",
92 dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); 93 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
93 mutex_lock(&dpm_list_mtx); 94 mutex_lock(&dpm_list_mtx);
94 if (dev->parent && dev->parent->power.in_suspend) 95 if (dev->parent && dev->parent->power.is_prepared)
95 dev_warn(dev, "parent %s should not be sleeping\n", 96 dev_warn(dev, "parent %s should not be sleeping\n",
96 dev_name(dev->parent)); 97 dev_name(dev->parent));
97 list_add_tail(&dev->power.entry, &dpm_list); 98 list_add_tail(&dev->power.entry, &dpm_list);
@@ -424,9 +425,9 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
424 TRACE_DEVICE(dev); 425 TRACE_DEVICE(dev);
425 TRACE_RESUME(0); 426 TRACE_RESUME(0);
426 427
427 if (dev->pwr_domain) { 428 if (dev->pm_domain) {
428 pm_dev_dbg(dev, state, "EARLY power domain "); 429 pm_dev_dbg(dev, state, "EARLY power domain ");
429 error = pm_noirq_op(dev, &dev->pwr_domain->ops, state); 430 error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
430 } else if (dev->type && dev->type->pm) { 431 } else if (dev->type && dev->type->pm) {
431 pm_dev_dbg(dev, state, "EARLY type "); 432 pm_dev_dbg(dev, state, "EARLY type ");
432 error = pm_noirq_op(dev, dev->type->pm, state); 433 error = pm_noirq_op(dev, dev->type->pm, state);
@@ -504,6 +505,7 @@ static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
504static int device_resume(struct device *dev, pm_message_t state, bool async) 505static int device_resume(struct device *dev, pm_message_t state, bool async)
505{ 506{
506 int error = 0; 507 int error = 0;
508 bool put = false;
507 509
508 TRACE_DEVICE(dev); 510 TRACE_DEVICE(dev);
509 TRACE_RESUME(0); 511 TRACE_RESUME(0);
@@ -511,11 +513,21 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
511 dpm_wait(dev->parent, async); 513 dpm_wait(dev->parent, async);
512 device_lock(dev); 514 device_lock(dev);
513 515
514 dev->power.in_suspend = false; 516 /*
517 * This is a fib. But we'll allow new children to be added below
518 * a resumed device, even if the device hasn't been completed yet.
519 */
520 dev->power.is_prepared = false;
515 521
516 if (dev->pwr_domain) { 522 if (!dev->power.is_suspended)
523 goto Unlock;
524
525 pm_runtime_enable(dev);
526 put = true;
527
528 if (dev->pm_domain) {
517 pm_dev_dbg(dev, state, "power domain "); 529 pm_dev_dbg(dev, state, "power domain ");
518 error = pm_op(dev, &dev->pwr_domain->ops, state); 530 error = pm_op(dev, &dev->pm_domain->ops, state);
519 goto End; 531 goto End;
520 } 532 }
521 533
@@ -548,10 +560,17 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
548 } 560 }
549 561
550 End: 562 End:
563 dev->power.is_suspended = false;
564
565 Unlock:
551 device_unlock(dev); 566 device_unlock(dev);
552 complete_all(&dev->power.completion); 567 complete_all(&dev->power.completion);
553 568
554 TRACE_RESUME(error); 569 TRACE_RESUME(error);
570
571 if (put)
572 pm_runtime_put_sync(dev);
573
555 return error; 574 return error;
556} 575}
557 576
@@ -630,10 +649,10 @@ static void device_complete(struct device *dev, pm_message_t state)
630{ 649{
631 device_lock(dev); 650 device_lock(dev);
632 651
633 if (dev->pwr_domain) { 652 if (dev->pm_domain) {
634 pm_dev_dbg(dev, state, "completing power domain "); 653 pm_dev_dbg(dev, state, "completing power domain ");
635 if (dev->pwr_domain->ops.complete) 654 if (dev->pm_domain->ops.complete)
636 dev->pwr_domain->ops.complete(dev); 655 dev->pm_domain->ops.complete(dev);
637 } else if (dev->type && dev->type->pm) { 656 } else if (dev->type && dev->type->pm) {
638 pm_dev_dbg(dev, state, "completing type "); 657 pm_dev_dbg(dev, state, "completing type ");
639 if (dev->type->pm->complete) 658 if (dev->type->pm->complete)
@@ -670,7 +689,7 @@ void dpm_complete(pm_message_t state)
670 struct device *dev = to_device(dpm_prepared_list.prev); 689 struct device *dev = to_device(dpm_prepared_list.prev);
671 690
672 get_device(dev); 691 get_device(dev);
673 dev->power.in_suspend = false; 692 dev->power.is_prepared = false;
674 list_move(&dev->power.entry, &list); 693 list_move(&dev->power.entry, &list);
675 mutex_unlock(&dpm_list_mtx); 694 mutex_unlock(&dpm_list_mtx);
676 695
@@ -733,9 +752,9 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
733{ 752{
734 int error; 753 int error;
735 754
736 if (dev->pwr_domain) { 755 if (dev->pm_domain) {
737 pm_dev_dbg(dev, state, "LATE power domain "); 756 pm_dev_dbg(dev, state, "LATE power domain ");
738 error = pm_noirq_op(dev, &dev->pwr_domain->ops, state); 757 error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
739 if (error) 758 if (error)
740 return error; 759 return error;
741 } else if (dev->type && dev->type->pm) { 760 } else if (dev->type && dev->type->pm) {
@@ -832,19 +851,25 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
832 int error = 0; 851 int error = 0;
833 852
834 dpm_wait_for_children(dev, async); 853 dpm_wait_for_children(dev, async);
835 device_lock(dev);
836 854
837 if (async_error) 855 if (async_error)
838 goto End; 856 return 0;
857
858 pm_runtime_get_noresume(dev);
859 if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
860 pm_wakeup_event(dev, 0);
839 861
840 if (pm_wakeup_pending()) { 862 if (pm_wakeup_pending()) {
863 pm_runtime_put_sync(dev);
841 async_error = -EBUSY; 864 async_error = -EBUSY;
842 goto End; 865 return 0;
843 } 866 }
844 867
845 if (dev->pwr_domain) { 868 device_lock(dev);
869
870 if (dev->pm_domain) {
846 pm_dev_dbg(dev, state, "power domain "); 871 pm_dev_dbg(dev, state, "power domain ");
847 error = pm_op(dev, &dev->pwr_domain->ops, state); 872 error = pm_op(dev, &dev->pm_domain->ops, state);
848 goto End; 873 goto End;
849 } 874 }
850 875
@@ -877,11 +902,17 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
877 } 902 }
878 903
879 End: 904 End:
905 dev->power.is_suspended = !error;
906
880 device_unlock(dev); 907 device_unlock(dev);
881 complete_all(&dev->power.completion); 908 complete_all(&dev->power.completion);
882 909
883 if (error) 910 if (error) {
911 pm_runtime_put_sync(dev);
884 async_error = error; 912 async_error = error;
913 } else if (dev->power.is_suspended) {
914 __pm_runtime_disable(dev, false);
915 }
885 916
886 return error; 917 return error;
887} 918}
@@ -968,11 +999,11 @@ static int device_prepare(struct device *dev, pm_message_t state)
968 999
969 device_lock(dev); 1000 device_lock(dev);
970 1001
971 if (dev->pwr_domain) { 1002 if (dev->pm_domain) {
972 pm_dev_dbg(dev, state, "preparing power domain "); 1003 pm_dev_dbg(dev, state, "preparing power domain ");
973 if (dev->pwr_domain->ops.prepare) 1004 if (dev->pm_domain->ops.prepare)
974 error = dev->pwr_domain->ops.prepare(dev); 1005 error = dev->pm_domain->ops.prepare(dev);
975 suspend_report_result(dev->pwr_domain->ops.prepare, error); 1006 suspend_report_result(dev->pm_domain->ops.prepare, error);
976 if (error) 1007 if (error)
977 goto End; 1008 goto End;
978 } else if (dev->type && dev->type->pm) { 1009 } else if (dev->type && dev->type->pm) {
@@ -1021,13 +1052,7 @@ int dpm_prepare(pm_message_t state)
1021 get_device(dev); 1052 get_device(dev);
1022 mutex_unlock(&dpm_list_mtx); 1053 mutex_unlock(&dpm_list_mtx);
1023 1054
1024 pm_runtime_get_noresume(dev); 1055 error = device_prepare(dev, state);
1025 if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
1026 pm_wakeup_event(dev, 0);
1027
1028 pm_runtime_put_sync(dev);
1029 error = pm_wakeup_pending() ?
1030 -EBUSY : device_prepare(dev, state);
1031 1056
1032 mutex_lock(&dpm_list_mtx); 1057 mutex_lock(&dpm_list_mtx);
1033 if (error) { 1058 if (error) {
@@ -1042,7 +1067,7 @@ int dpm_prepare(pm_message_t state)
1042 put_device(dev); 1067 put_device(dev);
1043 break; 1068 break;
1044 } 1069 }
1045 dev->power.in_suspend = true; 1070 dev->power.is_prepared = true;
1046 if (!list_empty(&dev->power.entry)) 1071 if (!list_empty(&dev->power.entry))
1047 list_move_tail(&dev->power.entry, &dpm_prepared_list); 1072 list_move_tail(&dev->power.entry, &dpm_prepared_list);
1048 put_device(dev); 1073 put_device(dev);
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 56a6899f5e9e..5cc12322ef32 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -625,4 +625,21 @@ int opp_init_cpufreq_table(struct device *dev,
625 625
626 return 0; 626 return 0;
627} 627}
628
629/**
630 * opp_free_cpufreq_table() - free the cpufreq table
631 * @dev: device for which we do this operation
632 * @table: table to free
633 *
634 * Free up the table allocated by opp_init_cpufreq_table
635 */
636void opp_free_cpufreq_table(struct device *dev,
637 struct cpufreq_frequency_table **table)
638{
639 if (!table)
640 return;
641
642 kfree(*table);
643 *table = NULL;
644}
628#endif /* CONFIG_CPU_FREQ */ 645#endif /* CONFIG_CPU_FREQ */
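
The new helper pairs with opp_init_cpufreq_table(): whoever built the table is expected to free it again. A sketch of that pairing (not part of this file; how the caller obtains the CPU's struct device is platform specific):

#include <linux/cpufreq.h>
#include <linux/opp.h>

static struct cpufreq_frequency_table *foo_freq_table;

static int foo_build_table(struct device *cpu_dev)
{
	int ret = opp_init_cpufreq_table(cpu_dev, &foo_freq_table);

	if (ret)
		return ret;

	/* ... hand foo_freq_table to the cpufreq core ... */
	return 0;
}

static void foo_free_table(struct device *cpu_dev)
{
	/* Frees the table and resets foo_freq_table to NULL. */
	opp_free_cpufreq_table(cpu_dev, &foo_freq_table);
}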
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 0d4587b15c55..8dc247c974af 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * drivers/base/power/runtime.c - Helper functions for device run-time PM 2 * drivers/base/power/runtime.c - Helper functions for device runtime PM
3 * 3 *
4 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. 4 * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
5 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu> 5 * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu>
@@ -135,8 +135,9 @@ static int rpm_check_suspend_allowed(struct device *dev)
135 135
136 if (dev->power.runtime_error) 136 if (dev->power.runtime_error)
137 retval = -EINVAL; 137 retval = -EINVAL;
138 else if (atomic_read(&dev->power.usage_count) > 0 138 else if (dev->power.disable_depth > 0)
139 || dev->power.disable_depth > 0) 139 retval = -EACCES;
140 else if (atomic_read(&dev->power.usage_count) > 0)
140 retval = -EAGAIN; 141 retval = -EAGAIN;
141 else if (!pm_children_suspended(dev)) 142 else if (!pm_children_suspended(dev))
142 retval = -EBUSY; 143 retval = -EBUSY;
@@ -158,7 +159,7 @@ static int rpm_check_suspend_allowed(struct device *dev)
158 * @dev: Device to notify the bus type about. 159 * @dev: Device to notify the bus type about.
159 * @rpmflags: Flag bits. 160 * @rpmflags: Flag bits.
160 * 161 *
161 * Check if the device's run-time PM status allows it to be suspended. If 162 * Check if the device's runtime PM status allows it to be suspended. If
162 * another idle notification has been started earlier, return immediately. If 163 * another idle notification has been started earlier, return immediately. If
163 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise 164 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
164 * run the ->runtime_idle() callback directly. 165 * run the ->runtime_idle() callback directly.
@@ -213,8 +214,8 @@ static int rpm_idle(struct device *dev, int rpmflags)
213 214
214 dev->power.idle_notification = true; 215 dev->power.idle_notification = true;
215 216
216 if (dev->pwr_domain) 217 if (dev->pm_domain)
217 callback = dev->pwr_domain->ops.runtime_idle; 218 callback = dev->pm_domain->ops.runtime_idle;
218 else if (dev->type && dev->type->pm) 219 else if (dev->type && dev->type->pm)
219 callback = dev->type->pm->runtime_idle; 220 callback = dev->type->pm->runtime_idle;
220 else if (dev->class && dev->class->pm) 221 else if (dev->class && dev->class->pm)
@@ -262,15 +263,15 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
262 spin_lock_irq(&dev->power.lock); 263 spin_lock_irq(&dev->power.lock);
263 } 264 }
264 dev->power.runtime_error = retval; 265 dev->power.runtime_error = retval;
265 return retval; 266 return retval != -EACCES ? retval : -EIO;
266} 267}
267 268
268/** 269/**
269 * rpm_suspend - Carry out run-time suspend of given device. 270 * rpm_suspend - Carry out runtime suspend of given device.
270 * @dev: Device to suspend. 271 * @dev: Device to suspend.
271 * @rpmflags: Flag bits. 272 * @rpmflags: Flag bits.
272 * 273 *
273 * Check if the device's run-time PM status allows it to be suspended. If 274 * Check if the device's runtime PM status allows it to be suspended. If
274 * another suspend has been started earlier, either return immediately or wait 275 * another suspend has been started earlier, either return immediately or wait
275 * for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags. Cancel a 276 * for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags. Cancel a
276 * pending idle notification. If the RPM_ASYNC flag is set then queue a 277 * pending idle notification. If the RPM_ASYNC flag is set then queue a
@@ -374,8 +375,8 @@ static int rpm_suspend(struct device *dev, int rpmflags)
374 375
375 __update_runtime_status(dev, RPM_SUSPENDING); 376 __update_runtime_status(dev, RPM_SUSPENDING);
376 377
377 if (dev->pwr_domain) 378 if (dev->pm_domain)
378 callback = dev->pwr_domain->ops.runtime_suspend; 379 callback = dev->pm_domain->ops.runtime_suspend;
379 else if (dev->type && dev->type->pm) 380 else if (dev->type && dev->type->pm)
380 callback = dev->type->pm->runtime_suspend; 381 callback = dev->type->pm->runtime_suspend;
381 else if (dev->class && dev->class->pm) 382 else if (dev->class && dev->class->pm)
@@ -388,7 +389,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
388 retval = rpm_callback(callback, dev); 389 retval = rpm_callback(callback, dev);
389 if (retval) { 390 if (retval) {
390 __update_runtime_status(dev, RPM_ACTIVE); 391 __update_runtime_status(dev, RPM_ACTIVE);
391 dev->power.deferred_resume = 0; 392 dev->power.deferred_resume = false;
392 if (retval == -EAGAIN || retval == -EBUSY) 393 if (retval == -EAGAIN || retval == -EBUSY)
393 dev->power.runtime_error = 0; 394 dev->power.runtime_error = 0;
394 else 395 else
@@ -429,11 +430,11 @@ static int rpm_suspend(struct device *dev, int rpmflags)
429} 430}
430 431
431/** 432/**
432 * rpm_resume - Carry out run-time resume of given device. 433 * rpm_resume - Carry out runtime resume of given device.
433 * @dev: Device to resume. 434 * @dev: Device to resume.
434 * @rpmflags: Flag bits. 435 * @rpmflags: Flag bits.
435 * 436 *
436 * Check if the device's run-time PM status allows it to be resumed. Cancel 437 * Check if the device's runtime PM status allows it to be resumed. Cancel
437 * any scheduled or pending requests. If another resume has been started 438 * any scheduled or pending requests. If another resume has been started
438 * earlier, either return immediately or wait for it to finish, depending on the 439 * earlier, either return immediately or wait for it to finish, depending on the
439 * RPM_NOWAIT and RPM_ASYNC flags. Similarly, if there's a suspend running in 440 * RPM_NOWAIT and RPM_ASYNC flags. Similarly, if there's a suspend running in
@@ -458,7 +459,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
458 if (dev->power.runtime_error) 459 if (dev->power.runtime_error)
459 retval = -EINVAL; 460 retval = -EINVAL;
460 else if (dev->power.disable_depth > 0) 461 else if (dev->power.disable_depth > 0)
461 retval = -EAGAIN; 462 retval = -EACCES;
462 if (retval) 463 if (retval)
463 goto out; 464 goto out;
464 465
@@ -550,7 +551,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
550 551
551 spin_lock(&parent->power.lock); 552 spin_lock(&parent->power.lock);
552 /* 553 /*
553 * We can resume if the parent's run-time PM is disabled or it 554 * We can resume if the parent's runtime PM is disabled or it
554 * is set to ignore children. 555 * is set to ignore children.
555 */ 556 */
556 if (!parent->power.disable_depth 557 if (!parent->power.disable_depth
@@ -573,8 +574,8 @@ static int rpm_resume(struct device *dev, int rpmflags)
573 574
574 __update_runtime_status(dev, RPM_RESUMING); 575 __update_runtime_status(dev, RPM_RESUMING);
575 576
576 if (dev->pwr_domain) 577 if (dev->pm_domain)
577 callback = dev->pwr_domain->ops.runtime_resume; 578 callback = dev->pm_domain->ops.runtime_resume;
578 else if (dev->type && dev->type->pm) 579 else if (dev->type && dev->type->pm)
579 callback = dev->type->pm->runtime_resume; 580 callback = dev->type->pm->runtime_resume;
580 else if (dev->class && dev->class->pm) 581 else if (dev->class && dev->class->pm)
@@ -614,11 +615,11 @@ static int rpm_resume(struct device *dev, int rpmflags)
614} 615}
615 616
616/** 617/**
617 * pm_runtime_work - Universal run-time PM work function. 618 * pm_runtime_work - Universal runtime PM work function.
618 * @work: Work structure used for scheduling the execution of this function. 619 * @work: Work structure used for scheduling the execution of this function.
619 * 620 *
620 * Use @work to get the device object the work is to be done for, determine what 621 * Use @work to get the device object the work is to be done for, determine what
621 * is to be done and execute the appropriate run-time PM function. 622 * is to be done and execute the appropriate runtime PM function.
622 */ 623 */
623static void pm_runtime_work(struct work_struct *work) 624static void pm_runtime_work(struct work_struct *work)
624{ 625{
@@ -717,7 +718,7 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay)
717EXPORT_SYMBOL_GPL(pm_schedule_suspend); 718EXPORT_SYMBOL_GPL(pm_schedule_suspend);
718 719
719/** 720/**
720 * __pm_runtime_idle - Entry point for run-time idle operations. 721 * __pm_runtime_idle - Entry point for runtime idle operations.
721 * @dev: Device to send idle notification for. 722 * @dev: Device to send idle notification for.
722 * @rpmflags: Flag bits. 723 * @rpmflags: Flag bits.
723 * 724 *
@@ -746,7 +747,7 @@ int __pm_runtime_idle(struct device *dev, int rpmflags)
746EXPORT_SYMBOL_GPL(__pm_runtime_idle); 747EXPORT_SYMBOL_GPL(__pm_runtime_idle);
747 748
748/** 749/**
749 * __pm_runtime_suspend - Entry point for run-time put/suspend operations. 750 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
750 * @dev: Device to suspend. 751 * @dev: Device to suspend.
751 * @rpmflags: Flag bits. 752 * @rpmflags: Flag bits.
752 * 753 *
@@ -775,7 +776,7 @@ int __pm_runtime_suspend(struct device *dev, int rpmflags)
775EXPORT_SYMBOL_GPL(__pm_runtime_suspend); 776EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
776 777
777/** 778/**
778 * __pm_runtime_resume - Entry point for run-time resume operations. 779 * __pm_runtime_resume - Entry point for runtime resume operations.
779 * @dev: Device to resume. 780 * @dev: Device to resume.
780 * @rpmflags: Flag bits. 781 * @rpmflags: Flag bits.
781 * 782 *
@@ -801,11 +802,11 @@ int __pm_runtime_resume(struct device *dev, int rpmflags)
801EXPORT_SYMBOL_GPL(__pm_runtime_resume); 802EXPORT_SYMBOL_GPL(__pm_runtime_resume);
802 803
803/** 804/**
804 * __pm_runtime_set_status - Set run-time PM status of a device. 805 * __pm_runtime_set_status - Set runtime PM status of a device.
805 * @dev: Device to handle. 806 * @dev: Device to handle.
806 * @status: New run-time PM status of the device. 807 * @status: New runtime PM status of the device.
807 * 808 *
808 * If run-time PM of the device is disabled or its power.runtime_error field is 809 * If runtime PM of the device is disabled or its power.runtime_error field is
809 * different from zero, the status may be changed either to RPM_ACTIVE, or to 810 * different from zero, the status may be changed either to RPM_ACTIVE, or to
810 * RPM_SUSPENDED, as long as that reflects the actual state of the device. 811 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
811 * However, if the device has a parent and the parent is not active, and the 812 * However, if the device has a parent and the parent is not active, and the
@@ -851,7 +852,7 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
851 852
852 /* 853 /*
853 * It is invalid to put an active child under a parent that is 854 * It is invalid to put an active child under a parent that is
854 * not active, has run-time PM enabled and the 855 * not active, has runtime PM enabled and the
855 * 'power.ignore_children' flag unset. 856 * 'power.ignore_children' flag unset.
856 */ 857 */
857 if (!parent->power.disable_depth 858 if (!parent->power.disable_depth
@@ -885,7 +886,7 @@ EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
885 * @dev: Device to handle. 886 * @dev: Device to handle.
886 * 887 *
887 * Flush all pending requests for the device from pm_wq and wait for all 888 * Flush all pending requests for the device from pm_wq and wait for all
888 * run-time PM operations involving the device in progress to complete. 889 * runtime PM operations involving the device in progress to complete.
889 * 890 *
890 * Should be called under dev->power.lock with interrupts disabled. 891 * Should be called under dev->power.lock with interrupts disabled.
891 */ 892 */
@@ -933,7 +934,7 @@ static void __pm_runtime_barrier(struct device *dev)
933 * Prevent the device from being suspended by incrementing its usage counter and 934 * Prevent the device from being suspended by incrementing its usage counter and
934 * if there's a pending resume request for the device, wake the device up. 935 * if there's a pending resume request for the device, wake the device up.
935 * Next, make sure that all pending requests for the device have been flushed 936 * Next, make sure that all pending requests for the device have been flushed
936 * from pm_wq and wait for all run-time PM operations involving the device in 937 * from pm_wq and wait for all runtime PM operations involving the device in
937 * progress to complete. 938 * progress to complete.
938 * 939 *
939 * Return value: 940 * Return value:
@@ -963,18 +964,18 @@ int pm_runtime_barrier(struct device *dev)
963EXPORT_SYMBOL_GPL(pm_runtime_barrier); 964EXPORT_SYMBOL_GPL(pm_runtime_barrier);
964 965
965/** 966/**
966 * __pm_runtime_disable - Disable run-time PM of a device. 967 * __pm_runtime_disable - Disable runtime PM of a device.
967 * @dev: Device to handle. 968 * @dev: Device to handle.
968 * @check_resume: If set, check if there's a resume request for the device. 969 * @check_resume: If set, check if there's a resume request for the device.
969 * 970 *
970 * Increment power.disable_depth for the device and if it was zero previously, 971
971 * cancel all pending run-time PM requests for the device and wait for all 972 * cancel all pending runtime PM requests for the device and wait for all
972 * operations in progress to complete. The device can be either active or 973 * operations in progress to complete. The device can be either active or
973 * suspended after its run-time PM has been disabled. 974 * suspended after its runtime PM has been disabled.
974 * 975 *
975 * If @check_resume is set and there's a resume request pending when 976 * If @check_resume is set and there's a resume request pending when
976 * __pm_runtime_disable() is called and power.disable_depth is zero, the 977 * __pm_runtime_disable() is called and power.disable_depth is zero, the
977 * function will wake up the device before disabling its run-time PM. 978 * function will wake up the device before disabling its runtime PM.
978 */ 979 */
979void __pm_runtime_disable(struct device *dev, bool check_resume) 980void __pm_runtime_disable(struct device *dev, bool check_resume)
980{ 981{
@@ -987,7 +988,7 @@ void __pm_runtime_disable(struct device *dev, bool check_resume)
987 988
988 /* 989 /*
989 * Wake up the device if there's a resume request pending, because that 990 * Wake up the device if there's a resume request pending, because that
990 * means there probably is some I/O to process and disabling run-time PM 991 * means there probably is some I/O to process and disabling runtime PM
991 * shouldn't prevent the device from processing the I/O. 992 * shouldn't prevent the device from processing the I/O.
992 */ 993 */
993 if (check_resume && dev->power.request_pending 994 if (check_resume && dev->power.request_pending
@@ -1012,7 +1013,7 @@ void __pm_runtime_disable(struct device *dev, bool check_resume)
1012EXPORT_SYMBOL_GPL(__pm_runtime_disable); 1013EXPORT_SYMBOL_GPL(__pm_runtime_disable);
1013 1014
1014/** 1015/**
1015 * pm_runtime_enable - Enable run-time PM of a device. 1016 * pm_runtime_enable - Enable runtime PM of a device.
1016 * @dev: Device to handle. 1017 * @dev: Device to handle.
1017 */ 1018 */
1018void pm_runtime_enable(struct device *dev) 1019void pm_runtime_enable(struct device *dev)
@@ -1031,7 +1032,7 @@ void pm_runtime_enable(struct device *dev)
1031EXPORT_SYMBOL_GPL(pm_runtime_enable); 1032EXPORT_SYMBOL_GPL(pm_runtime_enable);
1032 1033
1033/** 1034/**
1034 * pm_runtime_forbid - Block run-time PM of a device. 1035 * pm_runtime_forbid - Block runtime PM of a device.
1035 * @dev: Device to handle. 1036 * @dev: Device to handle.
1036 * 1037 *
1037 * Increase the device's usage count and clear its power.runtime_auto flag, 1038 * Increase the device's usage count and clear its power.runtime_auto flag,
@@ -1054,7 +1055,7 @@ void pm_runtime_forbid(struct device *dev)
1054EXPORT_SYMBOL_GPL(pm_runtime_forbid); 1055EXPORT_SYMBOL_GPL(pm_runtime_forbid);
1055 1056
1056/** 1057/**
1057 * pm_runtime_allow - Unblock run-time PM of a device. 1058 * pm_runtime_allow - Unblock runtime PM of a device.
1058 * @dev: Device to handle. 1059 * @dev: Device to handle.
1059 * 1060 *
1060 * Decrease the device's usage count and set its power.runtime_auto flag. 1061 * Decrease the device's usage count and set its power.runtime_auto flag.
@@ -1075,12 +1076,12 @@ void pm_runtime_allow(struct device *dev)
1075EXPORT_SYMBOL_GPL(pm_runtime_allow); 1076EXPORT_SYMBOL_GPL(pm_runtime_allow);
1076 1077
1077/** 1078/**
1078 * pm_runtime_no_callbacks - Ignore run-time PM callbacks for a device. 1079 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
1079 * @dev: Device to handle. 1080 * @dev: Device to handle.
1080 * 1081 *
1081 * Set the power.no_callbacks flag, which tells the PM core that this 1082 * Set the power.no_callbacks flag, which tells the PM core that this
1082 * device is power-managed through its parent and has no run-time PM 1083 * device is power-managed through its parent and has no runtime PM
1083 * callbacks of its own. The run-time sysfs attributes will be removed. 1084 * callbacks of its own. The runtime sysfs attributes will be removed.
1084 */ 1085 */
1085void pm_runtime_no_callbacks(struct device *dev) 1086void pm_runtime_no_callbacks(struct device *dev)
1086{ 1087{
@@ -1156,8 +1157,8 @@ static void update_autosuspend(struct device *dev, int old_delay, int old_use)
1156 * @delay: Value of the new delay in milliseconds. 1157 * @delay: Value of the new delay in milliseconds.
1157 * 1158 *
1158 * Set the device's power.autosuspend_delay value. If it changes to negative 1159 * Set the device's power.autosuspend_delay value. If it changes to negative
1159 * and the power.use_autosuspend flag is set, prevent run-time suspends. If it 1160 * and the power.use_autosuspend flag is set, prevent runtime suspends. If it
1160 * changes the other way, allow run-time suspends. 1161 * changes the other way, allow runtime suspends.
1161 */ 1162 */
1162void pm_runtime_set_autosuspend_delay(struct device *dev, int delay) 1163void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
1163{ 1164{
@@ -1177,7 +1178,7 @@ EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
1177 * @dev: Device to handle. 1178 * @dev: Device to handle.
1178 * @use: New value for use_autosuspend. 1179 * @use: New value for use_autosuspend.
1179 * 1180 *
1180 * Set the device's power.use_autosuspend flag, and allow or prevent run-time 1181 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
1181 * suspends as needed. 1182 * suspends as needed.
1182 */ 1183 */
1183void __pm_runtime_use_autosuspend(struct device *dev, bool use) 1184void __pm_runtime_use_autosuspend(struct device *dev, bool use)
@@ -1194,7 +1195,7 @@ void __pm_runtime_use_autosuspend(struct device *dev, bool use)
1194EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend); 1195EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
1195 1196
1196/** 1197/**
1197 * pm_runtime_init - Initialize run-time PM fields in given device object. 1198 * pm_runtime_init - Initialize runtime PM fields in given device object.
1198 * @dev: Device object to initialize. 1199 * @dev: Device object to initialize.
1199 */ 1200 */
1200void pm_runtime_init(struct device *dev) 1201void pm_runtime_init(struct device *dev)
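For context, a minimal sketch (not part of this patch) of how a driver might wire up the autosuspend interfaces documented above; the platform device, the 50 ms delay and the I/O helper are hypothetical, only the pm_runtime_* calls are the interfaces touched by this series.

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_probe(struct platform_device *pdev)
{
	/* Let the core suspend the device 50 ms after it was last used. */
	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	return 0;
}

static void foo_do_io(struct device *dev)
{
	pm_runtime_get_sync(dev);		/* resume if suspended */
	/* ... access the hardware ... */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);	/* suspend after the delay */
}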
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index a9f5b8979611..942d6a7c9ae1 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -116,12 +116,14 @@ static ssize_t control_store(struct device * dev, struct device_attribute *attr,
116 cp = memchr(buf, '\n', n); 116 cp = memchr(buf, '\n', n);
117 if (cp) 117 if (cp)
118 len = cp - buf; 118 len = cp - buf;
119 device_lock(dev);
119 if (len == sizeof ctrl_auto - 1 && strncmp(buf, ctrl_auto, len) == 0) 120 if (len == sizeof ctrl_auto - 1 && strncmp(buf, ctrl_auto, len) == 0)
120 pm_runtime_allow(dev); 121 pm_runtime_allow(dev);
121 else if (len == sizeof ctrl_on - 1 && strncmp(buf, ctrl_on, len) == 0) 122 else if (len == sizeof ctrl_on - 1 && strncmp(buf, ctrl_on, len) == 0)
122 pm_runtime_forbid(dev); 123 pm_runtime_forbid(dev);
123 else 124 else
124 return -EINVAL; 125 n = -EINVAL;
126 device_unlock(dev);
125 return n; 127 return n;
126} 128}
127 129
@@ -205,7 +207,9 @@ static ssize_t autosuspend_delay_ms_store(struct device *dev,
205 if (strict_strtol(buf, 10, &delay) != 0 || delay != (int) delay) 207 if (strict_strtol(buf, 10, &delay) != 0 || delay != (int) delay)
206 return -EINVAL; 208 return -EINVAL;
207 209
210 device_lock(dev);
208 pm_runtime_set_autosuspend_delay(dev, delay); 211 pm_runtime_set_autosuspend_delay(dev, delay);
212 device_unlock(dev);
209 return n; 213 return n;
210} 214}
211 215
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index c80e138b62fe..af10abecb99b 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -112,7 +112,7 @@ static unsigned int read_magic_time(void)
112 unsigned int val; 112 unsigned int val;
113 113
114 get_rtc_time(&time); 114 get_rtc_time(&time);
115 pr_info("Time: %2d:%02d:%02d Date: %02d/%02d/%02d\n", 115 pr_info("RTC time: %2d:%02d:%02d, date: %02d/%02d/%02d\n",
116 time.tm_hour, time.tm_min, time.tm_sec, 116 time.tm_hour, time.tm_min, time.tm_sec,
117 time.tm_mon + 1, time.tm_mday, time.tm_year % 100); 117 time.tm_mon + 1, time.tm_mday, time.tm_year % 100);
118 val = time.tm_year; /* 100 years */ 118 val = time.tm_year; /* 100 years */
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
new file mode 100644
index 000000000000..fabbf6cc5367
--- /dev/null
+++ b/drivers/base/regmap/Kconfig
@@ -0,0 +1,13 @@
1# Generic register map support. There are no user serviceable options here,
2# this is an API intended to be used by other kernel subsystems. These
3# subsystems should select the appropriate symbols.
4
5config REGMAP
6 default y if (REGMAP_I2C || REGMAP_SPI)
7 bool
8
9config REGMAP_I2C
10 tristate
11
12config REGMAP_SPI
13 tristate
diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile
new file mode 100644
index 000000000000..f476f4571295
--- /dev/null
+++ b/drivers/base/regmap/Makefile
@@ -0,0 +1,3 @@
1obj-$(CONFIG_REGMAP) += regmap.o
2obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o
3obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o
diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c
new file mode 100644
index 000000000000..c2231ff06cbc
--- /dev/null
+++ b/drivers/base/regmap/regmap-i2c.c
@@ -0,0 +1,115 @@
1/*
2 * Register map access API - I2C support
3 *
4 * Copyright 2011 Wolfson Microelectronics plc
5 *
6 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/regmap.h>
14#include <linux/i2c.h>
15#include <linux/module.h>
16#include <linux/init.h>
17
18static int regmap_i2c_write(struct device *dev, const void *data, size_t count)
19{
20 struct i2c_client *i2c = to_i2c_client(dev);
21 int ret;
22
23 ret = i2c_master_send(i2c, data, count);
24 if (ret == count)
25 return 0;
26 else if (ret < 0)
27 return ret;
28 else
29 return -EIO;
30}
31
32static int regmap_i2c_gather_write(struct device *dev,
33 const void *reg, size_t reg_size,
34 const void *val, size_t val_size)
35{
36 struct i2c_client *i2c = to_i2c_client(dev);
37 struct i2c_msg xfer[2];
38 int ret;
39
40 /* If the I2C controller can't do a gather, tell the core; it
41 * will substitute in a linear write for us.
42 */
43 if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_PROTOCOL_MANGLING))
44 return -ENOTSUPP;
45
46 xfer[0].addr = i2c->addr;
47 xfer[0].flags = 0;
48 xfer[0].len = reg_size;
49 xfer[0].buf = (void *)reg;
50
51 xfer[1].addr = i2c->addr;
52 xfer[1].flags = I2C_M_NOSTART;
53 xfer[1].len = val_size;
54 xfer[1].buf = (void *)val;
55
56 ret = i2c_transfer(i2c->adapter, xfer, 2);
57 if (ret == 2)
58 return 0;
59 if (ret < 0)
60 return ret;
61 else
62 return -EIO;
63}
64
65static int regmap_i2c_read(struct device *dev,
66 const void *reg, size_t reg_size,
67 void *val, size_t val_size)
68{
69 struct i2c_client *i2c = to_i2c_client(dev);
70 struct i2c_msg xfer[2];
71 int ret;
72
73 xfer[0].addr = i2c->addr;
74 xfer[0].flags = 0;
75 xfer[0].len = reg_size;
76 xfer[0].buf = (void *)reg;
77
78 xfer[1].addr = i2c->addr;
79 xfer[1].flags = I2C_M_RD;
80 xfer[1].len = val_size;
81 xfer[1].buf = val;
82
83 ret = i2c_transfer(i2c->adapter, xfer, 2);
84 if (ret == 2)
85 return 0;
86 else if (ret < 0)
87 return ret;
88 else
89 return -EIO;
90}
91
92static struct regmap_bus regmap_i2c = {
93 .type = &i2c_bus_type,
94 .write = regmap_i2c_write,
95 .gather_write = regmap_i2c_gather_write,
96 .read = regmap_i2c_read,
97 .owner = THIS_MODULE,
98};
99
100/**
101 * regmap_init_i2c(): Initialise register map
102 *
103 * @i2c: Device that will be interacted with
104 * @config: Configuration for register map
105 *
106 * The return value will be an ERR_PTR() on error or a valid pointer to
107 * a struct regmap.
108 */
109struct regmap *regmap_init_i2c(struct i2c_client *i2c,
110 const struct regmap_config *config)
111{
112 return regmap_init(&i2c->dev, &regmap_i2c, config);
113}
114EXPORT_SYMBOL_GPL(regmap_init_i2c);
115
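As a usage illustration (not part of the patch), a hypothetical I2C driver could create its register map in probe() roughly as below; the regmap_config values, the chip and the 0x00 ID register are invented, only regmap_init_i2c(), regmap_read() and regmap_exit() come from the code in this series.

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/regmap.h>

/* Hypothetical chip: 8-bit register addresses, 8-bit values. */
static const struct regmap_config foo_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
};

static int foo_i2c_probe(struct i2c_client *i2c,
			 const struct i2c_device_id *id)
{
	struct regmap *map;
	unsigned int chip_id;
	int ret;

	map = regmap_init_i2c(i2c, &foo_regmap_config);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* 0x00 is an invented identification register. */
	ret = regmap_read(map, 0x00, &chip_id);
	if (ret != 0) {
		regmap_exit(map);
		return ret;
	}
	dev_info(&i2c->dev, "chip ID %#x\n", chip_id);

	i2c_set_clientdata(i2c, map);
	return 0;
}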
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
new file mode 100644
index 000000000000..4deba0621bc7
--- /dev/null
+++ b/drivers/base/regmap/regmap-spi.c
@@ -0,0 +1,72 @@
1/*
2 * Register map access API - SPI support
3 *
4 * Copyright 2011 Wolfson Microelectronics plc
5 *
6 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/regmap.h>
14#include <linux/spi/spi.h>
15#include <linux/init.h>
16
17static int regmap_spi_write(struct device *dev, const void *data, size_t count)
18{
19 struct spi_device *spi = to_spi_device(dev);
20
21 return spi_write(spi, data, count);
22}
23
24static int regmap_spi_gather_write(struct device *dev,
25 const void *reg, size_t reg_len,
26 const void *val, size_t val_len)
27{
28 struct spi_device *spi = to_spi_device(dev);
29 struct spi_message m;
30 struct spi_transfer t[2] = { { .tx_buf = reg, .len = reg_len, },
31 { .tx_buf = val, .len = val_len, }, };
32
33 spi_message_init(&m);
34 spi_message_add_tail(&t[0], &m);
35 spi_message_add_tail(&t[1], &m);
36
37 return spi_sync(spi, &m);
38}
39
40static int regmap_spi_read(struct device *dev,
41 const void *reg, size_t reg_size,
42 void *val, size_t val_size)
43{
44 struct spi_device *spi = to_spi_device(dev);
45
46 return spi_write_then_read(spi, reg, reg_size, val, val_size);
47}
48
49static struct regmap_bus regmap_spi = {
50 .type = &spi_bus_type,
51 .write = regmap_spi_write,
52 .gather_write = regmap_spi_gather_write,
53 .read = regmap_spi_read,
54 .owner = THIS_MODULE,
55 .read_flag_mask = 0x80,
56};
57
58/**
59 * regmap_init_spi(): Initialise register map
60 *
61 * @spi: Device that will be interacted with
62 * @config: Configuration for register map
63 *
64 * The return value will be an ERR_PTR() on error or a valid pointer to
65 * a struct regmap.
66 */
67struct regmap *regmap_init_spi(struct spi_device *spi,
68 const struct regmap_config *config)
69{
70 return regmap_init(&spi->dev, &regmap_spi, config);
71}
72EXPORT_SYMBOL_GPL(regmap_init_spi);
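Similarly, a sketch of a hypothetical SPI codec-style driver using the 7-bit register / 9-bit value layout handled by the 7/9 formatter in regmap.c; everything except regmap_init_spi(), regmap_write() and regmap_exit() is invented. Note that with this layout only writes are usable through the value API, since regmap.c provides no parse_val() for 9-bit values.

#include <linux/err.h>
#include <linux/spi/spi.h>
#include <linux/regmap.h>

static const struct regmap_config bar_regmap_config = {
	.reg_bits = 7,	/* packed with the value into one 16-bit word */
	.val_bits = 9,
};

static int bar_spi_probe(struct spi_device *spi)
{
	struct regmap *map;

	map = regmap_init_spi(spi, &bar_regmap_config);
	if (IS_ERR(map))
		return PTR_ERR(map);

	spi_set_drvdata(spi, map);

	/* 0x0f and 0x123 are an invented reset register and magic value. */
	return regmap_write(map, 0x0f, 0x123);
}

static int bar_spi_remove(struct spi_device *spi)
{
	regmap_exit(spi_get_drvdata(spi));
	return 0;
}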
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
new file mode 100644
index 000000000000..cf3565cae93d
--- /dev/null
+++ b/drivers/base/regmap/regmap.c
@@ -0,0 +1,455 @@
1/*
2 * Register map access API
3 *
4 * Copyright 2011 Wolfson Microelectronics plc
5 *
6 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/slab.h>
14#include <linux/module.h>
15#include <linux/mutex.h>
16#include <linux/err.h>
17
18#include <linux/regmap.h>
19
20struct regmap;
21
22struct regmap_format {
23 size_t buf_size;
24 size_t reg_bytes;
25 size_t val_bytes;
26 void (*format_write)(struct regmap *map,
27 unsigned int reg, unsigned int val);
28 void (*format_reg)(void *buf, unsigned int reg);
29 void (*format_val)(void *buf, unsigned int val);
30 unsigned int (*parse_val)(void *buf);
31};
32
33struct regmap {
34 struct mutex lock;
35
36 struct device *dev; /* Device we do I/O on */
37 void *work_buf; /* Scratch buffer used to format I/O */
38 struct regmap_format format; /* Buffer format */
39 const struct regmap_bus *bus;
40};
41
42static void regmap_format_4_12_write(struct regmap *map,
43 unsigned int reg, unsigned int val)
44{
45 __be16 *out = map->work_buf;
46 *out = cpu_to_be16((reg << 12) | val);
47}
48
49static void regmap_format_7_9_write(struct regmap *map,
50 unsigned int reg, unsigned int val)
51{
52 __be16 *out = map->work_buf;
53 *out = cpu_to_be16((reg << 9) | val);
54}
55
56static void regmap_format_8(void *buf, unsigned int val)
57{
58 u8 *b = buf;
59
60 b[0] = val;
61}
62
63static void regmap_format_16(void *buf, unsigned int val)
64{
65 __be16 *b = buf;
66
67 b[0] = cpu_to_be16(val);
68}
69
70static unsigned int regmap_parse_8(void *buf)
71{
72 u8 *b = buf;
73
74 return b[0];
75}
76
77static unsigned int regmap_parse_16(void *buf)
78{
79 __be16 *b = buf;
80
81 b[0] = be16_to_cpu(b[0]);
82
83 return b[0];
84}
85
86/**
87 * regmap_init(): Initialise register map
88 *
89 * @dev: Device that will be interacted with
90 * @bus: Bus-specific callbacks to use with device
91 * @config: Configuration for register map
92 *
93 * The return value will be an ERR_PTR() on error or a valid pointer to
94 * a struct regmap. This function should generally not be called
95 * directly, it should be called by bus-specific init functions.
96 */
97struct regmap *regmap_init(struct device *dev,
98 const struct regmap_bus *bus,
99 const struct regmap_config *config)
100{
101 struct regmap *map;
102 int ret = -EINVAL;
103
104 if (!bus || !config)
105 return ERR_PTR(-EINVAL);
106
107 map = kzalloc(sizeof(*map), GFP_KERNEL);
108 if (map == NULL) {
109 ret = -ENOMEM;
110 goto err;
111 }
112
113 mutex_init(&map->lock);
114 map->format.buf_size = (config->reg_bits + config->val_bits) / 8;
115 map->format.reg_bytes = config->reg_bits / 8;
116 map->format.val_bytes = config->val_bits / 8;
117 map->dev = dev;
118 map->bus = bus;
119
120 switch (config->reg_bits) {
121 case 4:
122 switch (config->val_bits) {
123 case 12:
124 map->format.format_write = regmap_format_4_12_write;
125 break;
126 default:
127 goto err_map;
128 }
129 break;
130
131 case 7:
132 switch (config->val_bits) {
133 case 9:
134 map->format.format_write = regmap_format_7_9_write;
135 break;
136 default:
137 goto err_map;
138 }
139 break;
140
141 case 8:
142 map->format.format_reg = regmap_format_8;
143 break;
144
145 case 16:
146 map->format.format_reg = regmap_format_16;
147 break;
148
149 default:
150 goto err_map;
151 }
152
153 switch (config->val_bits) {
154 case 8:
155 map->format.format_val = regmap_format_8;
156 map->format.parse_val = regmap_parse_8;
157 break;
158 case 16:
159 map->format.format_val = regmap_format_16;
160 map->format.parse_val = regmap_parse_16;
161 break;
162 }
163
164 if (!map->format.format_write &&
165 !(map->format.format_reg && map->format.format_val))
166 goto err_map;
167
168 map->work_buf = kmalloc(map->format.buf_size, GFP_KERNEL);
169 if (map->work_buf == NULL) {
170 ret = -ENOMEM;
171 goto err_bus;
172 }
173
174 return map;
175
176err_bus:
177 module_put(map->bus->owner);
178err_map:
179 kfree(map);
180err:
181 return ERR_PTR(ret);
182}
183EXPORT_SYMBOL_GPL(regmap_init);
184
185/**
186 * regmap_exit(): Free a previously allocated register map
187 */
188void regmap_exit(struct regmap *map)
189{
190 kfree(map->work_buf);
191 module_put(map->bus->owner);
192 kfree(map);
193}
194EXPORT_SYMBOL_GPL(regmap_exit);
195
196static int _regmap_raw_write(struct regmap *map, unsigned int reg,
197 const void *val, size_t val_len)
198{
199 void *buf;
200 int ret = -ENOTSUPP;
201 size_t len;
202
203 map->format.format_reg(map->work_buf, reg);
204
205 /* Try to do a gather write if we can */
206 if (map->bus->gather_write)
207 ret = map->bus->gather_write(map->dev, map->work_buf,
208 map->format.reg_bytes,
209 val, val_len);
210
211 /* Otherwise fall back on linearising by hand. */
212 if (ret == -ENOTSUPP) {
213 len = map->format.reg_bytes + val_len;
214 buf = kmalloc(len, GFP_KERNEL);
215 if (!buf)
216 return -ENOMEM;
217
218 memcpy(buf, map->work_buf, map->format.reg_bytes);
219 memcpy(buf + map->format.reg_bytes, val, val_len);
220 ret = map->bus->write(map->dev, buf, len);
221
222 kfree(buf);
223 }
224
225 return ret;
226}
227
228static int _regmap_write(struct regmap *map, unsigned int reg,
229 unsigned int val)
230{
231 BUG_ON(!map->format.format_write && !map->format.format_val);
232
233 if (map->format.format_write) {
234 map->format.format_write(map, reg, val);
235
236 return map->bus->write(map->dev, map->work_buf,
237 map->format.buf_size);
238 } else {
239 map->format.format_val(map->work_buf + map->format.reg_bytes,
240 val);
241 return _regmap_raw_write(map, reg,
242 map->work_buf + map->format.reg_bytes,
243 map->format.val_bytes);
244 }
245}
246
247/**
248 * regmap_write(): Write a value to a single register
249 *
250 * @map: Register map to write to
251 * @reg: Register to write to
252 * @val: Value to be written
253 *
254 * A value of zero will be returned on success, a negative errno will
255 * be returned in error cases.
256 */
257int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
258{
259 int ret;
260
261 mutex_lock(&map->lock);
262
263 ret = _regmap_write(map, reg, val);
264
265 mutex_unlock(&map->lock);
266
267 return ret;
268}
269EXPORT_SYMBOL_GPL(regmap_write);
270
271/**
272 * regmap_raw_write(): Write raw values to one or more registers
273 *
274 * @map: Register map to write to
275 * @reg: Initial register to write to
276 * @val: Block of data to be written, laid out for direct transmission to the
277 * device
278 * @val_len: Length of data pointed to by val.
279 *
280 * This function is intended to be used for things like firmware
281 * download where a large block of data needs to be transferred to the
282 * device. No formatting will be done on the data provided.
283 *
284 * A value of zero will be returned on success, a negative errno will
285 * be returned in error cases.
286 */
287int regmap_raw_write(struct regmap *map, unsigned int reg,
288 const void *val, size_t val_len)
289{
290 int ret;
291
292 mutex_lock(&map->lock);
293
294 ret = _regmap_raw_write(map, reg, val, val_len);
295
296 mutex_unlock(&map->lock);
297
298 return ret;
299}
300EXPORT_SYMBOL_GPL(regmap_raw_write);
301
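A short sketch of the raw-write case described in the kerneldoc above, e.g. pushing a block of pre-formatted coefficient data to the device; the register address 0x40 and the helper name are hypothetical.

#include <linux/regmap.h>
#include <linux/types.h>

static int foo_load_coefficients(struct regmap *map,
				 const u8 *data, size_t len)
{
	/* Data is already in wire format; regmap only prepends the register. */
	return regmap_raw_write(map, 0x40, data, len);
}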
302static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
303 unsigned int val_len)
304{
305 u8 *u8 = map->work_buf;
306 int ret;
307
308 map->format.format_reg(map->work_buf, reg);
309
310 /*
311 * Some buses flag reads by setting the high bits in the
312 * register address; since it's always the high bits for all
313 * current formats we can do this here rather than in
314 * formatting. This may break if we get interesting formats.
315 */
316 if (map->bus->read_flag_mask)
317 u8[0] |= map->bus->read_flag_mask;
318
319 ret = map->bus->read(map->dev, map->work_buf, map->format.reg_bytes,
320 val, map->format.val_bytes);
321 if (ret != 0)
322 return ret;
323
324 return 0;
325}
326
327static int _regmap_read(struct regmap *map, unsigned int reg,
328 unsigned int *val)
329{
330 int ret;
331
332 if (!map->format.parse_val)
333 return -EINVAL;
334
335 ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes);
336 if (ret == 0)
337 *val = map->format.parse_val(map->work_buf);
338
339 return ret;
340}
341
342/**
343 * regmap_read(): Read a value from a single register
344 *
345 * @map: Register map to read from
346 * @reg: Register to be read from
347 * @val: Pointer to store read value
348 *
349 * A value of zero will be returned on success, a negative errno will
350 * be returned in error cases.
351 */
352int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
353{
354 int ret;
355
356 mutex_lock(&map->lock);
357
358 ret = _regmap_read(map, reg, val);
359
360 mutex_unlock(&map->lock);
361
362 return ret;
363}
364EXPORT_SYMBOL_GPL(regmap_read);
365
366/**
367 * regmap_raw_read(): Read raw data from the device
368 *
369 * @map: Register map to read from
370 * @reg: First register to be read from
371 * @val: Pointer to store read value
372 * @val_len: Size of data to read
373 *
374 * A value of zero will be returned on success, a negative errno will
375 * be returned in error cases.
376 */
377int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
378 size_t val_len)
379{
380 int ret;
381
382 mutex_lock(&map->lock);
383
384 ret = _regmap_raw_read(map, reg, val, val_len);
385
386 mutex_unlock(&map->lock);
387
388 return ret;
389}
390EXPORT_SYMBOL_GPL(regmap_raw_read);
391
392/**
393 * regmap_bulk_read(): Read multiple registers from the device
394 *
395 * @map: Register map to read from
396 * @reg: First register to be read from
397 * @val: Pointer to store read value, in native register size for device
398 * @val_count: Number of registers to read
399 *
400 * A value of zero will be returned on success, a negative errno will
401 * be returned in error cases.
402 */
403int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
404 size_t val_count)
405{
406 int ret, i;
407 size_t val_bytes = map->format.val_bytes;
408
409 if (!map->format.parse_val)
410 return -EINVAL;
411
412 ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
413 if (ret != 0)
414 return ret;
415
416 for (i = 0; i < val_count * val_bytes; i += val_bytes)
417 map->format.parse_val(val + i);
418
419 return 0;
420}
421EXPORT_SYMBOL_GPL(regmap_bulk_read);
422
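A sketch of a bulk read from a hypothetical device with 16-bit values; the base register 0x10 and the count of four are invented. Because parse_val() converts each value in place, the buffer ends up in native endianness and can be used directly as u16 data.

#include <linux/regmap.h>
#include <linux/types.h>

static int foo_read_coefficients(struct regmap *map, u16 *coeffs)
{
	/* Four consecutive 16-bit registers starting at invented reg 0x10. */
	return regmap_bulk_read(map, 0x10, coeffs, 4);
}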
423/**
424 * regmap_update_bits(): Perform a read/modify/write cycle on the register map
425 *
426 * @map: Register map to update
427 * @reg: Register to update
428 * @mask: Bitmask to change
429 * @val: New value for bitmask
430 *
431 * Returns zero for success, a negative number on error.
432 */
433int regmap_update_bits(struct regmap *map, unsigned int reg,
434 unsigned int mask, unsigned int val)
435{
436 int ret;
437 unsigned int tmp;
438
439 mutex_lock(&map->lock);
440
441 ret = _regmap_read(map, reg, &tmp);
442 if (ret != 0)
443 goto out;
444
445 tmp &= ~mask;
446 tmp |= val & mask;
447
448 ret = _regmap_write(map, reg, tmp);
449
450out:
451 mutex_unlock(&map->lock);
452
453 return ret;
454}
455EXPORT_SYMBOL_GPL(regmap_update_bits);
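Finally, a sketch of a read/modify/write through regmap_update_bits(); the register address and bit definition are invented for illustration.

#include <linux/bitops.h>
#include <linux/regmap.h>

#define FOO_REG_POWER	0x02		/* hypothetical register */
#define FOO_PWR_EN	BIT(0)		/* hypothetical enable bit */

static int foo_power_on(struct regmap *map)
{
	/* Set the enable bit, leaving the other bits in the register untouched. */
	return regmap_update_bits(map, FOO_REG_POWER, FOO_PWR_EN, FOO_PWR_EN);
}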
diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
index c126db3cb7d1..e8d11b6630ee 100644
--- a/drivers/base/syscore.c
+++ b/drivers/base/syscore.c
@@ -9,6 +9,7 @@
9#include <linux/syscore_ops.h> 9#include <linux/syscore_ops.h>
10#include <linux/mutex.h> 10#include <linux/mutex.h>
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/interrupt.h>
12 13
13static LIST_HEAD(syscore_ops_list); 14static LIST_HEAD(syscore_ops_list);
14static DEFINE_MUTEX(syscore_ops_lock); 15static DEFINE_MUTEX(syscore_ops_lock);
@@ -48,6 +49,13 @@ int syscore_suspend(void)
48 struct syscore_ops *ops; 49 struct syscore_ops *ops;
49 int ret = 0; 50 int ret = 0;
50 51
52 pr_debug("Checking wakeup interrupts\n");
53
54 /* Return error code if there are any wakeup interrupts pending. */
55 ret = check_wakeup_irqs();
56 if (ret)
57 return ret;
58
51 WARN_ONCE(!irqs_disabled(), 59 WARN_ONCE(!irqs_disabled(),
52 "Interrupts enabled before system core suspend.\n"); 60 "Interrupts enabled before system core suspend.\n");
53 61