path: root/drivers/base
Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/core.c            |  14
-rw-r--r--  drivers/base/cpu.c             |  36
-rw-r--r--  drivers/base/dd.c              |   2
-rw-r--r--  drivers/base/devtmpfs.c        | 100
-rw-r--r--  drivers/base/firmware_class.c  |  14
-rw-r--r--  drivers/base/memory.c          |  80
-rw-r--r--  drivers/base/node.c            | 196
-rw-r--r--  drivers/base/platform.c        |  29
-rw-r--r--  drivers/base/power/main.c      | 144
-rw-r--r--  drivers/base/power/runtime.c   |  78
10 files changed, 566 insertions(+), 127 deletions(-)
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 6bee6af8d8e1..f1290cbd1350 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -56,7 +56,14 @@ static inline int device_is_not_partition(struct device *dev)
  */
 const char *dev_driver_string(const struct device *dev)
 {
-        return dev->driver ? dev->driver->name :
+        struct device_driver *drv;
+
+        /* dev->driver can change to NULL underneath us because of unbinding,
+         * so be careful about accessing it.  dev->bus and dev->class should
+         * never change once they are set, so they don't need special care.
+         */
+        drv = ACCESS_ONCE(dev->driver);
+        return drv ? drv->name :
                         (dev->bus ? dev->bus->name :
                         (dev->class ? dev->class->name : ""));
 }
@@ -987,6 +994,8 @@ done:
         device_remove_class_symlinks(dev);
  SymlinkError:
         if (MAJOR(dev->devt))
+                devtmpfs_delete_node(dev);
+        if (MAJOR(dev->devt))
                 device_remove_sys_dev_entry(dev);
  devtattrError:
         if (MAJOR(dev->devt))
@@ -1728,8 +1737,5 @@ void device_shutdown(void)
                         dev->driver->shutdown(dev);
                 }
         }
-        kobject_put(sysfs_dev_char_kobj);
-        kobject_put(sysfs_dev_block_kobj);
-        kobject_put(dev_kobj);
         async_synchronize_full();
 }
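
The dev_driver_string() hunk closes a use-after-unbind race: the old code read dev->driver twice, once for the NULL test and once for the dereference, so a concurrent unbind could clear the pointer in between. A minimal sketch of the racy pattern being removed (illustration only, not patch code):

        /* Two loads of dev->driver: an unbind may run between them. */
        if (dev->driver)                        /* load #1: non-NULL */
                name = dev->driver->name;       /* load #2: may be NULL now */

ACCESS_ONCE() forces a single load into a local variable, which is then tested and dereferenced safely.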
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index e62a4ccea54d..958bd1540c30 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -35,6 +35,7 @@ static ssize_t __ref store_online(struct sys_device *dev, struct sysdev_attribut
         struct cpu *cpu = container_of(dev, struct cpu, sysdev);
         ssize_t ret;
 
+        cpu_hotplug_driver_lock();
         switch (buf[0]) {
         case '0':
                 ret = cpu_down(cpu->sysdev.id);
@@ -49,6 +50,7 @@ static ssize_t __ref store_online(struct sys_device *dev, struct sysdev_attribut
         default:
                 ret = -EINVAL;
         }
+        cpu_hotplug_driver_unlock();
 
         if (ret >= 0)
                 ret = count;
@@ -72,6 +74,38 @@ void unregister_cpu(struct cpu *cpu)
         per_cpu(cpu_sys_devices, logical_cpu) = NULL;
         return;
 }
+
+#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
+static ssize_t cpu_probe_store(struct class *class, const char *buf,
+                               size_t count)
+{
+        return arch_cpu_probe(buf, count);
+}
+
+static ssize_t cpu_release_store(struct class *class, const char *buf,
+                                 size_t count)
+{
+        return arch_cpu_release(buf, count);
+}
+
+static CLASS_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
+static CLASS_ATTR(release, S_IWUSR, NULL, cpu_release_store);
+
+int __init cpu_probe_release_init(void)
+{
+        int rc;
+
+        rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
+                               &class_attr_probe.attr);
+        if (!rc)
+                rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
+                                       &class_attr_release.attr);
+
+        return rc;
+}
+device_initcall(cpu_probe_release_init);
+#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
+
 #else /* ... !CONFIG_HOTPLUG_CPU */
 static inline void register_cpu_control(struct cpu *cpu)
 {
@@ -97,7 +131,7 @@ static ssize_t show_crash_notes(struct sys_device *dev, struct sysdev_attribute
          * boot up and this data does not change there after. Hence this
          * operation should be safe. No locking required.
          */
-        addr = __pa(per_cpu_ptr(crash_notes, cpunum));
+        addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpunum));
         rc = sprintf(buf, "%Lx\n", addr);
         return rc;
 }
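
The new probe/release class attributes only exist when the architecture selects CONFIG_ARCH_CPU_PROBE_RELEASE and supplies arch_cpu_probe()/arch_cpu_release(). A rough sketch of such a hook, assuming a hypothetical architecture that parses a CPU number from the buffer (the real body is entirely arch-specific):

        ssize_t arch_cpu_probe(const char *buf, size_t count)
        {
                unsigned long cpu;

                if (strict_strtoul(buf, 0, &cpu))
                        return -EINVAL;
                /* make the hardware thread available to the kernel here */
                return count;
        }

A write to /sys/devices/system/cpu/probe is then routed into this hook via cpu_probe_store().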
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 979d159b5cd1..ee95c76bfd3d 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -188,7 +188,7 @@ EXPORT_SYMBOL_GPL(wait_for_device_probe);
  * @dev: device to try to bind to the driver
  *
  * This function returns -ENODEV if the device is not registered,
- * 1 if the device is bound sucessfully and 0 otherwise.
+ * 1 if the device is bound successfully and 0 otherwise.
  *
  * This function must be called with @dev->sem held. When called for a
  * USB interface, @dev->parent->sem must be held as well.
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index a1cb5afe6801..50375bb8e51d 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -32,6 +32,8 @@ static int dev_mount = 1;
 static int dev_mount;
 #endif
 
+static rwlock_t dirlock;
+
 static int __init mount_param(char *str)
 {
         dev_mount = simple_strtoul(str, NULL, 0);
@@ -74,47 +76,35 @@ static int dev_mkdir(const char *name, mode_t mode)
         dentry = lookup_create(&nd, 1);
         if (!IS_ERR(dentry)) {
                 err = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
+                if (!err)
+                        /* mark as kernel-created inode */
+                        dentry->d_inode->i_private = &dev_mnt;
                 dput(dentry);
         } else {
                 err = PTR_ERR(dentry);
         }
-        mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
 
+        mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
         path_put(&nd.path);
         return err;
 }
 
 static int create_path(const char *nodepath)
 {
-        char *path;
-        struct nameidata nd;
-        int err = 0;
-
-        path = kstrdup(nodepath, GFP_KERNEL);
-        if (!path)
-                return -ENOMEM;
-
-        err = vfs_path_lookup(dev_mnt->mnt_root, dev_mnt,
-                              path, LOOKUP_PARENT, &nd);
-        if (err == 0) {
-                struct dentry *dentry;
-
-                /* create directory right away */
-                dentry = lookup_create(&nd, 1);
-                if (!IS_ERR(dentry)) {
-                        err = vfs_mkdir(nd.path.dentry->d_inode,
-                                        dentry, 0755);
-                        dput(dentry);
-                }
-                mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
+        int err;
 
-                path_put(&nd.path);
-        } else if (err == -ENOENT) {
+        read_lock(&dirlock);
+        err = dev_mkdir(nodepath, 0755);
+        if (err == -ENOENT) {
+                char *path;
                 char *s;
 
                 /* parent directories do not exist, create them */
+                path = kstrdup(nodepath, GFP_KERNEL);
+                if (!path)
+                        return -ENOMEM;
                 s = path;
-                while (1) {
+                for (;;) {
                         s = strchr(s, '/');
                         if (!s)
                                 break;
@@ -125,9 +115,9 @@ static int create_path(const char *nodepath)
                         s[0] = '/';
                         s++;
                 }
+                kfree(path);
         }
-
-        kfree(path);
+        read_unlock(&dirlock);
         return err;
 }
 
@@ -156,34 +146,40 @@ int devtmpfs_create_node(struct device *dev)
                 mode |= S_IFCHR;
 
         curr_cred = override_creds(&init_cred);
+
         err = vfs_path_lookup(dev_mnt->mnt_root, dev_mnt,
                               nodename, LOOKUP_PARENT, &nd);
         if (err == -ENOENT) {
-                /* create missing parent directories */
                 create_path(nodename);
                 err = vfs_path_lookup(dev_mnt->mnt_root, dev_mnt,
                                       nodename, LOOKUP_PARENT, &nd);
-                if (err)
-                        goto out;
         }
+        if (err)
+                goto out;
 
         dentry = lookup_create(&nd, 0);
         if (!IS_ERR(dentry)) {
-                int umask;
-
-                umask = sys_umask(0000);
                 err = vfs_mknod(nd.path.dentry->d_inode,
                                 dentry, mode, dev->devt);
-                sys_umask(umask);
-                /* mark as kernel created inode */
-                if (!err)
+                if (!err) {
+                        struct iattr newattrs;
+
+                        /* fixup possibly umasked mode */
+                        newattrs.ia_mode = mode;
+                        newattrs.ia_valid = ATTR_MODE;
+                        mutex_lock(&dentry->d_inode->i_mutex);
+                        notify_change(dentry, &newattrs);
+                        mutex_unlock(&dentry->d_inode->i_mutex);
+
+                        /* mark as kernel-created inode */
                         dentry->d_inode->i_private = &dev_mnt;
+                }
                 dput(dentry);
         } else {
                 err = PTR_ERR(dentry);
         }
-        mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
 
+        mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
         path_put(&nd.path);
 out:
         kfree(tmp);
@@ -205,16 +201,21 @@ static int dev_rmdir(const char *name)
         mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
         dentry = lookup_one_len(nd.last.name, nd.path.dentry, nd.last.len);
         if (!IS_ERR(dentry)) {
-                if (dentry->d_inode)
-                        err = vfs_rmdir(nd.path.dentry->d_inode, dentry);
-                else
+                if (dentry->d_inode) {
+                        if (dentry->d_inode->i_private == &dev_mnt)
+                                err = vfs_rmdir(nd.path.dentry->d_inode,
+                                                dentry);
+                        else
+                                err = -EPERM;
+                } else {
                         err = -ENOENT;
+                }
                 dput(dentry);
         } else {
                 err = PTR_ERR(dentry);
         }
-        mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
 
+        mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
         path_put(&nd.path);
         return err;
 }
@@ -228,7 +229,8 @@ static int delete_path(const char *nodepath)
         if (!path)
                 return -ENOMEM;
 
-        while (1) {
+        write_lock(&dirlock);
+        for (;;) {
                 char *base;
 
                 base = strrchr(path, '/');
@@ -239,6 +241,7 @@
                 if (err)
                         break;
         }
+        write_unlock(&dirlock);
 
         kfree(path);
         return err;
@@ -322,9 +325,8 @@ out:
  * If configured, or requested by the commandline, devtmpfs will be
  * auto-mounted after the kernel mounted the root filesystem.
  */
-int devtmpfs_mount(const char *mountpoint)
+int devtmpfs_mount(const char *mntdir)
 {
-        struct path path;
         int err;
 
         if (!dev_mount)
@@ -333,15 +335,11 @@ int devtmpfs_mount(const char *mountpoint)
         if (!dev_mnt)
                 return 0;
 
-        err = kern_path(mountpoint, LOOKUP_FOLLOW, &path);
-        if (err)
-                return err;
-        err = do_add_mount(dev_mnt, &path, 0, NULL);
+        err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
         if (err)
                 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
         else
                 printk(KERN_INFO "devtmpfs: mounted\n");
-        path_put(&path);
         return err;
 }
 
@@ -354,6 +352,8 @@ int __init devtmpfs_init(void)
         int err;
         struct vfsmount *mnt;
 
+        rwlock_init(&dirlock);
+
         err = register_filesystem(&dev_fs_type);
         if (err) {
                 printk(KERN_ERR "devtmpfs: unable to register devtmpfs "
@@ -361,7 +361,7 @@ int __init devtmpfs_init(void)
                 return err;
         }
 
-        mnt = kern_mount(&dev_fs_type);
+        mnt = kern_mount_data(&dev_fs_type, "mode=0755");
         if (IS_ERR(mnt)) {
                 err = PTR_ERR(mnt);
                 printk(KERN_ERR "devtmpfs: unable to create devtmpfs %i\n", err);
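
The new dirlock closes a race between node creation and delete_path(): creators take the lock shared, so many nodes can populate parent directories concurrently, while removal of now-empty parents takes it exclusive. The locking rule in outline (a sketch, not patch code):

        static rwlock_t dirlock;

        static void creator(void)               /* create_path() side */
        {
                read_lock(&dirlock);
                /* mkdir missing parents, mknod the node */
                read_unlock(&dirlock);
        }

        static void deleter(void)               /* delete_path() side */
        {
                write_lock(&dirlock);
                /* rmdir empty parents bottom-up; no creator can
                 * repopulate a directory while we hold the lock */
                write_unlock(&dirlock);
        }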
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 7376367bcb80..a95024166b66 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -601,12 +601,9 @@ request_firmware_work_func(void *arg)
         }
         ret = _request_firmware(&fw, fw_work->name, fw_work->device,
                 fw_work->uevent);
-        if (ret < 0)
-                fw_work->cont(NULL, fw_work->context);
-        else {
-                fw_work->cont(fw, fw_work->context);
-                release_firmware(fw);
-        }
+
+        fw_work->cont(fw, fw_work->context);
+
         module_put(fw_work->module);
         kfree(fw_work);
         return ret;
@@ -619,6 +616,7 @@ request_firmware_work_func(void *arg)
  *      is non-zero else the firmware copy must be done manually.
  * @name: name of firmware file
  * @device: device for which firmware is being loaded
+ * @gfp: allocation flags
  * @context: will be passed over to @cont, and
  *      @fw may be %NULL if firmware request fails.
  * @cont: function will be called asynchronously when the firmware
@@ -631,12 +629,12 @@ request_firmware_work_func(void *arg)
 int
 request_firmware_nowait(
         struct module *module, int uevent,
-        const char *name, struct device *device, void *context,
+        const char *name, struct device *device, gfp_t gfp, void *context,
         void (*cont)(const struct firmware *fw, void *context))
 {
         struct task_struct *task;
         struct firmware_work *fw_work = kmalloc(sizeof (struct firmware_work),
-                                                GFP_ATOMIC);
+                                                gfp);
 
         if (!fw_work)
                 return -ENOMEM;
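
This is an interface change for every request_firmware_nowait() caller: the call now takes allocation flags, and the continuation always receives the firmware pointer (NULL if the request failed) and owns it, so it must call release_firmware() itself. A caller sketch with hypothetical names:

        static void my_fw_cont(const struct firmware *fw, void *context)
        {
                if (!fw)
                        return;                 /* request failed */
                /* consume fw->data / fw->size ... */
                release_firmware(fw);           /* now the callback's job */
        }

        ret = request_firmware_nowait(THIS_MODULE, 1, "my_device.fw",
                                      dev, GFP_KERNEL, ctx, my_fw_cont);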
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 989429cfed88..d7d77d4a402c 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -63,6 +63,20 @@ void unregister_memory_notifier(struct notifier_block *nb)
 }
 EXPORT_SYMBOL(unregister_memory_notifier);
 
+static ATOMIC_NOTIFIER_HEAD(memory_isolate_chain);
+
+int register_memory_isolate_notifier(struct notifier_block *nb)
+{
+        return atomic_notifier_chain_register(&memory_isolate_chain, nb);
+}
+EXPORT_SYMBOL(register_memory_isolate_notifier);
+
+void unregister_memory_isolate_notifier(struct notifier_block *nb)
+{
+        atomic_notifier_chain_unregister(&memory_isolate_chain, nb);
+}
+EXPORT_SYMBOL(unregister_memory_isolate_notifier);
+
 /*
  * register_memory - Setup a sysfs device for a memory block
  */
@@ -157,6 +171,11 @@ int memory_notify(unsigned long val, void *v)
         return blocking_notifier_call_chain(&memory_chain, val, v);
 }
 
+int memory_isolate_notify(unsigned long val, void *v)
+{
+        return atomic_notifier_call_chain(&memory_isolate_chain, val, v);
+}
+
 /*
  * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
  * OK to have direct references to sparsemem variables in here.
@@ -341,6 +360,64 @@ static inline int memory_probe_init(void)
 }
 #endif
 
+#ifdef CONFIG_MEMORY_FAILURE
+/*
+ * Support for offlining pages of memory
+ */
+
+/* Soft offline a page */
+static ssize_t
+store_soft_offline_page(struct class *class, const char *buf, size_t count)
+{
+        int ret;
+        u64 pfn;
+        if (!capable(CAP_SYS_ADMIN))
+                return -EPERM;
+        if (strict_strtoull(buf, 0, &pfn) < 0)
+                return -EINVAL;
+        pfn >>= PAGE_SHIFT;
+        if (!pfn_valid(pfn))
+                return -ENXIO;
+        ret = soft_offline_page(pfn_to_page(pfn), 0);
+        return ret == 0 ? count : ret;
+}
+
+/* Forcibly offline a page, including killing processes. */
+static ssize_t
+store_hard_offline_page(struct class *class, const char *buf, size_t count)
+{
+        int ret;
+        u64 pfn;
+        if (!capable(CAP_SYS_ADMIN))
+                return -EPERM;
+        if (strict_strtoull(buf, 0, &pfn) < 0)
+                return -EINVAL;
+        pfn >>= PAGE_SHIFT;
+        ret = __memory_failure(pfn, 0, 0);
+        return ret ? ret : count;
+}
+
+static CLASS_ATTR(soft_offline_page, 0644, NULL, store_soft_offline_page);
+static CLASS_ATTR(hard_offline_page, 0644, NULL, store_hard_offline_page);
+
+static __init int memory_fail_init(void)
+{
+        int err;
+
+        err = sysfs_create_file(&memory_sysdev_class.kset.kobj,
+                                &class_attr_soft_offline_page.attr);
+        if (!err)
+                err = sysfs_create_file(&memory_sysdev_class.kset.kobj,
+                                        &class_attr_hard_offline_page.attr);
+        return err;
+}
+#else
+static inline int memory_fail_init(void)
+{
+        return 0;
+}
+#endif
+
 /*
  * Note that phys_device is optional. It is here to allow for
  * differentiation between which *physical* devices each
@@ -473,6 +550,9 @@ int __init memory_dev_init(void)
         err = memory_probe_init();
         if (!ret)
                 ret = err;
+        err = memory_fail_init();
+        if (!ret)
+                ret = err;
         err = block_size_init();
         if (!ret)
                 ret = err;
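
Note that both offline handlers parse the written value and then shift it right by PAGE_SHIFT, so userspace writes a physical byte address to soft_offline_page/hard_offline_page, not a page frame number. The new isolate chain gives page-allocator code a hook during range isolation; a minimal client sketch (names hypothetical):

        static int my_isolate_cb(struct notifier_block *nb,
                                 unsigned long action, void *arg)
        {
                /* arg describes the range being isolated */
                return NOTIFY_OK;
        }

        static struct notifier_block my_isolate_nb = {
                .notifier_call = my_isolate_cb,
        };

        static int __init my_init(void)
        {
                return register_memory_isolate_notifier(&my_isolate_nb);
        }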
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 1fe5536d404f..70122791683d 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -173,6 +173,47 @@ static ssize_t node_read_distance(struct sys_device * dev,
 }
 static SYSDEV_ATTR(distance, S_IRUGO, node_read_distance, NULL);
 
+#ifdef CONFIG_HUGETLBFS
+/*
+ * hugetlbfs per node attributes registration interface:
+ * When/if hugetlb[fs] subsystem initializes [sometime after this module],
+ * it will register its per node attributes for all online nodes with
+ * memory. It will also call register_hugetlbfs_with_node(), below, to
+ * register its attribute registration functions with this node driver.
+ * Once these hooks have been initialized, the node driver will call into
+ * the hugetlb module to [un]register attributes for hot-plugged nodes.
+ */
+static node_registration_func_t __hugetlb_register_node;
+static node_registration_func_t __hugetlb_unregister_node;
+
+static inline bool hugetlb_register_node(struct node *node)
+{
+        if (__hugetlb_register_node &&
+                        node_state(node->sysdev.id, N_HIGH_MEMORY)) {
+                __hugetlb_register_node(node);
+                return true;
+        }
+        return false;
+}
+
+static inline void hugetlb_unregister_node(struct node *node)
+{
+        if (__hugetlb_unregister_node)
+                __hugetlb_unregister_node(node);
+}
+
+void register_hugetlbfs_with_node(node_registration_func_t doregister,
+                                  node_registration_func_t unregister)
+{
+        __hugetlb_register_node = doregister;
+        __hugetlb_unregister_node = unregister;
+}
+#else
+static inline void hugetlb_register_node(struct node *node) {}
+
+static inline void hugetlb_unregister_node(struct node *node) {}
+#endif
+
 
 /*
  * register_node - Setup a sysfs device for a node.
@@ -196,6 +237,8 @@ int register_node(struct node *node, int num, struct node *parent)
                 sysdev_create_file(&node->sysdev, &attr_distance);
 
                 scan_unevictable_register_node(node);
+
+                hugetlb_register_node(node);
         }
         return error;
 }
@@ -216,6 +259,7 @@ void unregister_node(struct node *node)
         sysdev_remove_file(&node->sysdev, &attr_distance);
 
         scan_unevictable_unregister_node(node);
+        hugetlb_unregister_node(node);  /* no-op, if memoryless node */
 
         sysdev_unregister(&node->sysdev);
 }
@@ -227,26 +271,43 @@ struct node node_devices[MAX_NUMNODES];
  */
 int register_cpu_under_node(unsigned int cpu, unsigned int nid)
 {
-        if (node_online(nid)) {
-                struct sys_device *obj = get_cpu_sysdev(cpu);
-                if (!obj)
-                        return 0;
-                return sysfs_create_link(&node_devices[nid].sysdev.kobj,
-                                         &obj->kobj,
-                                         kobject_name(&obj->kobj));
-        }
+        int ret;
+        struct sys_device *obj;
 
-        return 0;
+        if (!node_online(nid))
+                return 0;
+
+        obj = get_cpu_sysdev(cpu);
+        if (!obj)
+                return 0;
+
+        ret = sysfs_create_link(&node_devices[nid].sysdev.kobj,
+                                &obj->kobj,
+                                kobject_name(&obj->kobj));
+        if (ret)
+                return ret;
+
+        return sysfs_create_link(&obj->kobj,
+                                 &node_devices[nid].sysdev.kobj,
+                                 kobject_name(&node_devices[nid].sysdev.kobj));
 }
 
 int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
 {
-        if (node_online(nid)) {
-                struct sys_device *obj = get_cpu_sysdev(cpu);
-                if (obj)
-                        sysfs_remove_link(&node_devices[nid].sysdev.kobj,
-                                          kobject_name(&obj->kobj));
-        }
+        struct sys_device *obj;
+
+        if (!node_online(nid))
+                return 0;
+
+        obj = get_cpu_sysdev(cpu);
+        if (!obj)
+                return 0;
+
+        sysfs_remove_link(&node_devices[nid].sysdev.kobj,
+                          kobject_name(&obj->kobj));
+        sysfs_remove_link(&obj->kobj,
+                          kobject_name(&node_devices[nid].sysdev.kobj));
+
         return 0;
 }
 
@@ -268,6 +329,7 @@ static int get_nid_for_pfn(unsigned long pfn)
 /* register memory section under specified node if it spans that node */
 int register_mem_sect_under_node(struct memory_block *mem_blk, int nid)
 {
+        int ret;
         unsigned long pfn, sect_start_pfn, sect_end_pfn;
 
         if (!mem_blk)
@@ -284,9 +346,15 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, int nid)
                         continue;
                 if (page_nid != nid)
                         continue;
-                return sysfs_create_link_nowarn(&node_devices[nid].sysdev.kobj,
+                ret = sysfs_create_link_nowarn(&node_devices[nid].sysdev.kobj,
                                         &mem_blk->sysdev.kobj,
                                         kobject_name(&mem_blk->sysdev.kobj));
+                if (ret)
+                        return ret;
+
+                return sysfs_create_link_nowarn(&mem_blk->sysdev.kobj,
+                                &node_devices[nid].sysdev.kobj,
+                                kobject_name(&node_devices[nid].sysdev.kobj));
         }
         /* mem section does not span the specified node */
         return 0;
@@ -295,12 +363,16 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, int nid)
 /* unregister memory section under all nodes that it spans */
 int unregister_mem_sect_under_nodes(struct memory_block *mem_blk)
 {
-        nodemask_t unlinked_nodes;
+        NODEMASK_ALLOC(nodemask_t, unlinked_nodes, GFP_KERNEL);
         unsigned long pfn, sect_start_pfn, sect_end_pfn;
 
-        if (!mem_blk)
+        if (!mem_blk) {
+                NODEMASK_FREE(unlinked_nodes);
                 return -EFAULT;
-        nodes_clear(unlinked_nodes);
+        }
+        if (!unlinked_nodes)
+                return -ENOMEM;
+        nodes_clear(*unlinked_nodes);
         sect_start_pfn = section_nr_to_pfn(mem_blk->phys_index);
         sect_end_pfn = sect_start_pfn + PAGES_PER_SECTION - 1;
         for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
@@ -311,11 +383,14 @@ int unregister_mem_sect_under_nodes(struct memory_block *mem_blk)
                         continue;
                 if (!node_online(nid))
                         continue;
-                if (node_test_and_set(nid, unlinked_nodes))
+                if (node_test_and_set(nid, *unlinked_nodes))
                         continue;
                 sysfs_remove_link(&node_devices[nid].sysdev.kobj,
                          kobject_name(&mem_blk->sysdev.kobj));
+                sysfs_remove_link(&mem_blk->sysdev.kobj,
+                         kobject_name(&node_devices[nid].sysdev.kobj));
         }
+        NODEMASK_FREE(unlinked_nodes);
         return 0;
 }
 
@@ -345,9 +420,77 @@ static int link_mem_sections(int nid)
         }
         return err;
 }
-#else
+
+#ifdef CONFIG_HUGETLBFS
+/*
+ * Handle per node hstate attribute [un]registration on transistions
+ * to/from memoryless state.
+ */
+static void node_hugetlb_work(struct work_struct *work)
+{
+        struct node *node = container_of(work, struct node, node_work);
+
+        /*
+         * We only get here when a node transitions to/from memoryless state.
+         * We can detect which transition occurred by examining whether the
+         * node has memory now. hugetlb_register_node() already check this
+         * so we try to register the attributes. If that fails, then the
+         * node has transitioned to memoryless, try to unregister the
+         * attributes.
+         */
+        if (!hugetlb_register_node(node))
+                hugetlb_unregister_node(node);
+}
+
+static void init_node_hugetlb_work(int nid)
+{
+        INIT_WORK(&node_devices[nid].node_work, node_hugetlb_work);
+}
+
+static int node_memory_callback(struct notifier_block *self,
+                                unsigned long action, void *arg)
+{
+        struct memory_notify *mnb = arg;
+        int nid = mnb->status_change_nid;
+
+        switch (action) {
+        case MEM_ONLINE:
+        case MEM_OFFLINE:
+                /*
+                 * offload per node hstate [un]registration to a work thread
+                 * when transitioning to/from memoryless state.
+                 */
+                if (nid != NUMA_NO_NODE)
+                        schedule_work(&node_devices[nid].node_work);
+                break;
+
+        case MEM_GOING_ONLINE:
+        case MEM_GOING_OFFLINE:
+        case MEM_CANCEL_ONLINE:
+        case MEM_CANCEL_OFFLINE:
+        default:
+                break;
+        }
+
+        return NOTIFY_OK;
+}
+#endif /* CONFIG_HUGETLBFS */
+#else /* !CONFIG_MEMORY_HOTPLUG_SPARSE */
+
 static int link_mem_sections(int nid) { return 0; }
 #endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
+
+#if !defined(CONFIG_MEMORY_HOTPLUG_SPARSE) || \
+                !defined(CONFIG_HUGETLBFS)
+static inline int node_memory_callback(struct notifier_block *self,
+                                unsigned long action, void *arg)
+{
+        return NOTIFY_OK;
+}
+
+static void init_node_hugetlb_work(int nid) { }
+
+#endif
 
 int register_one_node(int nid)
 {
@@ -371,6 +514,9 @@ int register_one_node(int nid)
 
                 /* link memory sections under this node */
                 error = link_mem_sections(nid);
+
+                /* initialize work queue for memory hot plug */
+                init_node_hugetlb_work(nid);
         }
 
         return error;
@@ -460,13 +606,17 @@ static int node_states_init(void)
         return err;
 }
 
+#define NODE_CALLBACK_PRI       2       /* lower than SLAB */
 static int __init register_node_type(void)
 {
         int ret;
 
         ret = sysdev_class_register(&node_class);
-        if (!ret)
+        if (!ret) {
                 ret = node_states_init();
+                hotplug_memory_notifier(node_memory_callback,
+                                        NODE_CALLBACK_PRI);
+        }
 
         /*
          * Note: we're not going to unregister the node class if we fail
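
The register/unregister changes make the node topology links bidirectional: in addition to the existing nodeN links to its cpus and memory blocks, each cpu and memory-block device now links back to its node. The resulting layout for a hypothetical cpu0 on node0:

        /sys/devices/system/node/node0/cpu0  -> ../../cpu/cpu0
        /sys/devices/system/cpu/cpu0/node0   -> ../../node/node0

The NODEMASK_ALLOC()/NODEMASK_FREE() conversion in unregister_mem_sect_under_nodes() moves the nodemask off the kernel stack, which matters on configurations with large MAX_NUMNODES.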
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 4fa954b07ac4..9d2ee25deaf5 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -1000,7 +1000,7 @@ static __initdata LIST_HEAD(early_platform_device_list);
 int __init early_platform_driver_register(struct early_platform_driver *epdrv,
                                           char *buf)
 {
-        unsigned long index;
+        char *tmp;
         int n;
 
         /* Simply add the driver to the end of the global list.
@@ -1019,13 +1019,28 @@ int __init early_platform_driver_register(struct early_platform_driver *epdrv,
         if (buf && !strncmp(buf, epdrv->pdrv->driver.name, n)) {
                 list_move(&epdrv->list, &early_platform_driver_list);
 
-                if (!strcmp(buf, epdrv->pdrv->driver.name))
+                /* Allow passing parameters after device name */
+                if (buf[n] == '\0' || buf[n] == ',')
                         epdrv->requested_id = -1;
-                else if (buf[n] == '.' && strict_strtoul(&buf[n + 1], 10,
-                                                         &index) == 0)
-                        epdrv->requested_id = index;
-                else
-                        epdrv->requested_id = EARLY_PLATFORM_ID_ERROR;
+                else {
+                        epdrv->requested_id = simple_strtoul(&buf[n + 1],
+                                                             &tmp, 10);
+
+                        if (buf[n] != '.' || (tmp == &buf[n + 1])) {
+                                epdrv->requested_id = EARLY_PLATFORM_ID_ERROR;
+                                n = 0;
+                        } else
+                                n += strcspn(&buf[n + 1], ",") + 1;
+                }
+
+                if (buf[n] == ',')
+                        n++;
+
+                if (epdrv->bufsize) {
+                        memcpy(epdrv->buffer, &buf[n],
+                               min_t(int, epdrv->bufsize, strlen(&buf[n]) + 1));
+                        epdrv->buffer[epdrv->bufsize - 1] = '\0';
+                }
         }
 
         return 0;
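
The rewritten parser lets an early platform string carry parameters after the device name. For a hypothetical buf of "sh_tmu.0,earlytimer" passed in by an early param handler:

        /* buf = "sh_tmu.0,earlytimer"
         *   - "sh_tmu" matches the driver name (first n bytes)
         *   - ".0" parses to epdrv->requested_id = 0
         *   - the text after ',' is copied into epdrv->buffer
         */

A bare name, or a name followed directly by ',', yields requested_id = -1; a malformed id degrades to EARLY_PLATFORM_ID_ERROR.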
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 8aa2443182d5..48adf80926a0 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -23,8 +23,8 @@
 #include <linux/pm.h>
 #include <linux/pm_runtime.h>
 #include <linux/resume-trace.h>
-#include <linux/rwsem.h>
 #include <linux/interrupt.h>
+#include <linux/sched.h>
 
 #include "../base.h"
 #include "power.h"
@@ -161,6 +161,32 @@ void device_pm_move_last(struct device *dev)
         list_move_tail(&dev->power.entry, &dpm_list);
 }
 
+static ktime_t initcall_debug_start(struct device *dev)
+{
+        ktime_t calltime = ktime_set(0, 0);
+
+        if (initcall_debug) {
+                pr_info("calling  %s+ @ %i\n",
+                                dev_name(dev), task_pid_nr(current));
+                calltime = ktime_get();
+        }
+
+        return calltime;
+}
+
+static void initcall_debug_report(struct device *dev, ktime_t calltime,
+                                  int error)
+{
+        ktime_t delta, rettime;
+
+        if (initcall_debug) {
+                rettime = ktime_get();
+                delta = ktime_sub(rettime, calltime);
+                pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
+                        error, (unsigned long long)ktime_to_ns(delta) >> 10);
+        }
+}
+
 /**
  * pm_op - Execute the PM operation appropriate for given PM event.
  * @dev: Device to handle.
@@ -172,6 +198,9 @@ static int pm_op(struct device *dev,
                  pm_message_t state)
 {
         int error = 0;
+        ktime_t calltime;
+
+        calltime = initcall_debug_start(dev);
 
         switch (state.event) {
 #ifdef CONFIG_SUSPEND
@@ -219,6 +248,9 @@
         default:
                 error = -EINVAL;
         }
+
+        initcall_debug_report(dev, calltime, error);
+
         return error;
 }
 
@@ -236,6 +268,13 @@ static int pm_noirq_op(struct device *dev,
                         pm_message_t state)
 {
         int error = 0;
+        ktime_t calltime, delta, rettime;
+
+        if (initcall_debug) {
+                pr_info("calling  %s_i+ @ %i\n",
+                                dev_name(dev), task_pid_nr(current));
+                calltime = ktime_get();
+        }
 
         switch (state.event) {
 #ifdef CONFIG_SUSPEND
@@ -283,6 +322,15 @@
         default:
                 error = -EINVAL;
         }
+
+        if (initcall_debug) {
+                rettime = ktime_get();
+                delta = ktime_sub(rettime, calltime);
+                printk("initcall %s_i+ returned %d after %Ld usecs\n",
+                        dev_name(dev), error,
+                        (unsigned long long)ktime_to_ns(delta) >> 10);
+        }
+
         return error;
 }
 
@@ -324,6 +372,23 @@ static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
                 kobject_name(&dev->kobj), pm_verb(state.event), info, error);
 }
 
+static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
+{
+        ktime_t calltime;
+        s64 usecs64;
+        int usecs;
+
+        calltime = ktime_get();
+        usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
+        do_div(usecs64, NSEC_PER_USEC);
+        usecs = usecs64;
+        if (usecs == 0)
+                usecs = 1;
+        pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
+                info ?: "", info ? " " : "", pm_verb(state.event),
+                usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
+}
+
 /*------------------------- Resume routines -------------------------*/
 
 /**
@@ -341,14 +406,11 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
         TRACE_DEVICE(dev);
         TRACE_RESUME(0);
 
-        if (!dev->bus)
-                goto End;
-
-        if (dev->bus->pm) {
+        if (dev->bus && dev->bus->pm) {
                 pm_dev_dbg(dev, state, "EARLY ");
                 error = pm_noirq_op(dev, dev->bus->pm, state);
         }
- End:
+
         TRACE_RESUME(error);
         return error;
 }
@@ -363,6 +425,7 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
 void dpm_resume_noirq(pm_message_t state)
 {
         struct device *dev;
+        ktime_t starttime = ktime_get();
 
         mutex_lock(&dpm_list_mtx);
         transition_started = false;
@@ -376,11 +439,32 @@ void dpm_resume_noirq(pm_message_t state)
                         pm_dev_err(dev, state, " early", error);
         }
         mutex_unlock(&dpm_list_mtx);
+        dpm_show_time(starttime, state, "early");
         resume_device_irqs();
 }
 EXPORT_SYMBOL_GPL(dpm_resume_noirq);
 
 /**
+ * legacy_resume - Execute a legacy (bus or class) resume callback for device.
+ * dev: Device to resume.
+ * cb: Resume callback to execute.
+ */
+static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
+{
+        int error;
+        ktime_t calltime;
+
+        calltime = initcall_debug_start(dev);
+
+        error = cb(dev);
+        suspend_report_result(cb, error);
+
+        initcall_debug_report(dev, calltime, error);
+
+        return error;
+}
+
+/**
  * device_resume - Execute "resume" callbacks for given device.
  * @dev: Device to handle.
  * @state: PM transition of the system being carried out.
@@ -400,7 +484,7 @@ static int device_resume(struct device *dev, pm_message_t state)
                         error = pm_op(dev, dev->bus->pm, state);
                 } else if (dev->bus->resume) {
                         pm_dev_dbg(dev, state, "legacy ");
-                        error = dev->bus->resume(dev);
+                        error = legacy_resume(dev, dev->bus->resume);
                 }
                 if (error)
                         goto End;
@@ -421,7 +505,7 @@ static int device_resume(struct device *dev, pm_message_t state)
                         error = pm_op(dev, dev->class->pm, state);
                 } else if (dev->class->resume) {
                         pm_dev_dbg(dev, state, "legacy class ");
-                        error = dev->class->resume(dev);
+                        error = legacy_resume(dev, dev->class->resume);
                 }
         }
  End:
@@ -441,6 +525,7 @@ static int device_resume(struct device *dev, pm_message_t state)
 static void dpm_resume(pm_message_t state)
 {
         struct list_head list;
+        ktime_t starttime = ktime_get();
 
         INIT_LIST_HEAD(&list);
         mutex_lock(&dpm_list_mtx);
@@ -469,6 +554,7 @@ static void dpm_resume(pm_message_t state)
         }
         list_splice(&list, &dpm_list);
         mutex_unlock(&dpm_list_mtx);
+        dpm_show_time(starttime, state, NULL);
 }
 
 /**
@@ -521,7 +607,7 @@ static void dpm_complete(pm_message_t state)
                 mutex_unlock(&dpm_list_mtx);
 
                 device_complete(dev, state);
-                pm_runtime_put_noidle(dev);
+                pm_runtime_put_sync(dev);
 
                 mutex_lock(&dpm_list_mtx);
         }
@@ -584,10 +670,7 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
 {
         int error = 0;
 
-        if (!dev->bus)
-                return 0;
-
-        if (dev->bus->pm) {
+        if (dev->bus && dev->bus->pm) {
                 pm_dev_dbg(dev, state, "LATE ");
                 error = pm_noirq_op(dev, dev->bus->pm, state);
         }
@@ -604,6 +687,7 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
 int dpm_suspend_noirq(pm_message_t state)
 {
         struct device *dev;
+        ktime_t starttime = ktime_get();
         int error = 0;
 
         suspend_device_irqs();
@@ -619,11 +703,34 @@ int dpm_suspend_noirq(pm_message_t state)
         mutex_unlock(&dpm_list_mtx);
         if (error)
                 dpm_resume_noirq(resume_event(state));
+        else
+                dpm_show_time(starttime, state, "late");
         return error;
 }
 EXPORT_SYMBOL_GPL(dpm_suspend_noirq);
 
 /**
+ * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
+ * dev: Device to suspend.
+ * cb: Suspend callback to execute.
+ */
+static int legacy_suspend(struct device *dev, pm_message_t state,
+                          int (*cb)(struct device *dev, pm_message_t state))
+{
+        int error;
+        ktime_t calltime;
+
+        calltime = initcall_debug_start(dev);
+
+        error = cb(dev, state);
+        suspend_report_result(cb, error);
+
+        initcall_debug_report(dev, calltime, error);
+
+        return error;
+}
+
+/**
  * device_suspend - Execute "suspend" callbacks for given device.
  * @dev: Device to handle.
  * @state: PM transition of the system being carried out.
@@ -640,8 +747,7 @@ static int device_suspend(struct device *dev, pm_message_t state)
                         error = pm_op(dev, dev->class->pm, state);
                 } else if (dev->class->suspend) {
                         pm_dev_dbg(dev, state, "legacy class ");
-                        error = dev->class->suspend(dev, state);
-                        suspend_report_result(dev->class->suspend, error);
+                        error = legacy_suspend(dev, state, dev->class->suspend);
                 }
                 if (error)
                         goto End;
@@ -662,8 +768,7 @@ static int device_suspend(struct device *dev, pm_message_t state)
                         error = pm_op(dev, dev->bus->pm, state);
                 } else if (dev->bus->suspend) {
                         pm_dev_dbg(dev, state, "legacy ");
-                        error = dev->bus->suspend(dev, state);
-                        suspend_report_result(dev->bus->suspend, error);
+                        error = legacy_suspend(dev, state, dev->bus->suspend);
                 }
         }
  End:
@@ -679,6 +784,7 @@ static int device_suspend(struct device *dev, pm_message_t state)
 static int dpm_suspend(pm_message_t state)
 {
         struct list_head list;
+        ktime_t starttime = ktime_get();
         int error = 0;
 
         INIT_LIST_HEAD(&list);
@@ -704,6 +810,8 @@ static int dpm_suspend(pm_message_t state)
         }
         list_splice(&list, dpm_list.prev);
         mutex_unlock(&dpm_list_mtx);
+        if (!error)
+                dpm_show_time(starttime, state, NULL);
         return error;
 }
 
@@ -772,7 +880,7 @@ static int dpm_prepare(pm_message_t state)
                 pm_runtime_get_noresume(dev);
                 if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) {
                         /* Wake-up requested during system sleep transition. */
-                        pm_runtime_put_noidle(dev);
+                        pm_runtime_put_sync(dev);
                         error = -EBUSY;
                 } else {
                         error = device_prepare(dev, state);
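
All of the ktime plumbing above extends the initcall_debug boot parameter to device power transitions: each suspend/resume callback is bracketed by a "calling ... @ <pid>" / "call ... returned <err> after <n> usecs" pair (see initcall_debug_start()/initcall_debug_report()), and dpm_show_time() adds a per-phase summary built from its format string, e.g. (values hypothetical):

        PM: early resume of devices complete after 12.345 msecs

The pm_runtime_put_noidle() -> pm_runtime_put_sync() conversions are a separate fix: dropping the usage count now also triggers a synchronous runtime-idle check.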
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 846d89e3d122..f8b044e8aef7 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -85,6 +85,19 @@ static int __pm_runtime_idle(struct device *dev)
                 dev->bus->pm->runtime_idle(dev);
 
                 spin_lock_irq(&dev->power.lock);
+        } else if (dev->type && dev->type->pm && dev->type->pm->runtime_idle) {
+                spin_unlock_irq(&dev->power.lock);
+
+                dev->type->pm->runtime_idle(dev);
+
+                spin_lock_irq(&dev->power.lock);
+        } else if (dev->class && dev->class->pm
+            && dev->class->pm->runtime_idle) {
+                spin_unlock_irq(&dev->power.lock);
+
+                dev->class->pm->runtime_idle(dev);
+
+                spin_lock_irq(&dev->power.lock);
         }
 
         dev->power.idle_notification = false;
@@ -185,6 +198,7 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)
         }
 
         dev->power.runtime_status = RPM_SUSPENDING;
+        dev->power.deferred_resume = false;
 
         if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) {
                 spin_unlock_irq(&dev->power.lock);
@@ -193,6 +207,22 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)
 
                 spin_lock_irq(&dev->power.lock);
                 dev->power.runtime_error = retval;
+        } else if (dev->type && dev->type->pm
+            && dev->type->pm->runtime_suspend) {
+                spin_unlock_irq(&dev->power.lock);
+
+                retval = dev->type->pm->runtime_suspend(dev);
+
+                spin_lock_irq(&dev->power.lock);
+                dev->power.runtime_error = retval;
+        } else if (dev->class && dev->class->pm
+            && dev->class->pm->runtime_suspend) {
+                spin_unlock_irq(&dev->power.lock);
+
+                retval = dev->class->pm->runtime_suspend(dev);
+
+                spin_lock_irq(&dev->power.lock);
+                dev->power.runtime_error = retval;
         } else {
                 retval = -ENOSYS;
         }
@@ -200,7 +230,6 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)
         if (retval) {
                 dev->power.runtime_status = RPM_ACTIVE;
                 pm_runtime_cancel_pending(dev);
-                dev->power.deferred_resume = false;
 
                 if (retval == -EAGAIN || retval == -EBUSY) {
                         notify = true;
@@ -217,7 +246,6 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)
         wake_up_all(&dev->power.wait_queue);
 
         if (dev->power.deferred_resume) {
-                dev->power.deferred_resume = false;
                 __pm_runtime_resume(dev, false);
                 retval = -EAGAIN;
                 goto out;
@@ -360,6 +388,22 @@ int __pm_runtime_resume(struct device *dev, bool from_wq)
 
                 spin_lock_irq(&dev->power.lock);
                 dev->power.runtime_error = retval;
+        } else if (dev->type && dev->type->pm
+            && dev->type->pm->runtime_resume) {
+                spin_unlock_irq(&dev->power.lock);
+
+                retval = dev->type->pm->runtime_resume(dev);
+
+                spin_lock_irq(&dev->power.lock);
+                dev->power.runtime_error = retval;
+        } else if (dev->class && dev->class->pm
+            && dev->class->pm->runtime_resume) {
+                spin_unlock_irq(&dev->power.lock);
+
+                retval = dev->class->pm->runtime_resume(dev);
+
+                spin_lock_irq(&dev->power.lock);
+                dev->power.runtime_error = retval;
         } else {
                 retval = -ENOSYS;
         }
@@ -626,6 +670,8 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay)
                 goto out;
 
         dev->power.timer_expires = jiffies + msecs_to_jiffies(delay);
+        if (!dev->power.timer_expires)
+                dev->power.timer_expires = 1;
         mod_timer(&dev->power.suspend_timer, dev->power.timer_expires);
 
  out:
@@ -659,13 +705,17 @@ static int __pm_request_resume(struct device *dev)
 
         pm_runtime_deactivate_timer(dev);
 
+        if (dev->power.runtime_status == RPM_SUSPENDING) {
+                dev->power.deferred_resume = true;
+                return retval;
+        }
         if (dev->power.request_pending) {
                 /* If non-resume request is pending, we can overtake it. */
                 dev->power.request = retval ? RPM_REQ_NONE : RPM_REQ_RESUME;
                 return retval;
-        } else if (retval) {
-                return retval;
         }
+        if (retval)
+                return retval;
 
         dev->power.request = RPM_REQ_RESUME;
         dev->power.request_pending = true;
@@ -696,15 +746,15 @@ EXPORT_SYMBOL_GPL(pm_request_resume);
  * @dev: Device to handle.
  * @sync: If set and the device is suspended, resume it synchronously.
  *
- * Increment the usage count of the device and if it was zero previously,
- * resume it or submit a resume request for it, depending on the value of @sync.
+ * Increment the usage count of the device and resume it or submit a resume
+ * request for it, depending on the value of @sync.
  */
 int __pm_runtime_get(struct device *dev, bool sync)
 {
-        int retval = 1;
+        int retval;
 
-        if (atomic_add_return(1, &dev->power.usage_count) == 1)
-                retval = sync ? pm_runtime_resume(dev) : pm_request_resume(dev);
+        atomic_inc(&dev->power.usage_count);
+        retval = sync ? pm_runtime_resume(dev) : pm_request_resume(dev);
 
         return retval;
 }
@@ -777,7 +827,7 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
         }
 
         if (parent) {
-                spin_lock(&parent->power.lock);
+                spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);
 
                 /*
                  * It is invalid to put an active child under a parent that is
@@ -786,12 +836,10 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
                  */
                 if (!parent->power.disable_depth
                     && !parent->power.ignore_children
-                    && parent->power.runtime_status != RPM_ACTIVE) {
+                    && parent->power.runtime_status != RPM_ACTIVE)
                         error = -EBUSY;
-                } else {
-                        if (dev->power.runtime_status == RPM_SUSPENDED)
-                                atomic_inc(&parent->power.child_count);
-                }
+                else if (dev->power.runtime_status == RPM_SUSPENDED)
+                        atomic_inc(&parent->power.child_count);
 
                 spin_unlock(&parent->power.lock);
 
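
With these hunks, runtime PM callbacks are no longer bus-only: the core now falls back to dev->type->pm and then dev->class->pm for the idle, suspend and resume paths. A sketch of a class supplying its own callbacks (names hypothetical):

        static int my_runtime_suspend(struct device *dev)
        {
                /* put the device into a low-power state */
                return 0;
        }

        static int my_runtime_resume(struct device *dev)
        {
                return 0;
        }

        static const struct dev_pm_ops my_pm_ops = {
                .runtime_suspend = my_runtime_suspend,
                .runtime_resume  = my_runtime_resume,
        };

        static struct class my_class = {
                .name = "myclass",
                .pm   = &my_pm_ops,
        };

The __pm_runtime_get() change is also behavioral: the resume (or resume request) is now issued on every get, not only on the 0 -> 1 usage-count transition.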