author		Tejun Heo <tj@kernel.org>	2010-01-04 19:17:33 -0500
committer	Tejun Heo <tj@kernel.org>	2010-01-04 19:17:33 -0500
commit		32032df6c2f6c9c6b2ada2ce42322231824f70c2 (patch)
tree		b1ce838a37044bb38dfc128e2116ca35630e629a /drivers/base
parent		22b737f4c75197372d64afc6ed1bccd58c00e549 (diff)
parent		c5974b835a909ff15c3b7e6cf6789b5eb919f419 (diff)
Merge branch 'master' into percpu
Conflicts:
arch/powerpc/platforms/pseries/hvCall.S
include/linux/percpu.h
Diffstat (limited to 'drivers/base')
-rw-r--r--	drivers/base/bus.c            |  19
-rw-r--r--	drivers/base/core.c           |  30
-rw-r--r--	drivers/base/cpu.c            |  36
-rw-r--r--	drivers/base/dd.c             |   2
-rw-r--r--	drivers/base/devtmpfs.c       | 101
-rw-r--r--	drivers/base/driver.c         |   6
-rw-r--r--	drivers/base/firmware_class.c |  14
-rw-r--r--	drivers/base/memory.c         |  80
-rw-r--r--	drivers/base/node.c           | 196
-rw-r--r--	drivers/base/platform.c       |  36
-rw-r--r--	drivers/base/power/main.c     | 145
-rw-r--r--	drivers/base/power/runtime.c  |  92
12 files changed, 605 insertions, 152 deletions
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 973bf2ad4e0d..c0c5a43d9fb3 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -689,19 +689,23 @@ int bus_add_driver(struct device_driver *drv)
 		printk(KERN_ERR "%s: driver_add_attrs(%s) failed\n",
 			__func__, drv->name);
 	}
-	error = add_bind_files(drv);
-	if (error) {
-		/* Ditto */
-		printk(KERN_ERR "%s: add_bind_files(%s) failed\n",
-			__func__, drv->name);
+
+	if (!drv->suppress_bind_attrs) {
+		error = add_bind_files(drv);
+		if (error) {
+			/* Ditto */
+			printk(KERN_ERR "%s: add_bind_files(%s) failed\n",
+				__func__, drv->name);
+		}
 	}
 
 	kobject_uevent(&priv->kobj, KOBJ_ADD);
 	return 0;
+
 out_unregister:
+	kobject_put(&priv->kobj);
 	kfree(drv->p);
 	drv->p = NULL;
-	kobject_put(&priv->kobj);
 out_put_bus:
 	bus_put(bus);
 	return error;
@@ -720,7 +724,8 @@ void bus_remove_driver(struct device_driver *drv)
 	if (!drv->bus)
 		return;
 
-	remove_bind_files(drv);
+	if (!drv->suppress_bind_attrs)
+		remove_bind_files(drv);
 	driver_remove_attrs(drv->bus, drv);
 	driver_remove_file(drv, &driver_attr_uevent);
 	klist_remove(&drv->p->knode_bus);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 6bee6af8d8e1..282025770429 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -56,7 +56,14 @@ static inline int device_is_not_partition(struct device *dev)
  */
 const char *dev_driver_string(const struct device *dev)
 {
-	return dev->driver ? dev->driver->name :
+	struct device_driver *drv;
+
+	/* dev->driver can change to NULL underneath us because of unbinding,
+	 * so be careful about accessing it.  dev->bus and dev->class should
+	 * never change once they are set, so they don't need special care.
+	 */
+	drv = ACCESS_ONCE(dev->driver);
+	return drv ? drv->name :
 			(dev->bus ? dev->bus->name :
 			(dev->class ? dev->class->name : ""));
 }
@@ -439,7 +446,8 @@ struct kset *devices_kset;
  * @dev: device.
  * @attr: device attribute descriptor.
  */
-int device_create_file(struct device *dev, struct device_attribute *attr)
+int device_create_file(struct device *dev,
+		       const struct device_attribute *attr)
 {
 	int error = 0;
 	if (dev)
@@ -452,7 +460,8 @@ int device_create_file(struct device *dev, struct device_attribute *attr)
  * @dev: device.
  * @attr: device attribute descriptor.
  */
-void device_remove_file(struct device *dev, struct device_attribute *attr)
+void device_remove_file(struct device *dev,
+			const struct device_attribute *attr)
 {
 	if (dev)
 		sysfs_remove_file(&dev->kobj, &attr->attr);
@@ -463,7 +472,8 @@ void device_remove_file(struct device *dev, struct device_attribute *attr)
  * @dev: device.
  * @attr: device binary attribute descriptor.
  */
-int device_create_bin_file(struct device *dev, struct bin_attribute *attr)
+int device_create_bin_file(struct device *dev,
+			   const struct bin_attribute *attr)
 {
 	int error = -EINVAL;
 	if (dev)
@@ -477,7 +487,8 @@ EXPORT_SYMBOL_GPL(device_create_bin_file);
  * @dev: device.
  * @attr: device binary attribute descriptor.
  */
-void device_remove_bin_file(struct device *dev, struct bin_attribute *attr)
+void device_remove_bin_file(struct device *dev,
+			    const struct bin_attribute *attr)
 {
 	if (dev)
 		sysfs_remove_bin_file(&dev->kobj, attr);
@@ -898,8 +909,10 @@ int device_add(struct device *dev)
 		dev->init_name = NULL;
 	}
 
-	if (!dev_name(dev))
+	if (!dev_name(dev)) {
+		error = -EINVAL;
 		goto name_error;
+	}
 
 	pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
 
@@ -987,6 +1000,8 @@ done:
 	device_remove_class_symlinks(dev);
  SymlinkError:
 	if (MAJOR(dev->devt))
+		devtmpfs_delete_node(dev);
+	if (MAJOR(dev->devt))
 		device_remove_sys_dev_entry(dev);
  devtattrError:
 	if (MAJOR(dev->devt))
@@ -1728,8 +1743,5 @@ void device_shutdown(void)
 			dev->driver->shutdown(dev);
 		}
 	}
-	kobject_put(sysfs_dev_char_kobj);
-	kobject_put(sysfs_dev_block_kobj);
-	kobject_put(dev_kobj);
 	async_synchronize_full();
 }
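As an aside (not part of the commit), a minimal caller sketch of the const-qualified sysfs helpers above; the attribute name, show routine, and my_probe() are hypothetical:

/* Illustrative only: a driver-defined attribute handed to the new
 * const-taking device_create_file(); every name here is made up. */
static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	/* dev_driver_string() is the helper hardened above with ACCESS_ONCE() */
	return sprintf(buf, "%s\n", dev_driver_string(dev));
}
static DEVICE_ATTR(vendor, S_IRUGO, vendor_show, NULL);

static int my_probe(struct device *dev)
{
	return device_create_file(dev, &dev_attr_vendor);
}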
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index e62a4ccea54d..958bd1540c30 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -35,6 +35,7 @@ static ssize_t __ref store_online(struct sys_device *dev, struct sysdev_attribut
 	struct cpu *cpu = container_of(dev, struct cpu, sysdev);
 	ssize_t ret;
 
+	cpu_hotplug_driver_lock();
 	switch (buf[0]) {
 	case '0':
 		ret = cpu_down(cpu->sysdev.id);
@@ -49,6 +50,7 @@ static ssize_t __ref store_online(struct sys_device *dev, struct sysdev_attribut
 	default:
 		ret = -EINVAL;
 	}
+	cpu_hotplug_driver_unlock();
 
 	if (ret >= 0)
 		ret = count;
@@ -72,6 +74,38 @@ void unregister_cpu(struct cpu *cpu)
 	per_cpu(cpu_sys_devices, logical_cpu) = NULL;
 	return;
 }
+
+#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
+static ssize_t cpu_probe_store(struct class *class, const char *buf,
+			       size_t count)
+{
+	return arch_cpu_probe(buf, count);
+}
+
+static ssize_t cpu_release_store(struct class *class, const char *buf,
+				 size_t count)
+{
+	return arch_cpu_release(buf, count);
+}
+
+static CLASS_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
+static CLASS_ATTR(release, S_IWUSR, NULL, cpu_release_store);
+
+int __init cpu_probe_release_init(void)
+{
+	int rc;
+
+	rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
+			       &class_attr_probe.attr);
+	if (!rc)
+		rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
+				       &class_attr_release.attr);
+
+	return rc;
+}
+device_initcall(cpu_probe_release_init);
+#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
+
 #else /* ... !CONFIG_HOTPLUG_CPU */
 static inline void register_cpu_control(struct cpu *cpu)
 {
@@ -97,7 +131,7 @@ static ssize_t show_crash_notes(struct sys_device *dev, struct sysdev_attribute
 	 * boot up and this data does not change there after. Hence this
 	 * operation should be safe. No locking required.
 	 */
-	addr = __pa(per_cpu_ptr(crash_notes, cpunum));
+	addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpunum));
 	rc = sprintf(buf, "%Lx\n", addr);
 	return rc;
 }
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 979d159b5cd1..ee95c76bfd3d 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -188,7 +188,7 @@ EXPORT_SYMBOL_GPL(wait_for_device_probe);
  * @dev: device to try to bind to the driver
  *
  * This function returns -ENODEV if the device is not registered,
- * 1 if the device is bound sucessfully and 0 otherwise.
+ * 1 if the device is bound successfully and 0 otherwise.
  *
  * This function must be called with @dev->sem held.  When called for a
  * USB interface, @dev->parent->sem must be held as well.
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index a1cb5afe6801..090dd4851301 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -32,6 +32,8 @@ static int dev_mount = 1; | |||
32 | static int dev_mount; | 32 | static int dev_mount; |
33 | #endif | 33 | #endif |
34 | 34 | ||
35 | static DEFINE_MUTEX(dirlock); | ||
36 | |||
35 | static int __init mount_param(char *str) | 37 | static int __init mount_param(char *str) |
36 | { | 38 | { |
37 | dev_mount = simple_strtoul(str, NULL, 0); | 39 | dev_mount = simple_strtoul(str, NULL, 0); |
@@ -74,47 +76,37 @@ static int dev_mkdir(const char *name, mode_t mode) | |||
74 | dentry = lookup_create(&nd, 1); | 76 | dentry = lookup_create(&nd, 1); |
75 | if (!IS_ERR(dentry)) { | 77 | if (!IS_ERR(dentry)) { |
76 | err = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode); | 78 | err = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode); |
79 | if (!err) | ||
80 | /* mark as kernel-created inode */ | ||
81 | dentry->d_inode->i_private = &dev_mnt; | ||
77 | dput(dentry); | 82 | dput(dentry); |
78 | } else { | 83 | } else { |
79 | err = PTR_ERR(dentry); | 84 | err = PTR_ERR(dentry); |
80 | } | 85 | } |
81 | mutex_unlock(&nd.path.dentry->d_inode->i_mutex); | ||
82 | 86 | ||
87 | mutex_unlock(&nd.path.dentry->d_inode->i_mutex); | ||
83 | path_put(&nd.path); | 88 | path_put(&nd.path); |
84 | return err; | 89 | return err; |
85 | } | 90 | } |
86 | 91 | ||
87 | static int create_path(const char *nodepath) | 92 | static int create_path(const char *nodepath) |
88 | { | 93 | { |
89 | char *path; | 94 | int err; |
90 | struct nameidata nd; | ||
91 | int err = 0; | ||
92 | |||
93 | path = kstrdup(nodepath, GFP_KERNEL); | ||
94 | if (!path) | ||
95 | return -ENOMEM; | ||
96 | |||
97 | err = vfs_path_lookup(dev_mnt->mnt_root, dev_mnt, | ||
98 | path, LOOKUP_PARENT, &nd); | ||
99 | if (err == 0) { | ||
100 | struct dentry *dentry; | ||
101 | |||
102 | /* create directory right away */ | ||
103 | dentry = lookup_create(&nd, 1); | ||
104 | if (!IS_ERR(dentry)) { | ||
105 | err = vfs_mkdir(nd.path.dentry->d_inode, | ||
106 | dentry, 0755); | ||
107 | dput(dentry); | ||
108 | } | ||
109 | mutex_unlock(&nd.path.dentry->d_inode->i_mutex); | ||
110 | 95 | ||
111 | path_put(&nd.path); | 96 | mutex_lock(&dirlock); |
112 | } else if (err == -ENOENT) { | 97 | err = dev_mkdir(nodepath, 0755); |
98 | if (err == -ENOENT) { | ||
99 | char *path; | ||
113 | char *s; | 100 | char *s; |
114 | 101 | ||
115 | /* parent directories do not exist, create them */ | 102 | /* parent directories do not exist, create them */ |
103 | path = kstrdup(nodepath, GFP_KERNEL); | ||
104 | if (!path) { | ||
105 | err = -ENOMEM; | ||
106 | goto out; | ||
107 | } | ||
116 | s = path; | 108 | s = path; |
117 | while (1) { | 109 | for (;;) { |
118 | s = strchr(s, '/'); | 110 | s = strchr(s, '/'); |
119 | if (!s) | 111 | if (!s) |
120 | break; | 112 | break; |
@@ -125,9 +117,10 @@ static int create_path(const char *nodepath) | |||
125 | s[0] = '/'; | 117 | s[0] = '/'; |
126 | s++; | 118 | s++; |
127 | } | 119 | } |
120 | kfree(path); | ||
128 | } | 121 | } |
129 | 122 | out: | |
130 | kfree(path); | 123 | mutex_unlock(&dirlock); |
131 | return err; | 124 | return err; |
132 | } | 125 | } |
133 | 126 | ||
@@ -156,34 +149,40 @@ int devtmpfs_create_node(struct device *dev) | |||
156 | mode |= S_IFCHR; | 149 | mode |= S_IFCHR; |
157 | 150 | ||
158 | curr_cred = override_creds(&init_cred); | 151 | curr_cred = override_creds(&init_cred); |
152 | |||
159 | err = vfs_path_lookup(dev_mnt->mnt_root, dev_mnt, | 153 | err = vfs_path_lookup(dev_mnt->mnt_root, dev_mnt, |
160 | nodename, LOOKUP_PARENT, &nd); | 154 | nodename, LOOKUP_PARENT, &nd); |
161 | if (err == -ENOENT) { | 155 | if (err == -ENOENT) { |
162 | /* create missing parent directories */ | ||
163 | create_path(nodename); | 156 | create_path(nodename); |
164 | err = vfs_path_lookup(dev_mnt->mnt_root, dev_mnt, | 157 | err = vfs_path_lookup(dev_mnt->mnt_root, dev_mnt, |
165 | nodename, LOOKUP_PARENT, &nd); | 158 | nodename, LOOKUP_PARENT, &nd); |
166 | if (err) | ||
167 | goto out; | ||
168 | } | 159 | } |
160 | if (err) | ||
161 | goto out; | ||
169 | 162 | ||
170 | dentry = lookup_create(&nd, 0); | 163 | dentry = lookup_create(&nd, 0); |
171 | if (!IS_ERR(dentry)) { | 164 | if (!IS_ERR(dentry)) { |
172 | int umask; | ||
173 | |||
174 | umask = sys_umask(0000); | ||
175 | err = vfs_mknod(nd.path.dentry->d_inode, | 165 | err = vfs_mknod(nd.path.dentry->d_inode, |
176 | dentry, mode, dev->devt); | 166 | dentry, mode, dev->devt); |
177 | sys_umask(umask); | 167 | if (!err) { |
178 | /* mark as kernel created inode */ | 168 | struct iattr newattrs; |
179 | if (!err) | 169 | |
170 | /* fixup possibly umasked mode */ | ||
171 | newattrs.ia_mode = mode; | ||
172 | newattrs.ia_valid = ATTR_MODE; | ||
173 | mutex_lock(&dentry->d_inode->i_mutex); | ||
174 | notify_change(dentry, &newattrs); | ||
175 | mutex_unlock(&dentry->d_inode->i_mutex); | ||
176 | |||
177 | /* mark as kernel-created inode */ | ||
180 | dentry->d_inode->i_private = &dev_mnt; | 178 | dentry->d_inode->i_private = &dev_mnt; |
179 | } | ||
181 | dput(dentry); | 180 | dput(dentry); |
182 | } else { | 181 | } else { |
183 | err = PTR_ERR(dentry); | 182 | err = PTR_ERR(dentry); |
184 | } | 183 | } |
185 | mutex_unlock(&nd.path.dentry->d_inode->i_mutex); | ||
186 | 184 | ||
185 | mutex_unlock(&nd.path.dentry->d_inode->i_mutex); | ||
187 | path_put(&nd.path); | 186 | path_put(&nd.path); |
188 | out: | 187 | out: |
189 | kfree(tmp); | 188 | kfree(tmp); |
@@ -205,16 +204,21 @@ static int dev_rmdir(const char *name) | |||
205 | mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT); | 204 | mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT); |
206 | dentry = lookup_one_len(nd.last.name, nd.path.dentry, nd.last.len); | 205 | dentry = lookup_one_len(nd.last.name, nd.path.dentry, nd.last.len); |
207 | if (!IS_ERR(dentry)) { | 206 | if (!IS_ERR(dentry)) { |
208 | if (dentry->d_inode) | 207 | if (dentry->d_inode) { |
209 | err = vfs_rmdir(nd.path.dentry->d_inode, dentry); | 208 | if (dentry->d_inode->i_private == &dev_mnt) |
210 | else | 209 | err = vfs_rmdir(nd.path.dentry->d_inode, |
210 | dentry); | ||
211 | else | ||
212 | err = -EPERM; | ||
213 | } else { | ||
211 | err = -ENOENT; | 214 | err = -ENOENT; |
215 | } | ||
212 | dput(dentry); | 216 | dput(dentry); |
213 | } else { | 217 | } else { |
214 | err = PTR_ERR(dentry); | 218 | err = PTR_ERR(dentry); |
215 | } | 219 | } |
216 | mutex_unlock(&nd.path.dentry->d_inode->i_mutex); | ||
217 | 220 | ||
221 | mutex_unlock(&nd.path.dentry->d_inode->i_mutex); | ||
218 | path_put(&nd.path); | 222 | path_put(&nd.path); |
219 | return err; | 223 | return err; |
220 | } | 224 | } |
@@ -228,7 +232,8 @@ static int delete_path(const char *nodepath) | |||
228 | if (!path) | 232 | if (!path) |
229 | return -ENOMEM; | 233 | return -ENOMEM; |
230 | 234 | ||
231 | while (1) { | 235 | mutex_lock(&dirlock); |
236 | for (;;) { | ||
232 | char *base; | 237 | char *base; |
233 | 238 | ||
234 | base = strrchr(path, '/'); | 239 | base = strrchr(path, '/'); |
@@ -239,6 +244,7 @@ static int delete_path(const char *nodepath) | |||
239 | if (err) | 244 | if (err) |
240 | break; | 245 | break; |
241 | } | 246 | } |
247 | mutex_unlock(&dirlock); | ||
242 | 248 | ||
243 | kfree(path); | 249 | kfree(path); |
244 | return err; | 250 | return err; |
@@ -322,9 +328,8 @@ out: | |||
322 | * If configured, or requested by the commandline, devtmpfs will be | 328 | * If configured, or requested by the commandline, devtmpfs will be |
323 | * auto-mounted after the kernel mounted the root filesystem. | 329 | * auto-mounted after the kernel mounted the root filesystem. |
324 | */ | 330 | */ |
325 | int devtmpfs_mount(const char *mountpoint) | 331 | int devtmpfs_mount(const char *mntdir) |
326 | { | 332 | { |
327 | struct path path; | ||
328 | int err; | 333 | int err; |
329 | 334 | ||
330 | if (!dev_mount) | 335 | if (!dev_mount) |
@@ -333,15 +338,11 @@ int devtmpfs_mount(const char *mountpoint) | |||
333 | if (!dev_mnt) | 338 | if (!dev_mnt) |
334 | return 0; | 339 | return 0; |
335 | 340 | ||
336 | err = kern_path(mountpoint, LOOKUP_FOLLOW, &path); | 341 | err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL); |
337 | if (err) | ||
338 | return err; | ||
339 | err = do_add_mount(dev_mnt, &path, 0, NULL); | ||
340 | if (err) | 342 | if (err) |
341 | printk(KERN_INFO "devtmpfs: error mounting %i\n", err); | 343 | printk(KERN_INFO "devtmpfs: error mounting %i\n", err); |
342 | else | 344 | else |
343 | printk(KERN_INFO "devtmpfs: mounted\n"); | 345 | printk(KERN_INFO "devtmpfs: mounted\n"); |
344 | path_put(&path); | ||
345 | return err; | 346 | return err; |
346 | } | 347 | } |
347 | 348 | ||
@@ -361,7 +362,7 @@ int __init devtmpfs_init(void) | |||
361 | return err; | 362 | return err; |
362 | } | 363 | } |
363 | 364 | ||
364 | mnt = kern_mount(&dev_fs_type); | 365 | mnt = kern_mount_data(&dev_fs_type, "mode=0755"); |
365 | if (IS_ERR(mnt)) { | 366 | if (IS_ERR(mnt)) { |
366 | err = PTR_ERR(mnt); | 367 | err = PTR_ERR(mnt); |
367 | printk(KERN_ERR "devtmpfs: unable to create devtmpfs %i\n", err); | 368 | printk(KERN_ERR "devtmpfs: unable to create devtmpfs %i\n", err); |
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index ed2ebd3c287d..90c9fff09ead 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -98,7 +98,7 @@ EXPORT_SYMBOL_GPL(driver_find_device);
  * @attr: driver attribute descriptor.
  */
 int driver_create_file(struct device_driver *drv,
-		       struct driver_attribute *attr)
+		       const struct driver_attribute *attr)
 {
 	int error;
 	if (drv)
@@ -115,7 +115,7 @@ EXPORT_SYMBOL_GPL(driver_create_file);
  * @attr: driver attribute descriptor.
  */
 void driver_remove_file(struct device_driver *drv,
-			struct driver_attribute *attr)
+			const struct driver_attribute *attr)
 {
 	if (drv)
 		sysfs_remove_file(&drv->p->kobj, &attr->attr);
@@ -236,7 +236,7 @@ int driver_register(struct device_driver *drv)
 		put_driver(other);
 		printk(KERN_ERR "Error: Driver '%s' is already registered, "
 			"aborting...\n", drv->name);
-		return -EEXIST;
+		return -EBUSY;
 	}
 
 	ret = bus_add_driver(drv);
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 7376367bcb80..a95024166b66 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -601,12 +601,9 @@ request_firmware_work_func(void *arg)
 	}
 	ret = _request_firmware(&fw, fw_work->name, fw_work->device,
 				fw_work->uevent);
-	if (ret < 0)
-		fw_work->cont(NULL, fw_work->context);
-	else {
-		fw_work->cont(fw, fw_work->context);
-		release_firmware(fw);
-	}
+
+	fw_work->cont(fw, fw_work->context);
+
 	module_put(fw_work->module);
 	kfree(fw_work);
 	return ret;
@@ -619,6 +616,7 @@ request_firmware_work_func(void *arg)
  *	is non-zero else the firmware copy must be done manually.
  * @name: name of firmware file
  * @device: device for which firmware is being loaded
+ * @gfp: allocation flags
  * @context: will be passed over to @cont, and
  *	@fw may be %NULL if firmware request fails.
  * @cont: function will be called asynchronously when the firmware
@@ -631,12 +629,12 @@ request_firmware_work_func(void *arg)
 int
 request_firmware_nowait(
 	struct module *module, int uevent,
-	const char *name, struct device *device, void *context,
+	const char *name, struct device *device, gfp_t gfp, void *context,
 	void (*cont)(const struct firmware *fw, void *context))
 {
 	struct task_struct *task;
 	struct firmware_work *fw_work = kmalloc(sizeof (struct firmware_work),
-						GFP_ATOMIC);
+						gfp);
 
 	if (!fw_work)
 		return -ENOMEM;
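As an aside (not part of the commit), a hypothetical caller of the reworked request_firmware_nowait(): it now takes a gfp_t for the internal allocation, and the completion callback always receives the firmware pointer (possibly NULL) and must release it itself, since the core no longer calls release_firmware(). All driver names below are made up.

static void my_fw_cont(const struct firmware *fw, void *context)
{
	struct my_device *mydev = context;		/* hypothetical */

	if (!fw) {
		dev_err(mydev->dev, "firmware request failed\n");
		return;
	}
	my_device_load_fw(mydev, fw->data, fw->size);	/* hypothetical */
	release_firmware(fw);	/* the callback now owns the firmware */
}

static int my_request_fw(struct my_device *mydev)
{
	return request_firmware_nowait(THIS_MODULE, 1 /* uevent */,
				       "my_fw.bin", mydev->dev, GFP_KERNEL,
				       mydev, my_fw_cont);
}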
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 989429cfed88..d7d77d4a402c 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -63,6 +63,20 @@ void unregister_memory_notifier(struct notifier_block *nb) | |||
63 | } | 63 | } |
64 | EXPORT_SYMBOL(unregister_memory_notifier); | 64 | EXPORT_SYMBOL(unregister_memory_notifier); |
65 | 65 | ||
66 | static ATOMIC_NOTIFIER_HEAD(memory_isolate_chain); | ||
67 | |||
68 | int register_memory_isolate_notifier(struct notifier_block *nb) | ||
69 | { | ||
70 | return atomic_notifier_chain_register(&memory_isolate_chain, nb); | ||
71 | } | ||
72 | EXPORT_SYMBOL(register_memory_isolate_notifier); | ||
73 | |||
74 | void unregister_memory_isolate_notifier(struct notifier_block *nb) | ||
75 | { | ||
76 | atomic_notifier_chain_unregister(&memory_isolate_chain, nb); | ||
77 | } | ||
78 | EXPORT_SYMBOL(unregister_memory_isolate_notifier); | ||
79 | |||
66 | /* | 80 | /* |
67 | * register_memory - Setup a sysfs device for a memory block | 81 | * register_memory - Setup a sysfs device for a memory block |
68 | */ | 82 | */ |
@@ -157,6 +171,11 @@ int memory_notify(unsigned long val, void *v) | |||
157 | return blocking_notifier_call_chain(&memory_chain, val, v); | 171 | return blocking_notifier_call_chain(&memory_chain, val, v); |
158 | } | 172 | } |
159 | 173 | ||
174 | int memory_isolate_notify(unsigned long val, void *v) | ||
175 | { | ||
176 | return atomic_notifier_call_chain(&memory_isolate_chain, val, v); | ||
177 | } | ||
178 | |||
160 | /* | 179 | /* |
161 | * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is | 180 | * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is |
162 | * OK to have direct references to sparsemem variables in here. | 181 | * OK to have direct references to sparsemem variables in here. |
@@ -341,6 +360,64 @@ static inline int memory_probe_init(void) | |||
341 | } | 360 | } |
342 | #endif | 361 | #endif |
343 | 362 | ||
363 | #ifdef CONFIG_MEMORY_FAILURE | ||
364 | /* | ||
365 | * Support for offlining pages of memory | ||
366 | */ | ||
367 | |||
368 | /* Soft offline a page */ | ||
369 | static ssize_t | ||
370 | store_soft_offline_page(struct class *class, const char *buf, size_t count) | ||
371 | { | ||
372 | int ret; | ||
373 | u64 pfn; | ||
374 | if (!capable(CAP_SYS_ADMIN)) | ||
375 | return -EPERM; | ||
376 | if (strict_strtoull(buf, 0, &pfn) < 0) | ||
377 | return -EINVAL; | ||
378 | pfn >>= PAGE_SHIFT; | ||
379 | if (!pfn_valid(pfn)) | ||
380 | return -ENXIO; | ||
381 | ret = soft_offline_page(pfn_to_page(pfn), 0); | ||
382 | return ret == 0 ? count : ret; | ||
383 | } | ||
384 | |||
385 | /* Forcibly offline a page, including killing processes. */ | ||
386 | static ssize_t | ||
387 | store_hard_offline_page(struct class *class, const char *buf, size_t count) | ||
388 | { | ||
389 | int ret; | ||
390 | u64 pfn; | ||
391 | if (!capable(CAP_SYS_ADMIN)) | ||
392 | return -EPERM; | ||
393 | if (strict_strtoull(buf, 0, &pfn) < 0) | ||
394 | return -EINVAL; | ||
395 | pfn >>= PAGE_SHIFT; | ||
396 | ret = __memory_failure(pfn, 0, 0); | ||
397 | return ret ? ret : count; | ||
398 | } | ||
399 | |||
400 | static CLASS_ATTR(soft_offline_page, 0644, NULL, store_soft_offline_page); | ||
401 | static CLASS_ATTR(hard_offline_page, 0644, NULL, store_hard_offline_page); | ||
402 | |||
403 | static __init int memory_fail_init(void) | ||
404 | { | ||
405 | int err; | ||
406 | |||
407 | err = sysfs_create_file(&memory_sysdev_class.kset.kobj, | ||
408 | &class_attr_soft_offline_page.attr); | ||
409 | if (!err) | ||
410 | err = sysfs_create_file(&memory_sysdev_class.kset.kobj, | ||
411 | &class_attr_hard_offline_page.attr); | ||
412 | return err; | ||
413 | } | ||
414 | #else | ||
415 | static inline int memory_fail_init(void) | ||
416 | { | ||
417 | return 0; | ||
418 | } | ||
419 | #endif | ||
420 | |||
344 | /* | 421 | /* |
345 | * Note that phys_device is optional. It is here to allow for | 422 | * Note that phys_device is optional. It is here to allow for |
346 | * differentiation between which *physical* devices each | 423 | * differentiation between which *physical* devices each |
@@ -473,6 +550,9 @@ int __init memory_dev_init(void) | |||
473 | err = memory_probe_init(); | 550 | err = memory_probe_init(); |
474 | if (!ret) | 551 | if (!ret) |
475 | ret = err; | 552 | ret = err; |
553 | err = memory_fail_init(); | ||
554 | if (!ret) | ||
555 | ret = err; | ||
476 | err = block_size_init(); | 556 | err = block_size_init(); |
477 | if (!ret) | 557 | if (!ret) |
478 | ret = err; | 558 | ret = err; |
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 1fe5536d404f..70122791683d 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -173,6 +173,47 @@ static ssize_t node_read_distance(struct sys_device * dev, | |||
173 | } | 173 | } |
174 | static SYSDEV_ATTR(distance, S_IRUGO, node_read_distance, NULL); | 174 | static SYSDEV_ATTR(distance, S_IRUGO, node_read_distance, NULL); |
175 | 175 | ||
176 | #ifdef CONFIG_HUGETLBFS | ||
177 | /* | ||
178 | * hugetlbfs per node attributes registration interface: | ||
179 | * When/if hugetlb[fs] subsystem initializes [sometime after this module], | ||
180 | * it will register its per node attributes for all online nodes with | ||
181 | * memory. It will also call register_hugetlbfs_with_node(), below, to | ||
182 | * register its attribute registration functions with this node driver. | ||
183 | * Once these hooks have been initialized, the node driver will call into | ||
184 | * the hugetlb module to [un]register attributes for hot-plugged nodes. | ||
185 | */ | ||
186 | static node_registration_func_t __hugetlb_register_node; | ||
187 | static node_registration_func_t __hugetlb_unregister_node; | ||
188 | |||
189 | static inline bool hugetlb_register_node(struct node *node) | ||
190 | { | ||
191 | if (__hugetlb_register_node && | ||
192 | node_state(node->sysdev.id, N_HIGH_MEMORY)) { | ||
193 | __hugetlb_register_node(node); | ||
194 | return true; | ||
195 | } | ||
196 | return false; | ||
197 | } | ||
198 | |||
199 | static inline void hugetlb_unregister_node(struct node *node) | ||
200 | { | ||
201 | if (__hugetlb_unregister_node) | ||
202 | __hugetlb_unregister_node(node); | ||
203 | } | ||
204 | |||
205 | void register_hugetlbfs_with_node(node_registration_func_t doregister, | ||
206 | node_registration_func_t unregister) | ||
207 | { | ||
208 | __hugetlb_register_node = doregister; | ||
209 | __hugetlb_unregister_node = unregister; | ||
210 | } | ||
211 | #else | ||
212 | static inline void hugetlb_register_node(struct node *node) {} | ||
213 | |||
214 | static inline void hugetlb_unregister_node(struct node *node) {} | ||
215 | #endif | ||
216 | |||
176 | 217 | ||
177 | /* | 218 | /* |
178 | * register_node - Setup a sysfs device for a node. | 219 | * register_node - Setup a sysfs device for a node. |
@@ -196,6 +237,8 @@ int register_node(struct node *node, int num, struct node *parent) | |||
196 | sysdev_create_file(&node->sysdev, &attr_distance); | 237 | sysdev_create_file(&node->sysdev, &attr_distance); |
197 | 238 | ||
198 | scan_unevictable_register_node(node); | 239 | scan_unevictable_register_node(node); |
240 | |||
241 | hugetlb_register_node(node); | ||
199 | } | 242 | } |
200 | return error; | 243 | return error; |
201 | } | 244 | } |
@@ -216,6 +259,7 @@ void unregister_node(struct node *node) | |||
216 | sysdev_remove_file(&node->sysdev, &attr_distance); | 259 | sysdev_remove_file(&node->sysdev, &attr_distance); |
217 | 260 | ||
218 | scan_unevictable_unregister_node(node); | 261 | scan_unevictable_unregister_node(node); |
262 | hugetlb_unregister_node(node); /* no-op, if memoryless node */ | ||
219 | 263 | ||
220 | sysdev_unregister(&node->sysdev); | 264 | sysdev_unregister(&node->sysdev); |
221 | } | 265 | } |
@@ -227,26 +271,43 @@ struct node node_devices[MAX_NUMNODES]; | |||
227 | */ | 271 | */ |
228 | int register_cpu_under_node(unsigned int cpu, unsigned int nid) | 272 | int register_cpu_under_node(unsigned int cpu, unsigned int nid) |
229 | { | 273 | { |
230 | if (node_online(nid)) { | 274 | int ret; |
231 | struct sys_device *obj = get_cpu_sysdev(cpu); | 275 | struct sys_device *obj; |
232 | if (!obj) | ||
233 | return 0; | ||
234 | return sysfs_create_link(&node_devices[nid].sysdev.kobj, | ||
235 | &obj->kobj, | ||
236 | kobject_name(&obj->kobj)); | ||
237 | } | ||
238 | 276 | ||
239 | return 0; | 277 | if (!node_online(nid)) |
278 | return 0; | ||
279 | |||
280 | obj = get_cpu_sysdev(cpu); | ||
281 | if (!obj) | ||
282 | return 0; | ||
283 | |||
284 | ret = sysfs_create_link(&node_devices[nid].sysdev.kobj, | ||
285 | &obj->kobj, | ||
286 | kobject_name(&obj->kobj)); | ||
287 | if (ret) | ||
288 | return ret; | ||
289 | |||
290 | return sysfs_create_link(&obj->kobj, | ||
291 | &node_devices[nid].sysdev.kobj, | ||
292 | kobject_name(&node_devices[nid].sysdev.kobj)); | ||
240 | } | 293 | } |
241 | 294 | ||
242 | int unregister_cpu_under_node(unsigned int cpu, unsigned int nid) | 295 | int unregister_cpu_under_node(unsigned int cpu, unsigned int nid) |
243 | { | 296 | { |
244 | if (node_online(nid)) { | 297 | struct sys_device *obj; |
245 | struct sys_device *obj = get_cpu_sysdev(cpu); | 298 | |
246 | if (obj) | 299 | if (!node_online(nid)) |
247 | sysfs_remove_link(&node_devices[nid].sysdev.kobj, | 300 | return 0; |
248 | kobject_name(&obj->kobj)); | 301 | |
249 | } | 302 | obj = get_cpu_sysdev(cpu); |
303 | if (!obj) | ||
304 | return 0; | ||
305 | |||
306 | sysfs_remove_link(&node_devices[nid].sysdev.kobj, | ||
307 | kobject_name(&obj->kobj)); | ||
308 | sysfs_remove_link(&obj->kobj, | ||
309 | kobject_name(&node_devices[nid].sysdev.kobj)); | ||
310 | |||
250 | return 0; | 311 | return 0; |
251 | } | 312 | } |
252 | 313 | ||
@@ -268,6 +329,7 @@ static int get_nid_for_pfn(unsigned long pfn) | |||
268 | /* register memory section under specified node if it spans that node */ | 329 | /* register memory section under specified node if it spans that node */ |
269 | int register_mem_sect_under_node(struct memory_block *mem_blk, int nid) | 330 | int register_mem_sect_under_node(struct memory_block *mem_blk, int nid) |
270 | { | 331 | { |
332 | int ret; | ||
271 | unsigned long pfn, sect_start_pfn, sect_end_pfn; | 333 | unsigned long pfn, sect_start_pfn, sect_end_pfn; |
272 | 334 | ||
273 | if (!mem_blk) | 335 | if (!mem_blk) |
@@ -284,9 +346,15 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, int nid) | |||
284 | continue; | 346 | continue; |
285 | if (page_nid != nid) | 347 | if (page_nid != nid) |
286 | continue; | 348 | continue; |
287 | return sysfs_create_link_nowarn(&node_devices[nid].sysdev.kobj, | 349 | ret = sysfs_create_link_nowarn(&node_devices[nid].sysdev.kobj, |
288 | &mem_blk->sysdev.kobj, | 350 | &mem_blk->sysdev.kobj, |
289 | kobject_name(&mem_blk->sysdev.kobj)); | 351 | kobject_name(&mem_blk->sysdev.kobj)); |
352 | if (ret) | ||
353 | return ret; | ||
354 | |||
355 | return sysfs_create_link_nowarn(&mem_blk->sysdev.kobj, | ||
356 | &node_devices[nid].sysdev.kobj, | ||
357 | kobject_name(&node_devices[nid].sysdev.kobj)); | ||
290 | } | 358 | } |
291 | /* mem section does not span the specified node */ | 359 | /* mem section does not span the specified node */ |
292 | return 0; | 360 | return 0; |
@@ -295,12 +363,16 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, int nid) | |||
295 | /* unregister memory section under all nodes that it spans */ | 363 | /* unregister memory section under all nodes that it spans */ |
296 | int unregister_mem_sect_under_nodes(struct memory_block *mem_blk) | 364 | int unregister_mem_sect_under_nodes(struct memory_block *mem_blk) |
297 | { | 365 | { |
298 | nodemask_t unlinked_nodes; | 366 | NODEMASK_ALLOC(nodemask_t, unlinked_nodes, GFP_KERNEL); |
299 | unsigned long pfn, sect_start_pfn, sect_end_pfn; | 367 | unsigned long pfn, sect_start_pfn, sect_end_pfn; |
300 | 368 | ||
301 | if (!mem_blk) | 369 | if (!mem_blk) { |
370 | NODEMASK_FREE(unlinked_nodes); | ||
302 | return -EFAULT; | 371 | return -EFAULT; |
303 | nodes_clear(unlinked_nodes); | 372 | } |
373 | if (!unlinked_nodes) | ||
374 | return -ENOMEM; | ||
375 | nodes_clear(*unlinked_nodes); | ||
304 | sect_start_pfn = section_nr_to_pfn(mem_blk->phys_index); | 376 | sect_start_pfn = section_nr_to_pfn(mem_blk->phys_index); |
305 | sect_end_pfn = sect_start_pfn + PAGES_PER_SECTION - 1; | 377 | sect_end_pfn = sect_start_pfn + PAGES_PER_SECTION - 1; |
306 | for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) { | 378 | for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) { |
@@ -311,11 +383,14 @@ int unregister_mem_sect_under_nodes(struct memory_block *mem_blk) | |||
311 | continue; | 383 | continue; |
312 | if (!node_online(nid)) | 384 | if (!node_online(nid)) |
313 | continue; | 385 | continue; |
314 | if (node_test_and_set(nid, unlinked_nodes)) | 386 | if (node_test_and_set(nid, *unlinked_nodes)) |
315 | continue; | 387 | continue; |
316 | sysfs_remove_link(&node_devices[nid].sysdev.kobj, | 388 | sysfs_remove_link(&node_devices[nid].sysdev.kobj, |
317 | kobject_name(&mem_blk->sysdev.kobj)); | 389 | kobject_name(&mem_blk->sysdev.kobj)); |
390 | sysfs_remove_link(&mem_blk->sysdev.kobj, | ||
391 | kobject_name(&node_devices[nid].sysdev.kobj)); | ||
318 | } | 392 | } |
393 | NODEMASK_FREE(unlinked_nodes); | ||
319 | return 0; | 394 | return 0; |
320 | } | 395 | } |
321 | 396 | ||
@@ -345,9 +420,77 @@ static int link_mem_sections(int nid) | |||
345 | } | 420 | } |
346 | return err; | 421 | return err; |
347 | } | 422 | } |
348 | #else | 423 | |
424 | #ifdef CONFIG_HUGETLBFS | ||
425 | /* | ||
426 | * Handle per node hstate attribute [un]registration on transistions | ||
427 | * to/from memoryless state. | ||
428 | */ | ||
429 | static void node_hugetlb_work(struct work_struct *work) | ||
430 | { | ||
431 | struct node *node = container_of(work, struct node, node_work); | ||
432 | |||
433 | /* | ||
434 | * We only get here when a node transitions to/from memoryless state. | ||
435 | * We can detect which transition occurred by examining whether the | ||
436 | * node has memory now. hugetlb_register_node() already check this | ||
437 | * so we try to register the attributes. If that fails, then the | ||
438 | * node has transitioned to memoryless, try to unregister the | ||
439 | * attributes. | ||
440 | */ | ||
441 | if (!hugetlb_register_node(node)) | ||
442 | hugetlb_unregister_node(node); | ||
443 | } | ||
444 | |||
445 | static void init_node_hugetlb_work(int nid) | ||
446 | { | ||
447 | INIT_WORK(&node_devices[nid].node_work, node_hugetlb_work); | ||
448 | } | ||
449 | |||
450 | static int node_memory_callback(struct notifier_block *self, | ||
451 | unsigned long action, void *arg) | ||
452 | { | ||
453 | struct memory_notify *mnb = arg; | ||
454 | int nid = mnb->status_change_nid; | ||
455 | |||
456 | switch (action) { | ||
457 | case MEM_ONLINE: | ||
458 | case MEM_OFFLINE: | ||
459 | /* | ||
460 | * offload per node hstate [un]registration to a work thread | ||
461 | * when transitioning to/from memoryless state. | ||
462 | */ | ||
463 | if (nid != NUMA_NO_NODE) | ||
464 | schedule_work(&node_devices[nid].node_work); | ||
465 | break; | ||
466 | |||
467 | case MEM_GOING_ONLINE: | ||
468 | case MEM_GOING_OFFLINE: | ||
469 | case MEM_CANCEL_ONLINE: | ||
470 | case MEM_CANCEL_OFFLINE: | ||
471 | default: | ||
472 | break; | ||
473 | } | ||
474 | |||
475 | return NOTIFY_OK; | ||
476 | } | ||
477 | #endif /* CONFIG_HUGETLBFS */ | ||
478 | #else /* !CONFIG_MEMORY_HOTPLUG_SPARSE */ | ||
479 | |||
349 | static int link_mem_sections(int nid) { return 0; } | 480 | static int link_mem_sections(int nid) { return 0; } |
350 | #endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */ | 481 | #endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */ |
482 | |||
483 | #if !defined(CONFIG_MEMORY_HOTPLUG_SPARSE) || \ | ||
484 | !defined(CONFIG_HUGETLBFS) | ||
485 | static inline int node_memory_callback(struct notifier_block *self, | ||
486 | unsigned long action, void *arg) | ||
487 | { | ||
488 | return NOTIFY_OK; | ||
489 | } | ||
490 | |||
491 | static void init_node_hugetlb_work(int nid) { } | ||
492 | |||
493 | #endif | ||
351 | 494 | ||
352 | int register_one_node(int nid) | 495 | int register_one_node(int nid) |
353 | { | 496 | { |
@@ -371,6 +514,9 @@ int register_one_node(int nid) | |||
371 | 514 | ||
372 | /* link memory sections under this node */ | 515 | /* link memory sections under this node */ |
373 | error = link_mem_sections(nid); | 516 | error = link_mem_sections(nid); |
517 | |||
518 | /* initialize work queue for memory hot plug */ | ||
519 | init_node_hugetlb_work(nid); | ||
374 | } | 520 | } |
375 | 521 | ||
376 | return error; | 522 | return error; |
@@ -460,13 +606,17 @@ static int node_states_init(void) | |||
460 | return err; | 606 | return err; |
461 | } | 607 | } |
462 | 608 | ||
609 | #define NODE_CALLBACK_PRI 2 /* lower than SLAB */ | ||
463 | static int __init register_node_type(void) | 610 | static int __init register_node_type(void) |
464 | { | 611 | { |
465 | int ret; | 612 | int ret; |
466 | 613 | ||
467 | ret = sysdev_class_register(&node_class); | 614 | ret = sysdev_class_register(&node_class); |
468 | if (!ret) | 615 | if (!ret) { |
469 | ret = node_states_init(); | 616 | ret = node_states_init(); |
617 | hotplug_memory_notifier(node_memory_callback, | ||
618 | NODE_CALLBACK_PRI); | ||
619 | } | ||
470 | 620 | ||
471 | /* | 621 | /* |
472 | * Note: we're not going to unregister the node class if we fail | 622 | * Note: we're not going to unregister the node class if we fail |
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index ed156a13aa40..58efaf2f1259 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -441,6 +441,7 @@ error: | |||
441 | platform_device_put(pdev); | 441 | platform_device_put(pdev); |
442 | return ERR_PTR(retval); | 442 | return ERR_PTR(retval); |
443 | } | 443 | } |
444 | EXPORT_SYMBOL_GPL(platform_device_register_data); | ||
444 | 445 | ||
445 | static int platform_drv_probe(struct device *_dev) | 446 | static int platform_drv_probe(struct device *_dev) |
446 | { | 447 | { |
@@ -521,11 +522,15 @@ int __init_or_module platform_driver_probe(struct platform_driver *drv, | |||
521 | { | 522 | { |
522 | int retval, code; | 523 | int retval, code; |
523 | 524 | ||
525 | /* make sure driver won't have bind/unbind attributes */ | ||
526 | drv->driver.suppress_bind_attrs = true; | ||
527 | |||
524 | /* temporary section violation during probe() */ | 528 | /* temporary section violation during probe() */ |
525 | drv->probe = probe; | 529 | drv->probe = probe; |
526 | retval = code = platform_driver_register(drv); | 530 | retval = code = platform_driver_register(drv); |
527 | 531 | ||
528 | /* Fixup that section violation, being paranoid about code scanning | 532 | /* |
533 | * Fixup that section violation, being paranoid about code scanning | ||
529 | * the list of drivers in order to probe new devices. Check to see | 534 | * the list of drivers in order to probe new devices. Check to see |
530 | * if the probe was successful, and make sure any forced probes of | 535 | * if the probe was successful, and make sure any forced probes of |
531 | * new devices fail. | 536 | * new devices fail. |
@@ -996,7 +1001,7 @@ static __initdata LIST_HEAD(early_platform_device_list); | |||
996 | int __init early_platform_driver_register(struct early_platform_driver *epdrv, | 1001 | int __init early_platform_driver_register(struct early_platform_driver *epdrv, |
997 | char *buf) | 1002 | char *buf) |
998 | { | 1003 | { |
999 | unsigned long index; | 1004 | char *tmp; |
1000 | int n; | 1005 | int n; |
1001 | 1006 | ||
1002 | /* Simply add the driver to the end of the global list. | 1007 | /* Simply add the driver to the end of the global list. |
@@ -1015,13 +1020,28 @@ int __init early_platform_driver_register(struct early_platform_driver *epdrv, | |||
1015 | if (buf && !strncmp(buf, epdrv->pdrv->driver.name, n)) { | 1020 | if (buf && !strncmp(buf, epdrv->pdrv->driver.name, n)) { |
1016 | list_move(&epdrv->list, &early_platform_driver_list); | 1021 | list_move(&epdrv->list, &early_platform_driver_list); |
1017 | 1022 | ||
1018 | if (!strcmp(buf, epdrv->pdrv->driver.name)) | 1023 | /* Allow passing parameters after device name */ |
1024 | if (buf[n] == '\0' || buf[n] == ',') | ||
1019 | epdrv->requested_id = -1; | 1025 | epdrv->requested_id = -1; |
1020 | else if (buf[n] == '.' && strict_strtoul(&buf[n + 1], 10, | 1026 | else { |
1021 | &index) == 0) | 1027 | epdrv->requested_id = simple_strtoul(&buf[n + 1], |
1022 | epdrv->requested_id = index; | 1028 | &tmp, 10); |
1023 | else | 1029 | |
1024 | epdrv->requested_id = EARLY_PLATFORM_ID_ERROR; | 1030 | if (buf[n] != '.' || (tmp == &buf[n + 1])) { |
1031 | epdrv->requested_id = EARLY_PLATFORM_ID_ERROR; | ||
1032 | n = 0; | ||
1033 | } else | ||
1034 | n += strcspn(&buf[n + 1], ",") + 1; | ||
1035 | } | ||
1036 | |||
1037 | if (buf[n] == ',') | ||
1038 | n++; | ||
1039 | |||
1040 | if (epdrv->bufsize) { | ||
1041 | memcpy(epdrv->buffer, &buf[n], | ||
1042 | min_t(int, epdrv->bufsize, strlen(&buf[n]) + 1)); | ||
1043 | epdrv->buffer[epdrv->bufsize - 1] = '\0'; | ||
1044 | } | ||
1025 | } | 1045 | } |
1026 | 1046 | ||
1027 | return 0; | 1047 | return 0; |
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index e0dc4071e088..48adf80926a0 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -23,8 +23,8 @@ | |||
23 | #include <linux/pm.h> | 23 | #include <linux/pm.h> |
24 | #include <linux/pm_runtime.h> | 24 | #include <linux/pm_runtime.h> |
25 | #include <linux/resume-trace.h> | 25 | #include <linux/resume-trace.h> |
26 | #include <linux/rwsem.h> | ||
27 | #include <linux/interrupt.h> | 26 | #include <linux/interrupt.h> |
27 | #include <linux/sched.h> | ||
28 | 28 | ||
29 | #include "../base.h" | 29 | #include "../base.h" |
30 | #include "power.h" | 30 | #include "power.h" |
@@ -161,6 +161,32 @@ void device_pm_move_last(struct device *dev) | |||
161 | list_move_tail(&dev->power.entry, &dpm_list); | 161 | list_move_tail(&dev->power.entry, &dpm_list); |
162 | } | 162 | } |
163 | 163 | ||
164 | static ktime_t initcall_debug_start(struct device *dev) | ||
165 | { | ||
166 | ktime_t calltime = ktime_set(0, 0); | ||
167 | |||
168 | if (initcall_debug) { | ||
169 | pr_info("calling %s+ @ %i\n", | ||
170 | dev_name(dev), task_pid_nr(current)); | ||
171 | calltime = ktime_get(); | ||
172 | } | ||
173 | |||
174 | return calltime; | ||
175 | } | ||
176 | |||
177 | static void initcall_debug_report(struct device *dev, ktime_t calltime, | ||
178 | int error) | ||
179 | { | ||
180 | ktime_t delta, rettime; | ||
181 | |||
182 | if (initcall_debug) { | ||
183 | rettime = ktime_get(); | ||
184 | delta = ktime_sub(rettime, calltime); | ||
185 | pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev), | ||
186 | error, (unsigned long long)ktime_to_ns(delta) >> 10); | ||
187 | } | ||
188 | } | ||
189 | |||
164 | /** | 190 | /** |
165 | * pm_op - Execute the PM operation appropriate for given PM event. | 191 | * pm_op - Execute the PM operation appropriate for given PM event. |
166 | * @dev: Device to handle. | 192 | * @dev: Device to handle. |
@@ -172,6 +198,9 @@ static int pm_op(struct device *dev, | |||
172 | pm_message_t state) | 198 | pm_message_t state) |
173 | { | 199 | { |
174 | int error = 0; | 200 | int error = 0; |
201 | ktime_t calltime; | ||
202 | |||
203 | calltime = initcall_debug_start(dev); | ||
175 | 204 | ||
176 | switch (state.event) { | 205 | switch (state.event) { |
177 | #ifdef CONFIG_SUSPEND | 206 | #ifdef CONFIG_SUSPEND |
@@ -219,6 +248,9 @@ static int pm_op(struct device *dev, | |||
219 | default: | 248 | default: |
220 | error = -EINVAL; | 249 | error = -EINVAL; |
221 | } | 250 | } |
251 | |||
252 | initcall_debug_report(dev, calltime, error); | ||
253 | |||
222 | return error; | 254 | return error; |
223 | } | 255 | } |
224 | 256 | ||
@@ -236,6 +268,13 @@ static int pm_noirq_op(struct device *dev, | |||
236 | pm_message_t state) | 268 | pm_message_t state) |
237 | { | 269 | { |
238 | int error = 0; | 270 | int error = 0; |
271 | ktime_t calltime, delta, rettime; | ||
272 | |||
273 | if (initcall_debug) { | ||
274 | pr_info("calling %s_i+ @ %i\n", | ||
275 | dev_name(dev), task_pid_nr(current)); | ||
276 | calltime = ktime_get(); | ||
277 | } | ||
239 | 278 | ||
240 | switch (state.event) { | 279 | switch (state.event) { |
241 | #ifdef CONFIG_SUSPEND | 280 | #ifdef CONFIG_SUSPEND |
@@ -283,6 +322,15 @@ static int pm_noirq_op(struct device *dev, | |||
283 | default: | 322 | default: |
284 | error = -EINVAL; | 323 | error = -EINVAL; |
285 | } | 324 | } |
325 | |||
326 | if (initcall_debug) { | ||
327 | rettime = ktime_get(); | ||
328 | delta = ktime_sub(rettime, calltime); | ||
329 | printk("initcall %s_i+ returned %d after %Ld usecs\n", | ||
330 | dev_name(dev), error, | ||
331 | (unsigned long long)ktime_to_ns(delta) >> 10); | ||
332 | } | ||
333 | |||
286 | return error; | 334 | return error; |
287 | } | 335 | } |
288 | 336 | ||
@@ -324,6 +372,23 @@ static void pm_dev_err(struct device *dev, pm_message_t state, char *info, | |||
324 | kobject_name(&dev->kobj), pm_verb(state.event), info, error); | 372 | kobject_name(&dev->kobj), pm_verb(state.event), info, error); |
325 | } | 373 | } |
326 | 374 | ||
375 | static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info) | ||
376 | { | ||
377 | ktime_t calltime; | ||
378 | s64 usecs64; | ||
379 | int usecs; | ||
380 | |||
381 | calltime = ktime_get(); | ||
382 | usecs64 = ktime_to_ns(ktime_sub(calltime, starttime)); | ||
383 | do_div(usecs64, NSEC_PER_USEC); | ||
384 | usecs = usecs64; | ||
385 | if (usecs == 0) | ||
386 | usecs = 1; | ||
387 | pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n", | ||
388 | info ?: "", info ? " " : "", pm_verb(state.event), | ||
389 | usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC); | ||
390 | } | ||
391 | |||
327 | /*------------------------- Resume routines -------------------------*/ | 392 | /*------------------------- Resume routines -------------------------*/ |
328 | 393 | ||
329 | /** | 394 | /** |
@@ -341,14 +406,11 @@ static int device_resume_noirq(struct device *dev, pm_message_t state) | |||
341 | TRACE_DEVICE(dev); | 406 | TRACE_DEVICE(dev); |
342 | TRACE_RESUME(0); | 407 | TRACE_RESUME(0); |
343 | 408 | ||
344 | if (!dev->bus) | 409 | if (dev->bus && dev->bus->pm) { |
345 | goto End; | ||
346 | |||
347 | if (dev->bus->pm) { | ||
348 | pm_dev_dbg(dev, state, "EARLY "); | 410 | pm_dev_dbg(dev, state, "EARLY "); |
349 | error = pm_noirq_op(dev, dev->bus->pm, state); | 411 | error = pm_noirq_op(dev, dev->bus->pm, state); |
350 | } | 412 | } |
351 | End: | 413 | |
352 | TRACE_RESUME(error); | 414 | TRACE_RESUME(error); |
353 | return error; | 415 | return error; |
354 | } | 416 | } |
@@ -363,6 +425,7 @@ static int device_resume_noirq(struct device *dev, pm_message_t state) | |||
363 | void dpm_resume_noirq(pm_message_t state) | 425 | void dpm_resume_noirq(pm_message_t state) |
364 | { | 426 | { |
365 | struct device *dev; | 427 | struct device *dev; |
428 | ktime_t starttime = ktime_get(); | ||
366 | 429 | ||
367 | mutex_lock(&dpm_list_mtx); | 430 | mutex_lock(&dpm_list_mtx); |
368 | transition_started = false; | 431 | transition_started = false; |
@@ -376,11 +439,32 @@ void dpm_resume_noirq(pm_message_t state) | |||
376 | pm_dev_err(dev, state, " early", error); | 439 | pm_dev_err(dev, state, " early", error); |
377 | } | 440 | } |
378 | mutex_unlock(&dpm_list_mtx); | 441 | mutex_unlock(&dpm_list_mtx); |
442 | dpm_show_time(starttime, state, "early"); | ||
379 | resume_device_irqs(); | 443 | resume_device_irqs(); |
380 | } | 444 | } |
381 | EXPORT_SYMBOL_GPL(dpm_resume_noirq); | 445 | EXPORT_SYMBOL_GPL(dpm_resume_noirq); |
382 | 446 | ||
383 | /** | 447 | /** |
448 | * legacy_resume - Execute a legacy (bus or class) resume callback for device. | ||
449 | * dev: Device to resume. | ||
450 | * cb: Resume callback to execute. | ||
451 | */ | ||
452 | static int legacy_resume(struct device *dev, int (*cb)(struct device *dev)) | ||
453 | { | ||
454 | int error; | ||
455 | ktime_t calltime; | ||
456 | |||
457 | calltime = initcall_debug_start(dev); | ||
458 | |||
459 | error = cb(dev); | ||
460 | suspend_report_result(cb, error); | ||
461 | |||
462 | initcall_debug_report(dev, calltime, error); | ||
463 | |||
464 | return error; | ||
465 | } | ||
466 | |||
467 | /** | ||
384 | * device_resume - Execute "resume" callbacks for given device. | 468 | * device_resume - Execute "resume" callbacks for given device. |
385 | * @dev: Device to handle. | 469 | * @dev: Device to handle. |
386 | * @state: PM transition of the system being carried out. | 470 | * @state: PM transition of the system being carried out. |
@@ -400,7 +484,7 @@ static int device_resume(struct device *dev, pm_message_t state) | |||
400 | error = pm_op(dev, dev->bus->pm, state); | 484 | error = pm_op(dev, dev->bus->pm, state); |
401 | } else if (dev->bus->resume) { | 485 | } else if (dev->bus->resume) { |
402 | pm_dev_dbg(dev, state, "legacy "); | 486 | pm_dev_dbg(dev, state, "legacy "); |
403 | error = dev->bus->resume(dev); | 487 | error = legacy_resume(dev, dev->bus->resume); |
404 | } | 488 | } |
405 | if (error) | 489 | if (error) |
406 | goto End; | 490 | goto End; |
@@ -421,7 +505,7 @@ static int device_resume(struct device *dev, pm_message_t state) | |||
421 | error = pm_op(dev, dev->class->pm, state); | 505 | error = pm_op(dev, dev->class->pm, state); |
422 | } else if (dev->class->resume) { | 506 | } else if (dev->class->resume) { |
423 | pm_dev_dbg(dev, state, "legacy class "); | 507 | pm_dev_dbg(dev, state, "legacy class "); |
424 | error = dev->class->resume(dev); | 508 | error = legacy_resume(dev, dev->class->resume); |
425 | } | 509 | } |
426 | } | 510 | } |
427 | End: | 511 | End: |
@@ -441,6 +525,7 @@ static int device_resume(struct device *dev, pm_message_t state) | |||
441 | static void dpm_resume(pm_message_t state) | 525 | static void dpm_resume(pm_message_t state) |
442 | { | 526 | { |
443 | struct list_head list; | 527 | struct list_head list; |
528 | ktime_t starttime = ktime_get(); | ||
444 | 529 | ||
445 | INIT_LIST_HEAD(&list); | 530 | INIT_LIST_HEAD(&list); |
446 | mutex_lock(&dpm_list_mtx); | 531 | mutex_lock(&dpm_list_mtx); |
@@ -469,6 +554,7 @@ static void dpm_resume(pm_message_t state) | |||
469 | } | 554 | } |
470 | list_splice(&list, &dpm_list); | 555 | list_splice(&list, &dpm_list); |
471 | mutex_unlock(&dpm_list_mtx); | 556 | mutex_unlock(&dpm_list_mtx); |
557 | dpm_show_time(starttime, state, NULL); | ||
472 | } | 558 | } |
473 | 559 | ||
474 | /** | 560 | /** |
@@ -511,6 +597,7 @@ static void dpm_complete(pm_message_t state) | |||
511 | 597 | ||
512 | INIT_LIST_HEAD(&list); | 598 | INIT_LIST_HEAD(&list); |
513 | mutex_lock(&dpm_list_mtx); | 599 | mutex_lock(&dpm_list_mtx); |
600 | transition_started = false; | ||
514 | while (!list_empty(&dpm_list)) { | 601 | while (!list_empty(&dpm_list)) { |
515 | struct device *dev = to_device(dpm_list.prev); | 602 | struct device *dev = to_device(dpm_list.prev); |
516 | 603 | ||
@@ -520,7 +607,7 @@ static void dpm_complete(pm_message_t state) | |||
520 | mutex_unlock(&dpm_list_mtx); | 607 | mutex_unlock(&dpm_list_mtx); |
521 | 608 | ||
522 | device_complete(dev, state); | 609 | device_complete(dev, state); |
523 | pm_runtime_put_noidle(dev); | 610 | pm_runtime_put_sync(dev); |
524 | 611 | ||
525 | mutex_lock(&dpm_list_mtx); | 612 | mutex_lock(&dpm_list_mtx); |
526 | } | 613 | } |
@@ -583,10 +670,7 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state) | |||
583 | { | 670 | { |
584 | int error = 0; | 671 | int error = 0; |
585 | 672 | ||
586 | if (!dev->bus) | 673 | if (dev->bus && dev->bus->pm) { |
587 | return 0; | ||
588 | |||
589 | if (dev->bus->pm) { | ||
590 | pm_dev_dbg(dev, state, "LATE "); | 674 | pm_dev_dbg(dev, state, "LATE "); |
591 | error = pm_noirq_op(dev, dev->bus->pm, state); | 675 | error = pm_noirq_op(dev, dev->bus->pm, state); |
592 | } | 676 | } |
@@ -603,6 +687,7 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state) | |||
603 | int dpm_suspend_noirq(pm_message_t state) | 687 | int dpm_suspend_noirq(pm_message_t state) |
604 | { | 688 | { |
605 | struct device *dev; | 689 | struct device *dev; |
690 | ktime_t starttime = ktime_get(); | ||
606 | int error = 0; | 691 | int error = 0; |
607 | 692 | ||
608 | suspend_device_irqs(); | 693 | suspend_device_irqs(); |
@@ -618,11 +703,34 @@ int dpm_suspend_noirq(pm_message_t state) | |||
618 | mutex_unlock(&dpm_list_mtx); | 703 | mutex_unlock(&dpm_list_mtx); |
619 | if (error) | 704 | if (error) |
620 | dpm_resume_noirq(resume_event(state)); | 705 | dpm_resume_noirq(resume_event(state)); |
706 | else | ||
707 | dpm_show_time(starttime, state, "late"); | ||
621 | return error; | 708 | return error; |
622 | } | 709 | } |
623 | EXPORT_SYMBOL_GPL(dpm_suspend_noirq); | 710 | EXPORT_SYMBOL_GPL(dpm_suspend_noirq); |
624 | 711 | ||
625 | /** | 712 | /** |
713 | * legacy_suspend - Execute a legacy (bus or class) suspend callback for device. | ||
714 | * @dev: Device to suspend. | ||
715 | * @cb: Suspend callback to execute. | ||
716 | */ | ||
717 | static int legacy_suspend(struct device *dev, pm_message_t state, | ||
718 | int (*cb)(struct device *dev, pm_message_t state)) | ||
719 | { | ||
720 | int error; | ||
721 | ktime_t calltime; | ||
722 | |||
723 | calltime = initcall_debug_start(dev); | ||
724 | |||
725 | error = cb(dev, state); | ||
726 | suspend_report_result(cb, error); | ||
727 | |||
728 | initcall_debug_report(dev, calltime, error); | ||
729 | |||
730 | return error; | ||
731 | } | ||
732 | |||
733 | /** | ||
626 | * device_suspend - Execute "suspend" callbacks for given device. | 734 | * device_suspend - Execute "suspend" callbacks for given device. |
627 | * @dev: Device to handle. | 735 | * @dev: Device to handle. |
628 | * @state: PM transition of the system being carried out. | 736 | * @state: PM transition of the system being carried out. |
@@ -639,8 +747,7 @@ static int device_suspend(struct device *dev, pm_message_t state) | |||
639 | error = pm_op(dev, dev->class->pm, state); | 747 | error = pm_op(dev, dev->class->pm, state); |
640 | } else if (dev->class->suspend) { | 748 | } else if (dev->class->suspend) { |
641 | pm_dev_dbg(dev, state, "legacy class "); | 749 | pm_dev_dbg(dev, state, "legacy class "); |
642 | error = dev->class->suspend(dev, state); | 750 | error = legacy_suspend(dev, state, dev->class->suspend); |
643 | suspend_report_result(dev->class->suspend, error); | ||
644 | } | 751 | } |
645 | if (error) | 752 | if (error) |
646 | goto End; | 753 | goto End; |
@@ -661,8 +768,7 @@ static int device_suspend(struct device *dev, pm_message_t state) | |||
661 | error = pm_op(dev, dev->bus->pm, state); | 768 | error = pm_op(dev, dev->bus->pm, state); |
662 | } else if (dev->bus->suspend) { | 769 | } else if (dev->bus->suspend) { |
663 | pm_dev_dbg(dev, state, "legacy "); | 770 | pm_dev_dbg(dev, state, "legacy "); |
664 | error = dev->bus->suspend(dev, state); | 771 | error = legacy_suspend(dev, state, dev->bus->suspend); |
665 | suspend_report_result(dev->bus->suspend, error); | ||
666 | } | 772 | } |
667 | } | 773 | } |
668 | End: | 774 | End: |
@@ -678,6 +784,7 @@ static int device_suspend(struct device *dev, pm_message_t state) | |||
678 | static int dpm_suspend(pm_message_t state) | 784 | static int dpm_suspend(pm_message_t state) |
679 | { | 785 | { |
680 | struct list_head list; | 786 | struct list_head list; |
787 | ktime_t starttime = ktime_get(); | ||
681 | int error = 0; | 788 | int error = 0; |
682 | 789 | ||
683 | INIT_LIST_HEAD(&list); | 790 | INIT_LIST_HEAD(&list); |
@@ -703,6 +810,8 @@ static int dpm_suspend(pm_message_t state) | |||
703 | } | 810 | } |
704 | list_splice(&list, dpm_list.prev); | 811 | list_splice(&list, dpm_list.prev); |
705 | mutex_unlock(&dpm_list_mtx); | 812 | mutex_unlock(&dpm_list_mtx); |
813 | if (!error) | ||
814 | dpm_show_time(starttime, state, NULL); | ||
706 | return error; | 815 | return error; |
707 | } | 816 | } |
708 | 817 | ||
@@ -771,7 +880,7 @@ static int dpm_prepare(pm_message_t state) | |||
771 | pm_runtime_get_noresume(dev); | 880 | pm_runtime_get_noresume(dev); |
772 | if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) { | 881 | if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) { |
773 | /* Wake-up requested during system sleep transition. */ | 882 | /* Wake-up requested during system sleep transition. */ |
774 | pm_runtime_put_noidle(dev); | 883 | pm_runtime_put_sync(dev); |
775 | error = -EBUSY; | 884 | error = -EBUSY; |
776 | } else { | 885 | } else { |
777 | error = device_prepare(dev, state); | 886 | error = device_prepare(dev, state); |
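Both dpm_complete() above and dpm_prepare() here switch the call that balances pm_runtime_get_noresume() from pm_runtime_put_noidle() to pm_runtime_put_sync(), so releasing the reference taken for the system sleep transition immediately runs a synchronous idle notification instead of merely dropping the count. Roughly, the two wrappers differ as sketched below; the exact bodies live in include/linux/pm_runtime.h and should be checked there.

/* Sketch of the two "put" flavours used by this patch. */
static inline void pm_runtime_put_noidle(struct device *dev)
{
        /* Only drop the usage count; no idle notification. */
        atomic_add_unless(&dev->power.usage_count, -1, 0);
}

static inline int pm_runtime_put_sync(struct device *dev)
{
        /* Drop the usage count and run the idle notification now. */
        return __pm_runtime_put(dev, true);
}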
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 38556f6cc22d..f8b044e8aef7 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c | |||
@@ -51,8 +51,6 @@ static int __pm_runtime_idle(struct device *dev) | |||
51 | { | 51 | { |
52 | int retval = 0; | 52 | int retval = 0; |
53 | 53 | ||
54 | dev_dbg(dev, "__pm_runtime_idle()!\n"); | ||
55 | |||
56 | if (dev->power.runtime_error) | 54 | if (dev->power.runtime_error) |
57 | retval = -EINVAL; | 55 | retval = -EINVAL; |
58 | else if (dev->power.idle_notification) | 56 | else if (dev->power.idle_notification) |
@@ -87,14 +85,25 @@ static int __pm_runtime_idle(struct device *dev) | |||
87 | dev->bus->pm->runtime_idle(dev); | 85 | dev->bus->pm->runtime_idle(dev); |
88 | 86 | ||
89 | spin_lock_irq(&dev->power.lock); | 87 | spin_lock_irq(&dev->power.lock); |
88 | } else if (dev->type && dev->type->pm && dev->type->pm->runtime_idle) { | ||
89 | spin_unlock_irq(&dev->power.lock); | ||
90 | |||
91 | dev->type->pm->runtime_idle(dev); | ||
92 | |||
93 | spin_lock_irq(&dev->power.lock); | ||
94 | } else if (dev->class && dev->class->pm | ||
95 | && dev->class->pm->runtime_idle) { | ||
96 | spin_unlock_irq(&dev->power.lock); | ||
97 | |||
98 | dev->class->pm->runtime_idle(dev); | ||
99 | |||
100 | spin_lock_irq(&dev->power.lock); | ||
90 | } | 101 | } |
91 | 102 | ||
92 | dev->power.idle_notification = false; | 103 | dev->power.idle_notification = false; |
93 | wake_up_all(&dev->power.wait_queue); | 104 | wake_up_all(&dev->power.wait_queue); |
94 | 105 | ||
95 | out: | 106 | out: |
96 | dev_dbg(dev, "__pm_runtime_idle() returns %d!\n", retval); | ||
97 | |||
98 | return retval; | 107 | return retval; |
99 | } | 108 | } |
100 | 109 | ||
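With this hunk __pm_runtime_idle() no longer consults only dev->bus: it falls back to the device type's and then the class's dev_pm_ops, and the hunks below apply the same bus -> type -> class order to __pm_runtime_suspend() and __pm_runtime_resume(). A hypothetical class that wants its devices covered by these new paths would simply publish runtime callbacks through its pm field; all foo_* names below are invented for illustration.

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev)
{
        /* Class-wide power-down, e.g. quiescing shared hardware. */
        return 0;
}

static int foo_runtime_resume(struct device *dev)
{
        return 0;
}

static int foo_runtime_idle(struct device *dev)
{
        /* Nothing to check: ask the core to suspend right away. */
        return pm_runtime_suspend(dev);
}

static const struct dev_pm_ops foo_pm_ops = {
        .runtime_suspend = foo_runtime_suspend,
        .runtime_resume  = foo_runtime_resume,
        .runtime_idle    = foo_runtime_idle,
};

static struct class foo_class = {
        .name = "foo",
        .pm   = &foo_pm_ops,
};

Devices registered under foo_class whose bus and type define no runtime PM operations will now reach these callbacks instead of failing with -ENOSYS.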
@@ -189,6 +198,7 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq) | |||
189 | } | 198 | } |
190 | 199 | ||
191 | dev->power.runtime_status = RPM_SUSPENDING; | 200 | dev->power.runtime_status = RPM_SUSPENDING; |
201 | dev->power.deferred_resume = false; | ||
192 | 202 | ||
193 | if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) { | 203 | if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) { |
194 | spin_unlock_irq(&dev->power.lock); | 204 | spin_unlock_irq(&dev->power.lock); |
@@ -197,6 +207,22 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq) | |||
197 | 207 | ||
198 | spin_lock_irq(&dev->power.lock); | 208 | spin_lock_irq(&dev->power.lock); |
199 | dev->power.runtime_error = retval; | 209 | dev->power.runtime_error = retval; |
210 | } else if (dev->type && dev->type->pm | ||
211 | && dev->type->pm->runtime_suspend) { | ||
212 | spin_unlock_irq(&dev->power.lock); | ||
213 | |||
214 | retval = dev->type->pm->runtime_suspend(dev); | ||
215 | |||
216 | spin_lock_irq(&dev->power.lock); | ||
217 | dev->power.runtime_error = retval; | ||
218 | } else if (dev->class && dev->class->pm | ||
219 | && dev->class->pm->runtime_suspend) { | ||
220 | spin_unlock_irq(&dev->power.lock); | ||
221 | |||
222 | retval = dev->class->pm->runtime_suspend(dev); | ||
223 | |||
224 | spin_lock_irq(&dev->power.lock); | ||
225 | dev->power.runtime_error = retval; | ||
200 | } else { | 226 | } else { |
201 | retval = -ENOSYS; | 227 | retval = -ENOSYS; |
202 | } | 228 | } |
@@ -204,7 +230,6 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq) | |||
204 | if (retval) { | 230 | if (retval) { |
205 | dev->power.runtime_status = RPM_ACTIVE; | 231 | dev->power.runtime_status = RPM_ACTIVE; |
206 | pm_runtime_cancel_pending(dev); | 232 | pm_runtime_cancel_pending(dev); |
207 | dev->power.deferred_resume = false; | ||
208 | 233 | ||
209 | if (retval == -EAGAIN || retval == -EBUSY) { | 234 | if (retval == -EAGAIN || retval == -EBUSY) { |
210 | notify = true; | 235 | notify = true; |
@@ -221,7 +246,6 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq) | |||
221 | wake_up_all(&dev->power.wait_queue); | 246 | wake_up_all(&dev->power.wait_queue); |
222 | 247 | ||
223 | if (dev->power.deferred_resume) { | 248 | if (dev->power.deferred_resume) { |
224 | dev->power.deferred_resume = false; | ||
225 | __pm_runtime_resume(dev, false); | 249 | __pm_runtime_resume(dev, false); |
226 | retval = -EAGAIN; | 250 | retval = -EAGAIN; |
227 | goto out; | 251 | goto out; |
@@ -332,11 +356,11 @@ int __pm_runtime_resume(struct device *dev, bool from_wq) | |||
332 | * necessary. | 356 | * necessary. |
333 | */ | 357 | */ |
334 | parent = dev->parent; | 358 | parent = dev->parent; |
335 | spin_unlock_irq(&dev->power.lock); | 359 | spin_unlock(&dev->power.lock); |
336 | 360 | ||
337 | pm_runtime_get_noresume(parent); | 361 | pm_runtime_get_noresume(parent); |
338 | 362 | ||
339 | spin_lock_irq(&parent->power.lock); | 363 | spin_lock(&parent->power.lock); |
340 | /* | 364 | /* |
341 | * We can resume if the parent's run-time PM is disabled or it | 365 | * We can resume if the parent's run-time PM is disabled or it |
342 | * is set to ignore children. | 366 | * is set to ignore children. |
@@ -347,9 +371,9 @@ int __pm_runtime_resume(struct device *dev, bool from_wq) | |||
347 | if (parent->power.runtime_status != RPM_ACTIVE) | 371 | if (parent->power.runtime_status != RPM_ACTIVE) |
348 | retval = -EBUSY; | 372 | retval = -EBUSY; |
349 | } | 373 | } |
350 | spin_unlock_irq(&parent->power.lock); | 374 | spin_unlock(&parent->power.lock); |
351 | 375 | ||
352 | spin_lock_irq(&dev->power.lock); | 376 | spin_lock(&dev->power.lock); |
353 | if (retval) | 377 | if (retval) |
354 | goto out; | 378 | goto out; |
355 | goto repeat; | 379 | goto repeat; |
@@ -364,6 +388,22 @@ int __pm_runtime_resume(struct device *dev, bool from_wq) | |||
364 | 388 | ||
365 | spin_lock_irq(&dev->power.lock); | 389 | spin_lock_irq(&dev->power.lock); |
366 | dev->power.runtime_error = retval; | 390 | dev->power.runtime_error = retval; |
391 | } else if (dev->type && dev->type->pm | ||
392 | && dev->type->pm->runtime_resume) { | ||
393 | spin_unlock_irq(&dev->power.lock); | ||
394 | |||
395 | retval = dev->type->pm->runtime_resume(dev); | ||
396 | |||
397 | spin_lock_irq(&dev->power.lock); | ||
398 | dev->power.runtime_error = retval; | ||
399 | } else if (dev->class && dev->class->pm | ||
400 | && dev->class->pm->runtime_resume) { | ||
401 | spin_unlock_irq(&dev->power.lock); | ||
402 | |||
403 | retval = dev->class->pm->runtime_resume(dev); | ||
404 | |||
405 | spin_lock_irq(&dev->power.lock); | ||
406 | dev->power.runtime_error = retval; | ||
367 | } else { | 407 | } else { |
368 | retval = -ENOSYS; | 408 | retval = -ENOSYS; |
369 | } | 409 | } |
@@ -630,6 +670,8 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay) | |||
630 | goto out; | 670 | goto out; |
631 | 671 | ||
632 | dev->power.timer_expires = jiffies + msecs_to_jiffies(delay); | 672 | dev->power.timer_expires = jiffies + msecs_to_jiffies(delay); |
673 | if (!dev->power.timer_expires) | ||
674 | dev->power.timer_expires = 1; | ||
633 | mod_timer(&dev->power.suspend_timer, dev->power.timer_expires); | 675 | mod_timer(&dev->power.suspend_timer, dev->power.timer_expires); |
634 | 676 | ||
635 | out: | 677 | out: |
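pm_schedule_suspend() keeps its expiry in dev->power.timer_expires, and the new clamp guarantees the stored value is never 0, which the surrounding code can then treat as "no timer armed" even when jiffies + msecs_to_jiffies(delay) happens to wrap to exactly 0. Caller-side usage looks like the hypothetical snippet below; foo_request_autosuspend() is invented, while pm_schedule_suspend() and dev_warn() are real.

/* Hypothetical driver path: ask for a suspend 500 ms from now. */
static void foo_request_autosuspend(struct device *dev)
{
        int ret;

        ret = pm_schedule_suspend(dev, 500);    /* delay in milliseconds */
        if (ret < 0)
                dev_warn(dev, "failed to schedule suspend: %d\n", ret);
}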
@@ -663,13 +705,17 @@ static int __pm_request_resume(struct device *dev) | |||
663 | 705 | ||
664 | pm_runtime_deactivate_timer(dev); | 706 | pm_runtime_deactivate_timer(dev); |
665 | 707 | ||
708 | if (dev->power.runtime_status == RPM_SUSPENDING) { | ||
709 | dev->power.deferred_resume = true; | ||
710 | return retval; | ||
711 | } | ||
666 | if (dev->power.request_pending) { | 712 | if (dev->power.request_pending) { |
667 | /* If non-resume request is pending, we can overtake it. */ | 713 | /* If non-resume request is pending, we can overtake it. */ |
668 | dev->power.request = retval ? RPM_REQ_NONE : RPM_REQ_RESUME; | 714 | dev->power.request = retval ? RPM_REQ_NONE : RPM_REQ_RESUME; |
669 | return retval; | 715 | return retval; |
670 | } else if (retval) { | ||
671 | return retval; | ||
672 | } | 716 | } |
717 | if (retval) | ||
718 | return retval; | ||
673 | 719 | ||
674 | dev->power.request = RPM_REQ_RESUME; | 720 | dev->power.request = RPM_REQ_RESUME; |
675 | dev->power.request_pending = true; | 721 | dev->power.request_pending = true; |
@@ -700,15 +746,15 @@ EXPORT_SYMBOL_GPL(pm_request_resume); | |||
700 | * @dev: Device to handle. | 746 | * @dev: Device to handle. |
701 | * @sync: If set and the device is suspended, resume it synchronously. | 747 | * @sync: If set and the device is suspended, resume it synchronously. |
702 | * | 748 | * |
703 | * Increment the usage count of the device and if it was zero previously, | 749 | * Increment the usage count of the device and resume it or submit a resume |
704 | * resume it or submit a resume request for it, depending on the value of @sync. | 750 | * request for it, depending on the value of @sync. |
705 | */ | 751 | */ |
706 | int __pm_runtime_get(struct device *dev, bool sync) | 752 | int __pm_runtime_get(struct device *dev, bool sync) |
707 | { | 753 | { |
708 | int retval = 1; | 754 | int retval; |
709 | 755 | ||
710 | if (atomic_add_return(1, &dev->power.usage_count) == 1) | 756 | atomic_inc(&dev->power.usage_count); |
711 | retval = sync ? pm_runtime_resume(dev) : pm_request_resume(dev); | 757 | retval = sync ? pm_runtime_resume(dev) : pm_request_resume(dev); |
712 | 758 | ||
713 | return retval; | 759 | return retval; |
714 | } | 760 | } |
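__pm_runtime_get() now increments the usage count unconditionally and always attempts the resume, rather than resuming only on the 0 -> 1 transition; the resume paths return quickly when the device is already active, so the extra attempts are cheap. Drivers normally reach it through the pm_runtime_get_sync()/pm_runtime_put() wrappers, as in this hypothetical I/O path (foo_do_io() is invented).

/* Hypothetical I/O path built on the get/put helpers. */
static int foo_do_io(struct device *dev)
{
        int ret;

        ret = pm_runtime_get_sync(dev);         /* resume synchronously if needed */
        if (ret < 0) {
                pm_runtime_put(dev);            /* drop the reference taken above */
                return ret;
        }

        /* ... access the now-powered hardware ... */

        pm_runtime_put(dev);                    /* may queue an idle notification */
        return 0;
}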
@@ -781,7 +827,7 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status) | |||
781 | } | 827 | } |
782 | 828 | ||
783 | if (parent) { | 829 | if (parent) { |
784 | spin_lock_irq(&parent->power.lock); | 830 | spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING); |
785 | 831 | ||
786 | /* | 832 | /* |
787 | * It is invalid to put an active child under a parent that is | 833 | * It is invalid to put an active child under a parent that is |
@@ -790,14 +836,12 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status) | |||
790 | */ | 836 | */ |
791 | if (!parent->power.disable_depth | 837 | if (!parent->power.disable_depth |
792 | && !parent->power.ignore_children | 838 | && !parent->power.ignore_children |
793 | && parent->power.runtime_status != RPM_ACTIVE) { | 839 | && parent->power.runtime_status != RPM_ACTIVE) |
794 | error = -EBUSY; | 840 | error = -EBUSY; |
795 | } else { | 841 | else if (dev->power.runtime_status == RPM_SUSPENDED) |
796 | if (dev->power.runtime_status == RPM_SUSPENDED) | 842 | atomic_inc(&parent->power.child_count); |
797 | atomic_inc(&parent->power.child_count); | ||
798 | } | ||
799 | 843 | ||
800 | spin_unlock_irq(&parent->power.lock); | 844 | spin_unlock(&parent->power.lock); |
801 | 845 | ||
802 | if (error) | 846 | if (error) |
803 | goto out; | 847 | goto out; |