author     Andrea Bastoni <bastoni@cs.unc.edu>   2010-05-30 19:16:45 -0400
committer  Andrea Bastoni <bastoni@cs.unc.edu>   2010-05-30 19:16:45 -0400
commit     ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree       644b88f8a71896307d71438e9b3af49126ffb22b /drivers/base
parent     43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent     3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)
Merge branch 'wip-2.6.34' into old-private-master (archived-private-master)
Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/Kconfig                  51
-rw-r--r--  drivers/base/attribute_container.c     1
-rw-r--r--  drivers/base/bus.c                     29
-rw-r--r--  drivers/base/class.c                   20
-rw-r--r--  drivers/base/core.c                    82
-rw-r--r--  drivers/base/cpu.c                    106
-rw-r--r--  drivers/base/dd.c                      40
-rw-r--r--  drivers/base/devres.c                   1
-rw-r--r--  drivers/base/devtmpfs.c               116
-rw-r--r--  drivers/base/dma-coherent.c             1
-rw-r--r--  drivers/base/dma-mapping.c              1
-rw-r--r--  drivers/base/driver.c                   5
-rw-r--r--  drivers/base/firmware_class.c          28
-rw-r--r--  drivers/base/memory.c                 112
-rw-r--r--  drivers/base/module.c                   1
-rw-r--r--  drivers/base/node.c                   285
-rw-r--r--  drivers/base/platform.c               139
-rw-r--r--  drivers/base/power/Makefile             1
-rw-r--r--  drivers/base/power/generic_ops.c      233
-rw-r--r--  drivers/base/power/main.c             333
-rw-r--r--  drivers/base/power/power.h              6
-rw-r--r--  drivers/base/power/runtime.c          123
-rw-r--r--  drivers/base/power/sysfs.c            100
-rw-r--r--  drivers/base/sys.c                     18
24 files changed, 1470 insertions, 362 deletions
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index ee377270beb9..fd52c48ee762 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -3,35 +3,50 @@ menu "Generic Driver Options" | |||
3 | config UEVENT_HELPER_PATH | 3 | config UEVENT_HELPER_PATH |
4 | string "path to uevent helper" | 4 | string "path to uevent helper" |
5 | depends on HOTPLUG | 5 | depends on HOTPLUG |
6 | default "/sbin/hotplug" | 6 | default "" |
7 | help | 7 | help |
8 | Path to uevent helper program forked by the kernel for | 8 | Path to uevent helper program forked by the kernel for |
9 | every uevent. | 9 | every uevent. |
10 | Before the switch to the netlink-based uevent source, this was | ||
11 | used to hook hotplug scripts into kernel device events. It | ||
12 | usually pointed to a shell script at /sbin/hotplug. | ||
13 | This should not be used today, because usual systems create | ||
14 | many events at bootup or device discovery in a very short time | ||
15 | frame. One forked process per event can create so many processes | ||
16 | that it creates a high system load, or on smaller systems | ||
17 | it is known to create out-of-memory situations during bootup. | ||
10 | 18 | ||
11 | config DEVTMPFS | 19 | config DEVTMPFS |
12 | bool "Create a kernel maintained /dev tmpfs (EXPERIMENTAL)" | 20 | bool "Maintain a devtmpfs filesystem to mount at /dev" |
13 | depends on HOTPLUG && SHMEM && TMPFS | 21 | depends on HOTPLUG && SHMEM && TMPFS |
14 | help | 22 | help |
15 | This creates a tmpfs filesystem, and mounts it at bootup | 23 | This creates a tmpfs filesystem instance early at bootup. |
16 | and mounts it at /dev. The kernel driver core creates device | 24 | In this filesystem, the kernel driver core maintains device |
17 | nodes for all registered devices in that filesystem. All device | 25 | nodes with their default names and permissions for all |
18 | nodes are owned by root and have the default mode of 0600. | 26 | registered devices with an assigned major/minor number. |
19 | Userspace can add and delete the nodes as needed. This is | 27 | Userspace can modify the filesystem content as needed, add |
20 | intended to simplify bootup, and make it possible to delay | 28 | symlinks, and apply needed permissions. |
21 | the initial coldplug at bootup done by udev in userspace. | 29 | It provides a fully functional /dev directory, where usually |
22 | It should also provide a simpler way for rescue systems | 30 | udev runs on top, managing permissions and adding meaningful |
23 | to bring up a kernel with dynamic major/minor numbers. | 31 | symlinks. |
24 | Meaningful symlinks, permissions and device ownership must | 32 | In very limited environments, it may provide a sufficient |
25 | still be handled by userspace. | 33 | functional /dev without any further help. It also allows simple |
26 | If unsure, say N here. | 34 | rescue systems, and reliably handles dynamic major/minor numbers. |
27 | 35 | ||
28 | config DEVTMPFS_MOUNT | 36 | config DEVTMPFS_MOUNT |
29 | bool "Automount devtmpfs at /dev" | 37 | bool "Automount devtmpfs at /dev, after the kernel mounted the rootfs" |
30 | depends on DEVTMPFS | 38 | depends on DEVTMPFS |
31 | help | 39 | help |
32 | This will mount devtmpfs at /dev if the kernel mounts the root | 40 | This will instruct the kernel to automatically mount the |
33 | filesystem. It will not affect initramfs based mounting. | 41 | devtmpfs filesystem at /dev, directly after the kernel has |
34 | If unsure, say N here. | 42 | mounted the root filesystem. The behavior can be overridden |
43 | with the commandline parameter: devtmpfs.mount=0|1. | ||
44 | This option does not affect initramfs based booting, here | ||
45 | the devtmpfs filesystem always needs to be mounted manually | ||
46 | after the roots is mounted. | ||
47 | With this option enabled, it allows to bring up a system in | ||
48 | rescue mode with init=/bin/sh, even when the /dev directory | ||
49 | on the rootfs is completely empty. | ||
35 | 50 | ||
36 | config STANDALONE | 51 | config STANDALONE |
37 | bool "Select only drivers that don't need compile-time external firmware" if EXPERIMENTAL | 52 | bool "Select only drivers that don't need compile-time external firmware" if EXPERIMENTAL |
diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c
index b9cda053d3c0..8fc200b2e2c0 100644
--- a/drivers/base/attribute_container.c
+++ b/drivers/base/attribute_container.c
@@ -328,6 +328,7 @@ attribute_container_add_attrs(struct device *classdev) | |||
328 | return sysfs_create_group(&classdev->kobj, cont->grp); | 328 | return sysfs_create_group(&classdev->kobj, cont->grp); |
329 | 329 | ||
330 | for (i = 0; attrs[i]; i++) { | 330 | for (i = 0; attrs[i]; i++) { |
331 | sysfs_attr_init(&attrs[i]->attr); | ||
331 | error = device_create_file(classdev, attrs[i]); | 332 | error = device_create_file(classdev, attrs[i]); |
332 | if (error) | 333 | if (error) |
333 | return error; | 334 | return error; |
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 63c143e54a57..12eec3f633b1 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/device.h> | 13 | #include <linux/device.h> |
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/errno.h> | 15 | #include <linux/errno.h> |
16 | #include <linux/slab.h> | ||
16 | #include <linux/init.h> | 17 | #include <linux/init.h> |
17 | #include <linux/string.h> | 18 | #include <linux/string.h> |
18 | #include "base.h" | 19 | #include "base.h" |
@@ -70,7 +71,7 @@ static ssize_t drv_attr_store(struct kobject *kobj, struct attribute *attr, | |||
70 | return ret; | 71 | return ret; |
71 | } | 72 | } |
72 | 73 | ||
73 | static struct sysfs_ops driver_sysfs_ops = { | 74 | static const struct sysfs_ops driver_sysfs_ops = { |
74 | .show = drv_attr_show, | 75 | .show = drv_attr_show, |
75 | .store = drv_attr_store, | 76 | .store = drv_attr_store, |
76 | }; | 77 | }; |
@@ -115,7 +116,7 @@ static ssize_t bus_attr_store(struct kobject *kobj, struct attribute *attr, | |||
115 | return ret; | 116 | return ret; |
116 | } | 117 | } |
117 | 118 | ||
118 | static struct sysfs_ops bus_sysfs_ops = { | 119 | static const struct sysfs_ops bus_sysfs_ops = { |
119 | .show = bus_attr_show, | 120 | .show = bus_attr_show, |
120 | .store = bus_attr_store, | 121 | .store = bus_attr_store, |
121 | }; | 122 | }; |
@@ -154,7 +155,7 @@ static int bus_uevent_filter(struct kset *kset, struct kobject *kobj) | |||
154 | return 0; | 155 | return 0; |
155 | } | 156 | } |
156 | 157 | ||
157 | static struct kset_uevent_ops bus_uevent_ops = { | 158 | static const struct kset_uevent_ops bus_uevent_ops = { |
158 | .filter = bus_uevent_filter, | 159 | .filter = bus_uevent_filter, |
159 | }; | 160 | }; |
160 | 161 | ||
@@ -173,10 +174,10 @@ static ssize_t driver_unbind(struct device_driver *drv, | |||
173 | dev = bus_find_device_by_name(bus, NULL, buf); | 174 | dev = bus_find_device_by_name(bus, NULL, buf); |
174 | if (dev && dev->driver == drv) { | 175 | if (dev && dev->driver == drv) { |
175 | if (dev->parent) /* Needed for USB */ | 176 | if (dev->parent) /* Needed for USB */ |
176 | down(&dev->parent->sem); | 177 | device_lock(dev->parent); |
177 | device_release_driver(dev); | 178 | device_release_driver(dev); |
178 | if (dev->parent) | 179 | if (dev->parent) |
179 | up(&dev->parent->sem); | 180 | device_unlock(dev->parent); |
180 | err = count; | 181 | err = count; |
181 | } | 182 | } |
182 | put_device(dev); | 183 | put_device(dev); |
@@ -200,12 +201,12 @@ static ssize_t driver_bind(struct device_driver *drv, | |||
200 | dev = bus_find_device_by_name(bus, NULL, buf); | 201 | dev = bus_find_device_by_name(bus, NULL, buf); |
201 | if (dev && dev->driver == NULL && driver_match_device(drv, dev)) { | 202 | if (dev && dev->driver == NULL && driver_match_device(drv, dev)) { |
202 | if (dev->parent) /* Needed for USB */ | 203 | if (dev->parent) /* Needed for USB */ |
203 | down(&dev->parent->sem); | 204 | device_lock(dev->parent); |
204 | down(&dev->sem); | 205 | device_lock(dev); |
205 | err = driver_probe_device(drv, dev); | 206 | err = driver_probe_device(drv, dev); |
206 | up(&dev->sem); | 207 | device_unlock(dev); |
207 | if (dev->parent) | 208 | if (dev->parent) |
208 | up(&dev->parent->sem); | 209 | device_unlock(dev->parent); |
209 | 210 | ||
210 | if (err > 0) { | 211 | if (err > 0) { |
211 | /* success */ | 212 | /* success */ |
@@ -703,9 +704,9 @@ int bus_add_driver(struct device_driver *drv) | |||
703 | return 0; | 704 | return 0; |
704 | 705 | ||
705 | out_unregister: | 706 | out_unregister: |
707 | kobject_put(&priv->kobj); | ||
706 | kfree(drv->p); | 708 | kfree(drv->p); |
707 | drv->p = NULL; | 709 | drv->p = NULL; |
708 | kobject_put(&priv->kobj); | ||
709 | out_put_bus: | 710 | out_put_bus: |
710 | bus_put(bus); | 711 | bus_put(bus); |
711 | return error; | 712 | return error; |
@@ -744,10 +745,10 @@ static int __must_check bus_rescan_devices_helper(struct device *dev, | |||
744 | 745 | ||
745 | if (!dev->driver) { | 746 | if (!dev->driver) { |
746 | if (dev->parent) /* Needed for USB */ | 747 | if (dev->parent) /* Needed for USB */ |
747 | down(&dev->parent->sem); | 748 | device_lock(dev->parent); |
748 | ret = device_attach(dev); | 749 | ret = device_attach(dev); |
749 | if (dev->parent) | 750 | if (dev->parent) |
750 | up(&dev->parent->sem); | 751 | device_unlock(dev->parent); |
751 | } | 752 | } |
752 | return ret < 0 ? ret : 0; | 753 | return ret < 0 ? ret : 0; |
753 | } | 754 | } |
@@ -779,10 +780,10 @@ int device_reprobe(struct device *dev) | |||
779 | { | 780 | { |
780 | if (dev->driver) { | 781 | if (dev->driver) { |
781 | if (dev->parent) /* Needed for USB */ | 782 | if (dev->parent) /* Needed for USB */ |
782 | down(&dev->parent->sem); | 783 | device_lock(dev->parent); |
783 | device_release_driver(dev); | 784 | device_release_driver(dev); |
784 | if (dev->parent) | 785 | if (dev->parent) |
785 | up(&dev->parent->sem); | 786 | device_unlock(dev->parent); |
786 | } | 787 | } |
787 | return bus_rescan_devices_helper(dev, NULL); | 788 | return bus_rescan_devices_helper(dev, NULL); |
788 | } | 789 | } |
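Note: the bus.c hunks above replace open-coded down()/up() on dev->sem (and on the parent's semaphore) with the device_lock()/device_unlock() helpers. A minimal sketch of the resulting locking pattern, assuming the 2.6.34-era helpers shown in the diff; the rebind_one() name is illustrative and driver_probe_device() is driver-core internal, exactly as used above:

#include <linux/device.h>

/* Sketch only: mirrors the manual bind path above under the new lock helpers. */
static int rebind_one(struct device_driver *drv, struct device *dev)
{
        int err = 0;

        if (dev->parent)                /* needed for USB interfaces */
                device_lock(dev->parent);
        device_lock(dev);
        if (!dev->driver)
                err = driver_probe_device(drv, dev);    /* drivers/base internal */
        device_unlock(dev);
        if (dev->parent)
                device_unlock(dev->parent);
        return err;
}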
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 161746deab4b..9c6a0d6408e7 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -31,7 +31,7 @@ static ssize_t class_attr_show(struct kobject *kobj, struct attribute *attr, | |||
31 | ssize_t ret = -EIO; | 31 | ssize_t ret = -EIO; |
32 | 32 | ||
33 | if (class_attr->show) | 33 | if (class_attr->show) |
34 | ret = class_attr->show(cp->class, buf); | 34 | ret = class_attr->show(cp->class, class_attr, buf); |
35 | return ret; | 35 | return ret; |
36 | } | 36 | } |
37 | 37 | ||
@@ -43,7 +43,7 @@ static ssize_t class_attr_store(struct kobject *kobj, struct attribute *attr, | |||
43 | ssize_t ret = -EIO; | 43 | ssize_t ret = -EIO; |
44 | 44 | ||
45 | if (class_attr->store) | 45 | if (class_attr->store) |
46 | ret = class_attr->store(cp->class, buf, count); | 46 | ret = class_attr->store(cp->class, class_attr, buf, count); |
47 | return ret; | 47 | return ret; |
48 | } | 48 | } |
49 | 49 | ||
@@ -59,9 +59,11 @@ static void class_release(struct kobject *kobj) | |||
59 | else | 59 | else |
60 | pr_debug("class '%s' does not have a release() function, " | 60 | pr_debug("class '%s' does not have a release() function, " |
61 | "be careful\n", class->name); | 61 | "be careful\n", class->name); |
62 | |||
63 | kfree(cp); | ||
62 | } | 64 | } |
63 | 65 | ||
64 | static struct sysfs_ops class_sysfs_ops = { | 66 | static const struct sysfs_ops class_sysfs_ops = { |
65 | .show = class_attr_show, | 67 | .show = class_attr_show, |
66 | .store = class_attr_store, | 68 | .store = class_attr_store, |
67 | }; | 69 | }; |
@@ -217,6 +219,8 @@ static void class_create_release(struct class *cls) | |||
217 | * This is used to create a struct class pointer that can then be used | 219 | * This is used to create a struct class pointer that can then be used |
218 | * in calls to device_create(). | 220 | * in calls to device_create(). |
219 | * | 221 | * |
222 | * Returns &struct class pointer on success, or ERR_PTR() on error. | ||
223 | * | ||
220 | * Note, the pointer created here is to be destroyed when finished by | 224 | * Note, the pointer created here is to be destroyed when finished by |
221 | * making a call to class_destroy(). | 225 | * making a call to class_destroy(). |
222 | */ | 226 | */ |
@@ -488,6 +492,16 @@ void class_interface_unregister(struct class_interface *class_intf) | |||
488 | class_put(parent); | 492 | class_put(parent); |
489 | } | 493 | } |
490 | 494 | ||
495 | ssize_t show_class_attr_string(struct class *class, struct class_attribute *attr, | ||
496 | char *buf) | ||
497 | { | ||
498 | struct class_attribute_string *cs; | ||
499 | cs = container_of(attr, struct class_attribute_string, attr); | ||
500 | return snprintf(buf, PAGE_SIZE, "%s\n", cs->str); | ||
501 | } | ||
502 | |||
503 | EXPORT_SYMBOL_GPL(show_class_attr_string); | ||
504 | |||
491 | struct class_compat { | 505 | struct class_compat { |
492 | struct kobject *kobj; | 506 | struct kobject *kobj; |
493 | }; | 507 | }; |
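Note: the class.c changes above thread the struct class_attribute pointer through the show()/store() callbacks and add show_class_attr_string(). A sketch of a read-only class attribute with the widened signature, assuming the CLASS_ATTR() convenience macro from <linux/device.h>; the "version" attribute and its value are illustrative:

#include <linux/device.h>

static ssize_t version_show(struct class *class,
                            struct class_attribute *attr, char *buf)
{
        /* attr is now passed in, so one handler can serve several attributes */
        return sprintf(buf, "1.0\n");
}
static CLASS_ATTR(version, 0444, version_show, NULL);

/* Registered later with class_create_file(cls, &class_attr_version). */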
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 6bee6af8d8e1..b56a0ba31d4a 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -56,7 +56,14 @@ static inline int device_is_not_partition(struct device *dev) | |||
56 | */ | 56 | */ |
57 | const char *dev_driver_string(const struct device *dev) | 57 | const char *dev_driver_string(const struct device *dev) |
58 | { | 58 | { |
59 | return dev->driver ? dev->driver->name : | 59 | struct device_driver *drv; |
60 | |||
61 | /* dev->driver can change to NULL underneath us because of unbinding, | ||
62 | * so be careful about accessing it. dev->bus and dev->class should | ||
63 | * never change once they are set, so they don't need special care. | ||
64 | */ | ||
65 | drv = ACCESS_ONCE(dev->driver); | ||
66 | return drv ? drv->name : | ||
60 | (dev->bus ? dev->bus->name : | 67 | (dev->bus ? dev->bus->name : |
61 | (dev->class ? dev->class->name : "")); | 68 | (dev->class ? dev->class->name : "")); |
62 | } | 69 | } |
@@ -93,7 +100,7 @@ static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr, | |||
93 | return ret; | 100 | return ret; |
94 | } | 101 | } |
95 | 102 | ||
96 | static struct sysfs_ops dev_sysfs_ops = { | 103 | static const struct sysfs_ops dev_sysfs_ops = { |
97 | .show = dev_attr_show, | 104 | .show = dev_attr_show, |
98 | .store = dev_attr_store, | 105 | .store = dev_attr_store, |
99 | }; | 106 | }; |
@@ -245,7 +252,7 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj, | |||
245 | return retval; | 252 | return retval; |
246 | } | 253 | } |
247 | 254 | ||
248 | static struct kset_uevent_ops device_uevent_ops = { | 255 | static const struct kset_uevent_ops device_uevent_ops = { |
249 | .filter = dev_uevent_filter, | 256 | .filter = dev_uevent_filter, |
250 | .name = dev_uevent_name, | 257 | .name = dev_uevent_name, |
251 | .uevent = dev_uevent, | 258 | .uevent = dev_uevent, |
@@ -299,15 +306,10 @@ static ssize_t store_uevent(struct device *dev, struct device_attribute *attr, | |||
299 | { | 306 | { |
300 | enum kobject_action action; | 307 | enum kobject_action action; |
301 | 308 | ||
302 | if (kobject_action_type(buf, count, &action) == 0) { | 309 | if (kobject_action_type(buf, count, &action) == 0) |
303 | kobject_uevent(&dev->kobj, action); | 310 | kobject_uevent(&dev->kobj, action); |
304 | goto out; | 311 | else |
305 | } | 312 | dev_err(dev, "uevent: unknown action-string\n"); |
306 | |||
307 | dev_err(dev, "uevent: unsupported action-string; this will " | ||
308 | "be ignored in a future kernel version\n"); | ||
309 | kobject_uevent(&dev->kobj, KOBJ_ADD); | ||
310 | out: | ||
311 | return count; | 313 | return count; |
312 | } | 314 | } |
313 | 315 | ||
@@ -439,7 +441,8 @@ struct kset *devices_kset; | |||
439 | * @dev: device. | 441 | * @dev: device. |
440 | * @attr: device attribute descriptor. | 442 | * @attr: device attribute descriptor. |
441 | */ | 443 | */ |
442 | int device_create_file(struct device *dev, struct device_attribute *attr) | 444 | int device_create_file(struct device *dev, |
445 | const struct device_attribute *attr) | ||
443 | { | 446 | { |
444 | int error = 0; | 447 | int error = 0; |
445 | if (dev) | 448 | if (dev) |
@@ -452,7 +455,8 @@ int device_create_file(struct device *dev, struct device_attribute *attr) | |||
452 | * @dev: device. | 455 | * @dev: device. |
453 | * @attr: device attribute descriptor. | 456 | * @attr: device attribute descriptor. |
454 | */ | 457 | */ |
455 | void device_remove_file(struct device *dev, struct device_attribute *attr) | 458 | void device_remove_file(struct device *dev, |
459 | const struct device_attribute *attr) | ||
456 | { | 460 | { |
457 | if (dev) | 461 | if (dev) |
458 | sysfs_remove_file(&dev->kobj, &attr->attr); | 462 | sysfs_remove_file(&dev->kobj, &attr->attr); |
@@ -463,7 +467,8 @@ void device_remove_file(struct device *dev, struct device_attribute *attr) | |||
463 | * @dev: device. | 467 | * @dev: device. |
464 | * @attr: device binary attribute descriptor. | 468 | * @attr: device binary attribute descriptor. |
465 | */ | 469 | */ |
466 | int device_create_bin_file(struct device *dev, struct bin_attribute *attr) | 470 | int device_create_bin_file(struct device *dev, |
471 | const struct bin_attribute *attr) | ||
467 | { | 472 | { |
468 | int error = -EINVAL; | 473 | int error = -EINVAL; |
469 | if (dev) | 474 | if (dev) |
@@ -477,7 +482,8 @@ EXPORT_SYMBOL_GPL(device_create_bin_file); | |||
477 | * @dev: device. | 482 | * @dev: device. |
478 | * @attr: device binary attribute descriptor. | 483 | * @attr: device binary attribute descriptor. |
479 | */ | 484 | */ |
480 | void device_remove_bin_file(struct device *dev, struct bin_attribute *attr) | 485 | void device_remove_bin_file(struct device *dev, |
486 | const struct bin_attribute *attr) | ||
481 | { | 487 | { |
482 | if (dev) | 488 | if (dev) |
483 | sysfs_remove_bin_file(&dev->kobj, attr); | 489 | sysfs_remove_bin_file(&dev->kobj, attr); |
@@ -596,6 +602,7 @@ static struct kobject *get_device_parent(struct device *dev, | |||
596 | int retval; | 602 | int retval; |
597 | 603 | ||
598 | if (dev->class) { | 604 | if (dev->class) { |
605 | static DEFINE_MUTEX(gdp_mutex); | ||
599 | struct kobject *kobj = NULL; | 606 | struct kobject *kobj = NULL; |
600 | struct kobject *parent_kobj; | 607 | struct kobject *parent_kobj; |
601 | struct kobject *k; | 608 | struct kobject *k; |
@@ -612,6 +619,8 @@ static struct kobject *get_device_parent(struct device *dev, | |||
612 | else | 619 | else |
613 | parent_kobj = &parent->kobj; | 620 | parent_kobj = &parent->kobj; |
614 | 621 | ||
622 | mutex_lock(&gdp_mutex); | ||
623 | |||
615 | /* find our class-directory at the parent and reference it */ | 624 | /* find our class-directory at the parent and reference it */ |
616 | spin_lock(&dev->class->p->class_dirs.list_lock); | 625 | spin_lock(&dev->class->p->class_dirs.list_lock); |
617 | list_for_each_entry(k, &dev->class->p->class_dirs.list, entry) | 626 | list_for_each_entry(k, &dev->class->p->class_dirs.list, entry) |
@@ -620,20 +629,26 @@ static struct kobject *get_device_parent(struct device *dev, | |||
620 | break; | 629 | break; |
621 | } | 630 | } |
622 | spin_unlock(&dev->class->p->class_dirs.list_lock); | 631 | spin_unlock(&dev->class->p->class_dirs.list_lock); |
623 | if (kobj) | 632 | if (kobj) { |
633 | mutex_unlock(&gdp_mutex); | ||
624 | return kobj; | 634 | return kobj; |
635 | } | ||
625 | 636 | ||
626 | /* or create a new class-directory at the parent device */ | 637 | /* or create a new class-directory at the parent device */ |
627 | k = kobject_create(); | 638 | k = kobject_create(); |
628 | if (!k) | 639 | if (!k) { |
640 | mutex_unlock(&gdp_mutex); | ||
629 | return NULL; | 641 | return NULL; |
642 | } | ||
630 | k->kset = &dev->class->p->class_dirs; | 643 | k->kset = &dev->class->p->class_dirs; |
631 | retval = kobject_add(k, parent_kobj, "%s", dev->class->name); | 644 | retval = kobject_add(k, parent_kobj, "%s", dev->class->name); |
632 | if (retval < 0) { | 645 | if (retval < 0) { |
646 | mutex_unlock(&gdp_mutex); | ||
633 | kobject_put(k); | 647 | kobject_put(k); |
634 | return NULL; | 648 | return NULL; |
635 | } | 649 | } |
636 | /* do not emit an uevent for this simple "glue" directory */ | 650 | /* do not emit an uevent for this simple "glue" directory */ |
651 | mutex_unlock(&gdp_mutex); | ||
637 | return k; | 652 | return k; |
638 | } | 653 | } |
639 | 654 | ||
@@ -898,8 +913,10 @@ int device_add(struct device *dev) | |||
898 | dev->init_name = NULL; | 913 | dev->init_name = NULL; |
899 | } | 914 | } |
900 | 915 | ||
901 | if (!dev_name(dev)) | 916 | if (!dev_name(dev)) { |
917 | error = -EINVAL; | ||
902 | goto name_error; | 918 | goto name_error; |
919 | } | ||
903 | 920 | ||
904 | pr_debug("device: '%s': %s\n", dev_name(dev), __func__); | 921 | pr_debug("device: '%s': %s\n", dev_name(dev), __func__); |
905 | 922 | ||
@@ -987,6 +1004,8 @@ done: | |||
987 | device_remove_class_symlinks(dev); | 1004 | device_remove_class_symlinks(dev); |
988 | SymlinkError: | 1005 | SymlinkError: |
989 | if (MAJOR(dev->devt)) | 1006 | if (MAJOR(dev->devt)) |
1007 | devtmpfs_delete_node(dev); | ||
1008 | if (MAJOR(dev->devt)) | ||
990 | device_remove_sys_dev_entry(dev); | 1009 | device_remove_sys_dev_entry(dev); |
991 | devtattrError: | 1010 | devtattrError: |
992 | if (MAJOR(dev->devt)) | 1011 | if (MAJOR(dev->devt)) |
@@ -1326,6 +1345,8 @@ static void root_device_release(struct device *dev) | |||
1326 | * 'module' symlink which points to the @owner directory | 1345 | * 'module' symlink which points to the @owner directory |
1327 | * in sysfs. | 1346 | * in sysfs. |
1328 | * | 1347 | * |
1348 | * Returns &struct device pointer on success, or ERR_PTR() on error. | ||
1349 | * | ||
1329 | * Note: You probably want to use root_device_register(). | 1350 | * Note: You probably want to use root_device_register(). |
1330 | */ | 1351 | */ |
1331 | struct device *__root_device_register(const char *name, struct module *owner) | 1352 | struct device *__root_device_register(const char *name, struct module *owner) |
@@ -1413,6 +1434,8 @@ static void device_create_release(struct device *dev) | |||
1413 | * Any further sysfs files that might be required can be created using this | 1434 | * Any further sysfs files that might be required can be created using this |
1414 | * pointer. | 1435 | * pointer. |
1415 | * | 1436 | * |
1437 | * Returns &struct device pointer on success, or ERR_PTR() on error. | ||
1438 | * | ||
1416 | * Note: the struct class passed to this function must have previously | 1439 | * Note: the struct class passed to this function must have previously |
1417 | * been created with a call to class_create(). | 1440 | * been created with a call to class_create(). |
1418 | */ | 1441 | */ |
@@ -1473,6 +1496,8 @@ EXPORT_SYMBOL_GPL(device_create_vargs); | |||
1473 | * Any further sysfs files that might be required can be created using this | 1496 | * Any further sysfs files that might be required can be created using this |
1474 | * pointer. | 1497 | * pointer. |
1475 | * | 1498 | * |
1499 | * Returns &struct device pointer on success, or ERR_PTR() on error. | ||
1500 | * | ||
1476 | * Note: the struct class passed to this function must have previously | 1501 | * Note: the struct class passed to this function must have previously |
1477 | * been created with a call to class_create(). | 1502 | * been created with a call to class_create(). |
1478 | */ | 1503 | */ |
@@ -1559,22 +1584,16 @@ int device_rename(struct device *dev, char *new_name) | |||
1559 | if (old_class_name) { | 1584 | if (old_class_name) { |
1560 | new_class_name = make_class_name(dev->class->name, &dev->kobj); | 1585 | new_class_name = make_class_name(dev->class->name, &dev->kobj); |
1561 | if (new_class_name) { | 1586 | if (new_class_name) { |
1562 | error = sysfs_create_link_nowarn(&dev->parent->kobj, | 1587 | error = sysfs_rename_link(&dev->parent->kobj, |
1563 | &dev->kobj, | 1588 | &dev->kobj, |
1564 | new_class_name); | 1589 | old_class_name, |
1565 | if (error) | 1590 | new_class_name); |
1566 | goto out; | ||
1567 | sysfs_remove_link(&dev->parent->kobj, old_class_name); | ||
1568 | } | 1591 | } |
1569 | } | 1592 | } |
1570 | #else | 1593 | #else |
1571 | if (dev->class) { | 1594 | if (dev->class) { |
1572 | error = sysfs_create_link_nowarn(&dev->class->p->class_subsys.kobj, | 1595 | error = sysfs_rename_link(&dev->class->p->class_subsys.kobj, |
1573 | &dev->kobj, dev_name(dev)); | 1596 | &dev->kobj, old_device_name, new_name); |
1574 | if (error) | ||
1575 | goto out; | ||
1576 | sysfs_remove_link(&dev->class->p->class_subsys.kobj, | ||
1577 | old_device_name); | ||
1578 | } | 1597 | } |
1579 | #endif | 1598 | #endif |
1580 | 1599 | ||
@@ -1728,8 +1747,5 @@ void device_shutdown(void) | |||
1728 | dev->driver->shutdown(dev); | 1747 | dev->driver->shutdown(dev); |
1729 | } | 1748 | } |
1730 | } | 1749 | } |
1731 | kobject_put(sysfs_dev_char_kobj); | ||
1732 | kobject_put(sysfs_dev_block_kobj); | ||
1733 | kobject_put(dev_kobj); | ||
1734 | async_synchronize_full(); | 1750 | async_synchronize_full(); |
1735 | } | 1751 | } |
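Note: core.c now takes const pointers in device_create_file()/device_remove_file() and their binary-attribute counterparts, so statically defined attribute descriptors can be passed directly. A small sketch using the long-standing DEVICE_ATTR() macro; the "ratio" name and value are illustrative:

#include <linux/device.h>

static ssize_t ratio_show(struct device *dev,
                          struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", 42);        /* illustrative value */
}
static DEVICE_ATTR(ratio, 0444, ratio_show, NULL);

static int demo_add_files(struct device *dev)
{
        return device_create_file(dev, &dev_attr_ratio);
}

static void demo_remove_files(struct device *dev)
{
        device_remove_file(dev, &dev_attr_ratio);
}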
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index e62a4ccea54d..f35719aab3c1 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -10,11 +10,15 @@ | |||
10 | #include <linux/topology.h> | 10 | #include <linux/topology.h> |
11 | #include <linux/device.h> | 11 | #include <linux/device.h> |
12 | #include <linux/node.h> | 12 | #include <linux/node.h> |
13 | #include <linux/gfp.h> | ||
13 | 14 | ||
14 | #include "base.h" | 15 | #include "base.h" |
15 | 16 | ||
17 | static struct sysdev_class_attribute *cpu_sysdev_class_attrs[]; | ||
18 | |||
16 | struct sysdev_class cpu_sysdev_class = { | 19 | struct sysdev_class cpu_sysdev_class = { |
17 | .name = "cpu", | 20 | .name = "cpu", |
21 | .attrs = cpu_sysdev_class_attrs, | ||
18 | }; | 22 | }; |
19 | EXPORT_SYMBOL(cpu_sysdev_class); | 23 | EXPORT_SYMBOL(cpu_sysdev_class); |
20 | 24 | ||
@@ -35,6 +39,7 @@ static ssize_t __ref store_online(struct sys_device *dev, struct sysdev_attribut | |||
35 | struct cpu *cpu = container_of(dev, struct cpu, sysdev); | 39 | struct cpu *cpu = container_of(dev, struct cpu, sysdev); |
36 | ssize_t ret; | 40 | ssize_t ret; |
37 | 41 | ||
42 | cpu_hotplug_driver_lock(); | ||
38 | switch (buf[0]) { | 43 | switch (buf[0]) { |
39 | case '0': | 44 | case '0': |
40 | ret = cpu_down(cpu->sysdev.id); | 45 | ret = cpu_down(cpu->sysdev.id); |
@@ -49,6 +54,7 @@ static ssize_t __ref store_online(struct sys_device *dev, struct sysdev_attribut | |||
49 | default: | 54 | default: |
50 | ret = -EINVAL; | 55 | ret = -EINVAL; |
51 | } | 56 | } |
57 | cpu_hotplug_driver_unlock(); | ||
52 | 58 | ||
53 | if (ret >= 0) | 59 | if (ret >= 0) |
54 | ret = count; | 60 | ret = count; |
@@ -72,6 +78,28 @@ void unregister_cpu(struct cpu *cpu) | |||
72 | per_cpu(cpu_sys_devices, logical_cpu) = NULL; | 78 | per_cpu(cpu_sys_devices, logical_cpu) = NULL; |
73 | return; | 79 | return; |
74 | } | 80 | } |
81 | |||
82 | #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE | ||
83 | static ssize_t cpu_probe_store(struct sysdev_class *class, | ||
84 | struct sysdev_class_attribute *attr, | ||
85 | const char *buf, | ||
86 | size_t count) | ||
87 | { | ||
88 | return arch_cpu_probe(buf, count); | ||
89 | } | ||
90 | |||
91 | static ssize_t cpu_release_store(struct sysdev_class *class, | ||
92 | struct sysdev_class_attribute *attr, | ||
93 | const char *buf, | ||
94 | size_t count) | ||
95 | { | ||
96 | return arch_cpu_release(buf, count); | ||
97 | } | ||
98 | |||
99 | static SYSDEV_CLASS_ATTR(probe, S_IWUSR, NULL, cpu_probe_store); | ||
100 | static SYSDEV_CLASS_ATTR(release, S_IWUSR, NULL, cpu_release_store); | ||
101 | #endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */ | ||
102 | |||
75 | #else /* ... !CONFIG_HOTPLUG_CPU */ | 103 | #else /* ... !CONFIG_HOTPLUG_CPU */ |
76 | static inline void register_cpu_control(struct cpu *cpu) | 104 | static inline void register_cpu_control(struct cpu *cpu) |
77 | { | 105 | { |
@@ -97,7 +125,7 @@ static ssize_t show_crash_notes(struct sys_device *dev, struct sysdev_attribute | |||
97 | * boot up and this data does not change there after. Hence this | 125 | * boot up and this data does not change there after. Hence this |
98 | * operation should be safe. No locking required. | 126 | * operation should be safe. No locking required. |
99 | */ | 127 | */ |
100 | addr = __pa(per_cpu_ptr(crash_notes, cpunum)); | 128 | addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpunum)); |
101 | rc = sprintf(buf, "%Lx\n", addr); | 129 | rc = sprintf(buf, "%Lx\n", addr); |
102 | return rc; | 130 | return rc; |
103 | } | 131 | } |
@@ -107,31 +135,39 @@ static SYSDEV_ATTR(crash_notes, 0400, show_crash_notes, NULL); | |||
107 | /* | 135 | /* |
108 | * Print cpu online, possible, present, and system maps | 136 | * Print cpu online, possible, present, and system maps |
109 | */ | 137 | */ |
110 | static ssize_t print_cpus_map(char *buf, const struct cpumask *map) | 138 | |
139 | struct cpu_attr { | ||
140 | struct sysdev_class_attribute attr; | ||
141 | const struct cpumask *const * const map; | ||
142 | }; | ||
143 | |||
144 | static ssize_t show_cpus_attr(struct sysdev_class *class, | ||
145 | struct sysdev_class_attribute *attr, | ||
146 | char *buf) | ||
111 | { | 147 | { |
112 | int n = cpulist_scnprintf(buf, PAGE_SIZE-2, map); | 148 | struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr); |
149 | int n = cpulist_scnprintf(buf, PAGE_SIZE-2, *(ca->map)); | ||
113 | 150 | ||
114 | buf[n++] = '\n'; | 151 | buf[n++] = '\n'; |
115 | buf[n] = '\0'; | 152 | buf[n] = '\0'; |
116 | return n; | 153 | return n; |
117 | } | 154 | } |
118 | 155 | ||
119 | #define print_cpus_func(type) \ | 156 | #define _CPU_ATTR(name, map) \ |
120 | static ssize_t print_cpus_##type(struct sysdev_class *class, char *buf) \ | 157 | { _SYSDEV_CLASS_ATTR(name, 0444, show_cpus_attr, NULL), map } |
121 | { \ | ||
122 | return print_cpus_map(buf, cpu_##type##_mask); \ | ||
123 | } \ | ||
124 | static struct sysdev_class_attribute attr_##type##_map = \ | ||
125 | _SYSDEV_CLASS_ATTR(type, 0444, print_cpus_##type, NULL) | ||
126 | 158 | ||
127 | print_cpus_func(online); | 159 | /* Keep in sync with cpu_sysdev_class_attrs */ |
128 | print_cpus_func(possible); | 160 | static struct cpu_attr cpu_attrs[] = { |
129 | print_cpus_func(present); | 161 | _CPU_ATTR(online, &cpu_online_mask), |
162 | _CPU_ATTR(possible, &cpu_possible_mask), | ||
163 | _CPU_ATTR(present, &cpu_present_mask), | ||
164 | }; | ||
130 | 165 | ||
131 | /* | 166 | /* |
132 | * Print values for NR_CPUS and offlined cpus | 167 | * Print values for NR_CPUS and offlined cpus |
133 | */ | 168 | */ |
134 | static ssize_t print_cpus_kernel_max(struct sysdev_class *class, char *buf) | 169 | static ssize_t print_cpus_kernel_max(struct sysdev_class *class, |
170 | struct sysdev_class_attribute *attr, char *buf) | ||
135 | { | 171 | { |
136 | int n = snprintf(buf, PAGE_SIZE-2, "%d\n", NR_CPUS - 1); | 172 | int n = snprintf(buf, PAGE_SIZE-2, "%d\n", NR_CPUS - 1); |
137 | return n; | 173 | return n; |
@@ -141,7 +177,8 @@ static SYSDEV_CLASS_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL); | |||
141 | /* arch-optional setting to enable display of offline cpus >= nr_cpu_ids */ | 177 | /* arch-optional setting to enable display of offline cpus >= nr_cpu_ids */ |
142 | unsigned int total_cpus; | 178 | unsigned int total_cpus; |
143 | 179 | ||
144 | static ssize_t print_cpus_offline(struct sysdev_class *class, char *buf) | 180 | static ssize_t print_cpus_offline(struct sysdev_class *class, |
181 | struct sysdev_class_attribute *attr, char *buf) | ||
145 | { | 182 | { |
146 | int n = 0, len = PAGE_SIZE-2; | 183 | int n = 0, len = PAGE_SIZE-2; |
147 | cpumask_var_t offline; | 184 | cpumask_var_t offline; |
@@ -170,29 +207,6 @@ static ssize_t print_cpus_offline(struct sysdev_class *class, char *buf) | |||
170 | } | 207 | } |
171 | static SYSDEV_CLASS_ATTR(offline, 0444, print_cpus_offline, NULL); | 208 | static SYSDEV_CLASS_ATTR(offline, 0444, print_cpus_offline, NULL); |
172 | 209 | ||
173 | static struct sysdev_class_attribute *cpu_state_attr[] = { | ||
174 | &attr_online_map, | ||
175 | &attr_possible_map, | ||
176 | &attr_present_map, | ||
177 | &attr_kernel_max, | ||
178 | &attr_offline, | ||
179 | }; | ||
180 | |||
181 | static int cpu_states_init(void) | ||
182 | { | ||
183 | int i; | ||
184 | int err = 0; | ||
185 | |||
186 | for (i = 0; i < ARRAY_SIZE(cpu_state_attr); i++) { | ||
187 | int ret; | ||
188 | ret = sysdev_class_create_file(&cpu_sysdev_class, | ||
189 | cpu_state_attr[i]); | ||
190 | if (!err) | ||
191 | err = ret; | ||
192 | } | ||
193 | return err; | ||
194 | } | ||
195 | |||
196 | /* | 210 | /* |
197 | * register_cpu - Setup a sysfs device for a CPU. | 211 | * register_cpu - Setup a sysfs device for a CPU. |
198 | * @cpu - cpu->hotpluggable field set to 1 will generate a control file in | 212 | * @cpu - cpu->hotpluggable field set to 1 will generate a control file in |
@@ -238,9 +252,6 @@ int __init cpu_dev_init(void) | |||
238 | int err; | 252 | int err; |
239 | 253 | ||
240 | err = sysdev_class_register(&cpu_sysdev_class); | 254 | err = sysdev_class_register(&cpu_sysdev_class); |
241 | if (!err) | ||
242 | err = cpu_states_init(); | ||
243 | |||
244 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) | 255 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) |
245 | if (!err) | 256 | if (!err) |
246 | err = sched_create_sysfs_power_savings_entries(&cpu_sysdev_class); | 257 | err = sched_create_sysfs_power_savings_entries(&cpu_sysdev_class); |
@@ -248,3 +259,16 @@ int __init cpu_dev_init(void) | |||
248 | 259 | ||
249 | return err; | 260 | return err; |
250 | } | 261 | } |
262 | |||
263 | static struct sysdev_class_attribute *cpu_sysdev_class_attrs[] = { | ||
264 | #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE | ||
265 | &attr_probe, | ||
266 | &attr_release, | ||
267 | #endif | ||
268 | &cpu_attrs[0].attr, | ||
269 | &cpu_attrs[1].attr, | ||
270 | &cpu_attrs[2].attr, | ||
271 | &attr_kernel_max, | ||
272 | &attr_offline, | ||
273 | NULL | ||
274 | }; | ||
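Note: cpu.c switches from creating class attributes one by one to listing them in the sysdev_class .attrs array, which the sysdev core then registers and removes automatically. A sketch of the same pattern for a hypothetical "demo" class, assuming the 2.6.34-era API in <linux/sysdev.h>:

#include <linux/sysdev.h>

static ssize_t demo_show(struct sysdev_class *class,
                         struct sysdev_class_attribute *attr, char *buf)
{
        return sprintf(buf, "demo\n");
}
static SYSDEV_CLASS_ATTR(demo, 0444, demo_show, NULL);  /* defines attr_demo */

static struct sysdev_class_attribute *demo_sysdev_class_attrs[] = {
        &attr_demo,
        NULL
};

static struct sysdev_class demo_sysdev_class = {
        .name  = "demo",
        .attrs = demo_sysdev_class_attrs,
};

/* sysdev_class_register(&demo_sysdev_class) creates the "demo" file in sysfs. */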
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 979d159b5cd1..c89291f8a16b 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -85,7 +85,7 @@ static void driver_sysfs_remove(struct device *dev) | |||
85 | * for before calling this. (It is ok to call with no other effort | 85 | * for before calling this. (It is ok to call with no other effort |
86 | * from a driver's probe() method.) | 86 | * from a driver's probe() method.) |
87 | * | 87 | * |
88 | * This function must be called with @dev->sem held. | 88 | * This function must be called with the device lock held. |
89 | */ | 89 | */ |
90 | int device_bind_driver(struct device *dev) | 90 | int device_bind_driver(struct device *dev) |
91 | { | 91 | { |
@@ -188,10 +188,10 @@ EXPORT_SYMBOL_GPL(wait_for_device_probe); | |||
188 | * @dev: device to try to bind to the driver | 188 | * @dev: device to try to bind to the driver |
189 | * | 189 | * |
190 | * This function returns -ENODEV if the device is not registered, | 190 | * This function returns -ENODEV if the device is not registered, |
191 | * 1 if the device is bound sucessfully and 0 otherwise. | 191 | * 1 if the device is bound successfully and 0 otherwise. |
192 | * | 192 | * |
193 | * This function must be called with @dev->sem held. When called for a | 193 | * This function must be called with @dev lock held. When called for a |
194 | * USB interface, @dev->parent->sem must be held as well. | 194 | * USB interface, @dev->parent lock must be held as well. |
195 | */ | 195 | */ |
196 | int driver_probe_device(struct device_driver *drv, struct device *dev) | 196 | int driver_probe_device(struct device_driver *drv, struct device *dev) |
197 | { | 197 | { |
@@ -233,13 +233,13 @@ static int __device_attach(struct device_driver *drv, void *data) | |||
233 | * 0 if no matching driver was found; | 233 | * 0 if no matching driver was found; |
234 | * -ENODEV if the device is not registered. | 234 | * -ENODEV if the device is not registered. |
235 | * | 235 | * |
236 | * When called for a USB interface, @dev->parent->sem must be held. | 236 | * When called for a USB interface, @dev->parent lock must be held. |
237 | */ | 237 | */ |
238 | int device_attach(struct device *dev) | 238 | int device_attach(struct device *dev) |
239 | { | 239 | { |
240 | int ret = 0; | 240 | int ret = 0; |
241 | 241 | ||
242 | down(&dev->sem); | 242 | device_lock(dev); |
243 | if (dev->driver) { | 243 | if (dev->driver) { |
244 | ret = device_bind_driver(dev); | 244 | ret = device_bind_driver(dev); |
245 | if (ret == 0) | 245 | if (ret == 0) |
@@ -253,7 +253,7 @@ int device_attach(struct device *dev) | |||
253 | ret = bus_for_each_drv(dev->bus, NULL, dev, __device_attach); | 253 | ret = bus_for_each_drv(dev->bus, NULL, dev, __device_attach); |
254 | pm_runtime_put_sync(dev); | 254 | pm_runtime_put_sync(dev); |
255 | } | 255 | } |
256 | up(&dev->sem); | 256 | device_unlock(dev); |
257 | return ret; | 257 | return ret; |
258 | } | 258 | } |
259 | EXPORT_SYMBOL_GPL(device_attach); | 259 | EXPORT_SYMBOL_GPL(device_attach); |
@@ -276,13 +276,13 @@ static int __driver_attach(struct device *dev, void *data) | |||
276 | return 0; | 276 | return 0; |
277 | 277 | ||
278 | if (dev->parent) /* Needed for USB */ | 278 | if (dev->parent) /* Needed for USB */ |
279 | down(&dev->parent->sem); | 279 | device_lock(dev->parent); |
280 | down(&dev->sem); | 280 | device_lock(dev); |
281 | if (!dev->driver) | 281 | if (!dev->driver) |
282 | driver_probe_device(drv, dev); | 282 | driver_probe_device(drv, dev); |
283 | up(&dev->sem); | 283 | device_unlock(dev); |
284 | if (dev->parent) | 284 | if (dev->parent) |
285 | up(&dev->parent->sem); | 285 | device_unlock(dev->parent); |
286 | 286 | ||
287 | return 0; | 287 | return 0; |
288 | } | 288 | } |
@@ -303,8 +303,8 @@ int driver_attach(struct device_driver *drv) | |||
303 | EXPORT_SYMBOL_GPL(driver_attach); | 303 | EXPORT_SYMBOL_GPL(driver_attach); |
304 | 304 | ||
305 | /* | 305 | /* |
306 | * __device_release_driver() must be called with @dev->sem held. | 306 | * __device_release_driver() must be called with @dev lock held. |
307 | * When called for a USB interface, @dev->parent->sem must be held as well. | 307 | * When called for a USB interface, @dev->parent lock must be held as well. |
308 | */ | 308 | */ |
309 | static void __device_release_driver(struct device *dev) | 309 | static void __device_release_driver(struct device *dev) |
310 | { | 310 | { |
@@ -343,7 +343,7 @@ static void __device_release_driver(struct device *dev) | |||
343 | * @dev: device. | 343 | * @dev: device. |
344 | * | 344 | * |
345 | * Manually detach device from driver. | 345 | * Manually detach device from driver. |
346 | * When called for a USB interface, @dev->parent->sem must be held. | 346 | * When called for a USB interface, @dev->parent lock must be held. |
347 | */ | 347 | */ |
348 | void device_release_driver(struct device *dev) | 348 | void device_release_driver(struct device *dev) |
349 | { | 349 | { |
@@ -352,9 +352,9 @@ void device_release_driver(struct device *dev) | |||
352 | * within their ->remove callback for the same device, they | 352 | * within their ->remove callback for the same device, they |
353 | * will deadlock right here. | 353 | * will deadlock right here. |
354 | */ | 354 | */ |
355 | down(&dev->sem); | 355 | device_lock(dev); |
356 | __device_release_driver(dev); | 356 | __device_release_driver(dev); |
357 | up(&dev->sem); | 357 | device_unlock(dev); |
358 | } | 358 | } |
359 | EXPORT_SYMBOL_GPL(device_release_driver); | 359 | EXPORT_SYMBOL_GPL(device_release_driver); |
360 | 360 | ||
@@ -381,13 +381,13 @@ void driver_detach(struct device_driver *drv) | |||
381 | spin_unlock(&drv->p->klist_devices.k_lock); | 381 | spin_unlock(&drv->p->klist_devices.k_lock); |
382 | 382 | ||
383 | if (dev->parent) /* Needed for USB */ | 383 | if (dev->parent) /* Needed for USB */ |
384 | down(&dev->parent->sem); | 384 | device_lock(dev->parent); |
385 | down(&dev->sem); | 385 | device_lock(dev); |
386 | if (dev->driver == drv) | 386 | if (dev->driver == drv) |
387 | __device_release_driver(dev); | 387 | __device_release_driver(dev); |
388 | up(&dev->sem); | 388 | device_unlock(dev); |
389 | if (dev->parent) | 389 | if (dev->parent) |
390 | up(&dev->parent->sem); | 390 | device_unlock(dev->parent); |
391 | put_device(dev); | 391 | put_device(dev); |
392 | } | 392 | } |
393 | } | 393 | } |
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 05dd307e8f02..cf7a0c788052 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -9,6 +9,7 @@ | |||
9 | 9 | ||
10 | #include <linux/device.h> | 10 | #include <linux/device.h> |
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/slab.h> | ||
12 | 13 | ||
13 | #include "base.h" | 14 | #include "base.h" |
14 | 15 | ||
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index a1cb5afe6801..057cf11326bf 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/cred.h> | 23 | #include <linux/cred.h> |
24 | #include <linux/sched.h> | 24 | #include <linux/sched.h> |
25 | #include <linux/init_task.h> | 25 | #include <linux/init_task.h> |
26 | #include <linux/slab.h> | ||
26 | 27 | ||
27 | static struct vfsmount *dev_mnt; | 28 | static struct vfsmount *dev_mnt; |
28 | 29 | ||
@@ -32,6 +33,8 @@ static int dev_mount = 1; | |||
32 | static int dev_mount; | 33 | static int dev_mount; |
33 | #endif | 34 | #endif |
34 | 35 | ||
36 | static DEFINE_MUTEX(dirlock); | ||
37 | |||
35 | static int __init mount_param(char *str) | 38 | static int __init mount_param(char *str) |
36 | { | 39 | { |
37 | dev_mount = simple_strtoul(str, NULL, 0); | 40 | dev_mount = simple_strtoul(str, NULL, 0); |
@@ -74,47 +77,37 @@ static int dev_mkdir(const char *name, mode_t mode) | |||
74 | dentry = lookup_create(&nd, 1); | 77 | dentry = lookup_create(&nd, 1); |
75 | if (!IS_ERR(dentry)) { | 78 | if (!IS_ERR(dentry)) { |
76 | err = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode); | 79 | err = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode); |
80 | if (!err) | ||
81 | /* mark as kernel-created inode */ | ||
82 | dentry->d_inode->i_private = &dev_mnt; | ||
77 | dput(dentry); | 83 | dput(dentry); |
78 | } else { | 84 | } else { |
79 | err = PTR_ERR(dentry); | 85 | err = PTR_ERR(dentry); |
80 | } | 86 | } |
81 | mutex_unlock(&nd.path.dentry->d_inode->i_mutex); | ||
82 | 87 | ||
88 | mutex_unlock(&nd.path.dentry->d_inode->i_mutex); | ||
83 | path_put(&nd.path); | 89 | path_put(&nd.path); |
84 | return err; | 90 | return err; |
85 | } | 91 | } |
86 | 92 | ||
87 | static int create_path(const char *nodepath) | 93 | static int create_path(const char *nodepath) |
88 | { | 94 | { |
89 | char *path; | 95 | int err; |
90 | struct nameidata nd; | ||
91 | int err = 0; | ||
92 | |||
93 | path = kstrdup(nodepath, GFP_KERNEL); | ||
94 | if (!path) | ||
95 | return -ENOMEM; | ||
96 | |||
97 | err = vfs_path_lookup(dev_mnt->mnt_root, dev_mnt, | ||
98 | path, LOOKUP_PARENT, &nd); | ||
99 | if (err == 0) { | ||
100 | struct dentry *dentry; | ||
101 | |||
102 | /* create directory right away */ | ||
103 | dentry = lookup_create(&nd, 1); | ||
104 | if (!IS_ERR(dentry)) { | ||
105 | err = vfs_mkdir(nd.path.dentry->d_inode, | ||
106 | dentry, 0755); | ||
107 | dput(dentry); | ||
108 | } | ||
109 | mutex_unlock(&nd.path.dentry->d_inode->i_mutex); | ||
110 | 96 | ||
111 | path_put(&nd.path); | 97 | mutex_lock(&dirlock); |
112 | } else if (err == -ENOENT) { | 98 | err = dev_mkdir(nodepath, 0755); |
99 | if (err == -ENOENT) { | ||
100 | char *path; | ||
113 | char *s; | 101 | char *s; |
114 | 102 | ||
115 | /* parent directories do not exist, create them */ | 103 | /* parent directories do not exist, create them */ |
104 | path = kstrdup(nodepath, GFP_KERNEL); | ||
105 | if (!path) { | ||
106 | err = -ENOMEM; | ||
107 | goto out; | ||
108 | } | ||
116 | s = path; | 109 | s = path; |
117 | while (1) { | 110 | for (;;) { |
118 | s = strchr(s, '/'); | 111 | s = strchr(s, '/'); |
119 | if (!s) | 112 | if (!s) |
120 | break; | 113 | break; |
@@ -125,9 +118,10 @@ static int create_path(const char *nodepath) | |||
125 | s[0] = '/'; | 118 | s[0] = '/'; |
126 | s++; | 119 | s++; |
127 | } | 120 | } |
121 | kfree(path); | ||
128 | } | 122 | } |
129 | 123 | out: | |
130 | kfree(path); | 124 | mutex_unlock(&dirlock); |
131 | return err; | 125 | return err; |
132 | } | 126 | } |
133 | 127 | ||
@@ -156,34 +150,40 @@ int devtmpfs_create_node(struct device *dev) | |||
156 | mode |= S_IFCHR; | 150 | mode |= S_IFCHR; |
157 | 151 | ||
158 | curr_cred = override_creds(&init_cred); | 152 | curr_cred = override_creds(&init_cred); |
153 | |||
159 | err = vfs_path_lookup(dev_mnt->mnt_root, dev_mnt, | 154 | err = vfs_path_lookup(dev_mnt->mnt_root, dev_mnt, |
160 | nodename, LOOKUP_PARENT, &nd); | 155 | nodename, LOOKUP_PARENT, &nd); |
161 | if (err == -ENOENT) { | 156 | if (err == -ENOENT) { |
162 | /* create missing parent directories */ | ||
163 | create_path(nodename); | 157 | create_path(nodename); |
164 | err = vfs_path_lookup(dev_mnt->mnt_root, dev_mnt, | 158 | err = vfs_path_lookup(dev_mnt->mnt_root, dev_mnt, |
165 | nodename, LOOKUP_PARENT, &nd); | 159 | nodename, LOOKUP_PARENT, &nd); |
166 | if (err) | ||
167 | goto out; | ||
168 | } | 160 | } |
161 | if (err) | ||
162 | goto out; | ||
169 | 163 | ||
170 | dentry = lookup_create(&nd, 0); | 164 | dentry = lookup_create(&nd, 0); |
171 | if (!IS_ERR(dentry)) { | 165 | if (!IS_ERR(dentry)) { |
172 | int umask; | ||
173 | |||
174 | umask = sys_umask(0000); | ||
175 | err = vfs_mknod(nd.path.dentry->d_inode, | 166 | err = vfs_mknod(nd.path.dentry->d_inode, |
176 | dentry, mode, dev->devt); | 167 | dentry, mode, dev->devt); |
177 | sys_umask(umask); | 168 | if (!err) { |
178 | /* mark as kernel created inode */ | 169 | struct iattr newattrs; |
179 | if (!err) | 170 | |
171 | /* fixup possibly umasked mode */ | ||
172 | newattrs.ia_mode = mode; | ||
173 | newattrs.ia_valid = ATTR_MODE; | ||
174 | mutex_lock(&dentry->d_inode->i_mutex); | ||
175 | notify_change(dentry, &newattrs); | ||
176 | mutex_unlock(&dentry->d_inode->i_mutex); | ||
177 | |||
178 | /* mark as kernel-created inode */ | ||
180 | dentry->d_inode->i_private = &dev_mnt; | 179 | dentry->d_inode->i_private = &dev_mnt; |
180 | } | ||
181 | dput(dentry); | 181 | dput(dentry); |
182 | } else { | 182 | } else { |
183 | err = PTR_ERR(dentry); | 183 | err = PTR_ERR(dentry); |
184 | } | 184 | } |
185 | mutex_unlock(&nd.path.dentry->d_inode->i_mutex); | ||
186 | 185 | ||
186 | mutex_unlock(&nd.path.dentry->d_inode->i_mutex); | ||
187 | path_put(&nd.path); | 187 | path_put(&nd.path); |
188 | out: | 188 | out: |
189 | kfree(tmp); | 189 | kfree(tmp); |
@@ -205,16 +205,21 @@ static int dev_rmdir(const char *name) | |||
205 | mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT); | 205 | mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT); |
206 | dentry = lookup_one_len(nd.last.name, nd.path.dentry, nd.last.len); | 206 | dentry = lookup_one_len(nd.last.name, nd.path.dentry, nd.last.len); |
207 | if (!IS_ERR(dentry)) { | 207 | if (!IS_ERR(dentry)) { |
208 | if (dentry->d_inode) | 208 | if (dentry->d_inode) { |
209 | err = vfs_rmdir(nd.path.dentry->d_inode, dentry); | 209 | if (dentry->d_inode->i_private == &dev_mnt) |
210 | else | 210 | err = vfs_rmdir(nd.path.dentry->d_inode, |
211 | dentry); | ||
212 | else | ||
213 | err = -EPERM; | ||
214 | } else { | ||
211 | err = -ENOENT; | 215 | err = -ENOENT; |
216 | } | ||
212 | dput(dentry); | 217 | dput(dentry); |
213 | } else { | 218 | } else { |
214 | err = PTR_ERR(dentry); | 219 | err = PTR_ERR(dentry); |
215 | } | 220 | } |
216 | mutex_unlock(&nd.path.dentry->d_inode->i_mutex); | ||
217 | 221 | ||
222 | mutex_unlock(&nd.path.dentry->d_inode->i_mutex); | ||
218 | path_put(&nd.path); | 223 | path_put(&nd.path); |
219 | return err; | 224 | return err; |
220 | } | 225 | } |
@@ -228,7 +233,8 @@ static int delete_path(const char *nodepath) | |||
228 | if (!path) | 233 | if (!path) |
229 | return -ENOMEM; | 234 | return -ENOMEM; |
230 | 235 | ||
231 | while (1) { | 236 | mutex_lock(&dirlock); |
237 | for (;;) { | ||
232 | char *base; | 238 | char *base; |
233 | 239 | ||
234 | base = strrchr(path, '/'); | 240 | base = strrchr(path, '/'); |
@@ -239,6 +245,7 @@ static int delete_path(const char *nodepath) | |||
239 | if (err) | 245 | if (err) |
240 | break; | 246 | break; |
241 | } | 247 | } |
248 | mutex_unlock(&dirlock); | ||
242 | 249 | ||
243 | kfree(path); | 250 | kfree(path); |
244 | return err; | 251 | return err; |
@@ -295,6 +302,19 @@ int devtmpfs_delete_node(struct device *dev) | |||
295 | if (dentry->d_inode) { | 302 | if (dentry->d_inode) { |
296 | err = vfs_getattr(nd.path.mnt, dentry, &stat); | 303 | err = vfs_getattr(nd.path.mnt, dentry, &stat); |
297 | if (!err && dev_mynode(dev, dentry->d_inode, &stat)) { | 304 | if (!err && dev_mynode(dev, dentry->d_inode, &stat)) { |
305 | struct iattr newattrs; | ||
306 | /* | ||
307 | * before unlinking this node, reset permissions | ||
308 | * of possible references like hardlinks | ||
309 | */ | ||
310 | newattrs.ia_uid = 0; | ||
311 | newattrs.ia_gid = 0; | ||
312 | newattrs.ia_mode = stat.mode & ~0777; | ||
313 | newattrs.ia_valid = | ||
314 | ATTR_UID|ATTR_GID|ATTR_MODE; | ||
315 | mutex_lock(&dentry->d_inode->i_mutex); | ||
316 | notify_change(dentry, &newattrs); | ||
317 | mutex_unlock(&dentry->d_inode->i_mutex); | ||
298 | err = vfs_unlink(nd.path.dentry->d_inode, | 318 | err = vfs_unlink(nd.path.dentry->d_inode, |
299 | dentry); | 319 | dentry); |
300 | if (!err || err == -ENOENT) | 320 | if (!err || err == -ENOENT) |
@@ -322,9 +342,8 @@ out: | |||
322 | * If configured, or requested by the commandline, devtmpfs will be | 342 | * If configured, or requested by the commandline, devtmpfs will be |
323 | * auto-mounted after the kernel mounted the root filesystem. | 343 | * auto-mounted after the kernel mounted the root filesystem. |
324 | */ | 344 | */ |
325 | int devtmpfs_mount(const char *mountpoint) | 345 | int devtmpfs_mount(const char *mntdir) |
326 | { | 346 | { |
327 | struct path path; | ||
328 | int err; | 347 | int err; |
329 | 348 | ||
330 | if (!dev_mount) | 349 | if (!dev_mount) |
@@ -333,15 +352,11 @@ int devtmpfs_mount(const char *mountpoint) | |||
333 | if (!dev_mnt) | 352 | if (!dev_mnt) |
334 | return 0; | 353 | return 0; |
335 | 354 | ||
336 | err = kern_path(mountpoint, LOOKUP_FOLLOW, &path); | 355 | err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL); |
337 | if (err) | ||
338 | return err; | ||
339 | err = do_add_mount(dev_mnt, &path, 0, NULL); | ||
340 | if (err) | 356 | if (err) |
341 | printk(KERN_INFO "devtmpfs: error mounting %i\n", err); | 357 | printk(KERN_INFO "devtmpfs: error mounting %i\n", err); |
342 | else | 358 | else |
343 | printk(KERN_INFO "devtmpfs: mounted\n"); | 359 | printk(KERN_INFO "devtmpfs: mounted\n"); |
344 | path_put(&path); | ||
345 | return err; | 360 | return err; |
346 | } | 361 | } |
347 | 362 | ||
@@ -353,6 +368,7 @@ int __init devtmpfs_init(void) | |||
353 | { | 368 | { |
354 | int err; | 369 | int err; |
355 | struct vfsmount *mnt; | 370 | struct vfsmount *mnt; |
371 | char options[] = "mode=0755"; | ||
356 | 372 | ||
357 | err = register_filesystem(&dev_fs_type); | 373 | err = register_filesystem(&dev_fs_type); |
358 | if (err) { | 374 | if (err) { |
@@ -361,7 +377,7 @@ int __init devtmpfs_init(void) | |||
361 | return err; | 377 | return err; |
362 | } | 378 | } |
363 | 379 | ||
364 | mnt = kern_mount(&dev_fs_type); | 380 | mnt = kern_mount_data(&dev_fs_type, options); |
365 | if (IS_ERR(mnt)) { | 381 | if (IS_ERR(mnt)) { |
366 | err = PTR_ERR(mnt); | 382 | err = PTR_ERR(mnt); |
367 | printk(KERN_ERR "devtmpfs: unable to create devtmpfs %i\n", err); | 383 | printk(KERN_ERR "devtmpfs: unable to create devtmpfs %i\n", err); |
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index 962a3b574f21..d4d8ce53886a 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -2,6 +2,7 @@ | |||
2 | * Coherent per-device memory handling. | 2 | * Coherent per-device memory handling. |
3 | * Borrowed from i386 | 3 | * Borrowed from i386 |
4 | */ | 4 | */ |
5 | #include <linux/slab.h> | ||
5 | #include <linux/kernel.h> | 6 | #include <linux/kernel.h> |
6 | #include <linux/dma-mapping.h> | 7 | #include <linux/dma-mapping.h> |
7 | 8 | ||
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index ca9186f70a69..763d59c1eb65 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -8,6 +8,7 @@ | |||
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/dma-mapping.h> | 10 | #include <linux/dma-mapping.h> |
11 | #include <linux/gfp.h> | ||
11 | 12 | ||
12 | /* | 13 | /* |
13 | * Managed DMA API | 14 | * Managed DMA API |
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index f367885a7646..b631f7c59453 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/device.h> | 13 | #include <linux/device.h> |
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/errno.h> | 15 | #include <linux/errno.h> |
16 | #include <linux/slab.h> | ||
16 | #include <linux/string.h> | 17 | #include <linux/string.h> |
17 | #include "base.h" | 18 | #include "base.h" |
18 | 19 | ||
@@ -98,7 +99,7 @@ EXPORT_SYMBOL_GPL(driver_find_device); | |||
98 | * @attr: driver attribute descriptor. | 99 | * @attr: driver attribute descriptor. |
99 | */ | 100 | */ |
100 | int driver_create_file(struct device_driver *drv, | 101 | int driver_create_file(struct device_driver *drv, |
101 | struct driver_attribute *attr) | 102 | const struct driver_attribute *attr) |
102 | { | 103 | { |
103 | int error; | 104 | int error; |
104 | if (drv) | 105 | if (drv) |
@@ -115,7 +116,7 @@ EXPORT_SYMBOL_GPL(driver_create_file); | |||
115 | * @attr: driver attribute descriptor. | 116 | * @attr: driver attribute descriptor. |
116 | */ | 117 | */ |
117 | void driver_remove_file(struct device_driver *drv, | 118 | void driver_remove_file(struct device_driver *drv, |
118 | struct driver_attribute *attr) | 119 | const struct driver_attribute *attr) |
119 | { | 120 | { |
120 | if (drv) | 121 | if (drv) |
121 | sysfs_remove_file(&drv->p->kobj, &attr->attr); | 122 | sysfs_remove_file(&drv->p->kobj, &attr->attr); |
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 7376367bcb80..985da11174e7 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -19,7 +19,7 @@ | |||
19 | #include <linux/kthread.h> | 19 | #include <linux/kthread.h> |
20 | #include <linux/highmem.h> | 20 | #include <linux/highmem.h> |
21 | #include <linux/firmware.h> | 21 | #include <linux/firmware.h> |
22 | #include "base.h" | 22 | #include <linux/slab.h> |
23 | 23 | ||
24 | #define to_dev(obj) container_of(obj, struct device, kobj) | 24 | #define to_dev(obj) container_of(obj, struct device, kobj) |
25 | 25 | ||
@@ -69,7 +69,9 @@ fw_load_abort(struct firmware_priv *fw_priv) | |||
69 | } | 69 | } |
70 | 70 | ||
71 | static ssize_t | 71 | static ssize_t |
72 | firmware_timeout_show(struct class *class, char *buf) | 72 | firmware_timeout_show(struct class *class, |
73 | struct class_attribute *attr, | ||
74 | char *buf) | ||
73 | { | 75 | { |
74 | return sprintf(buf, "%d\n", loading_timeout); | 76 | return sprintf(buf, "%d\n", loading_timeout); |
75 | } | 77 | } |
@@ -77,6 +79,7 @@ firmware_timeout_show(struct class *class, char *buf) | |||
77 | /** | 79 | /** |
78 | * firmware_timeout_store - set number of seconds to wait for firmware | 80 | * firmware_timeout_store - set number of seconds to wait for firmware |
79 | * @class: device class pointer | 81 | * @class: device class pointer |
82 | * @attr: device attribute pointer | ||
80 | * @buf: buffer to scan for timeout value | 83 | * @buf: buffer to scan for timeout value |
81 | * @count: number of bytes in @buf | 84 | * @count: number of bytes in @buf |
82 | * | 85 | * |
@@ -87,7 +90,9 @@ firmware_timeout_show(struct class *class, char *buf) | |||
87 | * Note: zero means 'wait forever'. | 90 | * Note: zero means 'wait forever'. |
88 | **/ | 91 | **/ |
89 | static ssize_t | 92 | static ssize_t |
90 | firmware_timeout_store(struct class *class, const char *buf, size_t count) | 93 | firmware_timeout_store(struct class *class, |
94 | struct class_attribute *attr, | ||
95 | const char *buf, size_t count) | ||
91 | { | 96 | { |
92 | loading_timeout = simple_strtol(buf, NULL, 10); | 97 | loading_timeout = simple_strtol(buf, NULL, 10); |
93 | if (loading_timeout < 0) | 98 | if (loading_timeout < 0) |
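Note: these hunks track the 2.6.34 API change that adds a struct class_attribute * argument to class attribute show/store methods. A minimal sketch of the new callback shape (the attribute and the value it prints are illustrative):

    static ssize_t example_show(struct class *class,
                                struct class_attribute *attr, char *buf)
    {
            return sprintf(buf, "%d\n", example_value);   /* example_value is hypothetical */
    }
    static CLASS_ATTR(example, 0444, example_show, NULL);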
@@ -439,6 +444,7 @@ static int fw_setup_device(struct firmware *fw, struct device **dev_p, | |||
439 | fw_priv = dev_get_drvdata(f_dev); | 444 | fw_priv = dev_get_drvdata(f_dev); |
440 | 445 | ||
441 | fw_priv->fw = fw; | 446 | fw_priv->fw = fw; |
447 | sysfs_bin_attr_init(&fw_priv->attr_data); | ||
442 | retval = sysfs_create_bin_file(&f_dev->kobj, &fw_priv->attr_data); | 448 | retval = sysfs_create_bin_file(&f_dev->kobj, &fw_priv->attr_data); |
443 | if (retval) { | 449 | if (retval) { |
444 | dev_err(device, "%s: sysfs_create_bin_file failed\n", __func__); | 450 | dev_err(device, "%s: sysfs_create_bin_file failed\n", __func__); |
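Note: sysfs_bin_attr_init() gives a dynamically allocated binary attribute the static lockdep class key that sysfs now expects. The general pattern for a heap-allocated bin attribute looks roughly like this (the callbacks are illustrative):

    struct bin_attribute *attr = kzalloc(sizeof(*attr), GFP_KERNEL);
    if (!attr)
            return -ENOMEM;
    sysfs_bin_attr_init(attr);
    attr->attr.name = "data";
    attr->attr.mode = 0644;
    attr->read  = my_read;     /* hypothetical read callback */
    attr->write = my_write;    /* hypothetical write callback */
    err = sysfs_create_bin_file(kobj, attr);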
@@ -601,24 +607,22 @@ request_firmware_work_func(void *arg) | |||
601 | } | 607 | } |
602 | ret = _request_firmware(&fw, fw_work->name, fw_work->device, | 608 | ret = _request_firmware(&fw, fw_work->name, fw_work->device, |
603 | fw_work->uevent); | 609 | fw_work->uevent); |
604 | if (ret < 0) | 610 | |
605 | fw_work->cont(NULL, fw_work->context); | 611 | fw_work->cont(fw, fw_work->context); |
606 | else { | 612 | |
607 | fw_work->cont(fw, fw_work->context); | ||
608 | release_firmware(fw); | ||
609 | } | ||
610 | module_put(fw_work->module); | 613 | module_put(fw_work->module); |
611 | kfree(fw_work); | 614 | kfree(fw_work); |
612 | return ret; | 615 | return ret; |
613 | } | 616 | } |
614 | 617 | ||
615 | /** | 618 | /** |
616 | * request_firmware_nowait: asynchronous version of request_firmware | 619 | * request_firmware_nowait - asynchronous version of request_firmware |
617 | * @module: module requesting the firmware | 620 | * @module: module requesting the firmware |
618 | * @uevent: sends uevent to copy the firmware image if this flag | 621 | * @uevent: sends uevent to copy the firmware image if this flag |
619 | * is non-zero else the firmware copy must be done manually. | 622 | * is non-zero else the firmware copy must be done manually. |
620 | * @name: name of firmware file | 623 | * @name: name of firmware file |
621 | * @device: device for which firmware is being loaded | 624 | * @device: device for which firmware is being loaded |
625 | * @gfp: allocation flags | ||
622 | * @context: will be passed over to @cont, and | 626 | * @context: will be passed over to @cont, and |
623 | * @fw may be %NULL if firmware request fails. | 627 | * @fw may be %NULL if firmware request fails. |
624 | * @cont: function will be called asynchronously when the firmware | 628 | * @cont: function will be called asynchronously when the firmware |
@@ -631,12 +635,12 @@ request_firmware_work_func(void *arg) | |||
631 | int | 635 | int |
632 | request_firmware_nowait( | 636 | request_firmware_nowait( |
633 | struct module *module, int uevent, | 637 | struct module *module, int uevent, |
634 | const char *name, struct device *device, void *context, | 638 | const char *name, struct device *device, gfp_t gfp, void *context, |
635 | void (*cont)(const struct firmware *fw, void *context)) | 639 | void (*cont)(const struct firmware *fw, void *context)) |
636 | { | 640 | { |
637 | struct task_struct *task; | 641 | struct task_struct *task; |
638 | struct firmware_work *fw_work = kmalloc(sizeof (struct firmware_work), | 642 | struct firmware_work *fw_work = kmalloc(sizeof (struct firmware_work), |
639 | GFP_ATOMIC); | 643 | gfp); |
640 | 644 | ||
641 | if (!fw_work) | 645 | if (!fw_work) |
642 | return -ENOMEM; | 646 | return -ENOMEM; |
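Note: with the new gfp argument and the reworked worker above, the continuation now runs even on failure (fw == NULL) and is responsible for releasing the image. A hedged usage sketch (device, callback and firmware names are illustrative):

    static void my_fw_done(const struct firmware *fw, void *context)
    {
            struct device *dev = context;

            if (!fw) {
                    dev_err(dev, "firmware request failed\n");
                    return;
            }
            /* consume fw->data / fw->size ... */
            release_firmware(fw);
    }

    err = request_firmware_nowait(THIS_MODULE, 1, "my_fw.bin",
                                  dev, GFP_KERNEL, dev, my_fw_done);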
diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 989429cfed88..933442f40321 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/mm.h> | 22 | #include <linux/mm.h> |
23 | #include <linux/mutex.h> | 23 | #include <linux/mutex.h> |
24 | #include <linux/stat.h> | 24 | #include <linux/stat.h> |
25 | #include <linux/slab.h> | ||
25 | 26 | ||
26 | #include <asm/atomic.h> | 27 | #include <asm/atomic.h> |
27 | #include <asm/uaccess.h> | 28 | #include <asm/uaccess.h> |
@@ -44,7 +45,7 @@ static int memory_uevent(struct kset *kset, struct kobject *obj, struct kobj_uev | |||
44 | return retval; | 45 | return retval; |
45 | } | 46 | } |
46 | 47 | ||
47 | static struct kset_uevent_ops memory_uevent_ops = { | 48 | static const struct kset_uevent_ops memory_uevent_ops = { |
48 | .name = memory_uevent_name, | 49 | .name = memory_uevent_name, |
49 | .uevent = memory_uevent, | 50 | .uevent = memory_uevent, |
50 | }; | 51 | }; |
@@ -63,6 +64,20 @@ void unregister_memory_notifier(struct notifier_block *nb) | |||
63 | } | 64 | } |
64 | EXPORT_SYMBOL(unregister_memory_notifier); | 65 | EXPORT_SYMBOL(unregister_memory_notifier); |
65 | 66 | ||
67 | static ATOMIC_NOTIFIER_HEAD(memory_isolate_chain); | ||
68 | |||
69 | int register_memory_isolate_notifier(struct notifier_block *nb) | ||
70 | { | ||
71 | return atomic_notifier_chain_register(&memory_isolate_chain, nb); | ||
72 | } | ||
73 | EXPORT_SYMBOL(register_memory_isolate_notifier); | ||
74 | |||
75 | void unregister_memory_isolate_notifier(struct notifier_block *nb) | ||
76 | { | ||
77 | atomic_notifier_chain_unregister(&memory_isolate_chain, nb); | ||
78 | } | ||
79 | EXPORT_SYMBOL(unregister_memory_isolate_notifier); | ||
80 | |||
66 | /* | 81 | /* |
67 | * register_memory - Setup a sysfs device for a memory block | 82 | * register_memory - Setup a sysfs device for a memory block |
68 | */ | 83 | */ |
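Note: the new isolate chain lets interested code observe (or veto) page isolation; registration follows the usual atomic notifier pattern. Sketch only — the callback body is illustrative, and the layout of the payload passed through arg depends on the caller of memory_isolate_notify():

    static int my_isolate_cb(struct notifier_block *nb,
                             unsigned long action, void *arg)
    {
            /* inspect the payload handed to memory_isolate_notify() */
            return NOTIFY_OK;
    }

    static struct notifier_block my_isolate_nb = {
            .notifier_call = my_isolate_cb,
    };

    register_memory_isolate_notifier(&my_isolate_nb);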
@@ -157,6 +172,11 @@ int memory_notify(unsigned long val, void *v) | |||
157 | return blocking_notifier_call_chain(&memory_chain, val, v); | 172 | return blocking_notifier_call_chain(&memory_chain, val, v); |
158 | } | 173 | } |
159 | 174 | ||
175 | int memory_isolate_notify(unsigned long val, void *v) | ||
176 | { | ||
177 | return atomic_notifier_call_chain(&memory_isolate_chain, val, v); | ||
178 | } | ||
179 | |||
160 | /* | 180 | /* |
161 | * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is | 181 | * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is |
162 | * OK to have direct references to sparsemem variables in here. | 182 | * OK to have direct references to sparsemem variables in here. |
@@ -290,17 +310,18 @@ static SYSDEV_ATTR(removable, 0444, show_mem_removable, NULL); | |||
290 | * Block size attribute stuff | 310 | * Block size attribute stuff |
291 | */ | 311 | */ |
292 | static ssize_t | 312 | static ssize_t |
293 | print_block_size(struct class *class, char *buf) | 313 | print_block_size(struct sysdev_class *class, struct sysdev_class_attribute *attr, |
314 | char *buf) | ||
294 | { | 315 | { |
295 | return sprintf(buf, "%lx\n", (unsigned long)PAGES_PER_SECTION * PAGE_SIZE); | 316 | return sprintf(buf, "%lx\n", (unsigned long)PAGES_PER_SECTION * PAGE_SIZE); |
296 | } | 317 | } |
297 | 318 | ||
298 | static CLASS_ATTR(block_size_bytes, 0444, print_block_size, NULL); | 319 | static SYSDEV_CLASS_ATTR(block_size_bytes, 0444, print_block_size, NULL); |
299 | 320 | ||
300 | static int block_size_init(void) | 321 | static int block_size_init(void) |
301 | { | 322 | { |
302 | return sysfs_create_file(&memory_sysdev_class.kset.kobj, | 323 | return sysfs_create_file(&memory_sysdev_class.kset.kobj, |
303 | &class_attr_block_size_bytes.attr); | 324 | &attr_block_size_bytes.attr); |
304 | } | 325 | } |
305 | 326 | ||
306 | /* | 327 | /* |
@@ -311,7 +332,8 @@ static int block_size_init(void) | |||
311 | */ | 332 | */ |
312 | #ifdef CONFIG_ARCH_MEMORY_PROBE | 333 | #ifdef CONFIG_ARCH_MEMORY_PROBE |
313 | static ssize_t | 334 | static ssize_t |
314 | memory_probe_store(struct class *class, const char *buf, size_t count) | 335 | memory_probe_store(struct class *class, struct class_attribute *attr, |
336 | const char *buf, size_t count) | ||
315 | { | 337 | { |
316 | u64 phys_addr; | 338 | u64 phys_addr; |
317 | int nid; | 339 | int nid; |
@@ -341,17 +363,83 @@ static inline int memory_probe_init(void) | |||
341 | } | 363 | } |
342 | #endif | 364 | #endif |
343 | 365 | ||
366 | #ifdef CONFIG_MEMORY_FAILURE | ||
367 | /* | ||
368 | * Support for offlining pages of memory | ||
369 | */ | ||
370 | |||
371 | /* Soft offline a page */ | ||
372 | static ssize_t | ||
373 | store_soft_offline_page(struct class *class, | ||
374 | struct class_attribute *attr, | ||
375 | const char *buf, size_t count) | ||
376 | { | ||
377 | int ret; | ||
378 | u64 pfn; | ||
379 | if (!capable(CAP_SYS_ADMIN)) | ||
380 | return -EPERM; | ||
381 | if (strict_strtoull(buf, 0, &pfn) < 0) | ||
382 | return -EINVAL; | ||
383 | pfn >>= PAGE_SHIFT; | ||
384 | if (!pfn_valid(pfn)) | ||
385 | return -ENXIO; | ||
386 | ret = soft_offline_page(pfn_to_page(pfn), 0); | ||
387 | return ret == 0 ? count : ret; | ||
388 | } | ||
389 | |||
390 | /* Forcibly offline a page, including killing processes. */ | ||
391 | static ssize_t | ||
392 | store_hard_offline_page(struct class *class, | ||
393 | struct class_attribute *attr, | ||
394 | const char *buf, size_t count) | ||
395 | { | ||
396 | int ret; | ||
397 | u64 pfn; | ||
398 | if (!capable(CAP_SYS_ADMIN)) | ||
399 | return -EPERM; | ||
400 | if (strict_strtoull(buf, 0, &pfn) < 0) | ||
401 | return -EINVAL; | ||
402 | pfn >>= PAGE_SHIFT; | ||
403 | ret = __memory_failure(pfn, 0, 0); | ||
404 | return ret ? ret : count; | ||
405 | } | ||
406 | |||
407 | static CLASS_ATTR(soft_offline_page, 0644, NULL, store_soft_offline_page); | ||
408 | static CLASS_ATTR(hard_offline_page, 0644, NULL, store_hard_offline_page); | ||
409 | |||
410 | static __init int memory_fail_init(void) | ||
411 | { | ||
412 | int err; | ||
413 | |||
414 | err = sysfs_create_file(&memory_sysdev_class.kset.kobj, | ||
415 | &class_attr_soft_offline_page.attr); | ||
416 | if (!err) | ||
417 | err = sysfs_create_file(&memory_sysdev_class.kset.kobj, | ||
418 | &class_attr_hard_offline_page.attr); | ||
419 | return err; | ||
420 | } | ||
421 | #else | ||
422 | static inline int memory_fail_init(void) | ||
423 | { | ||
424 | return 0; | ||
425 | } | ||
426 | #endif | ||
427 | |||
344 | /* | 428 | /* |
345 | * Note that phys_device is optional. It is here to allow for | 429 | * Note that phys_device is optional. It is here to allow for |
346 | * differentiation between which *physical* devices each | 430 | * differentiation between which *physical* devices each |
347 | * section belongs to... | 431 | * section belongs to... |
348 | */ | 432 | */ |
433 | int __weak arch_get_memory_phys_device(unsigned long start_pfn) | ||
434 | { | ||
435 | return 0; | ||
436 | } | ||
349 | 437 | ||
350 | static int add_memory_block(int nid, struct mem_section *section, | 438 | static int add_memory_block(int nid, struct mem_section *section, |
351 | unsigned long state, int phys_device, | 439 | unsigned long state, enum mem_add_context context) |
352 | enum mem_add_context context) | ||
353 | { | 440 | { |
354 | struct memory_block *mem = kzalloc(sizeof(*mem), GFP_KERNEL); | 441 | struct memory_block *mem = kzalloc(sizeof(*mem), GFP_KERNEL); |
442 | unsigned long start_pfn; | ||
355 | int ret = 0; | 443 | int ret = 0; |
356 | 444 | ||
357 | if (!mem) | 445 | if (!mem) |
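Note: because arch_get_memory_phys_device() is declared __weak, an architecture that can map a section's start pfn to a physical device id simply supplies its own definition, which overrides the stub above. A sketch (the helper is hypothetical):

    /* arch code */
    int arch_get_memory_phys_device(unsigned long start_pfn)
    {
            return my_arch_pfn_to_dimm_id(start_pfn);   /* hypothetical mapping */
    }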
@@ -360,7 +448,8 @@ static int add_memory_block(int nid, struct mem_section *section, | |||
360 | mem->phys_index = __section_nr(section); | 448 | mem->phys_index = __section_nr(section); |
361 | mem->state = state; | 449 | mem->state = state; |
362 | mutex_init(&mem->state_mutex); | 450 | mutex_init(&mem->state_mutex); |
363 | mem->phys_device = phys_device; | 451 | start_pfn = section_nr_to_pfn(mem->phys_index); |
452 | mem->phys_device = arch_get_memory_phys_device(start_pfn); | ||
364 | 453 | ||
365 | ret = register_memory(mem, section); | 454 | ret = register_memory(mem, section); |
366 | if (!ret) | 455 | if (!ret) |
@@ -432,7 +521,7 @@ int remove_memory_block(unsigned long node_id, struct mem_section *section, | |||
432 | */ | 521 | */ |
433 | int register_new_memory(int nid, struct mem_section *section) | 522 | int register_new_memory(int nid, struct mem_section *section) |
434 | { | 523 | { |
435 | return add_memory_block(nid, section, MEM_OFFLINE, 0, HOTPLUG); | 524 | return add_memory_block(nid, section, MEM_OFFLINE, HOTPLUG); |
436 | } | 525 | } |
437 | 526 | ||
438 | int unregister_memory_section(struct mem_section *section) | 527 | int unregister_memory_section(struct mem_section *section) |
@@ -465,7 +554,7 @@ int __init memory_dev_init(void) | |||
465 | if (!present_section_nr(i)) | 554 | if (!present_section_nr(i)) |
466 | continue; | 555 | continue; |
467 | err = add_memory_block(0, __nr_to_section(i), MEM_ONLINE, | 556 | err = add_memory_block(0, __nr_to_section(i), MEM_ONLINE, |
468 | 0, BOOT); | 557 | BOOT); |
469 | if (!ret) | 558 | if (!ret) |
470 | ret = err; | 559 | ret = err; |
471 | } | 560 | } |
@@ -473,6 +562,9 @@ int __init memory_dev_init(void) | |||
473 | err = memory_probe_init(); | 562 | err = memory_probe_init(); |
474 | if (!ret) | 563 | if (!ret) |
475 | ret = err; | 564 | ret = err; |
565 | err = memory_fail_init(); | ||
566 | if (!ret) | ||
567 | ret = err; | ||
476 | err = block_size_init(); | 568 | err = block_size_init(); |
477 | if (!ret) | 569 | if (!ret) |
478 | ret = err; | 570 | ret = err; |
diff --git a/drivers/base/module.c b/drivers/base/module.c index 103be9cacb05..f32f2f9b7be5 100644 --- a/drivers/base/module.c +++ b/drivers/base/module.c | |||
@@ -7,6 +7,7 @@ | |||
7 | #include <linux/device.h> | 7 | #include <linux/device.h> |
8 | #include <linux/module.h> | 8 | #include <linux/module.h> |
9 | #include <linux/errno.h> | 9 | #include <linux/errno.h> |
10 | #include <linux/slab.h> | ||
10 | #include <linux/string.h> | 11 | #include <linux/string.h> |
11 | #include "base.h" | 12 | #include "base.h" |
12 | 13 | ||
diff --git a/drivers/base/node.c b/drivers/base/node.c index 1fe5536d404f..057979a19eea 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c | |||
@@ -15,9 +15,13 @@ | |||
15 | #include <linux/cpu.h> | 15 | #include <linux/cpu.h> |
16 | #include <linux/device.h> | 16 | #include <linux/device.h> |
17 | #include <linux/swap.h> | 17 | #include <linux/swap.h> |
18 | #include <linux/slab.h> | ||
19 | |||
20 | static struct sysdev_class_attribute *node_state_attrs[]; | ||
18 | 21 | ||
19 | static struct sysdev_class node_class = { | 22 | static struct sysdev_class node_class = { |
20 | .name = "node", | 23 | .name = "node", |
24 | .attrs = node_state_attrs, | ||
21 | }; | 25 | }; |
22 | 26 | ||
23 | 27 | ||
@@ -162,8 +166,11 @@ static ssize_t node_read_distance(struct sys_device * dev, | |||
162 | int len = 0; | 166 | int len = 0; |
163 | int i; | 167 | int i; |
164 | 168 | ||
165 | /* buf currently PAGE_SIZE, need ~4 chars per node */ | 169 | /* |
166 | BUILD_BUG_ON(MAX_NUMNODES*4 > PAGE_SIZE/2); | 170 | * buf is currently PAGE_SIZE in length and each node needs 4 chars |
171 | * at the most (distance + space or newline). | ||
172 | */ | ||
173 | BUILD_BUG_ON(MAX_NUMNODES * 4 > PAGE_SIZE); | ||
167 | 174 | ||
168 | for_each_online_node(i) | 175 | for_each_online_node(i) |
169 | len += sprintf(buf + len, "%s%d", i ? " " : "", node_distance(nid, i)); | 176 | len += sprintf(buf + len, "%s%d", i ? " " : "", node_distance(nid, i)); |
@@ -173,6 +180,47 @@ static ssize_t node_read_distance(struct sys_device * dev, | |||
173 | } | 180 | } |
174 | static SYSDEV_ATTR(distance, S_IRUGO, node_read_distance, NULL); | 181 | static SYSDEV_ATTR(distance, S_IRUGO, node_read_distance, NULL); |
175 | 182 | ||
183 | #ifdef CONFIG_HUGETLBFS | ||
184 | /* | ||
185 | * hugetlbfs per node attributes registration interface: | ||
186 | * When/if hugetlb[fs] subsystem initializes [sometime after this module], | ||
187 | * it will register its per node attributes for all online nodes with | ||
188 | * memory. It will also call register_hugetlbfs_with_node(), below, to | ||
189 | * register its attribute registration functions with this node driver. | ||
190 | * Once these hooks have been initialized, the node driver will call into | ||
191 | * the hugetlb module to [un]register attributes for hot-plugged nodes. | ||
192 | */ | ||
193 | static node_registration_func_t __hugetlb_register_node; | ||
194 | static node_registration_func_t __hugetlb_unregister_node; | ||
195 | |||
196 | static inline bool hugetlb_register_node(struct node *node) | ||
197 | { | ||
198 | if (__hugetlb_register_node && | ||
199 | node_state(node->sysdev.id, N_HIGH_MEMORY)) { | ||
200 | __hugetlb_register_node(node); | ||
201 | return true; | ||
202 | } | ||
203 | return false; | ||
204 | } | ||
205 | |||
206 | static inline void hugetlb_unregister_node(struct node *node) | ||
207 | { | ||
208 | if (__hugetlb_unregister_node) | ||
209 | __hugetlb_unregister_node(node); | ||
210 | } | ||
211 | |||
212 | void register_hugetlbfs_with_node(node_registration_func_t doregister, | ||
213 | node_registration_func_t unregister) | ||
214 | { | ||
215 | __hugetlb_register_node = doregister; | ||
216 | __hugetlb_unregister_node = unregister; | ||
217 | } | ||
218 | #else | ||
219 | static inline void hugetlb_register_node(struct node *node) {} | ||
220 | |||
221 | static inline void hugetlb_unregister_node(struct node *node) {} | ||
222 | #endif | ||
223 | |||
176 | 224 | ||
177 | /* | 225 | /* |
178 | * register_node - Setup a sysfs device for a node. | 226 | * register_node - Setup a sysfs device for a node. |
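Note: on the hugetlb side, the hand-off described in the comment block above amounts to a single call during hugetlb initialization. A sketch, where the two helpers stand in for hugetlb's real per-node attribute code and the init hook name is hypothetical:

    static void hugetlb_node_register(struct node *node)
    {
            /* create per-node hstate attributes under node->sysdev */
    }

    static void hugetlb_node_unregister(struct node *node)
    {
            /* remove them again */
    }

    void __init hugetlb_wire_up_node_driver(void)
    {
            register_hugetlbfs_with_node(hugetlb_node_register,
                                         hugetlb_node_unregister);
    }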
@@ -196,6 +244,8 @@ int register_node(struct node *node, int num, struct node *parent) | |||
196 | sysdev_create_file(&node->sysdev, &attr_distance); | 244 | sysdev_create_file(&node->sysdev, &attr_distance); |
197 | 245 | ||
198 | scan_unevictable_register_node(node); | 246 | scan_unevictable_register_node(node); |
247 | |||
248 | hugetlb_register_node(node); | ||
199 | } | 249 | } |
200 | return error; | 250 | return error; |
201 | } | 251 | } |
@@ -216,6 +266,7 @@ void unregister_node(struct node *node) | |||
216 | sysdev_remove_file(&node->sysdev, &attr_distance); | 266 | sysdev_remove_file(&node->sysdev, &attr_distance); |
217 | 267 | ||
218 | scan_unevictable_unregister_node(node); | 268 | scan_unevictable_unregister_node(node); |
269 | hugetlb_unregister_node(node); /* no-op, if memoryless node */ | ||
219 | 270 | ||
220 | sysdev_unregister(&node->sysdev); | 271 | sysdev_unregister(&node->sysdev); |
221 | } | 272 | } |
@@ -227,26 +278,43 @@ struct node node_devices[MAX_NUMNODES]; | |||
227 | */ | 278 | */ |
228 | int register_cpu_under_node(unsigned int cpu, unsigned int nid) | 279 | int register_cpu_under_node(unsigned int cpu, unsigned int nid) |
229 | { | 280 | { |
230 | if (node_online(nid)) { | 281 | int ret; |
231 | struct sys_device *obj = get_cpu_sysdev(cpu); | 282 | struct sys_device *obj; |
232 | if (!obj) | ||
233 | return 0; | ||
234 | return sysfs_create_link(&node_devices[nid].sysdev.kobj, | ||
235 | &obj->kobj, | ||
236 | kobject_name(&obj->kobj)); | ||
237 | } | ||
238 | 283 | ||
239 | return 0; | 284 | if (!node_online(nid)) |
285 | return 0; | ||
286 | |||
287 | obj = get_cpu_sysdev(cpu); | ||
288 | if (!obj) | ||
289 | return 0; | ||
290 | |||
291 | ret = sysfs_create_link(&node_devices[nid].sysdev.kobj, | ||
292 | &obj->kobj, | ||
293 | kobject_name(&obj->kobj)); | ||
294 | if (ret) | ||
295 | return ret; | ||
296 | |||
297 | return sysfs_create_link(&obj->kobj, | ||
298 | &node_devices[nid].sysdev.kobj, | ||
299 | kobject_name(&node_devices[nid].sysdev.kobj)); | ||
240 | } | 300 | } |
241 | 301 | ||
242 | int unregister_cpu_under_node(unsigned int cpu, unsigned int nid) | 302 | int unregister_cpu_under_node(unsigned int cpu, unsigned int nid) |
243 | { | 303 | { |
244 | if (node_online(nid)) { | 304 | struct sys_device *obj; |
245 | struct sys_device *obj = get_cpu_sysdev(cpu); | 305 | |
246 | if (obj) | 306 | if (!node_online(nid)) |
247 | sysfs_remove_link(&node_devices[nid].sysdev.kobj, | 307 | return 0; |
248 | kobject_name(&obj->kobj)); | 308 | |
249 | } | 309 | obj = get_cpu_sysdev(cpu); |
310 | if (!obj) | ||
311 | return 0; | ||
312 | |||
313 | sysfs_remove_link(&node_devices[nid].sysdev.kobj, | ||
314 | kobject_name(&obj->kobj)); | ||
315 | sysfs_remove_link(&obj->kobj, | ||
316 | kobject_name(&node_devices[nid].sysdev.kobj)); | ||
317 | |||
250 | return 0; | 318 | return 0; |
251 | } | 319 | } |
252 | 320 | ||
@@ -268,6 +336,7 @@ static int get_nid_for_pfn(unsigned long pfn) | |||
268 | /* register memory section under specified node if it spans that node */ | 336 | /* register memory section under specified node if it spans that node */ |
269 | int register_mem_sect_under_node(struct memory_block *mem_blk, int nid) | 337 | int register_mem_sect_under_node(struct memory_block *mem_blk, int nid) |
270 | { | 338 | { |
339 | int ret; | ||
271 | unsigned long pfn, sect_start_pfn, sect_end_pfn; | 340 | unsigned long pfn, sect_start_pfn, sect_end_pfn; |
272 | 341 | ||
273 | if (!mem_blk) | 342 | if (!mem_blk) |
@@ -284,9 +353,15 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, int nid) | |||
284 | continue; | 353 | continue; |
285 | if (page_nid != nid) | 354 | if (page_nid != nid) |
286 | continue; | 355 | continue; |
287 | return sysfs_create_link_nowarn(&node_devices[nid].sysdev.kobj, | 356 | ret = sysfs_create_link_nowarn(&node_devices[nid].sysdev.kobj, |
288 | &mem_blk->sysdev.kobj, | 357 | &mem_blk->sysdev.kobj, |
289 | kobject_name(&mem_blk->sysdev.kobj)); | 358 | kobject_name(&mem_blk->sysdev.kobj)); |
359 | if (ret) | ||
360 | return ret; | ||
361 | |||
362 | return sysfs_create_link_nowarn(&mem_blk->sysdev.kobj, | ||
363 | &node_devices[nid].sysdev.kobj, | ||
364 | kobject_name(&node_devices[nid].sysdev.kobj)); | ||
290 | } | 365 | } |
291 | /* mem section does not span the specified node */ | 366 | /* mem section does not span the specified node */ |
292 | return 0; | 367 | return 0; |
@@ -295,12 +370,16 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, int nid) | |||
295 | /* unregister memory section under all nodes that it spans */ | 370 | /* unregister memory section under all nodes that it spans */ |
296 | int unregister_mem_sect_under_nodes(struct memory_block *mem_blk) | 371 | int unregister_mem_sect_under_nodes(struct memory_block *mem_blk) |
297 | { | 372 | { |
298 | nodemask_t unlinked_nodes; | 373 | NODEMASK_ALLOC(nodemask_t, unlinked_nodes, GFP_KERNEL); |
299 | unsigned long pfn, sect_start_pfn, sect_end_pfn; | 374 | unsigned long pfn, sect_start_pfn, sect_end_pfn; |
300 | 375 | ||
301 | if (!mem_blk) | 376 | if (!mem_blk) { |
377 | NODEMASK_FREE(unlinked_nodes); | ||
302 | return -EFAULT; | 378 | return -EFAULT; |
303 | nodes_clear(unlinked_nodes); | 379 | } |
380 | if (!unlinked_nodes) | ||
381 | return -ENOMEM; | ||
382 | nodes_clear(*unlinked_nodes); | ||
304 | sect_start_pfn = section_nr_to_pfn(mem_blk->phys_index); | 383 | sect_start_pfn = section_nr_to_pfn(mem_blk->phys_index); |
305 | sect_end_pfn = sect_start_pfn + PAGES_PER_SECTION - 1; | 384 | sect_end_pfn = sect_start_pfn + PAGES_PER_SECTION - 1; |
306 | for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) { | 385 | for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) { |
@@ -311,11 +390,14 @@ int unregister_mem_sect_under_nodes(struct memory_block *mem_blk) | |||
311 | continue; | 390 | continue; |
312 | if (!node_online(nid)) | 391 | if (!node_online(nid)) |
313 | continue; | 392 | continue; |
314 | if (node_test_and_set(nid, unlinked_nodes)) | 393 | if (node_test_and_set(nid, *unlinked_nodes)) |
315 | continue; | 394 | continue; |
316 | sysfs_remove_link(&node_devices[nid].sysdev.kobj, | 395 | sysfs_remove_link(&node_devices[nid].sysdev.kobj, |
317 | kobject_name(&mem_blk->sysdev.kobj)); | 396 | kobject_name(&mem_blk->sysdev.kobj)); |
397 | sysfs_remove_link(&mem_blk->sysdev.kobj, | ||
398 | kobject_name(&node_devices[nid].sysdev.kobj)); | ||
318 | } | 399 | } |
400 | NODEMASK_FREE(unlinked_nodes); | ||
319 | return 0; | 401 | return 0; |
320 | } | 402 | } |
321 | 403 | ||
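Note: NODEMASK_ALLOC()/NODEMASK_FREE() exist so that a large nodemask_t does not land on the kernel stack on high-MAX_NUMNODES configurations; the pattern is exactly what the hunk above adopts:

    NODEMASK_ALLOC(nodemask_t, mask, GFP_KERNEL);
    if (!mask)
            return -ENOMEM;
    nodes_clear(*mask);
    /* ... node_test_and_set(nid, *mask), node_isset(nid, *mask), ... */
    NODEMASK_FREE(mask);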
@@ -345,9 +427,77 @@ static int link_mem_sections(int nid) | |||
345 | } | 427 | } |
346 | return err; | 428 | return err; |
347 | } | 429 | } |
348 | #else | 430 | |
431 | #ifdef CONFIG_HUGETLBFS | ||
432 | /* | ||
433 | * Handle per node hstate attribute [un]registration on transitions | ||
434 | * to/from memoryless state. | ||
435 | */ | ||
436 | static void node_hugetlb_work(struct work_struct *work) | ||
437 | { | ||
438 | struct node *node = container_of(work, struct node, node_work); | ||
439 | |||
440 | /* | ||
441 | * We only get here when a node transitions to/from memoryless state. | ||
442 | * We can detect which transition occurred by examining whether the | ||
443 | * node has memory now. hugetlb_register_node() already checks this | ||
444 | * so we try to register the attributes. If that fails, then the | ||
445 | * node has transitioned to memoryless, try to unregister the | ||
446 | * attributes. | ||
447 | */ | ||
448 | if (!hugetlb_register_node(node)) | ||
449 | hugetlb_unregister_node(node); | ||
450 | } | ||
451 | |||
452 | static void init_node_hugetlb_work(int nid) | ||
453 | { | ||
454 | INIT_WORK(&node_devices[nid].node_work, node_hugetlb_work); | ||
455 | } | ||
456 | |||
457 | static int node_memory_callback(struct notifier_block *self, | ||
458 | unsigned long action, void *arg) | ||
459 | { | ||
460 | struct memory_notify *mnb = arg; | ||
461 | int nid = mnb->status_change_nid; | ||
462 | |||
463 | switch (action) { | ||
464 | case MEM_ONLINE: | ||
465 | case MEM_OFFLINE: | ||
466 | /* | ||
467 | * offload per node hstate [un]registration to a work thread | ||
468 | * when transitioning to/from memoryless state. | ||
469 | */ | ||
470 | if (nid != NUMA_NO_NODE) | ||
471 | schedule_work(&node_devices[nid].node_work); | ||
472 | break; | ||
473 | |||
474 | case MEM_GOING_ONLINE: | ||
475 | case MEM_GOING_OFFLINE: | ||
476 | case MEM_CANCEL_ONLINE: | ||
477 | case MEM_CANCEL_OFFLINE: | ||
478 | default: | ||
479 | break; | ||
480 | } | ||
481 | |||
482 | return NOTIFY_OK; | ||
483 | } | ||
484 | #endif /* CONFIG_HUGETLBFS */ | ||
485 | #else /* !CONFIG_MEMORY_HOTPLUG_SPARSE */ | ||
486 | |||
349 | static int link_mem_sections(int nid) { return 0; } | 487 | static int link_mem_sections(int nid) { return 0; } |
350 | #endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */ | 488 | #endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */ |
489 | |||
490 | #if !defined(CONFIG_MEMORY_HOTPLUG_SPARSE) || \ | ||
491 | !defined(CONFIG_HUGETLBFS) | ||
492 | static inline int node_memory_callback(struct notifier_block *self, | ||
493 | unsigned long action, void *arg) | ||
494 | { | ||
495 | return NOTIFY_OK; | ||
496 | } | ||
497 | |||
498 | static void init_node_hugetlb_work(int nid) { } | ||
499 | |||
500 | #endif | ||
351 | 501 | ||
352 | int register_one_node(int nid) | 502 | int register_one_node(int nid) |
353 | { | 503 | { |
@@ -371,6 +521,9 @@ int register_one_node(int nid) | |||
371 | 521 | ||
372 | /* link memory sections under this node */ | 522 | /* link memory sections under this node */ |
373 | error = link_mem_sections(nid); | 523 | error = link_mem_sections(nid); |
524 | |||
525 | /* initialize work queue for memory hot plug */ | ||
526 | init_node_hugetlb_work(nid); | ||
374 | } | 527 | } |
375 | 528 | ||
376 | return error; | 529 | return error; |
@@ -398,75 +551,55 @@ static ssize_t print_nodes_state(enum node_states state, char *buf) | |||
398 | return n; | 551 | return n; |
399 | } | 552 | } |
400 | 553 | ||
401 | static ssize_t print_nodes_possible(struct sysdev_class *class, char *buf) | 554 | struct node_attr { |
402 | { | 555 | struct sysdev_class_attribute attr; |
403 | return print_nodes_state(N_POSSIBLE, buf); | 556 | enum node_states state; |
404 | } | 557 | }; |
405 | |||
406 | static ssize_t print_nodes_online(struct sysdev_class *class, char *buf) | ||
407 | { | ||
408 | return print_nodes_state(N_ONLINE, buf); | ||
409 | } | ||
410 | |||
411 | static ssize_t print_nodes_has_normal_memory(struct sysdev_class *class, | ||
412 | char *buf) | ||
413 | { | ||
414 | return print_nodes_state(N_NORMAL_MEMORY, buf); | ||
415 | } | ||
416 | 558 | ||
417 | static ssize_t print_nodes_has_cpu(struct sysdev_class *class, char *buf) | 559 | static ssize_t show_node_state(struct sysdev_class *class, |
560 | struct sysdev_class_attribute *attr, char *buf) | ||
418 | { | 561 | { |
419 | return print_nodes_state(N_CPU, buf); | 562 | struct node_attr *na = container_of(attr, struct node_attr, attr); |
563 | return print_nodes_state(na->state, buf); | ||
420 | } | 564 | } |
421 | 565 | ||
422 | static SYSDEV_CLASS_ATTR(possible, 0444, print_nodes_possible, NULL); | 566 | #define _NODE_ATTR(name, state) \ |
423 | static SYSDEV_CLASS_ATTR(online, 0444, print_nodes_online, NULL); | 567 | { _SYSDEV_CLASS_ATTR(name, 0444, show_node_state, NULL), state } |
424 | static SYSDEV_CLASS_ATTR(has_normal_memory, 0444, print_nodes_has_normal_memory, | ||
425 | NULL); | ||
426 | static SYSDEV_CLASS_ATTR(has_cpu, 0444, print_nodes_has_cpu, NULL); | ||
427 | 568 | ||
569 | static struct node_attr node_state_attr[] = { | ||
570 | _NODE_ATTR(possible, N_POSSIBLE), | ||
571 | _NODE_ATTR(online, N_ONLINE), | ||
572 | _NODE_ATTR(has_normal_memory, N_NORMAL_MEMORY), | ||
573 | _NODE_ATTR(has_cpu, N_CPU), | ||
428 | #ifdef CONFIG_HIGHMEM | 574 | #ifdef CONFIG_HIGHMEM |
429 | static ssize_t print_nodes_has_high_memory(struct sysdev_class *class, | 575 | _NODE_ATTR(has_high_memory, N_HIGH_MEMORY), |
430 | char *buf) | ||
431 | { | ||
432 | return print_nodes_state(N_HIGH_MEMORY, buf); | ||
433 | } | ||
434 | |||
435 | static SYSDEV_CLASS_ATTR(has_high_memory, 0444, print_nodes_has_high_memory, | ||
436 | NULL); | ||
437 | #endif | 576 | #endif |
577 | }; | ||
438 | 578 | ||
439 | struct sysdev_class_attribute *node_state_attr[] = { | 579 | static struct sysdev_class_attribute *node_state_attrs[] = { |
440 | &attr_possible, | 580 | &node_state_attr[0].attr, |
441 | &attr_online, | 581 | &node_state_attr[1].attr, |
442 | &attr_has_normal_memory, | 582 | &node_state_attr[2].attr, |
583 | &node_state_attr[3].attr, | ||
443 | #ifdef CONFIG_HIGHMEM | 584 | #ifdef CONFIG_HIGHMEM |
444 | &attr_has_high_memory, | 585 | &node_state_attr[4].attr, |
445 | #endif | 586 | #endif |
446 | &attr_has_cpu, | 587 | NULL |
447 | }; | 588 | }; |
448 | 589 | ||
449 | static int node_states_init(void) | 590 | #define NODE_CALLBACK_PRI 2 /* lower than SLAB */ |
450 | { | ||
451 | int i; | ||
452 | int err = 0; | ||
453 | |||
454 | for (i = 0; i < NR_NODE_STATES; i++) { | ||
455 | int ret; | ||
456 | ret = sysdev_class_create_file(&node_class, node_state_attr[i]); | ||
457 | if (!err) | ||
458 | err = ret; | ||
459 | } | ||
460 | return err; | ||
461 | } | ||
462 | |||
463 | static int __init register_node_type(void) | 591 | static int __init register_node_type(void) |
464 | { | 592 | { |
465 | int ret; | 593 | int ret; |
466 | 594 | ||
595 | BUILD_BUG_ON(ARRAY_SIZE(node_state_attr) != NR_NODE_STATES); | ||
596 | BUILD_BUG_ON(ARRAY_SIZE(node_state_attrs)-1 != NR_NODE_STATES); | ||
597 | |||
467 | ret = sysdev_class_register(&node_class); | 598 | ret = sysdev_class_register(&node_class); |
468 | if (!ret) | 599 | if (!ret) { |
469 | ret = node_states_init(); | 600 | hotplug_memory_notifier(node_memory_callback, |
601 | NODE_CALLBACK_PRI); | ||
602 | } | ||
470 | 603 | ||
471 | /* | 604 | /* |
472 | * Note: we're not going to unregister the node class if we fail | 605 | * Note: we're not going to unregister the node class if we fail |
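Note: the table-driven rewrite above relies on the standard container_of() idiom: embed the generic attribute in a wrapper carrying per-attribute data and recover the wrapper inside the shared show routine. Reduced to its essentials (names illustrative):

    struct wrapped_attr {
            struct sysdev_class_attribute attr;
            int state;
    };

    static ssize_t wrapped_show(struct sysdev_class *class,
                                struct sysdev_class_attribute *attr, char *buf)
    {
            struct wrapped_attr *wa = container_of(attr, struct wrapped_attr, attr);

            return sprintf(buf, "%d\n", wa->state);
    }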
diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 4fa954b07ac4..4b4b565c835f 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c | |||
@@ -128,7 +128,7 @@ struct platform_object { | |||
128 | }; | 128 | }; |
129 | 129 | ||
130 | /** | 130 | /** |
131 | * platform_device_put | 131 | * platform_device_put - destroy a platform device |
132 | * @pdev: platform device to free | 132 | * @pdev: platform device to free |
133 | * | 133 | * |
134 | * Free all memory associated with a platform device. This function must | 134 | * Free all memory associated with a platform device. This function must |
@@ -152,7 +152,7 @@ static void platform_device_release(struct device *dev) | |||
152 | } | 152 | } |
153 | 153 | ||
154 | /** | 154 | /** |
155 | * platform_device_alloc | 155 | * platform_device_alloc - create a platform device |
156 | * @name: base name of the device we're adding | 156 | * @name: base name of the device we're adding |
157 | * @id: instance id | 157 | * @id: instance id |
158 | * | 158 | * |
@@ -177,7 +177,7 @@ struct platform_device *platform_device_alloc(const char *name, int id) | |||
177 | EXPORT_SYMBOL_GPL(platform_device_alloc); | 177 | EXPORT_SYMBOL_GPL(platform_device_alloc); |
178 | 178 | ||
179 | /** | 179 | /** |
180 | * platform_device_add_resources | 180 | * platform_device_add_resources - add resources to a platform device |
181 | * @pdev: platform device allocated by platform_device_alloc to add resources to | 181 | * @pdev: platform device allocated by platform_device_alloc to add resources to |
182 | * @res: set of resources that needs to be allocated for the device | 182 | * @res: set of resources that needs to be allocated for the device |
183 | * @num: number of resources | 183 | * @num: number of resources |
@@ -202,7 +202,7 @@ int platform_device_add_resources(struct platform_device *pdev, | |||
202 | EXPORT_SYMBOL_GPL(platform_device_add_resources); | 202 | EXPORT_SYMBOL_GPL(platform_device_add_resources); |
203 | 203 | ||
204 | /** | 204 | /** |
205 | * platform_device_add_data | 205 | * platform_device_add_data - add platform-specific data to a platform device |
206 | * @pdev: platform device allocated by platform_device_alloc to add resources to | 206 | * @pdev: platform device allocated by platform_device_alloc to add resources to |
207 | * @data: platform specific data for this platform device | 207 | * @data: platform specific data for this platform device |
208 | * @size: size of platform specific data | 208 | * @size: size of platform specific data |
@@ -344,7 +344,7 @@ void platform_device_unregister(struct platform_device *pdev) | |||
344 | EXPORT_SYMBOL_GPL(platform_device_unregister); | 344 | EXPORT_SYMBOL_GPL(platform_device_unregister); |
345 | 345 | ||
346 | /** | 346 | /** |
347 | * platform_device_register_simple | 347 | * platform_device_register_simple - add a platform-level device and its resources |
348 | * @name: base name of the device we're adding | 348 | * @name: base name of the device we're adding |
349 | * @id: instance id | 349 | * @id: instance id |
350 | * @res: set of resources that needs to be allocated for the device | 350 | * @res: set of resources that needs to be allocated for the device |
@@ -362,6 +362,8 @@ EXPORT_SYMBOL_GPL(platform_device_unregister); | |||
362 | * enumeration tasks, they don't fully conform to the Linux driver model. | 362 | * enumeration tasks, they don't fully conform to the Linux driver model. |
363 | * In particular, when such drivers are built as modules, they can't be | 363 | * In particular, when such drivers are built as modules, they can't be |
364 | * "hotplugged". | 364 | * "hotplugged". |
365 | * | ||
366 | * Returns &struct platform_device pointer on success, or ERR_PTR() on error. | ||
365 | */ | 367 | */ |
366 | struct platform_device *platform_device_register_simple(const char *name, | 368 | struct platform_device *platform_device_register_simple(const char *name, |
367 | int id, | 369 | int id, |
@@ -396,7 +398,7 @@ error: | |||
396 | EXPORT_SYMBOL_GPL(platform_device_register_simple); | 398 | EXPORT_SYMBOL_GPL(platform_device_register_simple); |
397 | 399 | ||
398 | /** | 400 | /** |
399 | * platform_device_register_data | 401 | * platform_device_register_data - add a platform-level device with platform-specific data |
400 | * @parent: parent device for the device we're adding | 402 | * @parent: parent device for the device we're adding |
401 | * @name: base name of the device we're adding | 403 | * @name: base name of the device we're adding |
402 | * @id: instance id | 404 | * @id: instance id |
@@ -408,6 +410,8 @@ EXPORT_SYMBOL_GPL(platform_device_register_simple); | |||
408 | * allocated for the device allows drivers using such devices to be | 410 | * allocated for the device allows drivers using such devices to be |
409 | * unloaded without waiting for the last reference to the device to be | 411 | * unloaded without waiting for the last reference to the device to be |
410 | * dropped. | 412 | * dropped. |
413 | * | ||
414 | * Returns &struct platform_device pointer on success, or ERR_PTR() on error. | ||
411 | */ | 415 | */ |
412 | struct platform_device *platform_device_register_data( | 416 | struct platform_device *platform_device_register_data( |
413 | struct device *parent, | 417 | struct device *parent, |
@@ -441,6 +445,7 @@ error: | |||
441 | platform_device_put(pdev); | 445 | platform_device_put(pdev); |
442 | return ERR_PTR(retval); | 446 | return ERR_PTR(retval); |
443 | } | 447 | } |
448 | EXPORT_SYMBOL_GPL(platform_device_register_data); | ||
444 | 449 | ||
445 | static int platform_drv_probe(struct device *_dev) | 450 | static int platform_drv_probe(struct device *_dev) |
446 | { | 451 | { |
@@ -472,7 +477,7 @@ static void platform_drv_shutdown(struct device *_dev) | |||
472 | } | 477 | } |
473 | 478 | ||
474 | /** | 479 | /** |
475 | * platform_driver_register | 480 | * platform_driver_register - register a driver for platform-level devices |
476 | * @drv: platform driver structure | 481 | * @drv: platform driver structure |
477 | */ | 482 | */ |
478 | int platform_driver_register(struct platform_driver *drv) | 483 | int platform_driver_register(struct platform_driver *drv) |
@@ -490,7 +495,7 @@ int platform_driver_register(struct platform_driver *drv) | |||
490 | EXPORT_SYMBOL_GPL(platform_driver_register); | 495 | EXPORT_SYMBOL_GPL(platform_driver_register); |
491 | 496 | ||
492 | /** | 497 | /** |
493 | * platform_driver_unregister | 498 | * platform_driver_unregister - unregister a driver for platform-level devices |
494 | * @drv: platform driver structure | 499 | * @drv: platform driver structure |
495 | */ | 500 | */ |
496 | void platform_driver_unregister(struct platform_driver *drv) | 501 | void platform_driver_unregister(struct platform_driver *drv) |
@@ -547,6 +552,66 @@ int __init_or_module platform_driver_probe(struct platform_driver *drv, | |||
547 | } | 552 | } |
548 | EXPORT_SYMBOL_GPL(platform_driver_probe); | 553 | EXPORT_SYMBOL_GPL(platform_driver_probe); |
549 | 554 | ||
555 | /** | ||
556 | * platform_create_bundle - register driver and create corresponding device | ||
557 | * @driver: platform driver structure | ||
558 | * @probe: the driver probe routine, probably from an __init section | ||
559 | * @res: set of resources that needs to be allocated for the device | ||
560 | * @n_res: number of resources | ||
561 | * @data: platform specific data for this platform device | ||
562 | * @size: size of platform specific data | ||
563 | * | ||
564 | * Use this in legacy-style modules that probe hardware directly and | ||
565 | * register a single platform device and corresponding platform driver. | ||
566 | * | ||
567 | * Returns &struct platform_device pointer on success, or ERR_PTR() on error. | ||
568 | */ | ||
569 | struct platform_device * __init_or_module platform_create_bundle( | ||
570 | struct platform_driver *driver, | ||
571 | int (*probe)(struct platform_device *), | ||
572 | struct resource *res, unsigned int n_res, | ||
573 | const void *data, size_t size) | ||
574 | { | ||
575 | struct platform_device *pdev; | ||
576 | int error; | ||
577 | |||
578 | pdev = platform_device_alloc(driver->driver.name, -1); | ||
579 | if (!pdev) { | ||
580 | error = -ENOMEM; | ||
581 | goto err_out; | ||
582 | } | ||
583 | |||
584 | if (res) { | ||
585 | error = platform_device_add_resources(pdev, res, n_res); | ||
586 | if (error) | ||
587 | goto err_pdev_put; | ||
588 | } | ||
589 | |||
590 | if (data) { | ||
591 | error = platform_device_add_data(pdev, data, size); | ||
592 | if (error) | ||
593 | goto err_pdev_put; | ||
594 | } | ||
595 | |||
596 | error = platform_device_add(pdev); | ||
597 | if (error) | ||
598 | goto err_pdev_put; | ||
599 | |||
600 | error = platform_driver_probe(driver, probe); | ||
601 | if (error) | ||
602 | goto err_pdev_del; | ||
603 | |||
604 | return pdev; | ||
605 | |||
606 | err_pdev_del: | ||
607 | platform_device_del(pdev); | ||
608 | err_pdev_put: | ||
609 | platform_device_put(pdev); | ||
610 | err_out: | ||
611 | return ERR_PTR(error); | ||
612 | } | ||
613 | EXPORT_SYMBOL_GPL(platform_create_bundle); | ||
614 | |||
550 | /* modalias support enables more hands-off userspace setup: | 615 | /* modalias support enables more hands-off userspace setup: |
551 | * (a) environment variable lets new-style hotplug events work once system is | 616 | * (a) environment variable lets new-style hotplug events work once system is |
552 | * fully running: "modprobe $MODALIAS" | 617 | * fully running: "modprobe $MODALIAS" |
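Note: a hedged usage sketch for platform_create_bundle() in a legacy-style module; the device name, resource and probe body are illustrative:

    static struct resource mydev_resources[] = {
            {
                    .start = 0x10000000,
                    .end   = 0x100000ff,
                    .flags = IORESOURCE_MEM,
            },
    };

    static struct platform_driver mydev_driver = {
            .driver = {
                    .name  = "mydev",
                    .owner = THIS_MODULE,
            },
    };

    static int __init mydev_probe(struct platform_device *pdev)
    {
            /* map resources, initialize the hardware ... */
            return 0;
    }

    static int __init mydev_init(void)
    {
            struct platform_device *pdev;

            pdev = platform_create_bundle(&mydev_driver, mydev_probe,
                                          mydev_resources,
                                          ARRAY_SIZE(mydev_resources),
                                          NULL, 0);
            return IS_ERR(pdev) ? PTR_ERR(pdev) : 0;
    }
    module_init(mydev_init);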
@@ -577,7 +642,7 @@ static int platform_uevent(struct device *dev, struct kobj_uevent_env *env) | |||
577 | } | 642 | } |
578 | 643 | ||
579 | static const struct platform_device_id *platform_match_id( | 644 | static const struct platform_device_id *platform_match_id( |
580 | struct platform_device_id *id, | 645 | const struct platform_device_id *id, |
581 | struct platform_device *pdev) | 646 | struct platform_device *pdev) |
582 | { | 647 | { |
583 | while (id->name[0]) { | 648 | while (id->name[0]) { |
@@ -993,14 +1058,16 @@ static __initdata LIST_HEAD(early_platform_driver_list); | |||
993 | static __initdata LIST_HEAD(early_platform_device_list); | 1058 | static __initdata LIST_HEAD(early_platform_device_list); |
994 | 1059 | ||
995 | /** | 1060 | /** |
996 | * early_platform_driver_register | 1061 | * early_platform_driver_register - register early platform driver |
997 | * @epdrv: early_platform driver structure | 1062 | * @epdrv: early_platform driver structure |
998 | * @buf: string passed from early_param() | 1063 | * @buf: string passed from early_param() |
1064 | * | ||
1065 | * Helper function for early_platform_init() / early_platform_init_buffer() | ||
999 | */ | 1066 | */ |
1000 | int __init early_platform_driver_register(struct early_platform_driver *epdrv, | 1067 | int __init early_platform_driver_register(struct early_platform_driver *epdrv, |
1001 | char *buf) | 1068 | char *buf) |
1002 | { | 1069 | { |
1003 | unsigned long index; | 1070 | char *tmp; |
1004 | int n; | 1071 | int n; |
1005 | 1072 | ||
1006 | /* Simply add the driver to the end of the global list. | 1073 | /* Simply add the driver to the end of the global list. |
@@ -1019,22 +1086,40 @@ int __init early_platform_driver_register(struct early_platform_driver *epdrv, | |||
1019 | if (buf && !strncmp(buf, epdrv->pdrv->driver.name, n)) { | 1086 | if (buf && !strncmp(buf, epdrv->pdrv->driver.name, n)) { |
1020 | list_move(&epdrv->list, &early_platform_driver_list); | 1087 | list_move(&epdrv->list, &early_platform_driver_list); |
1021 | 1088 | ||
1022 | if (!strcmp(buf, epdrv->pdrv->driver.name)) | 1089 | /* Allow passing parameters after device name */ |
1090 | if (buf[n] == '\0' || buf[n] == ',') | ||
1023 | epdrv->requested_id = -1; | 1091 | epdrv->requested_id = -1; |
1024 | else if (buf[n] == '.' && strict_strtoul(&buf[n + 1], 10, | 1092 | else { |
1025 | &index) == 0) | 1093 | epdrv->requested_id = simple_strtoul(&buf[n + 1], |
1026 | epdrv->requested_id = index; | 1094 | &tmp, 10); |
1027 | else | 1095 | |
1028 | epdrv->requested_id = EARLY_PLATFORM_ID_ERROR; | 1096 | if (buf[n] != '.' || (tmp == &buf[n + 1])) { |
1097 | epdrv->requested_id = EARLY_PLATFORM_ID_ERROR; | ||
1098 | n = 0; | ||
1099 | } else | ||
1100 | n += strcspn(&buf[n + 1], ",") + 1; | ||
1101 | } | ||
1102 | |||
1103 | if (buf[n] == ',') | ||
1104 | n++; | ||
1105 | |||
1106 | if (epdrv->bufsize) { | ||
1107 | memcpy(epdrv->buffer, &buf[n], | ||
1108 | min_t(int, epdrv->bufsize, strlen(&buf[n]) + 1)); | ||
1109 | epdrv->buffer[epdrv->bufsize - 1] = '\0'; | ||
1110 | } | ||
1029 | } | 1111 | } |
1030 | 1112 | ||
1031 | return 0; | 1113 | return 0; |
1032 | } | 1114 | } |
1033 | 1115 | ||
1034 | /** | 1116 | /** |
1035 | * early_platform_add_devices - add a numbers of early platform devices | 1117 | * early_platform_add_devices - adds a number of early platform devices |
1036 | * @devs: array of early platform devices to add | 1118 | * @devs: array of early platform devices to add |
1037 | * @num: number of early platform devices in array | 1119 | * @num: number of early platform devices in array |
1120 | * | ||
1121 | * Used by early architecture code to register early platform devices and | ||
1122 | * their platform data. | ||
1038 | */ | 1123 | */ |
1039 | void __init early_platform_add_devices(struct platform_device **devs, int num) | 1124 | void __init early_platform_add_devices(struct platform_device **devs, int num) |
1040 | { | 1125 | { |
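Note: architecture code typically feeds early_platform_add_devices() from a static table well before the normal driver model is available. A sketch — the device and the arch hook name are illustrative:

    static struct platform_device *early_devices[] __initdata = {
            &my_early_timer_device,                 /* hypothetical early device */
    };

    void __init plat_early_device_setup(void)       /* arch hook, illustrative */
    {
            early_platform_add_devices(early_devices,
                                       ARRAY_SIZE(early_devices));
    }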
@@ -1054,8 +1139,12 @@ void __init early_platform_add_devices(struct platform_device **devs, int num) | |||
1054 | } | 1139 | } |
1055 | 1140 | ||
1056 | /** | 1141 | /** |
1057 | * early_platform_driver_register_all | 1142 | * early_platform_driver_register_all - register early platform drivers |
1058 | * @class_str: string to identify early platform driver class | 1143 | * @class_str: string to identify early platform driver class |
1144 | * | ||
1145 | * Used by architecture code to register all early platform drivers | ||
1146 | * for a certain class. If omitted then only early platform drivers | ||
1147 | * with matching kernel command line class parameters will be registered. | ||
1059 | */ | 1148 | */ |
1060 | void __init early_platform_driver_register_all(char *class_str) | 1149 | void __init early_platform_driver_register_all(char *class_str) |
1061 | { | 1150 | { |
@@ -1077,7 +1166,7 @@ void __init early_platform_driver_register_all(char *class_str) | |||
1077 | } | 1166 | } |
1078 | 1167 | ||
1079 | /** | 1168 | /** |
1080 | * early_platform_match | 1169 | * early_platform_match - find early platform device matching driver |
1081 | * @epdrv: early platform driver structure | 1170 | * @epdrv: early platform driver structure |
1082 | * @id: id to match against | 1171 | * @id: id to match against |
1083 | */ | 1172 | */ |
@@ -1095,7 +1184,7 @@ early_platform_match(struct early_platform_driver *epdrv, int id) | |||
1095 | } | 1184 | } |
1096 | 1185 | ||
1097 | /** | 1186 | /** |
1098 | * early_platform_left | 1187 | * early_platform_left - check if early platform driver has matching devices |
1099 | * @epdrv: early platform driver structure | 1188 | * @epdrv: early platform driver structure |
1100 | * @id: return true if id or above exists | 1189 | * @id: return true if id or above exists |
1101 | */ | 1190 | */ |
@@ -1113,7 +1202,7 @@ static __init int early_platform_left(struct early_platform_driver *epdrv, | |||
1113 | } | 1202 | } |
1114 | 1203 | ||
1115 | /** | 1204 | /** |
1116 | * early_platform_driver_probe_id | 1205 | * early_platform_driver_probe_id - probe drivers matching class_str and id |
1117 | * @class_str: string to identify early platform driver class | 1206 | * @class_str: string to identify early platform driver class |
1118 | * @id: id to match against | 1207 | * @id: id to match against |
1119 | * @nr_probe: number of platform devices to successfully probe before exiting | 1208 | * @nr_probe: number of platform devices to successfully probe before exiting |
@@ -1183,10 +1272,14 @@ static int __init early_platform_driver_probe_id(char *class_str, | |||
1183 | } | 1272 | } |
1184 | 1273 | ||
1185 | /** | 1274 | /** |
1186 | * early_platform_driver_probe | 1275 | * early_platform_driver_probe - probe a class of registered drivers |
1187 | * @class_str: string to identify early platform driver class | 1276 | * @class_str: string to identify early platform driver class |
1188 | * @nr_probe: number of platform devices to successfully probe before exiting | 1277 | * @nr_probe: number of platform devices to successfully probe before exiting |
1189 | * @user_only: only probe user specified early platform devices | 1278 | * @user_only: only probe user specified early platform devices |
1279 | * | ||
1280 | * Used by architecture code to probe registered early platform drivers | ||
1281 | * within a certain class. For probe to happen a registered early platform | ||
1282 | * device matching a registered early platform driver is needed. | ||
1190 | */ | 1283 | */ |
1191 | int __init early_platform_driver_probe(char *class_str, | 1284 | int __init early_platform_driver_probe(char *class_str, |
1192 | int nr_probe, | 1285 | int nr_probe, |
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile index 3ce3519e8f30..89de75325cea 100644 --- a/drivers/base/power/Makefile +++ b/drivers/base/power/Makefile | |||
@@ -1,6 +1,7 @@ | |||
1 | obj-$(CONFIG_PM) += sysfs.o | 1 | obj-$(CONFIG_PM) += sysfs.o |
2 | obj-$(CONFIG_PM_SLEEP) += main.o | 2 | obj-$(CONFIG_PM_SLEEP) += main.o |
3 | obj-$(CONFIG_PM_RUNTIME) += runtime.o | 3 | obj-$(CONFIG_PM_RUNTIME) += runtime.o |
4 | obj-$(CONFIG_PM_OPS) += generic_ops.o | ||
4 | obj-$(CONFIG_PM_TRACE_RTC) += trace.o | 5 | obj-$(CONFIG_PM_TRACE_RTC) += trace.o |
5 | 6 | ||
6 | ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG | 7 | ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG |
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c new file mode 100644 index 000000000000..4b29d4981253 --- /dev/null +++ b/drivers/base/power/generic_ops.c | |||
@@ -0,0 +1,233 @@ | |||
1 | /* | ||
2 | * drivers/base/power/generic_ops.c - Generic PM callbacks for subsystems | ||
3 | * | ||
4 | * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. | ||
5 | * | ||
6 | * This file is released under the GPLv2. | ||
7 | */ | ||
8 | |||
9 | #include <linux/pm.h> | ||
10 | #include <linux/pm_runtime.h> | ||
11 | |||
12 | #ifdef CONFIG_PM_RUNTIME | ||
13 | /** | ||
14 | * pm_generic_runtime_idle - Generic runtime idle callback for subsystems. | ||
15 | * @dev: Device to handle. | ||
16 | * | ||
17 | * If PM operations are defined for the @dev's driver and they include | ||
18 | * ->runtime_idle(), execute it and return its error code, if nonzero. | ||
19 | * Otherwise, execute pm_runtime_suspend() for the device and return 0. | ||
20 | */ | ||
21 | int pm_generic_runtime_idle(struct device *dev) | ||
22 | { | ||
23 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
24 | |||
25 | if (pm && pm->runtime_idle) { | ||
26 | int ret = pm->runtime_idle(dev); | ||
27 | if (ret) | ||
28 | return ret; | ||
29 | } | ||
30 | |||
31 | pm_runtime_suspend(dev); | ||
32 | return 0; | ||
33 | } | ||
34 | EXPORT_SYMBOL_GPL(pm_generic_runtime_idle); | ||
35 | |||
36 | /** | ||
37 | * pm_generic_runtime_suspend - Generic runtime suspend callback for subsystems. | ||
38 | * @dev: Device to suspend. | ||
39 | * | ||
40 | * If PM operations are defined for the @dev's driver and they include | ||
41 | * ->runtime_suspend(), execute it and return its error code. Otherwise, | ||
42 | * return -EINVAL. | ||
43 | */ | ||
44 | int pm_generic_runtime_suspend(struct device *dev) | ||
45 | { | ||
46 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
47 | int ret; | ||
48 | |||
49 | ret = pm && pm->runtime_suspend ? pm->runtime_suspend(dev) : -EINVAL; | ||
50 | |||
51 | return ret; | ||
52 | } | ||
53 | EXPORT_SYMBOL_GPL(pm_generic_runtime_suspend); | ||
54 | |||
55 | /** | ||
56 | * pm_generic_runtime_resume - Generic runtime resume callback for subsystems. | ||
57 | * @dev: Device to resume. | ||
58 | * | ||
59 | * If PM operations are defined for the @dev's driver and they include | ||
60 | * ->runtime_resume(), execute it and return its error code. Otherwise, | ||
61 | * return -EINVAL. | ||
62 | */ | ||
63 | int pm_generic_runtime_resume(struct device *dev) | ||
64 | { | ||
65 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
66 | int ret; | ||
67 | |||
68 | ret = pm && pm->runtime_resume ? pm->runtime_resume(dev) : -EINVAL; | ||
69 | |||
70 | return ret; | ||
71 | } | ||
72 | EXPORT_SYMBOL_GPL(pm_generic_runtime_resume); | ||
73 | #endif /* CONFIG_PM_RUNTIME */ | ||
74 | |||
75 | #ifdef CONFIG_PM_SLEEP | ||
76 | /** | ||
77 | * __pm_generic_call - Generic suspend/freeze/poweroff/thaw subsystem callback. | ||
78 | * @dev: Device to handle. | ||
79 | * @event: PM transition of the system under way. | ||
80 | * | ||
81 | * If the device has not been suspended at run time, execute the | ||
82 | * suspend/freeze/poweroff/thaw callback provided by its driver, if defined, and | ||
83 | * return its error code. Otherwise, return zero. | ||
84 | */ | ||
85 | static int __pm_generic_call(struct device *dev, int event) | ||
86 | { | ||
87 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
88 | int (*callback)(struct device *); | ||
89 | |||
90 | if (!pm || pm_runtime_suspended(dev)) | ||
91 | return 0; | ||
92 | |||
93 | switch (event) { | ||
94 | case PM_EVENT_SUSPEND: | ||
95 | callback = pm->suspend; | ||
96 | break; | ||
97 | case PM_EVENT_FREEZE: | ||
98 | callback = pm->freeze; | ||
99 | break; | ||
100 | case PM_EVENT_HIBERNATE: | ||
101 | callback = pm->poweroff; | ||
102 | break; | ||
103 | case PM_EVENT_THAW: | ||
104 | callback = pm->thaw; | ||
105 | break; | ||
106 | default: | ||
107 | callback = NULL; | ||
108 | break; | ||
109 | } | ||
110 | |||
111 | return callback ? callback(dev) : 0; | ||
112 | } | ||
113 | |||
114 | /** | ||
115 | * pm_generic_suspend - Generic suspend callback for subsystems. | ||
116 | * @dev: Device to suspend. | ||
117 | */ | ||
118 | int pm_generic_suspend(struct device *dev) | ||
119 | { | ||
120 | return __pm_generic_call(dev, PM_EVENT_SUSPEND); | ||
121 | } | ||
122 | EXPORT_SYMBOL_GPL(pm_generic_suspend); | ||
123 | |||
124 | /** | ||
125 | * pm_generic_freeze - Generic freeze callback for subsystems. | ||
126 | * @dev: Device to freeze. | ||
127 | */ | ||
128 | int pm_generic_freeze(struct device *dev) | ||
129 | { | ||
130 | return __pm_generic_call(dev, PM_EVENT_FREEZE); | ||
131 | } | ||
132 | EXPORT_SYMBOL_GPL(pm_generic_freeze); | ||
133 | |||
134 | /** | ||
135 | * pm_generic_poweroff - Generic poweroff callback for subsystems. | ||
136 | * @dev: Device to handle. | ||
137 | */ | ||
138 | int pm_generic_poweroff(struct device *dev) | ||
139 | { | ||
140 | return __pm_generic_call(dev, PM_EVENT_HIBERNATE); | ||
141 | } | ||
142 | EXPORT_SYMBOL_GPL(pm_generic_poweroff); | ||
143 | |||
144 | /** | ||
145 | * pm_generic_thaw - Generic thaw callback for subsystems. | ||
146 | * @dev: Device to thaw. | ||
147 | */ | ||
148 | int pm_generic_thaw(struct device *dev) | ||
149 | { | ||
150 | return __pm_generic_call(dev, PM_EVENT_THAW); | ||
151 | } | ||
152 | EXPORT_SYMBOL_GPL(pm_generic_thaw); | ||
153 | |||
154 | /** | ||
155 | * __pm_generic_resume - Generic resume/restore callback for subsystems. | ||
156 | * @dev: Device to handle. | ||
157 | * @event: PM transition of the system under way. | ||
158 | * | ||
159 | * Execute the resume/restore callback provided by the @dev's driver, if | ||
160 | * defined. If it returns 0, change the device's runtime PM status to 'active'. | ||
161 | * Return the callback's error code. | ||
162 | */ | ||
163 | static int __pm_generic_resume(struct device *dev, int event) | ||
164 | { | ||
165 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
166 | int (*callback)(struct device *); | ||
167 | int ret; | ||
168 | |||
169 | if (!pm) | ||
170 | return 0; | ||
171 | |||
172 | switch (event) { | ||
173 | case PM_EVENT_RESUME: | ||
174 | callback = pm->resume; | ||
175 | break; | ||
176 | case PM_EVENT_RESTORE: | ||
177 | callback = pm->restore; | ||
178 | break; | ||
179 | default: | ||
180 | callback = NULL; | ||
181 | break; | ||
182 | } | ||
183 | |||
184 | if (!callback) | ||
185 | return 0; | ||
186 | |||
187 | ret = callback(dev); | ||
188 | if (!ret) { | ||
189 | pm_runtime_disable(dev); | ||
190 | pm_runtime_set_active(dev); | ||
191 | pm_runtime_enable(dev); | ||
192 | } | ||
193 | |||
194 | return ret; | ||
195 | } | ||
196 | |||
197 | /** | ||
198 | * pm_generic_resume - Generic resume callback for subsystems. | ||
199 | * @dev: Device to resume. | ||
200 | */ | ||
201 | int pm_generic_resume(struct device *dev) | ||
202 | { | ||
203 | return __pm_generic_resume(dev, PM_EVENT_RESUME); | ||
204 | } | ||
205 | EXPORT_SYMBOL_GPL(pm_generic_resume); | ||
206 | |||
207 | /** | ||
208 | * pm_generic_restore - Generic restore callback for subsystems. | ||
209 | * @dev: Device to restore. | ||
210 | */ | ||
211 | int pm_generic_restore(struct device *dev) | ||
212 | { | ||
213 | return __pm_generic_resume(dev, PM_EVENT_RESTORE); | ||
214 | } | ||
215 | EXPORT_SYMBOL_GPL(pm_generic_restore); | ||
216 | #endif /* CONFIG_PM_SLEEP */ | ||
217 | |||
218 | struct dev_pm_ops generic_subsys_pm_ops = { | ||
219 | #ifdef CONFIG_PM_SLEEP | ||
220 | .suspend = pm_generic_suspend, | ||
221 | .resume = pm_generic_resume, | ||
222 | .freeze = pm_generic_freeze, | ||
223 | .thaw = pm_generic_thaw, | ||
224 | .poweroff = pm_generic_poweroff, | ||
225 | .restore = pm_generic_restore, | ||
226 | #endif | ||
227 | #ifdef CONFIG_PM_RUNTIME | ||
228 | .runtime_suspend = pm_generic_runtime_suspend, | ||
229 | .runtime_resume = pm_generic_runtime_resume, | ||
230 | .runtime_idle = pm_generic_runtime_idle, | ||
231 | #endif | ||
232 | }; | ||
233 | EXPORT_SYMBOL_GPL(generic_subsys_pm_ops); | ||
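A subsystem that is happy with this generic behaviour can plug the exported table straight into its bus type, so both system sleep and runtime PM requests end up forwarded to each driver's own dev_pm_ops. A minimal sketch under that assumption (the "foo" bus type is hypothetical, not part of this patch):

#include <linux/device.h>
#include <linux/pm.h>

/* Hypothetical bus type relying entirely on the generic PM callbacks. */
static struct bus_type foo_bus_type = {
	.name = "foo",
	.pm   = &generic_subsys_pm_ops,
};

Registering foo_bus_type with bus_register() would then give every device on that bus the generic suspend/resume and runtime PM handling defined above.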
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 8aa2443182d5..941fcb87e52a 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c | |||
@@ -23,8 +23,9 @@ | |||
23 | #include <linux/pm.h> | 23 | #include <linux/pm.h> |
24 | #include <linux/pm_runtime.h> | 24 | #include <linux/pm_runtime.h> |
25 | #include <linux/resume-trace.h> | 25 | #include <linux/resume-trace.h> |
26 | #include <linux/rwsem.h> | ||
27 | #include <linux/interrupt.h> | 26 | #include <linux/interrupt.h> |
27 | #include <linux/sched.h> | ||
28 | #include <linux/async.h> | ||
28 | 29 | ||
29 | #include "../base.h" | 30 | #include "../base.h" |
30 | #include "power.h" | 31 | #include "power.h" |
@@ -34,14 +35,15 @@ | |||
34 | * because children are guaranteed to be discovered after parents, and | 35 | * because children are guaranteed to be discovered after parents, and |
35 | * are inserted at the back of the list on discovery. | 36 | * are inserted at the back of the list on discovery. |
36 | * | 37 | * |
37 | * Since device_pm_add() may be called with a device semaphore held, | 38 | * Since device_pm_add() may be called with a device lock held, |
38 | * we must never try to acquire a device semaphore while holding | 39 | * we must never try to acquire a device lock while holding |
39 | * dpm_list_mutex. | 40 | * dpm_list_mutex. |
40 | */ | 41 | */ |
41 | 42 | ||
42 | LIST_HEAD(dpm_list); | 43 | LIST_HEAD(dpm_list); |
43 | 44 | ||
44 | static DEFINE_MUTEX(dpm_list_mtx); | 45 | static DEFINE_MUTEX(dpm_list_mtx); |
46 | static pm_message_t pm_transition; | ||
45 | 47 | ||
46 | /* | 48 | /* |
47 | * Set once the preparation of devices for a PM transition has started, reset | 49 | * Set once the preparation of devices for a PM transition has started, reset |
@@ -56,6 +58,7 @@ static bool transition_started; | |||
56 | void device_pm_init(struct device *dev) | 58 | void device_pm_init(struct device *dev) |
57 | { | 59 | { |
58 | dev->power.status = DPM_ON; | 60 | dev->power.status = DPM_ON; |
61 | init_completion(&dev->power.completion); | ||
59 | pm_runtime_init(dev); | 62 | pm_runtime_init(dev); |
60 | } | 63 | } |
61 | 64 | ||
@@ -111,6 +114,7 @@ void device_pm_remove(struct device *dev) | |||
111 | pr_debug("PM: Removing info for %s:%s\n", | 114 | pr_debug("PM: Removing info for %s:%s\n", |
112 | dev->bus ? dev->bus->name : "No Bus", | 115 | dev->bus ? dev->bus->name : "No Bus", |
113 | kobject_name(&dev->kobj)); | 116 | kobject_name(&dev->kobj)); |
117 | complete_all(&dev->power.completion); | ||
114 | mutex_lock(&dpm_list_mtx); | 118 | mutex_lock(&dpm_list_mtx); |
115 | list_del_init(&dev->power.entry); | 119 | list_del_init(&dev->power.entry); |
116 | mutex_unlock(&dpm_list_mtx); | 120 | mutex_unlock(&dpm_list_mtx); |
@@ -161,6 +165,57 @@ void device_pm_move_last(struct device *dev) | |||
161 | list_move_tail(&dev->power.entry, &dpm_list); | 165 | list_move_tail(&dev->power.entry, &dpm_list); |
162 | } | 166 | } |
163 | 167 | ||
168 | static ktime_t initcall_debug_start(struct device *dev) | ||
169 | { | ||
170 | ktime_t calltime = ktime_set(0, 0); | ||
171 | |||
172 | if (initcall_debug) { | ||
173 | pr_info("calling %s+ @ %i\n", | ||
174 | dev_name(dev), task_pid_nr(current)); | ||
175 | calltime = ktime_get(); | ||
176 | } | ||
177 | |||
178 | return calltime; | ||
179 | } | ||
180 | |||
181 | static void initcall_debug_report(struct device *dev, ktime_t calltime, | ||
182 | int error) | ||
183 | { | ||
184 | ktime_t delta, rettime; | ||
185 | |||
186 | if (initcall_debug) { | ||
187 | rettime = ktime_get(); | ||
188 | delta = ktime_sub(rettime, calltime); | ||
189 | pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev), | ||
190 | error, (unsigned long long)ktime_to_ns(delta) >> 10); | ||
191 | } | ||
192 | } | ||
193 | |||
194 | /** | ||
195 | * dpm_wait - Wait for a PM operation to complete. | ||
196 | * @dev: Device to wait for. | ||
197 | * @async: If unset, wait only if the device's power.async_suspend flag is set. | ||
198 | */ | ||
199 | static void dpm_wait(struct device *dev, bool async) | ||
200 | { | ||
201 | if (!dev) | ||
202 | return; | ||
203 | |||
204 | if (async || (pm_async_enabled && dev->power.async_suspend)) | ||
205 | wait_for_completion(&dev->power.completion); | ||
206 | } | ||
207 | |||
208 | static int dpm_wait_fn(struct device *dev, void *async_ptr) | ||
209 | { | ||
210 | dpm_wait(dev, *((bool *)async_ptr)); | ||
211 | return 0; | ||
212 | } | ||
213 | |||
214 | static void dpm_wait_for_children(struct device *dev, bool async) | ||
215 | { | ||
216 | device_for_each_child(dev, &async, dpm_wait_fn); | ||
217 | } | ||
218 | |||
164 | /** | 219 | /** |
165 | * pm_op - Execute the PM operation appropriate for given PM event. | 220 | * pm_op - Execute the PM operation appropriate for given PM event. |
166 | * @dev: Device to handle. | 221 | * @dev: Device to handle. |
@@ -172,6 +227,9 @@ static int pm_op(struct device *dev, | |||
172 | pm_message_t state) | 227 | pm_message_t state) |
173 | { | 228 | { |
174 | int error = 0; | 229 | int error = 0; |
230 | ktime_t calltime; | ||
231 | |||
232 | calltime = initcall_debug_start(dev); | ||
175 | 233 | ||
176 | switch (state.event) { | 234 | switch (state.event) { |
177 | #ifdef CONFIG_SUSPEND | 235 | #ifdef CONFIG_SUSPEND |
@@ -219,6 +277,9 @@ static int pm_op(struct device *dev, | |||
219 | default: | 277 | default: |
220 | error = -EINVAL; | 278 | error = -EINVAL; |
221 | } | 279 | } |
280 | |||
281 | initcall_debug_report(dev, calltime, error); | ||
282 | |||
222 | return error; | 283 | return error; |
223 | } | 284 | } |
224 | 285 | ||
@@ -236,6 +297,14 @@ static int pm_noirq_op(struct device *dev, | |||
236 | pm_message_t state) | 297 | pm_message_t state) |
237 | { | 298 | { |
238 | int error = 0; | 299 | int error = 0; |
300 | ktime_t calltime, delta, rettime; | ||
301 | |||
302 | if (initcall_debug) { | ||
303 | pr_info("calling %s+ @ %i, parent: %s\n", | ||
304 | dev_name(dev), task_pid_nr(current), | ||
305 | dev->parent ? dev_name(dev->parent) : "none"); | ||
306 | calltime = ktime_get(); | ||
307 | } | ||
239 | 308 | ||
240 | switch (state.event) { | 309 | switch (state.event) { |
241 | #ifdef CONFIG_SUSPEND | 310 | #ifdef CONFIG_SUSPEND |
@@ -283,6 +352,15 @@ static int pm_noirq_op(struct device *dev, | |||
283 | default: | 352 | default: |
284 | error = -EINVAL; | 353 | error = -EINVAL; |
285 | } | 354 | } |
355 | |||
356 | if (initcall_debug) { | ||
357 | rettime = ktime_get(); | ||
358 | delta = ktime_sub(rettime, calltime); | ||
359 | printk("initcall %s_i+ returned %d after %Ld usecs\n", | ||
360 | dev_name(dev), error, | ||
361 | (unsigned long long)ktime_to_ns(delta) >> 10); | ||
362 | } | ||
363 | |||
286 | return error; | 364 | return error; |
287 | } | 365 | } |
288 | 366 | ||
@@ -324,6 +402,23 @@ static void pm_dev_err(struct device *dev, pm_message_t state, char *info, | |||
324 | kobject_name(&dev->kobj), pm_verb(state.event), info, error); | 402 | kobject_name(&dev->kobj), pm_verb(state.event), info, error); |
325 | } | 403 | } |
326 | 404 | ||
405 | static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info) | ||
406 | { | ||
407 | ktime_t calltime; | ||
408 | s64 usecs64; | ||
409 | int usecs; | ||
410 | |||
411 | calltime = ktime_get(); | ||
412 | usecs64 = ktime_to_ns(ktime_sub(calltime, starttime)); | ||
413 | do_div(usecs64, NSEC_PER_USEC); | ||
414 | usecs = usecs64; | ||
415 | if (usecs == 0) | ||
416 | usecs = 1; | ||
417 | pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n", | ||
418 | info ?: "", info ? " " : "", pm_verb(state.event), | ||
419 | usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC); | ||
420 | } | ||
421 | |||
327 | /*------------------------- Resume routines -------------------------*/ | 422 | /*------------------------- Resume routines -------------------------*/ |
328 | 423 | ||
329 | /** | 424 | /** |
@@ -341,14 +436,26 @@ static int device_resume_noirq(struct device *dev, pm_message_t state) | |||
341 | TRACE_DEVICE(dev); | 436 | TRACE_DEVICE(dev); |
342 | TRACE_RESUME(0); | 437 | TRACE_RESUME(0); |
343 | 438 | ||
344 | if (!dev->bus) | 439 | if (dev->bus && dev->bus->pm) { |
345 | goto End; | ||
346 | |||
347 | if (dev->bus->pm) { | ||
348 | pm_dev_dbg(dev, state, "EARLY "); | 440 | pm_dev_dbg(dev, state, "EARLY "); |
349 | error = pm_noirq_op(dev, dev->bus->pm, state); | 441 | error = pm_noirq_op(dev, dev->bus->pm, state); |
442 | if (error) | ||
443 | goto End; | ||
350 | } | 444 | } |
351 | End: | 445 | |
446 | if (dev->type && dev->type->pm) { | ||
447 | pm_dev_dbg(dev, state, "EARLY type "); | ||
448 | error = pm_noirq_op(dev, dev->type->pm, state); | ||
449 | if (error) | ||
450 | goto End; | ||
451 | } | ||
452 | |||
453 | if (dev->class && dev->class->pm) { | ||
454 | pm_dev_dbg(dev, state, "EARLY class "); | ||
455 | error = pm_noirq_op(dev, dev->class->pm, state); | ||
456 | } | ||
457 | |||
458 | End: | ||
352 | TRACE_RESUME(error); | 459 | TRACE_RESUME(error); |
353 | return error; | 460 | return error; |
354 | } | 461 | } |
@@ -363,6 +470,7 @@ static int device_resume_noirq(struct device *dev, pm_message_t state) | |||
363 | void dpm_resume_noirq(pm_message_t state) | 470 | void dpm_resume_noirq(pm_message_t state) |
364 | { | 471 | { |
365 | struct device *dev; | 472 | struct device *dev; |
473 | ktime_t starttime = ktime_get(); | ||
366 | 474 | ||
367 | mutex_lock(&dpm_list_mtx); | 475 | mutex_lock(&dpm_list_mtx); |
368 | transition_started = false; | 476 | transition_started = false; |
@@ -376,23 +484,48 @@ void dpm_resume_noirq(pm_message_t state) | |||
376 | pm_dev_err(dev, state, " early", error); | 484 | pm_dev_err(dev, state, " early", error); |
377 | } | 485 | } |
378 | mutex_unlock(&dpm_list_mtx); | 486 | mutex_unlock(&dpm_list_mtx); |
487 | dpm_show_time(starttime, state, "early"); | ||
379 | resume_device_irqs(); | 488 | resume_device_irqs(); |
380 | } | 489 | } |
381 | EXPORT_SYMBOL_GPL(dpm_resume_noirq); | 490 | EXPORT_SYMBOL_GPL(dpm_resume_noirq); |
382 | 491 | ||
383 | /** | 492 | /** |
493 | * legacy_resume - Execute a legacy (bus or class) resume callback for device. | ||
494 | * @dev: Device to resume. | ||
495 | * @cb: Resume callback to execute. | ||
496 | */ | ||
497 | static int legacy_resume(struct device *dev, int (*cb)(struct device *dev)) | ||
498 | { | ||
499 | int error; | ||
500 | ktime_t calltime; | ||
501 | |||
502 | calltime = initcall_debug_start(dev); | ||
503 | |||
504 | error = cb(dev); | ||
505 | suspend_report_result(cb, error); | ||
506 | |||
507 | initcall_debug_report(dev, calltime, error); | ||
508 | |||
509 | return error; | ||
510 | } | ||
511 | |||
512 | /** | ||
384 | * device_resume - Execute "resume" callbacks for given device. | 513 | * device_resume - Execute "resume" callbacks for given device. |
385 | * @dev: Device to handle. | 514 | * @dev: Device to handle. |
386 | * @state: PM transition of the system being carried out. | 515 | * @state: PM transition of the system being carried out. |
516 | * @async: If true, the device is being resumed asynchronously. | ||
387 | */ | 517 | */ |
388 | static int device_resume(struct device *dev, pm_message_t state) | 518 | static int device_resume(struct device *dev, pm_message_t state, bool async) |
389 | { | 519 | { |
390 | int error = 0; | 520 | int error = 0; |
391 | 521 | ||
392 | TRACE_DEVICE(dev); | 522 | TRACE_DEVICE(dev); |
393 | TRACE_RESUME(0); | 523 | TRACE_RESUME(0); |
394 | 524 | ||
395 | down(&dev->sem); | 525 | dpm_wait(dev->parent, async); |
526 | device_lock(dev); | ||
527 | |||
528 | dev->power.status = DPM_RESUMING; | ||
396 | 529 | ||
397 | if (dev->bus) { | 530 | if (dev->bus) { |
398 | if (dev->bus->pm) { | 531 | if (dev->bus->pm) { |
@@ -400,7 +533,7 @@ static int device_resume(struct device *dev, pm_message_t state) | |||
400 | error = pm_op(dev, dev->bus->pm, state); | 533 | error = pm_op(dev, dev->bus->pm, state); |
401 | } else if (dev->bus->resume) { | 534 | } else if (dev->bus->resume) { |
402 | pm_dev_dbg(dev, state, "legacy "); | 535 | pm_dev_dbg(dev, state, "legacy "); |
403 | error = dev->bus->resume(dev); | 536 | error = legacy_resume(dev, dev->bus->resume); |
404 | } | 537 | } |
405 | if (error) | 538 | if (error) |
406 | goto End; | 539 | goto End; |
@@ -421,16 +554,34 @@ static int device_resume(struct device *dev, pm_message_t state) | |||
421 | error = pm_op(dev, dev->class->pm, state); | 554 | error = pm_op(dev, dev->class->pm, state); |
422 | } else if (dev->class->resume) { | 555 | } else if (dev->class->resume) { |
423 | pm_dev_dbg(dev, state, "legacy class "); | 556 | pm_dev_dbg(dev, state, "legacy class "); |
424 | error = dev->class->resume(dev); | 557 | error = legacy_resume(dev, dev->class->resume); |
425 | } | 558 | } |
426 | } | 559 | } |
427 | End: | 560 | End: |
428 | up(&dev->sem); | 561 | device_unlock(dev); |
562 | complete_all(&dev->power.completion); | ||
429 | 563 | ||
430 | TRACE_RESUME(error); | 564 | TRACE_RESUME(error); |
431 | return error; | 565 | return error; |
432 | } | 566 | } |
433 | 567 | ||
568 | static void async_resume(void *data, async_cookie_t cookie) | ||
569 | { | ||
570 | struct device *dev = (struct device *)data; | ||
571 | int error; | ||
572 | |||
573 | error = device_resume(dev, pm_transition, true); | ||
574 | if (error) | ||
575 | pm_dev_err(dev, pm_transition, " async", error); | ||
576 | put_device(dev); | ||
577 | } | ||
578 | |||
579 | static bool is_async(struct device *dev) | ||
580 | { | ||
581 | return dev->power.async_suspend && pm_async_enabled | ||
582 | && !pm_trace_is_enabled(); | ||
583 | } | ||
584 | |||
434 | /** | 585 | /** |
435 | * dpm_resume - Execute "resume" callbacks for non-sysdev devices. | 586 | * dpm_resume - Execute "resume" callbacks for non-sysdev devices. |
436 | * @state: PM transition of the system being carried out. | 587 | * @state: PM transition of the system being carried out. |
@@ -441,20 +592,33 @@ static int device_resume(struct device *dev, pm_message_t state) | |||
441 | static void dpm_resume(pm_message_t state) | 592 | static void dpm_resume(pm_message_t state) |
442 | { | 593 | { |
443 | struct list_head list; | 594 | struct list_head list; |
595 | struct device *dev; | ||
596 | ktime_t starttime = ktime_get(); | ||
444 | 597 | ||
445 | INIT_LIST_HEAD(&list); | 598 | INIT_LIST_HEAD(&list); |
446 | mutex_lock(&dpm_list_mtx); | 599 | mutex_lock(&dpm_list_mtx); |
447 | while (!list_empty(&dpm_list)) { | 600 | pm_transition = state; |
448 | struct device *dev = to_device(dpm_list.next); | 601 | |
602 | list_for_each_entry(dev, &dpm_list, power.entry) { | ||
603 | if (dev->power.status < DPM_OFF) | ||
604 | continue; | ||
605 | |||
606 | INIT_COMPLETION(dev->power.completion); | ||
607 | if (is_async(dev)) { | ||
608 | get_device(dev); | ||
609 | async_schedule(async_resume, dev); | ||
610 | } | ||
611 | } | ||
449 | 612 | ||
613 | while (!list_empty(&dpm_list)) { | ||
614 | dev = to_device(dpm_list.next); | ||
450 | get_device(dev); | 615 | get_device(dev); |
451 | if (dev->power.status >= DPM_OFF) { | 616 | if (dev->power.status >= DPM_OFF && !is_async(dev)) { |
452 | int error; | 617 | int error; |
453 | 618 | ||
454 | dev->power.status = DPM_RESUMING; | ||
455 | mutex_unlock(&dpm_list_mtx); | 619 | mutex_unlock(&dpm_list_mtx); |
456 | 620 | ||
457 | error = device_resume(dev, state); | 621 | error = device_resume(dev, state, false); |
458 | 622 | ||
459 | mutex_lock(&dpm_list_mtx); | 623 | mutex_lock(&dpm_list_mtx); |
460 | if (error) | 624 | if (error) |
@@ -469,6 +633,8 @@ static void dpm_resume(pm_message_t state) | |||
469 | } | 633 | } |
470 | list_splice(&list, &dpm_list); | 634 | list_splice(&list, &dpm_list); |
471 | mutex_unlock(&dpm_list_mtx); | 635 | mutex_unlock(&dpm_list_mtx); |
636 | async_synchronize_full(); | ||
637 | dpm_show_time(starttime, state, NULL); | ||
472 | } | 638 | } |
473 | 639 | ||
474 | /** | 640 | /** |
@@ -478,7 +644,7 @@ static void dpm_resume(pm_message_t state) | |||
478 | */ | 644 | */ |
479 | static void device_complete(struct device *dev, pm_message_t state) | 645 | static void device_complete(struct device *dev, pm_message_t state) |
480 | { | 646 | { |
481 | down(&dev->sem); | 647 | device_lock(dev); |
482 | 648 | ||
483 | if (dev->class && dev->class->pm && dev->class->pm->complete) { | 649 | if (dev->class && dev->class->pm && dev->class->pm->complete) { |
484 | pm_dev_dbg(dev, state, "completing class "); | 650 | pm_dev_dbg(dev, state, "completing class "); |
@@ -495,7 +661,7 @@ static void device_complete(struct device *dev, pm_message_t state) | |||
495 | dev->bus->pm->complete(dev); | 661 | dev->bus->pm->complete(dev); |
496 | } | 662 | } |
497 | 663 | ||
498 | up(&dev->sem); | 664 | device_unlock(dev); |
499 | } | 665 | } |
500 | 666 | ||
501 | /** | 667 | /** |
@@ -521,7 +687,7 @@ static void dpm_complete(pm_message_t state) | |||
521 | mutex_unlock(&dpm_list_mtx); | 687 | mutex_unlock(&dpm_list_mtx); |
522 | 688 | ||
523 | device_complete(dev, state); | 689 | device_complete(dev, state); |
524 | pm_runtime_put_noidle(dev); | 690 | pm_runtime_put_sync(dev); |
525 | 691 | ||
526 | mutex_lock(&dpm_list_mtx); | 692 | mutex_lock(&dpm_list_mtx); |
527 | } | 693 | } |
@@ -584,13 +750,26 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state) | |||
584 | { | 750 | { |
585 | int error = 0; | 751 | int error = 0; |
586 | 752 | ||
587 | if (!dev->bus) | 753 | if (dev->class && dev->class->pm) { |
588 | return 0; | 754 | pm_dev_dbg(dev, state, "LATE class "); |
755 | error = pm_noirq_op(dev, dev->class->pm, state); | ||
756 | if (error) | ||
757 | goto End; | ||
758 | } | ||
589 | 759 | ||
590 | if (dev->bus->pm) { | 760 | if (dev->type && dev->type->pm) { |
761 | pm_dev_dbg(dev, state, "LATE type "); | ||
762 | error = pm_noirq_op(dev, dev->type->pm, state); | ||
763 | if (error) | ||
764 | goto End; | ||
765 | } | ||
766 | |||
767 | if (dev->bus && dev->bus->pm) { | ||
591 | pm_dev_dbg(dev, state, "LATE "); | 768 | pm_dev_dbg(dev, state, "LATE "); |
592 | error = pm_noirq_op(dev, dev->bus->pm, state); | 769 | error = pm_noirq_op(dev, dev->bus->pm, state); |
593 | } | 770 | } |
771 | |||
772 | End: | ||
594 | return error; | 773 | return error; |
595 | } | 774 | } |
596 | 775 | ||
@@ -604,6 +783,7 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state) | |||
604 | int dpm_suspend_noirq(pm_message_t state) | 783 | int dpm_suspend_noirq(pm_message_t state) |
605 | { | 784 | { |
606 | struct device *dev; | 785 | struct device *dev; |
786 | ktime_t starttime = ktime_get(); | ||
607 | int error = 0; | 787 | int error = 0; |
608 | 788 | ||
609 | suspend_device_irqs(); | 789 | suspend_device_irqs(); |
@@ -619,20 +799,51 @@ int dpm_suspend_noirq(pm_message_t state) | |||
619 | mutex_unlock(&dpm_list_mtx); | 799 | mutex_unlock(&dpm_list_mtx); |
620 | if (error) | 800 | if (error) |
621 | dpm_resume_noirq(resume_event(state)); | 801 | dpm_resume_noirq(resume_event(state)); |
802 | else | ||
803 | dpm_show_time(starttime, state, "late"); | ||
622 | return error; | 804 | return error; |
623 | } | 805 | } |
624 | EXPORT_SYMBOL_GPL(dpm_suspend_noirq); | 806 | EXPORT_SYMBOL_GPL(dpm_suspend_noirq); |
625 | 807 | ||
626 | /** | 808 | /** |
809 | * legacy_suspend - Execute a legacy (bus or class) suspend callback for device. | ||
810 | * @dev: Device to suspend. | ||
811 | * @state: PM transition of the system being carried out. | ||
812 | * @cb: Suspend callback to execute. | ||
813 | */ | ||
814 | static int legacy_suspend(struct device *dev, pm_message_t state, | ||
815 | int (*cb)(struct device *dev, pm_message_t state)) | ||
816 | { | ||
817 | int error; | ||
818 | ktime_t calltime; | ||
819 | |||
820 | calltime = initcall_debug_start(dev); | ||
821 | |||
822 | error = cb(dev, state); | ||
823 | suspend_report_result(cb, error); | ||
824 | |||
825 | initcall_debug_report(dev, calltime, error); | ||
826 | |||
827 | return error; | ||
828 | } | ||
829 | |||
830 | static int async_error; | ||
831 | |||
832 | /** | ||
627 | * device_suspend - Execute "suspend" callbacks for given device. | 833 | * device_suspend - Execute "suspend" callbacks for given device. |
628 | * @dev: Device to handle. | 834 | * @dev: Device to handle. |
629 | * @state: PM transition of the system being carried out. | 835 | * @state: PM transition of the system being carried out. |
836 | * @async: If true, the device is being suspended asynchronously. | ||
630 | */ | 837 | */ |
631 | static int device_suspend(struct device *dev, pm_message_t state) | 838 | static int __device_suspend(struct device *dev, pm_message_t state, bool async) |
632 | { | 839 | { |
633 | int error = 0; | 840 | int error = 0; |
634 | 841 | ||
635 | down(&dev->sem); | 842 | dpm_wait_for_children(dev, async); |
843 | device_lock(dev); | ||
844 | |||
845 | if (async_error) | ||
846 | goto End; | ||
636 | 847 | ||
637 | if (dev->class) { | 848 | if (dev->class) { |
638 | if (dev->class->pm) { | 849 | if (dev->class->pm) { |
@@ -640,8 +851,7 @@ static int device_suspend(struct device *dev, pm_message_t state) | |||
640 | error = pm_op(dev, dev->class->pm, state); | 851 | error = pm_op(dev, dev->class->pm, state); |
641 | } else if (dev->class->suspend) { | 852 | } else if (dev->class->suspend) { |
642 | pm_dev_dbg(dev, state, "legacy class "); | 853 | pm_dev_dbg(dev, state, "legacy class "); |
643 | error = dev->class->suspend(dev, state); | 854 | error = legacy_suspend(dev, state, dev->class->suspend); |
644 | suspend_report_result(dev->class->suspend, error); | ||
645 | } | 855 | } |
646 | if (error) | 856 | if (error) |
647 | goto End; | 857 | goto End; |
@@ -662,16 +872,47 @@ static int device_suspend(struct device *dev, pm_message_t state) | |||
662 | error = pm_op(dev, dev->bus->pm, state); | 872 | error = pm_op(dev, dev->bus->pm, state); |
663 | } else if (dev->bus->suspend) { | 873 | } else if (dev->bus->suspend) { |
664 | pm_dev_dbg(dev, state, "legacy "); | 874 | pm_dev_dbg(dev, state, "legacy "); |
665 | error = dev->bus->suspend(dev, state); | 875 | error = legacy_suspend(dev, state, dev->bus->suspend); |
666 | suspend_report_result(dev->bus->suspend, error); | ||
667 | } | 876 | } |
668 | } | 877 | } |
878 | |||
879 | if (!error) | ||
880 | dev->power.status = DPM_OFF; | ||
881 | |||
669 | End: | 882 | End: |
670 | up(&dev->sem); | 883 | device_unlock(dev); |
884 | complete_all(&dev->power.completion); | ||
671 | 885 | ||
672 | return error; | 886 | return error; |
673 | } | 887 | } |
674 | 888 | ||
889 | static void async_suspend(void *data, async_cookie_t cookie) | ||
890 | { | ||
891 | struct device *dev = (struct device *)data; | ||
892 | int error; | ||
893 | |||
894 | error = __device_suspend(dev, pm_transition, true); | ||
895 | if (error) { | ||
896 | pm_dev_err(dev, pm_transition, " async", error); | ||
897 | async_error = error; | ||
898 | } | ||
899 | |||
900 | put_device(dev); | ||
901 | } | ||
902 | |||
903 | static int device_suspend(struct device *dev) | ||
904 | { | ||
905 | INIT_COMPLETION(dev->power.completion); | ||
906 | |||
907 | if (pm_async_enabled && dev->power.async_suspend) { | ||
908 | get_device(dev); | ||
909 | async_schedule(async_suspend, dev); | ||
910 | return 0; | ||
911 | } | ||
912 | |||
913 | return __device_suspend(dev, pm_transition, false); | ||
914 | } | ||
915 | |||
675 | /** | 916 | /** |
676 | * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices. | 917 | * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices. |
677 | * @state: PM transition of the system being carried out. | 918 | * @state: PM transition of the system being carried out. |
@@ -679,17 +920,20 @@ static int device_suspend(struct device *dev, pm_message_t state) | |||
679 | static int dpm_suspend(pm_message_t state) | 920 | static int dpm_suspend(pm_message_t state) |
680 | { | 921 | { |
681 | struct list_head list; | 922 | struct list_head list; |
923 | ktime_t starttime = ktime_get(); | ||
682 | int error = 0; | 924 | int error = 0; |
683 | 925 | ||
684 | INIT_LIST_HEAD(&list); | 926 | INIT_LIST_HEAD(&list); |
685 | mutex_lock(&dpm_list_mtx); | 927 | mutex_lock(&dpm_list_mtx); |
928 | pm_transition = state; | ||
929 | async_error = 0; | ||
686 | while (!list_empty(&dpm_list)) { | 930 | while (!list_empty(&dpm_list)) { |
687 | struct device *dev = to_device(dpm_list.prev); | 931 | struct device *dev = to_device(dpm_list.prev); |
688 | 932 | ||
689 | get_device(dev); | 933 | get_device(dev); |
690 | mutex_unlock(&dpm_list_mtx); | 934 | mutex_unlock(&dpm_list_mtx); |
691 | 935 | ||
692 | error = device_suspend(dev, state); | 936 | error = device_suspend(dev); |
693 | 937 | ||
694 | mutex_lock(&dpm_list_mtx); | 938 | mutex_lock(&dpm_list_mtx); |
695 | if (error) { | 939 | if (error) { |
@@ -697,13 +941,19 @@ static int dpm_suspend(pm_message_t state) | |||
697 | put_device(dev); | 941 | put_device(dev); |
698 | break; | 942 | break; |
699 | } | 943 | } |
700 | dev->power.status = DPM_OFF; | ||
701 | if (!list_empty(&dev->power.entry)) | 944 | if (!list_empty(&dev->power.entry)) |
702 | list_move(&dev->power.entry, &list); | 945 | list_move(&dev->power.entry, &list); |
703 | put_device(dev); | 946 | put_device(dev); |
947 | if (async_error) | ||
948 | break; | ||
704 | } | 949 | } |
705 | list_splice(&list, dpm_list.prev); | 950 | list_splice(&list, dpm_list.prev); |
706 | mutex_unlock(&dpm_list_mtx); | 951 | mutex_unlock(&dpm_list_mtx); |
952 | async_synchronize_full(); | ||
953 | if (!error) | ||
954 | error = async_error; | ||
955 | if (!error) | ||
956 | dpm_show_time(starttime, state, NULL); | ||
707 | return error; | 957 | return error; |
708 | } | 958 | } |
709 | 959 | ||
@@ -719,7 +969,7 @@ static int device_prepare(struct device *dev, pm_message_t state) | |||
719 | { | 969 | { |
720 | int error = 0; | 970 | int error = 0; |
721 | 971 | ||
722 | down(&dev->sem); | 972 | device_lock(dev); |
723 | 973 | ||
724 | if (dev->bus && dev->bus->pm && dev->bus->pm->prepare) { | 974 | if (dev->bus && dev->bus->pm && dev->bus->pm->prepare) { |
725 | pm_dev_dbg(dev, state, "preparing "); | 975 | pm_dev_dbg(dev, state, "preparing "); |
@@ -743,7 +993,7 @@ static int device_prepare(struct device *dev, pm_message_t state) | |||
743 | suspend_report_result(dev->class->pm->prepare, error); | 993 | suspend_report_result(dev->class->pm->prepare, error); |
744 | } | 994 | } |
745 | End: | 995 | End: |
746 | up(&dev->sem); | 996 | device_unlock(dev); |
747 | 997 | ||
748 | return error; | 998 | return error; |
749 | } | 999 | } |
@@ -772,7 +1022,7 @@ static int dpm_prepare(pm_message_t state) | |||
772 | pm_runtime_get_noresume(dev); | 1022 | pm_runtime_get_noresume(dev); |
773 | if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) { | 1023 | if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) { |
774 | /* Wake-up requested during system sleep transition. */ | 1024 | /* Wake-up requested during system sleep transition. */ |
775 | pm_runtime_put_noidle(dev); | 1025 | pm_runtime_put_sync(dev); |
776 | error = -EBUSY; | 1026 | error = -EBUSY; |
777 | } else { | 1027 | } else { |
778 | error = device_prepare(dev, state); | 1028 | error = device_prepare(dev, state); |
@@ -827,3 +1077,14 @@ void __suspend_report_result(const char *function, void *fn, int ret) | |||
827 | printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret); | 1077 | printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret); |
828 | } | 1078 | } |
829 | EXPORT_SYMBOL_GPL(__suspend_report_result); | 1079 | EXPORT_SYMBOL_GPL(__suspend_report_result); |
1080 | |||
1081 | /** | ||
1082 | * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete. | ||
1083 | * @dev: Device to wait for. | ||
1084 | * @subordinate: Device that needs to wait for @dev. | ||
1085 | */ | ||
1086 | void device_pm_wait_for_dev(struct device *subordinate, struct device *dev) | ||
1087 | { | ||
1088 | dpm_wait(dev, subordinate->power.async_suspend); | ||
1089 | } | ||
1090 | EXPORT_SYMBOL_GPL(device_pm_wait_for_dev); | ||
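The new device_pm_wait_for_dev() helper lets a driver express a suspend/resume ordering constraint that the parent/child hierarchy does not capture. A hedged sketch of a resume callback using it (struct foo_ctx, foo_hw_reinit() and the "supplier" pointer are hypothetical; the helper is assumed to be visible via <linux/pm.h>, as in this tree):

#include <linux/device.h>
#include <linux/pm.h>

/* Hypothetical per-device context; "supplier" is a device we depend on
 * that is not our parent, so the PM core cannot order us automatically. */
struct foo_ctx {
	struct device *supplier;
};

static int foo_hw_reinit(struct foo_ctx *ctx)
{
	/* placeholder for device-specific re-initialization */
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct foo_ctx *ctx = dev_get_drvdata(dev);

	/* Block until the supplier's own resume callbacks have completed. */
	device_pm_wait_for_dev(dev, ctx->supplier);

	return foo_hw_reinit(ctx);
}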
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h index b8fa1aa5225a..c0bd03c83b9c 100644 --- a/drivers/base/power/power.h +++ b/drivers/base/power/power.h | |||
@@ -12,10 +12,10 @@ static inline void pm_runtime_remove(struct device *dev) {} | |||
12 | 12 | ||
13 | #ifdef CONFIG_PM_SLEEP | 13 | #ifdef CONFIG_PM_SLEEP |
14 | 14 | ||
15 | /* | 15 | /* kernel/power/main.c */ |
16 | * main.c | 16 | extern int pm_async_enabled; |
17 | */ | ||
18 | 17 | ||
18 | /* drivers/base/power/main.c */ | ||
19 | extern struct list_head dpm_list; /* The active device list */ | 19 | extern struct list_head dpm_list; /* The active device list */ |
20 | 20 | ||
21 | static inline struct device *to_device(struct list_head *entry) | 21 | static inline struct device *to_device(struct list_head *entry) |
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 846d89e3d122..626dd147b75f 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c | |||
@@ -85,6 +85,19 @@ static int __pm_runtime_idle(struct device *dev) | |||
85 | dev->bus->pm->runtime_idle(dev); | 85 | dev->bus->pm->runtime_idle(dev); |
86 | 86 | ||
87 | spin_lock_irq(&dev->power.lock); | 87 | spin_lock_irq(&dev->power.lock); |
88 | } else if (dev->type && dev->type->pm && dev->type->pm->runtime_idle) { | ||
89 | spin_unlock_irq(&dev->power.lock); | ||
90 | |||
91 | dev->type->pm->runtime_idle(dev); | ||
92 | |||
93 | spin_lock_irq(&dev->power.lock); | ||
94 | } else if (dev->class && dev->class->pm | ||
95 | && dev->class->pm->runtime_idle) { | ||
96 | spin_unlock_irq(&dev->power.lock); | ||
97 | |||
98 | dev->class->pm->runtime_idle(dev); | ||
99 | |||
100 | spin_lock_irq(&dev->power.lock); | ||
88 | } | 101 | } |
89 | 102 | ||
90 | dev->power.idle_notification = false; | 103 | dev->power.idle_notification = false; |
@@ -185,6 +198,7 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq) | |||
185 | } | 198 | } |
186 | 199 | ||
187 | dev->power.runtime_status = RPM_SUSPENDING; | 200 | dev->power.runtime_status = RPM_SUSPENDING; |
201 | dev->power.deferred_resume = false; | ||
188 | 202 | ||
189 | if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) { | 203 | if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) { |
190 | spin_unlock_irq(&dev->power.lock); | 204 | spin_unlock_irq(&dev->power.lock); |
@@ -193,6 +207,22 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq) | |||
193 | 207 | ||
194 | spin_lock_irq(&dev->power.lock); | 208 | spin_lock_irq(&dev->power.lock); |
195 | dev->power.runtime_error = retval; | 209 | dev->power.runtime_error = retval; |
210 | } else if (dev->type && dev->type->pm | ||
211 | && dev->type->pm->runtime_suspend) { | ||
212 | spin_unlock_irq(&dev->power.lock); | ||
213 | |||
214 | retval = dev->type->pm->runtime_suspend(dev); | ||
215 | |||
216 | spin_lock_irq(&dev->power.lock); | ||
217 | dev->power.runtime_error = retval; | ||
218 | } else if (dev->class && dev->class->pm | ||
219 | && dev->class->pm->runtime_suspend) { | ||
220 | spin_unlock_irq(&dev->power.lock); | ||
221 | |||
222 | retval = dev->class->pm->runtime_suspend(dev); | ||
223 | |||
224 | spin_lock_irq(&dev->power.lock); | ||
225 | dev->power.runtime_error = retval; | ||
196 | } else { | 226 | } else { |
197 | retval = -ENOSYS; | 227 | retval = -ENOSYS; |
198 | } | 228 | } |
@@ -200,7 +230,6 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq) | |||
200 | if (retval) { | 230 | if (retval) { |
201 | dev->power.runtime_status = RPM_ACTIVE; | 231 | dev->power.runtime_status = RPM_ACTIVE; |
202 | pm_runtime_cancel_pending(dev); | 232 | pm_runtime_cancel_pending(dev); |
203 | dev->power.deferred_resume = false; | ||
204 | 233 | ||
205 | if (retval == -EAGAIN || retval == -EBUSY) { | 234 | if (retval == -EAGAIN || retval == -EBUSY) { |
206 | notify = true; | 235 | notify = true; |
@@ -217,7 +246,6 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq) | |||
217 | wake_up_all(&dev->power.wait_queue); | 246 | wake_up_all(&dev->power.wait_queue); |
218 | 247 | ||
219 | if (dev->power.deferred_resume) { | 248 | if (dev->power.deferred_resume) { |
220 | dev->power.deferred_resume = false; | ||
221 | __pm_runtime_resume(dev, false); | 249 | __pm_runtime_resume(dev, false); |
222 | retval = -EAGAIN; | 250 | retval = -EAGAIN; |
223 | goto out; | 251 | goto out; |
@@ -360,6 +388,22 @@ int __pm_runtime_resume(struct device *dev, bool from_wq) | |||
360 | 388 | ||
361 | spin_lock_irq(&dev->power.lock); | 389 | spin_lock_irq(&dev->power.lock); |
362 | dev->power.runtime_error = retval; | 390 | dev->power.runtime_error = retval; |
391 | } else if (dev->type && dev->type->pm | ||
392 | && dev->type->pm->runtime_resume) { | ||
393 | spin_unlock_irq(&dev->power.lock); | ||
394 | |||
395 | retval = dev->type->pm->runtime_resume(dev); | ||
396 | |||
397 | spin_lock_irq(&dev->power.lock); | ||
398 | dev->power.runtime_error = retval; | ||
399 | } else if (dev->class && dev->class->pm | ||
400 | && dev->class->pm->runtime_resume) { | ||
401 | spin_unlock_irq(&dev->power.lock); | ||
402 | |||
403 | retval = dev->class->pm->runtime_resume(dev); | ||
404 | |||
405 | spin_lock_irq(&dev->power.lock); | ||
406 | dev->power.runtime_error = retval; | ||
363 | } else { | 407 | } else { |
364 | retval = -ENOSYS; | 408 | retval = -ENOSYS; |
365 | } | 409 | } |
@@ -626,6 +670,8 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay) | |||
626 | goto out; | 670 | goto out; |
627 | 671 | ||
628 | dev->power.timer_expires = jiffies + msecs_to_jiffies(delay); | 672 | dev->power.timer_expires = jiffies + msecs_to_jiffies(delay); |
673 | if (!dev->power.timer_expires) | ||
674 | dev->power.timer_expires = 1; | ||
629 | mod_timer(&dev->power.suspend_timer, dev->power.timer_expires); | 675 | mod_timer(&dev->power.suspend_timer, dev->power.timer_expires); |
630 | 676 | ||
631 | out: | 677 | out: |
@@ -659,13 +705,17 @@ static int __pm_request_resume(struct device *dev) | |||
659 | 705 | ||
660 | pm_runtime_deactivate_timer(dev); | 706 | pm_runtime_deactivate_timer(dev); |
661 | 707 | ||
708 | if (dev->power.runtime_status == RPM_SUSPENDING) { | ||
709 | dev->power.deferred_resume = true; | ||
710 | return retval; | ||
711 | } | ||
662 | if (dev->power.request_pending) { | 712 | if (dev->power.request_pending) { |
663 | /* If non-resume request is pending, we can overtake it. */ | 713 | /* If non-resume request is pending, we can overtake it. */ |
664 | dev->power.request = retval ? RPM_REQ_NONE : RPM_REQ_RESUME; | 714 | dev->power.request = retval ? RPM_REQ_NONE : RPM_REQ_RESUME; |
665 | return retval; | 715 | return retval; |
666 | } else if (retval) { | ||
667 | return retval; | ||
668 | } | 716 | } |
717 | if (retval) | ||
718 | return retval; | ||
669 | 719 | ||
670 | dev->power.request = RPM_REQ_RESUME; | 720 | dev->power.request = RPM_REQ_RESUME; |
671 | dev->power.request_pending = true; | 721 | dev->power.request_pending = true; |
@@ -696,15 +746,15 @@ EXPORT_SYMBOL_GPL(pm_request_resume); | |||
696 | * @dev: Device to handle. | 746 | * @dev: Device to handle. |
697 | * @sync: If set and the device is suspended, resume it synchronously. | 747 | * @sync: If set and the device is suspended, resume it synchronously. |
698 | * | 748 | * |
699 | * Increment the usage count of the device and if it was zero previously, | 749 | * Increment the usage count of the device and resume it or submit a resume |
700 | * resume it or submit a resume request for it, depending on the value of @sync. | 750 | * request for it, depending on the value of @sync. |
701 | */ | 751 | */ |
702 | int __pm_runtime_get(struct device *dev, bool sync) | 752 | int __pm_runtime_get(struct device *dev, bool sync) |
703 | { | 753 | { |
704 | int retval = 1; | 754 | int retval; |
705 | 755 | ||
706 | if (atomic_add_return(1, &dev->power.usage_count) == 1) | 756 | atomic_inc(&dev->power.usage_count); |
707 | retval = sync ? pm_runtime_resume(dev) : pm_request_resume(dev); | 757 | retval = sync ? pm_runtime_resume(dev) : pm_request_resume(dev); |
708 | 758 | ||
709 | return retval; | 759 | return retval; |
710 | } | 760 | } |
@@ -777,7 +827,7 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status) | |||
777 | } | 827 | } |
778 | 828 | ||
779 | if (parent) { | 829 | if (parent) { |
780 | spin_lock(&parent->power.lock); | 830 | spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING); |
781 | 831 | ||
782 | /* | 832 | /* |
783 | * It is invalid to put an active child under a parent that is | 833 | * It is invalid to put an active child under a parent that is |
@@ -786,12 +836,10 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status) | |||
786 | */ | 836 | */ |
787 | if (!parent->power.disable_depth | 837 | if (!parent->power.disable_depth |
788 | && !parent->power.ignore_children | 838 | && !parent->power.ignore_children |
789 | && parent->power.runtime_status != RPM_ACTIVE) { | 839 | && parent->power.runtime_status != RPM_ACTIVE) |
790 | error = -EBUSY; | 840 | error = -EBUSY; |
791 | } else { | 841 | else if (dev->power.runtime_status == RPM_SUSPENDED) |
792 | if (dev->power.runtime_status == RPM_SUSPENDED) | 842 | atomic_inc(&parent->power.child_count); |
793 | atomic_inc(&parent->power.child_count); | ||
794 | } | ||
795 | 843 | ||
796 | spin_unlock(&parent->power.lock); | 844 | spin_unlock(&parent->power.lock); |
797 | 845 | ||
@@ -963,6 +1011,50 @@ void pm_runtime_enable(struct device *dev) | |||
963 | EXPORT_SYMBOL_GPL(pm_runtime_enable); | 1011 | EXPORT_SYMBOL_GPL(pm_runtime_enable); |
964 | 1012 | ||
965 | /** | 1013 | /** |
1014 | * pm_runtime_forbid - Block run-time PM of a device. | ||
1015 | * @dev: Device to handle. | ||
1016 | * | ||
1017 | * Increase the device's usage count and clear its power.runtime_auto flag, | ||
1018 | * so that it cannot be suspended at run time until pm_runtime_allow() is called | ||
1019 | * for it. | ||
1020 | */ | ||
1021 | void pm_runtime_forbid(struct device *dev) | ||
1022 | { | ||
1023 | spin_lock_irq(&dev->power.lock); | ||
1024 | if (!dev->power.runtime_auto) | ||
1025 | goto out; | ||
1026 | |||
1027 | dev->power.runtime_auto = false; | ||
1028 | atomic_inc(&dev->power.usage_count); | ||
1029 | __pm_runtime_resume(dev, false); | ||
1030 | |||
1031 | out: | ||
1032 | spin_unlock_irq(&dev->power.lock); | ||
1033 | } | ||
1034 | EXPORT_SYMBOL_GPL(pm_runtime_forbid); | ||
1035 | |||
1036 | /** | ||
1037 | * pm_runtime_allow - Unblock run-time PM of a device. | ||
1038 | * @dev: Device to handle. | ||
1039 | * | ||
1040 | * Decrease the device's usage count and set its power.runtime_auto flag. | ||
1041 | */ | ||
1042 | void pm_runtime_allow(struct device *dev) | ||
1043 | { | ||
1044 | spin_lock_irq(&dev->power.lock); | ||
1045 | if (dev->power.runtime_auto) | ||
1046 | goto out; | ||
1047 | |||
1048 | dev->power.runtime_auto = true; | ||
1049 | if (atomic_dec_and_test(&dev->power.usage_count)) | ||
1050 | __pm_runtime_idle(dev); | ||
1051 | |||
1052 | out: | ||
1053 | spin_unlock_irq(&dev->power.lock); | ||
1054 | } | ||
1055 | EXPORT_SYMBOL_GPL(pm_runtime_allow); | ||
1056 | |||
1057 | /** | ||
966 | * pm_runtime_init - Initialize run-time PM fields in given device object. | 1058 | * pm_runtime_init - Initialize run-time PM fields in given device object. |
967 | * @dev: Device object to initialize. | 1059 | * @dev: Device object to initialize. |
968 | */ | 1060 | */ |
@@ -980,6 +1072,7 @@ void pm_runtime_init(struct device *dev) | |||
980 | 1072 | ||
981 | atomic_set(&dev->power.child_count, 0); | 1073 | atomic_set(&dev->power.child_count, 0); |
982 | pm_suspend_ignore_children(dev, false); | 1074 | pm_suspend_ignore_children(dev, false); |
1075 | dev->power.runtime_auto = true; | ||
983 | 1076 | ||
984 | dev->power.request_pending = false; | 1077 | dev->power.request_pending = false; |
985 | dev->power.request = RPM_REQ_NONE; | 1078 | dev->power.request = RPM_REQ_NONE; |
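With these hunks, runtime PM callbacks are looked up in bus, then device type, then class ops. A class that wants to provide fallback runtime callbacks for all of its devices could therefore do something like the sketch below (the "foo" class and its callbacks are hypothetical):

#include <linux/device.h>
#include <linux/pm.h>

static int foo_class_runtime_suspend(struct device *dev)
{
	/* class-wide power-down sequence would go here */
	return 0;
}

static int foo_class_runtime_resume(struct device *dev)
{
	/* class-wide power-up sequence would go here */
	return 0;
}

static const struct dev_pm_ops foo_class_pm_ops = {
	.runtime_suspend = foo_class_runtime_suspend,
	.runtime_resume  = foo_class_runtime_resume,
};

static struct class foo_class = {
	.name = "foo",
	/* consulted only when neither the bus nor the device type
	 * supplies the corresponding runtime callback */
	.pm   = &foo_class_pm_ops,
};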
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index 596aeecfdffe..86fd9373447e 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c | |||
@@ -4,9 +4,25 @@ | |||
4 | 4 | ||
5 | #include <linux/device.h> | 5 | #include <linux/device.h> |
6 | #include <linux/string.h> | 6 | #include <linux/string.h> |
7 | #include <linux/pm_runtime.h> | ||
7 | #include "power.h" | 8 | #include "power.h" |
8 | 9 | ||
9 | /* | 10 | /* |
11 | * control - Report/change current runtime PM setting of the device | ||
12 | * | ||
13 | * Runtime power management of a device can be blocked with the help of | ||
14 | * this attribute. All devices have one of the following two values for | ||
15 | * the power/control file: | ||
16 | * | ||
17 | * + "auto\n" to allow the device to be power managed at run time; | ||
18 | * + "on\n" to prevent the device from being power managed at run time; | ||
19 | * | ||
20 | * The default for all devices is "auto", which means that devices may be | ||
21 | * subject to automatic power management, depending on their drivers. | ||
22 | * Changing this attribute to "on" prevents the driver from power managing | ||
23 | * the device at run time. Doing that while the device is suspended causes | ||
24 | * it to be woken up. | ||
25 | * | ||
10 | * wakeup - Report/change current wakeup option for device | 26 | * wakeup - Report/change current wakeup option for device |
11 | * | 27 | * |
12 | * Some devices support "wakeup" events, which are hardware signals | 28 | * Some devices support "wakeup" events, which are hardware signals |
@@ -38,11 +54,61 @@ | |||
38 | * wakeup events internally (unless they are disabled), keeping | 54 | * wakeup events internally (unless they are disabled), keeping |
39 | * their hardware in low power modes whenever they're unused. This | 55 | * their hardware in low power modes whenever they're unused. This |
40 | * saves runtime power, without requiring system-wide sleep states. | 56 | * saves runtime power, without requiring system-wide sleep states. |
57 | * | ||
58 | * async - Report/change current async suspend setting for the device | ||
59 | * | ||
60 | * Asynchronous suspend and resume of the device during system-wide power | ||
61 | * state transitions can be enabled by writing "enabled" to this file. | ||
62 | * Analogously, if "disabled" is written to this file, the device will be | ||
63 | * suspended and resumed synchronously. | ||
64 | * | ||
65 | * All devices have one of the following two values for power/async: | ||
66 | * | ||
67 | * + "enabled\n" to permit the asynchronous suspend/resume of the device; | ||
68 | * + "disabled\n" to forbid it; | ||
69 | * | ||
70 | * NOTE: It generally is unsafe to permit the asynchronous suspend/resume | ||
71 | * of a device unless it is certain that all of the PM dependencies of the | ||
72 | * device are known to the PM core. However, for some devices this | ||
73 | * attribute is set to "enabled" by bus type code or device drivers and in | ||
74 | * that case it should be safe to leave the default value. | ||
41 | */ | 75 | */ |
42 | 76 | ||
43 | static const char enabled[] = "enabled"; | 77 | static const char enabled[] = "enabled"; |
44 | static const char disabled[] = "disabled"; | 78 | static const char disabled[] = "disabled"; |
45 | 79 | ||
80 | #ifdef CONFIG_PM_RUNTIME | ||
81 | static const char ctrl_auto[] = "auto"; | ||
82 | static const char ctrl_on[] = "on"; | ||
83 | |||
84 | static ssize_t control_show(struct device *dev, struct device_attribute *attr, | ||
85 | char *buf) | ||
86 | { | ||
87 | return sprintf(buf, "%s\n", | ||
88 | dev->power.runtime_auto ? ctrl_auto : ctrl_on); | ||
89 | } | ||
90 | |||
91 | static ssize_t control_store(struct device * dev, struct device_attribute *attr, | ||
92 | const char * buf, size_t n) | ||
93 | { | ||
94 | char *cp; | ||
95 | int len = n; | ||
96 | |||
97 | cp = memchr(buf, '\n', n); | ||
98 | if (cp) | ||
99 | len = cp - buf; | ||
100 | if (len == sizeof ctrl_auto - 1 && strncmp(buf, ctrl_auto, len) == 0) | ||
101 | pm_runtime_allow(dev); | ||
102 | else if (len == sizeof ctrl_on - 1 && strncmp(buf, ctrl_on, len) == 0) | ||
103 | pm_runtime_forbid(dev); | ||
104 | else | ||
105 | return -EINVAL; | ||
106 | return n; | ||
107 | } | ||
108 | |||
109 | static DEVICE_ATTR(control, 0644, control_show, control_store); | ||
110 | #endif | ||
111 | |||
46 | static ssize_t | 112 | static ssize_t |
47 | wake_show(struct device * dev, struct device_attribute *attr, char * buf) | 113 | wake_show(struct device * dev, struct device_attribute *attr, char * buf) |
48 | { | 114 | { |
@@ -77,9 +143,43 @@ wake_store(struct device * dev, struct device_attribute *attr, | |||
77 | 143 | ||
78 | static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store); | 144 | static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store); |
79 | 145 | ||
146 | #ifdef CONFIG_PM_SLEEP_ADVANCED_DEBUG | ||
147 | static ssize_t async_show(struct device *dev, struct device_attribute *attr, | ||
148 | char *buf) | ||
149 | { | ||
150 | return sprintf(buf, "%s\n", | ||
151 | device_async_suspend_enabled(dev) ? enabled : disabled); | ||
152 | } | ||
153 | |||
154 | static ssize_t async_store(struct device *dev, struct device_attribute *attr, | ||
155 | const char *buf, size_t n) | ||
156 | { | ||
157 | char *cp; | ||
158 | int len = n; | ||
159 | |||
160 | cp = memchr(buf, '\n', n); | ||
161 | if (cp) | ||
162 | len = cp - buf; | ||
163 | if (len == sizeof enabled - 1 && strncmp(buf, enabled, len) == 0) | ||
164 | device_enable_async_suspend(dev); | ||
165 | else if (len == sizeof disabled - 1 && strncmp(buf, disabled, len) == 0) | ||
166 | device_disable_async_suspend(dev); | ||
167 | else | ||
168 | return -EINVAL; | ||
169 | return n; | ||
170 | } | ||
171 | |||
172 | static DEVICE_ATTR(async, 0644, async_show, async_store); | ||
173 | #endif /* CONFIG_PM_SLEEP_ADVANCED_DEBUG */ | ||
80 | 174 | ||
81 | static struct attribute * power_attrs[] = { | 175 | static struct attribute * power_attrs[] = { |
176 | #ifdef CONFIG_PM_RUNTIME | ||
177 | &dev_attr_control.attr, | ||
178 | #endif | ||
82 | &dev_attr_wakeup.attr, | 179 | &dev_attr_wakeup.attr, |
180 | #ifdef CONFIG_PM_SLEEP_ADVANCED_DEBUG | ||
181 | &dev_attr_async.attr, | ||
182 | #endif | ||
83 | NULL, | 183 | NULL, |
84 | }; | 184 | }; |
85 | static struct attribute_group pm_attr_group = { | 185 | static struct attribute_group pm_attr_group = { |
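Drivers and bus code normally opt a device into asynchronous suspend/resume from C rather than through the sysfs attribute documented above. A hedged sketch of such a helper, which a driver might call from its probe path (foo_mark_async is hypothetical; it assumes all of the device's PM ordering constraints are covered by the parent/child tree or by device_pm_wait_for_dev()):

#include <linux/device.h>
#include <linux/pm.h>

/* Mark the device as safe for asynchronous suspend/resume so the PM core
 * may process it in parallel with unrelated devices. */
static void foo_mark_async(struct device *dev)
{
	device_enable_async_suspend(dev);
}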
diff --git a/drivers/base/sys.c b/drivers/base/sys.c index 0d903909af7e..9354dc10a363 100644 --- a/drivers/base/sys.c +++ b/drivers/base/sys.c | |||
@@ -17,7 +17,6 @@ | |||
17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
18 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
19 | #include <linux/init.h> | 19 | #include <linux/init.h> |
20 | #include <linux/slab.h> | ||
21 | #include <linux/string.h> | 20 | #include <linux/string.h> |
22 | #include <linux/pm.h> | 21 | #include <linux/pm.h> |
23 | #include <linux/device.h> | 22 | #include <linux/device.h> |
@@ -54,7 +53,7 @@ sysdev_store(struct kobject *kobj, struct attribute *attr, | |||
54 | return -EIO; | 53 | return -EIO; |
55 | } | 54 | } |
56 | 55 | ||
57 | static struct sysfs_ops sysfs_ops = { | 56 | static const struct sysfs_ops sysfs_ops = { |
58 | .show = sysdev_show, | 57 | .show = sysdev_show, |
59 | .store = sysdev_store, | 58 | .store = sysdev_store, |
60 | }; | 59 | }; |
@@ -89,7 +88,7 @@ static ssize_t sysdev_class_show(struct kobject *kobj, struct attribute *attr, | |||
89 | struct sysdev_class_attribute *class_attr = to_sysdev_class_attr(attr); | 88 | struct sysdev_class_attribute *class_attr = to_sysdev_class_attr(attr); |
90 | 89 | ||
91 | if (class_attr->show) | 90 | if (class_attr->show) |
92 | return class_attr->show(class, buffer); | 91 | return class_attr->show(class, class_attr, buffer); |
93 | return -EIO; | 92 | return -EIO; |
94 | } | 93 | } |
95 | 94 | ||
@@ -100,11 +99,11 @@ static ssize_t sysdev_class_store(struct kobject *kobj, struct attribute *attr, | |||
100 | struct sysdev_class_attribute *class_attr = to_sysdev_class_attr(attr); | 99 | struct sysdev_class_attribute *class_attr = to_sysdev_class_attr(attr); |
101 | 100 | ||
102 | if (class_attr->store) | 101 | if (class_attr->store) |
103 | return class_attr->store(class, buffer, count); | 102 | return class_attr->store(class, class_attr, buffer, count); |
104 | return -EIO; | 103 | return -EIO; |
105 | } | 104 | } |
106 | 105 | ||
107 | static struct sysfs_ops sysfs_class_ops = { | 106 | static const struct sysfs_ops sysfs_class_ops = { |
108 | .show = sysdev_class_show, | 107 | .show = sysdev_class_show, |
109 | .store = sysdev_class_store, | 108 | .store = sysdev_class_store, |
110 | }; | 109 | }; |
@@ -145,13 +144,20 @@ int sysdev_class_register(struct sysdev_class *cls) | |||
145 | if (retval) | 144 | if (retval) |
146 | return retval; | 145 | return retval; |
147 | 146 | ||
148 | return kset_register(&cls->kset); | 147 | retval = kset_register(&cls->kset); |
148 | if (!retval && cls->attrs) | ||
149 | retval = sysfs_create_files(&cls->kset.kobj, | ||
150 | (const struct attribute **)cls->attrs); | ||
151 | return retval; | ||
149 | } | 152 | } |
150 | 153 | ||
151 | void sysdev_class_unregister(struct sysdev_class *cls) | 154 | void sysdev_class_unregister(struct sysdev_class *cls) |
152 | { | 155 | { |
153 | pr_debug("Unregistering sysdev class '%s'\n", | 156 | pr_debug("Unregistering sysdev class '%s'\n", |
154 | kobject_name(&cls->kset.kobj)); | 157 | kobject_name(&cls->kset.kobj)); |
158 | if (cls->attrs) | ||
159 | sysfs_remove_files(&cls->kset.kobj, | ||
160 | (const struct attribute **)cls->attrs); | ||
155 | kset_unregister(&cls->kset); | 161 | kset_unregister(&cls->kset); |
156 | } | 162 | } |
157 | 163 | ||
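For reference, a sysdev class attribute written against the updated show() prototype, which now receives the attribute pointer as well, might look like the following sketch (the "foo" class, its attribute and the reported value are hypothetical):

#include <linux/kernel.h>
#include <linux/sysdev.h>

static ssize_t foo_show(struct sysdev_class *cls,
			struct sysdev_class_attribute *attr, char *buf)
{
	/* report a fixed value; a real attribute would derive it from cls */
	return sprintf(buf, "42\n");
}

static struct sysdev_class_attribute foo_attr = {
	.attr = { .name = "foo", .mode = 0444 },
	.show = foo_show,
};

static struct sysdev_class_attribute *foo_attrs[] = {
	&foo_attr,
	NULL,
};

static struct sysdev_class foo_sysdev_class = {
	.name  = "foo",
	.attrs = foo_attrs,	/* created and removed by sysdev_class_{register,unregister}() */
};

Registering foo_sysdev_class with sysdev_class_register() should then create the "foo" file under /sys/devices/system/foo/ automatically, per the change above.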