author    Glenn Elliott <gelliott@cs.unc.edu>    2012-03-04 19:47:13 -0500
committer Glenn Elliott <gelliott@cs.unc.edu>    2012-03-04 19:47:13 -0500
commit    c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree      ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /drivers/base
parent    ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent    6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
	litmus/sched_cedf.c
Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/Kconfig             |    3
-rw-r--r--  drivers/base/Makefile            |    6
-rw-r--r--  drivers/base/base.h              |   64
-rw-r--r--  drivers/base/bus.c               |   37
-rw-r--r--  drivers/base/class.c             |   65
-rw-r--r--  drivers/base/core.c              |  310
-rw-r--r--  drivers/base/dd.c                |   18
-rw-r--r--  drivers/base/devtmpfs.c          |   18
-rw-r--r--  drivers/base/firmware_class.c    |   12
-rw-r--r--  drivers/base/memory.c            |  274
-rw-r--r--  drivers/base/node.c              |   59
-rw-r--r--  drivers/base/platform.c          |  176
-rw-r--r--  drivers/base/power/Makefile      |    8
-rw-r--r--  drivers/base/power/clock_ops.c   |  431
-rw-r--r--  drivers/base/power/generic_ops.c |   49
-rw-r--r--  drivers/base/power/main.c        |  413
-rw-r--r--  drivers/base/power/opp.c         |  628
-rw-r--r--  drivers/base/power/power.h       |   21
-rw-r--r--  drivers/base/power/runtime.c     |  998
-rw-r--r--  drivers/base/power/sysfs.c       |  263
-rw-r--r--  drivers/base/power/trace.c       |   42
-rw-r--r--  drivers/base/power/wakeup.c      |  697
-rw-r--r--  drivers/base/sys.c               |  272
-rw-r--r--  drivers/base/syscore.c           |  127
-rw-r--r--  drivers/base/topology.c          |   16
25 files changed, 3455 insertions, 1552 deletions
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index ef38aff737eb..d57e8d0fb823 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -70,8 +70,7 @@ config PREVENT_FIRMWARE_BUILD | |||
70 | If unsure say Y here. | 70 | If unsure say Y here. |
71 | 71 | ||
72 | config FW_LOADER | 72 | config FW_LOADER |
73 | tristate "Userspace firmware loading support" if EMBEDDED | 73 | tristate "Userspace firmware loading support" if EXPERT |
74 | depends on HOTPLUG | ||
75 | default y | 74 | default y |
76 | ---help--- | 75 | ---help--- |
77 | This option is provided for the case where no in-kernel-tree modules | 76 | This option is provided for the case where no in-kernel-tree modules |
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index c12c7f2f2a6f..4c5701c15f53 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -1,6 +1,6 @@ | |||
1 | # Makefile for the Linux device tree | 1 | # Makefile for the Linux device tree |
2 | 2 | ||
3 | obj-y := core.o sys.o bus.o dd.o \ | 3 | obj-y := core.o sys.o bus.o dd.o syscore.o \ |
4 | driver.o class.o platform.o \ | 4 | driver.o class.o platform.o \ |
5 | cpu.o firmware.o init.o map.o devres.o \ | 5 | cpu.o firmware.o init.o map.o devres.o \ |
6 | attribute_container.o transport_class.o | 6 | attribute_container.o transport_class.o |
@@ -19,7 +19,5 @@ obj-$(CONFIG_MODULES) += module.o | |||
19 | endif | 19 | endif |
20 | obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor.o | 20 | obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor.o |
21 | 21 | ||
22 | ifeq ($(CONFIG_DEBUG_DRIVER),y) | 22 | ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG |
23 | EXTRA_CFLAGS += -DDEBUG | ||
24 | endif | ||
25 | 23 | ||
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 2ca7f5b7b824..a34dca0ad041 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -1,31 +1,46 @@ | |||
1 | 1 | ||
2 | /** | 2 | /** |
3 | * struct bus_type_private - structure to hold the private to the driver core portions of the bus_type structure. | 3 | * struct subsys_private - structure to hold the private to the driver core portions of the bus_type/class structure. |
4 | * | 4 | * |
5 | * @subsys - the struct kset that defines this bus. This is the main kobject | 5 | * @subsys - the struct kset that defines this subsystem |
6 | * @drivers_kset - the list of drivers associated with this bus | 6 | * @devices_kset - the list of devices associated |
7 | * @devices_kset - the list of devices associated with this bus | 7 | * |
8 | * @drivers_kset - the list of drivers associated | ||
8 | * @klist_devices - the klist to iterate over the @devices_kset | 9 | * @klist_devices - the klist to iterate over the @devices_kset |
9 | * @klist_drivers - the klist to iterate over the @drivers_kset | 10 | * @klist_drivers - the klist to iterate over the @drivers_kset |
10 | * @bus_notifier - the bus notifier list for anything that cares about things | 11 | * @bus_notifier - the bus notifier list for anything that cares about things |
11 | * on this bus. | 12 | * on this bus. |
12 | * @bus - pointer back to the struct bus_type that this structure is associated | 13 | * @bus - pointer back to the struct bus_type that this structure is associated |
13 | * with. | 14 | * with. |
15 | * | ||
16 | * @class_interfaces - list of class_interfaces associated | ||
17 | * @glue_dirs - "glue" directory to put in-between the parent device to | ||
18 | * avoid namespace conflicts | ||
19 | * @class_mutex - mutex to protect the children, devices, and interfaces lists. | ||
20 | * @class - pointer back to the struct class that this structure is associated | ||
21 | * with. | ||
14 | * | 22 | * |
15 | * This structure is the one that is the actual kobject allowing struct | 23 | * This structure is the one that is the actual kobject allowing struct |
16 | * bus_type to be statically allocated safely. Nothing outside of the driver | 24 | * bus_type/class to be statically allocated safely. Nothing outside of the |
17 | * core should ever touch these fields. | 25 | * driver core should ever touch these fields. |
18 | */ | 26 | */ |
19 | struct bus_type_private { | 27 | struct subsys_private { |
20 | struct kset subsys; | 28 | struct kset subsys; |
21 | struct kset *drivers_kset; | ||
22 | struct kset *devices_kset; | 29 | struct kset *devices_kset; |
30 | |||
31 | struct kset *drivers_kset; | ||
23 | struct klist klist_devices; | 32 | struct klist klist_devices; |
24 | struct klist klist_drivers; | 33 | struct klist klist_drivers; |
25 | struct blocking_notifier_head bus_notifier; | 34 | struct blocking_notifier_head bus_notifier; |
26 | unsigned int drivers_autoprobe:1; | 35 | unsigned int drivers_autoprobe:1; |
27 | struct bus_type *bus; | 36 | struct bus_type *bus; |
37 | |||
38 | struct list_head class_interfaces; | ||
39 | struct kset glue_dirs; | ||
40 | struct mutex class_mutex; | ||
41 | struct class *class; | ||
28 | }; | 42 | }; |
43 | #define to_subsys_private(obj) container_of(obj, struct subsys_private, subsys.kobj) | ||
29 | 44 | ||
30 | struct driver_private { | 45 | struct driver_private { |
31 | struct kobject kobj; | 46 | struct kobject kobj; |
@@ -36,33 +51,6 @@ struct driver_private { | |||
36 | }; | 51 | }; |
37 | #define to_driver(obj) container_of(obj, struct driver_private, kobj) | 52 | #define to_driver(obj) container_of(obj, struct driver_private, kobj) |
38 | 53 | ||
39 | |||
40 | /** | ||
41 | * struct class_private - structure to hold the private to the driver core portions of the class structure. | ||
42 | * | ||
43 | * @class_subsys - the struct kset that defines this class. This is the main kobject | ||
44 | * @class_devices - list of devices associated with this class | ||
45 | * @class_interfaces - list of class_interfaces associated with this class | ||
46 | * @class_dirs - "glue" directory for virtual devices associated with this class | ||
47 | * @class_mutex - mutex to protect the children, devices, and interfaces lists. | ||
48 | * @class - pointer back to the struct class that this structure is associated | ||
49 | * with. | ||
50 | * | ||
51 | * This structure is the one that is the actual kobject allowing struct | ||
52 | * class to be statically allocated safely. Nothing outside of the driver | ||
53 | * core should ever touch these fields. | ||
54 | */ | ||
55 | struct class_private { | ||
56 | struct kset class_subsys; | ||
57 | struct klist class_devices; | ||
58 | struct list_head class_interfaces; | ||
59 | struct kset class_dirs; | ||
60 | struct mutex class_mutex; | ||
61 | struct class *class; | ||
62 | }; | ||
63 | #define to_class(obj) \ | ||
64 | container_of(obj, struct class_private, class_subsys.kobj) | ||
65 | |||
66 | /** | 54 | /** |
67 | * struct device_private - structure to hold the private to the driver core portions of the device structure. | 55 | * struct device_private - structure to hold the private to the driver core portions of the device structure. |
68 | * | 56 | * |
@@ -123,8 +111,6 @@ static inline int driver_match_device(struct device_driver *drv, | |||
123 | return drv->bus->match ? drv->bus->match(dev, drv) : 1; | 111 | return drv->bus->match ? drv->bus->match(dev, drv) : 1; |
124 | } | 112 | } |
125 | 113 | ||
126 | extern void sysdev_shutdown(void); | ||
127 | |||
128 | extern char *make_class_name(const char *name, struct kobject *kobj); | 114 | extern char *make_class_name(const char *name, struct kobject *kobj); |
129 | 115 | ||
130 | extern int devres_release_all(struct device *dev); | 116 | extern int devres_release_all(struct device *dev); |
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index eb1b7fa20dce..000e7b2006f8 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -20,7 +20,6 @@ | |||
20 | #include "power/power.h" | 20 | #include "power/power.h" |
21 | 21 | ||
22 | #define to_bus_attr(_attr) container_of(_attr, struct bus_attribute, attr) | 22 | #define to_bus_attr(_attr) container_of(_attr, struct bus_attribute, attr) |
23 | #define to_bus(obj) container_of(obj, struct bus_type_private, subsys.kobj) | ||
24 | 23 | ||
25 | /* | 24 | /* |
26 | * sysfs bindings for drivers | 25 | * sysfs bindings for drivers |
@@ -96,11 +95,11 @@ static ssize_t bus_attr_show(struct kobject *kobj, struct attribute *attr, | |||
96 | char *buf) | 95 | char *buf) |
97 | { | 96 | { |
98 | struct bus_attribute *bus_attr = to_bus_attr(attr); | 97 | struct bus_attribute *bus_attr = to_bus_attr(attr); |
99 | struct bus_type_private *bus_priv = to_bus(kobj); | 98 | struct subsys_private *subsys_priv = to_subsys_private(kobj); |
100 | ssize_t ret = 0; | 99 | ssize_t ret = 0; |
101 | 100 | ||
102 | if (bus_attr->show) | 101 | if (bus_attr->show) |
103 | ret = bus_attr->show(bus_priv->bus, buf); | 102 | ret = bus_attr->show(subsys_priv->bus, buf); |
104 | return ret; | 103 | return ret; |
105 | } | 104 | } |
106 | 105 | ||
@@ -108,11 +107,11 @@ static ssize_t bus_attr_store(struct kobject *kobj, struct attribute *attr, | |||
108 | const char *buf, size_t count) | 107 | const char *buf, size_t count) |
109 | { | 108 | { |
110 | struct bus_attribute *bus_attr = to_bus_attr(attr); | 109 | struct bus_attribute *bus_attr = to_bus_attr(attr); |
111 | struct bus_type_private *bus_priv = to_bus(kobj); | 110 | struct subsys_private *subsys_priv = to_subsys_private(kobj); |
112 | ssize_t ret = 0; | 111 | ssize_t ret = 0; |
113 | 112 | ||
114 | if (bus_attr->store) | 113 | if (bus_attr->store) |
115 | ret = bus_attr->store(bus_priv->bus, buf, count); | 114 | ret = bus_attr->store(subsys_priv->bus, buf, count); |
116 | return ret; | 115 | return ret; |
117 | } | 116 | } |
118 | 117 | ||
@@ -440,22 +439,6 @@ static void device_remove_attrs(struct bus_type *bus, struct device *dev) | |||
440 | } | 439 | } |
441 | } | 440 | } |
442 | 441 | ||
443 | #ifdef CONFIG_SYSFS_DEPRECATED | ||
444 | static int make_deprecated_bus_links(struct device *dev) | ||
445 | { | ||
446 | return sysfs_create_link(&dev->kobj, | ||
447 | &dev->bus->p->subsys.kobj, "bus"); | ||
448 | } | ||
449 | |||
450 | static void remove_deprecated_bus_links(struct device *dev) | ||
451 | { | ||
452 | sysfs_remove_link(&dev->kobj, "bus"); | ||
453 | } | ||
454 | #else | ||
455 | static inline int make_deprecated_bus_links(struct device *dev) { return 0; } | ||
456 | static inline void remove_deprecated_bus_links(struct device *dev) { } | ||
457 | #endif | ||
458 | |||
459 | /** | 442 | /** |
460 | * bus_add_device - add device to bus | 443 | * bus_add_device - add device to bus |
461 | * @dev: device being added | 444 | * @dev: device being added |
@@ -482,15 +465,10 @@ int bus_add_device(struct device *dev) | |||
482 | &dev->bus->p->subsys.kobj, "subsystem"); | 465 | &dev->bus->p->subsys.kobj, "subsystem"); |
483 | if (error) | 466 | if (error) |
484 | goto out_subsys; | 467 | goto out_subsys; |
485 | error = make_deprecated_bus_links(dev); | ||
486 | if (error) | ||
487 | goto out_deprecated; | ||
488 | klist_add_tail(&dev->p->knode_bus, &bus->p->klist_devices); | 468 | klist_add_tail(&dev->p->knode_bus, &bus->p->klist_devices); |
489 | } | 469 | } |
490 | return 0; | 470 | return 0; |
491 | 471 | ||
492 | out_deprecated: | ||
493 | sysfs_remove_link(&dev->kobj, "subsystem"); | ||
494 | out_subsys: | 472 | out_subsys: |
495 | sysfs_remove_link(&bus->p->devices_kset->kobj, dev_name(dev)); | 473 | sysfs_remove_link(&bus->p->devices_kset->kobj, dev_name(dev)); |
496 | out_id: | 474 | out_id: |
@@ -530,7 +508,6 @@ void bus_remove_device(struct device *dev) | |||
530 | { | 508 | { |
531 | if (dev->bus) { | 509 | if (dev->bus) { |
532 | sysfs_remove_link(&dev->kobj, "subsystem"); | 510 | sysfs_remove_link(&dev->kobj, "subsystem"); |
533 | remove_deprecated_bus_links(dev); | ||
534 | sysfs_remove_link(&dev->bus->p->devices_kset->kobj, | 511 | sysfs_remove_link(&dev->bus->p->devices_kset->kobj, |
535 | dev_name(dev)); | 512 | dev_name(dev)); |
536 | device_remove_attrs(dev->bus, dev); | 513 | device_remove_attrs(dev->bus, dev); |
@@ -880,9 +857,9 @@ static BUS_ATTR(uevent, S_IWUSR, NULL, bus_uevent_store); | |||
880 | int bus_register(struct bus_type *bus) | 857 | int bus_register(struct bus_type *bus) |
881 | { | 858 | { |
882 | int retval; | 859 | int retval; |
883 | struct bus_type_private *priv; | 860 | struct subsys_private *priv; |
884 | 861 | ||
885 | priv = kzalloc(sizeof(struct bus_type_private), GFP_KERNEL); | 862 | priv = kzalloc(sizeof(struct subsys_private), GFP_KERNEL); |
886 | if (!priv) | 863 | if (!priv) |
887 | return -ENOMEM; | 864 | return -ENOMEM; |
888 | 865 | ||
@@ -998,7 +975,7 @@ struct klist *bus_get_device_klist(struct bus_type *bus) | |||
998 | EXPORT_SYMBOL_GPL(bus_get_device_klist); | 975 | EXPORT_SYMBOL_GPL(bus_get_device_klist); |
999 | 976 | ||
1000 | /* | 977 | /* |
1001 | * Yes, this forcably breaks the klist abstraction temporarily. It | 978 | * Yes, this forcibly breaks the klist abstraction temporarily. It |
1002 | * just wants to sort the klist, not change reference counts and | 979 | * just wants to sort the klist, not change reference counts and |
1003 | * take/drop locks rapidly in the process. It does all this while | 980 | * take/drop locks rapidly in the process. It does all this while |
1004 | * holding the lock for the list, so objects can't otherwise be | 981 | * holding the lock for the list, so objects can't otherwise be |
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 8e231d05b400..4f1df2e8fd74 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -27,7 +27,7 @@ static ssize_t class_attr_show(struct kobject *kobj, struct attribute *attr, | |||
27 | char *buf) | 27 | char *buf) |
28 | { | 28 | { |
29 | struct class_attribute *class_attr = to_class_attr(attr); | 29 | struct class_attribute *class_attr = to_class_attr(attr); |
30 | struct class_private *cp = to_class(kobj); | 30 | struct subsys_private *cp = to_subsys_private(kobj); |
31 | ssize_t ret = -EIO; | 31 | ssize_t ret = -EIO; |
32 | 32 | ||
33 | if (class_attr->show) | 33 | if (class_attr->show) |
@@ -39,7 +39,7 @@ static ssize_t class_attr_store(struct kobject *kobj, struct attribute *attr, | |||
39 | const char *buf, size_t count) | 39 | const char *buf, size_t count) |
40 | { | 40 | { |
41 | struct class_attribute *class_attr = to_class_attr(attr); | 41 | struct class_attribute *class_attr = to_class_attr(attr); |
42 | struct class_private *cp = to_class(kobj); | 42 | struct subsys_private *cp = to_subsys_private(kobj); |
43 | ssize_t ret = -EIO; | 43 | ssize_t ret = -EIO; |
44 | 44 | ||
45 | if (class_attr->store) | 45 | if (class_attr->store) |
@@ -49,7 +49,7 @@ static ssize_t class_attr_store(struct kobject *kobj, struct attribute *attr, | |||
49 | 49 | ||
50 | static void class_release(struct kobject *kobj) | 50 | static void class_release(struct kobject *kobj) |
51 | { | 51 | { |
52 | struct class_private *cp = to_class(kobj); | 52 | struct subsys_private *cp = to_subsys_private(kobj); |
53 | struct class *class = cp->class; | 53 | struct class *class = cp->class; |
54 | 54 | ||
55 | pr_debug("class '%s': release.\n", class->name); | 55 | pr_debug("class '%s': release.\n", class->name); |
@@ -65,7 +65,7 @@ static void class_release(struct kobject *kobj) | |||
65 | 65 | ||
66 | static const struct kobj_ns_type_operations *class_child_ns_type(struct kobject *kobj) | 66 | static const struct kobj_ns_type_operations *class_child_ns_type(struct kobject *kobj) |
67 | { | 67 | { |
68 | struct class_private *cp = to_class(kobj); | 68 | struct subsys_private *cp = to_subsys_private(kobj); |
69 | struct class *class = cp->class; | 69 | struct class *class = cp->class; |
70 | 70 | ||
71 | return class->ns_type; | 71 | return class->ns_type; |
@@ -82,7 +82,7 @@ static struct kobj_type class_ktype = { | |||
82 | .child_ns_type = class_child_ns_type, | 82 | .child_ns_type = class_child_ns_type, |
83 | }; | 83 | }; |
84 | 84 | ||
85 | /* Hotplug events for classes go to the class class_subsys */ | 85 | /* Hotplug events for classes go to the class subsys */ |
86 | static struct kset *class_kset; | 86 | static struct kset *class_kset; |
87 | 87 | ||
88 | 88 | ||
@@ -90,7 +90,7 @@ int class_create_file(struct class *cls, const struct class_attribute *attr) | |||
90 | { | 90 | { |
91 | int error; | 91 | int error; |
92 | if (cls) | 92 | if (cls) |
93 | error = sysfs_create_file(&cls->p->class_subsys.kobj, | 93 | error = sysfs_create_file(&cls->p->subsys.kobj, |
94 | &attr->attr); | 94 | &attr->attr); |
95 | else | 95 | else |
96 | error = -EINVAL; | 96 | error = -EINVAL; |
@@ -100,20 +100,20 @@ int class_create_file(struct class *cls, const struct class_attribute *attr) | |||
100 | void class_remove_file(struct class *cls, const struct class_attribute *attr) | 100 | void class_remove_file(struct class *cls, const struct class_attribute *attr) |
101 | { | 101 | { |
102 | if (cls) | 102 | if (cls) |
103 | sysfs_remove_file(&cls->p->class_subsys.kobj, &attr->attr); | 103 | sysfs_remove_file(&cls->p->subsys.kobj, &attr->attr); |
104 | } | 104 | } |
105 | 105 | ||
106 | static struct class *class_get(struct class *cls) | 106 | static struct class *class_get(struct class *cls) |
107 | { | 107 | { |
108 | if (cls) | 108 | if (cls) |
109 | kset_get(&cls->p->class_subsys); | 109 | kset_get(&cls->p->subsys); |
110 | return cls; | 110 | return cls; |
111 | } | 111 | } |
112 | 112 | ||
113 | static void class_put(struct class *cls) | 113 | static void class_put(struct class *cls) |
114 | { | 114 | { |
115 | if (cls) | 115 | if (cls) |
116 | kset_put(&cls->p->class_subsys); | 116 | kset_put(&cls->p->subsys); |
117 | } | 117 | } |
118 | 118 | ||
119 | static int add_class_attrs(struct class *cls) | 119 | static int add_class_attrs(struct class *cls) |
@@ -162,7 +162,7 @@ static void klist_class_dev_put(struct klist_node *n) | |||
162 | 162 | ||
163 | int __class_register(struct class *cls, struct lock_class_key *key) | 163 | int __class_register(struct class *cls, struct lock_class_key *key) |
164 | { | 164 | { |
165 | struct class_private *cp; | 165 | struct subsys_private *cp; |
166 | int error; | 166 | int error; |
167 | 167 | ||
168 | pr_debug("device class '%s': registering\n", cls->name); | 168 | pr_debug("device class '%s': registering\n", cls->name); |
@@ -170,11 +170,11 @@ int __class_register(struct class *cls, struct lock_class_key *key) | |||
170 | cp = kzalloc(sizeof(*cp), GFP_KERNEL); | 170 | cp = kzalloc(sizeof(*cp), GFP_KERNEL); |
171 | if (!cp) | 171 | if (!cp) |
172 | return -ENOMEM; | 172 | return -ENOMEM; |
173 | klist_init(&cp->class_devices, klist_class_dev_get, klist_class_dev_put); | 173 | klist_init(&cp->klist_devices, klist_class_dev_get, klist_class_dev_put); |
174 | INIT_LIST_HEAD(&cp->class_interfaces); | 174 | INIT_LIST_HEAD(&cp->class_interfaces); |
175 | kset_init(&cp->class_dirs); | 175 | kset_init(&cp->glue_dirs); |
176 | __mutex_init(&cp->class_mutex, "struct class mutex", key); | 176 | __mutex_init(&cp->class_mutex, "struct class mutex", key); |
177 | error = kobject_set_name(&cp->class_subsys.kobj, "%s", cls->name); | 177 | error = kobject_set_name(&cp->subsys.kobj, "%s", cls->name); |
178 | if (error) { | 178 | if (error) { |
179 | kfree(cp); | 179 | kfree(cp); |
180 | return error; | 180 | return error; |
@@ -184,18 +184,18 @@ int __class_register(struct class *cls, struct lock_class_key *key) | |||
184 | if (!cls->dev_kobj) | 184 | if (!cls->dev_kobj) |
185 | cls->dev_kobj = sysfs_dev_char_kobj; | 185 | cls->dev_kobj = sysfs_dev_char_kobj; |
186 | 186 | ||
187 | #if defined(CONFIG_SYSFS_DEPRECATED) && defined(CONFIG_BLOCK) | 187 | #if defined(CONFIG_BLOCK) |
188 | /* let the block class directory show up in the root of sysfs */ | 188 | /* let the block class directory show up in the root of sysfs */ |
189 | if (cls != &block_class) | 189 | if (!sysfs_deprecated || cls != &block_class) |
190 | cp->class_subsys.kobj.kset = class_kset; | 190 | cp->subsys.kobj.kset = class_kset; |
191 | #else | 191 | #else |
192 | cp->class_subsys.kobj.kset = class_kset; | 192 | cp->subsys.kobj.kset = class_kset; |
193 | #endif | 193 | #endif |
194 | cp->class_subsys.kobj.ktype = &class_ktype; | 194 | cp->subsys.kobj.ktype = &class_ktype; |
195 | cp->class = cls; | 195 | cp->class = cls; |
196 | cls->p = cp; | 196 | cls->p = cp; |
197 | 197 | ||
198 | error = kset_register(&cp->class_subsys); | 198 | error = kset_register(&cp->subsys); |
199 | if (error) { | 199 | if (error) { |
200 | kfree(cp); | 200 | kfree(cp); |
201 | return error; | 201 | return error; |
@@ -210,7 +210,7 @@ void class_unregister(struct class *cls) | |||
210 | { | 210 | { |
211 | pr_debug("device class '%s': unregistering\n", cls->name); | 211 | pr_debug("device class '%s': unregistering\n", cls->name); |
212 | remove_class_attrs(cls); | 212 | remove_class_attrs(cls); |
213 | kset_unregister(&cls->p->class_subsys); | 213 | kset_unregister(&cls->p->subsys); |
214 | } | 214 | } |
215 | 215 | ||
216 | static void class_create_release(struct class *cls) | 216 | static void class_create_release(struct class *cls) |
@@ -276,25 +276,6 @@ void class_destroy(struct class *cls) | |||
276 | class_unregister(cls); | 276 | class_unregister(cls); |
277 | } | 277 | } |
278 | 278 | ||
279 | #ifdef CONFIG_SYSFS_DEPRECATED | ||
280 | char *make_class_name(const char *name, struct kobject *kobj) | ||
281 | { | ||
282 | char *class_name; | ||
283 | int size; | ||
284 | |||
285 | size = strlen(name) + strlen(kobject_name(kobj)) + 2; | ||
286 | |||
287 | class_name = kmalloc(size, GFP_KERNEL); | ||
288 | if (!class_name) | ||
289 | return NULL; | ||
290 | |||
291 | strcpy(class_name, name); | ||
292 | strcat(class_name, ":"); | ||
293 | strcat(class_name, kobject_name(kobj)); | ||
294 | return class_name; | ||
295 | } | ||
296 | #endif | ||
297 | |||
298 | /** | 279 | /** |
299 | * class_dev_iter_init - initialize class device iterator | 280 | * class_dev_iter_init - initialize class device iterator |
300 | * @iter: class iterator to initialize | 281 | * @iter: class iterator to initialize |
@@ -314,7 +295,7 @@ void class_dev_iter_init(struct class_dev_iter *iter, struct class *class, | |||
314 | 295 | ||
315 | if (start) | 296 | if (start) |
316 | start_knode = &start->knode_class; | 297 | start_knode = &start->knode_class; |
317 | klist_iter_init_node(&class->p->class_devices, &iter->ki, start_knode); | 298 | klist_iter_init_node(&class->p->klist_devices, &iter->ki, start_knode); |
318 | iter->type = type; | 299 | iter->type = type; |
319 | } | 300 | } |
320 | EXPORT_SYMBOL_GPL(class_dev_iter_init); | 301 | EXPORT_SYMBOL_GPL(class_dev_iter_init); |
@@ -501,8 +482,8 @@ void class_interface_unregister(struct class_interface *class_intf) | |||
501 | class_put(parent); | 482 | class_put(parent); |
502 | } | 483 | } |
503 | 484 | ||
504 | ssize_t show_class_attr_string(struct class *class, struct class_attribute *attr, | 485 | ssize_t show_class_attr_string(struct class *class, |
505 | char *buf) | 486 | struct class_attribute *attr, char *buf) |
506 | { | 487 | { |
507 | struct class_attribute_string *cs; | 488 | struct class_attribute_string *cs; |
508 | cs = container_of(attr, struct class_attribute_string, attr); | 489 | cs = container_of(attr, struct class_attribute_string, attr); |
diff --git a/drivers/base/core.c b/drivers/base/core.c
index d1b2c9adc271..bc8729d603a7 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -26,6 +26,19 @@ | |||
26 | #include "base.h" | 26 | #include "base.h" |
27 | #include "power/power.h" | 27 | #include "power/power.h" |
28 | 28 | ||
29 | #ifdef CONFIG_SYSFS_DEPRECATED | ||
30 | #ifdef CONFIG_SYSFS_DEPRECATED_V2 | ||
31 | long sysfs_deprecated = 1; | ||
32 | #else | ||
33 | long sysfs_deprecated = 0; | ||
34 | #endif | ||
35 | static __init int sysfs_deprecated_setup(char *arg) | ||
36 | { | ||
37 | return strict_strtol(arg, 10, &sysfs_deprecated); | ||
38 | } | ||
39 | early_param("sysfs.deprecated", sysfs_deprecated_setup); | ||
40 | #endif | ||
41 | |||
29 | int (*platform_notify)(struct device *dev) = NULL; | 42 | int (*platform_notify)(struct device *dev) = NULL; |
30 | int (*platform_notify_remove)(struct device *dev) = NULL; | 43 | int (*platform_notify_remove)(struct device *dev) = NULL; |
31 | static struct kobject *dev_kobj; | 44 | static struct kobject *dev_kobj; |
@@ -203,37 +216,6 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj, | |||
203 | if (dev->driver) | 216 | if (dev->driver) |
204 | add_uevent_var(env, "DRIVER=%s", dev->driver->name); | 217 | add_uevent_var(env, "DRIVER=%s", dev->driver->name); |
205 | 218 | ||
206 | #ifdef CONFIG_SYSFS_DEPRECATED | ||
207 | if (dev->class) { | ||
208 | struct device *parent = dev->parent; | ||
209 | |||
210 | /* find first bus device in parent chain */ | ||
211 | while (parent && !parent->bus) | ||
212 | parent = parent->parent; | ||
213 | if (parent && parent->bus) { | ||
214 | const char *path; | ||
215 | |||
216 | path = kobject_get_path(&parent->kobj, GFP_KERNEL); | ||
217 | if (path) { | ||
218 | add_uevent_var(env, "PHYSDEVPATH=%s", path); | ||
219 | kfree(path); | ||
220 | } | ||
221 | |||
222 | add_uevent_var(env, "PHYSDEVBUS=%s", parent->bus->name); | ||
223 | |||
224 | if (parent->driver) | ||
225 | add_uevent_var(env, "PHYSDEVDRIVER=%s", | ||
226 | parent->driver->name); | ||
227 | } | ||
228 | } else if (dev->bus) { | ||
229 | add_uevent_var(env, "PHYSDEVBUS=%s", dev->bus->name); | ||
230 | |||
231 | if (dev->driver) | ||
232 | add_uevent_var(env, "PHYSDEVDRIVER=%s", | ||
233 | dev->driver->name); | ||
234 | } | ||
235 | #endif | ||
236 | |||
237 | /* have the bus specific function add its stuff */ | 219 | /* have the bus specific function add its stuff */ |
238 | if (dev->bus && dev->bus->uevent) { | 220 | if (dev->bus && dev->bus->uevent) { |
239 | retval = dev->bus->uevent(dev, env); | 221 | retval = dev->bus->uevent(dev, env); |
@@ -251,7 +233,7 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj, | |||
251 | __func__, retval); | 233 | __func__, retval); |
252 | } | 234 | } |
253 | 235 | ||
254 | /* have the device type specific fuction add its stuff */ | 236 | /* have the device type specific function add its stuff */ |
255 | if (dev->type && dev->type->uevent) { | 237 | if (dev->type && dev->type->uevent) { |
256 | retval = dev->type->uevent(dev, env); | 238 | retval = dev->type->uevent(dev, env); |
257 | if (retval) | 239 | if (retval) |
@@ -356,6 +338,35 @@ static void device_remove_attributes(struct device *dev, | |||
356 | device_remove_file(dev, &attrs[i]); | 338 | device_remove_file(dev, &attrs[i]); |
357 | } | 339 | } |
358 | 340 | ||
341 | static int device_add_bin_attributes(struct device *dev, | ||
342 | struct bin_attribute *attrs) | ||
343 | { | ||
344 | int error = 0; | ||
345 | int i; | ||
346 | |||
347 | if (attrs) { | ||
348 | for (i = 0; attr_name(attrs[i]); i++) { | ||
349 | error = device_create_bin_file(dev, &attrs[i]); | ||
350 | if (error) | ||
351 | break; | ||
352 | } | ||
353 | if (error) | ||
354 | while (--i >= 0) | ||
355 | device_remove_bin_file(dev, &attrs[i]); | ||
356 | } | ||
357 | return error; | ||
358 | } | ||
359 | |||
360 | static void device_remove_bin_attributes(struct device *dev, | ||
361 | struct bin_attribute *attrs) | ||
362 | { | ||
363 | int i; | ||
364 | |||
365 | if (attrs) | ||
366 | for (i = 0; attr_name(attrs[i]); i++) | ||
367 | device_remove_bin_file(dev, &attrs[i]); | ||
368 | } | ||
369 | |||
359 | static int device_add_groups(struct device *dev, | 370 | static int device_add_groups(struct device *dev, |
360 | const struct attribute_group **groups) | 371 | const struct attribute_group **groups) |
361 | { | 372 | { |
@@ -389,19 +400,22 @@ static void device_remove_groups(struct device *dev, | |||
389 | static int device_add_attrs(struct device *dev) | 400 | static int device_add_attrs(struct device *dev) |
390 | { | 401 | { |
391 | struct class *class = dev->class; | 402 | struct class *class = dev->class; |
392 | struct device_type *type = dev->type; | 403 | const struct device_type *type = dev->type; |
393 | int error; | 404 | int error; |
394 | 405 | ||
395 | if (class) { | 406 | if (class) { |
396 | error = device_add_attributes(dev, class->dev_attrs); | 407 | error = device_add_attributes(dev, class->dev_attrs); |
397 | if (error) | 408 | if (error) |
398 | return error; | 409 | return error; |
410 | error = device_add_bin_attributes(dev, class->dev_bin_attrs); | ||
411 | if (error) | ||
412 | goto err_remove_class_attrs; | ||
399 | } | 413 | } |
400 | 414 | ||
401 | if (type) { | 415 | if (type) { |
402 | error = device_add_groups(dev, type->groups); | 416 | error = device_add_groups(dev, type->groups); |
403 | if (error) | 417 | if (error) |
404 | goto err_remove_class_attrs; | 418 | goto err_remove_class_bin_attrs; |
405 | } | 419 | } |
406 | 420 | ||
407 | error = device_add_groups(dev, dev->groups); | 421 | error = device_add_groups(dev, dev->groups); |
@@ -413,6 +427,9 @@ static int device_add_attrs(struct device *dev) | |||
413 | err_remove_type_groups: | 427 | err_remove_type_groups: |
414 | if (type) | 428 | if (type) |
415 | device_remove_groups(dev, type->groups); | 429 | device_remove_groups(dev, type->groups); |
430 | err_remove_class_bin_attrs: | ||
431 | if (class) | ||
432 | device_remove_bin_attributes(dev, class->dev_bin_attrs); | ||
416 | err_remove_class_attrs: | 433 | err_remove_class_attrs: |
417 | if (class) | 434 | if (class) |
418 | device_remove_attributes(dev, class->dev_attrs); | 435 | device_remove_attributes(dev, class->dev_attrs); |
@@ -423,15 +440,17 @@ static int device_add_attrs(struct device *dev) | |||
423 | static void device_remove_attrs(struct device *dev) | 440 | static void device_remove_attrs(struct device *dev) |
424 | { | 441 | { |
425 | struct class *class = dev->class; | 442 | struct class *class = dev->class; |
426 | struct device_type *type = dev->type; | 443 | const struct device_type *type = dev->type; |
427 | 444 | ||
428 | device_remove_groups(dev, dev->groups); | 445 | device_remove_groups(dev, dev->groups); |
429 | 446 | ||
430 | if (type) | 447 | if (type) |
431 | device_remove_groups(dev, type->groups); | 448 | device_remove_groups(dev, type->groups); |
432 | 449 | ||
433 | if (class) | 450 | if (class) { |
434 | device_remove_attributes(dev, class->dev_attrs); | 451 | device_remove_attributes(dev, class->dev_attrs); |
452 | device_remove_bin_attributes(dev, class->dev_bin_attrs); | ||
453 | } | ||
435 | } | 454 | } |
436 | 455 | ||
437 | 456 | ||
@@ -578,24 +597,6 @@ void device_initialize(struct device *dev) | |||
578 | set_dev_node(dev, -1); | 597 | set_dev_node(dev, -1); |
579 | } | 598 | } |
580 | 599 | ||
581 | #ifdef CONFIG_SYSFS_DEPRECATED | ||
582 | static struct kobject *get_device_parent(struct device *dev, | ||
583 | struct device *parent) | ||
584 | { | ||
585 | /* class devices without a parent live in /sys/class/<classname>/ */ | ||
586 | if (dev->class && (!parent || parent->class != dev->class)) | ||
587 | return &dev->class->p->class_subsys.kobj; | ||
588 | /* all other devices keep their parent */ | ||
589 | else if (parent) | ||
590 | return &parent->kobj; | ||
591 | |||
592 | return NULL; | ||
593 | } | ||
594 | |||
595 | static inline void cleanup_device_parent(struct device *dev) {} | ||
596 | static inline void cleanup_glue_dir(struct device *dev, | ||
597 | struct kobject *glue_dir) {} | ||
598 | #else | ||
599 | static struct kobject *virtual_device_parent(struct device *dev) | 600 | static struct kobject *virtual_device_parent(struct device *dev) |
600 | { | 601 | { |
601 | static struct kobject *virtual_dir = NULL; | 602 | static struct kobject *virtual_dir = NULL; |
@@ -646,7 +647,7 @@ class_dir_create_and_add(struct class *class, struct kobject *parent_kobj) | |||
646 | dir->class = class; | 647 | dir->class = class; |
647 | kobject_init(&dir->kobj, &class_dir_ktype); | 648 | kobject_init(&dir->kobj, &class_dir_ktype); |
648 | 649 | ||
649 | dir->kobj.kset = &class->p->class_dirs; | 650 | dir->kobj.kset = &class->p->glue_dirs; |
650 | 651 | ||
651 | retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name); | 652 | retval = kobject_add(&dir->kobj, parent_kobj, "%s", class->name); |
652 | if (retval < 0) { | 653 | if (retval < 0) { |
@@ -666,6 +667,15 @@ static struct kobject *get_device_parent(struct device *dev, | |||
666 | struct kobject *parent_kobj; | 667 | struct kobject *parent_kobj; |
667 | struct kobject *k; | 668 | struct kobject *k; |
668 | 669 | ||
670 | #ifdef CONFIG_BLOCK | ||
671 | /* block disks show up in /sys/block */ | ||
672 | if (sysfs_deprecated && dev->class == &block_class) { | ||
673 | if (parent && parent->class == &block_class) | ||
674 | return &parent->kobj; | ||
675 | return &block_class.p->subsys.kobj; | ||
676 | } | ||
677 | #endif | ||
678 | |||
669 | /* | 679 | /* |
670 | * If we have no parent, we live in "virtual". | 680 | * If we have no parent, we live in "virtual". |
671 | * Class-devices with a non class-device as parent, live | 681 | * Class-devices with a non class-device as parent, live |
@@ -681,13 +691,13 @@ static struct kobject *get_device_parent(struct device *dev, | |||
681 | mutex_lock(&gdp_mutex); | 691 | mutex_lock(&gdp_mutex); |
682 | 692 | ||
683 | /* find our class-directory at the parent and reference it */ | 693 | /* find our class-directory at the parent and reference it */ |
684 | spin_lock(&dev->class->p->class_dirs.list_lock); | 694 | spin_lock(&dev->class->p->glue_dirs.list_lock); |
685 | list_for_each_entry(k, &dev->class->p->class_dirs.list, entry) | 695 | list_for_each_entry(k, &dev->class->p->glue_dirs.list, entry) |
686 | if (k->parent == parent_kobj) { | 696 | if (k->parent == parent_kobj) { |
687 | kobj = kobject_get(k); | 697 | kobj = kobject_get(k); |
688 | break; | 698 | break; |
689 | } | 699 | } |
690 | spin_unlock(&dev->class->p->class_dirs.list_lock); | 700 | spin_unlock(&dev->class->p->glue_dirs.list_lock); |
691 | if (kobj) { | 701 | if (kobj) { |
692 | mutex_unlock(&gdp_mutex); | 702 | mutex_unlock(&gdp_mutex); |
693 | return kobj; | 703 | return kobj; |
@@ -709,7 +719,7 @@ static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir) | |||
709 | { | 719 | { |
710 | /* see if we live in a "glue" directory */ | 720 | /* see if we live in a "glue" directory */ |
711 | if (!glue_dir || !dev->class || | 721 | if (!glue_dir || !dev->class || |
712 | glue_dir->kset != &dev->class->p->class_dirs) | 722 | glue_dir->kset != &dev->class->p->glue_dirs) |
713 | return; | 723 | return; |
714 | 724 | ||
715 | kobject_put(glue_dir); | 725 | kobject_put(glue_dir); |
@@ -719,7 +729,6 @@ static void cleanup_device_parent(struct device *dev) | |||
719 | { | 729 | { |
720 | cleanup_glue_dir(dev, dev->kobj.parent); | 730 | cleanup_glue_dir(dev, dev->kobj.parent); |
721 | } | 731 | } |
722 | #endif | ||
723 | 732 | ||
724 | static void setup_parent(struct device *dev, struct device *parent) | 733 | static void setup_parent(struct device *dev, struct device *parent) |
725 | { | 734 | { |
@@ -737,75 +746,34 @@ static int device_add_class_symlinks(struct device *dev) | |||
737 | return 0; | 746 | return 0; |
738 | 747 | ||
739 | error = sysfs_create_link(&dev->kobj, | 748 | error = sysfs_create_link(&dev->kobj, |
740 | &dev->class->p->class_subsys.kobj, | 749 | &dev->class->p->subsys.kobj, |
741 | "subsystem"); | 750 | "subsystem"); |
742 | if (error) | 751 | if (error) |
743 | goto out; | 752 | goto out; |
744 | 753 | ||
745 | #ifdef CONFIG_SYSFS_DEPRECATED | ||
746 | /* stacked class devices need a symlink in the class directory */ | ||
747 | if (dev->kobj.parent != &dev->class->p->class_subsys.kobj && | ||
748 | device_is_not_partition(dev)) { | ||
749 | error = sysfs_create_link(&dev->class->p->class_subsys.kobj, | ||
750 | &dev->kobj, dev_name(dev)); | ||
751 | if (error) | ||
752 | goto out_subsys; | ||
753 | } | ||
754 | |||
755 | if (dev->parent && device_is_not_partition(dev)) { | 754 | if (dev->parent && device_is_not_partition(dev)) { |
756 | struct device *parent = dev->parent; | 755 | error = sysfs_create_link(&dev->kobj, &dev->parent->kobj, |
757 | char *class_name; | ||
758 | |||
759 | /* | ||
760 | * stacked class devices have the 'device' link | ||
761 | * pointing to the bus device instead of the parent | ||
762 | */ | ||
763 | while (parent->class && !parent->bus && parent->parent) | ||
764 | parent = parent->parent; | ||
765 | |||
766 | error = sysfs_create_link(&dev->kobj, | ||
767 | &parent->kobj, | ||
768 | "device"); | 756 | "device"); |
769 | if (error) | 757 | if (error) |
770 | goto out_busid; | 758 | goto out_subsys; |
771 | |||
772 | class_name = make_class_name(dev->class->name, | ||
773 | &dev->kobj); | ||
774 | if (class_name) | ||
775 | error = sysfs_create_link(&dev->parent->kobj, | ||
776 | &dev->kobj, class_name); | ||
777 | kfree(class_name); | ||
778 | if (error) | ||
779 | goto out_device; | ||
780 | } | 759 | } |
781 | return 0; | ||
782 | 760 | ||
783 | out_device: | 761 | #ifdef CONFIG_BLOCK |
784 | if (dev->parent && device_is_not_partition(dev)) | 762 | /* /sys/block has directories and does not need symlinks */ |
785 | sysfs_remove_link(&dev->kobj, "device"); | 763 | if (sysfs_deprecated && dev->class == &block_class) |
786 | out_busid: | 764 | return 0; |
787 | if (dev->kobj.parent != &dev->class->p->class_subsys.kobj && | 765 | #endif |
788 | device_is_not_partition(dev)) | 766 | |
789 | sysfs_delete_link(&dev->class->p->class_subsys.kobj, &dev->kobj, | ||
790 | dev_name(dev)); | ||
791 | #else | ||
792 | /* link in the class directory pointing to the device */ | 767 | /* link in the class directory pointing to the device */ |
793 | error = sysfs_create_link(&dev->class->p->class_subsys.kobj, | 768 | error = sysfs_create_link(&dev->class->p->subsys.kobj, |
794 | &dev->kobj, dev_name(dev)); | 769 | &dev->kobj, dev_name(dev)); |
795 | if (error) | 770 | if (error) |
796 | goto out_subsys; | 771 | goto out_device; |
797 | 772 | ||
798 | if (dev->parent && device_is_not_partition(dev)) { | ||
799 | error = sysfs_create_link(&dev->kobj, &dev->parent->kobj, | ||
800 | "device"); | ||
801 | if (error) | ||
802 | goto out_busid; | ||
803 | } | ||
804 | return 0; | 773 | return 0; |
805 | 774 | ||
806 | out_busid: | 775 | out_device: |
807 | sysfs_delete_link(&dev->class->p->class_subsys.kobj, &dev->kobj, dev_name(dev)); | 776 | sysfs_remove_link(&dev->kobj, "device"); |
808 | #endif | ||
809 | 777 | ||
810 | out_subsys: | 778 | out_subsys: |
811 | sysfs_remove_link(&dev->kobj, "subsystem"); | 779 | sysfs_remove_link(&dev->kobj, "subsystem"); |
@@ -818,30 +786,14 @@ static void device_remove_class_symlinks(struct device *dev) | |||
818 | if (!dev->class) | 786 | if (!dev->class) |
819 | return; | 787 | return; |
820 | 788 | ||
821 | #ifdef CONFIG_SYSFS_DEPRECATED | ||
822 | if (dev->parent && device_is_not_partition(dev)) { | ||
823 | char *class_name; | ||
824 | |||
825 | class_name = make_class_name(dev->class->name, &dev->kobj); | ||
826 | if (class_name) { | ||
827 | sysfs_remove_link(&dev->parent->kobj, class_name); | ||
828 | kfree(class_name); | ||
829 | } | ||
830 | sysfs_remove_link(&dev->kobj, "device"); | ||
831 | } | ||
832 | |||
833 | if (dev->kobj.parent != &dev->class->p->class_subsys.kobj && | ||
834 | device_is_not_partition(dev)) | ||
835 | sysfs_delete_link(&dev->class->p->class_subsys.kobj, &dev->kobj, | ||
836 | dev_name(dev)); | ||
837 | #else | ||
838 | if (dev->parent && device_is_not_partition(dev)) | 789 | if (dev->parent && device_is_not_partition(dev)) |
839 | sysfs_remove_link(&dev->kobj, "device"); | 790 | sysfs_remove_link(&dev->kobj, "device"); |
840 | |||
841 | sysfs_delete_link(&dev->class->p->class_subsys.kobj, &dev->kobj, dev_name(dev)); | ||
842 | #endif | ||
843 | |||
844 | sysfs_remove_link(&dev->kobj, "subsystem"); | 791 | sysfs_remove_link(&dev->kobj, "subsystem"); |
792 | #ifdef CONFIG_BLOCK | ||
793 | if (sysfs_deprecated && dev->class == &block_class) | ||
794 | return; | ||
795 | #endif | ||
796 | sysfs_delete_link(&dev->class->p->subsys.kobj, &dev->kobj, dev_name(dev)); | ||
845 | } | 797 | } |
846 | 798 | ||
847 | /** | 799 | /** |
@@ -1032,7 +984,7 @@ int device_add(struct device *dev) | |||
1032 | mutex_lock(&dev->class->p->class_mutex); | 984 | mutex_lock(&dev->class->p->class_mutex); |
1033 | /* tie the class to the device */ | 985 | /* tie the class to the device */ |
1034 | klist_add_tail(&dev->knode_class, | 986 | klist_add_tail(&dev->knode_class, |
1035 | &dev->class->p->class_devices); | 987 | &dev->class->p->klist_devices); |
1036 | 988 | ||
1037 | /* notify any interfaces that the device is here */ | 989 | /* notify any interfaces that the device is here */ |
1038 | list_for_each_entry(class_intf, | 990 | list_for_each_entry(class_intf, |
@@ -1362,13 +1314,15 @@ EXPORT_SYMBOL_GPL(put_device); | |||
1362 | EXPORT_SYMBOL_GPL(device_create_file); | 1314 | EXPORT_SYMBOL_GPL(device_create_file); |
1363 | EXPORT_SYMBOL_GPL(device_remove_file); | 1315 | EXPORT_SYMBOL_GPL(device_remove_file); |
1364 | 1316 | ||
1365 | struct root_device | 1317 | struct root_device { |
1366 | { | ||
1367 | struct device dev; | 1318 | struct device dev; |
1368 | struct module *owner; | 1319 | struct module *owner; |
1369 | }; | 1320 | }; |
1370 | 1321 | ||
1371 | #define to_root_device(dev) container_of(dev, struct root_device, dev) | 1322 | inline struct root_device *to_root_device(struct device *d) |
1323 | { | ||
1324 | return container_of(d, struct root_device, dev); | ||
1325 | } | ||
1372 | 1326 | ||
1373 | static void root_device_release(struct device *dev) | 1327 | static void root_device_release(struct device *dev) |
1374 | { | 1328 | { |
@@ -1598,6 +1552,35 @@ EXPORT_SYMBOL_GPL(device_destroy); | |||
1598 | * exclusion between two different calls of device_rename | 1552 | * exclusion between two different calls of device_rename |
1599 | * on the same device to ensure that new_name is valid and | 1553 | * on the same device to ensure that new_name is valid and |
1600 | * won't conflict with other devices. | 1554 | * won't conflict with other devices. |
1555 | * | ||
1556 | * Note: Don't call this function. Currently, the networking layer calls this | ||
1557 | * function, but that will change. The following text from Kay Sievers offers | ||
1558 | * some insight: | ||
1559 | * | ||
1560 | * Renaming devices is racy at many levels, symlinks and other stuff are not | ||
1561 | * replaced atomically, and you get a "move" uevent, but it's not easy to | ||
1562 | * connect the event to the old and new device. Device nodes are not renamed at | ||
1563 | * all, there isn't even support for that in the kernel now. | ||
1564 | * | ||
1565 | * In the meantime, during renaming, your target name might be taken by another | ||
1566 | * driver, creating conflicts. Or the old name is taken directly after you | ||
1567 | * renamed it -- then you get events for the same DEVPATH, before you even see | ||
1568 | * the "move" event. It's just a mess, and nothing new should ever rely on | ||
1569 | * kernel device renaming. Besides that, it's not even implemented now for | ||
1570 | * other things than (driver-core wise very simple) network devices. | ||
1571 | * | ||
1572 | * We are currently about to change network renaming in udev to completely | ||
1573 | * disallow renaming of devices in the same namespace as the kernel uses, | ||
1574 | * because we can't solve the problems properly, that arise with swapping names | ||
1575 | * of multiple interfaces without races. Means, renaming of eth[0-9]* will only | ||
1576 | * be allowed to some other name than eth[0-9]*, for the aforementioned | ||
1577 | * reasons. | ||
1578 | * | ||
1579 | * Make up a "real" name in the driver before you register anything, or add | ||
1580 | * some other attributes for userspace to find the device, or use udev to add | ||
1581 | * symlinks -- but never rename kernel devices later, it's a complete mess. We | ||
1582 | * don't even want to get into that and try to implement the missing pieces in | ||
1583 | * the core. We really have other pieces to fix in the driver core mess. :) | ||
1601 | */ | 1584 | */ |
1602 | int device_rename(struct device *dev, const char *new_name) | 1585 | int device_rename(struct device *dev, const char *new_name) |
1603 | { | 1586 | { |
@@ -1613,41 +1596,23 @@ int device_rename(struct device *dev, const char *new_name) | |||
1613 | pr_debug("device: '%s': %s: renaming to '%s'\n", dev_name(dev), | 1596 | pr_debug("device: '%s': %s: renaming to '%s'\n", dev_name(dev), |
1614 | __func__, new_name); | 1597 | __func__, new_name); |
1615 | 1598 | ||
1616 | #ifdef CONFIG_SYSFS_DEPRECATED | ||
1617 | if ((dev->class) && (dev->parent)) | ||
1618 | old_class_name = make_class_name(dev->class->name, &dev->kobj); | ||
1619 | #endif | ||
1620 | |||
1621 | old_device_name = kstrdup(dev_name(dev), GFP_KERNEL); | 1599 | old_device_name = kstrdup(dev_name(dev), GFP_KERNEL); |
1622 | if (!old_device_name) { | 1600 | if (!old_device_name) { |
1623 | error = -ENOMEM; | 1601 | error = -ENOMEM; |
1624 | goto out; | 1602 | goto out; |
1625 | } | 1603 | } |
1626 | 1604 | ||
1627 | #ifndef CONFIG_SYSFS_DEPRECATED | ||
1628 | if (dev->class) { | 1605 | if (dev->class) { |
1629 | error = sysfs_rename_link(&dev->class->p->class_subsys.kobj, | 1606 | error = sysfs_rename_link(&dev->class->p->subsys.kobj, |
1630 | &dev->kobj, old_device_name, new_name); | 1607 | &dev->kobj, old_device_name, new_name); |
1631 | if (error) | 1608 | if (error) |
1632 | goto out; | 1609 | goto out; |
1633 | } | 1610 | } |
1634 | #endif | 1611 | |
1635 | error = kobject_rename(&dev->kobj, new_name); | 1612 | error = kobject_rename(&dev->kobj, new_name); |
1636 | if (error) | 1613 | if (error) |
1637 | goto out; | 1614 | goto out; |
1638 | 1615 | ||
1639 | #ifdef CONFIG_SYSFS_DEPRECATED | ||
1640 | if (old_class_name) { | ||
1641 | new_class_name = make_class_name(dev->class->name, &dev->kobj); | ||
1642 | if (new_class_name) { | ||
1643 | error = sysfs_rename_link(&dev->parent->kobj, | ||
1644 | &dev->kobj, | ||
1645 | old_class_name, | ||
1646 | new_class_name); | ||
1647 | } | ||
1648 | } | ||
1649 | #endif | ||
1650 | |||
1651 | out: | 1616 | out: |
1652 | put_device(dev); | 1617 | put_device(dev); |
1653 | 1618 | ||
@@ -1664,40 +1629,13 @@ static int device_move_class_links(struct device *dev, | |||
1664 | struct device *new_parent) | 1629 | struct device *new_parent) |
1665 | { | 1630 | { |
1666 | int error = 0; | 1631 | int error = 0; |
1667 | #ifdef CONFIG_SYSFS_DEPRECATED | ||
1668 | char *class_name; | ||
1669 | 1632 | ||
1670 | class_name = make_class_name(dev->class->name, &dev->kobj); | ||
1671 | if (!class_name) { | ||
1672 | error = -ENOMEM; | ||
1673 | goto out; | ||
1674 | } | ||
1675 | if (old_parent) { | ||
1676 | sysfs_remove_link(&dev->kobj, "device"); | ||
1677 | sysfs_remove_link(&old_parent->kobj, class_name); | ||
1678 | } | ||
1679 | if (new_parent) { | ||
1680 | error = sysfs_create_link(&dev->kobj, &new_parent->kobj, | ||
1681 | "device"); | ||
1682 | if (error) | ||
1683 | goto out; | ||
1684 | error = sysfs_create_link(&new_parent->kobj, &dev->kobj, | ||
1685 | class_name); | ||
1686 | if (error) | ||
1687 | sysfs_remove_link(&dev->kobj, "device"); | ||
1688 | } else | ||
1689 | error = 0; | ||
1690 | out: | ||
1691 | kfree(class_name); | ||
1692 | return error; | ||
1693 | #else | ||
1694 | if (old_parent) | 1633 | if (old_parent) |
1695 | sysfs_remove_link(&dev->kobj, "device"); | 1634 | sysfs_remove_link(&dev->kobj, "device"); |
1696 | if (new_parent) | 1635 | if (new_parent) |
1697 | error = sysfs_create_link(&dev->kobj, &new_parent->kobj, | 1636 | error = sysfs_create_link(&dev->kobj, &new_parent->kobj, |
1698 | "device"); | 1637 | "device"); |
1699 | return error; | 1638 | return error; |
1700 | #endif | ||
1701 | } | 1639 | } |
1702 | 1640 | ||
1703 | /** | 1641 | /** |
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index da57ee9d63fe..6658da743c3a 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -245,6 +245,10 @@ int device_attach(struct device *dev) | |||
245 | 245 | ||
246 | device_lock(dev); | 246 | device_lock(dev); |
247 | if (dev->driver) { | 247 | if (dev->driver) { |
248 | if (klist_node_attached(&dev->p->knode_driver)) { | ||
249 | ret = 1; | ||
250 | goto out_unlock; | ||
251 | } | ||
248 | ret = device_bind_driver(dev); | 252 | ret = device_bind_driver(dev); |
249 | if (ret == 0) | 253 | if (ret == 0) |
250 | ret = 1; | 254 | ret = 1; |
@@ -257,6 +261,7 @@ int device_attach(struct device *dev) | |||
257 | ret = bus_for_each_drv(dev->bus, NULL, dev, __device_attach); | 261 | ret = bus_for_each_drv(dev->bus, NULL, dev, __device_attach); |
258 | pm_runtime_put_sync(dev); | 262 | pm_runtime_put_sync(dev); |
259 | } | 263 | } |
264 | out_unlock: | ||
260 | device_unlock(dev); | 265 | device_unlock(dev); |
261 | return ret; | 266 | return ret; |
262 | } | 267 | } |
@@ -316,8 +321,7 @@ static void __device_release_driver(struct device *dev) | |||
316 | 321 | ||
317 | drv = dev->driver; | 322 | drv = dev->driver; |
318 | if (drv) { | 323 | if (drv) { |
319 | pm_runtime_get_noresume(dev); | 324 | pm_runtime_get_sync(dev); |
320 | pm_runtime_barrier(dev); | ||
321 | 325 | ||
322 | driver_sysfs_remove(dev); | 326 | driver_sysfs_remove(dev); |
323 | 327 | ||
@@ -326,6 +330,8 @@ static void __device_release_driver(struct device *dev) | |||
326 | BUS_NOTIFY_UNBIND_DRIVER, | 330 | BUS_NOTIFY_UNBIND_DRIVER, |
327 | dev); | 331 | dev); |
328 | 332 | ||
333 | pm_runtime_put_sync(dev); | ||
334 | |||
329 | if (dev->bus && dev->bus->remove) | 335 | if (dev->bus && dev->bus->remove) |
330 | dev->bus->remove(dev); | 336 | dev->bus->remove(dev); |
331 | else if (drv->remove) | 337 | else if (drv->remove) |
@@ -338,7 +344,6 @@ static void __device_release_driver(struct device *dev) | |||
338 | BUS_NOTIFY_UNBOUND_DRIVER, | 344 | BUS_NOTIFY_UNBOUND_DRIVER, |
339 | dev); | 345 | dev); |
340 | 346 | ||
341 | pm_runtime_put_sync(dev); | ||
342 | } | 347 | } |
343 | } | 348 | } |
344 | 349 | ||
@@ -408,17 +413,16 @@ void *dev_get_drvdata(const struct device *dev) | |||
408 | } | 413 | } |
409 | EXPORT_SYMBOL(dev_get_drvdata); | 414 | EXPORT_SYMBOL(dev_get_drvdata); |
410 | 415 | ||
411 | void dev_set_drvdata(struct device *dev, void *data) | 416 | int dev_set_drvdata(struct device *dev, void *data) |
412 | { | 417 | { |
413 | int error; | 418 | int error; |
414 | 419 | ||
415 | if (!dev) | ||
416 | return; | ||
417 | if (!dev->p) { | 420 | if (!dev->p) { |
418 | error = device_private_init(dev); | 421 | error = device_private_init(dev); |
419 | if (error) | 422 | if (error) |
420 | return; | 423 | return error; |
421 | } | 424 | } |
422 | dev->p->driver_data = data; | 425 | dev->p->driver_data = data; |
426 | return 0; | ||
423 | } | 427 | } |
424 | EXPORT_SYMBOL(dev_set_drvdata); | 428 | EXPORT_SYMBOL(dev_set_drvdata); |
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index af0600143d1c..82bbb5967aa9 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -29,33 +29,33 @@ | |||
29 | static struct vfsmount *dev_mnt; | 29 | static struct vfsmount *dev_mnt; |
30 | 30 | ||
31 | #if defined CONFIG_DEVTMPFS_MOUNT | 31 | #if defined CONFIG_DEVTMPFS_MOUNT |
32 | static int dev_mount = 1; | 32 | static int mount_dev = 1; |
33 | #else | 33 | #else |
34 | static int dev_mount; | 34 | static int mount_dev; |
35 | #endif | 35 | #endif |
36 | 36 | ||
37 | static DEFINE_MUTEX(dirlock); | 37 | static DEFINE_MUTEX(dirlock); |
38 | 38 | ||
39 | static int __init mount_param(char *str) | 39 | static int __init mount_param(char *str) |
40 | { | 40 | { |
41 | dev_mount = simple_strtoul(str, NULL, 0); | 41 | mount_dev = simple_strtoul(str, NULL, 0); |
42 | return 1; | 42 | return 1; |
43 | } | 43 | } |
44 | __setup("devtmpfs.mount=", mount_param); | 44 | __setup("devtmpfs.mount=", mount_param); |
45 | 45 | ||
46 | static int dev_get_sb(struct file_system_type *fs_type, int flags, | 46 | static struct dentry *dev_mount(struct file_system_type *fs_type, int flags, |
47 | const char *dev_name, void *data, struct vfsmount *mnt) | 47 | const char *dev_name, void *data) |
48 | { | 48 | { |
49 | #ifdef CONFIG_TMPFS | 49 | #ifdef CONFIG_TMPFS |
50 | return get_sb_single(fs_type, flags, data, shmem_fill_super, mnt); | 50 | return mount_single(fs_type, flags, data, shmem_fill_super); |
51 | #else | 51 | #else |
52 | return get_sb_single(fs_type, flags, data, ramfs_fill_super, mnt); | 52 | return mount_single(fs_type, flags, data, ramfs_fill_super); |
53 | #endif | 53 | #endif |
54 | } | 54 | } |
55 | 55 | ||
56 | static struct file_system_type dev_fs_type = { | 56 | static struct file_system_type dev_fs_type = { |
57 | .name = "devtmpfs", | 57 | .name = "devtmpfs", |
58 | .get_sb = dev_get_sb, | 58 | .mount = dev_mount, |
59 | .kill_sb = kill_litter_super, | 59 | .kill_sb = kill_litter_super, |
60 | }; | 60 | }; |
61 | 61 | ||
@@ -351,7 +351,7 @@ int devtmpfs_mount(const char *mntdir) | |||
351 | { | 351 | { |
352 | int err; | 352 | int err; |
353 | 353 | ||
354 | if (!dev_mount) | 354 | if (!mount_dev) |
355 | return 0; | 355 | return 0; |
356 | 356 | ||
357 | if (!dev_mnt) | 357 | if (!dev_mnt) |
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 40af43ebd92d..bbb03e6f7255 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -521,6 +521,11 @@ static int _request_firmware(const struct firmware **firmware_p, | |||
521 | if (!firmware_p) | 521 | if (!firmware_p) |
522 | return -EINVAL; | 522 | return -EINVAL; |
523 | 523 | ||
524 | if (WARN_ON(usermodehelper_is_disabled())) { | ||
525 | dev_err(device, "firmware: %s will not be loaded\n", name); | ||
526 | return -EBUSY; | ||
527 | } | ||
528 | |||
524 | *firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL); | 529 | *firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL); |
525 | if (!firmware) { | 530 | if (!firmware) { |
526 | dev_err(device, "%s: kmalloc(struct firmware) failed\n", | 531 | dev_err(device, "%s: kmalloc(struct firmware) failed\n", |
@@ -593,8 +598,7 @@ int | |||
593 | request_firmware(const struct firmware **firmware_p, const char *name, | 598 | request_firmware(const struct firmware **firmware_p, const char *name, |
594 | struct device *device) | 599 | struct device *device) |
595 | { | 600 | { |
596 | int uevent = 1; | 601 | return _request_firmware(firmware_p, name, device, true, false); |
597 | return _request_firmware(firmware_p, name, device, uevent, false); | ||
598 | } | 602 | } |
599 | 603 | ||
600 | /** | 604 | /** |
@@ -618,7 +622,7 @@ struct firmware_work { | |||
618 | struct device *device; | 622 | struct device *device; |
619 | void *context; | 623 | void *context; |
620 | void (*cont)(const struct firmware *fw, void *context); | 624 | void (*cont)(const struct firmware *fw, void *context); |
621 | int uevent; | 625 | bool uevent; |
622 | }; | 626 | }; |
623 | 627 | ||
624 | static int request_firmware_work_func(void *arg) | 628 | static int request_firmware_work_func(void *arg) |
@@ -661,7 +665,7 @@ static int request_firmware_work_func(void *arg) | |||
661 | **/ | 665 | **/ |
662 | int | 666 | int |
663 | request_firmware_nowait( | 667 | request_firmware_nowait( |
664 | struct module *module, int uevent, | 668 | struct module *module, bool uevent, |
665 | const char *name, struct device *device, gfp_t gfp, void *context, | 669 | const char *name, struct device *device, gfp_t gfp, void *context, |
666 | void (*cont)(const struct firmware *fw, void *context)) | 670 | void (*cont)(const struct firmware *fw, void *context)) |
667 | { | 671 | { |
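Note: with the uevent argument now typed as bool, asynchronous firmware requests read more clearly at call sites. A hedged usage sketch (struct example_dev, the firmware path, and the callback are made up for illustration):

    static void example_fw_cont(const struct firmware *fw, void *context)
    {
            struct example_dev *edev = context;

            if (!fw)
                    return;                 /* load failed or was aborted */
            /* ... program fw->data (fw->size bytes) into the device ... */
            release_firmware(fw);
    }

    static int example_load_fw(struct example_dev *edev)
    {
            /* true => announce the request to user space via a uevent */
            return request_firmware_nowait(THIS_MODULE, true, "example/fw.bin",
                                           &edev->dev, GFP_KERNEL, edev,
                                           example_fw_cont);
    }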
diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 933442f40321..45d7c8fc73bd 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c | |||
@@ -27,8 +27,17 @@ | |||
27 | #include <asm/atomic.h> | 27 | #include <asm/atomic.h> |
28 | #include <asm/uaccess.h> | 28 | #include <asm/uaccess.h> |
29 | 29 | ||
30 | static DEFINE_MUTEX(mem_sysfs_mutex); | ||
31 | |||
30 | #define MEMORY_CLASS_NAME "memory" | 32 | #define MEMORY_CLASS_NAME "memory" |
31 | 33 | ||
34 | static int sections_per_block; | ||
35 | |||
36 | static inline int base_memory_block_id(int section_nr) | ||
37 | { | ||
38 | return section_nr / sections_per_block; | ||
39 | } | ||
40 | |||
32 | static struct sysdev_class memory_sysdev_class = { | 41 | static struct sysdev_class memory_sysdev_class = { |
33 | .name = MEMORY_CLASS_NAME, | 42 | .name = MEMORY_CLASS_NAME, |
34 | }; | 43 | }; |
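Note: sections_per_block and base_memory_block_id(), added above, define how sparsemem sections map onto the larger memory blocks exposed in sysfs. A small worked example, assuming 16 MB sections grouped into 128 MB blocks (illustrative sizes, not taken from this patch):

    /* Illustrative arithmetic only; the sizes are assumptions. */
    static void example_block_mapping(void)
    {
            unsigned long block_sz   = 128UL << 20;           /* memory_block_size_bytes() */
            unsigned long section_sz = 16UL << 20;            /* MIN_MEMORY_BLOCK_SIZE     */
            int sections_per_block = block_sz / section_sz;   /* 8                         */
            int section_nr = 21;

            int block_id  = section_nr / sections_per_block;      /* 2 -> sysfs "memory2" */
            int start_scn = block_id * sections_per_block;        /* 16                   */
            int end_scn   = start_scn + sections_per_block - 1;   /* 23                   */

            (void)start_scn;
            (void)end_scn;
    }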
@@ -38,7 +47,8 @@ static const char *memory_uevent_name(struct kset *kset, struct kobject *kobj) | |||
38 | return MEMORY_CLASS_NAME; | 47 | return MEMORY_CLASS_NAME; |
39 | } | 48 | } |
40 | 49 | ||
41 | static int memory_uevent(struct kset *kset, struct kobject *obj, struct kobj_uevent_env *env) | 50 | static int memory_uevent(struct kset *kset, struct kobject *obj, |
51 | struct kobj_uevent_env *env) | ||
42 | { | 52 | { |
43 | int retval = 0; | 53 | int retval = 0; |
44 | 54 | ||
@@ -82,39 +92,72 @@ EXPORT_SYMBOL(unregister_memory_isolate_notifier); | |||
82 | * register_memory - Setup a sysfs device for a memory block | 92 | * register_memory - Setup a sysfs device for a memory block |
83 | */ | 93 | */ |
84 | static | 94 | static |
85 | int register_memory(struct memory_block *memory, struct mem_section *section) | 95 | int register_memory(struct memory_block *memory) |
86 | { | 96 | { |
87 | int error; | 97 | int error; |
88 | 98 | ||
89 | memory->sysdev.cls = &memory_sysdev_class; | 99 | memory->sysdev.cls = &memory_sysdev_class; |
90 | memory->sysdev.id = __section_nr(section); | 100 | memory->sysdev.id = memory->start_section_nr / sections_per_block; |
91 | 101 | ||
92 | error = sysdev_register(&memory->sysdev); | 102 | error = sysdev_register(&memory->sysdev); |
93 | return error; | 103 | return error; |
94 | } | 104 | } |
95 | 105 | ||
96 | static void | 106 | static void |
97 | unregister_memory(struct memory_block *memory, struct mem_section *section) | 107 | unregister_memory(struct memory_block *memory) |
98 | { | 108 | { |
99 | BUG_ON(memory->sysdev.cls != &memory_sysdev_class); | 109 | BUG_ON(memory->sysdev.cls != &memory_sysdev_class); |
100 | BUG_ON(memory->sysdev.id != __section_nr(section)); | ||
101 | 110 | ||
102 | /* drop the ref. we got in remove_memory_block() */ | 111 | /* drop the ref. we got in remove_memory_block() */ |
103 | kobject_put(&memory->sysdev.kobj); | 112 | kobject_put(&memory->sysdev.kobj); |
104 | sysdev_unregister(&memory->sysdev); | 113 | sysdev_unregister(&memory->sysdev); |
105 | } | 114 | } |
106 | 115 | ||
116 | unsigned long __weak memory_block_size_bytes(void) | ||
117 | { | ||
118 | return MIN_MEMORY_BLOCK_SIZE; | ||
119 | } | ||
120 | |||
121 | static unsigned long get_memory_block_size(void) | ||
122 | { | ||
123 | unsigned long block_sz; | ||
124 | |||
125 | block_sz = memory_block_size_bytes(); | ||
126 | |||
127 | /* Validate block_sz is a power of 2 and not less than section size */ | ||
128 | if ((block_sz & (block_sz - 1)) || (block_sz < MIN_MEMORY_BLOCK_SIZE)) { | ||
129 | WARN_ON(1); | ||
130 | block_sz = MIN_MEMORY_BLOCK_SIZE; | ||
131 | } | ||
132 | |||
133 | return block_sz; | ||
134 | } | ||
135 | |||
107 | /* | 136 | /* |
108 | * use this as the physical section index that this memsection | 137 | * use this as the physical section index that this memsection |
109 | * uses. | 138 | * uses. |
110 | */ | 139 | */ |
111 | 140 | ||
112 | static ssize_t show_mem_phys_index(struct sys_device *dev, | 141 | static ssize_t show_mem_start_phys_index(struct sys_device *dev, |
142 | struct sysdev_attribute *attr, char *buf) | ||
143 | { | ||
144 | struct memory_block *mem = | ||
145 | container_of(dev, struct memory_block, sysdev); | ||
146 | unsigned long phys_index; | ||
147 | |||
148 | phys_index = mem->start_section_nr / sections_per_block; | ||
149 | return sprintf(buf, "%08lx\n", phys_index); | ||
150 | } | ||
151 | |||
152 | static ssize_t show_mem_end_phys_index(struct sys_device *dev, | ||
113 | struct sysdev_attribute *attr, char *buf) | 153 | struct sysdev_attribute *attr, char *buf) |
114 | { | 154 | { |
115 | struct memory_block *mem = | 155 | struct memory_block *mem = |
116 | container_of(dev, struct memory_block, sysdev); | 156 | container_of(dev, struct memory_block, sysdev); |
117 | return sprintf(buf, "%08lx\n", mem->phys_index); | 157 | unsigned long phys_index; |
158 | |||
159 | phys_index = mem->end_section_nr / sections_per_block; | ||
160 | return sprintf(buf, "%08lx\n", phys_index); | ||
118 | } | 161 | } |
119 | 162 | ||
120 | /* | 163 | /* |
@@ -123,13 +166,16 @@ static ssize_t show_mem_phys_index(struct sys_device *dev, | |||
123 | static ssize_t show_mem_removable(struct sys_device *dev, | 166 | static ssize_t show_mem_removable(struct sys_device *dev, |
124 | struct sysdev_attribute *attr, char *buf) | 167 | struct sysdev_attribute *attr, char *buf) |
125 | { | 168 | { |
126 | unsigned long start_pfn; | 169 | unsigned long i, pfn; |
127 | int ret; | 170 | int ret = 1; |
128 | struct memory_block *mem = | 171 | struct memory_block *mem = |
129 | container_of(dev, struct memory_block, sysdev); | 172 | container_of(dev, struct memory_block, sysdev); |
130 | 173 | ||
131 | start_pfn = section_nr_to_pfn(mem->phys_index); | 174 | for (i = 0; i < sections_per_block; i++) { |
132 | ret = is_mem_section_removable(start_pfn, PAGES_PER_SECTION); | 175 | pfn = section_nr_to_pfn(mem->start_section_nr + i); |
176 | ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION); | ||
177 | } | ||
178 | |||
133 | return sprintf(buf, "%d\n", ret); | 179 | return sprintf(buf, "%d\n", ret); |
134 | } | 180 | } |
135 | 181 | ||
@@ -182,17 +228,15 @@ int memory_isolate_notify(unsigned long val, void *v) | |||
182 | * OK to have direct references to sparsemem variables in here. | 228 | * OK to have direct references to sparsemem variables in here. |
183 | */ | 229 | */ |
184 | static int | 230 | static int |
185 | memory_block_action(struct memory_block *mem, unsigned long action) | 231 | memory_block_action(unsigned long phys_index, unsigned long action) |
186 | { | 232 | { |
187 | int i; | 233 | int i; |
188 | unsigned long psection; | ||
189 | unsigned long start_pfn, start_paddr; | 234 | unsigned long start_pfn, start_paddr; |
235 | unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block; | ||
190 | struct page *first_page; | 236 | struct page *first_page; |
191 | int ret; | 237 | int ret; |
192 | int old_state = mem->state; | ||
193 | 238 | ||
194 | psection = mem->phys_index; | 239 | first_page = pfn_to_page(phys_index << PFN_SECTION_SHIFT); |
195 | first_page = pfn_to_page(psection << PFN_SECTION_SHIFT); | ||
196 | 240 | ||
197 | /* | 241 | /* |
198 | * The probe routines leave the pages reserved, just | 242 | * The probe routines leave the pages reserved, just |
@@ -200,13 +244,13 @@ memory_block_action(struct memory_block *mem, unsigned long action) | |||
200 | * that way. | 244 | * that way. |
201 | */ | 245 | */ |
202 | if (action == MEM_ONLINE) { | 246 | if (action == MEM_ONLINE) { |
203 | for (i = 0; i < PAGES_PER_SECTION; i++) { | 247 | for (i = 0; i < nr_pages; i++) { |
204 | if (PageReserved(first_page+i)) | 248 | if (PageReserved(first_page+i)) |
205 | continue; | 249 | continue; |
206 | 250 | ||
207 | printk(KERN_WARNING "section number %ld page number %d " | 251 | printk(KERN_WARNING "section number %ld page number %d " |
208 | "not reserved, was it already online? \n", | 252 | "not reserved, was it already online?\n", |
209 | psection, i); | 253 | phys_index, i); |
210 | return -EBUSY; | 254 | return -EBUSY; |
211 | } | 255 | } |
212 | } | 256 | } |
@@ -214,21 +258,16 @@ memory_block_action(struct memory_block *mem, unsigned long action) | |||
214 | switch (action) { | 258 | switch (action) { |
215 | case MEM_ONLINE: | 259 | case MEM_ONLINE: |
216 | start_pfn = page_to_pfn(first_page); | 260 | start_pfn = page_to_pfn(first_page); |
217 | ret = online_pages(start_pfn, PAGES_PER_SECTION); | 261 | ret = online_pages(start_pfn, nr_pages); |
218 | break; | 262 | break; |
219 | case MEM_OFFLINE: | 263 | case MEM_OFFLINE: |
220 | mem->state = MEM_GOING_OFFLINE; | ||
221 | start_paddr = page_to_pfn(first_page) << PAGE_SHIFT; | 264 | start_paddr = page_to_pfn(first_page) << PAGE_SHIFT; |
222 | ret = remove_memory(start_paddr, | 265 | ret = remove_memory(start_paddr, |
223 | PAGES_PER_SECTION << PAGE_SHIFT); | 266 | nr_pages << PAGE_SHIFT); |
224 | if (ret) { | ||
225 | mem->state = old_state; | ||
226 | break; | ||
227 | } | ||
228 | break; | 267 | break; |
229 | default: | 268 | default: |
230 | WARN(1, KERN_WARNING "%s(%p, %ld) unknown action: %ld\n", | 269 | WARN(1, KERN_WARNING "%s(%ld, %ld) unknown action: " |
231 | __func__, mem, action, action); | 270 | "%ld\n", __func__, phys_index, action, action); |
232 | ret = -EINVAL; | 271 | ret = -EINVAL; |
233 | } | 272 | } |
234 | 273 | ||
@@ -239,6 +278,7 @@ static int memory_block_change_state(struct memory_block *mem, | |||
239 | unsigned long to_state, unsigned long from_state_req) | 278 | unsigned long to_state, unsigned long from_state_req) |
240 | { | 279 | { |
241 | int ret = 0; | 280 | int ret = 0; |
281 | |||
242 | mutex_lock(&mem->state_mutex); | 282 | mutex_lock(&mem->state_mutex); |
243 | 283 | ||
244 | if (mem->state != from_state_req) { | 284 | if (mem->state != from_state_req) { |
@@ -246,8 +286,14 @@ static int memory_block_change_state(struct memory_block *mem, | |||
246 | goto out; | 286 | goto out; |
247 | } | 287 | } |
248 | 288 | ||
249 | ret = memory_block_action(mem, to_state); | 289 | if (to_state == MEM_OFFLINE) |
250 | if (!ret) | 290 | mem->state = MEM_GOING_OFFLINE; |
291 | |||
292 | ret = memory_block_action(mem->start_section_nr, to_state); | ||
293 | |||
294 | if (ret) | ||
295 | mem->state = from_state_req; | ||
296 | else | ||
251 | mem->state = to_state; | 297 | mem->state = to_state; |
252 | 298 | ||
253 | out: | 299 | out: |
@@ -260,20 +306,15 @@ store_mem_state(struct sys_device *dev, | |||
260 | struct sysdev_attribute *attr, const char *buf, size_t count) | 306 | struct sysdev_attribute *attr, const char *buf, size_t count) |
261 | { | 307 | { |
262 | struct memory_block *mem; | 308 | struct memory_block *mem; |
263 | unsigned int phys_section_nr; | ||
264 | int ret = -EINVAL; | 309 | int ret = -EINVAL; |
265 | 310 | ||
266 | mem = container_of(dev, struct memory_block, sysdev); | 311 | mem = container_of(dev, struct memory_block, sysdev); |
267 | phys_section_nr = mem->phys_index; | ||
268 | |||
269 | if (!present_section_nr(phys_section_nr)) | ||
270 | goto out; | ||
271 | 312 | ||
272 | if (!strncmp(buf, "online", min((int)count, 6))) | 313 | if (!strncmp(buf, "online", min((int)count, 6))) |
273 | ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE); | 314 | ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE); |
274 | else if(!strncmp(buf, "offline", min((int)count, 7))) | 315 | else if(!strncmp(buf, "offline", min((int)count, 7))) |
275 | ret = memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE); | 316 | ret = memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE); |
276 | out: | 317 | |
277 | if (ret) | 318 | if (ret) |
278 | return ret; | 319 | return ret; |
279 | return count; | 320 | return count; |
@@ -296,7 +337,8 @@ static ssize_t show_phys_device(struct sys_device *dev, | |||
296 | return sprintf(buf, "%d\n", mem->phys_device); | 337 | return sprintf(buf, "%d\n", mem->phys_device); |
297 | } | 338 | } |
298 | 339 | ||
299 | static SYSDEV_ATTR(phys_index, 0444, show_mem_phys_index, NULL); | 340 | static SYSDEV_ATTR(phys_index, 0444, show_mem_start_phys_index, NULL); |
341 | static SYSDEV_ATTR(end_phys_index, 0444, show_mem_end_phys_index, NULL); | ||
300 | static SYSDEV_ATTR(state, 0644, show_mem_state, store_mem_state); | 342 | static SYSDEV_ATTR(state, 0644, show_mem_state, store_mem_state); |
301 | static SYSDEV_ATTR(phys_device, 0444, show_phys_device, NULL); | 343 | static SYSDEV_ATTR(phys_device, 0444, show_phys_device, NULL); |
302 | static SYSDEV_ATTR(removable, 0444, show_mem_removable, NULL); | 344 | static SYSDEV_ATTR(removable, 0444, show_mem_removable, NULL); |
@@ -313,7 +355,7 @@ static ssize_t | |||
313 | print_block_size(struct sysdev_class *class, struct sysdev_class_attribute *attr, | 355 | print_block_size(struct sysdev_class *class, struct sysdev_class_attribute *attr, |
314 | char *buf) | 356 | char *buf) |
315 | { | 357 | { |
316 | return sprintf(buf, "%lx\n", (unsigned long)PAGES_PER_SECTION * PAGE_SIZE); | 358 | return sprintf(buf, "%lx\n", get_memory_block_size()); |
317 | } | 359 | } |
318 | 360 | ||
319 | static SYSDEV_CLASS_ATTR(block_size_bytes, 0444, print_block_size, NULL); | 361 | static SYSDEV_CLASS_ATTR(block_size_bytes, 0444, print_block_size, NULL); |
@@ -337,17 +379,23 @@ memory_probe_store(struct class *class, struct class_attribute *attr, | |||
337 | { | 379 | { |
338 | u64 phys_addr; | 380 | u64 phys_addr; |
339 | int nid; | 381 | int nid; |
340 | int ret; | 382 | int i, ret; |
341 | 383 | ||
342 | phys_addr = simple_strtoull(buf, NULL, 0); | 384 | phys_addr = simple_strtoull(buf, NULL, 0); |
343 | 385 | ||
344 | nid = memory_add_physaddr_to_nid(phys_addr); | 386 | for (i = 0; i < sections_per_block; i++) { |
345 | ret = add_memory(nid, phys_addr, PAGES_PER_SECTION << PAGE_SHIFT); | 387 | nid = memory_add_physaddr_to_nid(phys_addr); |
388 | ret = add_memory(nid, phys_addr, | ||
389 | PAGES_PER_SECTION << PAGE_SHIFT); | ||
390 | if (ret) | ||
391 | goto out; | ||
346 | 392 | ||
347 | if (ret) | 393 | phys_addr += MIN_MEMORY_BLOCK_SIZE; |
348 | count = ret; | 394 | } |
349 | 395 | ||
350 | return count; | 396 | ret = count; |
397 | out: | ||
398 | return ret; | ||
351 | } | 399 | } |
352 | static CLASS_ATTR(probe, S_IWUSR, NULL, memory_probe_store); | 400 | static CLASS_ATTR(probe, S_IWUSR, NULL, memory_probe_store); |
353 | 401 | ||
@@ -435,68 +483,107 @@ int __weak arch_get_memory_phys_device(unsigned long start_pfn) | |||
435 | return 0; | 483 | return 0; |
436 | } | 484 | } |
437 | 485 | ||
438 | static int add_memory_block(int nid, struct mem_section *section, | 486 | struct memory_block *find_memory_block_hinted(struct mem_section *section, |
439 | unsigned long state, enum mem_add_context context) | 487 | struct memory_block *hint) |
440 | { | 488 | { |
441 | struct memory_block *mem = kzalloc(sizeof(*mem), GFP_KERNEL); | 489 | struct kobject *kobj; |
490 | struct sys_device *sysdev; | ||
491 | struct memory_block *mem; | ||
492 | char name[sizeof(MEMORY_CLASS_NAME) + 9 + 1]; | ||
493 | int block_id = base_memory_block_id(__section_nr(section)); | ||
494 | |||
495 | kobj = hint ? &hint->sysdev.kobj : NULL; | ||
496 | |||
497 | /* | ||
498 | * This only works because we know that block_id == sysdev->id | ||
499 | * slightly redundant with sysdev_register() | ||
500 | */ | ||
501 | sprintf(&name[0], "%s%d", MEMORY_CLASS_NAME, block_id); | ||
502 | |||
503 | kobj = kset_find_obj_hinted(&memory_sysdev_class.kset, name, kobj); | ||
504 | if (!kobj) | ||
505 | return NULL; | ||
506 | |||
507 | sysdev = container_of(kobj, struct sys_device, kobj); | ||
508 | mem = container_of(sysdev, struct memory_block, sysdev); | ||
509 | |||
510 | return mem; | ||
511 | } | ||
512 | |||
513 | /* | ||
514 | * For now, we have a linear search to go find the appropriate | ||
515 | * memory_block corresponding to a particular phys_index. If | ||
516 | * this gets to be a real problem, we can always use a radix | ||
517 | * tree or something here. | ||
518 | * | ||
519 | * This could be made generic for all sysdev classes. | ||
520 | */ | ||
521 | struct memory_block *find_memory_block(struct mem_section *section) | ||
522 | { | ||
523 | return find_memory_block_hinted(section, NULL); | ||
524 | } | ||
525 | |||
526 | static int init_memory_block(struct memory_block **memory, | ||
527 | struct mem_section *section, unsigned long state) | ||
528 | { | ||
529 | struct memory_block *mem; | ||
442 | unsigned long start_pfn; | 530 | unsigned long start_pfn; |
531 | int scn_nr; | ||
443 | int ret = 0; | 532 | int ret = 0; |
444 | 533 | ||
534 | mem = kzalloc(sizeof(*mem), GFP_KERNEL); | ||
445 | if (!mem) | 535 | if (!mem) |
446 | return -ENOMEM; | 536 | return -ENOMEM; |
447 | 537 | ||
448 | mem->phys_index = __section_nr(section); | 538 | scn_nr = __section_nr(section); |
539 | mem->start_section_nr = | ||
540 | base_memory_block_id(scn_nr) * sections_per_block; | ||
541 | mem->end_section_nr = mem->start_section_nr + sections_per_block - 1; | ||
449 | mem->state = state; | 542 | mem->state = state; |
543 | mem->section_count++; | ||
450 | mutex_init(&mem->state_mutex); | 544 | mutex_init(&mem->state_mutex); |
451 | start_pfn = section_nr_to_pfn(mem->phys_index); | 545 | start_pfn = section_nr_to_pfn(mem->start_section_nr); |
452 | mem->phys_device = arch_get_memory_phys_device(start_pfn); | 546 | mem->phys_device = arch_get_memory_phys_device(start_pfn); |
453 | 547 | ||
454 | ret = register_memory(mem, section); | 548 | ret = register_memory(mem); |
455 | if (!ret) | 549 | if (!ret) |
456 | ret = mem_create_simple_file(mem, phys_index); | 550 | ret = mem_create_simple_file(mem, phys_index); |
457 | if (!ret) | 551 | if (!ret) |
552 | ret = mem_create_simple_file(mem, end_phys_index); | ||
553 | if (!ret) | ||
458 | ret = mem_create_simple_file(mem, state); | 554 | ret = mem_create_simple_file(mem, state); |
459 | if (!ret) | 555 | if (!ret) |
460 | ret = mem_create_simple_file(mem, phys_device); | 556 | ret = mem_create_simple_file(mem, phys_device); |
461 | if (!ret) | 557 | if (!ret) |
462 | ret = mem_create_simple_file(mem, removable); | 558 | ret = mem_create_simple_file(mem, removable); |
463 | if (!ret) { | ||
464 | if (context == HOTPLUG) | ||
465 | ret = register_mem_sect_under_node(mem, nid); | ||
466 | } | ||
467 | 559 | ||
560 | *memory = mem; | ||
468 | return ret; | 561 | return ret; |
469 | } | 562 | } |
470 | 563 | ||
471 | /* | 564 | static int add_memory_section(int nid, struct mem_section *section, |
472 | * For now, we have a linear search to go find the appropriate | 565 | unsigned long state, enum mem_add_context context) |
473 | * memory_block corresponding to a particular phys_index. If | ||
474 | * this gets to be a real problem, we can always use a radix | ||
475 | * tree or something here. | ||
476 | * | ||
477 | * This could be made generic for all sysdev classes. | ||
478 | */ | ||
479 | struct memory_block *find_memory_block(struct mem_section *section) | ||
480 | { | 566 | { |
481 | struct kobject *kobj; | ||
482 | struct sys_device *sysdev; | ||
483 | struct memory_block *mem; | 567 | struct memory_block *mem; |
484 | char name[sizeof(MEMORY_CLASS_NAME) + 9 + 1]; | 568 | int ret = 0; |
485 | 569 | ||
486 | /* | 570 | mutex_lock(&mem_sysfs_mutex); |
487 | * This only works because we know that section == sysdev->id | ||
488 | * slightly redundant with sysdev_register() | ||
489 | */ | ||
490 | sprintf(&name[0], "%s%d", MEMORY_CLASS_NAME, __section_nr(section)); | ||
491 | 571 | ||
492 | kobj = kset_find_obj(&memory_sysdev_class.kset, name); | 572 | mem = find_memory_block(section); |
493 | if (!kobj) | 573 | if (mem) { |
494 | return NULL; | 574 | mem->section_count++; |
575 | kobject_put(&mem->sysdev.kobj); | ||
576 | } else | ||
577 | ret = init_memory_block(&mem, section, state); | ||
495 | 578 | ||
496 | sysdev = container_of(kobj, struct sys_device, kobj); | 579 | if (!ret) { |
497 | mem = container_of(sysdev, struct memory_block, sysdev); | 580 | if (context == HOTPLUG && |
581 | mem->section_count == sections_per_block) | ||
582 | ret = register_mem_sect_under_node(mem, nid); | ||
583 | } | ||
498 | 584 | ||
499 | return mem; | 585 | mutex_unlock(&mem_sysfs_mutex); |
586 | return ret; | ||
500 | } | 587 | } |
501 | 588 | ||
502 | int remove_memory_block(unsigned long node_id, struct mem_section *section, | 589 | int remove_memory_block(unsigned long node_id, struct mem_section *section, |
@@ -504,14 +591,23 @@ int remove_memory_block(unsigned long node_id, struct mem_section *section, | |||
504 | { | 591 | { |
505 | struct memory_block *mem; | 592 | struct memory_block *mem; |
506 | 593 | ||
594 | mutex_lock(&mem_sysfs_mutex); | ||
507 | mem = find_memory_block(section); | 595 | mem = find_memory_block(section); |
508 | unregister_mem_sect_under_nodes(mem); | 596 | unregister_mem_sect_under_nodes(mem, __section_nr(section)); |
509 | mem_remove_simple_file(mem, phys_index); | 597 | |
510 | mem_remove_simple_file(mem, state); | 598 | mem->section_count--; |
511 | mem_remove_simple_file(mem, phys_device); | 599 | if (mem->section_count == 0) { |
512 | mem_remove_simple_file(mem, removable); | 600 | mem_remove_simple_file(mem, phys_index); |
513 | unregister_memory(mem, section); | 601 | mem_remove_simple_file(mem, end_phys_index); |
514 | 602 | mem_remove_simple_file(mem, state); | |
603 | mem_remove_simple_file(mem, phys_device); | ||
604 | mem_remove_simple_file(mem, removable); | ||
605 | unregister_memory(mem); | ||
606 | kfree(mem); | ||
607 | } else | ||
608 | kobject_put(&mem->sysdev.kobj); | ||
609 | |||
610 | mutex_unlock(&mem_sysfs_mutex); | ||
515 | return 0; | 611 | return 0; |
516 | } | 612 | } |
517 | 613 | ||
@@ -521,7 +617,7 @@ int remove_memory_block(unsigned long node_id, struct mem_section *section, | |||
521 | */ | 617 | */ |
522 | int register_new_memory(int nid, struct mem_section *section) | 618 | int register_new_memory(int nid, struct mem_section *section) |
523 | { | 619 | { |
524 | return add_memory_block(nid, section, MEM_OFFLINE, HOTPLUG); | 620 | return add_memory_section(nid, section, MEM_OFFLINE, HOTPLUG); |
525 | } | 621 | } |
526 | 622 | ||
527 | int unregister_memory_section(struct mem_section *section) | 623 | int unregister_memory_section(struct mem_section *section) |
@@ -540,12 +636,16 @@ int __init memory_dev_init(void) | |||
540 | unsigned int i; | 636 | unsigned int i; |
541 | int ret; | 637 | int ret; |
542 | int err; | 638 | int err; |
639 | unsigned long block_sz; | ||
543 | 640 | ||
544 | memory_sysdev_class.kset.uevent_ops = &memory_uevent_ops; | 641 | memory_sysdev_class.kset.uevent_ops = &memory_uevent_ops; |
545 | ret = sysdev_class_register(&memory_sysdev_class); | 642 | ret = sysdev_class_register(&memory_sysdev_class); |
546 | if (ret) | 643 | if (ret) |
547 | goto out; | 644 | goto out; |
548 | 645 | ||
646 | block_sz = get_memory_block_size(); | ||
647 | sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE; | ||
648 | |||
549 | /* | 649 | /* |
550 | * Create entries for memory sections that were found | 650 | * Create entries for memory sections that were found |
551 | * during boot and have been initialized | 651 | * during boot and have been initialized |
@@ -553,8 +653,8 @@ int __init memory_dev_init(void) | |||
553 | for (i = 0; i < NR_MEM_SECTIONS; i++) { | 653 | for (i = 0; i < NR_MEM_SECTIONS; i++) { |
554 | if (!present_section_nr(i)) | 654 | if (!present_section_nr(i)) |
555 | continue; | 655 | continue; |
556 | err = add_memory_block(0, __nr_to_section(i), MEM_ONLINE, | 656 | err = add_memory_section(0, __nr_to_section(i), MEM_ONLINE, |
557 | BOOT); | 657 | BOOT); |
558 | if (!ret) | 658 | if (!ret) |
559 | ret = err; | 659 | ret = err; |
560 | } | 660 | } |
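Note: because several sections now share one memory_block, the add and remove paths above reference-count the block through section_count: the sysfs files and the sysdev exist from the first section of a block until its last section is removed. An abridged restatement of that lifecycle, for reading convenience (locking and error handling omitted):

    /* add_memory_section(), abridged: reuse the block when it already exists */
    static int example_add_section(struct mem_section *section, unsigned long state)
    {
            struct memory_block *mem = find_memory_block(section);
            int ret = 0;

            if (mem) {
                    mem->section_count++;            /* another section joins the block */
                    kobject_put(&mem->sysdev.kobj);  /* drop the lookup reference       */
            } else {
                    ret = init_memory_block(&mem, section, state);  /* first section    */
            }
            return ret;
    }

    /* remove_memory_block(), abridged: only the last section tears the block down */
    static void example_remove_section(struct mem_section *section)
    {
            struct memory_block *mem = find_memory_block(section);

            if (--mem->section_count == 0) {
                    unregister_memory(mem);          /* removes the sysdev and its files */
                    kfree(mem);
            } else {
                    kobject_put(&mem->sysdev.kobj);  /* block stays; drop our reference  */
            }
    }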
diff --git a/drivers/base/node.c b/drivers/base/node.c index 2872e86837b2..793f796c4da3 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c | |||
@@ -7,6 +7,7 @@ | |||
7 | #include <linux/init.h> | 7 | #include <linux/init.h> |
8 | #include <linux/mm.h> | 8 | #include <linux/mm.h> |
9 | #include <linux/memory.h> | 9 | #include <linux/memory.h> |
10 | #include <linux/vmstat.h> | ||
10 | #include <linux/node.h> | 11 | #include <linux/node.h> |
11 | #include <linux/hugetlb.h> | 12 | #include <linux/hugetlb.h> |
12 | #include <linux/compaction.h> | 13 | #include <linux/compaction.h> |
@@ -117,12 +118,21 @@ static ssize_t node_read_meminfo(struct sys_device * dev, | |||
117 | "Node %d WritebackTmp: %8lu kB\n" | 118 | "Node %d WritebackTmp: %8lu kB\n" |
118 | "Node %d Slab: %8lu kB\n" | 119 | "Node %d Slab: %8lu kB\n" |
119 | "Node %d SReclaimable: %8lu kB\n" | 120 | "Node %d SReclaimable: %8lu kB\n" |
120 | "Node %d SUnreclaim: %8lu kB\n", | 121 | "Node %d SUnreclaim: %8lu kB\n" |
122 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | ||
123 | "Node %d AnonHugePages: %8lu kB\n" | ||
124 | #endif | ||
125 | , | ||
121 | nid, K(node_page_state(nid, NR_FILE_DIRTY)), | 126 | nid, K(node_page_state(nid, NR_FILE_DIRTY)), |
122 | nid, K(node_page_state(nid, NR_WRITEBACK)), | 127 | nid, K(node_page_state(nid, NR_WRITEBACK)), |
123 | nid, K(node_page_state(nid, NR_FILE_PAGES)), | 128 | nid, K(node_page_state(nid, NR_FILE_PAGES)), |
124 | nid, K(node_page_state(nid, NR_FILE_MAPPED)), | 129 | nid, K(node_page_state(nid, NR_FILE_MAPPED)), |
125 | nid, K(node_page_state(nid, NR_ANON_PAGES)), | 130 | nid, K(node_page_state(nid, NR_ANON_PAGES) |
131 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | ||
132 | + node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) * | ||
133 | HPAGE_PMD_NR | ||
134 | #endif | ||
135 | ), | ||
126 | nid, K(node_page_state(nid, NR_SHMEM)), | 136 | nid, K(node_page_state(nid, NR_SHMEM)), |
127 | nid, node_page_state(nid, NR_KERNEL_STACK) * | 137 | nid, node_page_state(nid, NR_KERNEL_STACK) * |
128 | THREAD_SIZE / 1024, | 138 | THREAD_SIZE / 1024, |
@@ -133,7 +143,13 @@ static ssize_t node_read_meminfo(struct sys_device * dev, | |||
133 | nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE) + | 143 | nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE) + |
134 | node_page_state(nid, NR_SLAB_UNRECLAIMABLE)), | 144 | node_page_state(nid, NR_SLAB_UNRECLAIMABLE)), |
135 | nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE)), | 145 | nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE)), |
136 | nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE))); | 146 | nid, K(node_page_state(nid, NR_SLAB_UNRECLAIMABLE)) |
147 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | ||
148 | , nid, | ||
149 | K(node_page_state(nid, NR_ANON_TRANSPARENT_HUGEPAGES) * | ||
150 | HPAGE_PMD_NR) | ||
151 | #endif | ||
152 | ); | ||
137 | n += hugetlb_report_node_meminfo(nid, buf + n); | 153 | n += hugetlb_report_node_meminfo(nid, buf + n); |
138 | return n; | 154 | return n; |
139 | } | 155 | } |
@@ -160,6 +176,21 @@ static ssize_t node_read_numastat(struct sys_device * dev, | |||
160 | } | 176 | } |
161 | static SYSDEV_ATTR(numastat, S_IRUGO, node_read_numastat, NULL); | 177 | static SYSDEV_ATTR(numastat, S_IRUGO, node_read_numastat, NULL); |
162 | 178 | ||
179 | static ssize_t node_read_vmstat(struct sys_device *dev, | ||
180 | struct sysdev_attribute *attr, char *buf) | ||
181 | { | ||
182 | int nid = dev->id; | ||
183 | int i; | ||
184 | int n = 0; | ||
185 | |||
186 | for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) | ||
187 | n += sprintf(buf+n, "%s %lu\n", vmstat_text[i], | ||
188 | node_page_state(nid, i)); | ||
189 | |||
190 | return n; | ||
191 | } | ||
192 | static SYSDEV_ATTR(vmstat, S_IRUGO, node_read_vmstat, NULL); | ||
193 | |||
163 | static ssize_t node_read_distance(struct sys_device * dev, | 194 | static ssize_t node_read_distance(struct sys_device * dev, |
164 | struct sysdev_attribute *attr, char * buf) | 195 | struct sysdev_attribute *attr, char * buf) |
165 | { | 196 | { |
@@ -243,6 +274,7 @@ int register_node(struct node *node, int num, struct node *parent) | |||
243 | sysdev_create_file(&node->sysdev, &attr_meminfo); | 274 | sysdev_create_file(&node->sysdev, &attr_meminfo); |
244 | sysdev_create_file(&node->sysdev, &attr_numastat); | 275 | sysdev_create_file(&node->sysdev, &attr_numastat); |
245 | sysdev_create_file(&node->sysdev, &attr_distance); | 276 | sysdev_create_file(&node->sysdev, &attr_distance); |
277 | sysdev_create_file(&node->sysdev, &attr_vmstat); | ||
246 | 278 | ||
247 | scan_unevictable_register_node(node); | 279 | scan_unevictable_register_node(node); |
248 | 280 | ||
@@ -267,6 +299,7 @@ void unregister_node(struct node *node) | |||
267 | sysdev_remove_file(&node->sysdev, &attr_meminfo); | 299 | sysdev_remove_file(&node->sysdev, &attr_meminfo); |
268 | sysdev_remove_file(&node->sysdev, &attr_numastat); | 300 | sysdev_remove_file(&node->sysdev, &attr_numastat); |
269 | sysdev_remove_file(&node->sysdev, &attr_distance); | 301 | sysdev_remove_file(&node->sysdev, &attr_distance); |
302 | sysdev_remove_file(&node->sysdev, &attr_vmstat); | ||
270 | 303 | ||
271 | scan_unevictable_unregister_node(node); | 304 | scan_unevictable_unregister_node(node); |
272 | hugetlb_unregister_node(node); /* no-op, if memoryless node */ | 305 | hugetlb_unregister_node(node); /* no-op, if memoryless node */ |
@@ -346,8 +379,10 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, int nid) | |||
346 | return -EFAULT; | 379 | return -EFAULT; |
347 | if (!node_online(nid)) | 380 | if (!node_online(nid)) |
348 | return 0; | 381 | return 0; |
349 | sect_start_pfn = section_nr_to_pfn(mem_blk->phys_index); | 382 | |
350 | sect_end_pfn = sect_start_pfn + PAGES_PER_SECTION - 1; | 383 | sect_start_pfn = section_nr_to_pfn(mem_blk->start_section_nr); |
384 | sect_end_pfn = section_nr_to_pfn(mem_blk->end_section_nr); | ||
385 | sect_end_pfn += PAGES_PER_SECTION - 1; | ||
351 | for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) { | 386 | for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) { |
352 | int page_nid; | 387 | int page_nid; |
353 | 388 | ||
@@ -371,7 +406,8 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, int nid) | |||
371 | } | 406 | } |
372 | 407 | ||
373 | /* unregister memory section under all nodes that it spans */ | 408 | /* unregister memory section under all nodes that it spans */ |
374 | int unregister_mem_sect_under_nodes(struct memory_block *mem_blk) | 409 | int unregister_mem_sect_under_nodes(struct memory_block *mem_blk, |
410 | unsigned long phys_index) | ||
375 | { | 411 | { |
376 | NODEMASK_ALLOC(nodemask_t, unlinked_nodes, GFP_KERNEL); | 412 | NODEMASK_ALLOC(nodemask_t, unlinked_nodes, GFP_KERNEL); |
377 | unsigned long pfn, sect_start_pfn, sect_end_pfn; | 413 | unsigned long pfn, sect_start_pfn, sect_end_pfn; |
@@ -383,7 +419,8 @@ int unregister_mem_sect_under_nodes(struct memory_block *mem_blk) | |||
383 | if (!unlinked_nodes) | 419 | if (!unlinked_nodes) |
384 | return -ENOMEM; | 420 | return -ENOMEM; |
385 | nodes_clear(*unlinked_nodes); | 421 | nodes_clear(*unlinked_nodes); |
386 | sect_start_pfn = section_nr_to_pfn(mem_blk->phys_index); | 422 | |
423 | sect_start_pfn = section_nr_to_pfn(phys_index); | ||
387 | sect_end_pfn = sect_start_pfn + PAGES_PER_SECTION - 1; | 424 | sect_end_pfn = sect_start_pfn + PAGES_PER_SECTION - 1; |
388 | for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) { | 425 | for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) { |
389 | int nid; | 426 | int nid; |
@@ -409,25 +446,27 @@ static int link_mem_sections(int nid) | |||
409 | unsigned long start_pfn = NODE_DATA(nid)->node_start_pfn; | 446 | unsigned long start_pfn = NODE_DATA(nid)->node_start_pfn; |
410 | unsigned long end_pfn = start_pfn + NODE_DATA(nid)->node_spanned_pages; | 447 | unsigned long end_pfn = start_pfn + NODE_DATA(nid)->node_spanned_pages; |
411 | unsigned long pfn; | 448 | unsigned long pfn; |
449 | struct memory_block *mem_blk = NULL; | ||
412 | int err = 0; | 450 | int err = 0; |
413 | 451 | ||
414 | for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { | 452 | for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { |
415 | unsigned long section_nr = pfn_to_section_nr(pfn); | 453 | unsigned long section_nr = pfn_to_section_nr(pfn); |
416 | struct mem_section *mem_sect; | 454 | struct mem_section *mem_sect; |
417 | struct memory_block *mem_blk; | ||
418 | int ret; | 455 | int ret; |
419 | 456 | ||
420 | if (!present_section_nr(section_nr)) | 457 | if (!present_section_nr(section_nr)) |
421 | continue; | 458 | continue; |
422 | mem_sect = __nr_to_section(section_nr); | 459 | mem_sect = __nr_to_section(section_nr); |
423 | mem_blk = find_memory_block(mem_sect); | 460 | mem_blk = find_memory_block_hinted(mem_sect, mem_blk); |
424 | ret = register_mem_sect_under_node(mem_blk, nid); | 461 | ret = register_mem_sect_under_node(mem_blk, nid); |
425 | if (!err) | 462 | if (!err) |
426 | err = ret; | 463 | err = ret; |
427 | 464 | ||
428 | /* discard ref obtained in find_memory_block() */ | 465 | /* discard ref obtained in find_memory_block() */ |
429 | kobject_put(&mem_blk->sysdev.kobj); | ||
430 | } | 466 | } |
467 | |||
468 | if (mem_blk) | ||
469 | kobject_put(&mem_blk->sysdev.kobj); | ||
431 | return err; | 470 | return err; |
432 | } | 471 | } |
433 | 472 | ||
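Note: link_mem_sections() now threads the block found on the previous iteration back into find_memory_block_hinted(), so consecutive sections of the same block avoid a full kset search, and the single kobject reference is dropped once after the loop. The pattern, restated in outline from the code above:

    /* Outline of the hinted lookup used by link_mem_sections(). */
    static int example_link_node_sections(int nid, unsigned long start_pfn,
                                          unsigned long end_pfn)
    {
            struct memory_block *mem_blk = NULL;
            unsigned long pfn;
            int err = 0;

            for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                    unsigned long section_nr = pfn_to_section_nr(pfn);
                    struct mem_section *mem_sect;

                    if (!present_section_nr(section_nr))
                            continue;
                    mem_sect = __nr_to_section(section_nr);
                    /* reuse the block found last time around as the search hint */
                    mem_blk = find_memory_block_hinted(mem_sect, mem_blk);
                    err = register_mem_sect_under_node(mem_blk, nid);
            }
            if (mem_blk)
                    kobject_put(&mem_blk->sysdev.kobj);  /* drop the final reference */
            return err;
    }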
diff --git a/drivers/base/platform.c b/drivers/base/platform.c index c6c933f58102..6040717b62bb 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c | |||
@@ -147,7 +147,9 @@ static void platform_device_release(struct device *dev) | |||
147 | struct platform_object *pa = container_of(dev, struct platform_object, | 147 | struct platform_object *pa = container_of(dev, struct platform_object, |
148 | pdev.dev); | 148 | pdev.dev); |
149 | 149 | ||
150 | of_device_node_put(&pa->pdev.dev); | ||
150 | kfree(pa->pdev.dev.platform_data); | 151 | kfree(pa->pdev.dev.platform_data); |
152 | kfree(pa->pdev.mfd_cell); | ||
151 | kfree(pa->pdev.resource); | 153 | kfree(pa->pdev.resource); |
152 | kfree(pa); | 154 | kfree(pa); |
153 | } | 155 | } |
@@ -190,15 +192,18 @@ EXPORT_SYMBOL_GPL(platform_device_alloc); | |||
190 | int platform_device_add_resources(struct platform_device *pdev, | 192 | int platform_device_add_resources(struct platform_device *pdev, |
191 | const struct resource *res, unsigned int num) | 193 | const struct resource *res, unsigned int num) |
192 | { | 194 | { |
193 | struct resource *r; | 195 | struct resource *r = NULL; |
194 | 196 | ||
195 | r = kmemdup(res, sizeof(struct resource) * num, GFP_KERNEL); | 197 | if (res) { |
196 | if (r) { | 198 | r = kmemdup(res, sizeof(struct resource) * num, GFP_KERNEL); |
197 | pdev->resource = r; | 199 | if (!r) |
198 | pdev->num_resources = num; | 200 | return -ENOMEM; |
199 | return 0; | ||
200 | } | 201 | } |
201 | return -ENOMEM; | 202 | |
203 | kfree(pdev->resource); | ||
204 | pdev->resource = r; | ||
205 | pdev->num_resources = num; | ||
206 | return 0; | ||
202 | } | 207 | } |
203 | EXPORT_SYMBOL_GPL(platform_device_add_resources); | 208 | EXPORT_SYMBOL_GPL(platform_device_add_resources); |
204 | 209 | ||
@@ -215,13 +220,17 @@ EXPORT_SYMBOL_GPL(platform_device_add_resources); | |||
215 | int platform_device_add_data(struct platform_device *pdev, const void *data, | 220 | int platform_device_add_data(struct platform_device *pdev, const void *data, |
216 | size_t size) | 221 | size_t size) |
217 | { | 222 | { |
218 | void *d = kmemdup(data, size, GFP_KERNEL); | 223 | void *d = NULL; |
219 | 224 | ||
220 | if (d) { | 225 | if (data) { |
221 | pdev->dev.platform_data = d; | 226 | d = kmemdup(data, size, GFP_KERNEL); |
222 | return 0; | 227 | if (!d) |
228 | return -ENOMEM; | ||
223 | } | 229 | } |
224 | return -ENOMEM; | 230 | |
231 | kfree(pdev->dev.platform_data); | ||
232 | pdev->dev.platform_data = d; | ||
233 | return 0; | ||
225 | } | 234 | } |
226 | EXPORT_SYMBOL_GPL(platform_device_add_data); | 235 | EXPORT_SYMBOL_GPL(platform_device_add_data); |
227 | 236 | ||
@@ -358,7 +367,7 @@ EXPORT_SYMBOL_GPL(platform_device_unregister); | |||
358 | * | 367 | * |
359 | * Returns &struct platform_device pointer on success, or ERR_PTR() on error. | 368 | * Returns &struct platform_device pointer on success, or ERR_PTR() on error. |
360 | */ | 369 | */ |
361 | struct platform_device *__init_or_module platform_device_register_resndata( | 370 | struct platform_device *platform_device_register_resndata( |
362 | struct device *parent, | 371 | struct device *parent, |
363 | const char *name, int id, | 372 | const char *name, int id, |
364 | const struct resource *res, unsigned int num, | 373 | const struct resource *res, unsigned int num, |
@@ -373,17 +382,13 @@ struct platform_device *__init_or_module platform_device_register_resndata( | |||
373 | 382 | ||
374 | pdev->dev.parent = parent; | 383 | pdev->dev.parent = parent; |
375 | 384 | ||
376 | if (res) { | 385 | ret = platform_device_add_resources(pdev, res, num); |
377 | ret = platform_device_add_resources(pdev, res, num); | 386 | if (ret) |
378 | if (ret) | 387 | goto err; |
379 | goto err; | ||
380 | } | ||
381 | 388 | ||
382 | if (data) { | 389 | ret = platform_device_add_data(pdev, data, size); |
383 | ret = platform_device_add_data(pdev, data, size); | 390 | if (ret) |
384 | if (ret) | 391 | goto err; |
385 | goto err; | ||
386 | } | ||
387 | 392 | ||
388 | ret = platform_device_add(pdev); | 393 | ret = platform_device_add(pdev); |
389 | if (ret) { | 394 | if (ret) { |
@@ -488,12 +493,12 @@ int __init_or_module platform_driver_probe(struct platform_driver *drv, | |||
488 | * if the probe was successful, and make sure any forced probes of | 493 | * if the probe was successful, and make sure any forced probes of |
489 | * new devices fail. | 494 | * new devices fail. |
490 | */ | 495 | */ |
491 | spin_lock(&platform_bus_type.p->klist_drivers.k_lock); | 496 | spin_lock(&drv->driver.bus->p->klist_drivers.k_lock); |
492 | drv->probe = NULL; | 497 | drv->probe = NULL; |
493 | if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list)) | 498 | if (code == 0 && list_empty(&drv->driver.p->klist_devices.k_list)) |
494 | retval = -ENODEV; | 499 | retval = -ENODEV; |
495 | drv->driver.probe = platform_drv_probe_fail; | 500 | drv->driver.probe = platform_drv_probe_fail; |
496 | spin_unlock(&platform_bus_type.p->klist_drivers.k_lock); | 501 | spin_unlock(&drv->driver.bus->p->klist_drivers.k_lock); |
497 | 502 | ||
498 | if (code != retval) | 503 | if (code != retval) |
499 | platform_driver_unregister(drv); | 504 | platform_driver_unregister(drv); |
@@ -530,17 +535,13 @@ struct platform_device * __init_or_module platform_create_bundle( | |||
530 | goto err_out; | 535 | goto err_out; |
531 | } | 536 | } |
532 | 537 | ||
533 | if (res) { | 538 | error = platform_device_add_resources(pdev, res, n_res); |
534 | error = platform_device_add_resources(pdev, res, n_res); | 539 | if (error) |
535 | if (error) | 540 | goto err_pdev_put; |
536 | goto err_pdev_put; | ||
537 | } | ||
538 | 541 | ||
539 | if (data) { | 542 | error = platform_device_add_data(pdev, data, size); |
540 | error = platform_device_add_data(pdev, data, size); | 543 | if (error) |
541 | if (error) | 544 | goto err_pdev_put; |
542 | goto err_pdev_put; | ||
543 | } | ||
544 | 545 | ||
545 | error = platform_device_add(pdev); | 546 | error = platform_device_add(pdev); |
546 | if (error) | 547 | if (error) |
@@ -666,7 +667,7 @@ static int platform_legacy_resume(struct device *dev) | |||
666 | return ret; | 667 | return ret; |
667 | } | 668 | } |
668 | 669 | ||
669 | static int platform_pm_prepare(struct device *dev) | 670 | int platform_pm_prepare(struct device *dev) |
670 | { | 671 | { |
671 | struct device_driver *drv = dev->driver; | 672 | struct device_driver *drv = dev->driver; |
672 | int ret = 0; | 673 | int ret = 0; |
@@ -677,7 +678,7 @@ static int platform_pm_prepare(struct device *dev) | |||
677 | return ret; | 678 | return ret; |
678 | } | 679 | } |
679 | 680 | ||
680 | static void platform_pm_complete(struct device *dev) | 681 | void platform_pm_complete(struct device *dev) |
681 | { | 682 | { |
682 | struct device_driver *drv = dev->driver; | 683 | struct device_driver *drv = dev->driver; |
683 | 684 | ||
@@ -685,16 +686,11 @@ static void platform_pm_complete(struct device *dev) | |||
685 | drv->pm->complete(dev); | 686 | drv->pm->complete(dev); |
686 | } | 687 | } |
687 | 688 | ||
688 | #else /* !CONFIG_PM_SLEEP */ | 689 | #endif /* CONFIG_PM_SLEEP */ |
689 | |||
690 | #define platform_pm_prepare NULL | ||
691 | #define platform_pm_complete NULL | ||
692 | |||
693 | #endif /* !CONFIG_PM_SLEEP */ | ||
694 | 690 | ||
695 | #ifdef CONFIG_SUSPEND | 691 | #ifdef CONFIG_SUSPEND |
696 | 692 | ||
697 | int __weak platform_pm_suspend(struct device *dev) | 693 | int platform_pm_suspend(struct device *dev) |
698 | { | 694 | { |
699 | struct device_driver *drv = dev->driver; | 695 | struct device_driver *drv = dev->driver; |
700 | int ret = 0; | 696 | int ret = 0; |
@@ -712,7 +708,7 @@ int __weak platform_pm_suspend(struct device *dev) | |||
712 | return ret; | 708 | return ret; |
713 | } | 709 | } |
714 | 710 | ||
715 | int __weak platform_pm_suspend_noirq(struct device *dev) | 711 | int platform_pm_suspend_noirq(struct device *dev) |
716 | { | 712 | { |
717 | struct device_driver *drv = dev->driver; | 713 | struct device_driver *drv = dev->driver; |
718 | int ret = 0; | 714 | int ret = 0; |
@@ -728,7 +724,7 @@ int __weak platform_pm_suspend_noirq(struct device *dev) | |||
728 | return ret; | 724 | return ret; |
729 | } | 725 | } |
730 | 726 | ||
731 | int __weak platform_pm_resume(struct device *dev) | 727 | int platform_pm_resume(struct device *dev) |
732 | { | 728 | { |
733 | struct device_driver *drv = dev->driver; | 729 | struct device_driver *drv = dev->driver; |
734 | int ret = 0; | 730 | int ret = 0; |
@@ -746,7 +742,7 @@ int __weak platform_pm_resume(struct device *dev) | |||
746 | return ret; | 742 | return ret; |
747 | } | 743 | } |
748 | 744 | ||
749 | int __weak platform_pm_resume_noirq(struct device *dev) | 745 | int platform_pm_resume_noirq(struct device *dev) |
750 | { | 746 | { |
751 | struct device_driver *drv = dev->driver; | 747 | struct device_driver *drv = dev->driver; |
752 | int ret = 0; | 748 | int ret = 0; |
@@ -762,18 +758,11 @@ int __weak platform_pm_resume_noirq(struct device *dev) | |||
762 | return ret; | 758 | return ret; |
763 | } | 759 | } |
764 | 760 | ||
765 | #else /* !CONFIG_SUSPEND */ | 761 | #endif /* CONFIG_SUSPEND */ |
766 | |||
767 | #define platform_pm_suspend NULL | ||
768 | #define platform_pm_resume NULL | ||
769 | #define platform_pm_suspend_noirq NULL | ||
770 | #define platform_pm_resume_noirq NULL | ||
771 | |||
772 | #endif /* !CONFIG_SUSPEND */ | ||
773 | 762 | ||
774 | #ifdef CONFIG_HIBERNATION | 763 | #ifdef CONFIG_HIBERNATE_CALLBACKS |
775 | 764 | ||
776 | static int platform_pm_freeze(struct device *dev) | 765 | int platform_pm_freeze(struct device *dev) |
777 | { | 766 | { |
778 | struct device_driver *drv = dev->driver; | 767 | struct device_driver *drv = dev->driver; |
779 | int ret = 0; | 768 | int ret = 0; |
@@ -791,7 +780,7 @@ static int platform_pm_freeze(struct device *dev) | |||
791 | return ret; | 780 | return ret; |
792 | } | 781 | } |
793 | 782 | ||
794 | static int platform_pm_freeze_noirq(struct device *dev) | 783 | int platform_pm_freeze_noirq(struct device *dev) |
795 | { | 784 | { |
796 | struct device_driver *drv = dev->driver; | 785 | struct device_driver *drv = dev->driver; |
797 | int ret = 0; | 786 | int ret = 0; |
@@ -807,7 +796,7 @@ static int platform_pm_freeze_noirq(struct device *dev) | |||
807 | return ret; | 796 | return ret; |
808 | } | 797 | } |
809 | 798 | ||
810 | static int platform_pm_thaw(struct device *dev) | 799 | int platform_pm_thaw(struct device *dev) |
811 | { | 800 | { |
812 | struct device_driver *drv = dev->driver; | 801 | struct device_driver *drv = dev->driver; |
813 | int ret = 0; | 802 | int ret = 0; |
@@ -825,7 +814,7 @@ static int platform_pm_thaw(struct device *dev) | |||
825 | return ret; | 814 | return ret; |
826 | } | 815 | } |
827 | 816 | ||
828 | static int platform_pm_thaw_noirq(struct device *dev) | 817 | int platform_pm_thaw_noirq(struct device *dev) |
829 | { | 818 | { |
830 | struct device_driver *drv = dev->driver; | 819 | struct device_driver *drv = dev->driver; |
831 | int ret = 0; | 820 | int ret = 0; |
@@ -841,7 +830,7 @@ static int platform_pm_thaw_noirq(struct device *dev) | |||
841 | return ret; | 830 | return ret; |
842 | } | 831 | } |
843 | 832 | ||
844 | static int platform_pm_poweroff(struct device *dev) | 833 | int platform_pm_poweroff(struct device *dev) |
845 | { | 834 | { |
846 | struct device_driver *drv = dev->driver; | 835 | struct device_driver *drv = dev->driver; |
847 | int ret = 0; | 836 | int ret = 0; |
@@ -859,7 +848,7 @@ static int platform_pm_poweroff(struct device *dev) | |||
859 | return ret; | 848 | return ret; |
860 | } | 849 | } |
861 | 850 | ||
862 | static int platform_pm_poweroff_noirq(struct device *dev) | 851 | int platform_pm_poweroff_noirq(struct device *dev) |
863 | { | 852 | { |
864 | struct device_driver *drv = dev->driver; | 853 | struct device_driver *drv = dev->driver; |
865 | int ret = 0; | 854 | int ret = 0; |
@@ -875,7 +864,7 @@ static int platform_pm_poweroff_noirq(struct device *dev) | |||
875 | return ret; | 864 | return ret; |
876 | } | 865 | } |
877 | 866 | ||
878 | static int platform_pm_restore(struct device *dev) | 867 | int platform_pm_restore(struct device *dev) |
879 | { | 868 | { |
880 | struct device_driver *drv = dev->driver; | 869 | struct device_driver *drv = dev->driver; |
881 | int ret = 0; | 870 | int ret = 0; |
@@ -893,7 +882,7 @@ static int platform_pm_restore(struct device *dev) | |||
893 | return ret; | 882 | return ret; |
894 | } | 883 | } |
895 | 884 | ||
896 | static int platform_pm_restore_noirq(struct device *dev) | 885 | int platform_pm_restore_noirq(struct device *dev) |
897 | { | 886 | { |
898 | struct device_driver *drv = dev->driver; | 887 | struct device_driver *drv = dev->driver; |
899 | int ret = 0; | 888 | int ret = 0; |
@@ -909,62 +898,13 @@ static int platform_pm_restore_noirq(struct device *dev) | |||
909 | return ret; | 898 | return ret; |
910 | } | 899 | } |
911 | 900 | ||
912 | #else /* !CONFIG_HIBERNATION */ | 901 | #endif /* CONFIG_HIBERNATE_CALLBACKS */ |
913 | |||
914 | #define platform_pm_freeze NULL | ||
915 | #define platform_pm_thaw NULL | ||
916 | #define platform_pm_poweroff NULL | ||
917 | #define platform_pm_restore NULL | ||
918 | #define platform_pm_freeze_noirq NULL | ||
919 | #define platform_pm_thaw_noirq NULL | ||
920 | #define platform_pm_poweroff_noirq NULL | ||
921 | #define platform_pm_restore_noirq NULL | ||
922 | |||
923 | #endif /* !CONFIG_HIBERNATION */ | ||
924 | |||
925 | #ifdef CONFIG_PM_RUNTIME | ||
926 | |||
927 | int __weak platform_pm_runtime_suspend(struct device *dev) | ||
928 | { | ||
929 | return pm_generic_runtime_suspend(dev); | ||
930 | }; | ||
931 | |||
932 | int __weak platform_pm_runtime_resume(struct device *dev) | ||
933 | { | ||
934 | return pm_generic_runtime_resume(dev); | ||
935 | }; | ||
936 | |||
937 | int __weak platform_pm_runtime_idle(struct device *dev) | ||
938 | { | ||
939 | return pm_generic_runtime_idle(dev); | ||
940 | }; | ||
941 | |||
942 | #else /* !CONFIG_PM_RUNTIME */ | ||
943 | |||
944 | #define platform_pm_runtime_suspend NULL | ||
945 | #define platform_pm_runtime_resume NULL | ||
946 | #define platform_pm_runtime_idle NULL | ||
947 | |||
948 | #endif /* !CONFIG_PM_RUNTIME */ | ||
949 | 902 | ||
950 | static const struct dev_pm_ops platform_dev_pm_ops = { | 903 | static const struct dev_pm_ops platform_dev_pm_ops = { |
951 | .prepare = platform_pm_prepare, | 904 | .runtime_suspend = pm_generic_runtime_suspend, |
952 | .complete = platform_pm_complete, | 905 | .runtime_resume = pm_generic_runtime_resume, |
953 | .suspend = platform_pm_suspend, | 906 | .runtime_idle = pm_generic_runtime_idle, |
954 | .resume = platform_pm_resume, | 907 | USE_PLATFORM_PM_SLEEP_OPS |
955 | .freeze = platform_pm_freeze, | ||
956 | .thaw = platform_pm_thaw, | ||
957 | .poweroff = platform_pm_poweroff, | ||
958 | .restore = platform_pm_restore, | ||
959 | .suspend_noirq = platform_pm_suspend_noirq, | ||
960 | .resume_noirq = platform_pm_resume_noirq, | ||
961 | .freeze_noirq = platform_pm_freeze_noirq, | ||
962 | .thaw_noirq = platform_pm_thaw_noirq, | ||
963 | .poweroff_noirq = platform_pm_poweroff_noirq, | ||
964 | .restore_noirq = platform_pm_restore_noirq, | ||
965 | .runtime_suspend = platform_pm_runtime_suspend, | ||
966 | .runtime_resume = platform_pm_runtime_resume, | ||
967 | .runtime_idle = platform_pm_runtime_idle, | ||
968 | }; | 908 | }; |
969 | 909 | ||
970 | struct bus_type platform_bus_type = { | 910 | struct bus_type platform_bus_type = { |
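Note: since platform_device_add_resources() and platform_device_add_data() now treat a NULL table or data pointer as "nothing to copy" and still return 0, callers such as platform_device_register_resndata() and platform_create_bundle() no longer need their own NULL checks. A hedged usage sketch of the unconditional-call style (the device name, resources, and struct example_pdata are illustrative):

    static struct resource example_res[] = {
            {
                    .start = 0x10000000,
                    .end   = 0x10000fff,
                    .flags = IORESOURCE_MEM,
            },
            {
                    .start = 42,
                    .end   = 42,
                    .flags = IORESOURCE_IRQ,
            },
    };

    static struct example_pdata example_pdata = { .rate = 100 };

    static struct platform_device *example_register(struct device *parent)
    {
            /* res/data may also be NULL with num/size == 0; both copies are skipped */
            return platform_device_register_resndata(parent, "example-dev", -1,
                                                     example_res,
                                                     ARRAY_SIZE(example_res),
                                                     &example_pdata,
                                                     sizeof(example_pdata));
    }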
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile index cbccf9a3cee4..3647e114d0e7 100644 --- a/drivers/base/power/Makefile +++ b/drivers/base/power/Makefile | |||
@@ -1,8 +1,8 @@ | |||
1 | obj-$(CONFIG_PM) += sysfs.o | 1 | obj-$(CONFIG_PM) += sysfs.o generic_ops.o |
2 | obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o | 2 | obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o |
3 | obj-$(CONFIG_PM_RUNTIME) += runtime.o | 3 | obj-$(CONFIG_PM_RUNTIME) += runtime.o |
4 | obj-$(CONFIG_PM_OPS) += generic_ops.o | ||
5 | obj-$(CONFIG_PM_TRACE_RTC) += trace.o | 4 | obj-$(CONFIG_PM_TRACE_RTC) += trace.o |
5 | obj-$(CONFIG_PM_OPP) += opp.o | ||
6 | obj-$(CONFIG_HAVE_CLK) += clock_ops.o | ||
6 | 7 | ||
7 | ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG | 8 | ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG \ No newline at end of file |
8 | ccflags-$(CONFIG_PM_VERBOSE) += -DDEBUG | ||
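Note: the new clock_ops.c below adds the pm_runtime_clk_* helpers, which let bus or SoC power-domain code hand a device's clocks over to runtime PM. A hedged sketch of how such a layer might wire them up (the bus callbacks and con_ids are illustrative, not taken from this patch):

    #include <linux/pm_runtime.h>

    /* called when a device is bound to this (hypothetical) SoC bus */
    static int example_soc_attach_dev(struct device *dev)
    {
            int ret;

            ret = pm_runtime_clk_init(dev);      /* allocate the per-device clock list */
            if (ret)
                    return ret;

            /* register the clocks runtime PM should gate; con_ids are examples */
            pm_runtime_clk_add(dev, "fck");
            pm_runtime_clk_add(dev, NULL);       /* the device's default clock */
            return 0;
    }

    static void example_soc_detach_dev(struct device *dev)
    {
            pm_runtime_clk_destroy(dev);         /* disables, puts and frees all entries */
    }

    /* runtime PM callbacks that simply gate/ungate the registered clocks */
    static int example_soc_runtime_suspend(struct device *dev)
    {
            return pm_runtime_clk_suspend(dev);
    }

    static int example_soc_runtime_resume(struct device *dev)
    {
            return pm_runtime_clk_resume(dev);
    }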
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c new file mode 100644 index 000000000000..ad367c4139b1 --- /dev/null +++ b/drivers/base/power/clock_ops.c | |||
@@ -0,0 +1,431 @@ | |||
1 | /* | ||
2 | * drivers/base/power/clock_ops.c - Generic clock manipulation PM callbacks | ||
3 | * | ||
4 | * Copyright (c) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp. | ||
5 | * | ||
6 | * This file is released under the GPLv2. | ||
7 | */ | ||
8 | |||
9 | #include <linux/init.h> | ||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/io.h> | ||
12 | #include <linux/pm.h> | ||
13 | #include <linux/pm_runtime.h> | ||
14 | #include <linux/clk.h> | ||
15 | #include <linux/slab.h> | ||
16 | #include <linux/err.h> | ||
17 | |||
18 | #ifdef CONFIG_PM_RUNTIME | ||
19 | |||
20 | struct pm_runtime_clk_data { | ||
21 | struct list_head clock_list; | ||
22 | struct mutex lock; | ||
23 | }; | ||
24 | |||
25 | enum pce_status { | ||
26 | PCE_STATUS_NONE = 0, | ||
27 | PCE_STATUS_ACQUIRED, | ||
28 | PCE_STATUS_ENABLED, | ||
29 | PCE_STATUS_ERROR, | ||
30 | }; | ||
31 | |||
32 | struct pm_clock_entry { | ||
33 | struct list_head node; | ||
34 | char *con_id; | ||
35 | struct clk *clk; | ||
36 | enum pce_status status; | ||
37 | }; | ||
38 | |||
39 | static struct pm_runtime_clk_data *__to_prd(struct device *dev) | ||
40 | { | ||
41 | return dev ? dev->power.subsys_data : NULL; | ||
42 | } | ||
43 | |||
44 | /** | ||
45 | * pm_runtime_clk_add - Start using a device clock for runtime PM. | ||
46 | * @dev: Device whose clock is going to be used for runtime PM. | ||
47 | * @con_id: Connection ID of the clock. | ||
48 | * | ||
49 | * Add the clock represented by @con_id to the list of clocks used for | ||
50 | * the runtime PM of @dev. | ||
51 | */ | ||
52 | int pm_runtime_clk_add(struct device *dev, const char *con_id) | ||
53 | { | ||
54 | struct pm_runtime_clk_data *prd = __to_prd(dev); | ||
55 | struct pm_clock_entry *ce; | ||
56 | |||
57 | if (!prd) | ||
58 | return -EINVAL; | ||
59 | |||
60 | ce = kzalloc(sizeof(*ce), GFP_KERNEL); | ||
61 | if (!ce) { | ||
62 | dev_err(dev, "Not enough memory for clock entry.\n"); | ||
63 | return -ENOMEM; | ||
64 | } | ||
65 | |||
66 | if (con_id) { | ||
67 | ce->con_id = kstrdup(con_id, GFP_KERNEL); | ||
68 | if (!ce->con_id) { | ||
69 | dev_err(dev, | ||
70 | "Not enough memory for clock connection ID.\n"); | ||
71 | kfree(ce); | ||
72 | return -ENOMEM; | ||
73 | } | ||
74 | } | ||
75 | |||
76 | mutex_lock(&prd->lock); | ||
77 | list_add_tail(&ce->node, &prd->clock_list); | ||
78 | mutex_unlock(&prd->lock); | ||
79 | return 0; | ||
80 | } | ||
81 | |||
82 | /** | ||
83 | * __pm_runtime_clk_remove - Destroy runtime PM clock entry. | ||
84 | * @ce: Runtime PM clock entry to destroy. | ||
85 | * | ||
86 | * This routine must be called under the mutex protecting the runtime PM list | ||
87 | * of clocks corresponding to the @ce's device. | ||
88 | */ | ||
89 | static void __pm_runtime_clk_remove(struct pm_clock_entry *ce) | ||
90 | { | ||
91 | if (!ce) | ||
92 | return; | ||
93 | |||
94 | list_del(&ce->node); | ||
95 | |||
96 | if (ce->status < PCE_STATUS_ERROR) { | ||
97 | if (ce->status == PCE_STATUS_ENABLED) | ||
98 | clk_disable(ce->clk); | ||
99 | |||
100 | if (ce->status >= PCE_STATUS_ACQUIRED) | ||
101 | clk_put(ce->clk); | ||
102 | } | ||
103 | |||
104 | if (ce->con_id) | ||
105 | kfree(ce->con_id); | ||
106 | |||
107 | kfree(ce); | ||
108 | } | ||
109 | |||
110 | /** | ||
111 | * pm_runtime_clk_remove - Stop using a device clock for runtime PM. | ||
112 | * @dev: Device whose clock should not be used for runtime PM any more. | ||
113 | * @con_id: Connection ID of the clock. | ||
114 | * | ||
115 | * Remove the clock represented by @con_id from the list of clocks used for | ||
116 | * the runtime PM of @dev. | ||
117 | */ | ||
118 | void pm_runtime_clk_remove(struct device *dev, const char *con_id) | ||
119 | { | ||
120 | struct pm_runtime_clk_data *prd = __to_prd(dev); | ||
121 | struct pm_clock_entry *ce; | ||
122 | |||
123 | if (!prd) | ||
124 | return; | ||
125 | |||
126 | mutex_lock(&prd->lock); | ||
127 | |||
128 | list_for_each_entry(ce, &prd->clock_list, node) { | ||
129 | if (!con_id && !ce->con_id) { | ||
130 | __pm_runtime_clk_remove(ce); | ||
131 | break; | ||
132 | } else if (!con_id || !ce->con_id) { | ||
133 | continue; | ||
134 | } else if (!strcmp(con_id, ce->con_id)) { | ||
135 | __pm_runtime_clk_remove(ce); | ||
136 | break; | ||
137 | } | ||
138 | } | ||
139 | |||
140 | mutex_unlock(&prd->lock); | ||
141 | } | ||
142 | |||
143 | /** | ||
144 | * pm_runtime_clk_init - Initialize a device's list of runtime PM clocks. | ||
145 | * @dev: Device to initialize the list of runtime PM clocks for. | ||
146 | * | ||
147 | * Allocate a struct pm_runtime_clk_data object, initialize its lock member and | ||
148 | * make the @dev's power.subsys_data field point to it. | ||
149 | */ | ||
150 | int pm_runtime_clk_init(struct device *dev) | ||
151 | { | ||
152 | struct pm_runtime_clk_data *prd; | ||
153 | |||
154 | prd = kzalloc(sizeof(*prd), GFP_KERNEL); | ||
155 | if (!prd) { | ||
156 | dev_err(dev, "Not enough memory fo runtime PM data.\n"); | ||
157 | return -ENOMEM; | ||
158 | } | ||
159 | |||
160 | INIT_LIST_HEAD(&prd->clock_list); | ||
161 | mutex_init(&prd->lock); | ||
162 | dev->power.subsys_data = prd; | ||
163 | return 0; | ||
164 | } | ||
165 | |||
166 | /** | ||
167 | * pm_runtime_clk_destroy - Destroy a device's list of runtime PM clocks. | ||
168 | * @dev: Device to destroy the list of runtime PM clocks for. | ||
169 | * | ||
170 | * Clear the @dev's power.subsys_data field, remove the list of clock entries | ||
171 | * from the struct pm_runtime_clk_data object it previously pointed to, and free | ||
172 | * that object. | ||
173 | */ | ||
174 | void pm_runtime_clk_destroy(struct device *dev) | ||
175 | { | ||
176 | struct pm_runtime_clk_data *prd = __to_prd(dev); | ||
177 | struct pm_clock_entry *ce, *c; | ||
178 | |||
179 | if (!prd) | ||
180 | return; | ||
181 | |||
182 | dev->power.subsys_data = NULL; | ||
183 | |||
184 | mutex_lock(&prd->lock); | ||
185 | |||
186 | list_for_each_entry_safe_reverse(ce, c, &prd->clock_list, node) | ||
187 | __pm_runtime_clk_remove(ce); | ||
188 | |||
189 | mutex_unlock(&prd->lock); | ||
190 | |||
191 | kfree(prd); | ||
192 | } | ||
193 | |||
194 | /** | ||
195 | * pm_runtime_clk_acquire - Acquire a device clock. | ||
196 | * @dev: Device whose clock is to be acquired. | ||
197 | * @ce: Runtime PM clock entry holding the clock to acquire. | ||
198 | */ | ||
199 | static void pm_runtime_clk_acquire(struct device *dev, | ||
200 | struct pm_clock_entry *ce) | ||
201 | { | ||
202 | ce->clk = clk_get(dev, ce->con_id); | ||
203 | if (IS_ERR(ce->clk)) { | ||
204 | ce->status = PCE_STATUS_ERROR; | ||
205 | } else { | ||
206 | ce->status = PCE_STATUS_ACQUIRED; | ||
207 | dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id); | ||
208 | } | ||
209 | } | ||
210 | |||
211 | /** | ||
212 | * pm_runtime_clk_suspend - Disable clocks in a device's runtime PM clock list. | ||
213 | * @dev: Device to disable the clocks for. | ||
214 | */ | ||
215 | int pm_runtime_clk_suspend(struct device *dev) | ||
216 | { | ||
217 | struct pm_runtime_clk_data *prd = __to_prd(dev); | ||
218 | struct pm_clock_entry *ce; | ||
219 | |||
220 | dev_dbg(dev, "%s()\n", __func__); | ||
221 | |||
222 | if (!prd) | ||
223 | return 0; | ||
224 | |||
225 | mutex_lock(&prd->lock); | ||
226 | |||
227 | list_for_each_entry_reverse(ce, &prd->clock_list, node) { | ||
228 | if (ce->status == PCE_STATUS_NONE) | ||
229 | pm_runtime_clk_acquire(dev, ce); | ||
230 | |||
231 | if (ce->status < PCE_STATUS_ERROR) { | ||
232 | clk_disable(ce->clk); | ||
233 | ce->status = PCE_STATUS_ACQUIRED; | ||
234 | } | ||
235 | } | ||
236 | |||
237 | mutex_unlock(&prd->lock); | ||
238 | |||
239 | return 0; | ||
240 | } | ||
241 | |||
242 | /** | ||
243 | * pm_runtime_clk_resume - Enable clocks in a device's runtime PM clock list. | ||
244 | * @dev: Device to enable the clocks for. | ||
245 | */ | ||
246 | int pm_runtime_clk_resume(struct device *dev) | ||
247 | { | ||
248 | struct pm_runtime_clk_data *prd = __to_prd(dev); | ||
249 | struct pm_clock_entry *ce; | ||
250 | |||
251 | dev_dbg(dev, "%s()\n", __func__); | ||
252 | |||
253 | if (!prd) | ||
254 | return 0; | ||
255 | |||
256 | mutex_lock(&prd->lock); | ||
257 | |||
258 | list_for_each_entry(ce, &prd->clock_list, node) { | ||
259 | if (ce->status == PCE_STATUS_NONE) | ||
260 | pm_runtime_clk_acquire(dev, ce); | ||
261 | |||
262 | if (ce->status < PCE_STATUS_ERROR) { | ||
263 | clk_enable(ce->clk); | ||
264 | ce->status = PCE_STATUS_ENABLED; | ||
265 | } | ||
266 | } | ||
267 | |||
268 | mutex_unlock(&prd->lock); | ||
269 | |||
270 | return 0; | ||
271 | } | ||
272 | |||
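pm_runtime_clk_suspend() and pm_runtime_clk_resume() are meant to be called from a power domain's (or bus type's) runtime PM callbacks; note that clk_get() is deferred to the first suspend via pm_runtime_clk_acquire() above. A rough sketch of such callbacks, assuming the pm_generic_runtime_* helpers from generic_ops.c later in this patch are used to invoke the driver's own handlers:

    /* Sketch: runtime PM callbacks of a hypothetical platform power domain. */
    static int example_domain_runtime_suspend(struct device *dev)
    {
            int ret;

            ret = pm_generic_runtime_suspend(dev);  /* driver callback, if any */
            if (ret)
                    return ret;

            return pm_runtime_clk_suspend(dev);     /* gate the listed clocks */
    }

    static int example_domain_runtime_resume(struct device *dev)
    {
            pm_runtime_clk_resume(dev);             /* ungate the listed clocks */
            return pm_generic_runtime_resume(dev);
    }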
273 | /** | ||
274 | * pm_runtime_clk_notify - Notify routine for device addition and removal. | ||
275 | * @nb: Notifier block object this function is a member of. | ||
276 | * @action: Operation being carried out by the caller. | ||
277 | * @data: Device the routine is being run for. | ||
278 | * | ||
279 | * For this function to work, @nb must be a member of an object of type | ||
280 | * struct pm_clk_notifier_block containing all of the requisite data. | ||
281 | * Specifically, the pwr_domain member of that object is copied to the device's | ||
282 | * pwr_domain field and its con_ids member is used to populate the device's list | ||
283 | * of runtime PM clocks, depending on @action. | ||
284 | * | ||
285 | * If the device's pwr_domain field is already populated with a value different | ||
286 | * from the one stored in the struct pm_clk_notifier_block object, the function | ||
287 | * does nothing. | ||
288 | */ | ||
289 | static int pm_runtime_clk_notify(struct notifier_block *nb, | ||
290 | unsigned long action, void *data) | ||
291 | { | ||
292 | struct pm_clk_notifier_block *clknb; | ||
293 | struct device *dev = data; | ||
294 | char **con_id; | ||
295 | int error; | ||
296 | |||
297 | dev_dbg(dev, "%s() %ld\n", __func__, action); | ||
298 | |||
299 | clknb = container_of(nb, struct pm_clk_notifier_block, nb); | ||
300 | |||
301 | switch (action) { | ||
302 | case BUS_NOTIFY_ADD_DEVICE: | ||
303 | if (dev->pwr_domain) | ||
304 | break; | ||
305 | |||
306 | error = pm_runtime_clk_init(dev); | ||
307 | if (error) | ||
308 | break; | ||
309 | |||
310 | dev->pwr_domain = clknb->pwr_domain; | ||
311 | if (clknb->con_ids[0]) { | ||
312 | for (con_id = clknb->con_ids; *con_id; con_id++) | ||
313 | pm_runtime_clk_add(dev, *con_id); | ||
314 | } else { | ||
315 | pm_runtime_clk_add(dev, NULL); | ||
316 | } | ||
317 | |||
318 | break; | ||
319 | case BUS_NOTIFY_DEL_DEVICE: | ||
320 | if (dev->pwr_domain != clknb->pwr_domain) | ||
321 | break; | ||
322 | |||
323 | dev->pwr_domain = NULL; | ||
324 | pm_runtime_clk_destroy(dev); | ||
325 | break; | ||
326 | } | ||
327 | |||
328 | return 0; | ||
329 | } | ||
330 | |||
331 | #else /* !CONFIG_PM_RUNTIME */ | ||
332 | |||
333 | /** | ||
334 | * enable_clock - Enable a device clock. | ||
335 | * @dev: Device whose clock is to be enabled. | ||
336 | * @con_id: Connection ID of the clock. | ||
337 | */ | ||
338 | static void enable_clock(struct device *dev, const char *con_id) | ||
339 | { | ||
340 | struct clk *clk; | ||
341 | |||
342 | clk = clk_get(dev, con_id); | ||
343 | if (!IS_ERR(clk)) { | ||
344 | clk_enable(clk); | ||
345 | clk_put(clk); | ||
346 | dev_info(dev, "Runtime PM disabled, clock forced on.\n"); | ||
347 | } | ||
348 | } | ||
349 | |||
350 | /** | ||
351 | * disable_clock - Disable a device clock. | ||
352 | * @dev: Device whose clock is to be disabled. | ||
353 | * @con_id: Connection ID of the clock. | ||
354 | */ | ||
355 | static void disable_clock(struct device *dev, const char *con_id) | ||
356 | { | ||
357 | struct clk *clk; | ||
358 | |||
359 | clk = clk_get(dev, con_id); | ||
360 | if (!IS_ERR(clk)) { | ||
361 | clk_disable(clk); | ||
362 | clk_put(clk); | ||
363 | dev_info(dev, "Runtime PM disabled, clock forced off.\n"); | ||
364 | } | ||
365 | } | ||
366 | |||
367 | /** | ||
368 | * pm_runtime_clk_notify - Notify routine for device addition and removal. | ||
369 | * @nb: Notifier block object this function is a member of. | ||
370 | * @action: Operation being carried out by the caller. | ||
371 | * @data: Device the routine is being run for. | ||
372 | * | ||
373 | * For this function to work, @nb must be a member of an object of type | ||
374 | * struct pm_clk_notifier_block containing all of the requisite data. | ||
375 | * Specifically, the con_ids member of that object is used to enable or disable | ||
376 | * the device's clocks, depending on @action. | ||
377 | */ | ||
378 | static int pm_runtime_clk_notify(struct notifier_block *nb, | ||
379 | unsigned long action, void *data) | ||
380 | { | ||
381 | struct pm_clk_notifier_block *clknb; | ||
382 | struct device *dev = data; | ||
383 | char **con_id; | ||
384 | |||
385 | dev_dbg(dev, "%s() %ld\n", __func__, action); | ||
386 | |||
387 | clknb = container_of(nb, struct pm_clk_notifier_block, nb); | ||
388 | |||
389 | switch (action) { | ||
390 | case BUS_NOTIFY_BIND_DRIVER: | ||
391 | if (clknb->con_ids[0]) { | ||
392 | for (con_id = clknb->con_ids; *con_id; con_id++) | ||
393 | enable_clock(dev, *con_id); | ||
394 | } else { | ||
395 | enable_clock(dev, NULL); | ||
396 | } | ||
397 | break; | ||
398 | case BUS_NOTIFY_UNBOUND_DRIVER: | ||
399 | if (clknb->con_ids[0]) { | ||
400 | for (con_id = clknb->con_ids; *con_id; con_id++) | ||
401 | disable_clock(dev, *con_id); | ||
402 | } else { | ||
403 | disable_clock(dev, NULL); | ||
404 | } | ||
405 | break; | ||
406 | } | ||
407 | |||
408 | return 0; | ||
409 | } | ||
410 | |||
411 | #endif /* !CONFIG_PM_RUNTIME */ | ||
412 | |||
413 | /** | ||
414 | * pm_runtime_clk_add_notifier - Add bus type notifier for runtime PM clocks. | ||
415 | * @bus: Bus type to add the notifier to. | ||
416 | * @clknb: Notifier to be added to the given bus type. | ||
417 | * | ||
418 | * The nb member of @clknb is not expected to be initialized and its | ||
419 | * notifier_call member will be replaced with pm_runtime_clk_notify(). However, | ||
420 | * the remaining members of @clknb should be populated prior to calling this | ||
421 | * routine. | ||
422 | */ | ||
423 | void pm_runtime_clk_add_notifier(struct bus_type *bus, | ||
424 | struct pm_clk_notifier_block *clknb) | ||
425 | { | ||
426 | if (!bus || !clknb) | ||
427 | return; | ||
428 | |||
429 | clknb->nb.notifier_call = pm_runtime_clk_notify; | ||
430 | bus_register_notifier(bus, &clknb->nb); | ||
431 | } | ||
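A platform would typically register such a notifier once during early init, for instance on the platform bus. The sketch below reuses the illustrative runtime callbacks from the earlier sketch; the clock names, the example_* identifiers and the exact dev_power_domain layout (only its ops member is dereferenced in this patch) are assumptions:

    /* Sketch: per-SoC setup registering a clock-management notifier. */
    static struct dev_power_domain example_pwr_domain = {
            .ops = {
                    .runtime_suspend = example_domain_runtime_suspend,
                    .runtime_resume  = example_domain_runtime_resume,
            },
    };

    static struct pm_clk_notifier_block example_clk_nb = {
            .pwr_domain = &example_pwr_domain,
            .con_ids = { "fck", "ick", NULL },      /* assumed clock connection IDs */
    };

    static int __init example_pm_init(void)
    {
            pm_runtime_clk_add_notifier(&platform_bus_type, &example_clk_nb);
            return 0;
    }
    core_initcall(example_pm_init);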
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c index 4b29d4981253..cb3bb368681c 100644 --- a/drivers/base/power/generic_ops.c +++ b/drivers/base/power/generic_ops.c | |||
@@ -39,14 +39,14 @@ EXPORT_SYMBOL_GPL(pm_generic_runtime_idle); | |||
39 | * | 39 | * |
40 | * If PM operations are defined for the @dev's driver and they include | 40 | * If PM operations are defined for the @dev's driver and they include |
41 | * ->runtime_suspend(), execute it and return its error code. Otherwise, | 41 | * ->runtime_suspend(), execute it and return its error code. Otherwise, |
42 | * return -EINVAL. | 42 | * return 0. |
43 | */ | 43 | */ |
44 | int pm_generic_runtime_suspend(struct device *dev) | 44 | int pm_generic_runtime_suspend(struct device *dev) |
45 | { | 45 | { |
46 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | 46 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; |
47 | int ret; | 47 | int ret; |
48 | 48 | ||
49 | ret = pm && pm->runtime_suspend ? pm->runtime_suspend(dev) : -EINVAL; | 49 | ret = pm && pm->runtime_suspend ? pm->runtime_suspend(dev) : 0; |
50 | 50 | ||
51 | return ret; | 51 | return ret; |
52 | } | 52 | } |
@@ -58,14 +58,14 @@ EXPORT_SYMBOL_GPL(pm_generic_runtime_suspend); | |||
58 | * | 58 | * |
59 | * If PM operations are defined for the @dev's driver and they include | 59 | * If PM operations are defined for the @dev's driver and they include |
60 | * ->runtime_resume(), execute it and return its error code. Otherwise, | 60 | * ->runtime_resume(), execute it and return its error code. Otherwise, |
61 | * return -EINVAL. | 61 | * return 0. |
62 | */ | 62 | */ |
63 | int pm_generic_runtime_resume(struct device *dev) | 63 | int pm_generic_runtime_resume(struct device *dev) |
64 | { | 64 | { |
65 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | 65 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; |
66 | int ret; | 66 | int ret; |
67 | 67 | ||
68 | ret = pm && pm->runtime_resume ? pm->runtime_resume(dev) : -EINVAL; | 68 | ret = pm && pm->runtime_resume ? pm->runtime_resume(dev) : 0; |
69 | 69 | ||
70 | return ret; | 70 | return ret; |
71 | } | 71 | } |
@@ -74,6 +74,23 @@ EXPORT_SYMBOL_GPL(pm_generic_runtime_resume); | |||
74 | 74 | ||
75 | #ifdef CONFIG_PM_SLEEP | 75 | #ifdef CONFIG_PM_SLEEP |
76 | /** | 76 | /** |
77 | * pm_generic_prepare - Generic routine preparing a device for power transition. | ||
78 | * @dev: Device to prepare. | ||
79 | * | ||
80 | * Prepare a device for a system-wide power transition. | ||
81 | */ | ||
82 | int pm_generic_prepare(struct device *dev) | ||
83 | { | ||
84 | struct device_driver *drv = dev->driver; | ||
85 | int ret = 0; | ||
86 | |||
87 | if (drv && drv->pm && drv->pm->prepare) | ||
88 | ret = drv->pm->prepare(dev); | ||
89 | |||
90 | return ret; | ||
91 | } | ||
92 | |||
93 | /** | ||
77 | * __pm_generic_call - Generic suspend/freeze/poweroff/thaw subsystem callback. | 94 | * __pm_generic_call - Generic suspend/freeze/poweroff/thaw subsystem callback. |
78 | * @dev: Device to handle. | 95 | * @dev: Device to handle. |
79 | * @event: PM transition of the system under way. | 96 | * @event: PM transition of the system under way. |
@@ -185,7 +202,7 @@ static int __pm_generic_resume(struct device *dev, int event) | |||
185 | return 0; | 202 | return 0; |
186 | 203 | ||
187 | ret = callback(dev); | 204 | ret = callback(dev); |
188 | if (!ret) { | 205 | if (!ret && pm_runtime_enabled(dev)) { |
189 | pm_runtime_disable(dev); | 206 | pm_runtime_disable(dev); |
190 | pm_runtime_set_active(dev); | 207 | pm_runtime_set_active(dev); |
191 | pm_runtime_enable(dev); | 208 | pm_runtime_enable(dev); |
@@ -213,16 +230,38 @@ int pm_generic_restore(struct device *dev) | |||
213 | return __pm_generic_resume(dev, PM_EVENT_RESTORE); | 230 | return __pm_generic_resume(dev, PM_EVENT_RESTORE); |
214 | } | 231 | } |
215 | EXPORT_SYMBOL_GPL(pm_generic_restore); | 232 | EXPORT_SYMBOL_GPL(pm_generic_restore); |
233 | |||
234 | /** | ||
235 | * pm_generic_complete - Generic routine completing a device power transition. | ||
236 | * @dev: Device to handle. | ||
237 | * | ||
238 | * Complete a device power transition during a system-wide power transition. | ||
239 | */ | ||
240 | void pm_generic_complete(struct device *dev) | ||
241 | { | ||
242 | struct device_driver *drv = dev->driver; | ||
243 | |||
244 | if (drv && drv->pm && drv->pm->complete) | ||
245 | drv->pm->complete(dev); | ||
246 | |||
247 | /* | ||
248 | * Let runtime PM try to suspend devices that haven't been in use before | ||
249 | * going into the system-wide sleep state we're resuming from. | ||
250 | */ | ||
251 | pm_runtime_idle(dev); | ||
252 | } | ||
216 | #endif /* CONFIG_PM_SLEEP */ | 253 | #endif /* CONFIG_PM_SLEEP */ |
217 | 254 | ||
218 | struct dev_pm_ops generic_subsys_pm_ops = { | 255 | struct dev_pm_ops generic_subsys_pm_ops = { |
219 | #ifdef CONFIG_PM_SLEEP | 256 | #ifdef CONFIG_PM_SLEEP |
257 | .prepare = pm_generic_prepare, | ||
220 | .suspend = pm_generic_suspend, | 258 | .suspend = pm_generic_suspend, |
221 | .resume = pm_generic_resume, | 259 | .resume = pm_generic_resume, |
222 | .freeze = pm_generic_freeze, | 260 | .freeze = pm_generic_freeze, |
223 | .thaw = pm_generic_thaw, | 261 | .thaw = pm_generic_thaw, |
224 | .poweroff = pm_generic_poweroff, | 262 | .poweroff = pm_generic_poweroff, |
225 | .restore = pm_generic_restore, | 263 | .restore = pm_generic_restore, |
264 | .complete = pm_generic_complete, | ||
226 | #endif | 265 | #endif |
227 | #ifdef CONFIG_PM_RUNTIME | 266 | #ifdef CONFIG_PM_RUNTIME |
228 | .runtime_suspend = pm_generic_runtime_suspend, | 267 | .runtime_suspend = pm_generic_runtime_suspend, |
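With pm_generic_prepare() and pm_generic_complete() added alongside the existing helpers, a subsystem that merely forwards system sleep and runtime PM to driver callbacks can either use generic_subsys_pm_ops directly or assemble its own dev_pm_ops from the individual helpers. A hedged sketch (the bus name is made up, and a real definition would keep the usual CONFIG_PM_SLEEP/CONFIG_PM_RUNTIME guards):

    /* Sketch: a bus type reusing the generic PM helpers. */
    static const struct dev_pm_ops example_bus_pm_ops = {
            .prepare         = pm_generic_prepare,
            .suspend         = pm_generic_suspend,
            .resume          = pm_generic_resume,
            .complete        = pm_generic_complete,
            .runtime_suspend = pm_generic_runtime_suspend,
            .runtime_resume  = pm_generic_runtime_resume,
    };

    struct bus_type example_bus_type = {
            .name = "example",
            .pm   = &example_bus_pm_ops,
    };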
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 276d5a701dc3..06f09bf89cb2 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c | |||
@@ -8,7 +8,7 @@ | |||
8 | * | 8 | * |
9 | * | 9 | * |
10 | * The driver model core calls device_pm_add() when a device is registered. | 10 | * The driver model core calls device_pm_add() when a device is registered. |
11 | * This will intialize the embedded device_pm_info object in the device | 11 | * This will initialize the embedded device_pm_info object in the device |
12 | * and add it to the list of power-controlled devices. sysfs entries for | 12 | * and add it to the list of power-controlled devices. sysfs entries for |
13 | * controlling device power management will also be added. | 13 | * controlling device power management will also be added. |
14 | * | 14 | * |
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/interrupt.h> | 26 | #include <linux/interrupt.h> |
27 | #include <linux/sched.h> | 27 | #include <linux/sched.h> |
28 | #include <linux/async.h> | 28 | #include <linux/async.h> |
29 | #include <linux/suspend.h> | ||
29 | 30 | ||
30 | #include "../base.h" | 31 | #include "../base.h" |
31 | #include "power.h" | 32 | #include "power.h" |
@@ -41,15 +42,14 @@ | |||
41 | */ | 42 | */ |
42 | 43 | ||
43 | LIST_HEAD(dpm_list); | 44 | LIST_HEAD(dpm_list); |
45 | LIST_HEAD(dpm_prepared_list); | ||
46 | LIST_HEAD(dpm_suspended_list); | ||
47 | LIST_HEAD(dpm_noirq_list); | ||
44 | 48 | ||
45 | static DEFINE_MUTEX(dpm_list_mtx); | 49 | static DEFINE_MUTEX(dpm_list_mtx); |
46 | static pm_message_t pm_transition; | 50 | static pm_message_t pm_transition; |
47 | 51 | ||
48 | /* | 52 | static int async_error; |
49 | * Set once the preparation of devices for a PM transition has started, reset | ||
50 | * before starting to resume devices. Protected by dpm_list_mtx. | ||
51 | */ | ||
52 | static bool transition_started; | ||
53 | 53 | ||
54 | /** | 54 | /** |
55 | * device_pm_init - Initialize the PM-related part of a device object. | 55 | * device_pm_init - Initialize the PM-related part of a device object. |
@@ -57,11 +57,14 @@ static bool transition_started; | |||
57 | */ | 57 | */ |
58 | void device_pm_init(struct device *dev) | 58 | void device_pm_init(struct device *dev) |
59 | { | 59 | { |
60 | dev->power.status = DPM_ON; | 60 | dev->power.is_prepared = false; |
61 | dev->power.is_suspended = false; | ||
61 | init_completion(&dev->power.completion); | 62 | init_completion(&dev->power.completion); |
62 | complete_all(&dev->power.completion); | 63 | complete_all(&dev->power.completion); |
63 | dev->power.wakeup_count = 0; | 64 | dev->power.wakeup = NULL; |
65 | spin_lock_init(&dev->power.lock); | ||
64 | pm_runtime_init(dev); | 66 | pm_runtime_init(dev); |
67 | INIT_LIST_HEAD(&dev->power.entry); | ||
65 | } | 68 | } |
66 | 69 | ||
67 | /** | 70 | /** |
@@ -87,22 +90,11 @@ void device_pm_unlock(void) | |||
87 | void device_pm_add(struct device *dev) | 90 | void device_pm_add(struct device *dev) |
88 | { | 91 | { |
89 | pr_debug("PM: Adding info for %s:%s\n", | 92 | pr_debug("PM: Adding info for %s:%s\n", |
90 | dev->bus ? dev->bus->name : "No Bus", | 93 | dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); |
91 | kobject_name(&dev->kobj)); | ||
92 | mutex_lock(&dpm_list_mtx); | 94 | mutex_lock(&dpm_list_mtx); |
93 | if (dev->parent) { | 95 | if (dev->parent && dev->parent->power.is_prepared) |
94 | if (dev->parent->power.status >= DPM_SUSPENDING) | 96 | dev_warn(dev, "parent %s should not be sleeping\n", |
95 | dev_warn(dev, "parent %s should not be sleeping\n", | 97 | dev_name(dev->parent)); |
96 | dev_name(dev->parent)); | ||
97 | } else if (transition_started) { | ||
98 | /* | ||
99 | * We refuse to register parentless devices while a PM | ||
100 | * transition is in progress in order to avoid leaving them | ||
101 | * unhandled down the road | ||
102 | */ | ||
103 | dev_WARN(dev, "Parentless device registered during a PM transaction\n"); | ||
104 | } | ||
105 | |||
106 | list_add_tail(&dev->power.entry, &dpm_list); | 98 | list_add_tail(&dev->power.entry, &dpm_list); |
107 | mutex_unlock(&dpm_list_mtx); | 99 | mutex_unlock(&dpm_list_mtx); |
108 | } | 100 | } |
@@ -114,12 +106,12 @@ void device_pm_add(struct device *dev) | |||
114 | void device_pm_remove(struct device *dev) | 106 | void device_pm_remove(struct device *dev) |
115 | { | 107 | { |
116 | pr_debug("PM: Removing info for %s:%s\n", | 108 | pr_debug("PM: Removing info for %s:%s\n", |
117 | dev->bus ? dev->bus->name : "No Bus", | 109 | dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); |
118 | kobject_name(&dev->kobj)); | ||
119 | complete_all(&dev->power.completion); | 110 | complete_all(&dev->power.completion); |
120 | mutex_lock(&dpm_list_mtx); | 111 | mutex_lock(&dpm_list_mtx); |
121 | list_del_init(&dev->power.entry); | 112 | list_del_init(&dev->power.entry); |
122 | mutex_unlock(&dpm_list_mtx); | 113 | mutex_unlock(&dpm_list_mtx); |
114 | device_wakeup_disable(dev); | ||
123 | pm_runtime_remove(dev); | 115 | pm_runtime_remove(dev); |
124 | } | 116 | } |
125 | 117 | ||
@@ -131,10 +123,8 @@ void device_pm_remove(struct device *dev) | |||
131 | void device_pm_move_before(struct device *deva, struct device *devb) | 123 | void device_pm_move_before(struct device *deva, struct device *devb) |
132 | { | 124 | { |
133 | pr_debug("PM: Moving %s:%s before %s:%s\n", | 125 | pr_debug("PM: Moving %s:%s before %s:%s\n", |
134 | deva->bus ? deva->bus->name : "No Bus", | 126 | deva->bus ? deva->bus->name : "No Bus", dev_name(deva), |
135 | kobject_name(&deva->kobj), | 127 | devb->bus ? devb->bus->name : "No Bus", dev_name(devb)); |
136 | devb->bus ? devb->bus->name : "No Bus", | ||
137 | kobject_name(&devb->kobj)); | ||
138 | /* Delete deva from dpm_list and reinsert before devb. */ | 128 | /* Delete deva from dpm_list and reinsert before devb. */ |
139 | list_move_tail(&deva->power.entry, &devb->power.entry); | 129 | list_move_tail(&deva->power.entry, &devb->power.entry); |
140 | } | 130 | } |
@@ -147,10 +137,8 @@ void device_pm_move_before(struct device *deva, struct device *devb) | |||
147 | void device_pm_move_after(struct device *deva, struct device *devb) | 137 | void device_pm_move_after(struct device *deva, struct device *devb) |
148 | { | 138 | { |
149 | pr_debug("PM: Moving %s:%s after %s:%s\n", | 139 | pr_debug("PM: Moving %s:%s after %s:%s\n", |
150 | deva->bus ? deva->bus->name : "No Bus", | 140 | deva->bus ? deva->bus->name : "No Bus", dev_name(deva), |
151 | kobject_name(&deva->kobj), | 141 | devb->bus ? devb->bus->name : "No Bus", dev_name(devb)); |
152 | devb->bus ? devb->bus->name : "No Bus", | ||
153 | kobject_name(&devb->kobj)); | ||
154 | /* Delete deva from dpm_list and reinsert after devb. */ | 142 | /* Delete deva from dpm_list and reinsert after devb. */ |
155 | list_move(&deva->power.entry, &devb->power.entry); | 143 | list_move(&deva->power.entry, &devb->power.entry); |
156 | } | 144 | } |
@@ -162,8 +150,7 @@ void device_pm_move_after(struct device *deva, struct device *devb) | |||
162 | void device_pm_move_last(struct device *dev) | 150 | void device_pm_move_last(struct device *dev) |
163 | { | 151 | { |
164 | pr_debug("PM: Moving %s:%s to end of list\n", | 152 | pr_debug("PM: Moving %s:%s to end of list\n", |
165 | dev->bus ? dev->bus->name : "No Bus", | 153 | dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); |
166 | kobject_name(&dev->kobj)); | ||
167 | list_move_tail(&dev->power.entry, &dpm_list); | 154 | list_move_tail(&dev->power.entry, &dpm_list); |
168 | } | 155 | } |
169 | 156 | ||
@@ -248,7 +235,7 @@ static int pm_op(struct device *dev, | |||
248 | } | 235 | } |
249 | break; | 236 | break; |
250 | #endif /* CONFIG_SUSPEND */ | 237 | #endif /* CONFIG_SUSPEND */ |
251 | #ifdef CONFIG_HIBERNATION | 238 | #ifdef CONFIG_HIBERNATE_CALLBACKS |
252 | case PM_EVENT_FREEZE: | 239 | case PM_EVENT_FREEZE: |
253 | case PM_EVENT_QUIESCE: | 240 | case PM_EVENT_QUIESCE: |
254 | if (ops->freeze) { | 241 | if (ops->freeze) { |
@@ -275,7 +262,7 @@ static int pm_op(struct device *dev, | |||
275 | suspend_report_result(ops->restore, error); | 262 | suspend_report_result(ops->restore, error); |
276 | } | 263 | } |
277 | break; | 264 | break; |
278 | #endif /* CONFIG_HIBERNATION */ | 265 | #endif /* CONFIG_HIBERNATE_CALLBACKS */ |
279 | default: | 266 | default: |
280 | error = -EINVAL; | 267 | error = -EINVAL; |
281 | } | 268 | } |
@@ -299,7 +286,7 @@ static int pm_noirq_op(struct device *dev, | |||
299 | pm_message_t state) | 286 | pm_message_t state) |
300 | { | 287 | { |
301 | int error = 0; | 288 | int error = 0; |
302 | ktime_t calltime, delta, rettime; | 289 | ktime_t calltime = ktime_set(0, 0), delta, rettime; |
303 | 290 | ||
304 | if (initcall_debug) { | 291 | if (initcall_debug) { |
305 | pr_info("calling %s+ @ %i, parent: %s\n", | 292 | pr_info("calling %s+ @ %i, parent: %s\n", |
@@ -323,7 +310,7 @@ static int pm_noirq_op(struct device *dev, | |||
323 | } | 310 | } |
324 | break; | 311 | break; |
325 | #endif /* CONFIG_SUSPEND */ | 312 | #endif /* CONFIG_SUSPEND */ |
326 | #ifdef CONFIG_HIBERNATION | 313 | #ifdef CONFIG_HIBERNATE_CALLBACKS |
327 | case PM_EVENT_FREEZE: | 314 | case PM_EVENT_FREEZE: |
328 | case PM_EVENT_QUIESCE: | 315 | case PM_EVENT_QUIESCE: |
329 | if (ops->freeze_noirq) { | 316 | if (ops->freeze_noirq) { |
@@ -350,7 +337,7 @@ static int pm_noirq_op(struct device *dev, | |||
350 | suspend_report_result(ops->restore_noirq, error); | 337 | suspend_report_result(ops->restore_noirq, error); |
351 | } | 338 | } |
352 | break; | 339 | break; |
353 | #endif /* CONFIG_HIBERNATION */ | 340 | #endif /* CONFIG_HIBERNATE_CALLBACKS */ |
354 | default: | 341 | default: |
355 | error = -EINVAL; | 342 | error = -EINVAL; |
356 | } | 343 | } |
@@ -401,13 +388,13 @@ static void pm_dev_err(struct device *dev, pm_message_t state, char *info, | |||
401 | int error) | 388 | int error) |
402 | { | 389 | { |
403 | printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n", | 390 | printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n", |
404 | kobject_name(&dev->kobj), pm_verb(state.event), info, error); | 391 | dev_name(dev), pm_verb(state.event), info, error); |
405 | } | 392 | } |
406 | 393 | ||
407 | static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info) | 394 | static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info) |
408 | { | 395 | { |
409 | ktime_t calltime; | 396 | ktime_t calltime; |
410 | s64 usecs64; | 397 | u64 usecs64; |
411 | int usecs; | 398 | int usecs; |
412 | 399 | ||
413 | calltime = ktime_get(); | 400 | calltime = ktime_get(); |
@@ -438,26 +425,20 @@ static int device_resume_noirq(struct device *dev, pm_message_t state) | |||
438 | TRACE_DEVICE(dev); | 425 | TRACE_DEVICE(dev); |
439 | TRACE_RESUME(0); | 426 | TRACE_RESUME(0); |
440 | 427 | ||
441 | if (dev->bus && dev->bus->pm) { | 428 | if (dev->pwr_domain) { |
442 | pm_dev_dbg(dev, state, "EARLY "); | 429 | pm_dev_dbg(dev, state, "EARLY power domain "); |
443 | error = pm_noirq_op(dev, dev->bus->pm, state); | 430 | error = pm_noirq_op(dev, &dev->pwr_domain->ops, state); |
444 | if (error) | 431 | } else if (dev->type && dev->type->pm) { |
445 | goto End; | ||
446 | } | ||
447 | |||
448 | if (dev->type && dev->type->pm) { | ||
449 | pm_dev_dbg(dev, state, "EARLY type "); | 432 | pm_dev_dbg(dev, state, "EARLY type "); |
450 | error = pm_noirq_op(dev, dev->type->pm, state); | 433 | error = pm_noirq_op(dev, dev->type->pm, state); |
451 | if (error) | 434 | } else if (dev->class && dev->class->pm) { |
452 | goto End; | ||
453 | } | ||
454 | |||
455 | if (dev->class && dev->class->pm) { | ||
456 | pm_dev_dbg(dev, state, "EARLY class "); | 435 | pm_dev_dbg(dev, state, "EARLY class "); |
457 | error = pm_noirq_op(dev, dev->class->pm, state); | 436 | error = pm_noirq_op(dev, dev->class->pm, state); |
437 | } else if (dev->bus && dev->bus->pm) { | ||
438 | pm_dev_dbg(dev, state, "EARLY "); | ||
439 | error = pm_noirq_op(dev, dev->bus->pm, state); | ||
458 | } | 440 | } |
459 | 441 | ||
460 | End: | ||
461 | TRACE_RESUME(error); | 442 | TRACE_RESUME(error); |
462 | return error; | 443 | return error; |
463 | } | 444 | } |
@@ -471,20 +452,24 @@ End: | |||
471 | */ | 452 | */ |
472 | void dpm_resume_noirq(pm_message_t state) | 453 | void dpm_resume_noirq(pm_message_t state) |
473 | { | 454 | { |
474 | struct device *dev; | ||
475 | ktime_t starttime = ktime_get(); | 455 | ktime_t starttime = ktime_get(); |
476 | 456 | ||
477 | mutex_lock(&dpm_list_mtx); | 457 | mutex_lock(&dpm_list_mtx); |
478 | transition_started = false; | 458 | while (!list_empty(&dpm_noirq_list)) { |
479 | list_for_each_entry(dev, &dpm_list, power.entry) | 459 | struct device *dev = to_device(dpm_noirq_list.next); |
480 | if (dev->power.status > DPM_OFF) { | 460 | int error; |
481 | int error; | ||
482 | 461 | ||
483 | dev->power.status = DPM_OFF; | 462 | get_device(dev); |
484 | error = device_resume_noirq(dev, state); | 463 | list_move_tail(&dev->power.entry, &dpm_suspended_list); |
485 | if (error) | 464 | mutex_unlock(&dpm_list_mtx); |
486 | pm_dev_err(dev, state, " early", error); | 465 | |
487 | } | 466 | error = device_resume_noirq(dev, state); |
467 | if (error) | ||
468 | pm_dev_err(dev, state, " early", error); | ||
469 | |||
470 | mutex_lock(&dpm_list_mtx); | ||
471 | put_device(dev); | ||
472 | } | ||
488 | mutex_unlock(&dpm_list_mtx); | 473 | mutex_unlock(&dpm_list_mtx); |
489 | dpm_show_time(starttime, state, "early"); | 474 | dpm_show_time(starttime, state, "early"); |
490 | resume_device_irqs(); | 475 | resume_device_irqs(); |
@@ -527,39 +512,53 @@ static int device_resume(struct device *dev, pm_message_t state, bool async) | |||
527 | dpm_wait(dev->parent, async); | 512 | dpm_wait(dev->parent, async); |
528 | device_lock(dev); | 513 | device_lock(dev); |
529 | 514 | ||
530 | dev->power.status = DPM_RESUMING; | 515 | /* |
516 | * This is a fib. But we'll allow new children to be added below | ||
517 | * a resumed device, even if the device hasn't been completed yet. | ||
518 | */ | ||
519 | dev->power.is_prepared = false; | ||
531 | 520 | ||
532 | if (dev->bus) { | 521 | if (!dev->power.is_suspended) |
533 | if (dev->bus->pm) { | 522 | goto Unlock; |
534 | pm_dev_dbg(dev, state, ""); | 523 | |
535 | error = pm_op(dev, dev->bus->pm, state); | 524 | if (dev->pwr_domain) { |
536 | } else if (dev->bus->resume) { | 525 | pm_dev_dbg(dev, state, "power domain "); |
537 | pm_dev_dbg(dev, state, "legacy "); | 526 | error = pm_op(dev, &dev->pwr_domain->ops, state); |
538 | error = legacy_resume(dev, dev->bus->resume); | 527 | goto End; |
539 | } | ||
540 | if (error) | ||
541 | goto End; | ||
542 | } | 528 | } |
543 | 529 | ||
544 | if (dev->type) { | 530 | if (dev->type && dev->type->pm) { |
545 | if (dev->type->pm) { | 531 | pm_dev_dbg(dev, state, "type "); |
546 | pm_dev_dbg(dev, state, "type "); | 532 | error = pm_op(dev, dev->type->pm, state); |
547 | error = pm_op(dev, dev->type->pm, state); | 533 | goto End; |
548 | } | ||
549 | if (error) | ||
550 | goto End; | ||
551 | } | 534 | } |
552 | 535 | ||
553 | if (dev->class) { | 536 | if (dev->class) { |
554 | if (dev->class->pm) { | 537 | if (dev->class->pm) { |
555 | pm_dev_dbg(dev, state, "class "); | 538 | pm_dev_dbg(dev, state, "class "); |
556 | error = pm_op(dev, dev->class->pm, state); | 539 | error = pm_op(dev, dev->class->pm, state); |
540 | goto End; | ||
557 | } else if (dev->class->resume) { | 541 | } else if (dev->class->resume) { |
558 | pm_dev_dbg(dev, state, "legacy class "); | 542 | pm_dev_dbg(dev, state, "legacy class "); |
559 | error = legacy_resume(dev, dev->class->resume); | 543 | error = legacy_resume(dev, dev->class->resume); |
544 | goto End; | ||
560 | } | 545 | } |
561 | } | 546 | } |
547 | |||
548 | if (dev->bus) { | ||
549 | if (dev->bus->pm) { | ||
550 | pm_dev_dbg(dev, state, ""); | ||
551 | error = pm_op(dev, dev->bus->pm, state); | ||
552 | } else if (dev->bus->resume) { | ||
553 | pm_dev_dbg(dev, state, "legacy "); | ||
554 | error = legacy_resume(dev, dev->bus->resume); | ||
555 | } | ||
556 | } | ||
557 | |||
562 | End: | 558 | End: |
559 | dev->power.is_suspended = false; | ||
560 | |||
561 | Unlock: | ||
563 | device_unlock(dev); | 562 | device_unlock(dev); |
564 | complete_all(&dev->power.completion); | 563 | complete_all(&dev->power.completion); |
565 | 564 | ||
@@ -591,20 +590,18 @@ static bool is_async(struct device *dev) | |||
591 | * Execute the appropriate "resume" callback for all devices whose status | 590 | * Execute the appropriate "resume" callback for all devices whose status |
592 | * indicates that they are suspended. | 591 | * indicates that they are suspended. |
593 | */ | 592 | */ |
594 | static void dpm_resume(pm_message_t state) | 593 | void dpm_resume(pm_message_t state) |
595 | { | 594 | { |
596 | struct list_head list; | ||
597 | struct device *dev; | 595 | struct device *dev; |
598 | ktime_t starttime = ktime_get(); | 596 | ktime_t starttime = ktime_get(); |
599 | 597 | ||
600 | INIT_LIST_HEAD(&list); | 598 | might_sleep(); |
599 | |||
601 | mutex_lock(&dpm_list_mtx); | 600 | mutex_lock(&dpm_list_mtx); |
602 | pm_transition = state; | 601 | pm_transition = state; |
602 | async_error = 0; | ||
603 | 603 | ||
604 | list_for_each_entry(dev, &dpm_list, power.entry) { | 604 | list_for_each_entry(dev, &dpm_suspended_list, power.entry) { |
605 | if (dev->power.status < DPM_OFF) | ||
606 | continue; | ||
607 | |||
608 | INIT_COMPLETION(dev->power.completion); | 605 | INIT_COMPLETION(dev->power.completion); |
609 | if (is_async(dev)) { | 606 | if (is_async(dev)) { |
610 | get_device(dev); | 607 | get_device(dev); |
@@ -612,28 +609,24 @@ static void dpm_resume(pm_message_t state) | |||
612 | } | 609 | } |
613 | } | 610 | } |
614 | 611 | ||
615 | while (!list_empty(&dpm_list)) { | 612 | while (!list_empty(&dpm_suspended_list)) { |
616 | dev = to_device(dpm_list.next); | 613 | dev = to_device(dpm_suspended_list.next); |
617 | get_device(dev); | 614 | get_device(dev); |
618 | if (dev->power.status >= DPM_OFF && !is_async(dev)) { | 615 | if (!is_async(dev)) { |
619 | int error; | 616 | int error; |
620 | 617 | ||
621 | mutex_unlock(&dpm_list_mtx); | 618 | mutex_unlock(&dpm_list_mtx); |
622 | 619 | ||
623 | error = device_resume(dev, state, false); | 620 | error = device_resume(dev, state, false); |
624 | |||
625 | mutex_lock(&dpm_list_mtx); | ||
626 | if (error) | 621 | if (error) |
627 | pm_dev_err(dev, state, "", error); | 622 | pm_dev_err(dev, state, "", error); |
628 | } else if (dev->power.status == DPM_SUSPENDING) { | 623 | |
629 | /* Allow new children of the device to be registered */ | 624 | mutex_lock(&dpm_list_mtx); |
630 | dev->power.status = DPM_RESUMING; | ||
631 | } | 625 | } |
632 | if (!list_empty(&dev->power.entry)) | 626 | if (!list_empty(&dev->power.entry)) |
633 | list_move_tail(&dev->power.entry, &list); | 627 | list_move_tail(&dev->power.entry, &dpm_prepared_list); |
634 | put_device(dev); | 628 | put_device(dev); |
635 | } | 629 | } |
636 | list_splice(&list, &dpm_list); | ||
637 | mutex_unlock(&dpm_list_mtx); | 630 | mutex_unlock(&dpm_list_mtx); |
638 | async_synchronize_full(); | 631 | async_synchronize_full(); |
639 | dpm_show_time(starttime, state, NULL); | 632 | dpm_show_time(starttime, state, NULL); |
@@ -648,19 +641,22 @@ static void device_complete(struct device *dev, pm_message_t state) | |||
648 | { | 641 | { |
649 | device_lock(dev); | 642 | device_lock(dev); |
650 | 643 | ||
651 | if (dev->class && dev->class->pm && dev->class->pm->complete) { | 644 | if (dev->pwr_domain) { |
652 | pm_dev_dbg(dev, state, "completing class "); | 645 | pm_dev_dbg(dev, state, "completing power domain "); |
653 | dev->class->pm->complete(dev); | 646 | if (dev->pwr_domain->ops.complete) |
654 | } | 647 | dev->pwr_domain->ops.complete(dev); |
655 | 648 | } else if (dev->type && dev->type->pm) { | |
656 | if (dev->type && dev->type->pm && dev->type->pm->complete) { | ||
657 | pm_dev_dbg(dev, state, "completing type "); | 649 | pm_dev_dbg(dev, state, "completing type "); |
658 | dev->type->pm->complete(dev); | 650 | if (dev->type->pm->complete) |
659 | } | 651 | dev->type->pm->complete(dev); |
660 | 652 | } else if (dev->class && dev->class->pm) { | |
661 | if (dev->bus && dev->bus->pm && dev->bus->pm->complete) { | 653 | pm_dev_dbg(dev, state, "completing class "); |
654 | if (dev->class->pm->complete) | ||
655 | dev->class->pm->complete(dev); | ||
656 | } else if (dev->bus && dev->bus->pm) { | ||
662 | pm_dev_dbg(dev, state, "completing "); | 657 | pm_dev_dbg(dev, state, "completing "); |
663 | dev->bus->pm->complete(dev); | 658 | if (dev->bus->pm->complete) |
659 | dev->bus->pm->complete(dev); | ||
664 | } | 660 | } |
665 | 661 | ||
666 | device_unlock(dev); | 662 | device_unlock(dev); |
@@ -673,28 +669,25 @@ static void device_complete(struct device *dev, pm_message_t state) | |||
673 | * Execute the ->complete() callbacks for all devices whose PM status is not | 669 | * Execute the ->complete() callbacks for all devices whose PM status is not |
674 | * DPM_ON (this allows new devices to be registered). | 670 | * DPM_ON (this allows new devices to be registered). |
675 | */ | 671 | */ |
676 | static void dpm_complete(pm_message_t state) | 672 | void dpm_complete(pm_message_t state) |
677 | { | 673 | { |
678 | struct list_head list; | 674 | struct list_head list; |
679 | 675 | ||
676 | might_sleep(); | ||
677 | |||
680 | INIT_LIST_HEAD(&list); | 678 | INIT_LIST_HEAD(&list); |
681 | mutex_lock(&dpm_list_mtx); | 679 | mutex_lock(&dpm_list_mtx); |
682 | transition_started = false; | 680 | while (!list_empty(&dpm_prepared_list)) { |
683 | while (!list_empty(&dpm_list)) { | 681 | struct device *dev = to_device(dpm_prepared_list.prev); |
684 | struct device *dev = to_device(dpm_list.prev); | ||
685 | 682 | ||
686 | get_device(dev); | 683 | get_device(dev); |
687 | if (dev->power.status > DPM_ON) { | 684 | dev->power.is_prepared = false; |
688 | dev->power.status = DPM_ON; | 685 | list_move(&dev->power.entry, &list); |
689 | mutex_unlock(&dpm_list_mtx); | 686 | mutex_unlock(&dpm_list_mtx); |
690 | 687 | ||
691 | device_complete(dev, state); | 688 | device_complete(dev, state); |
692 | pm_runtime_put_sync(dev); | ||
693 | 689 | ||
694 | mutex_lock(&dpm_list_mtx); | 690 | mutex_lock(&dpm_list_mtx); |
695 | } | ||
696 | if (!list_empty(&dev->power.entry)) | ||
697 | list_move(&dev->power.entry, &list); | ||
698 | put_device(dev); | 691 | put_device(dev); |
699 | } | 692 | } |
700 | list_splice(&list, &dpm_list); | 693 | list_splice(&list, &dpm_list); |
@@ -710,7 +703,6 @@ static void dpm_complete(pm_message_t state) | |||
710 | */ | 703 | */ |
711 | void dpm_resume_end(pm_message_t state) | 704 | void dpm_resume_end(pm_message_t state) |
712 | { | 705 | { |
713 | might_sleep(); | ||
714 | dpm_resume(state); | 706 | dpm_resume(state); |
715 | dpm_complete(state); | 707 | dpm_complete(state); |
716 | } | 708 | } |
@@ -750,29 +742,31 @@ static pm_message_t resume_event(pm_message_t sleep_state) | |||
750 | */ | 742 | */ |
751 | static int device_suspend_noirq(struct device *dev, pm_message_t state) | 743 | static int device_suspend_noirq(struct device *dev, pm_message_t state) |
752 | { | 744 | { |
753 | int error = 0; | 745 | int error; |
754 | 746 | ||
755 | if (dev->class && dev->class->pm) { | 747 | if (dev->pwr_domain) { |
756 | pm_dev_dbg(dev, state, "LATE class "); | 748 | pm_dev_dbg(dev, state, "LATE power domain "); |
757 | error = pm_noirq_op(dev, dev->class->pm, state); | 749 | error = pm_noirq_op(dev, &dev->pwr_domain->ops, state); |
758 | if (error) | 750 | if (error) |
759 | goto End; | 751 | return error; |
760 | } | 752 | } else if (dev->type && dev->type->pm) { |
761 | |||
762 | if (dev->type && dev->type->pm) { | ||
763 | pm_dev_dbg(dev, state, "LATE type "); | 753 | pm_dev_dbg(dev, state, "LATE type "); |
764 | error = pm_noirq_op(dev, dev->type->pm, state); | 754 | error = pm_noirq_op(dev, dev->type->pm, state); |
765 | if (error) | 755 | if (error) |
766 | goto End; | 756 | return error; |
767 | } | 757 | } else if (dev->class && dev->class->pm) { |
768 | 758 | pm_dev_dbg(dev, state, "LATE class "); | |
769 | if (dev->bus && dev->bus->pm) { | 759 | error = pm_noirq_op(dev, dev->class->pm, state); |
760 | if (error) | ||
761 | return error; | ||
762 | } else if (dev->bus && dev->bus->pm) { | ||
770 | pm_dev_dbg(dev, state, "LATE "); | 763 | pm_dev_dbg(dev, state, "LATE "); |
771 | error = pm_noirq_op(dev, dev->bus->pm, state); | 764 | error = pm_noirq_op(dev, dev->bus->pm, state); |
765 | if (error) | ||
766 | return error; | ||
772 | } | 767 | } |
773 | 768 | ||
774 | End: | 769 | return 0; |
775 | return error; | ||
776 | } | 770 | } |
777 | 771 | ||
778 | /** | 772 | /** |
@@ -784,19 +778,28 @@ End: | |||
784 | */ | 778 | */ |
785 | int dpm_suspend_noirq(pm_message_t state) | 779 | int dpm_suspend_noirq(pm_message_t state) |
786 | { | 780 | { |
787 | struct device *dev; | ||
788 | ktime_t starttime = ktime_get(); | 781 | ktime_t starttime = ktime_get(); |
789 | int error = 0; | 782 | int error = 0; |
790 | 783 | ||
791 | suspend_device_irqs(); | 784 | suspend_device_irqs(); |
792 | mutex_lock(&dpm_list_mtx); | 785 | mutex_lock(&dpm_list_mtx); |
793 | list_for_each_entry_reverse(dev, &dpm_list, power.entry) { | 786 | while (!list_empty(&dpm_suspended_list)) { |
787 | struct device *dev = to_device(dpm_suspended_list.prev); | ||
788 | |||
789 | get_device(dev); | ||
790 | mutex_unlock(&dpm_list_mtx); | ||
791 | |||
794 | error = device_suspend_noirq(dev, state); | 792 | error = device_suspend_noirq(dev, state); |
793 | |||
794 | mutex_lock(&dpm_list_mtx); | ||
795 | if (error) { | 795 | if (error) { |
796 | pm_dev_err(dev, state, " late", error); | 796 | pm_dev_err(dev, state, " late", error); |
797 | put_device(dev); | ||
797 | break; | 798 | break; |
798 | } | 799 | } |
799 | dev->power.status = DPM_OFF_IRQ; | 800 | if (!list_empty(&dev->power.entry)) |
801 | list_move(&dev->power.entry, &dpm_noirq_list); | ||
802 | put_device(dev); | ||
800 | } | 803 | } |
801 | mutex_unlock(&dpm_list_mtx); | 804 | mutex_unlock(&dpm_list_mtx); |
802 | if (error) | 805 | if (error) |
@@ -829,8 +832,6 @@ static int legacy_suspend(struct device *dev, pm_message_t state, | |||
829 | return error; | 832 | return error; |
830 | } | 833 | } |
831 | 834 | ||
832 | static int async_error; | ||
833 | |||
834 | /** | 835 | /** |
835 | * device_suspend - Execute "suspend" callbacks for given device. | 836 | * device_suspend - Execute "suspend" callbacks for given device. |
836 | * @dev: Device to handle. | 837 | * @dev: Device to handle. |
@@ -845,27 +846,35 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) | |||
845 | device_lock(dev); | 846 | device_lock(dev); |
846 | 847 | ||
847 | if (async_error) | 848 | if (async_error) |
849 | goto Unlock; | ||
850 | |||
851 | if (pm_wakeup_pending()) { | ||
852 | async_error = -EBUSY; | ||
853 | goto Unlock; | ||
854 | } | ||
855 | |||
856 | if (dev->pwr_domain) { | ||
857 | pm_dev_dbg(dev, state, "power domain "); | ||
858 | error = pm_op(dev, &dev->pwr_domain->ops, state); | ||
848 | goto End; | 859 | goto End; |
860 | } | ||
861 | |||
862 | if (dev->type && dev->type->pm) { | ||
863 | pm_dev_dbg(dev, state, "type "); | ||
864 | error = pm_op(dev, dev->type->pm, state); | ||
865 | goto End; | ||
866 | } | ||
849 | 867 | ||
850 | if (dev->class) { | 868 | if (dev->class) { |
851 | if (dev->class->pm) { | 869 | if (dev->class->pm) { |
852 | pm_dev_dbg(dev, state, "class "); | 870 | pm_dev_dbg(dev, state, "class "); |
853 | error = pm_op(dev, dev->class->pm, state); | 871 | error = pm_op(dev, dev->class->pm, state); |
872 | goto End; | ||
854 | } else if (dev->class->suspend) { | 873 | } else if (dev->class->suspend) { |
855 | pm_dev_dbg(dev, state, "legacy class "); | 874 | pm_dev_dbg(dev, state, "legacy class "); |
856 | error = legacy_suspend(dev, state, dev->class->suspend); | 875 | error = legacy_suspend(dev, state, dev->class->suspend); |
857 | } | ||
858 | if (error) | ||
859 | goto End; | 876 | goto End; |
860 | } | ||
861 | |||
862 | if (dev->type) { | ||
863 | if (dev->type->pm) { | ||
864 | pm_dev_dbg(dev, state, "type "); | ||
865 | error = pm_op(dev, dev->type->pm, state); | ||
866 | } | 877 | } |
867 | if (error) | ||
868 | goto End; | ||
869 | } | 878 | } |
870 | 879 | ||
871 | if (dev->bus) { | 880 | if (dev->bus) { |
@@ -878,13 +887,16 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) | |||
878 | } | 887 | } |
879 | } | 888 | } |
880 | 889 | ||
881 | if (!error) | ||
882 | dev->power.status = DPM_OFF; | ||
883 | |||
884 | End: | 890 | End: |
891 | dev->power.is_suspended = !error; | ||
892 | |||
893 | Unlock: | ||
885 | device_unlock(dev); | 894 | device_unlock(dev); |
886 | complete_all(&dev->power.completion); | 895 | complete_all(&dev->power.completion); |
887 | 896 | ||
897 | if (error) | ||
898 | async_error = error; | ||
899 | |||
888 | return error; | 900 | return error; |
889 | } | 901 | } |
890 | 902 | ||
@@ -894,10 +906,8 @@ static void async_suspend(void *data, async_cookie_t cookie) | |||
894 | int error; | 906 | int error; |
895 | 907 | ||
896 | error = __device_suspend(dev, pm_transition, true); | 908 | error = __device_suspend(dev, pm_transition, true); |
897 | if (error) { | 909 | if (error) |
898 | pm_dev_err(dev, pm_transition, " async", error); | 910 | pm_dev_err(dev, pm_transition, " async", error); |
899 | async_error = error; | ||
900 | } | ||
901 | 911 | ||
902 | put_device(dev); | 912 | put_device(dev); |
903 | } | 913 | } |
@@ -919,18 +929,18 @@ static int device_suspend(struct device *dev) | |||
919 | * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices. | 929 | * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices. |
920 | * @state: PM transition of the system being carried out. | 930 | * @state: PM transition of the system being carried out. |
921 | */ | 931 | */ |
922 | static int dpm_suspend(pm_message_t state) | 932 | int dpm_suspend(pm_message_t state) |
923 | { | 933 | { |
924 | struct list_head list; | ||
925 | ktime_t starttime = ktime_get(); | 934 | ktime_t starttime = ktime_get(); |
926 | int error = 0; | 935 | int error = 0; |
927 | 936 | ||
928 | INIT_LIST_HEAD(&list); | 937 | might_sleep(); |
938 | |||
929 | mutex_lock(&dpm_list_mtx); | 939 | mutex_lock(&dpm_list_mtx); |
930 | pm_transition = state; | 940 | pm_transition = state; |
931 | async_error = 0; | 941 | async_error = 0; |
932 | while (!list_empty(&dpm_list)) { | 942 | while (!list_empty(&dpm_prepared_list)) { |
933 | struct device *dev = to_device(dpm_list.prev); | 943 | struct device *dev = to_device(dpm_prepared_list.prev); |
934 | 944 | ||
935 | get_device(dev); | 945 | get_device(dev); |
936 | mutex_unlock(&dpm_list_mtx); | 946 | mutex_unlock(&dpm_list_mtx); |
@@ -944,12 +954,11 @@ static int dpm_suspend(pm_message_t state) | |||
944 | break; | 954 | break; |
945 | } | 955 | } |
946 | if (!list_empty(&dev->power.entry)) | 956 | if (!list_empty(&dev->power.entry)) |
947 | list_move(&dev->power.entry, &list); | 957 | list_move(&dev->power.entry, &dpm_suspended_list); |
948 | put_device(dev); | 958 | put_device(dev); |
949 | if (async_error) | 959 | if (async_error) |
950 | break; | 960 | break; |
951 | } | 961 | } |
952 | list_splice(&list, dpm_list.prev); | ||
953 | mutex_unlock(&dpm_list_mtx); | 962 | mutex_unlock(&dpm_list_mtx); |
954 | async_synchronize_full(); | 963 | async_synchronize_full(); |
955 | if (!error) | 964 | if (!error) |
@@ -973,27 +982,34 @@ static int device_prepare(struct device *dev, pm_message_t state) | |||
973 | 982 | ||
974 | device_lock(dev); | 983 | device_lock(dev); |
975 | 984 | ||
976 | if (dev->bus && dev->bus->pm && dev->bus->pm->prepare) { | 985 | if (dev->pwr_domain) { |
977 | pm_dev_dbg(dev, state, "preparing "); | 986 | pm_dev_dbg(dev, state, "preparing power domain "); |
978 | error = dev->bus->pm->prepare(dev); | 987 | if (dev->pwr_domain->ops.prepare) |
979 | suspend_report_result(dev->bus->pm->prepare, error); | 988 | error = dev->pwr_domain->ops.prepare(dev); |
989 | suspend_report_result(dev->pwr_domain->ops.prepare, error); | ||
980 | if (error) | 990 | if (error) |
981 | goto End; | 991 | goto End; |
982 | } | 992 | } else if (dev->type && dev->type->pm) { |
983 | |||
984 | if (dev->type && dev->type->pm && dev->type->pm->prepare) { | ||
985 | pm_dev_dbg(dev, state, "preparing type "); | 993 | pm_dev_dbg(dev, state, "preparing type "); |
986 | error = dev->type->pm->prepare(dev); | 994 | if (dev->type->pm->prepare) |
995 | error = dev->type->pm->prepare(dev); | ||
987 | suspend_report_result(dev->type->pm->prepare, error); | 996 | suspend_report_result(dev->type->pm->prepare, error); |
988 | if (error) | 997 | if (error) |
989 | goto End; | 998 | goto End; |
990 | } | 999 | } else if (dev->class && dev->class->pm) { |
991 | |||
992 | if (dev->class && dev->class->pm && dev->class->pm->prepare) { | ||
993 | pm_dev_dbg(dev, state, "preparing class "); | 1000 | pm_dev_dbg(dev, state, "preparing class "); |
994 | error = dev->class->pm->prepare(dev); | 1001 | if (dev->class->pm->prepare) |
1002 | error = dev->class->pm->prepare(dev); | ||
995 | suspend_report_result(dev->class->pm->prepare, error); | 1003 | suspend_report_result(dev->class->pm->prepare, error); |
1004 | if (error) | ||
1005 | goto End; | ||
1006 | } else if (dev->bus && dev->bus->pm) { | ||
1007 | pm_dev_dbg(dev, state, "preparing "); | ||
1008 | if (dev->bus->pm->prepare) | ||
1009 | error = dev->bus->pm->prepare(dev); | ||
1010 | suspend_report_result(dev->bus->pm->prepare, error); | ||
996 | } | 1011 | } |
1012 | |||
997 | End: | 1013 | End: |
998 | device_unlock(dev); | 1014 | device_unlock(dev); |
999 | 1015 | ||
@@ -1006,50 +1022,45 @@ static int device_prepare(struct device *dev, pm_message_t state) | |||
1006 | * | 1022 | * |
1007 | * Execute the ->prepare() callback(s) for all devices. | 1023 | * Execute the ->prepare() callback(s) for all devices. |
1008 | */ | 1024 | */ |
1009 | static int dpm_prepare(pm_message_t state) | 1025 | int dpm_prepare(pm_message_t state) |
1010 | { | 1026 | { |
1011 | struct list_head list; | ||
1012 | int error = 0; | 1027 | int error = 0; |
1013 | 1028 | ||
1014 | INIT_LIST_HEAD(&list); | 1029 | might_sleep(); |
1030 | |||
1015 | mutex_lock(&dpm_list_mtx); | 1031 | mutex_lock(&dpm_list_mtx); |
1016 | transition_started = true; | ||
1017 | while (!list_empty(&dpm_list)) { | 1032 | while (!list_empty(&dpm_list)) { |
1018 | struct device *dev = to_device(dpm_list.next); | 1033 | struct device *dev = to_device(dpm_list.next); |
1019 | 1034 | ||
1020 | get_device(dev); | 1035 | get_device(dev); |
1021 | dev->power.status = DPM_PREPARING; | ||
1022 | mutex_unlock(&dpm_list_mtx); | 1036 | mutex_unlock(&dpm_list_mtx); |
1023 | 1037 | ||
1024 | pm_runtime_get_noresume(dev); | 1038 | pm_runtime_get_noresume(dev); |
1025 | if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) { | 1039 | if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) |
1026 | /* Wake-up requested during system sleep transition. */ | 1040 | pm_wakeup_event(dev, 0); |
1027 | pm_runtime_put_sync(dev); | 1041 | |
1028 | error = -EBUSY; | 1042 | pm_runtime_put_sync(dev); |
1029 | } else { | 1043 | error = pm_wakeup_pending() ? |
1030 | error = device_prepare(dev, state); | 1044 | -EBUSY : device_prepare(dev, state); |
1031 | } | ||
1032 | 1045 | ||
1033 | mutex_lock(&dpm_list_mtx); | 1046 | mutex_lock(&dpm_list_mtx); |
1034 | if (error) { | 1047 | if (error) { |
1035 | dev->power.status = DPM_ON; | ||
1036 | if (error == -EAGAIN) { | 1048 | if (error == -EAGAIN) { |
1037 | put_device(dev); | 1049 | put_device(dev); |
1038 | error = 0; | 1050 | error = 0; |
1039 | continue; | 1051 | continue; |
1040 | } | 1052 | } |
1041 | printk(KERN_ERR "PM: Failed to prepare device %s " | 1053 | printk(KERN_INFO "PM: Device %s not prepared " |
1042 | "for power transition: error %d\n", | 1054 | "for power transition: code %d\n", |
1043 | kobject_name(&dev->kobj), error); | 1055 | dev_name(dev), error); |
1044 | put_device(dev); | 1056 | put_device(dev); |
1045 | break; | 1057 | break; |
1046 | } | 1058 | } |
1047 | dev->power.status = DPM_SUSPENDING; | 1059 | dev->power.is_prepared = true; |
1048 | if (!list_empty(&dev->power.entry)) | 1060 | if (!list_empty(&dev->power.entry)) |
1049 | list_move_tail(&dev->power.entry, &list); | 1061 | list_move_tail(&dev->power.entry, &dpm_prepared_list); |
1050 | put_device(dev); | 1062 | put_device(dev); |
1051 | } | 1063 | } |
1052 | list_splice(&list, &dpm_list); | ||
1053 | mutex_unlock(&dpm_list_mtx); | 1064 | mutex_unlock(&dpm_list_mtx); |
1054 | return error; | 1065 | return error; |
1055 | } | 1066 | } |
@@ -1065,7 +1076,6 @@ int dpm_suspend_start(pm_message_t state) | |||
1065 | { | 1076 | { |
1066 | int error; | 1077 | int error; |
1067 | 1078 | ||
1068 | might_sleep(); | ||
1069 | error = dpm_prepare(state); | 1079 | error = dpm_prepare(state); |
1070 | if (!error) | 1080 | if (!error) |
1071 | error = dpm_suspend(state); | 1081 | error = dpm_suspend(state); |
@@ -1085,8 +1095,9 @@ EXPORT_SYMBOL_GPL(__suspend_report_result); | |||
1085 | * @dev: Device to wait for. | 1095 | * @dev: Device to wait for. |
1086 | * @subordinate: Device that needs to wait for @dev. | 1096 | * @subordinate: Device that needs to wait for @dev. |
1087 | */ | 1097 | */ |
1088 | void device_pm_wait_for_dev(struct device *subordinate, struct device *dev) | 1098 | int device_pm_wait_for_dev(struct device *subordinate, struct device *dev) |
1089 | { | 1099 | { |
1090 | dpm_wait(dev, subordinate->power.async_suspend); | 1100 | dpm_wait(dev, subordinate->power.async_suspend); |
1101 | return async_error; | ||
1091 | } | 1102 | } |
1092 | EXPORT_SYMBOL_GPL(device_pm_wait_for_dev); | 1103 | EXPORT_SYMBOL_GPL(device_pm_wait_for_dev); |
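Two consequences of the reworked main.c are worth illustrating. Devices can still opt into asynchronous suspend/resume (the is_async() check above), and device_pm_wait_for_dev() now returns async_error, so a driver waiting for another device during an async transition can bail out as soon as the transition has failed elsewhere. A hedged sketch; example_supplier and the example_* callbacks are assumptions, while device_enable_async_suspend() is the existing opt-in helper:

    static struct device *example_supplier;         /* assumed ordering dependency */

    static int example_probe(struct device *dev)
    {
            /* Let the PM core suspend/resume this device asynchronously. */
            device_enable_async_suspend(dev);
            return 0;
    }

    static int example_suspend(struct device *dev)
    {
            /*
             * Wait until the supplier has been handled; an error already
             * recorded in async_error is returned and aborts this suspend.
             */
            return device_pm_wait_for_dev(dev, example_supplier);
    }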
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c new file mode 100644 index 000000000000..56a6899f5e9e --- /dev/null +++ b/drivers/base/power/opp.c | |||
@@ -0,0 +1,628 @@ | |||
1 | /* | ||
2 | * Generic OPP Interface | ||
3 | * | ||
4 | * Copyright (C) 2009-2010 Texas Instruments Incorporated. | ||
5 | * Nishanth Menon | ||
6 | * Romit Dasgupta | ||
7 | * Kevin Hilman | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | */ | ||
13 | |||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/errno.h> | ||
16 | #include <linux/err.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/slab.h> | ||
19 | #include <linux/cpufreq.h> | ||
20 | #include <linux/list.h> | ||
21 | #include <linux/rculist.h> | ||
22 | #include <linux/rcupdate.h> | ||
23 | #include <linux/opp.h> | ||
24 | |||
25 | /* | ||
26 | * Internal data structure organization with the OPP layer library is as | ||
27 | * follows: | ||
28 | * dev_opp_list (root) | ||
29 | * |- device 1 (represents voltage domain 1) | ||
30 | * | |- opp 1 (availability, freq, voltage) | ||
31 | * | |- opp 2 .. | ||
32 | * ... ... | ||
33 | * | `- opp n .. | ||
34 | * |- device 2 (represents the next voltage domain) | ||
35 | * ... | ||
36 | * `- device m (represents mth voltage domain) | ||
37 | * device 1, 2.. are represented by dev_opp structure while each opp | ||
38 | * is represented by the opp structure. | ||
39 | */ | ||
40 | |||
41 | /** | ||
42 | * struct opp - Generic OPP description structure | ||
43 | * @node: opp list node. The nodes are maintained throughout the lifetime | ||
44 | * of boot. It is expected only an optimal set of OPPs are | ||
45 | * added to the library by the SoC framework. | ||
46 | * RCU usage: opp list is traversed with RCU locks. node | ||
47 | * modification is possible realtime, hence the modifications | ||
48 | * are protected by the dev_opp_list_lock for integrity. | ||
49 | * IMPORTANT: the opp nodes should be maintained in increasing | ||
50 | * order. | ||
51 | * @available: true/false - marks if this OPP is available or not | ||
52 | * @rate: Frequency in hertz | ||
53 | * @u_volt: Nominal voltage in microvolts corresponding to this OPP | ||
54 | * @dev_opp: points back to the device_opp struct this opp belongs to | ||
55 | * | ||
56 | * This structure stores the OPP information for a given device. | ||
57 | */ | ||
58 | struct opp { | ||
59 | struct list_head node; | ||
60 | |||
61 | bool available; | ||
62 | unsigned long rate; | ||
63 | unsigned long u_volt; | ||
64 | |||
65 | struct device_opp *dev_opp; | ||
66 | }; | ||
67 | |||
68 | /** | ||
69 | * struct device_opp - Device opp structure | ||
70 | * @node: list node - contains the devices with OPPs that | ||
71 | * have been registered. Nodes once added are not modified in this | ||
72 | * list. | ||
73 | * RCU usage: nodes are not modified in the list of device_opp, | ||
74 | * however addition is possible and is secured by dev_opp_list_lock | ||
75 | * @dev: device pointer | ||
76 | * @opp_list: list of opps | ||
77 | * | ||
78 | * This is an internal data structure maintaining the link to opps attached to | ||
79 | * a device. This structure is not meant to be shared with users as it is | ||
80 | * meant for bookkeeping and is private to the OPP library. | ||
81 | */ | ||
82 | struct device_opp { | ||
83 | struct list_head node; | ||
84 | |||
85 | struct device *dev; | ||
86 | struct list_head opp_list; | ||
87 | }; | ||
88 | |||
89 | /* | ||
90 | * The root of the list of all devices. All device_opp structures branch off | ||
91 | * from here, with each device_opp containing the list of opp it supports in | ||
92 | * various states of availability. | ||
93 | */ | ||
94 | static LIST_HEAD(dev_opp_list); | ||
95 | /* Lock to allow exclusive modification to the device and opp lists */ | ||
96 | static DEFINE_MUTEX(dev_opp_list_lock); | ||
97 | |||
98 | /** | ||
99 | * find_device_opp() - find device_opp struct using device pointer | ||
100 | * @dev: device pointer used to lookup device OPPs | ||
101 | * | ||
102 | * Search list of device OPPs for one containing matching device. Does a RCU | ||
103 | * reader operation to grab the pointer needed. | ||
104 | * | ||
105 | * Returns pointer to 'struct device_opp' if found, otherwise -ENODEV or | ||
106 | * -EINVAL based on type of error. | ||
107 | * | ||
108 | * Locking: This function must be called under rcu_read_lock(). device_opp | ||
109 | * is a RCU protected pointer. This means that device_opp is valid as long | ||
110 | * as we are under RCU lock. | ||
111 | */ | ||
112 | static struct device_opp *find_device_opp(struct device *dev) | ||
113 | { | ||
114 | struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV); | ||
115 | |||
116 | if (unlikely(IS_ERR_OR_NULL(dev))) { | ||
117 | pr_err("%s: Invalid parameters\n", __func__); | ||
118 | return ERR_PTR(-EINVAL); | ||
119 | } | ||
120 | |||
121 | list_for_each_entry_rcu(tmp_dev_opp, &dev_opp_list, node) { | ||
122 | if (tmp_dev_opp->dev == dev) { | ||
123 | dev_opp = tmp_dev_opp; | ||
124 | break; | ||
125 | } | ||
126 | } | ||
127 | |||
128 | return dev_opp; | ||
129 | } | ||
130 | |||
131 | /** | ||
132 | * opp_get_voltage() - Gets the voltage corresponding to an available opp | ||
133 | * @opp: opp for which the voltage has to be returned | ||
134 | * | ||
135 | * Return the voltage in microvolts corresponding to the opp, else | ||
136 | * return 0 | ||
137 | * | ||
138 | * Locking: This function must be called under rcu_read_lock(). opp is an RCU | ||
139 | * protected pointer. This means that opp which could have been fetched by | ||
140 | * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are | ||
141 | * under RCU lock. The pointer returned by the opp_find_freq family must be | ||
142 | * used in the same section as the usage of this function with the pointer | ||
143 | * prior to unlocking with rcu_read_unlock() to maintain the integrity of the | ||
144 | * pointer. | ||
145 | */ | ||
146 | unsigned long opp_get_voltage(struct opp *opp) | ||
147 | { | ||
148 | struct opp *tmp_opp; | ||
149 | unsigned long v = 0; | ||
150 | |||
151 | tmp_opp = rcu_dereference(opp); | ||
152 | if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available) | ||
153 | pr_err("%s: Invalid parameters\n", __func__); | ||
154 | else | ||
155 | v = tmp_opp->u_volt; | ||
156 | |||
157 | return v; | ||
158 | } | ||
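
A minimal caller-side sketch (not part of the patch) of the locking rule documented above: the OPP is looked up and its voltage read inside a single RCU read-side section. The device pointer and rate are placeholders.

#include <linux/err.h>
#include <linux/opp.h>
#include <linux/rcupdate.h>

static unsigned long example_voltage_for(struct device *my_dev,
                                         unsigned long my_rate_hz)
{
        struct opp *opp;
        unsigned long u_volt = 0;

        rcu_read_lock();
        opp = opp_find_freq_exact(my_dev, my_rate_hz, true);
        if (!IS_ERR(opp))
                u_volt = opp_get_voltage(opp);  /* valid only inside the lock */
        rcu_read_unlock();

        return u_volt;
}
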
159 | |||
160 | /** | ||
161 | * opp_get_freq() - Gets the frequency corresponding to an available opp | ||
162 | * @opp: opp for which the frequency has to be returned | ||
163 | * | ||
164 | * Return frequency in hertz corresponding to the opp, else | ||
165 | * return 0 | ||
166 | * | ||
167 | * Locking: This function must be called under rcu_read_lock(). opp is an RCU | ||
168 | * protected pointer. This means that opp which could have been fetched by | ||
169 | * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are | ||
170 | * under RCU lock. The pointer returned by the opp_find_freq family must be | ||
171 | * used in the same section as the usage of this function with the pointer | ||
172 | * prior to unlocking with rcu_read_unlock() to maintain the integrity of the | ||
173 | * pointer. | ||
174 | */ | ||
175 | unsigned long opp_get_freq(struct opp *opp) | ||
176 | { | ||
177 | struct opp *tmp_opp; | ||
178 | unsigned long f = 0; | ||
179 | |||
180 | tmp_opp = rcu_dereference(opp); | ||
181 | if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available) | ||
182 | pr_err("%s: Invalid parameters\n", __func__); | ||
183 | else | ||
184 | f = tmp_opp->rate; | ||
185 | |||
186 | return f; | ||
187 | } | ||
188 | |||
189 | /** | ||
190 | * opp_get_opp_count() - Get number of opps available in the opp list | ||
191 | * @dev: device for which we do this operation | ||
192 | * | ||
193 | * This function returns the number of available opps if there are any, | ||
194 | * else returns 0 if there are none, or the corresponding error value. | ||
195 | * | ||
196 | * Locking: This function must be called under rcu_read_lock(). This function | ||
197 | * internally references two RCU protected structures: device_opp and opp which | ||
198 | * are safe as long as we are under a common RCU locked section. | ||
199 | */ | ||
200 | int opp_get_opp_count(struct device *dev) | ||
201 | { | ||
202 | struct device_opp *dev_opp; | ||
203 | struct opp *temp_opp; | ||
204 | int count = 0; | ||
205 | |||
206 | dev_opp = find_device_opp(dev); | ||
207 | if (IS_ERR(dev_opp)) { | ||
208 | int r = PTR_ERR(dev_opp); | ||
209 | dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r); | ||
210 | return r; | ||
211 | } | ||
212 | |||
213 | list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { | ||
214 | if (temp_opp->available) | ||
215 | count++; | ||
216 | } | ||
217 | |||
218 | return count; | ||
219 | } | ||
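
Sketch only (not from the patch): use the count to size a scratch array. The count is taken under rcu_read_lock() and can change once the lock is dropped, so it is a hint, not a guarantee; names here are placeholders.

#include <linux/opp.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

static unsigned long *example_alloc_rate_array(struct device *dev, int *n)
{
        rcu_read_lock();
        *n = opp_get_opp_count(dev);
        rcu_read_unlock();

        if (*n <= 0)
                return NULL;

        /* Allocation happens outside the RCU section (GFP_KERNEL may sleep). */
        return kcalloc(*n, sizeof(unsigned long), GFP_KERNEL);
}
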
220 | |||
221 | /** | ||
222 | * opp_find_freq_exact() - search for an exact frequency | ||
223 | * @dev: device for which we do this operation | ||
224 | * @freq: frequency to search for | ||
225 | * @available: true/false - match for available opp | ||
226 | * | ||
227 | * Searches for exact match in the opp list and returns pointer to the matching | ||
228 | * opp if found, else returns ERR_PTR in case of error and should be handled | ||
229 | * using IS_ERR. | ||
230 | * | ||
231 | * Note: available is a modifier for the search. If available=true, then the | ||
232 | * match is for an exact frequency which is available in the stored OPP | ||
233 | * table. If false, the match is for an exact frequency which is not available. | ||
234 | * | ||
235 | * This provides a mechanism to enable an opp which is not available currently | ||
236 | * or the opposite as well. | ||
237 | * | ||
238 | * Locking: This function must be called under rcu_read_lock(). opp is an RCU | ||
239 | * protected pointer. The reason for the same is that the opp pointer which is | ||
240 | * returned will remain valid for use with opp_get_{voltage, freq} only while | ||
241 | * under the locked area. The pointer returned must be used prior to unlocking | ||
242 | * with rcu_read_unlock() to maintain the integrity of the pointer. | ||
243 | */ | ||
244 | struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq, | ||
245 | bool available) | ||
246 | { | ||
247 | struct device_opp *dev_opp; | ||
248 | struct opp *temp_opp, *opp = ERR_PTR(-ENODEV); | ||
249 | |||
250 | dev_opp = find_device_opp(dev); | ||
251 | if (IS_ERR(dev_opp)) { | ||
252 | int r = PTR_ERR(dev_opp); | ||
253 | dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r); | ||
254 | return ERR_PTR(r); | ||
255 | } | ||
256 | |||
257 | list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { | ||
258 | if (temp_opp->available == available && | ||
259 | temp_opp->rate == freq) { | ||
260 | opp = temp_opp; | ||
261 | break; | ||
262 | } | ||
263 | } | ||
264 | |||
265 | return opp; | ||
266 | } | ||
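
A hedged sketch of the "available" modifier described in the comment above: probe whether a currently-disabled OPP exists at exactly the given rate before trying to enable it. Function and parameter names are illustrative only.

#include <linux/err.h>
#include <linux/opp.h>
#include <linux/rcupdate.h>

static bool example_has_disabled_opp(struct device *dev, unsigned long rate)
{
        struct opp *opp;

        rcu_read_lock();
        opp = opp_find_freq_exact(dev, rate, false);    /* match unavailable OPPs */
        rcu_read_unlock();

        /* Only the error status is used after unlock; the opp is not dereferenced. */
        return !IS_ERR(opp);
}
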
267 | |||
268 | /** | ||
269 | * opp_find_freq_ceil() - Search for a rounded ceil freq | ||
270 | * @dev: device for which we do this operation | ||
271 | * @freq: Start frequency | ||
272 | * | ||
273 | * Search for the matching ceil *available* OPP from a starting freq | ||
274 | * for a device. | ||
275 | * | ||
276 | * Returns matching *opp and refreshes *freq accordingly, else returns | ||
277 | * ERR_PTR in case of error and should be handled using IS_ERR. | ||
278 | * | ||
279 | * Locking: This function must be called under rcu_read_lock(). opp is an RCU | ||
280 | * protected pointer. The reason for the same is that the opp pointer which is | ||
281 | * returned will remain valid for use with opp_get_{voltage, freq} only while | ||
282 | * under the locked area. The pointer returned must be used prior to unlocking | ||
283 | * with rcu_read_unlock() to maintain the integrity of the pointer. | ||
284 | */ | ||
285 | struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq) | ||
286 | { | ||
287 | struct device_opp *dev_opp; | ||
288 | struct opp *temp_opp, *opp = ERR_PTR(-ENODEV); | ||
289 | |||
290 | if (!dev || !freq) { | ||
291 | dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); | ||
292 | return ERR_PTR(-EINVAL); | ||
293 | } | ||
294 | |||
295 | dev_opp = find_device_opp(dev); | ||
296 | if (IS_ERR(dev_opp)) | ||
297 | return opp; | ||
298 | |||
299 | list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { | ||
300 | if (temp_opp->available && temp_opp->rate >= *freq) { | ||
301 | opp = temp_opp; | ||
302 | *freq = opp->rate; | ||
303 | break; | ||
304 | } | ||
305 | } | ||
306 | |||
307 | return opp; | ||
308 | } | ||
309 | |||
310 | /** | ||
311 | * opp_find_freq_floor() - Search for a rounded floor freq | ||
312 | * @dev: device for which we do this operation | ||
313 | * @freq: Start frequency | ||
314 | * | ||
315 | * Search for the matching floor *available* OPP from a starting freq | ||
316 | * for a device. | ||
317 | * | ||
318 | * Returns matching *opp and refreshes *freq accordingly, else returns | ||
319 | * ERR_PTR in case of error and should be handled using IS_ERR. | ||
320 | * | ||
321 | * Locking: This function must be called under rcu_read_lock(). opp is an RCU | ||
322 | * protected pointer. The reason for the same is that the opp pointer which is | ||
323 | * returned will remain valid for use with opp_get_{voltage, freq} only while | ||
324 | * under the locked area. The pointer returned must be used prior to unlocking | ||
325 | * with rcu_read_unlock() to maintain the integrity of the pointer. | ||
326 | */ | ||
327 | struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq) | ||
328 | { | ||
329 | struct device_opp *dev_opp; | ||
330 | struct opp *temp_opp, *opp = ERR_PTR(-ENODEV); | ||
331 | |||
332 | if (!dev || !freq) { | ||
333 | dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); | ||
334 | return ERR_PTR(-EINVAL); | ||
335 | } | ||
336 | |||
337 | dev_opp = find_device_opp(dev); | ||
338 | if (IS_ERR(dev_opp)) | ||
339 | return opp; | ||
340 | |||
341 | list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) { | ||
342 | if (temp_opp->available) { | ||
343 | /* go to the next node, before choosing prev */ | ||
344 | if (temp_opp->rate > *freq) | ||
345 | break; | ||
346 | else | ||
347 | opp = temp_opp; | ||
348 | } | ||
349 | } | ||
350 | if (!IS_ERR(opp)) | ||
351 | *freq = opp->rate; | ||
352 | |||
353 | return opp; | ||
354 | } | ||
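
A minimal sketch of the pattern the Locking comments above describe: round a requested rate to a supported OPP and read its voltage before leaving the RCU read-side section. The fallback-to-floor policy and all names are assumptions for illustration.

#include <linux/err.h>
#include <linux/opp.h>
#include <linux/rcupdate.h>

static int example_pick_opp(struct device *dev, unsigned long *rate_hz,
                            unsigned long *u_volt)
{
        struct opp *opp;
        int ret = 0;

        rcu_read_lock();
        opp = opp_find_freq_ceil(dev, rate_hz);         /* *rate_hz rounded up on success */
        if (IS_ERR(opp))
                opp = opp_find_freq_floor(dev, rate_hz); /* else try rounding down */
        if (IS_ERR(opp))
                ret = PTR_ERR(opp);
        else
                *u_volt = opp_get_voltage(opp);         /* read before rcu_read_unlock() */
        rcu_read_unlock();

        return ret;
}
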
355 | |||
356 | /** | ||
357 | * opp_add() - Add an OPP to the device's OPP table | ||
358 | * @dev: device for which we do this operation | ||
359 | * @freq: Frequency in Hz for this OPP | ||
360 | * @u_volt: Voltage in uVolts for this OPP | ||
361 | * | ||
362 | * This function adds an opp definition to the opp list and returns status. | ||
363 | * The opp is made available by default and it can be controlled using | ||
364 | * opp_enable/disable functions. | ||
365 | * | ||
366 | * Locking: The internal device_opp and opp structures are RCU protected. | ||
367 | * Hence this function internally uses RCU updater strategy with mutex locks | ||
368 | * to keep the integrity of the internal data structures. Callers should ensure | ||
369 | * that this function is *NOT* called under RCU protection or in contexts where | ||
370 | * mutex cannot be locked. | ||
371 | */ | ||
372 | int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) | ||
373 | { | ||
374 | struct device_opp *dev_opp = NULL; | ||
375 | struct opp *opp, *new_opp; | ||
376 | struct list_head *head; | ||
377 | |||
378 | /* allocate new OPP node */ | ||
379 | new_opp = kzalloc(sizeof(struct opp), GFP_KERNEL); | ||
380 | if (!new_opp) { | ||
381 | dev_warn(dev, "%s: Unable to create new OPP node\n", __func__); | ||
382 | return -ENOMEM; | ||
383 | } | ||
384 | |||
385 | /* Hold our list modification lock here */ | ||
386 | mutex_lock(&dev_opp_list_lock); | ||
387 | |||
388 | /* Check for existing list for 'dev' */ | ||
389 | dev_opp = find_device_opp(dev); | ||
390 | if (IS_ERR(dev_opp)) { | ||
391 | /* | ||
392 | * Allocate a new device OPP table. In the infrequent case | ||
393 | * where a new device needs to be added, we pay this | ||
394 | * penalty. | ||
395 | */ | ||
396 | dev_opp = kzalloc(sizeof(struct device_opp), GFP_KERNEL); | ||
397 | if (!dev_opp) { | ||
398 | mutex_unlock(&dev_opp_list_lock); | ||
399 | kfree(new_opp); | ||
400 | dev_warn(dev, | ||
401 | "%s: Unable to create device OPP structure\n", | ||
402 | __func__); | ||
403 | return -ENOMEM; | ||
404 | } | ||
405 | |||
406 | dev_opp->dev = dev; | ||
407 | INIT_LIST_HEAD(&dev_opp->opp_list); | ||
408 | |||
409 | /* Secure the device list modification */ | ||
410 | list_add_rcu(&dev_opp->node, &dev_opp_list); | ||
411 | } | ||
412 | |||
413 | /* populate the opp table */ | ||
414 | new_opp->dev_opp = dev_opp; | ||
415 | new_opp->rate = freq; | ||
416 | new_opp->u_volt = u_volt; | ||
417 | new_opp->available = true; | ||
418 | |||
419 | /* Insert new OPP in order of increasing frequency */ | ||
420 | head = &dev_opp->opp_list; | ||
421 | list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) { | ||
422 | if (new_opp->rate < opp->rate) | ||
423 | break; | ||
424 | else | ||
425 | head = &opp->node; | ||
426 | } | ||
427 | |||
428 | list_add_rcu(&new_opp->node, head); | ||
429 | mutex_unlock(&dev_opp_list_lock); | ||
430 | |||
431 | return 0; | ||
432 | } | ||
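
Sketch of SoC framework init code registering a small OPP table with opp_add(). Frequencies are in Hz and voltages in microvolts as documented above; the device pointer and the specific values are made up for illustration.

#include <linux/init.h>
#include <linux/opp.h>

static int __init example_register_opps(struct device *mpu_dev)
{
        int ret;

        ret = opp_add(mpu_dev, 300000000, 1025000);             /* 300 MHz @ 1.025 V */
        if (!ret)
                ret = opp_add(mpu_dev, 600000000, 1200000);     /* 600 MHz @ 1.200 V */
        if (!ret)
                ret = opp_add(mpu_dev, 800000000, 1325000);     /* 800 MHz @ 1.325 V */

        return ret;
}
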
433 | |||
434 | /** | ||
435 | * opp_set_availability() - helper to set the availability of an opp | ||
436 | * @dev: device for which we do this operation | ||
437 | * @freq: OPP frequency to modify availability | ||
438 | * @availability_req: availability status requested for this opp | ||
439 | * | ||
440 | * Set the availability of an OPP with an RCU operation; opp_{enable,disable} | ||
441 | * share common logic which is isolated here. | ||
442 | * | ||
443 | * Returns -EINVAL for bad pointers, -ENOMEM if no memory available for the | ||
444 | * copy operation, returns 0 if no modification was done OR modification was | ||
445 | * successful. | ||
446 | * | ||
447 | * Locking: The internal device_opp and opp structures are RCU protected. | ||
448 | * Hence this function internally uses RCU updater strategy with mutex locks to | ||
449 | * keep the integrity of the internal data structures. Callers should ensure | ||
450 | * that this function is *NOT* called under RCU protection or in contexts where | ||
451 | * mutex locking or synchronize_rcu() blocking calls cannot be used. | ||
452 | */ | ||
453 | static int opp_set_availability(struct device *dev, unsigned long freq, | ||
454 | bool availability_req) | ||
455 | { | ||
456 | struct device_opp *tmp_dev_opp, *dev_opp = NULL; | ||
457 | struct opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV); | ||
458 | int r = 0; | ||
459 | |||
460 | /* keep the node allocated */ | ||
461 | new_opp = kmalloc(sizeof(struct opp), GFP_KERNEL); | ||
462 | if (!new_opp) { | ||
463 | dev_warn(dev, "%s: Unable to create OPP\n", __func__); | ||
464 | return -ENOMEM; | ||
465 | } | ||
466 | |||
467 | mutex_lock(&dev_opp_list_lock); | ||
468 | |||
469 | /* Find the device_opp */ | ||
470 | list_for_each_entry(tmp_dev_opp, &dev_opp_list, node) { | ||
471 | if (dev == tmp_dev_opp->dev) { | ||
472 | dev_opp = tmp_dev_opp; | ||
473 | break; | ||
474 | } | ||
475 | } | ||
476 | if (IS_ERR(dev_opp)) { | ||
477 | r = PTR_ERR(dev_opp); | ||
478 | dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r); | ||
479 | goto unlock; | ||
480 | } | ||
481 | |||
482 | /* Do we have the frequency? */ | ||
483 | list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) { | ||
484 | if (tmp_opp->rate == freq) { | ||
485 | opp = tmp_opp; | ||
486 | break; | ||
487 | } | ||
488 | } | ||
489 | if (IS_ERR(opp)) { | ||
490 | r = PTR_ERR(opp); | ||
491 | goto unlock; | ||
492 | } | ||
493 | |||
494 | /* Is update really needed? */ | ||
495 | if (opp->available == availability_req) | ||
496 | goto unlock; | ||
497 | /* copy the old data over */ | ||
498 | *new_opp = *opp; | ||
499 | |||
500 | /* plug in new node */ | ||
501 | new_opp->available = availability_req; | ||
502 | |||
503 | list_replace_rcu(&opp->node, &new_opp->node); | ||
504 | mutex_unlock(&dev_opp_list_lock); | ||
505 | synchronize_rcu(); | ||
506 | |||
507 | /* clean up old opp */ | ||
508 | new_opp = opp; | ||
509 | goto out; | ||
510 | |||
511 | unlock: | ||
512 | mutex_unlock(&dev_opp_list_lock); | ||
513 | out: | ||
514 | kfree(new_opp); | ||
515 | return r; | ||
516 | } | ||
517 | |||
518 | /** | ||
519 | * opp_enable() - Enable a specific OPP | ||
520 | * @dev: device for which we do this operation | ||
521 | * @freq: OPP frequency to enable | ||
522 | * | ||
523 | * Enables a provided opp. If the operation is valid, this returns 0, else the | ||
524 | * corresponding error value. It is meant to be used by users to make an OPP | ||
525 | * available again after it was temporarily made unavailable with opp_disable. | ||
526 | * | ||
527 | * Locking: The internal device_opp and opp structures are RCU protected. | ||
528 | * Hence this function indirectly uses RCU and mutex locks to keep the | ||
529 | * integrity of the internal data structures. Callers should ensure that | ||
530 | * this function is *NOT* called under RCU protection or in contexts where | ||
531 | * mutex locking or synchronize_rcu() blocking calls cannot be used. | ||
532 | */ | ||
533 | int opp_enable(struct device *dev, unsigned long freq) | ||
534 | { | ||
535 | return opp_set_availability(dev, freq, true); | ||
536 | } | ||
537 | |||
538 | /** | ||
539 | * opp_disable() - Disable a specific OPP | ||
540 | * @dev: device for which we do this operation | ||
541 | * @freq: OPP frequency to disable | ||
542 | * | ||
543 | * Disables a provided opp. If the operation is valid, this returns | ||
544 | * 0, else the corresponding error value. It is meant to be a temporary | ||
545 | * control by users to make this OPP not available until the circumstances are | ||
546 | * right to make it available again (with a call to opp_enable). | ||
547 | * | ||
548 | * Locking: The internal device_opp and opp structures are RCU protected. | ||
549 | * Hence this function indirectly uses RCU and mutex locks to keep the | ||
550 | * integrity of the internal data structures. Callers should ensure that | ||
551 | * this function is *NOT* called under RCU protection or in contexts where | ||
552 | * mutex locking or synchronize_rcu() blocking calls cannot be used. | ||
553 | */ | ||
554 | int opp_disable(struct device *dev, unsigned long freq) | ||
555 | { | ||
556 | return opp_set_availability(dev, freq, false); | ||
557 | } | ||
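
A hedged sketch of the opp_enable()/opp_disable() pair in use: a hypothetical thermal handler pulls the highest OPP while the device runs hot and restores it afterwards. The frequency macro and function name are placeholders; as the comments note, this must run in a context that can take mutexes and call synchronize_rcu().

#include <linux/opp.h>

#define EXAMPLE_TURBO_HZ        800000000       /* placeholder top frequency */

static int example_thermal_throttle(struct device *dev, bool too_hot)
{
        /* Process context only: both calls may block. */
        return too_hot ? opp_disable(dev, EXAMPLE_TURBO_HZ)
                       : opp_enable(dev, EXAMPLE_TURBO_HZ);
}
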
558 | |||
559 | #ifdef CONFIG_CPU_FREQ | ||
560 | /** | ||
561 | * opp_init_cpufreq_table() - create a cpufreq table for a device | ||
562 | * @dev: device for which we do this operation | ||
563 | * @table: Cpufreq table returned to the caller | ||
564 | * | ||
565 | * Generate a cpufreq table for a provided device - this assumes that the | ||
566 | * opp list is already initialized and ready for usage. | ||
567 | * | ||
568 | * This function allocates required memory for the cpufreq table. It is | ||
569 | * expected that the caller does the required maintenance such as freeing | ||
570 | * the table as required. | ||
571 | * | ||
572 | * Returns -EINVAL for bad pointers, -ENODEV if the device is not found, -ENOMEM | ||
573 | * if no memory available for the operation (table is not populated), returns 0 | ||
574 | * if successful and table is populated. | ||
575 | * | ||
576 | * WARNING: It is important for the callers to ensure refreshing their copy of | ||
577 | * the table if any of the opp_{add,enable,disable} functions are invoked in the interim. | ||
578 | * | ||
579 | * Locking: The internal device_opp and opp structures are RCU protected. | ||
580 | * To simplify the logic, we pretend we are an updater and hold the relevant mutex here. | ||
581 | * Callers should ensure that this function is *NOT* called under RCU protection | ||
582 | * or in contexts where mutex locking cannot be used. | ||
583 | */ | ||
584 | int opp_init_cpufreq_table(struct device *dev, | ||
585 | struct cpufreq_frequency_table **table) | ||
586 | { | ||
587 | struct device_opp *dev_opp; | ||
588 | struct opp *opp; | ||
589 | struct cpufreq_frequency_table *freq_table; | ||
590 | int i = 0; | ||
591 | |||
592 | /* Pretend as if I am an updater */ | ||
593 | mutex_lock(&dev_opp_list_lock); | ||
594 | |||
595 | dev_opp = find_device_opp(dev); | ||
596 | if (IS_ERR(dev_opp)) { | ||
597 | int r = PTR_ERR(dev_opp); | ||
598 | mutex_unlock(&dev_opp_list_lock); | ||
599 | dev_err(dev, "%s: Device OPP not found (%d)\n", __func__, r); | ||
600 | return r; | ||
601 | } | ||
602 | |||
603 | freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) * | ||
604 | (opp_get_opp_count(dev) + 1), GFP_KERNEL); | ||
605 | if (!freq_table) { | ||
606 | mutex_unlock(&dev_opp_list_lock); | ||
607 | dev_warn(dev, "%s: Unable to allocate frequency table\n", | ||
608 | __func__); | ||
609 | return -ENOMEM; | ||
610 | } | ||
611 | |||
612 | list_for_each_entry(opp, &dev_opp->opp_list, node) { | ||
613 | if (opp->available) { | ||
614 | freq_table[i].index = i; | ||
615 | freq_table[i].frequency = opp->rate / 1000; | ||
616 | i++; | ||
617 | } | ||
618 | } | ||
619 | mutex_unlock(&dev_opp_list_lock); | ||
620 | |||
621 | freq_table[i].index = i; | ||
622 | freq_table[i].frequency = CPUFREQ_TABLE_END; | ||
623 | |||
624 | *table = &freq_table[0]; | ||
625 | |||
626 | return 0; | ||
627 | } | ||
628 | #endif /* CONFIG_CPU_FREQ */ | ||
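
Sketch of a cpufreq driver's init path consuming the table generated above. How the driver obtains the device pointer, and the use of cpufreq_frequency_table_cpuinfo(), are assumptions for illustration; as noted, the caller owns and must free the table.

#include <linux/cpufreq.h>
#include <linux/opp.h>
#include <linux/slab.h>

static struct cpufreq_frequency_table *freq_table;

static int example_cpu_init(struct cpufreq_policy *policy, struct device *cpu_dev)
{
        int ret;

        ret = opp_init_cpufreq_table(cpu_dev, &freq_table);
        if (ret)
                return ret;

        ret = cpufreq_frequency_table_cpuinfo(policy, freq_table);
        if (ret)
                kfree(freq_table);      /* freeing is the caller's responsibility */

        return ret;
}
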
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h index c0bd03c83b9c..f2a25f18fde7 100644 --- a/drivers/base/power/power.h +++ b/drivers/base/power/power.h | |||
@@ -34,6 +34,7 @@ extern void device_pm_move_last(struct device *); | |||
34 | 34 | ||
35 | static inline void device_pm_init(struct device *dev) | 35 | static inline void device_pm_init(struct device *dev) |
36 | { | 36 | { |
37 | spin_lock_init(&dev->power.lock); | ||
37 | pm_runtime_init(dev); | 38 | pm_runtime_init(dev); |
38 | } | 39 | } |
39 | 40 | ||
@@ -57,18 +58,18 @@ static inline void device_pm_move_last(struct device *dev) {} | |||
57 | * sysfs.c | 58 | * sysfs.c |
58 | */ | 59 | */ |
59 | 60 | ||
60 | extern int dpm_sysfs_add(struct device *); | 61 | extern int dpm_sysfs_add(struct device *dev); |
61 | extern void dpm_sysfs_remove(struct device *); | 62 | extern void dpm_sysfs_remove(struct device *dev); |
63 | extern void rpm_sysfs_remove(struct device *dev); | ||
64 | extern int wakeup_sysfs_add(struct device *dev); | ||
65 | extern void wakeup_sysfs_remove(struct device *dev); | ||
62 | 66 | ||
63 | #else /* CONFIG_PM */ | 67 | #else /* CONFIG_PM */ |
64 | 68 | ||
65 | static inline int dpm_sysfs_add(struct device *dev) | 69 | static inline int dpm_sysfs_add(struct device *dev) { return 0; } |
66 | { | 70 | static inline void dpm_sysfs_remove(struct device *dev) {} |
67 | return 0; | 71 | static inline void rpm_sysfs_remove(struct device *dev) {} |
68 | } | 72 | static inline int wakeup_sysfs_add(struct device *dev) { return 0; } |
69 | 73 | static inline void wakeup_sysfs_remove(struct device *dev) {} | |
70 | static inline void dpm_sysfs_remove(struct device *dev) | ||
71 | { | ||
72 | } | ||
73 | 74 | ||
74 | #endif | 75 | #endif |
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index b78c401ffa73..0d4587b15c55 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c | |||
@@ -2,17 +2,55 @@ | |||
2 | * drivers/base/power/runtime.c - Helper functions for device run-time PM | 2 | * drivers/base/power/runtime.c - Helper functions for device run-time PM |
3 | * | 3 | * |
4 | * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. | 4 | * Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. |
5 | * Copyright (C) 2010 Alan Stern <stern@rowland.harvard.edu> | ||
5 | * | 6 | * |
6 | * This file is released under the GPLv2. | 7 | * This file is released under the GPLv2. |
7 | */ | 8 | */ |
8 | 9 | ||
9 | #include <linux/sched.h> | 10 | #include <linux/sched.h> |
10 | #include <linux/pm_runtime.h> | 11 | #include <linux/pm_runtime.h> |
11 | #include <linux/jiffies.h> | 12 | #include "power.h" |
12 | 13 | ||
13 | static int __pm_runtime_resume(struct device *dev, bool from_wq); | 14 | static int rpm_resume(struct device *dev, int rpmflags); |
14 | static int __pm_request_idle(struct device *dev); | 15 | static int rpm_suspend(struct device *dev, int rpmflags); |
15 | static int __pm_request_resume(struct device *dev); | 16 | |
17 | /** | ||
18 | * update_pm_runtime_accounting - Update the time accounting of power states | ||
19 | * @dev: Device to update the accounting for | ||
20 | * | ||
21 | * In order to be able to have time accounting of the various power states | ||
22 | * (as used by programs such as PowerTOP to show the effectiveness of runtime | ||
23 | * PM), we need to track the time spent in each state. | ||
24 | * update_pm_runtime_accounting must be called each time before the | ||
25 | * runtime_status field is updated, to account the time in the old state | ||
26 | * correctly. | ||
27 | */ | ||
28 | void update_pm_runtime_accounting(struct device *dev) | ||
29 | { | ||
30 | unsigned long now = jiffies; | ||
31 | int delta; | ||
32 | |||
33 | delta = now - dev->power.accounting_timestamp; | ||
34 | |||
35 | if (delta < 0) | ||
36 | delta = 0; | ||
37 | |||
38 | dev->power.accounting_timestamp = now; | ||
39 | |||
40 | if (dev->power.disable_depth > 0) | ||
41 | return; | ||
42 | |||
43 | if (dev->power.runtime_status == RPM_SUSPENDED) | ||
44 | dev->power.suspended_jiffies += delta; | ||
45 | else | ||
46 | dev->power.active_jiffies += delta; | ||
47 | } | ||
48 | |||
49 | static void __update_runtime_status(struct device *dev, enum rpm_status status) | ||
50 | { | ||
51 | update_pm_runtime_accounting(dev); | ||
52 | dev->power.runtime_status = status; | ||
53 | } | ||
16 | 54 | ||
17 | /** | 55 | /** |
18 | * pm_runtime_deactivate_timer - Deactivate given device's suspend timer. | 56 | * pm_runtime_deactivate_timer - Deactivate given device's suspend timer. |
@@ -40,62 +78,156 @@ static void pm_runtime_cancel_pending(struct device *dev) | |||
40 | dev->power.request = RPM_REQ_NONE; | 78 | dev->power.request = RPM_REQ_NONE; |
41 | } | 79 | } |
42 | 80 | ||
43 | /** | 81 | /* |
44 | * __pm_runtime_idle - Notify device bus type if the device can be suspended. | 82 | * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time. |
45 | * @dev: Device to notify the bus type about. | 83 | * @dev: Device to handle. |
46 | * | 84 | * |
47 | * This function must be called under dev->power.lock with interrupts disabled. | 85 | * Compute the autosuspend-delay expiration time based on the device's |
86 | * power.last_busy time. If the delay has already expired or is disabled | ||
87 | * (negative) or the power.use_autosuspend flag isn't set, return 0. | ||
88 | * Otherwise return the expiration time in jiffies (adjusted to be nonzero). | ||
89 | * | ||
90 | * This function may be called either with or without dev->power.lock held. | ||
91 | * Either way it can be racy, since power.last_busy may be updated at any time. | ||
48 | */ | 92 | */ |
49 | static int __pm_runtime_idle(struct device *dev) | 93 | unsigned long pm_runtime_autosuspend_expiration(struct device *dev) |
50 | __releases(&dev->power.lock) __acquires(&dev->power.lock) | 94 | { |
95 | int autosuspend_delay; | ||
96 | long elapsed; | ||
97 | unsigned long last_busy; | ||
98 | unsigned long expires = 0; | ||
99 | |||
100 | if (!dev->power.use_autosuspend) | ||
101 | goto out; | ||
102 | |||
103 | autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay); | ||
104 | if (autosuspend_delay < 0) | ||
105 | goto out; | ||
106 | |||
107 | last_busy = ACCESS_ONCE(dev->power.last_busy); | ||
108 | elapsed = jiffies - last_busy; | ||
109 | if (elapsed < 0) | ||
110 | goto out; /* jiffies has wrapped around. */ | ||
111 | |||
112 | /* | ||
113 | * If the autosuspend_delay is >= 1 second, align the timer by rounding | ||
114 | * up to the nearest second. | ||
115 | */ | ||
116 | expires = last_busy + msecs_to_jiffies(autosuspend_delay); | ||
117 | if (autosuspend_delay >= 1000) | ||
118 | expires = round_jiffies(expires); | ||
119 | expires += !expires; | ||
120 | if (elapsed >= expires - last_busy) | ||
121 | expires = 0; /* Already expired. */ | ||
122 | |||
123 | out: | ||
124 | return expires; | ||
125 | } | ||
126 | EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration); | ||
127 | |||
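
Sketch of how a driver opts into the autosuspend behaviour that pm_runtime_autosuspend_expiration() implements: set a delay, refresh power.last_busy after each burst of activity, and drop the usage count with the autosuspend variant of put. The helper names follow the runtime PM autosuspend API introduced together with this code; the 2-second delay is a placeholder.

#include <linux/pm_runtime.h>

static void example_setup_autosuspend(struct device *dev)
{
        pm_runtime_set_autosuspend_delay(dev, 2000);    /* 2 s of idleness */
        pm_runtime_use_autosuspend(dev);
}

static void example_io_done(struct device *dev)
{
        pm_runtime_mark_last_busy(dev);         /* refresh power.last_busy */
        pm_runtime_put_autosuspend(dev);        /* may suspend after the delay */
}
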
128 | /** | ||
129 | * rpm_check_suspend_allowed - Test whether a device may be suspended. | ||
130 | * @dev: Device to test. | ||
131 | */ | ||
132 | static int rpm_check_suspend_allowed(struct device *dev) | ||
51 | { | 133 | { |
52 | int retval = 0; | 134 | int retval = 0; |
53 | 135 | ||
54 | if (dev->power.runtime_error) | 136 | if (dev->power.runtime_error) |
55 | retval = -EINVAL; | 137 | retval = -EINVAL; |
56 | else if (dev->power.idle_notification) | ||
57 | retval = -EINPROGRESS; | ||
58 | else if (atomic_read(&dev->power.usage_count) > 0 | 138 | else if (atomic_read(&dev->power.usage_count) > 0 |
59 | || dev->power.disable_depth > 0 | 139 | || dev->power.disable_depth > 0) |
60 | || dev->power.runtime_status != RPM_ACTIVE) | ||
61 | retval = -EAGAIN; | 140 | retval = -EAGAIN; |
62 | else if (!pm_children_suspended(dev)) | 141 | else if (!pm_children_suspended(dev)) |
63 | retval = -EBUSY; | 142 | retval = -EBUSY; |
143 | |||
144 | /* Pending resume requests take precedence over suspends. */ | ||
145 | else if ((dev->power.deferred_resume | ||
146 | && dev->power.runtime_status == RPM_SUSPENDING) | ||
147 | || (dev->power.request_pending | ||
148 | && dev->power.request == RPM_REQ_RESUME)) | ||
149 | retval = -EAGAIN; | ||
150 | else if (dev->power.runtime_status == RPM_SUSPENDED) | ||
151 | retval = 1; | ||
152 | |||
153 | return retval; | ||
154 | } | ||
155 | |||
156 | /** | ||
157 | * rpm_idle - Notify device bus type if the device can be suspended. | ||
158 | * @dev: Device to notify the bus type about. | ||
159 | * @rpmflags: Flag bits. | ||
160 | * | ||
161 | * Check if the device's run-time PM status allows it to be suspended. If | ||
162 | * another idle notification has been started earlier, return immediately. If | ||
163 | * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise | ||
164 | * run the ->runtime_idle() callback directly. | ||
165 | * | ||
166 | * This function must be called under dev->power.lock with interrupts disabled. | ||
167 | */ | ||
168 | static int rpm_idle(struct device *dev, int rpmflags) | ||
169 | { | ||
170 | int (*callback)(struct device *); | ||
171 | int retval; | ||
172 | |||
173 | retval = rpm_check_suspend_allowed(dev); | ||
174 | if (retval < 0) | ||
175 | ; /* Conditions are wrong. */ | ||
176 | |||
177 | /* Idle notifications are allowed only in the RPM_ACTIVE state. */ | ||
178 | else if (dev->power.runtime_status != RPM_ACTIVE) | ||
179 | retval = -EAGAIN; | ||
180 | |||
181 | /* | ||
182 | * Any pending request other than an idle notification takes | ||
183 | * precedence over us, except that the timer may be running. | ||
184 | */ | ||
185 | else if (dev->power.request_pending && | ||
186 | dev->power.request > RPM_REQ_IDLE) | ||
187 | retval = -EAGAIN; | ||
188 | |||
189 | /* Act as though RPM_NOWAIT is always set. */ | ||
190 | else if (dev->power.idle_notification) | ||
191 | retval = -EINPROGRESS; | ||
64 | if (retval) | 192 | if (retval) |
65 | goto out; | 193 | goto out; |
66 | 194 | ||
67 | if (dev->power.request_pending) { | 195 | /* Pending requests need to be canceled. */ |
68 | /* | 196 | dev->power.request = RPM_REQ_NONE; |
69 | * If an idle notification request is pending, cancel it. Any | 197 | |
70 | * other pending request takes precedence over us. | 198 | if (dev->power.no_callbacks) { |
71 | */ | 199 | /* Assume ->runtime_idle() callback would have suspended. */ |
72 | if (dev->power.request == RPM_REQ_IDLE) { | 200 | retval = rpm_suspend(dev, rpmflags); |
73 | dev->power.request = RPM_REQ_NONE; | 201 | goto out; |
74 | } else if (dev->power.request != RPM_REQ_NONE) { | 202 | } |
75 | retval = -EAGAIN; | 203 | |
76 | goto out; | 204 | /* Carry out an asynchronous or a synchronous idle notification. */ |
205 | if (rpmflags & RPM_ASYNC) { | ||
206 | dev->power.request = RPM_REQ_IDLE; | ||
207 | if (!dev->power.request_pending) { | ||
208 | dev->power.request_pending = true; | ||
209 | queue_work(pm_wq, &dev->power.work); | ||
77 | } | 210 | } |
211 | goto out; | ||
78 | } | 212 | } |
79 | 213 | ||
80 | dev->power.idle_notification = true; | 214 | dev->power.idle_notification = true; |
81 | 215 | ||
82 | if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_idle) { | 216 | if (dev->pwr_domain) |
83 | spin_unlock_irq(&dev->power.lock); | 217 | callback = dev->pwr_domain->ops.runtime_idle; |
84 | 218 | else if (dev->type && dev->type->pm) | |
85 | dev->bus->pm->runtime_idle(dev); | 219 | callback = dev->type->pm->runtime_idle; |
86 | 220 | else if (dev->class && dev->class->pm) | |
87 | spin_lock_irq(&dev->power.lock); | 221 | callback = dev->class->pm->runtime_idle; |
88 | } else if (dev->type && dev->type->pm && dev->type->pm->runtime_idle) { | 222 | else if (dev->bus && dev->bus->pm) |
89 | spin_unlock_irq(&dev->power.lock); | 223 | callback = dev->bus->pm->runtime_idle; |
90 | 224 | else | |
91 | dev->type->pm->runtime_idle(dev); | 225 | callback = NULL; |
92 | 226 | ||
93 | spin_lock_irq(&dev->power.lock); | 227 | if (callback) { |
94 | } else if (dev->class && dev->class->pm | ||
95 | && dev->class->pm->runtime_idle) { | ||
96 | spin_unlock_irq(&dev->power.lock); | 228 | spin_unlock_irq(&dev->power.lock); |
97 | 229 | ||
98 | dev->class->pm->runtime_idle(dev); | 230 | callback(dev); |
99 | 231 | ||
100 | spin_lock_irq(&dev->power.lock); | 232 | spin_lock_irq(&dev->power.lock); |
101 | } | 233 | } |
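
For orientation, a hedged sketch of where the callbacks selected above normally come from: they are ordinary dev_pm_ops members supplied by a bus type, device type, class, or power domain. The bodies below are placeholders, not part of this patch.

#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int example_runtime_suspend(struct device *dev) { return 0; }
static int example_runtime_resume(struct device *dev)  { return 0; }

static int example_runtime_idle(struct device *dev)
{
        /* Nothing keeps the device busy: ask the core to suspend it. */
        return pm_runtime_suspend(dev);
}

static const struct dev_pm_ops example_pm_ops = {
        SET_RUNTIME_PM_OPS(example_runtime_suspend,
                           example_runtime_resume,
                           example_runtime_idle)
};
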
@@ -108,113 +240,102 @@ static int __pm_runtime_idle(struct device *dev) | |||
108 | } | 240 | } |
109 | 241 | ||
110 | /** | 242 | /** |
111 | * pm_runtime_idle - Notify device bus type if the device can be suspended. | 243 | * rpm_callback - Run a given runtime PM callback for a given device. |
112 | * @dev: Device to notify the bus type about. | 244 | * @cb: Runtime PM callback to run. |
245 | * @dev: Device to run the callback for. | ||
113 | */ | 246 | */ |
114 | int pm_runtime_idle(struct device *dev) | 247 | static int rpm_callback(int (*cb)(struct device *), struct device *dev) |
248 | __releases(&dev->power.lock) __acquires(&dev->power.lock) | ||
115 | { | 249 | { |
116 | int retval; | 250 | int retval; |
117 | 251 | ||
118 | spin_lock_irq(&dev->power.lock); | 252 | if (!cb) |
119 | retval = __pm_runtime_idle(dev); | 253 | return -ENOSYS; |
120 | spin_unlock_irq(&dev->power.lock); | ||
121 | |||
122 | return retval; | ||
123 | } | ||
124 | EXPORT_SYMBOL_GPL(pm_runtime_idle); | ||
125 | |||
126 | |||
127 | /** | ||
128 | * update_pm_runtime_accounting - Update the time accounting of power states | ||
129 | * @dev: Device to update the accounting for | ||
130 | * | ||
131 | * In order to be able to have time accounting of the various power states | ||
132 | * (as used by programs such as PowerTOP to show the effectiveness of runtime | ||
133 | * PM), we need to track the time spent in each state. | ||
134 | * update_pm_runtime_accounting must be called each time before the | ||
135 | * runtime_status field is updated, to account the time in the old state | ||
136 | * correctly. | ||
137 | */ | ||
138 | void update_pm_runtime_accounting(struct device *dev) | ||
139 | { | ||
140 | unsigned long now = jiffies; | ||
141 | int delta; | ||
142 | |||
143 | delta = now - dev->power.accounting_timestamp; | ||
144 | |||
145 | if (delta < 0) | ||
146 | delta = 0; | ||
147 | 254 | ||
148 | dev->power.accounting_timestamp = now; | 255 | if (dev->power.irq_safe) { |
149 | 256 | retval = cb(dev); | |
150 | if (dev->power.disable_depth > 0) | 257 | } else { |
151 | return; | 258 | spin_unlock_irq(&dev->power.lock); |
152 | 259 | ||
153 | if (dev->power.runtime_status == RPM_SUSPENDED) | 260 | retval = cb(dev); |
154 | dev->power.suspended_jiffies += delta; | ||
155 | else | ||
156 | dev->power.active_jiffies += delta; | ||
157 | } | ||
158 | 261 | ||
159 | static void __update_runtime_status(struct device *dev, enum rpm_status status) | 262 | spin_lock_irq(&dev->power.lock); |
160 | { | 263 | } |
161 | update_pm_runtime_accounting(dev); | 264 | dev->power.runtime_error = retval; |
162 | dev->power.runtime_status = status; | 265 | return retval; |
163 | } | 266 | } |
164 | 267 | ||
165 | /** | 268 | /** |
166 | * __pm_runtime_suspend - Carry out run-time suspend of given device. | 269 | * rpm_suspend - Carry out run-time suspend of given device. |
167 | * @dev: Device to suspend. | 270 | * @dev: Device to suspend. |
168 | * @from_wq: If set, the function has been called via pm_wq. | 271 | * @rpmflags: Flag bits. |
169 | * | 272 | * |
170 | * Check if the device can be suspended and run the ->runtime_suspend() callback | 273 | * Check if the device's run-time PM status allows it to be suspended. If |
171 | * provided by its bus type. If another suspend has been started earlier, wait | 274 | * another suspend has been started earlier, either return immediately or wait |
172 | * for it to finish. If an idle notification or suspend request is pending or | 275 | * for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC flags. Cancel a |
173 | * scheduled, cancel it. | 276 | * pending idle notification. If the RPM_ASYNC flag is set then queue a |
277 | * suspend request; otherwise run the ->runtime_suspend() callback directly. | ||
278 | * If a deferred resume was requested while the callback was running then carry | ||
279 | * it out; otherwise send an idle notification for the device (if the suspend | ||
280 | * failed) or for its parent (if the suspend succeeded). | ||
174 | * | 281 | * |
175 | * This function must be called under dev->power.lock with interrupts disabled. | 282 | * This function must be called under dev->power.lock with interrupts disabled. |
176 | */ | 283 | */ |
177 | int __pm_runtime_suspend(struct device *dev, bool from_wq) | 284 | static int rpm_suspend(struct device *dev, int rpmflags) |
178 | __releases(&dev->power.lock) __acquires(&dev->power.lock) | 285 | __releases(&dev->power.lock) __acquires(&dev->power.lock) |
179 | { | 286 | { |
287 | int (*callback)(struct device *); | ||
180 | struct device *parent = NULL; | 288 | struct device *parent = NULL; |
181 | bool notify = false; | 289 | int retval; |
182 | int retval = 0; | ||
183 | 290 | ||
184 | dev_dbg(dev, "__pm_runtime_suspend()%s!\n", | 291 | dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags); |
185 | from_wq ? " from workqueue" : ""); | ||
186 | 292 | ||
187 | repeat: | 293 | repeat: |
188 | if (dev->power.runtime_error) { | 294 | retval = rpm_check_suspend_allowed(dev); |
189 | retval = -EINVAL; | ||
190 | goto out; | ||
191 | } | ||
192 | 295 | ||
193 | /* Pending resume requests take precedence over us. */ | 296 | if (retval < 0) |
194 | if (dev->power.request_pending | 297 | ; /* Conditions are wrong. */ |
195 | && dev->power.request == RPM_REQ_RESUME) { | 298 | |
299 | /* Synchronous suspends are not allowed in the RPM_RESUMING state. */ | ||
300 | else if (dev->power.runtime_status == RPM_RESUMING && | ||
301 | !(rpmflags & RPM_ASYNC)) | ||
196 | retval = -EAGAIN; | 302 | retval = -EAGAIN; |
303 | if (retval) | ||
197 | goto out; | 304 | goto out; |
305 | |||
306 | /* If the autosuspend_delay time hasn't expired yet, reschedule. */ | ||
307 | if ((rpmflags & RPM_AUTO) | ||
308 | && dev->power.runtime_status != RPM_SUSPENDING) { | ||
309 | unsigned long expires = pm_runtime_autosuspend_expiration(dev); | ||
310 | |||
311 | if (expires != 0) { | ||
312 | /* Pending requests need to be canceled. */ | ||
313 | dev->power.request = RPM_REQ_NONE; | ||
314 | |||
315 | /* | ||
316 | * Optimization: If the timer is already running and is | ||
317 | * set to expire at or before the autosuspend delay, | ||
318 | * avoid the overhead of resetting it. Just let it | ||
319 | * expire; pm_suspend_timer_fn() will take care of the | ||
320 | * rest. | ||
321 | */ | ||
322 | if (!(dev->power.timer_expires && time_before_eq( | ||
323 | dev->power.timer_expires, expires))) { | ||
324 | dev->power.timer_expires = expires; | ||
325 | mod_timer(&dev->power.suspend_timer, expires); | ||
326 | } | ||
327 | dev->power.timer_autosuspends = 1; | ||
328 | goto out; | ||
329 | } | ||
198 | } | 330 | } |
199 | 331 | ||
200 | /* Other scheduled or pending requests need to be canceled. */ | 332 | /* Other scheduled or pending requests need to be canceled. */ |
201 | pm_runtime_cancel_pending(dev); | 333 | pm_runtime_cancel_pending(dev); |
202 | 334 | ||
203 | if (dev->power.runtime_status == RPM_SUSPENDED) | ||
204 | retval = 1; | ||
205 | else if (dev->power.runtime_status == RPM_RESUMING | ||
206 | || dev->power.disable_depth > 0 | ||
207 | || atomic_read(&dev->power.usage_count) > 0) | ||
208 | retval = -EAGAIN; | ||
209 | else if (!pm_children_suspended(dev)) | ||
210 | retval = -EBUSY; | ||
211 | if (retval) | ||
212 | goto out; | ||
213 | |||
214 | if (dev->power.runtime_status == RPM_SUSPENDING) { | 335 | if (dev->power.runtime_status == RPM_SUSPENDING) { |
215 | DEFINE_WAIT(wait); | 336 | DEFINE_WAIT(wait); |
216 | 337 | ||
217 | if (from_wq) { | 338 | if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) { |
218 | retval = -EINPROGRESS; | 339 | retval = -EINPROGRESS; |
219 | goto out; | 340 | goto out; |
220 | } | 341 | } |
@@ -236,46 +357,44 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq) | |||
236 | goto repeat; | 357 | goto repeat; |
237 | } | 358 | } |
238 | 359 | ||
239 | __update_runtime_status(dev, RPM_SUSPENDING); | ||
240 | dev->power.deferred_resume = false; | 360 | dev->power.deferred_resume = false; |
361 | if (dev->power.no_callbacks) | ||
362 | goto no_callback; /* Assume success. */ | ||
363 | |||
364 | /* Carry out an asynchronous or a synchronous suspend. */ | ||
365 | if (rpmflags & RPM_ASYNC) { | ||
366 | dev->power.request = (rpmflags & RPM_AUTO) ? | ||
367 | RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND; | ||
368 | if (!dev->power.request_pending) { | ||
369 | dev->power.request_pending = true; | ||
370 | queue_work(pm_wq, &dev->power.work); | ||
371 | } | ||
372 | goto out; | ||
373 | } | ||
241 | 374 | ||
242 | if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) { | 375 | __update_runtime_status(dev, RPM_SUSPENDING); |
243 | spin_unlock_irq(&dev->power.lock); | ||
244 | |||
245 | retval = dev->bus->pm->runtime_suspend(dev); | ||
246 | |||
247 | spin_lock_irq(&dev->power.lock); | ||
248 | dev->power.runtime_error = retval; | ||
249 | } else if (dev->type && dev->type->pm | ||
250 | && dev->type->pm->runtime_suspend) { | ||
251 | spin_unlock_irq(&dev->power.lock); | ||
252 | |||
253 | retval = dev->type->pm->runtime_suspend(dev); | ||
254 | |||
255 | spin_lock_irq(&dev->power.lock); | ||
256 | dev->power.runtime_error = retval; | ||
257 | } else if (dev->class && dev->class->pm | ||
258 | && dev->class->pm->runtime_suspend) { | ||
259 | spin_unlock_irq(&dev->power.lock); | ||
260 | |||
261 | retval = dev->class->pm->runtime_suspend(dev); | ||
262 | 376 | ||
263 | spin_lock_irq(&dev->power.lock); | 377 | if (dev->pwr_domain) |
264 | dev->power.runtime_error = retval; | 378 | callback = dev->pwr_domain->ops.runtime_suspend; |
265 | } else { | 379 | else if (dev->type && dev->type->pm) |
266 | retval = -ENOSYS; | 380 | callback = dev->type->pm->runtime_suspend; |
267 | } | 381 | else if (dev->class && dev->class->pm) |
382 | callback = dev->class->pm->runtime_suspend; | ||
383 | else if (dev->bus && dev->bus->pm) | ||
384 | callback = dev->bus->pm->runtime_suspend; | ||
385 | else | ||
386 | callback = NULL; | ||
268 | 387 | ||
388 | retval = rpm_callback(callback, dev); | ||
269 | if (retval) { | 389 | if (retval) { |
270 | __update_runtime_status(dev, RPM_ACTIVE); | 390 | __update_runtime_status(dev, RPM_ACTIVE); |
271 | if (retval == -EAGAIN || retval == -EBUSY) { | 391 | dev->power.deferred_resume = 0; |
272 | if (dev->power.timer_expires == 0) | 392 | if (retval == -EAGAIN || retval == -EBUSY) |
273 | notify = true; | ||
274 | dev->power.runtime_error = 0; | 393 | dev->power.runtime_error = 0; |
275 | } else { | 394 | else |
276 | pm_runtime_cancel_pending(dev); | 395 | pm_runtime_cancel_pending(dev); |
277 | } | ||
278 | } else { | 396 | } else { |
397 | no_callback: | ||
279 | __update_runtime_status(dev, RPM_SUSPENDED); | 398 | __update_runtime_status(dev, RPM_SUSPENDED); |
280 | pm_runtime_deactivate_timer(dev); | 399 | pm_runtime_deactivate_timer(dev); |
281 | 400 | ||
@@ -287,89 +406,86 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq) | |||
287 | wake_up_all(&dev->power.wait_queue); | 406 | wake_up_all(&dev->power.wait_queue); |
288 | 407 | ||
289 | if (dev->power.deferred_resume) { | 408 | if (dev->power.deferred_resume) { |
290 | __pm_runtime_resume(dev, false); | 409 | rpm_resume(dev, 0); |
291 | retval = -EAGAIN; | 410 | retval = -EAGAIN; |
292 | goto out; | 411 | goto out; |
293 | } | 412 | } |
294 | 413 | ||
295 | if (notify) | 414 | /* Maybe the parent is now able to suspend. */ |
296 | __pm_runtime_idle(dev); | 415 | if (parent && !parent->power.ignore_children && !dev->power.irq_safe) { |
297 | 416 | spin_unlock(&dev->power.lock); | |
298 | if (parent && !parent->power.ignore_children) { | ||
299 | spin_unlock_irq(&dev->power.lock); | ||
300 | 417 | ||
301 | pm_request_idle(parent); | 418 | spin_lock(&parent->power.lock); |
419 | rpm_idle(parent, RPM_ASYNC); | ||
420 | spin_unlock(&parent->power.lock); | ||
302 | 421 | ||
303 | spin_lock_irq(&dev->power.lock); | 422 | spin_lock(&dev->power.lock); |
304 | } | 423 | } |
305 | 424 | ||
306 | out: | 425 | out: |
307 | dev_dbg(dev, "__pm_runtime_suspend() returns %d!\n", retval); | 426 | dev_dbg(dev, "%s returns %d\n", __func__, retval); |
308 | 427 | ||
309 | return retval; | 428 | return retval; |
310 | } | 429 | } |
311 | 430 | ||
312 | /** | 431 | /** |
313 | * pm_runtime_suspend - Carry out run-time suspend of given device. | 432 | * rpm_resume - Carry out run-time resume of given device. |
314 | * @dev: Device to suspend. | ||
315 | */ | ||
316 | int pm_runtime_suspend(struct device *dev) | ||
317 | { | ||
318 | int retval; | ||
319 | |||
320 | spin_lock_irq(&dev->power.lock); | ||
321 | retval = __pm_runtime_suspend(dev, false); | ||
322 | spin_unlock_irq(&dev->power.lock); | ||
323 | |||
324 | return retval; | ||
325 | } | ||
326 | EXPORT_SYMBOL_GPL(pm_runtime_suspend); | ||
327 | |||
328 | /** | ||
329 | * __pm_runtime_resume - Carry out run-time resume of given device. | ||
330 | * @dev: Device to resume. | 433 | * @dev: Device to resume. |
331 | * @from_wq: If set, the function has been called via pm_wq. | 434 | * @rpmflags: Flag bits. |
332 | * | 435 | * |
333 | * Check if the device can be woken up and run the ->runtime_resume() callback | 436 | * Check if the device's run-time PM status allows it to be resumed. Cancel |
334 | * provided by its bus type. If another resume has been started earlier, wait | 437 | * any scheduled or pending requests. If another resume has been started |
335 | * for it to finish. If there's a suspend running in parallel with this | 438 | * earlier, either return immediately or wait for it to finish, depending on the |
336 | * function, wait for it to finish and resume the device. Cancel any scheduled | 439 | * RPM_NOWAIT and RPM_ASYNC flags. Similarly, if there's a suspend running in |
337 | * or pending requests. | 440 | * parallel with this function, either tell the other process to resume after |
441 | * suspending (deferred_resume) or wait for it to finish. If the RPM_ASYNC | ||
442 | * flag is set then queue a resume request; otherwise run the | ||
443 | * ->runtime_resume() callback directly. Queue an idle notification for the | ||
444 | * device if the resume succeeded. | ||
338 | * | 445 | * |
339 | * This function must be called under dev->power.lock with interrupts disabled. | 446 | * This function must be called under dev->power.lock with interrupts disabled. |
340 | */ | 447 | */ |
341 | int __pm_runtime_resume(struct device *dev, bool from_wq) | 448 | static int rpm_resume(struct device *dev, int rpmflags) |
342 | __releases(&dev->power.lock) __acquires(&dev->power.lock) | 449 | __releases(&dev->power.lock) __acquires(&dev->power.lock) |
343 | { | 450 | { |
451 | int (*callback)(struct device *); | ||
344 | struct device *parent = NULL; | 452 | struct device *parent = NULL; |
345 | int retval = 0; | 453 | int retval = 0; |
346 | 454 | ||
347 | dev_dbg(dev, "__pm_runtime_resume()%s!\n", | 455 | dev_dbg(dev, "%s flags 0x%x\n", __func__, rpmflags); |
348 | from_wq ? " from workqueue" : ""); | ||
349 | 456 | ||
350 | repeat: | 457 | repeat: |
351 | if (dev->power.runtime_error) { | 458 | if (dev->power.runtime_error) |
352 | retval = -EINVAL; | 459 | retval = -EINVAL; |
460 | else if (dev->power.disable_depth > 0) | ||
461 | retval = -EAGAIN; | ||
462 | if (retval) | ||
353 | goto out; | 463 | goto out; |
354 | } | ||
355 | 464 | ||
356 | pm_runtime_cancel_pending(dev); | 465 | /* |
466 | * Other scheduled or pending requests need to be canceled. Small | ||
467 | * optimization: If an autosuspend timer is running, leave it running | ||
468 | * rather than cancelling it now only to restart it again in the near | ||
469 | * future. | ||
470 | */ | ||
471 | dev->power.request = RPM_REQ_NONE; | ||
472 | if (!dev->power.timer_autosuspends) | ||
473 | pm_runtime_deactivate_timer(dev); | ||
357 | 474 | ||
358 | if (dev->power.runtime_status == RPM_ACTIVE) | 475 | if (dev->power.runtime_status == RPM_ACTIVE) { |
359 | retval = 1; | 476 | retval = 1; |
360 | else if (dev->power.disable_depth > 0) | ||
361 | retval = -EAGAIN; | ||
362 | if (retval) | ||
363 | goto out; | 477 | goto out; |
478 | } | ||
364 | 479 | ||
365 | if (dev->power.runtime_status == RPM_RESUMING | 480 | if (dev->power.runtime_status == RPM_RESUMING |
366 | || dev->power.runtime_status == RPM_SUSPENDING) { | 481 | || dev->power.runtime_status == RPM_SUSPENDING) { |
367 | DEFINE_WAIT(wait); | 482 | DEFINE_WAIT(wait); |
368 | 483 | ||
369 | if (from_wq) { | 484 | if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) { |
370 | if (dev->power.runtime_status == RPM_SUSPENDING) | 485 | if (dev->power.runtime_status == RPM_SUSPENDING) |
371 | dev->power.deferred_resume = true; | 486 | dev->power.deferred_resume = true; |
372 | retval = -EINPROGRESS; | 487 | else |
488 | retval = -EINPROGRESS; | ||
373 | goto out; | 489 | goto out; |
374 | } | 490 | } |
375 | 491 | ||
@@ -391,12 +507,43 @@ int __pm_runtime_resume(struct device *dev, bool from_wq) | |||
391 | goto repeat; | 507 | goto repeat; |
392 | } | 508 | } |
393 | 509 | ||
510 | /* | ||
511 | * See if we can skip waking up the parent. This is safe only if | ||
512 | * power.no_callbacks is set, because otherwise we don't know whether | ||
513 | * the resume will actually succeed. | ||
514 | */ | ||
515 | if (dev->power.no_callbacks && !parent && dev->parent) { | ||
516 | spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING); | ||
517 | if (dev->parent->power.disable_depth > 0 | ||
518 | || dev->parent->power.ignore_children | ||
519 | || dev->parent->power.runtime_status == RPM_ACTIVE) { | ||
520 | atomic_inc(&dev->parent->power.child_count); | ||
521 | spin_unlock(&dev->parent->power.lock); | ||
522 | goto no_callback; /* Assume success. */ | ||
523 | } | ||
524 | spin_unlock(&dev->parent->power.lock); | ||
525 | } | ||
526 | |||
527 | /* Carry out an asynchronous or a synchronous resume. */ | ||
528 | if (rpmflags & RPM_ASYNC) { | ||
529 | dev->power.request = RPM_REQ_RESUME; | ||
530 | if (!dev->power.request_pending) { | ||
531 | dev->power.request_pending = true; | ||
532 | queue_work(pm_wq, &dev->power.work); | ||
533 | } | ||
534 | retval = 0; | ||
535 | goto out; | ||
536 | } | ||
537 | |||
394 | if (!parent && dev->parent) { | 538 | if (!parent && dev->parent) { |
395 | /* | 539 | /* |
396 | * Increment the parent's resume counter and resume it if | 540 | * Increment the parent's usage counter and resume it if |
397 | * necessary. | 541 | * necessary. Not needed if dev is irq-safe; then the |
542 | * parent is permanently resumed. | ||
398 | */ | 543 | */ |
399 | parent = dev->parent; | 544 | parent = dev->parent; |
545 | if (dev->power.irq_safe) | ||
546 | goto skip_parent; | ||
400 | spin_unlock(&dev->power.lock); | 547 | spin_unlock(&dev->power.lock); |
401 | 548 | ||
402 | pm_runtime_get_noresume(parent); | 549 | pm_runtime_get_noresume(parent); |
@@ -408,7 +555,7 @@ int __pm_runtime_resume(struct device *dev, bool from_wq) | |||
408 | */ | 555 | */ |
409 | if (!parent->power.disable_depth | 556 | if (!parent->power.disable_depth |
410 | && !parent->power.ignore_children) { | 557 | && !parent->power.ignore_children) { |
411 | __pm_runtime_resume(parent, false); | 558 | rpm_resume(parent, 0); |
412 | if (parent->power.runtime_status != RPM_ACTIVE) | 559 | if (parent->power.runtime_status != RPM_ACTIVE) |
413 | retval = -EBUSY; | 560 | retval = -EBUSY; |
414 | } | 561 | } |
@@ -419,40 +566,30 @@ int __pm_runtime_resume(struct device *dev, bool from_wq) | |||
419 | goto out; | 566 | goto out; |
420 | goto repeat; | 567 | goto repeat; |
421 | } | 568 | } |
569 | skip_parent: | ||
422 | 570 | ||
423 | __update_runtime_status(dev, RPM_RESUMING); | 571 | if (dev->power.no_callbacks) |
424 | 572 | goto no_callback; /* Assume success. */ | |
425 | if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) { | ||
426 | spin_unlock_irq(&dev->power.lock); | ||
427 | |||
428 | retval = dev->bus->pm->runtime_resume(dev); | ||
429 | |||
430 | spin_lock_irq(&dev->power.lock); | ||
431 | dev->power.runtime_error = retval; | ||
432 | } else if (dev->type && dev->type->pm | ||
433 | && dev->type->pm->runtime_resume) { | ||
434 | spin_unlock_irq(&dev->power.lock); | ||
435 | |||
436 | retval = dev->type->pm->runtime_resume(dev); | ||
437 | |||
438 | spin_lock_irq(&dev->power.lock); | ||
439 | dev->power.runtime_error = retval; | ||
440 | } else if (dev->class && dev->class->pm | ||
441 | && dev->class->pm->runtime_resume) { | ||
442 | spin_unlock_irq(&dev->power.lock); | ||
443 | 573 | ||
444 | retval = dev->class->pm->runtime_resume(dev); | 574 | __update_runtime_status(dev, RPM_RESUMING); |
445 | 575 | ||
446 | spin_lock_irq(&dev->power.lock); | 576 | if (dev->pwr_domain) |
447 | dev->power.runtime_error = retval; | 577 | callback = dev->pwr_domain->ops.runtime_resume; |
448 | } else { | 578 | else if (dev->type && dev->type->pm) |
449 | retval = -ENOSYS; | 579 | callback = dev->type->pm->runtime_resume; |
450 | } | 580 | else if (dev->class && dev->class->pm) |
581 | callback = dev->class->pm->runtime_resume; | ||
582 | else if (dev->bus && dev->bus->pm) | ||
583 | callback = dev->bus->pm->runtime_resume; | ||
584 | else | ||
585 | callback = NULL; | ||
451 | 586 | ||
587 | retval = rpm_callback(callback, dev); | ||
452 | if (retval) { | 588 | if (retval) { |
453 | __update_runtime_status(dev, RPM_SUSPENDED); | 589 | __update_runtime_status(dev, RPM_SUSPENDED); |
454 | pm_runtime_cancel_pending(dev); | 590 | pm_runtime_cancel_pending(dev); |
455 | } else { | 591 | } else { |
592 | no_callback: | ||
456 | __update_runtime_status(dev, RPM_ACTIVE); | 593 | __update_runtime_status(dev, RPM_ACTIVE); |
457 | if (parent) | 594 | if (parent) |
458 | atomic_inc(&parent->power.child_count); | 595 | atomic_inc(&parent->power.child_count); |
@@ -460,10 +597,10 @@ int __pm_runtime_resume(struct device *dev, bool from_wq) | |||
460 | wake_up_all(&dev->power.wait_queue); | 597 | wake_up_all(&dev->power.wait_queue); |
461 | 598 | ||
462 | if (!retval) | 599 | if (!retval) |
463 | __pm_request_idle(dev); | 600 | rpm_idle(dev, RPM_ASYNC); |
464 | 601 | ||
465 | out: | 602 | out: |
466 | if (parent) { | 603 | if (parent && !dev->power.irq_safe) { |
467 | spin_unlock_irq(&dev->power.lock); | 604 | spin_unlock_irq(&dev->power.lock); |
468 | 605 | ||
469 | pm_runtime_put(parent); | 606 | pm_runtime_put(parent); |
@@ -471,28 +608,12 @@ int __pm_runtime_resume(struct device *dev, bool from_wq) | |||
471 | spin_lock_irq(&dev->power.lock); | 608 | spin_lock_irq(&dev->power.lock); |
472 | } | 609 | } |
473 | 610 | ||
474 | dev_dbg(dev, "__pm_runtime_resume() returns %d!\n", retval); | 611 | dev_dbg(dev, "%s returns %d\n", __func__, retval); |
475 | 612 | ||
476 | return retval; | 613 | return retval; |
477 | } | 614 | } |
478 | 615 | ||
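
A minimal sketch of the usual caller-side pattern that funnels into rpm_resume() and, later, rpm_suspend(): take a usage reference (resuming synchronously if needed) around the hardware access, then drop it again. Names other than the pm_runtime_* helpers are placeholders.

#include <linux/pm_runtime.h>

static int example_do_transfer(struct device *dev)
{
        int ret;

        ret = pm_runtime_get_sync(dev);         /* resume synchronously if suspended */
        if (ret < 0) {
                pm_runtime_put_noidle(dev);     /* balance the usage count on error */
                return ret;
        }

        /* ... touch the hardware here ... */

        pm_runtime_put(dev);                    /* queues an idle notification */
        return 0;
}
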
479 | /** | 616 | /** |
480 | * pm_runtime_resume - Carry out run-time resume of given device. | ||
481 | * @dev: Device to suspend. | ||
482 | */ | ||
483 | int pm_runtime_resume(struct device *dev) | ||
484 | { | ||
485 | int retval; | ||
486 | |||
487 | spin_lock_irq(&dev->power.lock); | ||
488 | retval = __pm_runtime_resume(dev, false); | ||
489 | spin_unlock_irq(&dev->power.lock); | ||
490 | |||
491 | return retval; | ||
492 | } | ||
493 | EXPORT_SYMBOL_GPL(pm_runtime_resume); | ||
494 | |||
495 | /** | ||
496 | * pm_runtime_work - Universal run-time PM work function. | 617 | * pm_runtime_work - Universal run-time PM work function. |
497 | * @work: Work structure used for scheduling the execution of this function. | 618 | * @work: Work structure used for scheduling the execution of this function. |
498 | * | 619 | * |
@@ -517,13 +638,16 @@ static void pm_runtime_work(struct work_struct *work) | |||
517 | case RPM_REQ_NONE: | 638 | case RPM_REQ_NONE: |
518 | break; | 639 | break; |
519 | case RPM_REQ_IDLE: | 640 | case RPM_REQ_IDLE: |
520 | __pm_runtime_idle(dev); | 641 | rpm_idle(dev, RPM_NOWAIT); |
521 | break; | 642 | break; |
522 | case RPM_REQ_SUSPEND: | 643 | case RPM_REQ_SUSPEND: |
523 | __pm_runtime_suspend(dev, true); | 644 | rpm_suspend(dev, RPM_NOWAIT); |
645 | break; | ||
646 | case RPM_REQ_AUTOSUSPEND: | ||
647 | rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO); | ||
524 | break; | 648 | break; |
525 | case RPM_REQ_RESUME: | 649 | case RPM_REQ_RESUME: |
526 | __pm_runtime_resume(dev, true); | 650 | rpm_resume(dev, RPM_NOWAIT); |
527 | break; | 651 | break; |
528 | } | 652 | } |
529 | 653 | ||
@@ -532,117 +656,10 @@ static void pm_runtime_work(struct work_struct *work) | |||
532 | } | 656 | } |
533 | 657 | ||
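Editor's note: pm_runtime_work() simply executes whichever request was recorded in dev->power.request. Drivers reach it through the asynchronous helpers, which are safe to call in atomic context because the real work runs later from pm_wq. A hedged sketch with hypothetical names:

    /* Hedged sketch: deferring runtime PM work to pm_wq from an interrupt
     * handler; the asynchronous helper records a request and queues
     * dev->power.work, which pm_runtime_work() above then dispatches.
     * foo_irq_handler() is hypothetical. */
    #include <linux/interrupt.h>
    #include <linux/pm_runtime.h>

    static irqreturn_t foo_irq_handler(int irq, void *data)
    {
    	struct device *dev = data;

    	/* ... acknowledge the interrupt, hand the data off ... */
    	pm_request_resume(dev);	/* asynchronous: runs later from pm_wq */
    	return IRQ_HANDLED;
    }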
534 | /** | 658 | /** |
535 | * __pm_request_idle - Submit an idle notification request for given device. | ||
536 | * @dev: Device to handle. | ||
537 | * | ||
538 | * Check if the device's run-time PM status is correct for suspending the device | ||
539 | * and queue up a request to run __pm_runtime_idle() for it. | ||
540 | * | ||
541 | * This function must be called under dev->power.lock with interrupts disabled. | ||
542 | */ | ||
543 | static int __pm_request_idle(struct device *dev) | ||
544 | { | ||
545 | int retval = 0; | ||
546 | |||
547 | if (dev->power.runtime_error) | ||
548 | retval = -EINVAL; | ||
549 | else if (atomic_read(&dev->power.usage_count) > 0 | ||
550 | || dev->power.disable_depth > 0 | ||
551 | || dev->power.runtime_status == RPM_SUSPENDED | ||
552 | || dev->power.runtime_status == RPM_SUSPENDING) | ||
553 | retval = -EAGAIN; | ||
554 | else if (!pm_children_suspended(dev)) | ||
555 | retval = -EBUSY; | ||
556 | if (retval) | ||
557 | return retval; | ||
558 | |||
559 | if (dev->power.request_pending) { | ||
560 | /* Any requests other than RPM_REQ_IDLE take precedence. */ | ||
561 | if (dev->power.request == RPM_REQ_NONE) | ||
562 | dev->power.request = RPM_REQ_IDLE; | ||
563 | else if (dev->power.request != RPM_REQ_IDLE) | ||
564 | retval = -EAGAIN; | ||
565 | return retval; | ||
566 | } | ||
567 | |||
568 | dev->power.request = RPM_REQ_IDLE; | ||
569 | dev->power.request_pending = true; | ||
570 | queue_work(pm_wq, &dev->power.work); | ||
571 | |||
572 | return retval; | ||
573 | } | ||
574 | |||
575 | /** | ||
576 | * pm_request_idle - Submit an idle notification request for given device. | ||
577 | * @dev: Device to handle. | ||
578 | */ | ||
579 | int pm_request_idle(struct device *dev) | ||
580 | { | ||
581 | unsigned long flags; | ||
582 | int retval; | ||
583 | |||
584 | spin_lock_irqsave(&dev->power.lock, flags); | ||
585 | retval = __pm_request_idle(dev); | ||
586 | spin_unlock_irqrestore(&dev->power.lock, flags); | ||
587 | |||
588 | return retval; | ||
589 | } | ||
590 | EXPORT_SYMBOL_GPL(pm_request_idle); | ||
591 | |||
592 | /** | ||
593 | * __pm_request_suspend - Submit a suspend request for given device. | ||
594 | * @dev: Device to suspend. | ||
595 | * | ||
596 | * This function must be called under dev->power.lock with interrupts disabled. | ||
597 | */ | ||
598 | static int __pm_request_suspend(struct device *dev) | ||
599 | { | ||
600 | int retval = 0; | ||
601 | |||
602 | if (dev->power.runtime_error) | ||
603 | return -EINVAL; | ||
604 | |||
605 | if (dev->power.runtime_status == RPM_SUSPENDED) | ||
606 | retval = 1; | ||
607 | else if (atomic_read(&dev->power.usage_count) > 0 | ||
608 | || dev->power.disable_depth > 0) | ||
609 | retval = -EAGAIN; | ||
610 | else if (dev->power.runtime_status == RPM_SUSPENDING) | ||
611 | retval = -EINPROGRESS; | ||
612 | else if (!pm_children_suspended(dev)) | ||
613 | retval = -EBUSY; | ||
614 | if (retval < 0) | ||
615 | return retval; | ||
616 | |||
617 | pm_runtime_deactivate_timer(dev); | ||
618 | |||
619 | if (dev->power.request_pending) { | ||
620 | /* | ||
621 | * Pending resume requests take precedence over us, but we can | ||
622 | * overtake any other pending request. | ||
623 | */ | ||
624 | if (dev->power.request == RPM_REQ_RESUME) | ||
625 | retval = -EAGAIN; | ||
626 | else if (dev->power.request != RPM_REQ_SUSPEND) | ||
627 | dev->power.request = retval ? | ||
628 | RPM_REQ_NONE : RPM_REQ_SUSPEND; | ||
629 | return retval; | ||
630 | } else if (retval) { | ||
631 | return retval; | ||
632 | } | ||
633 | |||
634 | dev->power.request = RPM_REQ_SUSPEND; | ||
635 | dev->power.request_pending = true; | ||
636 | queue_work(pm_wq, &dev->power.work); | ||
637 | |||
638 | return 0; | ||
639 | } | ||
640 | |||
641 | /** | ||
642 | * pm_suspend_timer_fn - Timer function for pm_schedule_suspend(). | 659 | * pm_suspend_timer_fn - Timer function for pm_schedule_suspend(). |
643 | * @data: Device pointer passed by pm_schedule_suspend(). | 660 | * @data: Device pointer passed by pm_schedule_suspend(). |
644 | * | 661 | * |
645 | * Check if the time is right and execute __pm_request_suspend() in that case. | 662 | * Check if the time is right and queue a suspend request. |
646 | */ | 663 | */ |
647 | static void pm_suspend_timer_fn(unsigned long data) | 664 | static void pm_suspend_timer_fn(unsigned long data) |
648 | { | 665 | { |
@@ -656,7 +673,8 @@ static void pm_suspend_timer_fn(unsigned long data) | |||
656 | /* If 'expire' is after 'jiffies' we've been called too early. */ | 673 | /* If 'expire' is after 'jiffies' we've been called too early. */ |
657 | if (expires > 0 && !time_after(expires, jiffies)) { | 674 | if (expires > 0 && !time_after(expires, jiffies)) { |
658 | dev->power.timer_expires = 0; | 675 | dev->power.timer_expires = 0; |
659 | __pm_request_suspend(dev); | 676 | rpm_suspend(dev, dev->power.timer_autosuspends ? |
677 | (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC); | ||
660 | } | 678 | } |
661 | 679 | ||
662 | spin_unlock_irqrestore(&dev->power.lock, flags); | 680 | spin_unlock_irqrestore(&dev->power.lock, flags); |
@@ -670,47 +688,25 @@ static void pm_suspend_timer_fn(unsigned long data) | |||
670 | int pm_schedule_suspend(struct device *dev, unsigned int delay) | 688 | int pm_schedule_suspend(struct device *dev, unsigned int delay) |
671 | { | 689 | { |
672 | unsigned long flags; | 690 | unsigned long flags; |
673 | int retval = 0; | 691 | int retval; |
674 | 692 | ||
675 | spin_lock_irqsave(&dev->power.lock, flags); | 693 | spin_lock_irqsave(&dev->power.lock, flags); |
676 | 694 | ||
677 | if (dev->power.runtime_error) { | ||
678 | retval = -EINVAL; | ||
679 | goto out; | ||
680 | } | ||
681 | |||
682 | if (!delay) { | 695 | if (!delay) { |
683 | retval = __pm_request_suspend(dev); | 696 | retval = rpm_suspend(dev, RPM_ASYNC); |
684 | goto out; | 697 | goto out; |
685 | } | 698 | } |
686 | 699 | ||
687 | pm_runtime_deactivate_timer(dev); | 700 | retval = rpm_check_suspend_allowed(dev); |
688 | |||
689 | if (dev->power.request_pending) { | ||
690 | /* | ||
691 | * Pending resume requests take precedence over us, but any | ||
692 | * other pending requests have to be canceled. | ||
693 | */ | ||
694 | if (dev->power.request == RPM_REQ_RESUME) { | ||
695 | retval = -EAGAIN; | ||
696 | goto out; | ||
697 | } | ||
698 | dev->power.request = RPM_REQ_NONE; | ||
699 | } | ||
700 | |||
701 | if (dev->power.runtime_status == RPM_SUSPENDED) | ||
702 | retval = 1; | ||
703 | else if (atomic_read(&dev->power.usage_count) > 0 | ||
704 | || dev->power.disable_depth > 0) | ||
705 | retval = -EAGAIN; | ||
706 | else if (!pm_children_suspended(dev)) | ||
707 | retval = -EBUSY; | ||
708 | if (retval) | 701 | if (retval) |
709 | goto out; | 702 | goto out; |
710 | 703 | ||
704 | /* Other scheduled or pending requests need to be canceled. */ | ||
705 | pm_runtime_cancel_pending(dev); | ||
706 | |||
711 | dev->power.timer_expires = jiffies + msecs_to_jiffies(delay); | 707 | dev->power.timer_expires = jiffies + msecs_to_jiffies(delay); |
712 | if (!dev->power.timer_expires) | 708 | dev->power.timer_expires += !dev->power.timer_expires; |
713 | dev->power.timer_expires = 1; | 709 | dev->power.timer_autosuspends = 0; |
714 | mod_timer(&dev->power.suspend_timer, dev->power.timer_expires); | 710 | mod_timer(&dev->power.suspend_timer, dev->power.timer_expires); |
715 | 711 | ||
716 | out: | 712 | out: |
@@ -721,103 +717,88 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay) | |||
721 | EXPORT_SYMBOL_GPL(pm_schedule_suspend); | 717 | EXPORT_SYMBOL_GPL(pm_schedule_suspend); |
722 | 718 | ||
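Editor's note: pm_schedule_suspend() now validates the request with rpm_check_suspend_allowed(), cancels anything else that is pending, and arms the suspend timer; the timer function above then issues an asynchronous suspend. A hedged driver-side sketch (foo_complete_io() is hypothetical):

    /* Hedged sketch: ask for a suspend a few seconds after the last transfer
     * finishes; a later resume request cancels the pending timer. */
    static void foo_complete_io(struct device *dev)
    {
    	/* ... finish the I/O ... */
    	pm_schedule_suspend(dev, 5000);	/* delay in milliseconds */
    }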
723 | /** | 719 | /** |
724 | * pm_request_resume - Submit a resume request for given device. | 720 | * __pm_runtime_idle - Entry point for run-time idle operations. |
725 | * @dev: Device to resume. | 721 | * @dev: Device to send idle notification for. |
722 | * @rpmflags: Flag bits. | ||
726 | * | 723 | * |
727 | * This function must be called under dev->power.lock with interrupts disabled. | 724 | * If the RPM_GET_PUT flag is set, decrement the device's usage count and |
725 | * return immediately if it is larger than zero. Then carry out an idle | ||
726 | * notification, either synchronous or asynchronous. | ||
727 | * | ||
728 | * This routine may be called in atomic context if the RPM_ASYNC flag is set. | ||
728 | */ | 729 | */ |
729 | static int __pm_request_resume(struct device *dev) | 730 | int __pm_runtime_idle(struct device *dev, int rpmflags) |
730 | { | 731 | { |
731 | int retval = 0; | 732 | unsigned long flags; |
732 | 733 | int retval; | |
733 | if (dev->power.runtime_error) | ||
734 | return -EINVAL; | ||
735 | |||
736 | if (dev->power.runtime_status == RPM_ACTIVE) | ||
737 | retval = 1; | ||
738 | else if (dev->power.runtime_status == RPM_RESUMING) | ||
739 | retval = -EINPROGRESS; | ||
740 | else if (dev->power.disable_depth > 0) | ||
741 | retval = -EAGAIN; | ||
742 | if (retval < 0) | ||
743 | return retval; | ||
744 | |||
745 | pm_runtime_deactivate_timer(dev); | ||
746 | 734 | ||
747 | if (dev->power.runtime_status == RPM_SUSPENDING) { | 735 | if (rpmflags & RPM_GET_PUT) { |
748 | dev->power.deferred_resume = true; | 736 | if (!atomic_dec_and_test(&dev->power.usage_count)) |
749 | return retval; | 737 | return 0; |
750 | } | ||
751 | if (dev->power.request_pending) { | ||
752 | /* If non-resume request is pending, we can overtake it. */ | ||
753 | dev->power.request = retval ? RPM_REQ_NONE : RPM_REQ_RESUME; | ||
754 | return retval; | ||
755 | } | 738 | } |
756 | if (retval) | ||
757 | return retval; | ||
758 | 739 | ||
759 | dev->power.request = RPM_REQ_RESUME; | 740 | spin_lock_irqsave(&dev->power.lock, flags); |
760 | dev->power.request_pending = true; | 741 | retval = rpm_idle(dev, rpmflags); |
761 | queue_work(pm_wq, &dev->power.work); | 742 | spin_unlock_irqrestore(&dev->power.lock, flags); |
762 | 743 | ||
763 | return retval; | 744 | return retval; |
764 | } | 745 | } |
746 | EXPORT_SYMBOL_GPL(__pm_runtime_idle); | ||
765 | 747 | ||
766 | /** | 748 | /** |
767 | * pm_request_resume - Submit a resume request for given device. | 749 | * __pm_runtime_suspend - Entry point for run-time put/suspend operations. |
768 | * @dev: Device to resume. | 750 | * @dev: Device to suspend. |
751 | * @rpmflags: Flag bits. | ||
752 | * | ||
753 | * If the RPM_GET_PUT flag is set, decrement the device's usage count and | ||
754 | * return immediately if it is larger than zero. Then carry out a suspend, | ||
755 | * either synchronous or asynchronous. | ||
756 | * | ||
757 | * This routine may be called in atomic context if the RPM_ASYNC flag is set. | ||
769 | */ | 758 | */ |
770 | int pm_request_resume(struct device *dev) | 759 | int __pm_runtime_suspend(struct device *dev, int rpmflags) |
771 | { | 760 | { |
772 | unsigned long flags; | 761 | unsigned long flags; |
773 | int retval; | 762 | int retval; |
774 | 763 | ||
764 | if (rpmflags & RPM_GET_PUT) { | ||
765 | if (!atomic_dec_and_test(&dev->power.usage_count)) | ||
766 | return 0; | ||
767 | } | ||
768 | |||
775 | spin_lock_irqsave(&dev->power.lock, flags); | 769 | spin_lock_irqsave(&dev->power.lock, flags); |
776 | retval = __pm_request_resume(dev); | 770 | retval = rpm_suspend(dev, rpmflags); |
777 | spin_unlock_irqrestore(&dev->power.lock, flags); | 771 | spin_unlock_irqrestore(&dev->power.lock, flags); |
778 | 772 | ||
779 | return retval; | 773 | return retval; |
780 | } | 774 | } |
781 | EXPORT_SYMBOL_GPL(pm_request_resume); | 775 | EXPORT_SYMBOL_GPL(__pm_runtime_suspend); |
782 | 776 | ||
783 | /** | 777 | /** |
784 | * __pm_runtime_get - Reference count a device and wake it up, if necessary. | 778 | * __pm_runtime_resume - Entry point for run-time resume operations. |
785 | * @dev: Device to handle. | 779 | * @dev: Device to resume. |
786 | * @sync: If set and the device is suspended, resume it synchronously. | 780 | * @rpmflags: Flag bits. |
781 | * | ||
782 | * If the RPM_GET_PUT flag is set, increment the device's usage count. Then | ||
783 | * carry out a resume, either synchronous or asynchronous. | ||
787 | * | 784 | * |
788 | * Increment the usage count of the device and resume it or submit a resume | 785 | * This routine may be called in atomic context if the RPM_ASYNC flag is set. |
789 | * request for it, depending on the value of @sync. | ||
790 | */ | 786 | */ |
791 | int __pm_runtime_get(struct device *dev, bool sync) | 787 | int __pm_runtime_resume(struct device *dev, int rpmflags) |
792 | { | 788 | { |
789 | unsigned long flags; | ||
793 | int retval; | 790 | int retval; |
794 | 791 | ||
795 | atomic_inc(&dev->power.usage_count); | 792 | if (rpmflags & RPM_GET_PUT) |
796 | retval = sync ? pm_runtime_resume(dev) : pm_request_resume(dev); | 793 | atomic_inc(&dev->power.usage_count); |
797 | |||
798 | return retval; | ||
799 | } | ||
800 | EXPORT_SYMBOL_GPL(__pm_runtime_get); | ||
801 | |||
802 | /** | ||
803 | * __pm_runtime_put - Decrement the device's usage counter and notify its bus. | ||
804 | * @dev: Device to handle. | ||
805 | * @sync: If the device's bus type is to be notified, do that synchronously. | ||
806 | * | ||
807 | * Decrement the usage count of the device and if it reaches zero, carry out a | ||
808 | * synchronous idle notification or submit an idle notification request for it, | ||
809 | * depending on the value of @sync. | ||
810 | */ | ||
811 | int __pm_runtime_put(struct device *dev, bool sync) | ||
812 | { | ||
813 | int retval = 0; | ||
814 | 794 | ||
815 | if (atomic_dec_and_test(&dev->power.usage_count)) | 795 | spin_lock_irqsave(&dev->power.lock, flags); |
816 | retval = sync ? pm_runtime_idle(dev) : pm_request_idle(dev); | 796 | retval = rpm_resume(dev, rpmflags); |
797 | spin_unlock_irqrestore(&dev->power.lock, flags); | ||
817 | 798 | ||
818 | return retval; | 799 | return retval; |
819 | } | 800 | } |
820 | EXPORT_SYMBOL_GPL(__pm_runtime_put); | 801 | EXPORT_SYMBOL_GPL(__pm_runtime_resume); |
821 | 802 | ||
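Editor's note: with __pm_runtime_get()/__pm_runtime_put() gone, the helpers drivers already use are expected to become thin inline wrappers around the three flag-based entry points. Roughly along these lines; the authoritative definitions live in include/linux/pm_runtime.h and may differ in detail:

    /* Rough sketch of the assumed wrappers on top of the new entry points. */
    static inline int pm_runtime_get_sync(struct device *dev)
    {
    	return __pm_runtime_resume(dev, RPM_GET_PUT);
    }

    static inline int pm_runtime_put(struct device *dev)
    {
    	return __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC);
    }

    static inline int pm_runtime_put_autosuspend(struct device *dev)
    {
    	return __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_ASYNC | RPM_AUTO);
    }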
822 | /** | 803 | /** |
823 | * __pm_runtime_set_status - Set run-time PM status of a device. | 804 | * __pm_runtime_set_status - Set run-time PM status of a device. |
@@ -968,7 +949,7 @@ int pm_runtime_barrier(struct device *dev) | |||
968 | 949 | ||
969 | if (dev->power.request_pending | 950 | if (dev->power.request_pending |
970 | && dev->power.request == RPM_REQ_RESUME) { | 951 | && dev->power.request == RPM_REQ_RESUME) { |
971 | __pm_runtime_resume(dev, false); | 952 | rpm_resume(dev, 0); |
972 | retval = 1; | 953 | retval = 1; |
973 | } | 954 | } |
974 | 955 | ||
@@ -1017,7 +998,7 @@ void __pm_runtime_disable(struct device *dev, bool check_resume) | |||
1017 | */ | 998 | */ |
1018 | pm_runtime_get_noresume(dev); | 999 | pm_runtime_get_noresume(dev); |
1019 | 1000 | ||
1020 | __pm_runtime_resume(dev, false); | 1001 | rpm_resume(dev, 0); |
1021 | 1002 | ||
1022 | pm_runtime_put_noidle(dev); | 1003 | pm_runtime_put_noidle(dev); |
1023 | } | 1004 | } |
@@ -1065,7 +1046,7 @@ void pm_runtime_forbid(struct device *dev) | |||
1065 | 1046 | ||
1066 | dev->power.runtime_auto = false; | 1047 | dev->power.runtime_auto = false; |
1067 | atomic_inc(&dev->power.usage_count); | 1048 | atomic_inc(&dev->power.usage_count); |
1068 | __pm_runtime_resume(dev, false); | 1049 | rpm_resume(dev, 0); |
1069 | 1050 | ||
1070 | out: | 1051 | out: |
1071 | spin_unlock_irq(&dev->power.lock); | 1052 | spin_unlock_irq(&dev->power.lock); |
@@ -1086,7 +1067,7 @@ void pm_runtime_allow(struct device *dev) | |||
1086 | 1067 | ||
1087 | dev->power.runtime_auto = true; | 1068 | dev->power.runtime_auto = true; |
1088 | if (atomic_dec_and_test(&dev->power.usage_count)) | 1069 | if (atomic_dec_and_test(&dev->power.usage_count)) |
1089 | __pm_runtime_idle(dev); | 1070 | rpm_idle(dev, RPM_AUTO); |
1090 | 1071 | ||
1091 | out: | 1072 | out: |
1092 | spin_unlock_irq(&dev->power.lock); | 1073 | spin_unlock_irq(&dev->power.lock); |
@@ -1094,13 +1075,130 @@ void pm_runtime_allow(struct device *dev) | |||
1094 | EXPORT_SYMBOL_GPL(pm_runtime_allow); | 1075 | EXPORT_SYMBOL_GPL(pm_runtime_allow); |
1095 | 1076 | ||
1096 | /** | 1077 | /** |
1078 | * pm_runtime_no_callbacks - Ignore run-time PM callbacks for a device. | ||
1079 | * @dev: Device to handle. | ||
1080 | * | ||
1081 | * Set the power.no_callbacks flag, which tells the PM core that this | ||
1082 | * device is power-managed through its parent and has no run-time PM | ||
1083 | * callbacks of its own. The run-time sysfs attributes will be removed. | ||
1084 | */ | ||
1085 | void pm_runtime_no_callbacks(struct device *dev) | ||
1086 | { | ||
1087 | spin_lock_irq(&dev->power.lock); | ||
1088 | dev->power.no_callbacks = 1; | ||
1089 | spin_unlock_irq(&dev->power.lock); | ||
1090 | if (device_is_registered(dev)) | ||
1091 | rpm_sysfs_remove(dev); | ||
1092 | } | ||
1093 | EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks); | ||
1094 | |||
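Editor's note: pm_runtime_no_callbacks() is meant for devices whose power is entirely managed through their parent (logical sub-devices, for instance), so the core can skip the callback machinery and drop the runtime sysfs files. A hedged sketch with hypothetical names:

    /* Hedged sketch: a hypothetical sub-device powered only via its parent. */
    static int foo_cell_probe(struct platform_device *pdev)
    {
    	pm_runtime_no_callbacks(&pdev->dev);
    	pm_runtime_enable(&pdev->dev);
    	return 0;
    }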
1095 | /** | ||
1096 | * pm_runtime_irq_safe - Leave interrupts disabled during callbacks. | ||
1097 | * @dev: Device to handle | ||
1098 | * | ||
1099 | * Set the power.irq_safe flag, which tells the PM core that the | ||
1100 | * ->runtime_suspend() and ->runtime_resume() callbacks for this device should | ||
1101 | * always be invoked with the spinlock held and interrupts disabled. It also | ||
1102 | * causes the parent's usage counter to be permanently incremented, preventing | ||
1103 | * the parent from runtime suspending -- otherwise an irq-safe child might have | ||
1104 | * to wait for a non-irq-safe parent. | ||
1105 | */ | ||
1106 | void pm_runtime_irq_safe(struct device *dev) | ||
1107 | { | ||
1108 | if (dev->parent) | ||
1109 | pm_runtime_get_sync(dev->parent); | ||
1110 | spin_lock_irq(&dev->power.lock); | ||
1111 | dev->power.irq_safe = 1; | ||
1112 | spin_unlock_irq(&dev->power.lock); | ||
1113 | } | ||
1114 | EXPORT_SYMBOL_GPL(pm_runtime_irq_safe); | ||
1115 | |||
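Editor's note: once power.irq_safe is set, rpm_suspend()/rpm_resume() keep the spinlock held and interrupts disabled around the callbacks, so a driver may take a synchronous reference even from its interrupt handler. A hedged sketch (foo_probe()/foo_irq() are hypothetical):

    /* Hedged sketch: declare the callbacks irq-safe in probe so that
     * pm_runtime_get_sync() may later be called from hard-irq context. */
    static int foo_probe(struct platform_device *pdev)
    {
    	pm_runtime_irq_safe(&pdev->dev);	/* parent stays active from now on */
    	pm_runtime_enable(&pdev->dev);
    	return 0;
    }

    static irqreturn_t foo_irq(int irq, void *data)
    {
    	struct device *dev = data;

    	pm_runtime_get_sync(dev);	/* legal here only because of irq_safe */
    	/* ... touch the hardware ... */
    	pm_runtime_put(dev);		/* asynchronous, also safe here */
    	return IRQ_HANDLED;
    }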
1116 | /** | ||
1117 | * update_autosuspend - Handle a change to a device's autosuspend settings. | ||
1118 | * @dev: Device to handle. | ||
1119 | * @old_delay: The former autosuspend_delay value. | ||
1120 | * @old_use: The former use_autosuspend value. | ||
1121 | * | ||
1122 | * Prevent runtime suspend if the new delay is negative and use_autosuspend is | ||
1123 | * set; otherwise allow it. Send an idle notification if suspends are allowed. | ||
1124 | * | ||
1125 | * This function must be called under dev->power.lock with interrupts disabled. | ||
1126 | */ | ||
1127 | static void update_autosuspend(struct device *dev, int old_delay, int old_use) | ||
1128 | { | ||
1129 | int delay = dev->power.autosuspend_delay; | ||
1130 | |||
1131 | /* Should runtime suspend be prevented now? */ | ||
1132 | if (dev->power.use_autosuspend && delay < 0) { | ||
1133 | |||
1134 | /* If it used to be allowed then prevent it. */ | ||
1135 | if (!old_use || old_delay >= 0) { | ||
1136 | atomic_inc(&dev->power.usage_count); | ||
1137 | rpm_resume(dev, 0); | ||
1138 | } | ||
1139 | } | ||
1140 | |||
1141 | /* Runtime suspend should be allowed now. */ | ||
1142 | else { | ||
1143 | |||
1144 | /* If it used to be prevented then allow it. */ | ||
1145 | if (old_use && old_delay < 0) | ||
1146 | atomic_dec(&dev->power.usage_count); | ||
1147 | |||
1148 | /* Maybe we can autosuspend now. */ | ||
1149 | rpm_idle(dev, RPM_AUTO); | ||
1150 | } | ||
1151 | } | ||
1152 | |||
1153 | /** | ||
1154 | * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value. | ||
1155 | * @dev: Device to handle. | ||
1156 | * @delay: Value of the new delay in milliseconds. | ||
1157 | * | ||
1158 | * Set the device's power.autosuspend_delay value. If it changes to negative | ||
1159 | * and the power.use_autosuspend flag is set, prevent run-time suspends. If it | ||
1160 | * changes the other way, allow run-time suspends. | ||
1161 | */ | ||
1162 | void pm_runtime_set_autosuspend_delay(struct device *dev, int delay) | ||
1163 | { | ||
1164 | int old_delay, old_use; | ||
1165 | |||
1166 | spin_lock_irq(&dev->power.lock); | ||
1167 | old_delay = dev->power.autosuspend_delay; | ||
1168 | old_use = dev->power.use_autosuspend; | ||
1169 | dev->power.autosuspend_delay = delay; | ||
1170 | update_autosuspend(dev, old_delay, old_use); | ||
1171 | spin_unlock_irq(&dev->power.lock); | ||
1172 | } | ||
1173 | EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay); | ||
1174 | |||
1175 | /** | ||
1176 | * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag. | ||
1177 | * @dev: Device to handle. | ||
1178 | * @use: New value for use_autosuspend. | ||
1179 | * | ||
1180 | * Set the device's power.use_autosuspend flag, and allow or prevent run-time | ||
1181 | * suspends as needed. | ||
1182 | */ | ||
1183 | void __pm_runtime_use_autosuspend(struct device *dev, bool use) | ||
1184 | { | ||
1185 | int old_delay, old_use; | ||
1186 | |||
1187 | spin_lock_irq(&dev->power.lock); | ||
1188 | old_delay = dev->power.autosuspend_delay; | ||
1189 | old_use = dev->power.use_autosuspend; | ||
1190 | dev->power.use_autosuspend = use; | ||
1191 | update_autosuspend(dev, old_delay, old_use); | ||
1192 | spin_unlock_irq(&dev->power.lock); | ||
1193 | } | ||
1194 | EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend); | ||
1195 | |||
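Editor's note: the expected driver-side autosuspend pattern is to set the delay and opt in during probe, then mark the device busy and drop the reference with the autosuspend variant after each transfer. A hedged sketch; the pm_runtime_* wrappers are assumed to exist as inline helpers in include/linux/pm_runtime.h and the foo_* names are hypothetical:

    /* Hedged sketch of the autosuspend usage pattern. */
    static int foo_probe(struct platform_device *pdev)
    {
    	pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);	/* 2 s idle */
    	pm_runtime_use_autosuspend(&pdev->dev);
    	pm_runtime_enable(&pdev->dev);
    	return 0;
    }

    static void foo_transfer_done(struct device *dev)
    {
    	pm_runtime_mark_last_busy(dev);		/* restart the inactivity clock */
    	pm_runtime_put_autosuspend(dev);	/* suspend only after the delay */
    }

The same delay can then be tuned from user space through the power/autosuspend_delay_ms attribute added to sysfs.c further down in this patch.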
1196 | /** | ||
1097 | * pm_runtime_init - Initialize run-time PM fields in given device object. | 1197 | * pm_runtime_init - Initialize run-time PM fields in given device object. |
1098 | * @dev: Device object to initialize. | 1198 | * @dev: Device object to initialize. |
1099 | */ | 1199 | */ |
1100 | void pm_runtime_init(struct device *dev) | 1200 | void pm_runtime_init(struct device *dev) |
1101 | { | 1201 | { |
1102 | spin_lock_init(&dev->power.lock); | ||
1103 | |||
1104 | dev->power.runtime_status = RPM_SUSPENDED; | 1202 | dev->power.runtime_status = RPM_SUSPENDED; |
1105 | dev->power.idle_notification = false; | 1203 | dev->power.idle_notification = false; |
1106 | 1204 | ||
@@ -1137,4 +1235,6 @@ void pm_runtime_remove(struct device *dev) | |||
1137 | /* Change the status back to 'suspended' to match the initial status. */ | 1235 | /* Change the status back to 'suspended' to match the initial status. */ |
1138 | if (dev->power.runtime_status == RPM_ACTIVE) | 1236 | if (dev->power.runtime_status == RPM_ACTIVE) |
1139 | pm_runtime_set_suspended(dev); | 1237 | pm_runtime_set_suspended(dev); |
1238 | if (dev->power.irq_safe && dev->parent) | ||
1239 | pm_runtime_put_sync(dev->parent); | ||
1140 | } | 1240 | } |
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index e56b4388fe61..a9f5b8979611 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c | |||
@@ -75,12 +75,27 @@ | |||
75 | * attribute is set to "enabled" by bus type code or device drivers and in | 75 | * attribute is set to "enabled" by bus type code or device drivers and in |
76 | * that case it should be safe to leave the default value. | 76 | * that case it should be safe to leave the default value. |
77 | * | 77 | * |
78 | * autosuspend_delay_ms - Report/change a device's autosuspend_delay value | ||
79 | * | ||
80 | * Some drivers don't want to carry out a runtime suspend as soon as a | ||
81 | * device becomes idle; they want it always to remain idle for some period | ||
82 | * of time before suspending it. This period is the autosuspend_delay | ||
83 | * value (expressed in milliseconds) and it can be controlled by the user. | ||
84 | * If the value is negative then the device will never be runtime | ||
85 | * suspended. | ||
86 | * | ||
87 | * NOTE: The autosuspend_delay_ms attribute and the autosuspend_delay | ||
88 | * value are used only if the driver calls pm_runtime_use_autosuspend(). | ||
89 | * | ||
78 | * wakeup_count - Report the number of wakeup events related to the device | 90 | * wakeup_count - Report the number of wakeup events related to the device |
79 | */ | 91 | */ |
80 | 92 | ||
81 | static const char enabled[] = "enabled"; | 93 | static const char enabled[] = "enabled"; |
82 | static const char disabled[] = "disabled"; | 94 | static const char disabled[] = "disabled"; |
83 | 95 | ||
96 | const char power_group_name[] = "power"; | ||
97 | EXPORT_SYMBOL_GPL(power_group_name); | ||
98 | |||
84 | #ifdef CONFIG_PM_RUNTIME | 99 | #ifdef CONFIG_PM_RUNTIME |
85 | static const char ctrl_auto[] = "auto"; | 100 | static const char ctrl_auto[] = "auto"; |
86 | static const char ctrl_on[] = "on"; | 101 | static const char ctrl_on[] = "on"; |
@@ -170,8 +185,36 @@ static ssize_t rtpm_status_show(struct device *dev, | |||
170 | } | 185 | } |
171 | 186 | ||
172 | static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL); | 187 | static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL); |
173 | #endif | ||
174 | 188 | ||
189 | static ssize_t autosuspend_delay_ms_show(struct device *dev, | ||
190 | struct device_attribute *attr, char *buf) | ||
191 | { | ||
192 | if (!dev->power.use_autosuspend) | ||
193 | return -EIO; | ||
194 | return sprintf(buf, "%d\n", dev->power.autosuspend_delay); | ||
195 | } | ||
196 | |||
197 | static ssize_t autosuspend_delay_ms_store(struct device *dev, | ||
198 | struct device_attribute *attr, const char *buf, size_t n) | ||
199 | { | ||
200 | long delay; | ||
201 | |||
202 | if (!dev->power.use_autosuspend) | ||
203 | return -EIO; | ||
204 | |||
205 | if (strict_strtol(buf, 10, &delay) != 0 || delay != (int) delay) | ||
206 | return -EINVAL; | ||
207 | |||
208 | pm_runtime_set_autosuspend_delay(dev, delay); | ||
209 | return n; | ||
210 | } | ||
211 | |||
212 | static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show, | ||
213 | autosuspend_delay_ms_store); | ||
214 | |||
215 | #endif /* CONFIG_PM_RUNTIME */ | ||
216 | |||
217 | #ifdef CONFIG_PM_SLEEP | ||
175 | static ssize_t | 218 | static ssize_t |
176 | wake_show(struct device * dev, struct device_attribute *attr, char * buf) | 219 | wake_show(struct device * dev, struct device_attribute *attr, char * buf) |
177 | { | 220 | { |
@@ -206,15 +249,125 @@ wake_store(struct device * dev, struct device_attribute *attr, | |||
206 | 249 | ||
207 | static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store); | 250 | static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store); |
208 | 251 | ||
209 | #ifdef CONFIG_PM_SLEEP | ||
210 | static ssize_t wakeup_count_show(struct device *dev, | 252 | static ssize_t wakeup_count_show(struct device *dev, |
211 | struct device_attribute *attr, char *buf) | 253 | struct device_attribute *attr, char *buf) |
212 | { | 254 | { |
213 | return sprintf(buf, "%lu\n", dev->power.wakeup_count); | 255 | unsigned long count = 0; |
256 | bool enabled = false; | ||
257 | |||
258 | spin_lock_irq(&dev->power.lock); | ||
259 | if (dev->power.wakeup) { | ||
260 | count = dev->power.wakeup->event_count; | ||
261 | enabled = true; | ||
262 | } | ||
263 | spin_unlock_irq(&dev->power.lock); | ||
264 | return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n"); | ||
214 | } | 265 | } |
215 | 266 | ||
216 | static DEVICE_ATTR(wakeup_count, 0444, wakeup_count_show, NULL); | 267 | static DEVICE_ATTR(wakeup_count, 0444, wakeup_count_show, NULL); |
217 | #endif | 268 | |
269 | static ssize_t wakeup_active_count_show(struct device *dev, | ||
270 | struct device_attribute *attr, char *buf) | ||
271 | { | ||
272 | unsigned long count = 0; | ||
273 | bool enabled = false; | ||
274 | |||
275 | spin_lock_irq(&dev->power.lock); | ||
276 | if (dev->power.wakeup) { | ||
277 | count = dev->power.wakeup->active_count; | ||
278 | enabled = true; | ||
279 | } | ||
280 | spin_unlock_irq(&dev->power.lock); | ||
281 | return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n"); | ||
282 | } | ||
283 | |||
284 | static DEVICE_ATTR(wakeup_active_count, 0444, wakeup_active_count_show, NULL); | ||
285 | |||
286 | static ssize_t wakeup_hit_count_show(struct device *dev, | ||
287 | struct device_attribute *attr, char *buf) | ||
288 | { | ||
289 | unsigned long count = 0; | ||
290 | bool enabled = false; | ||
291 | |||
292 | spin_lock_irq(&dev->power.lock); | ||
293 | if (dev->power.wakeup) { | ||
294 | count = dev->power.wakeup->hit_count; | ||
295 | enabled = true; | ||
296 | } | ||
297 | spin_unlock_irq(&dev->power.lock); | ||
298 | return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n"); | ||
299 | } | ||
300 | |||
301 | static DEVICE_ATTR(wakeup_hit_count, 0444, wakeup_hit_count_show, NULL); | ||
302 | |||
303 | static ssize_t wakeup_active_show(struct device *dev, | ||
304 | struct device_attribute *attr, char *buf) | ||
305 | { | ||
306 | unsigned int active = 0; | ||
307 | bool enabled = false; | ||
308 | |||
309 | spin_lock_irq(&dev->power.lock); | ||
310 | if (dev->power.wakeup) { | ||
311 | active = dev->power.wakeup->active; | ||
312 | enabled = true; | ||
313 | } | ||
314 | spin_unlock_irq(&dev->power.lock); | ||
315 | return enabled ? sprintf(buf, "%u\n", active) : sprintf(buf, "\n"); | ||
316 | } | ||
317 | |||
318 | static DEVICE_ATTR(wakeup_active, 0444, wakeup_active_show, NULL); | ||
319 | |||
320 | static ssize_t wakeup_total_time_show(struct device *dev, | ||
321 | struct device_attribute *attr, char *buf) | ||
322 | { | ||
323 | s64 msec = 0; | ||
324 | bool enabled = false; | ||
325 | |||
326 | spin_lock_irq(&dev->power.lock); | ||
327 | if (dev->power.wakeup) { | ||
328 | msec = ktime_to_ms(dev->power.wakeup->total_time); | ||
329 | enabled = true; | ||
330 | } | ||
331 | spin_unlock_irq(&dev->power.lock); | ||
332 | return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n"); | ||
333 | } | ||
334 | |||
335 | static DEVICE_ATTR(wakeup_total_time_ms, 0444, wakeup_total_time_show, NULL); | ||
336 | |||
337 | static ssize_t wakeup_max_time_show(struct device *dev, | ||
338 | struct device_attribute *attr, char *buf) | ||
339 | { | ||
340 | s64 msec = 0; | ||
341 | bool enabled = false; | ||
342 | |||
343 | spin_lock_irq(&dev->power.lock); | ||
344 | if (dev->power.wakeup) { | ||
345 | msec = ktime_to_ms(dev->power.wakeup->max_time); | ||
346 | enabled = true; | ||
347 | } | ||
348 | spin_unlock_irq(&dev->power.lock); | ||
349 | return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n"); | ||
350 | } | ||
351 | |||
352 | static DEVICE_ATTR(wakeup_max_time_ms, 0444, wakeup_max_time_show, NULL); | ||
353 | |||
354 | static ssize_t wakeup_last_time_show(struct device *dev, | ||
355 | struct device_attribute *attr, char *buf) | ||
356 | { | ||
357 | s64 msec = 0; | ||
358 | bool enabled = false; | ||
359 | |||
360 | spin_lock_irq(&dev->power.lock); | ||
361 | if (dev->power.wakeup) { | ||
362 | msec = ktime_to_ms(dev->power.wakeup->last_time); | ||
363 | enabled = true; | ||
364 | } | ||
365 | spin_unlock_irq(&dev->power.lock); | ||
366 | return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n"); | ||
367 | } | ||
368 | |||
369 | static DEVICE_ATTR(wakeup_last_time_ms, 0444, wakeup_last_time_show, NULL); | ||
370 | #endif /* CONFIG_PM_SLEEP */ | ||
218 | 371 | ||
219 | #ifdef CONFIG_PM_ADVANCED_DEBUG | 372 | #ifdef CONFIG_PM_ADVANCED_DEBUG |
220 | #ifdef CONFIG_PM_RUNTIME | 373 | #ifdef CONFIG_PM_RUNTIME |
@@ -278,38 +431,108 @@ static ssize_t async_store(struct device *dev, struct device_attribute *attr, | |||
278 | static DEVICE_ATTR(async, 0644, async_show, async_store); | 431 | static DEVICE_ATTR(async, 0644, async_show, async_store); |
279 | #endif /* CONFIG_PM_ADVANCED_DEBUG */ | 432 | #endif /* CONFIG_PM_ADVANCED_DEBUG */ |
280 | 433 | ||
281 | static struct attribute * power_attrs[] = { | 434 | static struct attribute *power_attrs[] = { |
282 | #ifdef CONFIG_PM_RUNTIME | ||
283 | &dev_attr_control.attr, | ||
284 | &dev_attr_runtime_status.attr, | ||
285 | &dev_attr_runtime_suspended_time.attr, | ||
286 | &dev_attr_runtime_active_time.attr, | ||
287 | #endif | ||
288 | &dev_attr_wakeup.attr, | ||
289 | #ifdef CONFIG_PM_SLEEP | ||
290 | &dev_attr_wakeup_count.attr, | ||
291 | #endif | ||
292 | #ifdef CONFIG_PM_ADVANCED_DEBUG | 435 | #ifdef CONFIG_PM_ADVANCED_DEBUG |
436 | #ifdef CONFIG_PM_SLEEP | ||
293 | &dev_attr_async.attr, | 437 | &dev_attr_async.attr, |
438 | #endif | ||
294 | #ifdef CONFIG_PM_RUNTIME | 439 | #ifdef CONFIG_PM_RUNTIME |
440 | &dev_attr_runtime_status.attr, | ||
295 | &dev_attr_runtime_usage.attr, | 441 | &dev_attr_runtime_usage.attr, |
296 | &dev_attr_runtime_active_kids.attr, | 442 | &dev_attr_runtime_active_kids.attr, |
297 | &dev_attr_runtime_enabled.attr, | 443 | &dev_attr_runtime_enabled.attr, |
298 | #endif | 444 | #endif |
299 | #endif | 445 | #endif /* CONFIG_PM_ADVANCED_DEBUG */ |
300 | NULL, | 446 | NULL, |
301 | }; | 447 | }; |
302 | static struct attribute_group pm_attr_group = { | 448 | static struct attribute_group pm_attr_group = { |
303 | .name = "power", | 449 | .name = power_group_name, |
304 | .attrs = power_attrs, | 450 | .attrs = power_attrs, |
305 | }; | 451 | }; |
306 | 452 | ||
307 | int dpm_sysfs_add(struct device * dev) | 453 | static struct attribute *wakeup_attrs[] = { |
454 | #ifdef CONFIG_PM_SLEEP | ||
455 | &dev_attr_wakeup.attr, | ||
456 | &dev_attr_wakeup_count.attr, | ||
457 | &dev_attr_wakeup_active_count.attr, | ||
458 | &dev_attr_wakeup_hit_count.attr, | ||
459 | &dev_attr_wakeup_active.attr, | ||
460 | &dev_attr_wakeup_total_time_ms.attr, | ||
461 | &dev_attr_wakeup_max_time_ms.attr, | ||
462 | &dev_attr_wakeup_last_time_ms.attr, | ||
463 | #endif | ||
464 | NULL, | ||
465 | }; | ||
466 | static struct attribute_group pm_wakeup_attr_group = { | ||
467 | .name = power_group_name, | ||
468 | .attrs = wakeup_attrs, | ||
469 | }; | ||
470 | |||
471 | static struct attribute *runtime_attrs[] = { | ||
472 | #ifdef CONFIG_PM_RUNTIME | ||
473 | #ifndef CONFIG_PM_ADVANCED_DEBUG | ||
474 | &dev_attr_runtime_status.attr, | ||
475 | #endif | ||
476 | &dev_attr_control.attr, | ||
477 | &dev_attr_runtime_suspended_time.attr, | ||
478 | &dev_attr_runtime_active_time.attr, | ||
479 | &dev_attr_autosuspend_delay_ms.attr, | ||
480 | #endif /* CONFIG_PM_RUNTIME */ | ||
481 | NULL, | ||
482 | }; | ||
483 | static struct attribute_group pm_runtime_attr_group = { | ||
484 | .name = power_group_name, | ||
485 | .attrs = runtime_attrs, | ||
486 | }; | ||
487 | |||
488 | int dpm_sysfs_add(struct device *dev) | ||
489 | { | ||
490 | int rc; | ||
491 | |||
492 | rc = sysfs_create_group(&dev->kobj, &pm_attr_group); | ||
493 | if (rc) | ||
494 | return rc; | ||
495 | |||
496 | if (pm_runtime_callbacks_present(dev)) { | ||
497 | rc = sysfs_merge_group(&dev->kobj, &pm_runtime_attr_group); | ||
498 | if (rc) | ||
499 | goto err_out; | ||
500 | } | ||
501 | |||
502 | if (device_can_wakeup(dev)) { | ||
503 | rc = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group); | ||
504 | if (rc) { | ||
505 | if (pm_runtime_callbacks_present(dev)) | ||
506 | sysfs_unmerge_group(&dev->kobj, | ||
507 | &pm_runtime_attr_group); | ||
508 | goto err_out; | ||
509 | } | ||
510 | } | ||
511 | return 0; | ||
512 | |||
513 | err_out: | ||
514 | sysfs_remove_group(&dev->kobj, &pm_attr_group); | ||
515 | return rc; | ||
516 | } | ||
517 | |||
518 | int wakeup_sysfs_add(struct device *dev) | ||
519 | { | ||
520 | return sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group); | ||
521 | } | ||
522 | |||
523 | void wakeup_sysfs_remove(struct device *dev) | ||
524 | { | ||
525 | sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); | ||
526 | } | ||
527 | |||
528 | void rpm_sysfs_remove(struct device *dev) | ||
308 | { | 529 | { |
309 | return sysfs_create_group(&dev->kobj, &pm_attr_group); | 530 | sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group); |
310 | } | 531 | } |
311 | 532 | ||
312 | void dpm_sysfs_remove(struct device * dev) | 533 | void dpm_sysfs_remove(struct device *dev) |
313 | { | 534 | { |
535 | rpm_sysfs_remove(dev); | ||
536 | sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); | ||
314 | sysfs_remove_group(&dev->kobj, &pm_attr_group); | 537 | sysfs_remove_group(&dev->kobj, &pm_attr_group); |
315 | } | 538 | } |
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c index 0a1a2c4dbc6e..c80e138b62fe 100644 --- a/drivers/base/power/trace.c +++ b/drivers/base/power/trace.c | |||
@@ -112,7 +112,7 @@ static unsigned int read_magic_time(void) | |||
112 | unsigned int val; | 112 | unsigned int val; |
113 | 113 | ||
114 | get_rtc_time(&time); | 114 | get_rtc_time(&time); |
115 | printk("Time: %2d:%02d:%02d Date: %02d/%02d/%02d\n", | 115 | pr_info("Time: %2d:%02d:%02d Date: %02d/%02d/%02d\n", |
116 | time.tm_hour, time.tm_min, time.tm_sec, | 116 | time.tm_hour, time.tm_min, time.tm_sec, |
117 | time.tm_mon + 1, time.tm_mday, time.tm_year % 100); | 117 | time.tm_mon + 1, time.tm_mday, time.tm_year % 100); |
118 | val = time.tm_year; /* 100 years */ | 118 | val = time.tm_year; /* 100 years */ |
@@ -179,7 +179,7 @@ static int show_file_hash(unsigned int value) | |||
179 | unsigned int hash = hash_string(lineno, file, FILEHASH); | 179 | unsigned int hash = hash_string(lineno, file, FILEHASH); |
180 | if (hash != value) | 180 | if (hash != value) |
181 | continue; | 181 | continue; |
182 | printk(" hash matches %s:%u\n", file, lineno); | 182 | pr_info(" hash matches %s:%u\n", file, lineno); |
183 | match++; | 183 | match++; |
184 | } | 184 | } |
185 | return match; | 185 | return match; |
@@ -188,8 +188,10 @@ static int show_file_hash(unsigned int value) | |||
188 | static int show_dev_hash(unsigned int value) | 188 | static int show_dev_hash(unsigned int value) |
189 | { | 189 | { |
190 | int match = 0; | 190 | int match = 0; |
191 | struct list_head *entry = dpm_list.prev; | 191 | struct list_head *entry; |
192 | 192 | ||
193 | device_pm_lock(); | ||
194 | entry = dpm_list.prev; | ||
193 | while (entry != &dpm_list) { | 195 | while (entry != &dpm_list) { |
194 | struct device * dev = to_device(entry); | 196 | struct device * dev = to_device(entry); |
195 | unsigned int hash = hash_string(DEVSEED, dev_name(dev), DEVHASH); | 197 | unsigned int hash = hash_string(DEVSEED, dev_name(dev), DEVHASH); |
@@ -199,11 +201,43 @@ static int show_dev_hash(unsigned int value) | |||
199 | } | 201 | } |
200 | entry = entry->prev; | 202 | entry = entry->prev; |
201 | } | 203 | } |
204 | device_pm_unlock(); | ||
202 | return match; | 205 | return match; |
203 | } | 206 | } |
204 | 207 | ||
205 | static unsigned int hash_value_early_read; | 208 | static unsigned int hash_value_early_read; |
206 | 209 | ||
210 | int show_trace_dev_match(char *buf, size_t size) | ||
211 | { | ||
212 | unsigned int value = hash_value_early_read / (USERHASH * FILEHASH); | ||
213 | int ret = 0; | ||
214 | struct list_head *entry; | ||
215 | |||
216 | /* | ||
217 | * It's possible that multiple devices will match the hash and we can't | ||
218 | * tell which is the culprit, so it's best to output them all. | ||
219 | */ | ||
220 | device_pm_lock(); | ||
221 | entry = dpm_list.prev; | ||
222 | while (size && entry != &dpm_list) { | ||
223 | struct device *dev = to_device(entry); | ||
224 | unsigned int hash = hash_string(DEVSEED, dev_name(dev), | ||
225 | DEVHASH); | ||
226 | if (hash == value) { | ||
227 | int len = snprintf(buf, size, "%s\n", | ||
228 | dev_driver_string(dev)); | ||
229 | if (len > size) | ||
230 | len = size; | ||
231 | buf += len; | ||
232 | ret += len; | ||
233 | size -= len; | ||
234 | } | ||
235 | entry = entry->prev; | ||
236 | } | ||
237 | device_pm_unlock(); | ||
238 | return ret; | ||
239 | } | ||
240 | |||
207 | static int early_resume_init(void) | 241 | static int early_resume_init(void) |
208 | { | 242 | { |
209 | hash_value_early_read = read_magic_time(); | 243 | hash_value_early_read = read_magic_time(); |
@@ -221,7 +255,7 @@ static int late_resume_init(void) | |||
221 | val = val / FILEHASH; | 255 | val = val / FILEHASH; |
222 | dev = val /* % DEVHASH */; | 256 | dev = val /* % DEVHASH */; |
223 | 257 | ||
224 | printk(" Magic number: %d:%d:%d\n", user, file, dev); | 258 | pr_info(" Magic number: %d:%d:%d\n", user, file, dev); |
225 | show_file_hash(file); | 259 | show_file_hash(file); |
226 | show_dev_hash(dev); | 260 | show_dev_hash(dev); |
227 | return 0; | 261 | return 0; |
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index eb594facfc3f..84f7c7d5a098 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c | |||
@@ -11,7 +11,12 @@ | |||
11 | #include <linux/sched.h> | 11 | #include <linux/sched.h> |
12 | #include <linux/capability.h> | 12 | #include <linux/capability.h> |
13 | #include <linux/suspend.h> | 13 | #include <linux/suspend.h> |
14 | #include <linux/pm.h> | 14 | #include <linux/seq_file.h> |
15 | #include <linux/debugfs.h> | ||
16 | |||
17 | #include "power.h" | ||
18 | |||
19 | #define TIMEOUT 100 | ||
15 | 20 | ||
16 | /* | 21 | /* |
17 | * If set, the suspend/hibernate code will abort transitions to a sleep state | 22 | * If set, the suspend/hibernate code will abort transitions to a sleep state |
@@ -19,19 +24,287 @@ | |||
19 | */ | 24 | */ |
20 | bool events_check_enabled; | 25 | bool events_check_enabled; |
21 | 26 | ||
22 | /* The counter of registered wakeup events. */ | 27 | /* |
23 | static unsigned long event_count; | 28 | * Combined counters of registered wakeup events and wakeup events in progress. |
24 | /* A preserved old value of event_count. */ | 29 | * They need to be modified together atomically, so it's better to use one |
25 | static unsigned long saved_event_count; | 30 | * atomic variable to hold them both. |
26 | /* The counter of wakeup events being processed. */ | 31 | */ |
27 | static unsigned long events_in_progress; | 32 | static atomic_t combined_event_count = ATOMIC_INIT(0); |
33 | |||
34 | #define IN_PROGRESS_BITS (sizeof(int) * 4) | ||
35 | #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1) | ||
36 | |||
37 | static void split_counters(unsigned int *cnt, unsigned int *inpr) | ||
38 | { | ||
39 | unsigned int comb = atomic_read(&combined_event_count); | ||
40 | |||
41 | *cnt = (comb >> IN_PROGRESS_BITS); | ||
42 | *inpr = comb & MAX_IN_PROGRESS; | ||
43 | } | ||
44 | |||
45 | /* A preserved old value of the events counter. */ | ||
46 | static unsigned int saved_count; | ||
28 | 47 | ||
29 | static DEFINE_SPINLOCK(events_lock); | 48 | static DEFINE_SPINLOCK(events_lock); |
30 | 49 | ||
31 | static void pm_wakeup_timer_fn(unsigned long data); | 50 | static void pm_wakeup_timer_fn(unsigned long data); |
32 | 51 | ||
33 | static DEFINE_TIMER(events_timer, pm_wakeup_timer_fn, 0, 0); | 52 | static LIST_HEAD(wakeup_sources); |
34 | static unsigned long events_timer_expires; | 53 | |
54 | /** | ||
55 | * wakeup_source_create - Create a struct wakeup_source object. | ||
56 | * @name: Name of the new wakeup source. | ||
57 | */ | ||
58 | struct wakeup_source *wakeup_source_create(const char *name) | ||
59 | { | ||
60 | struct wakeup_source *ws; | ||
61 | |||
62 | ws = kzalloc(sizeof(*ws), GFP_KERNEL); | ||
63 | if (!ws) | ||
64 | return NULL; | ||
65 | |||
66 | spin_lock_init(&ws->lock); | ||
67 | if (name) | ||
68 | ws->name = kstrdup(name, GFP_KERNEL); | ||
69 | |||
70 | return ws; | ||
71 | } | ||
72 | EXPORT_SYMBOL_GPL(wakeup_source_create); | ||
73 | |||
74 | /** | ||
75 | * wakeup_source_destroy - Destroy a struct wakeup_source object. | ||
76 | * @ws: Wakeup source to destroy. | ||
77 | */ | ||
78 | void wakeup_source_destroy(struct wakeup_source *ws) | ||
79 | { | ||
80 | if (!ws) | ||
81 | return; | ||
82 | |||
83 | spin_lock_irq(&ws->lock); | ||
84 | while (ws->active) { | ||
85 | spin_unlock_irq(&ws->lock); | ||
86 | |||
87 | schedule_timeout_interruptible(msecs_to_jiffies(TIMEOUT)); | ||
88 | |||
89 | spin_lock_irq(&ws->lock); | ||
90 | } | ||
91 | spin_unlock_irq(&ws->lock); | ||
92 | |||
93 | kfree(ws->name); | ||
94 | kfree(ws); | ||
95 | } | ||
96 | EXPORT_SYMBOL_GPL(wakeup_source_destroy); | ||
97 | |||
98 | /** | ||
99 | * wakeup_source_add - Add given object to the list of wakeup sources. | ||
100 | * @ws: Wakeup source object to add to the list. | ||
101 | */ | ||
102 | void wakeup_source_add(struct wakeup_source *ws) | ||
103 | { | ||
104 | if (WARN_ON(!ws)) | ||
105 | return; | ||
106 | |||
107 | setup_timer(&ws->timer, pm_wakeup_timer_fn, (unsigned long)ws); | ||
108 | ws->active = false; | ||
109 | |||
110 | spin_lock_irq(&events_lock); | ||
111 | list_add_rcu(&ws->entry, &wakeup_sources); | ||
112 | spin_unlock_irq(&events_lock); | ||
113 | } | ||
114 | EXPORT_SYMBOL_GPL(wakeup_source_add); | ||
115 | |||
116 | /** | ||
117 | * wakeup_source_remove - Remove given object from the wakeup sources list. | ||
118 | * @ws: Wakeup source object to remove from the list. | ||
119 | */ | ||
120 | void wakeup_source_remove(struct wakeup_source *ws) | ||
121 | { | ||
122 | if (WARN_ON(!ws)) | ||
123 | return; | ||
124 | |||
125 | spin_lock_irq(&events_lock); | ||
126 | list_del_rcu(&ws->entry); | ||
127 | spin_unlock_irq(&events_lock); | ||
128 | synchronize_rcu(); | ||
129 | } | ||
130 | EXPORT_SYMBOL_GPL(wakeup_source_remove); | ||
131 | |||
132 | /** | ||
133 | * wakeup_source_register - Create wakeup source and add it to the list. | ||
134 | * @name: Name of the wakeup source to register. | ||
135 | */ | ||
136 | struct wakeup_source *wakeup_source_register(const char *name) | ||
137 | { | ||
138 | struct wakeup_source *ws; | ||
139 | |||
140 | ws = wakeup_source_create(name); | ||
141 | if (ws) | ||
142 | wakeup_source_add(ws); | ||
143 | |||
144 | return ws; | ||
145 | } | ||
146 | EXPORT_SYMBOL_GPL(wakeup_source_register); | ||
147 | |||
148 | /** | ||
149 | * wakeup_source_unregister - Remove wakeup source from the list and destroy it. | ||
150 | * @ws: Wakeup source object to unregister. | ||
151 | */ | ||
152 | void wakeup_source_unregister(struct wakeup_source *ws) | ||
153 | { | ||
154 | wakeup_source_remove(ws); | ||
155 | wakeup_source_destroy(ws); | ||
156 | } | ||
157 | EXPORT_SYMBOL_GPL(wakeup_source_unregister); | ||
158 | |||
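Editor's note: code that is not tied to a single struct device can register its own wakeup source and bracket event processing with __pm_stay_awake()/__pm_relax(). A hedged sketch with hypothetical names:

    /* Hedged sketch: a hypothetical subsystem keeping the system awake while
     * it drains an event queue. */
    static struct wakeup_source *foo_ws;

    static int __init foo_init(void)
    {
    	foo_ws = wakeup_source_register("foo_events");
    	return foo_ws ? 0 : -ENOMEM;
    }

    static void foo_handle_event(void)
    {
    	__pm_stay_awake(foo_ws);	/* block suspend while we work */
    	/* ... process the event ... */
    	__pm_relax(foo_ws);
    }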
159 | /** | ||
160 | * device_wakeup_attach - Attach a wakeup source object to a device object. | ||
161 | * @dev: Device to handle. | ||
162 | * @ws: Wakeup source object to attach to @dev. | ||
163 | * | ||
164 | * This causes @dev to be treated as a wakeup device. | ||
165 | */ | ||
166 | static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws) | ||
167 | { | ||
168 | spin_lock_irq(&dev->power.lock); | ||
169 | if (dev->power.wakeup) { | ||
170 | spin_unlock_irq(&dev->power.lock); | ||
171 | return -EEXIST; | ||
172 | } | ||
173 | dev->power.wakeup = ws; | ||
174 | spin_unlock_irq(&dev->power.lock); | ||
175 | return 0; | ||
176 | } | ||
177 | |||
178 | /** | ||
179 | * device_wakeup_enable - Enable given device to be a wakeup source. | ||
180 | * @dev: Device to handle. | ||
181 | * | ||
182 | * Create a wakeup source object, register it and attach it to @dev. | ||
183 | */ | ||
184 | int device_wakeup_enable(struct device *dev) | ||
185 | { | ||
186 | struct wakeup_source *ws; | ||
187 | int ret; | ||
188 | |||
189 | if (!dev || !dev->power.can_wakeup) | ||
190 | return -EINVAL; | ||
191 | |||
192 | ws = wakeup_source_register(dev_name(dev)); | ||
193 | if (!ws) | ||
194 | return -ENOMEM; | ||
195 | |||
196 | ret = device_wakeup_attach(dev, ws); | ||
197 | if (ret) | ||
198 | wakeup_source_unregister(ws); | ||
199 | |||
200 | return ret; | ||
201 | } | ||
202 | EXPORT_SYMBOL_GPL(device_wakeup_enable); | ||
203 | |||
204 | /** | ||
205 | * device_wakeup_detach - Detach a device's wakeup source object from it. | ||
206 | * @dev: Device to detach the wakeup source object from. | ||
207 | * | ||
208 | * After it returns, @dev will not be treated as a wakeup device any more. | ||
209 | */ | ||
210 | static struct wakeup_source *device_wakeup_detach(struct device *dev) | ||
211 | { | ||
212 | struct wakeup_source *ws; | ||
213 | |||
214 | spin_lock_irq(&dev->power.lock); | ||
215 | ws = dev->power.wakeup; | ||
216 | dev->power.wakeup = NULL; | ||
217 | spin_unlock_irq(&dev->power.lock); | ||
218 | return ws; | ||
219 | } | ||
220 | |||
221 | /** | ||
222 | * device_wakeup_disable - Do not regard a device as a wakeup source any more. | ||
223 | * @dev: Device to handle. | ||
224 | * | ||
225 | * Detach the @dev's wakeup source object from it, unregister this wakeup source | ||
226 | * object and destroy it. | ||
227 | */ | ||
228 | int device_wakeup_disable(struct device *dev) | ||
229 | { | ||
230 | struct wakeup_source *ws; | ||
231 | |||
232 | if (!dev || !dev->power.can_wakeup) | ||
233 | return -EINVAL; | ||
234 | |||
235 | ws = device_wakeup_detach(dev); | ||
236 | if (ws) | ||
237 | wakeup_source_unregister(ws); | ||
238 | |||
239 | return 0; | ||
240 | } | ||
241 | EXPORT_SYMBOL_GPL(device_wakeup_disable); | ||
242 | |||
243 | /** | ||
244 | * device_set_wakeup_capable - Set/reset device wakeup capability flag. | ||
245 | * @dev: Device to handle. | ||
246 | * @capable: Whether or not @dev is capable of waking up the system from sleep. | ||
247 | * | ||
248 | * If @capable is set, set the @dev's power.can_wakeup flag and add its | ||
249 | * wakeup-related attributes to sysfs. Otherwise, unset the @dev's | ||
250 | * power.can_wakeup flag and remove its wakeup-related attributes from sysfs. | ||
251 | * | ||
252 | * This function may sleep and it can't be called from any context where | ||
253 | * sleeping is not allowed. | ||
254 | */ | ||
255 | void device_set_wakeup_capable(struct device *dev, bool capable) | ||
256 | { | ||
257 | if (!!dev->power.can_wakeup == !!capable) | ||
258 | return; | ||
259 | |||
260 | if (device_is_registered(dev) && !list_empty(&dev->power.entry)) { | ||
261 | if (capable) { | ||
262 | if (wakeup_sysfs_add(dev)) | ||
263 | return; | ||
264 | } else { | ||
265 | wakeup_sysfs_remove(dev); | ||
266 | } | ||
267 | } | ||
268 | dev->power.can_wakeup = capable; | ||
269 | } | ||
270 | EXPORT_SYMBOL_GPL(device_set_wakeup_capable); | ||
271 | |||
272 | /** | ||
273 | * device_init_wakeup - Device wakeup initialization. | ||
274 | * @dev: Device to handle. | ||
275 | * @enable: Whether or not to enable @dev as a wakeup device. | ||
276 | * | ||
277 | * By default, most devices should leave wakeup disabled. The exceptions are | ||
278 | * devices that everyone expects to be wakeup sources: keyboards, power buttons, | ||
279 | * possibly network interfaces, etc. | ||
280 | */ | ||
281 | int device_init_wakeup(struct device *dev, bool enable) | ||
282 | { | ||
283 | int ret = 0; | ||
284 | |||
285 | if (enable) { | ||
286 | device_set_wakeup_capable(dev, true); | ||
287 | ret = device_wakeup_enable(dev); | ||
288 | } else { | ||
289 | device_set_wakeup_capable(dev, false); | ||
290 | } | ||
291 | |||
292 | return ret; | ||
293 | } | ||
294 | EXPORT_SYMBOL_GPL(device_init_wakeup); | ||
295 | |||
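Editor's note: devices that should be wakeup sources by default (keyboards, power buttons) call device_init_wakeup(dev, true) at probe time; their interrupt handlers can then report events with a grace period via pm_wakeup_event(), which appears later in this file. A hedged sketch with hypothetical names:

    /* Hedged sketch: a hypothetical power-button driver enabling wakeup in
     * probe and reporting events with a ~100 ms grace period. */
    static int foo_button_probe(struct platform_device *pdev)
    {
    	device_init_wakeup(&pdev->dev, true);
    	return 0;
    }

    static irqreturn_t foo_button_irq(int irq, void *data)
    {
    	struct device *dev = data;

    	pm_wakeup_event(dev, 100);	/* hold off suspend for ~100 ms */
    	return IRQ_HANDLED;
    }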
296 | /** | ||
297 | * device_set_wakeup_enable - Enable or disable a device to wake up the system. | ||
298 | * @dev: Device to handle. | ||
299 | */ | ||
300 | int device_set_wakeup_enable(struct device *dev, bool enable) | ||
301 | { | ||
302 | if (!dev || !dev->power.can_wakeup) | ||
303 | return -EINVAL; | ||
304 | |||
305 | return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev); | ||
306 | } | ||
307 | EXPORT_SYMBOL_GPL(device_set_wakeup_enable); | ||
35 | 308 | ||
36 | /* | 309 | /* |
37 | * The functions below use the observation that each wakeup event starts a | 310 | * The functions below use the observation that each wakeup event starts a |
@@ -55,139 +328,282 @@ static unsigned long events_timer_expires; | |||
55 | * knowledge, however, may not be available to it, so it can simply specify time | 328 | * knowledge, however, may not be available to it, so it can simply specify time |
56 | * to wait before the system can be suspended and pass it as the second | 329 | * to wait before the system can be suspended and pass it as the second |
57 | * argument of pm_wakeup_event(). | 330 | * argument of pm_wakeup_event(). |
331 | * | ||
332 | * It is valid to call pm_relax() after pm_wakeup_event(), in which case the | ||
333 | * "no suspend" period will be ended either by the pm_relax(), or by the timer | ||
334 | * function executed when the timer expires, whichever comes first. | ||
335 | */ | ||
336 | |||
337 | /** | ||
338 | * wakeup_source_activate - Mark given wakeup source as active. | ||
339 | * @ws: Wakeup source to handle. | ||
340 | * | ||
341 | * Update the @ws' statistics and, if @ws has just been activated, notify the PM | ||
342 | * core of the event by incrementing the counter of wakeup events being | ||
343 | * processed. | ||
58 | */ | 344 | */ |
345 | static void wakeup_source_activate(struct wakeup_source *ws) | ||
346 | { | ||
347 | ws->active = true; | ||
348 | ws->active_count++; | ||
349 | ws->timer_expires = jiffies; | ||
350 | ws->last_time = ktime_get(); | ||
351 | |||
352 | /* Increment the counter of events in progress. */ | ||
353 | atomic_inc(&combined_event_count); | ||
354 | } | ||
355 | |||
356 | /** | ||
357 | * __pm_stay_awake - Notify the PM core of a wakeup event. | ||
358 | * @ws: Wakeup source object associated with the source of the event. | ||
359 | * | ||
360 | * It is safe to call this function from interrupt context. | ||
361 | */ | ||
362 | void __pm_stay_awake(struct wakeup_source *ws) | ||
363 | { | ||
364 | unsigned long flags; | ||
365 | |||
366 | if (!ws) | ||
367 | return; | ||
368 | |||
369 | spin_lock_irqsave(&ws->lock, flags); | ||
370 | ws->event_count++; | ||
371 | if (!ws->active) | ||
372 | wakeup_source_activate(ws); | ||
373 | spin_unlock_irqrestore(&ws->lock, flags); | ||
374 | } | ||
375 | EXPORT_SYMBOL_GPL(__pm_stay_awake); | ||
59 | 376 | ||
60 | /** | 377 | /** |
61 | * pm_stay_awake - Notify the PM core that a wakeup event is being processed. | 378 | * pm_stay_awake - Notify the PM core that a wakeup event is being processed. |
62 | * @dev: Device the wakeup event is related to. | 379 | * @dev: Device the wakeup event is related to. |
63 | * | 380 | * |
64 | * Notify the PM core of a wakeup event (signaled by @dev) by incrementing the | 381 | * Notify the PM core of a wakeup event (signaled by @dev) by calling |
65 | * counter of wakeup events being processed. If @dev is not NULL, the counter | 382 | * __pm_stay_awake for the @dev's wakeup source object. |
66 | * of wakeup events related to @dev is incremented too. | ||
67 | * | 383 | * |
68 | * Call this function after detecting of a wakeup event if pm_relax() is going | 384 | * Call this function after detecting of a wakeup event if pm_relax() is going |
69 | * to be called directly after processing the event (and possibly passing it to | 385 | * to be called directly after processing the event (and possibly passing it to |
70 | * user space for further processing). | 386 | * user space for further processing). |
71 | * | ||
72 | * It is safe to call this function from interrupt context. | ||
73 | */ | 387 | */ |
74 | void pm_stay_awake(struct device *dev) | 388 | void pm_stay_awake(struct device *dev) |
75 | { | 389 | { |
76 | unsigned long flags; | 390 | unsigned long flags; |
77 | 391 | ||
78 | spin_lock_irqsave(&events_lock, flags); | 392 | if (!dev) |
79 | if (dev) | 393 | return; |
80 | dev->power.wakeup_count++; | ||
81 | 394 | ||
82 | events_in_progress++; | 395 | spin_lock_irqsave(&dev->power.lock, flags); |
83 | spin_unlock_irqrestore(&events_lock, flags); | 396 | __pm_stay_awake(dev->power.wakeup); |
397 | spin_unlock_irqrestore(&dev->power.lock, flags); | ||
84 | } | 398 | } |
399 | EXPORT_SYMBOL_GPL(pm_stay_awake); | ||
85 | 400 | ||
86 | /** | 401 | /** |
87 | * pm_relax - Notify the PM core that processing of a wakeup event has ended. | 402 | * wakeup_source_deactivate - Mark given wakeup source as inactive. |
403 | * @ws: Wakeup source to handle. | ||
88 | * | 404 | * |
89 | * Notify the PM core that a wakeup event has been processed by decrementing | 405 | * Update the @ws' statistics and notify the PM core that the wakeup source has |
90 | * the counter of wakeup events being processed and incrementing the counter | 406 | * become inactive by decrementing the counter of wakeup events being processed |
91 | * of registered wakeup events. | 407 | * and incrementing the counter of registered wakeup events. |
408 | */ | ||
409 | static void wakeup_source_deactivate(struct wakeup_source *ws) | ||
410 | { | ||
411 | ktime_t duration; | ||
412 | ktime_t now; | ||
413 | |||
414 | ws->relax_count++; | ||
415 | /* | ||
416 | * __pm_relax() may be called directly or from a timer function. | ||
417 | * If it is called directly right after the timer function has been | ||
418 | * started, but before the timer function calls __pm_relax(), it is | ||
419 | * possible that __pm_stay_awake() will be called in the meantime and | ||
420 | * will set ws->active. Then, ws->active may be cleared immediately | ||
421 | * by the __pm_relax() called from the timer function, but in such a | ||
422 | * case ws->relax_count will be different from ws->active_count. | ||
423 | */ | ||
424 | if (ws->relax_count != ws->active_count) { | ||
425 | ws->relax_count--; | ||
426 | return; | ||
427 | } | ||
428 | |||
429 | ws->active = false; | ||
430 | |||
431 | now = ktime_get(); | ||
432 | duration = ktime_sub(now, ws->last_time); | ||
433 | ws->total_time = ktime_add(ws->total_time, duration); | ||
434 | if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time)) | ||
435 | ws->max_time = duration; | ||
436 | |||
437 | del_timer(&ws->timer); | ||
438 | |||
439 | /* | ||
440 | * Increment the counter of registered wakeup events and decrement the | ||
441 | * counter of wakeup events in progress simultaneously. | ||
442 | */ | ||
443 | atomic_add(MAX_IN_PROGRESS, &combined_event_count); | ||
444 | } | ||
445 | |||
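The atomic_add(MAX_IN_PROGRESS, ...) call above updates two logical counters in one atomic operation. The constants and split_counters() are defined earlier in wakeup.c and do not appear in this hunk; the sketch below is one plausible definition, consistent with how they are used here, to make the arithmetic explicit.

#define IN_PROGRESS_BITS	(sizeof(int) * 4)
#define MAX_IN_PROGRESS		((1 << IN_PROGRESS_BITS) - 1)

static void split_counters(unsigned int *cnt, unsigned int *inpr)
{
	unsigned int comb = atomic_read(&combined_event_count);

	*cnt = comb >> IN_PROGRESS_BITS;	/* registered wakeup events (high half) */
	*inpr = comb & MAX_IN_PROGRESS;		/* wakeup events in progress (low half) */
}

/*
 * Adding MAX_IN_PROGRESS == (1 << IN_PROGRESS_BITS) - 1 to the packed value
 * borrows 1 from the low half (known to be nonzero while a source is active)
 * and carries 1 into the high half, so the in-progress counter is decremented
 * and the registered-event counter incremented in a single atomic step.
 */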
446 | /** | ||
447 | * __pm_relax - Notify the PM core that processing of a wakeup event has ended. | ||
448 | * @ws: Wakeup source object associated with the source of the event. | ||
92 | * | 449 | * |
93 | * Call this function for wakeup events whose processing started with calling | 450 | * Call this function for wakeup events whose processing started with calling |
94 | * pm_stay_awake(). | 451 | * __pm_stay_awake(). |
95 | * | 452 | * |
96 | * It is safe to call it from interrupt context. | 453 | * It is safe to call it from interrupt context. |
97 | */ | 454 | */ |
98 | void pm_relax(void) | 455 | void __pm_relax(struct wakeup_source *ws) |
99 | { | 456 | { |
100 | unsigned long flags; | 457 | unsigned long flags; |
101 | 458 | ||
102 | spin_lock_irqsave(&events_lock, flags); | 459 | if (!ws) |
103 | if (events_in_progress) { | 460 | return; |
104 | events_in_progress--; | 461 | |
105 | event_count++; | 462 | spin_lock_irqsave(&ws->lock, flags); |
106 | } | 463 | if (ws->active) |
107 | spin_unlock_irqrestore(&events_lock, flags); | 464 | wakeup_source_deactivate(ws); |
465 | spin_unlock_irqrestore(&ws->lock, flags); | ||
108 | } | 466 | } |
467 | EXPORT_SYMBOL_GPL(__pm_relax); | ||
468 | |||
469 | /** | ||
470 | * pm_relax - Notify the PM core that processing of a wakeup event has ended. | ||
471 | * @dev: Device that signaled the event. | ||
472 | * | ||
473 | * Execute __pm_relax() for the @dev's wakeup source object. | ||
474 | */ | ||
475 | void pm_relax(struct device *dev) | ||
476 | { | ||
477 | unsigned long flags; | ||
478 | |||
479 | if (!dev) | ||
480 | return; | ||
481 | |||
482 | spin_lock_irqsave(&dev->power.lock, flags); | ||
483 | __pm_relax(dev->power.wakeup); | ||
484 | spin_unlock_irqrestore(&dev->power.lock, flags); | ||
485 | } | ||
486 | EXPORT_SYMBOL_GPL(pm_relax); | ||
109 | 487 | ||
110 | /** | 488 | /** |
111 | * pm_wakeup_timer_fn - Delayed finalization of a wakeup event. | 489 | * pm_wakeup_timer_fn - Delayed finalization of a wakeup event. |
490 | * @data: Address of the wakeup source object associated with the event source. | ||
112 | * | 491 | * |
113 | * Decrease the counter of wakeup events being processed after it was increased | 492 | * Call __pm_relax() for the wakeup source whose address is stored in @data. |
114 | * by pm_wakeup_event(). | ||
115 | */ | 493 | */ |
116 | static void pm_wakeup_timer_fn(unsigned long data) | 494 | static void pm_wakeup_timer_fn(unsigned long data) |
117 | { | 495 | { |
496 | __pm_relax((struct wakeup_source *)data); | ||
497 | } | ||
498 | |||
499 | /** | ||
500 | * __pm_wakeup_event - Notify the PM core of a wakeup event. | ||
501 | * @ws: Wakeup source object associated with the event source. | ||
502 | * @msec: Anticipated event processing time (in milliseconds). | ||
503 | * | ||
504 | * Notify the PM core of a wakeup event whose source is @ws that will take | ||
505 | * approximately @msec milliseconds to be processed by the kernel. If @ws is | ||
506 | * not active, activate it. If @msec is nonzero, set up the @ws' timer to | ||
507 | * execute pm_wakeup_timer_fn() in the future. | ||
508 | * | ||
509 | * It is safe to call this function from interrupt context. | ||
510 | */ | ||
511 | void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec) | ||
512 | { | ||
118 | unsigned long flags; | 513 | unsigned long flags; |
514 | unsigned long expires; | ||
119 | 515 | ||
120 | spin_lock_irqsave(&events_lock, flags); | 516 | if (!ws) |
121 | if (events_timer_expires | 517 | return; |
122 | && time_before_eq(events_timer_expires, jiffies)) { | 518 | |
123 | events_in_progress--; | 519 | spin_lock_irqsave(&ws->lock, flags); |
124 | events_timer_expires = 0; | 520 | |
521 | ws->event_count++; | ||
522 | if (!ws->active) | ||
523 | wakeup_source_activate(ws); | ||
524 | |||
525 | if (!msec) { | ||
526 | wakeup_source_deactivate(ws); | ||
527 | goto unlock; | ||
125 | } | 528 | } |
126 | spin_unlock_irqrestore(&events_lock, flags); | 529 | |
530 | expires = jiffies + msecs_to_jiffies(msec); | ||
531 | if (!expires) | ||
532 | expires = 1; | ||
533 | |||
534 | if (time_after(expires, ws->timer_expires)) { | ||
535 | mod_timer(&ws->timer, expires); | ||
536 | ws->timer_expires = expires; | ||
537 | } | ||
538 | |||
539 | unlock: | ||
540 | spin_unlock_irqrestore(&ws->lock, flags); | ||
127 | } | 541 | } |
542 | EXPORT_SYMBOL_GPL(__pm_wakeup_event); | ||
543 | |||
128 | 544 | ||
129 | /** | 545 | /** |
130 | * pm_wakeup_event - Notify the PM core of a wakeup event. | 546 | * pm_wakeup_event - Notify the PM core of a wakeup event. |
131 | * @dev: Device the wakeup event is related to. | 547 | * @dev: Device the wakeup event is related to. |
132 | * @msec: Anticipated event processing time (in milliseconds). | 548 | * @msec: Anticipated event processing time (in milliseconds). |
133 | * | 549 | * |
134 | * Notify the PM core of a wakeup event (signaled by @dev) that will take | 550 | * Call __pm_wakeup_event() for the @dev's wakeup source object. |
135 | * approximately @msec milliseconds to be processed by the kernel. Increment | ||
136 | * the counter of registered wakeup events and (if @msec is nonzero) set up | ||
137 | * the wakeup events timer to execute pm_wakeup_timer_fn() in future (if the | ||
138 | * timer has not been set up already, increment the counter of wakeup events | ||
139 | * being processed). If @dev is not NULL, the counter of wakeup events related | ||
140 | * to @dev is incremented too. | ||
141 | * | ||
142 | * It is safe to call this function from interrupt context. | ||
143 | */ | 551 | */ |
144 | void pm_wakeup_event(struct device *dev, unsigned int msec) | 552 | void pm_wakeup_event(struct device *dev, unsigned int msec) |
145 | { | 553 | { |
146 | unsigned long flags; | 554 | unsigned long flags; |
147 | 555 | ||
148 | spin_lock_irqsave(&events_lock, flags); | 556 | if (!dev) |
149 | event_count++; | 557 | return; |
150 | if (dev) | ||
151 | dev->power.wakeup_count++; | ||
152 | |||
153 | if (msec) { | ||
154 | unsigned long expires; | ||
155 | 558 | ||
156 | expires = jiffies + msecs_to_jiffies(msec); | 559 | spin_lock_irqsave(&dev->power.lock, flags); |
157 | if (!expires) | 560 | __pm_wakeup_event(dev->power.wakeup, msec); |
158 | expires = 1; | 561 | spin_unlock_irqrestore(&dev->power.lock, flags); |
562 | } | ||
563 | EXPORT_SYMBOL_GPL(pm_wakeup_event); | ||
159 | 564 | ||
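Where a driver cannot tell when processing of an event actually ends (for instance, user space consumes it), pm_wakeup_event() lets it give the PM core a time estimate instead of pairing pm_stay_awake() with pm_relax(). A hedged sketch; the handler name and the 200 ms figure are illustrative.

static irqreturn_t my_wake_irq(int irq, void *data)
{
	struct device *dev = data;	/* illustrative: the wakeup-capable device */

	/*
	 * Hold the system awake for roughly 200 ms; the timer set up by
	 * __pm_wakeup_event() calls __pm_relax() when it expires.
	 */
	pm_wakeup_event(dev, 200);
	return IRQ_HANDLED;
}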
160 | if (!events_timer_expires | 565 | /** |
161 | || time_after(expires, events_timer_expires)) { | 566 | * pm_wakeup_update_hit_counts - Update hit counts of all active wakeup sources. |
162 | if (!events_timer_expires) | 567 | */ |
163 | events_in_progress++; | 568 | static void pm_wakeup_update_hit_counts(void) |
569 | { | ||
570 | unsigned long flags; | ||
571 | struct wakeup_source *ws; | ||
164 | 572 | ||
165 | mod_timer(&events_timer, expires); | 573 | rcu_read_lock(); |
166 | events_timer_expires = expires; | 574 | list_for_each_entry_rcu(ws, &wakeup_sources, entry) { |
167 | } | 575 | spin_lock_irqsave(&ws->lock, flags); |
576 | if (ws->active) | ||
577 | ws->hit_count++; | ||
578 | spin_unlock_irqrestore(&ws->lock, flags); | ||
168 | } | 579 | } |
169 | spin_unlock_irqrestore(&events_lock, flags); | 580 | rcu_read_unlock(); |
170 | } | 581 | } |
171 | 582 | ||
172 | /** | 583 | /** |
173 | * pm_check_wakeup_events - Check for new wakeup events. | 584 | * pm_wakeup_pending - Check if power transition in progress should be aborted. |
174 | * | 585 | * |
175 | * Compare the current number of registered wakeup events with its preserved | 586 | * Compare the current number of registered wakeup events with its preserved |
176 | * value from the past to check if new wakeup events have been registered since | 587 | * value from the past and return true if new wakeup events have been registered |
177 | * the old value was stored. Check if the current number of wakeup events being | 588 | * since the old value was stored. Also return true if the current number of |
178 | * processed is zero. | 589 | * wakeup events being processed is different from zero. |
179 | */ | 590 | */ |
180 | bool pm_check_wakeup_events(void) | 591 | bool pm_wakeup_pending(void) |
181 | { | 592 | { |
182 | unsigned long flags; | 593 | unsigned long flags; |
183 | bool ret = true; | 594 | bool ret = false; |
184 | 595 | ||
185 | spin_lock_irqsave(&events_lock, flags); | 596 | spin_lock_irqsave(&events_lock, flags); |
186 | if (events_check_enabled) { | 597 | if (events_check_enabled) { |
187 | ret = (event_count == saved_event_count) && !events_in_progress; | 598 | unsigned int cnt, inpr; |
188 | events_check_enabled = ret; | 599 | |
600 | split_counters(&cnt, &inpr); | ||
601 | ret = (cnt != saved_count || inpr > 0); | ||
602 | events_check_enabled = !ret; | ||
189 | } | 603 | } |
190 | spin_unlock_irqrestore(&events_lock, flags); | 604 | spin_unlock_irqrestore(&events_lock, flags); |
605 | if (ret) | ||
606 | pm_wakeup_update_hit_counts(); | ||
191 | return ret; | 607 | return ret; |
192 | } | 608 | } |
193 | 609 | ||
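pm_wakeup_pending() is meant to be polled late in a suspend transition so that a freshly arrived wakeup event aborts the transition instead of being lost until after resume. A minimal sketch of such a caller, assuming it runs after devices have been suspended; the function name is illustrative.

static int my_late_suspend_check(void)
{
	if (pm_wakeup_pending()) {
		pr_info("PM: Wakeup event detected, aborting suspend\n");
		return -EBUSY;		/* caller unwinds and resumes devices */
	}
	return 0;
}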
@@ -198,29 +614,25 @@ bool pm_check_wakeup_events(void) | |||
198 | * Store the number of registered wakeup events at the address in @count. Block | 614 | * Store the number of registered wakeup events at the address in @count. Block |
199 | * if the current number of wakeup events being processed is nonzero. | 615 | * if the current number of wakeup events being processed is nonzero. |
200 | * | 616 | * |
201 | * Return false if the wait for the number of wakeup events being processed to | 617 | * Return 'false' if the wait for the number of wakeup events being processed to |
202 | * drop down to zero has been interrupted by a signal (and the current number | 618 | * drop down to zero has been interrupted by a signal (and the current number |
203 | * of wakeup events being processed is still nonzero). Otherwise return true. | 619 | * of wakeup events being processed is still nonzero). Otherwise return 'true'. |
204 | */ | 620 | */ |
205 | bool pm_get_wakeup_count(unsigned long *count) | 621 | bool pm_get_wakeup_count(unsigned int *count) |
206 | { | 622 | { |
207 | bool ret; | 623 | unsigned int cnt, inpr; |
208 | |||
209 | spin_lock_irq(&events_lock); | ||
210 | if (capable(CAP_SYS_ADMIN)) | ||
211 | events_check_enabled = false; | ||
212 | |||
213 | while (events_in_progress && !signal_pending(current)) { | ||
214 | spin_unlock_irq(&events_lock); | ||
215 | 624 | ||
216 | schedule_timeout_interruptible(msecs_to_jiffies(100)); | 625 | for (;;) { |
217 | 626 | split_counters(&cnt, &inpr); | |
218 | spin_lock_irq(&events_lock); | 627 | if (inpr == 0 || signal_pending(current)) |
628 | break; | ||
629 | pm_wakeup_update_hit_counts(); | ||
630 | schedule_timeout_interruptible(msecs_to_jiffies(TIMEOUT)); | ||
219 | } | 631 | } |
220 | *count = event_count; | 632 | |
221 | ret = !events_in_progress; | 633 | split_counters(&cnt, &inpr); |
222 | spin_unlock_irq(&events_lock); | 634 | *count = cnt; |
223 | return ret; | 635 | return !inpr; |
224 | } | 636 | } |
225 | 637 | ||
226 | /** | 638 | /** |
@@ -229,19 +641,106 @@ bool pm_get_wakeup_count(unsigned long *count) | |||
229 | * | 641 | * |
230 | * If @count is equal to the current number of registered wakeup events and the | 642 | * If @count is equal to the current number of registered wakeup events and the |
231 | * current number of wakeup events being processed is zero, store @count as the | 643 | * current number of wakeup events being processed is zero, store @count as the |
232 | * old number of registered wakeup events to be used by pm_check_wakeup_events() | 644 | * old number of registered wakeup events for pm_wakeup_pending(), enable |
233 | * and return true. Otherwise return false. | 645 | * wakeup events detection and return 'true'. Otherwise disable wakeup events |
646 | * detection and return 'false'. | ||
234 | */ | 647 | */ |
235 | bool pm_save_wakeup_count(unsigned long count) | 648 | bool pm_save_wakeup_count(unsigned int count) |
236 | { | 649 | { |
237 | bool ret = false; | 650 | unsigned int cnt, inpr; |
238 | 651 | ||
652 | events_check_enabled = false; | ||
239 | spin_lock_irq(&events_lock); | 653 | spin_lock_irq(&events_lock); |
240 | if (count == event_count && !events_in_progress) { | 654 | split_counters(&cnt, &inpr); |
241 | saved_event_count = count; | 655 | if (cnt == count && inpr == 0) { |
656 | saved_count = count; | ||
242 | events_check_enabled = true; | 657 | events_check_enabled = true; |
243 | ret = true; | ||
244 | } | 658 | } |
245 | spin_unlock_irq(&events_lock); | 659 | spin_unlock_irq(&events_lock); |
660 | if (!events_check_enabled) | ||
661 | pm_wakeup_update_hit_counts(); | ||
662 | return events_check_enabled; | ||
663 | } | ||
664 | |||
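pm_get_wakeup_count() and pm_save_wakeup_count() back the wakeup-count handshake exposed to user space: read the current count, write the same value back, and the subsequent suspend only proceeds if no new events arrived in between. The sysfs handlers below are a hedged sketch of that handshake, not the actual /sys/power/wakeup_count implementation.

static ssize_t wakeup_count_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
{
	unsigned int cnt;

	if (!pm_get_wakeup_count(&cnt))
		return -EINTR;		/* interrupted while events were in progress */
	return sprintf(buf, "%u\n", cnt);
}

static ssize_t wakeup_count_store(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  const char *buf, size_t n)
{
	unsigned int cnt;

	if (sscanf(buf, "%u", &cnt) != 1)
		return -EINVAL;
	/* Arm the check in pm_wakeup_pending(); fail if events already arrived. */
	return pm_save_wakeup_count(cnt) ? n : -EINVAL;
}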
665 | static struct dentry *wakeup_sources_stats_dentry; | ||
666 | |||
667 | /** | ||
668 | * print_wakeup_source_stats - Print wakeup source statistics information. | ||
669 | * @m: seq_file to print the statistics into. | ||
670 | * @ws: Wakeup source object to print the statistics for. | ||
671 | */ | ||
672 | static int print_wakeup_source_stats(struct seq_file *m, | ||
673 | struct wakeup_source *ws) | ||
674 | { | ||
675 | unsigned long flags; | ||
676 | ktime_t total_time; | ||
677 | ktime_t max_time; | ||
678 | unsigned long active_count; | ||
679 | ktime_t active_time; | ||
680 | int ret; | ||
681 | |||
682 | spin_lock_irqsave(&ws->lock, flags); | ||
683 | |||
684 | total_time = ws->total_time; | ||
685 | max_time = ws->max_time; | ||
686 | active_count = ws->active_count; | ||
687 | if (ws->active) { | ||
688 | active_time = ktime_sub(ktime_get(), ws->last_time); | ||
689 | total_time = ktime_add(total_time, active_time); | ||
690 | if (active_time.tv64 > max_time.tv64) | ||
691 | max_time = active_time; | ||
692 | } else { | ||
693 | active_time = ktime_set(0, 0); | ||
694 | } | ||
695 | |||
696 | ret = seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t" | ||
697 | "%lld\t\t%lld\t\t%lld\t\t%lld\n", | ||
698 | ws->name, active_count, ws->event_count, ws->hit_count, | ||
699 | ktime_to_ms(active_time), ktime_to_ms(total_time), | ||
700 | ktime_to_ms(max_time), ktime_to_ms(ws->last_time)); | ||
701 | |||
702 | spin_unlock_irqrestore(&ws->lock, flags); | ||
703 | |||
246 | return ret; | 704 | return ret; |
247 | } | 705 | } |
706 | |||
707 | /** | ||
708 | * wakeup_sources_stats_show - Print wakeup sources statistics information. | ||
709 | * @m: seq_file to print the statistics into. | ||
710 | */ | ||
711 | static int wakeup_sources_stats_show(struct seq_file *m, void *unused) | ||
712 | { | ||
713 | struct wakeup_source *ws; | ||
714 | |||
715 | seq_puts(m, "name\t\tactive_count\tevent_count\thit_count\t" | ||
716 | "active_since\ttotal_time\tmax_time\tlast_change\n"); | ||
717 | |||
718 | rcu_read_lock(); | ||
719 | list_for_each_entry_rcu(ws, &wakeup_sources, entry) | ||
720 | print_wakeup_source_stats(m, ws); | ||
721 | rcu_read_unlock(); | ||
722 | |||
723 | return 0; | ||
724 | } | ||
725 | |||
726 | static int wakeup_sources_stats_open(struct inode *inode, struct file *file) | ||
727 | { | ||
728 | return single_open(file, wakeup_sources_stats_show, NULL); | ||
729 | } | ||
730 | |||
731 | static const struct file_operations wakeup_sources_stats_fops = { | ||
732 | .owner = THIS_MODULE, | ||
733 | .open = wakeup_sources_stats_open, | ||
734 | .read = seq_read, | ||
735 | .llseek = seq_lseek, | ||
736 | .release = single_release, | ||
737 | }; | ||
738 | |||
739 | static int __init wakeup_sources_debugfs_init(void) | ||
740 | { | ||
741 | wakeup_sources_stats_dentry = debugfs_create_file("wakeup_sources", | ||
742 | S_IRUGO, NULL, NULL, &wakeup_sources_stats_fops); | ||
743 | return 0; | ||
744 | } | ||
745 | |||
746 | postcore_initcall(wakeup_sources_debugfs_init); | ||
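The statistics file created above only lists wakeup sources that have been registered. As a hedged sketch (driver name hypothetical, assuming the usual device wakeup helpers), a driver typically gets its source onto that list from its probe routine:

#include <linux/platform_device.h>
#include <linux/pm_wakeup.h>

static int my_dev_probe(struct platform_device *pdev)
{
	int ret;

	/*
	 * Attaches and activates a wakeup source for the device, which then
	 * shows up in /sys/kernel/debug/wakeup_sources.
	 */
	ret = device_init_wakeup(&pdev->dev, true);
	if (ret)
		return ret;

	return 0;
}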
diff --git a/drivers/base/sys.c b/drivers/base/sys.c index 9354dc10a363..9dff77bfe1e3 100644 --- a/drivers/base/sys.c +++ b/drivers/base/sys.c | |||
@@ -166,8 +166,38 @@ EXPORT_SYMBOL_GPL(sysdev_class_unregister); | |||
166 | 166 | ||
167 | static DEFINE_MUTEX(sysdev_drivers_lock); | 167 | static DEFINE_MUTEX(sysdev_drivers_lock); |
168 | 168 | ||
169 | /* | ||
170 | * @dev != NULL means that we're unwinding because some drv->add() | ||
171 | * failed for some reason. You need to grab sysdev_drivers_lock before | ||
172 | * calling this. | ||
173 | */ | ||
174 | static void __sysdev_driver_remove(struct sysdev_class *cls, | ||
175 | struct sysdev_driver *drv, | ||
176 | struct sys_device *from_dev) | ||
177 | { | ||
178 | struct sys_device *dev = from_dev; | ||
179 | |||
180 | list_del_init(&drv->entry); | ||
181 | if (!cls) | ||
182 | return; | ||
183 | |||
184 | if (!drv->remove) | ||
185 | goto kset_put; | ||
186 | |||
187 | if (dev) | ||
188 | list_for_each_entry_continue_reverse(dev, &cls->kset.list, | ||
189 | kobj.entry) | ||
190 | drv->remove(dev); | ||
191 | else | ||
192 | list_for_each_entry(dev, &cls->kset.list, kobj.entry) | ||
193 | drv->remove(dev); | ||
194 | |||
195 | kset_put: | ||
196 | kset_put(&cls->kset); | ||
197 | } | ||
198 | |||
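The helper relies on list_for_each_entry_continue_reverse() starting at the predecessor of the entry whose drv->add() failed, so remove() is only called for devices that were actually added. A standalone sketch of that add/unwind shape (function name illustrative):

static int my_add_all_or_unwind(struct sysdev_class *cls,
				struct sysdev_driver *drv)
{
	struct sys_device *dev;
	int err = 0;

	list_for_each_entry(dev, &cls->kset.list, kobj.entry) {
		err = drv->add(dev);
		if (err)
			goto unwind;	/* dev points at the entry that failed */
	}
	return 0;

unwind:
	/* Walk backwards from the failing entry, skipping it. */
	list_for_each_entry_continue_reverse(dev, &cls->kset.list, kobj.entry)
		if (drv->remove)
			drv->remove(dev);
	return err;
}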
169 | /** | 199 | /** |
170 | * sysdev_driver_register - Register auxillary driver | 200 | * sysdev_driver_register - Register auxiliary driver |
171 | * @cls: Device class driver belongs to. | 201 | * @cls: Device class driver belongs to. |
172 | * @drv: Driver. | 202 | * @drv: Driver. |
173 | * | 203 | * |
@@ -175,14 +205,14 @@ static DEFINE_MUTEX(sysdev_drivers_lock); | |||
175 | * called on each operation on devices of that class. The refcount | 205 | * called on each operation on devices of that class. The refcount |
176 | * of @cls is incremented. | 206 | * of @cls is incremented. |
177 | */ | 207 | */ |
178 | |||
179 | int sysdev_driver_register(struct sysdev_class *cls, struct sysdev_driver *drv) | 208 | int sysdev_driver_register(struct sysdev_class *cls, struct sysdev_driver *drv) |
180 | { | 209 | { |
210 | struct sys_device *dev = NULL; | ||
181 | int err = 0; | 211 | int err = 0; |
182 | 212 | ||
183 | if (!cls) { | 213 | if (!cls) { |
184 | WARN(1, KERN_WARNING "sysdev: invalid class passed to " | 214 | WARN(1, KERN_WARNING "sysdev: invalid class passed to %s!\n", |
185 | "sysdev_driver_register!\n"); | 215 | __func__); |
186 | return -EINVAL; | 216 | return -EINVAL; |
187 | } | 217 | } |
188 | 218 | ||
@@ -198,21 +228,29 @@ int sysdev_driver_register(struct sysdev_class *cls, struct sysdev_driver *drv) | |||
198 | 228 | ||
199 | /* If devices of this class already exist, tell the driver */ | 229 | /* If devices of this class already exist, tell the driver */ |
200 | if (drv->add) { | 230 | if (drv->add) { |
201 | struct sys_device *dev; | 231 | list_for_each_entry(dev, &cls->kset.list, kobj.entry) { |
202 | list_for_each_entry(dev, &cls->kset.list, kobj.entry) | 232 | err = drv->add(dev); |
203 | drv->add(dev); | 233 | if (err) |
234 | goto unwind; | ||
235 | } | ||
204 | } | 236 | } |
205 | } else { | 237 | } else { |
206 | err = -EINVAL; | 238 | err = -EINVAL; |
207 | WARN(1, KERN_ERR "%s: invalid device class\n", __func__); | 239 | WARN(1, KERN_ERR "%s: invalid device class\n", __func__); |
208 | } | 240 | } |
241 | |||
242 | goto unlock; | ||
243 | |||
244 | unwind: | ||
245 | __sysdev_driver_remove(cls, drv, dev); | ||
246 | |||
247 | unlock: | ||
209 | mutex_unlock(&sysdev_drivers_lock); | 248 | mutex_unlock(&sysdev_drivers_lock); |
210 | return err; | 249 | return err; |
211 | } | 250 | } |
212 | 251 | ||
213 | |||
214 | /** | 252 | /** |
215 | * sysdev_driver_unregister - Remove an auxillary driver. | 253 | * sysdev_driver_unregister - Remove an auxiliary driver. |
216 | * @cls: Class driver belongs to. | 254 | * @cls: Class driver belongs to. |
217 | * @drv: Driver. | 255 | * @drv: Driver. |
218 | */ | 256 | */ |
@@ -220,23 +258,12 @@ void sysdev_driver_unregister(struct sysdev_class *cls, | |||
220 | struct sysdev_driver *drv) | 258 | struct sysdev_driver *drv) |
221 | { | 259 | { |
222 | mutex_lock(&sysdev_drivers_lock); | 260 | mutex_lock(&sysdev_drivers_lock); |
223 | list_del_init(&drv->entry); | 261 | __sysdev_driver_remove(cls, drv, NULL); |
224 | if (cls) { | ||
225 | if (drv->remove) { | ||
226 | struct sys_device *dev; | ||
227 | list_for_each_entry(dev, &cls->kset.list, kobj.entry) | ||
228 | drv->remove(dev); | ||
229 | } | ||
230 | kset_put(&cls->kset); | ||
231 | } | ||
232 | mutex_unlock(&sysdev_drivers_lock); | 262 | mutex_unlock(&sysdev_drivers_lock); |
233 | } | 263 | } |
234 | |||
235 | EXPORT_SYMBOL_GPL(sysdev_driver_register); | 264 | EXPORT_SYMBOL_GPL(sysdev_driver_register); |
236 | EXPORT_SYMBOL_GPL(sysdev_driver_unregister); | 265 | EXPORT_SYMBOL_GPL(sysdev_driver_unregister); |
237 | 266 | ||
238 | |||
239 | |||
240 | /** | 267 | /** |
241 | * sysdev_register - add a system device to the tree | 268 | * sysdev_register - add a system device to the tree |
242 | * @sysdev: device in question | 269 | * @sysdev: device in question |
@@ -275,7 +302,7 @@ int sysdev_register(struct sys_device *sysdev) | |||
275 | * code that should have called us. | 302 | * code that should have called us. |
276 | */ | 303 | */ |
277 | 304 | ||
278 | /* Notify class auxillary drivers */ | 305 | /* Notify class auxiliary drivers */ |
279 | list_for_each_entry(drv, &cls->drivers, entry) { | 306 | list_for_each_entry(drv, &cls->drivers, entry) { |
280 | if (drv->add) | 307 | if (drv->add) |
281 | drv->add(sysdev); | 308 | drv->add(sysdev); |
@@ -301,202 +328,8 @@ void sysdev_unregister(struct sys_device *sysdev) | |||
301 | kobject_put(&sysdev->kobj); | 328 | kobject_put(&sysdev->kobj); |
302 | } | 329 | } |
303 | 330 | ||
304 | 331 | EXPORT_SYMBOL_GPL(sysdev_register); | |
305 | 332 | EXPORT_SYMBOL_GPL(sysdev_unregister); | |
306 | /** | ||
307 | * sysdev_shutdown - Shut down all system devices. | ||
308 | * | ||
309 | * Loop over each class of system devices, and the devices in each | ||
310 | * of those classes. For each device, we call the shutdown method for | ||
311 | * each driver registered for the device - the auxillaries, | ||
312 | * and the class driver. | ||
313 | * | ||
314 | * Note: The list is iterated in reverse order, so that we shut down | ||
315 | * child devices before we shut down their parents. The list ordering | ||
316 | * is guaranteed by virtue of the fact that child devices are registered | ||
317 | * after their parents. | ||
318 | */ | ||
319 | void sysdev_shutdown(void) | ||
320 | { | ||
321 | struct sysdev_class *cls; | ||
322 | |||
323 | pr_debug("Shutting Down System Devices\n"); | ||
324 | |||
325 | mutex_lock(&sysdev_drivers_lock); | ||
326 | list_for_each_entry_reverse(cls, &system_kset->list, kset.kobj.entry) { | ||
327 | struct sys_device *sysdev; | ||
328 | |||
329 | pr_debug("Shutting down type '%s':\n", | ||
330 | kobject_name(&cls->kset.kobj)); | ||
331 | |||
332 | list_for_each_entry(sysdev, &cls->kset.list, kobj.entry) { | ||
333 | struct sysdev_driver *drv; | ||
334 | pr_debug(" %s\n", kobject_name(&sysdev->kobj)); | ||
335 | |||
336 | /* Call auxillary drivers first */ | ||
337 | list_for_each_entry(drv, &cls->drivers, entry) { | ||
338 | if (drv->shutdown) | ||
339 | drv->shutdown(sysdev); | ||
340 | } | ||
341 | |||
342 | /* Now call the generic one */ | ||
343 | if (cls->shutdown) | ||
344 | cls->shutdown(sysdev); | ||
345 | } | ||
346 | } | ||
347 | mutex_unlock(&sysdev_drivers_lock); | ||
348 | } | ||
349 | |||
350 | static void __sysdev_resume(struct sys_device *dev) | ||
351 | { | ||
352 | struct sysdev_class *cls = dev->cls; | ||
353 | struct sysdev_driver *drv; | ||
354 | |||
355 | /* First, call the class-specific one */ | ||
356 | if (cls->resume) | ||
357 | cls->resume(dev); | ||
358 | WARN_ONCE(!irqs_disabled(), | ||
359 | "Interrupts enabled after %pF\n", cls->resume); | ||
360 | |||
361 | /* Call auxillary drivers next. */ | ||
362 | list_for_each_entry(drv, &cls->drivers, entry) { | ||
363 | if (drv->resume) | ||
364 | drv->resume(dev); | ||
365 | WARN_ONCE(!irqs_disabled(), | ||
366 | "Interrupts enabled after %pF\n", drv->resume); | ||
367 | } | ||
368 | } | ||
369 | |||
370 | /** | ||
371 | * sysdev_suspend - Suspend all system devices. | ||
372 | * @state: Power state to enter. | ||
373 | * | ||
374 | * We perform an almost identical operation as sysdev_shutdown() | ||
375 | * above, though calling ->suspend() instead. Interrupts are disabled | ||
376 | * when this called. Devices are responsible for both saving state and | ||
377 | * quiescing or powering down the device. | ||
378 | * | ||
379 | * This is only called by the device PM core, so we let them handle | ||
380 | * all synchronization. | ||
381 | */ | ||
382 | int sysdev_suspend(pm_message_t state) | ||
383 | { | ||
384 | struct sysdev_class *cls; | ||
385 | struct sys_device *sysdev, *err_dev; | ||
386 | struct sysdev_driver *drv, *err_drv; | ||
387 | int ret; | ||
388 | |||
389 | pr_debug("Checking wake-up interrupts\n"); | ||
390 | |||
391 | /* Return error code if there are any wake-up interrupts pending */ | ||
392 | ret = check_wakeup_irqs(); | ||
393 | if (ret) | ||
394 | return ret; | ||
395 | |||
396 | WARN_ONCE(!irqs_disabled(), | ||
397 | "Interrupts enabled while suspending system devices\n"); | ||
398 | |||
399 | pr_debug("Suspending System Devices\n"); | ||
400 | |||
401 | list_for_each_entry_reverse(cls, &system_kset->list, kset.kobj.entry) { | ||
402 | pr_debug("Suspending type '%s':\n", | ||
403 | kobject_name(&cls->kset.kobj)); | ||
404 | |||
405 | list_for_each_entry(sysdev, &cls->kset.list, kobj.entry) { | ||
406 | pr_debug(" %s\n", kobject_name(&sysdev->kobj)); | ||
407 | |||
408 | /* Call auxillary drivers first */ | ||
409 | list_for_each_entry(drv, &cls->drivers, entry) { | ||
410 | if (drv->suspend) { | ||
411 | ret = drv->suspend(sysdev, state); | ||
412 | if (ret) | ||
413 | goto aux_driver; | ||
414 | } | ||
415 | WARN_ONCE(!irqs_disabled(), | ||
416 | "Interrupts enabled after %pF\n", | ||
417 | drv->suspend); | ||
418 | } | ||
419 | |||
420 | /* Now call the generic one */ | ||
421 | if (cls->suspend) { | ||
422 | ret = cls->suspend(sysdev, state); | ||
423 | if (ret) | ||
424 | goto cls_driver; | ||
425 | WARN_ONCE(!irqs_disabled(), | ||
426 | "Interrupts enabled after %pF\n", | ||
427 | cls->suspend); | ||
428 | } | ||
429 | } | ||
430 | } | ||
431 | return 0; | ||
432 | /* resume current sysdev */ | ||
433 | cls_driver: | ||
434 | drv = NULL; | ||
435 | printk(KERN_ERR "Class suspend failed for %s\n", | ||
436 | kobject_name(&sysdev->kobj)); | ||
437 | |||
438 | aux_driver: | ||
439 | if (drv) | ||
440 | printk(KERN_ERR "Class driver suspend failed for %s\n", | ||
441 | kobject_name(&sysdev->kobj)); | ||
442 | list_for_each_entry(err_drv, &cls->drivers, entry) { | ||
443 | if (err_drv == drv) | ||
444 | break; | ||
445 | if (err_drv->resume) | ||
446 | err_drv->resume(sysdev); | ||
447 | } | ||
448 | |||
449 | /* resume other sysdevs in current class */ | ||
450 | list_for_each_entry(err_dev, &cls->kset.list, kobj.entry) { | ||
451 | if (err_dev == sysdev) | ||
452 | break; | ||
453 | pr_debug(" %s\n", kobject_name(&err_dev->kobj)); | ||
454 | __sysdev_resume(err_dev); | ||
455 | } | ||
456 | |||
457 | /* resume other classes */ | ||
458 | list_for_each_entry_continue(cls, &system_kset->list, kset.kobj.entry) { | ||
459 | list_for_each_entry(err_dev, &cls->kset.list, kobj.entry) { | ||
460 | pr_debug(" %s\n", kobject_name(&err_dev->kobj)); | ||
461 | __sysdev_resume(err_dev); | ||
462 | } | ||
463 | } | ||
464 | return ret; | ||
465 | } | ||
466 | EXPORT_SYMBOL_GPL(sysdev_suspend); | ||
467 | |||
468 | /** | ||
469 | * sysdev_resume - Bring system devices back to life. | ||
470 | * | ||
471 | * Similar to sysdev_suspend(), but we iterate the list forwards | ||
472 | * to guarantee that parent devices are resumed before their children. | ||
473 | * | ||
474 | * Note: Interrupts are disabled when called. | ||
475 | */ | ||
476 | int sysdev_resume(void) | ||
477 | { | ||
478 | struct sysdev_class *cls; | ||
479 | |||
480 | WARN_ONCE(!irqs_disabled(), | ||
481 | "Interrupts enabled while resuming system devices\n"); | ||
482 | |||
483 | pr_debug("Resuming System Devices\n"); | ||
484 | |||
485 | list_for_each_entry(cls, &system_kset->list, kset.kobj.entry) { | ||
486 | struct sys_device *sysdev; | ||
487 | |||
488 | pr_debug("Resuming type '%s':\n", | ||
489 | kobject_name(&cls->kset.kobj)); | ||
490 | |||
491 | list_for_each_entry(sysdev, &cls->kset.list, kobj.entry) { | ||
492 | pr_debug(" %s\n", kobject_name(&sysdev->kobj)); | ||
493 | |||
494 | __sysdev_resume(sysdev); | ||
495 | } | ||
496 | } | ||
497 | return 0; | ||
498 | } | ||
499 | EXPORT_SYMBOL_GPL(sysdev_resume); | ||
500 | 333 | ||
501 | int __init system_bus_init(void) | 334 | int __init system_bus_init(void) |
502 | { | 335 | { |
@@ -506,9 +339,6 @@ int __init system_bus_init(void) | |||
506 | return 0; | 339 | return 0; |
507 | } | 340 | } |
508 | 341 | ||
509 | EXPORT_SYMBOL_GPL(sysdev_register); | ||
510 | EXPORT_SYMBOL_GPL(sysdev_unregister); | ||
511 | |||
512 | #define to_ext_attr(x) container_of(x, struct sysdev_ext_attribute, attr) | 342 | #define to_ext_attr(x) container_of(x, struct sysdev_ext_attribute, attr) |
513 | 343 | ||
514 | ssize_t sysdev_store_ulong(struct sys_device *sysdev, | 344 | ssize_t sysdev_store_ulong(struct sys_device *sysdev, |
diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c new file mode 100644 index 000000000000..e8d11b6630ee --- /dev/null +++ b/drivers/base/syscore.c | |||
@@ -0,0 +1,127 @@ | |||
1 | /* | ||
2 | * syscore.c - Execution of system core operations. | ||
3 | * | ||
4 | * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. | ||
5 | * | ||
6 | * This file is released under the GPLv2. | ||
7 | */ | ||
8 | |||
9 | #include <linux/syscore_ops.h> | ||
10 | #include <linux/mutex.h> | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/interrupt.h> | ||
13 | |||
14 | static LIST_HEAD(syscore_ops_list); | ||
15 | static DEFINE_MUTEX(syscore_ops_lock); | ||
16 | |||
17 | /** | ||
18 | * register_syscore_ops - Register a set of system core operations. | ||
19 | * @ops: System core operations to register. | ||
20 | */ | ||
21 | void register_syscore_ops(struct syscore_ops *ops) | ||
22 | { | ||
23 | mutex_lock(&syscore_ops_lock); | ||
24 | list_add_tail(&ops->node, &syscore_ops_list); | ||
25 | mutex_unlock(&syscore_ops_lock); | ||
26 | } | ||
27 | EXPORT_SYMBOL_GPL(register_syscore_ops); | ||
28 | |||
29 | /** | ||
30 | * unregister_syscore_ops - Unregister a set of system core operations. | ||
31 | * @ops: System core operations to unregister. | ||
32 | */ | ||
33 | void unregister_syscore_ops(struct syscore_ops *ops) | ||
34 | { | ||
35 | mutex_lock(&syscore_ops_lock); | ||
36 | list_del(&ops->node); | ||
37 | mutex_unlock(&syscore_ops_lock); | ||
38 | } | ||
39 | EXPORT_SYMBOL_GPL(unregister_syscore_ops); | ||
40 | |||
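For context, a hedged sketch of how a subsystem would plug into this API; the callbacks and names are illustrative. Syscore callbacks run with a single CPU on-line and interrupts disabled, so they must not sleep.

static int my_subsys_suspend(void)
{
	/* save state that per-device callbacks cannot handle */
	return 0;
}

static void my_subsys_resume(void)
{
	/* restore the state saved in my_subsys_suspend() */
}

static struct syscore_ops my_subsys_syscore_ops = {
	.suspend	= my_subsys_suspend,
	.resume		= my_subsys_resume,
};

static int __init my_subsys_init(void)
{
	register_syscore_ops(&my_subsys_syscore_ops);
	return 0;
}
postcore_initcall(my_subsys_init);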
41 | #ifdef CONFIG_PM_SLEEP | ||
42 | /** | ||
43 | * syscore_suspend - Execute all the registered system core suspend callbacks. | ||
44 | * | ||
45 | * This function is executed with one CPU on-line and interrupts disabled. | ||
46 | */ | ||
47 | int syscore_suspend(void) | ||
48 | { | ||
49 | struct syscore_ops *ops; | ||
50 | int ret = 0; | ||
51 | |||
52 | pr_debug("Checking wakeup interrupts\n"); | ||
53 | |||
54 | /* Return error code if there are any wakeup interrupts pending. */ | ||
55 | ret = check_wakeup_irqs(); | ||
56 | if (ret) | ||
57 | return ret; | ||
58 | |||
59 | WARN_ONCE(!irqs_disabled(), | ||
60 | "Interrupts enabled before system core suspend.\n"); | ||
61 | |||
62 | list_for_each_entry_reverse(ops, &syscore_ops_list, node) | ||
63 | if (ops->suspend) { | ||
64 | if (initcall_debug) | ||
65 | pr_info("PM: Calling %pF\n", ops->suspend); | ||
66 | ret = ops->suspend(); | ||
67 | if (ret) | ||
68 | goto err_out; | ||
69 | WARN_ONCE(!irqs_disabled(), | ||
70 | "Interrupts enabled after %pF\n", ops->suspend); | ||
71 | } | ||
72 | |||
73 | return 0; | ||
74 | |||
75 | err_out: | ||
76 | pr_err("PM: System core suspend callback %pF failed.\n", ops->suspend); | ||
77 | |||
78 | list_for_each_entry_continue(ops, &syscore_ops_list, node) | ||
79 | if (ops->resume) | ||
80 | ops->resume(); | ||
81 | |||
82 | return ret; | ||
83 | } | ||
84 | EXPORT_SYMBOL_GPL(syscore_suspend); | ||
85 | |||
86 | /** | ||
87 | * syscore_resume - Execute all the registered system core resume callbacks. | ||
88 | * | ||
89 | * This function is executed with one CPU on-line and interrupts disabled. | ||
90 | */ | ||
91 | void syscore_resume(void) | ||
92 | { | ||
93 | struct syscore_ops *ops; | ||
94 | |||
95 | WARN_ONCE(!irqs_disabled(), | ||
96 | "Interrupts enabled before system core resume.\n"); | ||
97 | |||
98 | list_for_each_entry(ops, &syscore_ops_list, node) | ||
99 | if (ops->resume) { | ||
100 | if (initcall_debug) | ||
101 | pr_info("PM: Calling %pF\n", ops->resume); | ||
102 | ops->resume(); | ||
103 | WARN_ONCE(!irqs_disabled(), | ||
104 | "Interrupts enabled after %pF\n", ops->resume); | ||
105 | } | ||
106 | } | ||
107 | EXPORT_SYMBOL_GPL(syscore_resume); | ||
108 | #endif /* CONFIG_PM_SLEEP */ | ||
109 | |||
110 | /** | ||
111 | * syscore_shutdown - Execute all the registered system core shutdown callbacks. | ||
112 | */ | ||
113 | void syscore_shutdown(void) | ||
114 | { | ||
115 | struct syscore_ops *ops; | ||
116 | |||
117 | mutex_lock(&syscore_ops_lock); | ||
118 | |||
119 | list_for_each_entry_reverse(ops, &syscore_ops_list, node) | ||
120 | if (ops->shutdown) { | ||
121 | if (initcall_debug) | ||
122 | pr_info("PM: Calling %pF\n", ops->shutdown); | ||
123 | ops->shutdown(); | ||
124 | } | ||
125 | |||
126 | mutex_unlock(&syscore_ops_lock); | ||
127 | } | ||
diff --git a/drivers/base/topology.c b/drivers/base/topology.c index 9fc630ce1ddb..f6f37a05a0c3 100644 --- a/drivers/base/topology.c +++ b/drivers/base/topology.c | |||
@@ -45,7 +45,8 @@ static ssize_t show_##name(struct sys_device *dev, \ | |||
45 | return sprintf(buf, "%d\n", topology_##name(cpu)); \ | 45 | return sprintf(buf, "%d\n", topology_##name(cpu)); \ |
46 | } | 46 | } |
47 | 47 | ||
48 | #if defined(topology_thread_cpumask) || defined(topology_core_cpumask) | 48 | #if defined(topology_thread_cpumask) || defined(topology_core_cpumask) || \ |
49 | defined(topology_book_cpumask) | ||
49 | static ssize_t show_cpumap(int type, const struct cpumask *mask, char *buf) | 50 | static ssize_t show_cpumap(int type, const struct cpumask *mask, char *buf) |
50 | { | 51 | { |
51 | ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf; | 52 | ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf; |
@@ -114,6 +115,14 @@ define_siblings_show_func(core_cpumask); | |||
114 | define_one_ro_named(core_siblings, show_core_cpumask); | 115 | define_one_ro_named(core_siblings, show_core_cpumask); |
115 | define_one_ro_named(core_siblings_list, show_core_cpumask_list); | 116 | define_one_ro_named(core_siblings_list, show_core_cpumask_list); |
116 | 117 | ||
118 | #ifdef CONFIG_SCHED_BOOK | ||
119 | define_id_show_func(book_id); | ||
120 | define_one_ro(book_id); | ||
121 | define_siblings_show_func(book_cpumask); | ||
122 | define_one_ro_named(book_siblings, show_book_cpumask); | ||
123 | define_one_ro_named(book_siblings_list, show_book_cpumask_list); | ||
124 | #endif | ||
125 | |||
117 | static struct attribute *default_attrs[] = { | 126 | static struct attribute *default_attrs[] = { |
118 | &attr_physical_package_id.attr, | 127 | &attr_physical_package_id.attr, |
119 | &attr_core_id.attr, | 128 | &attr_core_id.attr, |
@@ -121,6 +130,11 @@ static struct attribute *default_attrs[] = { | |||
121 | &attr_thread_siblings_list.attr, | 130 | &attr_thread_siblings_list.attr, |
122 | &attr_core_siblings.attr, | 131 | &attr_core_siblings.attr, |
123 | &attr_core_siblings_list.attr, | 132 | &attr_core_siblings_list.attr, |
133 | #ifdef CONFIG_SCHED_BOOK | ||
134 | &attr_book_id.attr, | ||
135 | &attr_book_siblings.attr, | ||
136 | &attr_book_siblings_list.attr, | ||
137 | #endif | ||
124 | NULL | 138 | NULL |
125 | }; | 139 | }; |
126 | 140 | ||