author     Rafael J. Wysocki <rjw@sisk.pl>        2008-01-12 14:40:46 -0500
committer  Greg Kroah-Hartman <gregkh@suse.de>    2008-01-24 23:40:04 -0500
commit     775b64d2b6ca37697de925f70799c710aab5849a (patch)
tree       09e91c89228c8d3c6928a1b2ef56711190c69836
parent     7a83d456a86d559a6347115d206d23774bc152d9 (diff)
PM: Acquire device locks on suspend
This patch reorganizes the way suspend and resume notifications are
sent to drivers. The major changes are that the PM core now acquires
every device's semaphore before calling the suspend and resume methods,
that calls to device_add() made during a suspend will fail, and that
calls to device_del() made during a suspend will block.
It also provides a way to safely remove a suspended device with the
help of the PM core, using the device_pm_schedule_removal() function
introduced specifically for this purpose (exposed to drivers through
destroy_suspended_device()), and updates the two drivers (msr and
cpuid) that need to use it.
Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
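
[Editorial note, not part of the patch] For orientation, the msr and cpuid updates below reduce to the following pattern in a CPU hotplug notifier. This is a hedged sketch: example_cpu_callback(), example_device_create()/example_device_destroy(), example_class and EXAMPLE_MAJOR are placeholders, only destroy_suspended_device() and the CPU notifier constants come from the patch and the kernel of that era.

#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/kdev_t.h>
#include <linux/notifier.h>

/* Placeholders standing in for a driver's per-CPU device helpers. */
static int example_device_create(unsigned int cpu);
static void example_device_destroy(unsigned int cpu);
static struct class *example_class;	/* assumed to be set up elsewhere */
#define EXAMPLE_MAJOR	4242		/* placeholder major number */

static int __cpuinit example_cpu_callback(struct notifier_block *nfb,
					  unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	int err = 0;

	switch (action) {
	case CPU_UP_PREPARE:
		err = example_device_create(cpu);
		break;
	case CPU_UP_CANCELED:
	case CPU_DEAD:
		/* Not in a sleep transition: direct removal is safe. */
		example_device_destroy(cpu);
		break;
	case CPU_UP_CANCELED_FROZEN:
		/*
		 * During a system sleep transition the PM core holds the
		 * device's semaphore, so the device cannot be unregistered
		 * directly; ask the PM core to remove it later instead.
		 */
		destroy_suspended_device(example_class,
					 MKDEV(EXAMPLE_MAJOR, cpu));
		break;
	}
	return err ? NOTIFY_BAD : NOTIFY_OK;
}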
-rw-r--r--  arch/x86/kernel/cpuid.c    |   6
-rw-r--r--  arch/x86/kernel/msr.c      |   6
-rw-r--r--  drivers/base/core.c        |  65
-rw-r--r--  drivers/base/power/main.c  | 502
-rw-r--r--  drivers/base/power/power.h |  12
-rw-r--r--  include/linux/device.h     |   8

6 files changed, 413 insertions(+), 186 deletions(-)
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 05c9936a16cc..d387c770c518 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -157,15 +157,15 @@ static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb,
 
 	switch (action) {
 	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
 		err = cpuid_device_create(cpu);
 		break;
 	case CPU_UP_CANCELED:
-	case CPU_UP_CANCELED_FROZEN:
 	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
 		cpuid_device_destroy(cpu);
 		break;
+	case CPU_UP_CANCELED_FROZEN:
+		destroy_suspended_device(cpuid_class, MKDEV(CPUID_MAJOR, cpu));
+		break;
 	}
 	return err ? NOTIFY_BAD : NOTIFY_OK;
 }
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index ee6eba4ecfea..21f6e3c0be18 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -155,15 +155,15 @@ static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb,
 
 	switch (action) {
 	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
 		err = msr_device_create(cpu);
 		break;
 	case CPU_UP_CANCELED:
-	case CPU_UP_CANCELED_FROZEN:
 	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
 		msr_device_destroy(cpu);
 		break;
+	case CPU_UP_CANCELED_FROZEN:
+		destroy_suspended_device(msr_class, MKDEV(MSR_MAJOR, cpu));
+		break;
 	}
 	return err ? NOTIFY_BAD : NOTIFY_OK;
 }
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 2683eac30c68..ce6b64c489ad 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -726,11 +726,20 @@ int device_add(struct device *dev)
 {
 	struct device *parent = NULL;
 	struct class_interface *class_intf;
-	int error = -EINVAL;
+	int error;
+
+	error = pm_sleep_lock();
+	if (error) {
+		dev_warn(dev, "Suspicious %s during suspend\n", __FUNCTION__);
+		dump_stack();
+		return error;
+	}
 
 	dev = get_device(dev);
-	if (!dev || !strlen(dev->bus_id))
+	if (!dev || !strlen(dev->bus_id)) {
+		error = -EINVAL;
 		goto Error;
+	}
 
 	pr_debug("DEV: registering device: ID = '%s'\n", dev->bus_id);
 
@@ -795,6 +804,7 @@ int device_add(struct device *dev)
 	}
  Done:
 	put_device(dev);
+	pm_sleep_unlock();
 	return error;
  BusError:
 	device_pm_remove(dev);
@@ -905,6 +915,7 @@ void device_del(struct device * dev)
 	struct device * parent = dev->parent;
 	struct class_interface *class_intf;
 
+	device_pm_remove(dev);
 	if (parent)
 		klist_del(&dev->knode_parent);
 	if (MAJOR(dev->devt))
@@ -981,7 +992,6 @@ void device_del(struct device * dev)
 	if (dev->bus)
 		blocking_notifier_call_chain(&dev->bus->bus_notifier,
 					     BUS_NOTIFY_DEL_DEVICE, dev);
-	device_pm_remove(dev);
 	kobject_uevent(&dev->kobj, KOBJ_REMOVE);
 	kobject_del(&dev->kobj);
 	if (parent)
@@ -1156,14 +1166,11 @@ error:
 EXPORT_SYMBOL_GPL(device_create);
 
 /**
- * device_destroy - removes a device that was created with device_create()
+ * find_device - finds a device that was created with device_create()
  * @class: pointer to the struct class that this device was registered with
  * @devt: the dev_t of the device that was previously registered
- *
- * This call unregisters and cleans up a device that was created with a
- * call to device_create().
 */
-void device_destroy(struct class *class, dev_t devt)
+static struct device *find_device(struct class *class, dev_t devt)
 {
 	struct device *dev = NULL;
 	struct device *dev_tmp;
@@ -1176,12 +1183,54 @@ void device_destroy(struct class *class, dev_t devt)
 		}
 	}
 	up(&class->sem);
+	return dev;
+}
 
+/**
+ * device_destroy - removes a device that was created with device_create()
+ * @class: pointer to the struct class that this device was registered with
+ * @devt: the dev_t of the device that was previously registered
+ *
+ * This call unregisters and cleans up a device that was created with a
+ * call to device_create().
+ */
+void device_destroy(struct class *class, dev_t devt)
+{
+	struct device *dev;
+
+	dev = find_device(class, devt);
 	if (dev)
 		device_unregister(dev);
 }
 EXPORT_SYMBOL_GPL(device_destroy);
 
+#ifdef CONFIG_PM_SLEEP
+/**
+ * destroy_suspended_device - asks the PM core to remove a suspended device
+ * @class: pointer to the struct class that this device was registered with
+ * @devt: the dev_t of the device that was previously registered
+ *
+ * This call notifies the PM core of the necessity to unregister a suspended
+ * device created with a call to device_create() (devices cannot be
+ * unregistered directly while suspended, since the PM core holds their
+ * semaphores at that time).
+ *
+ * It can only be called within the scope of a system sleep transition. In
+ * practice this means it has to be directly or indirectly invoked either by
+ * a suspend or resume method, or by the PM core (e.g. via
+ * disable_nonboot_cpus() or enable_nonboot_cpus()).
+ */
+void destroy_suspended_device(struct class *class, dev_t devt)
+{
+	struct device *dev;
+
+	dev = find_device(class, devt);
+	if (dev)
+		device_pm_schedule_removal(dev);
+}
+EXPORT_SYMBOL_GPL(destroy_suspended_device);
+#endif /* CONFIG_PM_SLEEP */
+
 /**
  * device_rename - renames a device
  * @dev: the pointer to the struct device to be renamed
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 691ffb64cc37..200ed5fafd50 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -24,20 +24,45 @@
 #include <linux/mutex.h>
 #include <linux/pm.h>
 #include <linux/resume-trace.h>
+#include <linux/rwsem.h>
 
 #include "../base.h"
 #include "power.h"
 
+/*
+ * The entries in the dpm_active list are in a depth first order, simply
+ * because children are guaranteed to be discovered after parents, and
+ * are inserted at the back of the list on discovery.
+ *
+ * All the other lists are kept in the same order, for consistency.
+ * However the lists aren't always traversed in the same order.
+ * Semaphores must be acquired from the top (i.e., front) down
+ * and released in the opposite order. Devices must be suspended
+ * from the bottom (i.e., end) up and resumed in the opposite order.
+ * That way no parent will be suspended while it still has an active
+ * child.
+ *
+ * Since device_pm_add() may be called with a device semaphore held,
+ * we must never try to acquire a device semaphore while holding
+ * dpm_list_mutex.
+ */
+
 LIST_HEAD(dpm_active);
+static LIST_HEAD(dpm_locked);
 static LIST_HEAD(dpm_off);
 static LIST_HEAD(dpm_off_irq);
+static LIST_HEAD(dpm_destroy);
 
-static DEFINE_MUTEX(dpm_mtx);
 static DEFINE_MUTEX(dpm_list_mtx);
 
-int (*platform_enable_wakeup)(struct device *dev, int is_on);
+static DECLARE_RWSEM(pm_sleep_rwsem);
 
+int (*platform_enable_wakeup)(struct device *dev, int is_on);
 
+/**
+ * device_pm_add - add a device to the list of active devices
+ * @dev: Device to be added to the list
+ */
 void device_pm_add(struct device *dev)
 {
 	pr_debug("PM: Adding info for %s:%s\n",
@@ -48,8 +73,36 @@ void device_pm_add(struct device *dev)
 	mutex_unlock(&dpm_list_mtx);
 }
 
+/**
+ * device_pm_remove - remove a device from the list of active devices
+ * @dev: Device to be removed from the list
+ *
+ * This function also removes the device's PM-related sysfs attributes.
+ */
 void device_pm_remove(struct device *dev)
 {
+	/*
+	 * If this function is called during a suspend, it will be blocked,
+	 * because we're holding the device's semaphore at that time, which may
+	 * lead to a deadlock. In that case we want to print a warning.
+	 * However, it may also be called by unregister_dropped_devices() with
+	 * the device's semaphore released, in which case the warning should
+	 * not be printed.
+	 */
+	if (down_trylock(&dev->sem)) {
+		if (down_read_trylock(&pm_sleep_rwsem)) {
+			/* No suspend in progress, wait on dev->sem */
+			down(&dev->sem);
+			up_read(&pm_sleep_rwsem);
+		} else {
+			/* Suspend in progress, we may deadlock */
+			dev_warn(dev, "Suspicious %s during suspend\n",
+				__FUNCTION__);
+			dump_stack();
+			/* The user has been warned ... */
+			down(&dev->sem);
+		}
+	}
 	pr_debug("PM: Removing info for %s:%s\n",
 		 dev->bus ? dev->bus->name : "No Bus",
 		 kobject_name(&dev->kobj));
@@ -57,25 +110,124 @@ void device_pm_remove(struct device *dev)
 	dpm_sysfs_remove(dev);
 	list_del_init(&dev->power.entry);
 	mutex_unlock(&dpm_list_mtx);
+	up(&dev->sem);
+}
+
+/**
+ * device_pm_schedule_removal - schedule the removal of a suspended device
+ * @dev: Device to destroy
+ *
+ * Moves the device to the dpm_destroy list for further processing by
+ * unregister_dropped_devices().
+ */
+void device_pm_schedule_removal(struct device *dev)
+{
+	pr_debug("PM: Preparing for removal: %s:%s\n",
+		 dev->bus ? dev->bus->name : "No Bus",
+		 kobject_name(&dev->kobj));
+	mutex_lock(&dpm_list_mtx);
+	list_move_tail(&dev->power.entry, &dpm_destroy);
+	mutex_unlock(&dpm_list_mtx);
+}
+
+/**
+ * pm_sleep_lock - mutual exclusion for registration and suspend
+ *
+ * Returns 0 if no suspend is underway and device registration
+ * may proceed, otherwise -EBUSY.
+ */
+int pm_sleep_lock(void)
+{
+	if (down_read_trylock(&pm_sleep_rwsem))
+		return 0;
+
+	return -EBUSY;
+}
+
+/**
+ * pm_sleep_unlock - mutual exclusion for registration and suspend
+ *
+ * This routine undoes the effect of device_pm_add_lock
+ * when a device's registration is complete.
+ */
+void pm_sleep_unlock(void)
+{
+	up_read(&pm_sleep_rwsem);
 }
 
 
 /*------------------------- Resume routines -------------------------*/
 
 /**
- * resume_device - Restore state for one device.
+ * resume_device_early - Power on one device (early resume).
  * @dev: Device.
  *
+ * Must be called with interrupts disabled.
 */
-
-static int resume_device(struct device * dev)
+static int resume_device_early(struct device *dev)
 {
 	int error = 0;
 
 	TRACE_DEVICE(dev);
 	TRACE_RESUME(0);
 
-	down(&dev->sem);
+	if (dev->bus && dev->bus->resume_early) {
+		dev_dbg(dev, "EARLY resume\n");
+		error = dev->bus->resume_early(dev);
+	}
+
+	TRACE_RESUME(error);
+	return error;
+}
+
+/**
+ * dpm_power_up - Power on all regular (non-sysdev) devices.
+ *
+ * Walk the dpm_off_irq list and power each device up. This
+ * is used for devices that required they be powered down with
+ * interrupts disabled. As devices are powered on, they are moved
+ * to the dpm_off list.
+ *
+ * Must be called with interrupts disabled and only one CPU running.
+ */
+static void dpm_power_up(void)
+{
+
+	while (!list_empty(&dpm_off_irq)) {
+		struct list_head *entry = dpm_off_irq.next;
+		struct device *dev = to_device(entry);
+
+		list_move_tail(entry, &dpm_off);
+		resume_device_early(dev);
+	}
+}
+
+/**
+ * device_power_up - Turn on all devices that need special attention.
+ *
+ * Power on system devices, then devices that required we shut them down
+ * with interrupts disabled.
+ *
+ * Must be called with interrupts disabled.
+ */
+void device_power_up(void)
+{
+	sysdev_resume();
+	dpm_power_up();
+}
+EXPORT_SYMBOL_GPL(device_power_up);
+
+/**
+ * resume_device - Restore state for one device.
+ * @dev: Device.
+ *
+ */
+static int resume_device(struct device *dev)
+{
+	int error = 0;
+
+	TRACE_DEVICE(dev);
+	TRACE_RESUME(0);
 
 	if (dev->bus && dev->bus->resume) {
 		dev_dbg(dev,"resuming\n");
@@ -92,126 +244,94 @@ static int resume_device(struct device * dev)
 		error = dev->class->resume(dev);
 	}
 
-	up(&dev->sem);
-
 	TRACE_RESUME(error);
 	return error;
 }
 
-
-static int resume_device_early(struct device * dev)
-{
-	int error = 0;
-
-	TRACE_DEVICE(dev);
-	TRACE_RESUME(0);
-	if (dev->bus && dev->bus->resume_early) {
-		dev_dbg(dev,"EARLY resume\n");
-		error = dev->bus->resume_early(dev);
-	}
-	TRACE_RESUME(error);
-	return error;
-}
-
-/*
- * Resume the devices that have either not gone through
- * the late suspend, or that did go through it but also
- * went through the early resume
+/**
+ * dpm_resume - Resume every device.
+ *
+ * Resume the devices that have either not gone through
+ * the late suspend, or that did go through it but also
+ * went through the early resume.
+ *
+ * Take devices from the dpm_off_list, resume them,
+ * and put them on the dpm_locked list.
 */
 static void dpm_resume(void)
 {
 	mutex_lock(&dpm_list_mtx);
 	while(!list_empty(&dpm_off)) {
-		struct list_head * entry = dpm_off.next;
-		struct device * dev = to_device(entry);
-
-		get_device(dev);
-		list_move_tail(entry, &dpm_active);
+		struct list_head *entry = dpm_off.next;
+		struct device *dev = to_device(entry);
 
+		list_move_tail(entry, &dpm_locked);
 		mutex_unlock(&dpm_list_mtx);
 		resume_device(dev);
 		mutex_lock(&dpm_list_mtx);
-		put_device(dev);
 	}
 	mutex_unlock(&dpm_list_mtx);
 }
 
-
 /**
- * device_resume - Restore state of each device in system.
+ * unlock_all_devices - Release each device's semaphore
  *
- * Walk the dpm_off list, remove each entry, resume the device,
- * then add it to the dpm_active list.
+ * Go through the dpm_off list. Put each device on the dpm_active
+ * list and unlock it.
 */
-
-void device_resume(void)
+static void unlock_all_devices(void)
 {
-	might_sleep();
-	mutex_lock(&dpm_mtx);
-	dpm_resume();
-	mutex_unlock(&dpm_mtx);
-}
-
-EXPORT_SYMBOL_GPL(device_resume);
+	mutex_lock(&dpm_list_mtx);
+	while (!list_empty(&dpm_locked)) {
+		struct list_head *entry = dpm_locked.prev;
+		struct device *dev = to_device(entry);
 
+		list_move(entry, &dpm_active);
+		up(&dev->sem);
+	}
+	mutex_unlock(&dpm_list_mtx);
+}
 
 /**
- * dpm_power_up - Power on some devices.
- *
- * Walk the dpm_off_irq list and power each device up. This
- * is used for devices that required they be powered down with
- * interrupts disabled. As devices are powered on, they are moved
- * to the dpm_active list.
+ * unregister_dropped_devices - Unregister devices scheduled for removal
  *
- * Interrupts must be disabled when calling this.
+ * Unregister all devices on the dpm_destroy list.
 */
-
-static void dpm_power_up(void)
+static void unregister_dropped_devices(void)
 {
-	while(!list_empty(&dpm_off_irq)) {
-		struct list_head * entry = dpm_off_irq.next;
-		struct device * dev = to_device(entry);
+	mutex_lock(&dpm_list_mtx);
+	while (!list_empty(&dpm_destroy)) {
+		struct list_head *entry = dpm_destroy.next;
+		struct device *dev = to_device(entry);
 
-		list_move_tail(entry, &dpm_off);
-		resume_device_early(dev);
+		up(&dev->sem);
+		mutex_unlock(&dpm_list_mtx);
+		/* This also removes the device from the list */
+		device_unregister(dev);
+		mutex_lock(&dpm_list_mtx);
 	}
+	mutex_unlock(&dpm_list_mtx);
 }
 
-
 /**
- * device_power_up - Turn on all devices that need special attention.
+ * device_resume - Restore state of each device in system.
  *
- * Power on system devices then devices that required we shut them down
- * with interrupts disabled.
- * Called with interrupts disabled.
+ * Resume all the devices, unlock them all, and allow new
+ * devices to be registered once again.
 */
-
-void device_power_up(void)
+void device_resume(void)
 {
-	sysdev_resume();
-	dpm_power_up();
+	might_sleep();
+	dpm_resume();
+	unlock_all_devices();
+	unregister_dropped_devices();
+	up_write(&pm_sleep_rwsem);
 }
-
-EXPORT_SYMBOL_GPL(device_power_up);
+EXPORT_SYMBOL_GPL(device_resume);
 
 
 /*------------------------- Suspend routines -------------------------*/
 
-/*
- * The entries in the dpm_active list are in a depth first order, simply
- * because children are guaranteed to be discovered after parents, and
- * are inserted at the back of the list on discovery.
- *
- * All list on the suspend path are done in reverse order, so we operate
- * on the leaves of the device tree (or forests, depending on how you want
- * to look at it ;) first. As nodes are removed from the back of the list,
- * they are inserted into the front of their destintation lists.
- *
- * Things are the reverse on the resume path - iterations are done in
- * forward order, and nodes are inserted at the back of their destination
- * lists. This way, the ancestors will be accessed before their descendents.
- */
-
 static inline char *suspend_verb(u32 event)
 {
 	switch (event) {
@@ -222,7 +342,6 @@ static inline char *suspend_verb(u32 event)
 	}
 }
 
-
 static void
 suspend_device_dbg(struct device *dev, pm_message_t state, char *info)
 {
@@ -232,16 +351,73 @@ suspend_device_dbg(struct device *dev, pm_message_t state, char *info)
 }
 
 /**
- * suspend_device - Save state of one device.
+ * suspend_device_late - Shut down one device (late suspend).
  * @dev: Device.
  * @state: Power state device is entering.
+ *
+ * This is called with interrupts off and only a single CPU running.
 */
+static int suspend_device_late(struct device *dev, pm_message_t state)
+{
+	int error = 0;
 
-static int suspend_device(struct device * dev, pm_message_t state)
+	if (dev->bus && dev->bus->suspend_late) {
+		suspend_device_dbg(dev, state, "LATE ");
+		error = dev->bus->suspend_late(dev, state);
+		suspend_report_result(dev->bus->suspend_late, error);
+	}
+	return error;
+}
+
+/**
+ * device_power_down - Shut down special devices.
+ * @state: Power state to enter.
+ *
+ * Power down devices that require interrupts to be disabled
+ * and move them from the dpm_off list to the dpm_off_irq list.
+ * Then power down system devices.
+ *
+ * Must be called with interrupts disabled and only one CPU running.
+ */
+int device_power_down(pm_message_t state)
+{
+	int error = 0;
+
+	while (!list_empty(&dpm_off)) {
+		struct list_head *entry = dpm_off.prev;
+		struct device *dev = to_device(entry);
+
+		list_del_init(&dev->power.entry);
+		error = suspend_device_late(dev, state);
+		if (error) {
+			printk(KERN_ERR "Could not power down device %s: "
+					"error %d\n",
+					kobject_name(&dev->kobj), error);
+			if (list_empty(&dev->power.entry))
+				list_add(&dev->power.entry, &dpm_off);
+			break;
+		}
+		if (list_empty(&dev->power.entry))
+			list_add(&dev->power.entry, &dpm_off_irq);
+	}
+
+	if (!error)
+		error = sysdev_suspend(state);
+	if (error)
+		dpm_power_up();
+	return error;
+}
+EXPORT_SYMBOL_GPL(device_power_down);
+
+/**
+ * suspend_device - Save state of one device.
+ * @dev: Device.
+ * @state: Power state device is entering.
+ */
+int suspend_device(struct device *dev, pm_message_t state)
 {
 	int error = 0;
 
-	down(&dev->sem);
 	if (dev->power.power_state.event) {
 		dev_dbg(dev, "PM: suspend %d-->%d\n",
 			dev->power.power_state.event, state.event);
@@ -264,123 +440,105 @@ static int suspend_device(struct device * dev, pm_message_t state)
 		error = dev->bus->suspend(dev, state);
 		suspend_report_result(dev->bus->suspend, error);
 	}
-	up(&dev->sem);
-	return error;
-}
-
-
-/*
- * This is called with interrupts off, only a single CPU
- * running. We can't acquire a mutex or semaphore (and we don't
- * need the protection)
- */
-static int suspend_device_late(struct device *dev, pm_message_t state)
-{
-	int error = 0;
-
-	if (dev->bus && dev->bus->suspend_late) {
-		suspend_device_dbg(dev, state, "LATE ");
-		error = dev->bus->suspend_late(dev, state);
-		suspend_report_result(dev->bus->suspend_late, error);
-	}
 	return error;
 }
 
 /**
- * device_suspend - Save state and stop all devices in system.
+ * dpm_suspend - Suspend every device.
  * @state: Power state to put each device in.
  *
- * Walk the dpm_active list, call ->suspend() for each device, and move
- * it to the dpm_off list.
+ * Walk the dpm_locked list. Suspend each device and move it
+ * to the dpm_off list.
  *
 * (For historical reasons, if it returns -EAGAIN, that used to mean
 * that the device would be called again with interrupts disabled.
 * These days, we use the "suspend_late()" callback for that, so we
 * print a warning and consider it an error).
- *
- * If we get a different error, try and back out.
- *
- * If we hit a failure with any of the devices, call device_resume()
- * above to bring the suspended devices back to life.
- *
 */
-
-int device_suspend(pm_message_t state)
+static int dpm_suspend(pm_message_t state)
 {
 	int error = 0;
 
-	might_sleep();
-	mutex_lock(&dpm_mtx);
 	mutex_lock(&dpm_list_mtx);
-	while (!list_empty(&dpm_active) && error == 0) {
-		struct list_head * entry = dpm_active.prev;
-		struct device * dev = to_device(entry);
+	while (!list_empty(&dpm_locked)) {
+		struct list_head *entry = dpm_locked.prev;
+		struct device *dev = to_device(entry);
 
-		get_device(dev);
+		list_del_init(&dev->power.entry);
 		mutex_unlock(&dpm_list_mtx);
-
 		error = suspend_device(dev, state);
-
-		mutex_lock(&dpm_list_mtx);
-
-		/* Check if the device got removed */
-		if (!list_empty(&dev->power.entry)) {
-			/* Move it to the dpm_off list */
-			if (!error)
-				list_move(&dev->power.entry, &dpm_off);
-		}
-		if (error)
+		if (error) {
 			printk(KERN_ERR "Could not suspend device %s: "
 					"error %d%s\n",
-				kobject_name(&dev->kobj), error,
-				error == -EAGAIN ? " (please convert to suspend_late)" : "");
-		put_device(dev);
+					kobject_name(&dev->kobj),
+					error,
+					(error == -EAGAIN ?
+					" (please convert to suspend_late)" :
+					""));
+			mutex_lock(&dpm_list_mtx);
+			if (list_empty(&dev->power.entry))
+				list_add(&dev->power.entry, &dpm_locked);
+			mutex_unlock(&dpm_list_mtx);
+			break;
+		}
+		mutex_lock(&dpm_list_mtx);
+		if (list_empty(&dev->power.entry))
+			list_add(&dev->power.entry, &dpm_off);
 	}
 	mutex_unlock(&dpm_list_mtx);
-	if (error)
-		dpm_resume();
 
-	mutex_unlock(&dpm_mtx);
 	return error;
 }
 
-EXPORT_SYMBOL_GPL(device_suspend);
-
 /**
- * device_power_down - Shut down special devices.
- * @state: Power state to enter.
+ * lock_all_devices - Acquire every device's semaphore
  *
- * Walk the dpm_off_irq list, calling ->power_down() for each device that
- * couldn't power down the device with interrupts enabled. When we're
- * done, power down system devices.
+ * Go through the dpm_active list. Carefully lock each device's
+ * semaphore and put it in on the dpm_locked list.
 */
-
-int device_power_down(pm_message_t state)
+static void lock_all_devices(void)
 {
-	int error = 0;
-	struct device * dev;
+	mutex_lock(&dpm_list_mtx);
+	while (!list_empty(&dpm_active)) {
+		struct list_head *entry = dpm_active.next;
+		struct device *dev = to_device(entry);
 
-	while (!list_empty(&dpm_off)) {
-		struct list_head * entry = dpm_off.prev;
+		/* Required locking order is dev->sem first,
+		 * then dpm_list_mutex. Hence this awkward code.
+		 */
+		get_device(dev);
+		mutex_unlock(&dpm_list_mtx);
+		down(&dev->sem);
+		mutex_lock(&dpm_list_mtx);
 
-		dev = to_device(entry);
-		error = suspend_device_late(dev, state);
-		if (error)
-			goto Error;
-		list_move(&dev->power.entry, &dpm_off_irq);
+		if (list_empty(entry))
+			up(&dev->sem);	/* Device was removed */
+		else
+			list_move_tail(entry, &dpm_locked);
+		put_device(dev);
 	}
+	mutex_unlock(&dpm_list_mtx);
+}
+
+/**
+ * device_suspend - Save state and stop all devices in system.
+ *
+ * Prevent new devices from being registered, then lock all devices
+ * and suspend them.
+ */
+int device_suspend(pm_message_t state)
+{
+	int error;
 
-	error = sysdev_suspend(state);
- Done:
+	might_sleep();
+	down_write(&pm_sleep_rwsem);
+	lock_all_devices();
+	error = dpm_suspend(state);
+	if (error)
+		device_resume();
 	return error;
- Error:
-	printk(KERN_ERR "Could not power down device %s: "
-		"error %d\n", kobject_name(&dev->kobj), error);
-	dpm_power_up();
-	goto Done;
 }
-
-EXPORT_SYMBOL_GPL(device_power_down);
+EXPORT_SYMBOL_GPL(device_suspend);
 
 void __suspend_report_result(const char *function, void *fn, int ret)
 {
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index 379da4e958e0..10c20840395e 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -20,6 +20,9 @@ static inline struct device *to_device(struct list_head *entry)
 
 extern void device_pm_add(struct device *);
 extern void device_pm_remove(struct device *);
+extern void device_pm_schedule_removal(struct device *);
+extern int pm_sleep_lock(void);
+extern void pm_sleep_unlock(void);
 
 #else /* CONFIG_PM_SLEEP */
 
@@ -32,6 +35,15 @@ static inline void device_pm_remove(struct device *dev)
 {
 }
 
+static inline int pm_sleep_lock(void)
+{
+	return 0;
+}
+
+static inline void pm_sleep_unlock(void)
+{
+}
+
 #endif
 
 #ifdef CONFIG_PM
diff --git a/include/linux/device.h b/include/linux/device.h
index 2e15822fe409..cf4ae5c5d193 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -521,6 +521,14 @@ extern struct device *device_create(struct class *cls, struct device *parent,
 				    dev_t devt, const char *fmt, ...)
 				    __attribute__((format(printf,4,5)));
 extern void device_destroy(struct class *cls, dev_t devt);
+#ifdef CONFIG_PM_SLEEP
+extern void destroy_suspended_device(struct class *cls, dev_t devt);
+#else /* !CONFIG_PM_SLEEP */
+static inline void destroy_suspended_device(struct class *cls, dev_t devt)
+{
+	device_destroy(cls, devt);
+}
+#endif /* !CONFIG_PM_SLEEP */
 
 /*
  * Platform "fixup" functions - allow the platform to have their say
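
[Editorial note, not part of the patch] A second hedged sketch of the caller-side consequence: with this change device_add() can return -EBUSY while a system sleep transition is in progress, so registration paths that may race with suspend should back out cleanly. example_add_device() is a hypothetical caller; only device_add()/put_device() and the -EBUSY behavior come from the patch.

#include <linux/device.h>
#include <linux/errno.h>

/* Hypothetical caller: dev is assumed to be initialized and named already. */
static int example_add_device(struct device *dev)
{
	int error;

	error = device_add(dev);
	if (error) {
		/*
		 * With this patch, -EBUSY means a suspend or resume is in
		 * progress; device_add() has already printed a warning and
		 * a stack dump. Drop the reference the caller holds from
		 * device_initialize() and report the error so the operation
		 * can be retried after the transition, or abandoned.
		 */
		put_device(dev);
		return error;
	}
	return 0;
}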