about summary refs log tree commit diff stats
path: root/drivers/base
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/base')
-rw-r--r--drivers/base/firmware_class.c129
-rw-r--r--drivers/base/platform.c36
-rw-r--r--drivers/base/power/main.c94
-rw-r--r--drivers/base/sys.c16
4 files changed, 165 insertions, 110 deletions
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index d3a59c688fe4..8a267c427629 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -17,7 +17,7 @@
17#include <linux/bitops.h> 17#include <linux/bitops.h>
18#include <linux/mutex.h> 18#include <linux/mutex.h>
19#include <linux/kthread.h> 19#include <linux/kthread.h>
20 20#include <linux/highmem.h>
21#include <linux/firmware.h> 21#include <linux/firmware.h>
22#include "base.h" 22#include "base.h"
23 23
@@ -45,7 +45,10 @@ struct firmware_priv {
45 struct bin_attribute attr_data; 45 struct bin_attribute attr_data;
46 struct firmware *fw; 46 struct firmware *fw;
47 unsigned long status; 47 unsigned long status;
48 int alloc_size; 48 struct page **pages;
49 int nr_pages;
50 int page_array_size;
51 const char *vdata;
49 struct timer_list timeout; 52 struct timer_list timeout;
50}; 53};
51 54
@@ -122,6 +125,10 @@ static ssize_t firmware_loading_show(struct device *dev,
122 return sprintf(buf, "%d\n", loading); 125 return sprintf(buf, "%d\n", loading);
123} 126}
124 127
128/* Some architectures don't have PAGE_KERNEL_RO */
129#ifndef PAGE_KERNEL_RO
130#define PAGE_KERNEL_RO PAGE_KERNEL
131#endif
125/** 132/**
126 * firmware_loading_store - set value in the 'loading' control file 133 * firmware_loading_store - set value in the 'loading' control file
127 * @dev: device pointer 134 * @dev: device pointer
@@ -141,6 +148,7 @@ static ssize_t firmware_loading_store(struct device *dev,
141{ 148{
142 struct firmware_priv *fw_priv = dev_get_drvdata(dev); 149 struct firmware_priv *fw_priv = dev_get_drvdata(dev);
143 int loading = simple_strtol(buf, NULL, 10); 150 int loading = simple_strtol(buf, NULL, 10);
151 int i;
144 152
145 switch (loading) { 153 switch (loading) {
146 case 1: 154 case 1:
@@ -151,13 +159,30 @@ static ssize_t firmware_loading_store(struct device *dev,
151 } 159 }
152 vfree(fw_priv->fw->data); 160 vfree(fw_priv->fw->data);
153 fw_priv->fw->data = NULL; 161 fw_priv->fw->data = NULL;
162 for (i = 0; i < fw_priv->nr_pages; i++)
163 __free_page(fw_priv->pages[i]);
164 kfree(fw_priv->pages);
165 fw_priv->pages = NULL;
166 fw_priv->page_array_size = 0;
167 fw_priv->nr_pages = 0;
154 fw_priv->fw->size = 0; 168 fw_priv->fw->size = 0;
155 fw_priv->alloc_size = 0;
156 set_bit(FW_STATUS_LOADING, &fw_priv->status); 169 set_bit(FW_STATUS_LOADING, &fw_priv->status);
157 mutex_unlock(&fw_lock); 170 mutex_unlock(&fw_lock);
158 break; 171 break;
159 case 0: 172 case 0:
160 if (test_bit(FW_STATUS_LOADING, &fw_priv->status)) { 173 if (test_bit(FW_STATUS_LOADING, &fw_priv->status)) {
174 vfree(fw_priv->fw->data);
175 fw_priv->fw->data = vmap(fw_priv->pages,
176 fw_priv->nr_pages,
177 0, PAGE_KERNEL_RO);
178 if (!fw_priv->fw->data) {
179 dev_err(dev, "%s: vmap() failed\n", __func__);
180 goto err;
181 }
182 /* Pages will be freed by vfree() */
183 fw_priv->pages = NULL;
184 fw_priv->page_array_size = 0;
185 fw_priv->nr_pages = 0;
161 complete(&fw_priv->completion); 186 complete(&fw_priv->completion);
162 clear_bit(FW_STATUS_LOADING, &fw_priv->status); 187 clear_bit(FW_STATUS_LOADING, &fw_priv->status);
163 break; 188 break;
@@ -167,6 +192,7 @@ static ssize_t firmware_loading_store(struct device *dev,
167 dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading); 192 dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading);
168 /* fallthrough */ 193 /* fallthrough */
169 case -1: 194 case -1:
195 err:
170 fw_load_abort(fw_priv); 196 fw_load_abort(fw_priv);
171 break; 197 break;
172 } 198 }
@@ -191,8 +217,28 @@ firmware_data_read(struct kobject *kobj, struct bin_attribute *bin_attr,
191 ret_count = -ENODEV; 217 ret_count = -ENODEV;
192 goto out; 218 goto out;
193 } 219 }
194 ret_count = memory_read_from_buffer(buffer, count, &offset, 220 if (offset > fw->size)
195 fw->data, fw->size); 221 return 0;
222 if (count > fw->size - offset)
223 count = fw->size - offset;
224
225 ret_count = count;
226
227 while (count) {
228 void *page_data;
229 int page_nr = offset >> PAGE_SHIFT;
230 int page_ofs = offset & (PAGE_SIZE-1);
231 int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);
232
233 page_data = kmap(fw_priv->pages[page_nr]);
234
235 memcpy(buffer, page_data + page_ofs, page_cnt);
236
237 kunmap(fw_priv->pages[page_nr]);
238 buffer += page_cnt;
239 offset += page_cnt;
240 count -= page_cnt;
241 }
196out: 242out:
197 mutex_unlock(&fw_lock); 243 mutex_unlock(&fw_lock);
198 return ret_count; 244 return ret_count;
@@ -201,27 +247,39 @@ out:
201static int 247static int
202fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size) 248fw_realloc_buffer(struct firmware_priv *fw_priv, int min_size)
203{ 249{
204 u8 *new_data; 250 int pages_needed = ALIGN(min_size, PAGE_SIZE) >> PAGE_SHIFT;
205 int new_size = fw_priv->alloc_size; 251
252 /* If the array of pages is too small, grow it... */
253 if (fw_priv->page_array_size < pages_needed) {
254 int new_array_size = max(pages_needed,
255 fw_priv->page_array_size * 2);
256 struct page **new_pages;
257
258 new_pages = kmalloc(new_array_size * sizeof(void *),
259 GFP_KERNEL);
260 if (!new_pages) {
261 fw_load_abort(fw_priv);
262 return -ENOMEM;
263 }
264 memcpy(new_pages, fw_priv->pages,
265 fw_priv->page_array_size * sizeof(void *));
266 memset(&new_pages[fw_priv->page_array_size], 0, sizeof(void *) *
267 (new_array_size - fw_priv->page_array_size));
268 kfree(fw_priv->pages);
269 fw_priv->pages = new_pages;
270 fw_priv->page_array_size = new_array_size;
271 }
206 272
207 if (min_size <= fw_priv->alloc_size) 273 while (fw_priv->nr_pages < pages_needed) {
208 return 0; 274 fw_priv->pages[fw_priv->nr_pages] =
275 alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
209 276
210 new_size = ALIGN(min_size, PAGE_SIZE); 277 if (!fw_priv->pages[fw_priv->nr_pages]) {
211 new_data = vmalloc(new_size); 278 fw_load_abort(fw_priv);
212 if (!new_data) { 279 return -ENOMEM;
213 printk(KERN_ERR "%s: unable to alloc buffer\n", __func__); 280 }
214 /* Make sure that we don't keep incomplete data */ 281 fw_priv->nr_pages++;
215 fw_load_abort(fw_priv);
216 return -ENOMEM;
217 }
218 fw_priv->alloc_size = new_size;
219 if (fw_priv->fw->data) {
220 memcpy(new_data, fw_priv->fw->data, fw_priv->fw->size);
221 vfree(fw_priv->fw->data);
222 } 282 }
223 fw_priv->fw->data = new_data;
224 BUG_ON(min_size > fw_priv->alloc_size);
225 return 0; 283 return 0;
226} 284}
227 285
@@ -258,10 +316,25 @@ firmware_data_write(struct kobject *kobj, struct bin_attribute *bin_attr,
258 if (retval) 316 if (retval)
259 goto out; 317 goto out;
260 318
261 memcpy((u8 *)fw->data + offset, buffer, count);
262
263 fw->size = max_t(size_t, offset + count, fw->size);
264 retval = count; 319 retval = count;
320
321 while (count) {
322 void *page_data;
323 int page_nr = offset >> PAGE_SHIFT;
324 int page_ofs = offset & (PAGE_SIZE - 1);
325 int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);
326
327 page_data = kmap(fw_priv->pages[page_nr]);
328
329 memcpy(page_data + page_ofs, buffer, page_cnt);
330
331 kunmap(fw_priv->pages[page_nr]);
332 buffer += page_cnt;
333 offset += page_cnt;
334 count -= page_cnt;
335 }
336
337 fw->size = max_t(size_t, offset, fw->size);
265out: 338out:
266 mutex_unlock(&fw_lock); 339 mutex_unlock(&fw_lock);
267 return retval; 340 return retval;
@@ -277,7 +350,11 @@ static struct bin_attribute firmware_attr_data_tmpl = {
277static void fw_dev_release(struct device *dev) 350static void fw_dev_release(struct device *dev)
278{ 351{
279 struct firmware_priv *fw_priv = dev_get_drvdata(dev); 352 struct firmware_priv *fw_priv = dev_get_drvdata(dev);
353 int i;
280 354
355 for (i = 0; i < fw_priv->nr_pages; i++)
356 __free_page(fw_priv->pages[i]);
357 kfree(fw_priv->pages);
281 kfree(fw_priv); 358 kfree(fw_priv);
282 kfree(dev); 359 kfree(dev);
283 360
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 8b4708e06244..ead3f64c41d0 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -469,22 +469,6 @@ static void platform_drv_shutdown(struct device *_dev)
469 drv->shutdown(dev); 469 drv->shutdown(dev);
470} 470}
471 471
472static int platform_drv_suspend(struct device *_dev, pm_message_t state)
473{
474 struct platform_driver *drv = to_platform_driver(_dev->driver);
475 struct platform_device *dev = to_platform_device(_dev);
476
477 return drv->suspend(dev, state);
478}
479
480static int platform_drv_resume(struct device *_dev)
481{
482 struct platform_driver *drv = to_platform_driver(_dev->driver);
483 struct platform_device *dev = to_platform_device(_dev);
484
485 return drv->resume(dev);
486}
487
488/** 472/**
489 * platform_driver_register 473 * platform_driver_register
490 * @drv: platform driver structure 474 * @drv: platform driver structure
@@ -498,10 +482,10 @@ int platform_driver_register(struct platform_driver *drv)
498 drv->driver.remove = platform_drv_remove; 482 drv->driver.remove = platform_drv_remove;
499 if (drv->shutdown) 483 if (drv->shutdown)
500 drv->driver.shutdown = platform_drv_shutdown; 484 drv->driver.shutdown = platform_drv_shutdown;
501 if (drv->suspend) 485 if (drv->suspend || drv->resume)
502 drv->driver.suspend = platform_drv_suspend; 486 pr_warning("Platform driver '%s' needs updating - please use "
503 if (drv->resume) 487 "dev_pm_ops\n", drv->driver.name);
504 drv->driver.resume = platform_drv_resume; 488
505 return driver_register(&drv->driver); 489 return driver_register(&drv->driver);
506} 490}
507EXPORT_SYMBOL_GPL(platform_driver_register); 491EXPORT_SYMBOL_GPL(platform_driver_register);
@@ -633,10 +617,12 @@ static int platform_match(struct device *dev, struct device_driver *drv)
633 617
634static int platform_legacy_suspend(struct device *dev, pm_message_t mesg) 618static int platform_legacy_suspend(struct device *dev, pm_message_t mesg)
635{ 619{
620 struct platform_driver *pdrv = to_platform_driver(dev->driver);
621 struct platform_device *pdev = to_platform_device(dev);
636 int ret = 0; 622 int ret = 0;
637 623
638 if (dev->driver && dev->driver->suspend) 624 if (dev->driver && pdrv->suspend)
639 ret = dev->driver->suspend(dev, mesg); 625 ret = pdrv->suspend(pdev, mesg);
640 626
641 return ret; 627 return ret;
642} 628}
@@ -667,10 +653,12 @@ static int platform_legacy_resume_early(struct device *dev)
667 653
668static int platform_legacy_resume(struct device *dev) 654static int platform_legacy_resume(struct device *dev)
669{ 655{
656 struct platform_driver *pdrv = to_platform_driver(dev->driver);
657 struct platform_device *pdev = to_platform_device(dev);
670 int ret = 0; 658 int ret = 0;
671 659
672 if (dev->driver && dev->driver->resume) 660 if (dev->driver && pdrv->resume)
673 ret = dev->driver->resume(dev); 661 ret = pdrv->resume(pdev);
674 662
675 return ret; 663 return ret;
676} 664}
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 3e4bc699bc0f..fae725458981 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -315,13 +315,13 @@ static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
315/*------------------------- Resume routines -------------------------*/ 315/*------------------------- Resume routines -------------------------*/
316 316
317/** 317/**
318 * resume_device_noirq - Power on one device (early resume). 318 * device_resume_noirq - Power on one device (early resume).
319 * @dev: Device. 319 * @dev: Device.
320 * @state: PM transition of the system being carried out. 320 * @state: PM transition of the system being carried out.
321 * 321 *
322 * Must be called with interrupts disabled. 322 * Must be called with interrupts disabled.
323 */ 323 */
324static int resume_device_noirq(struct device *dev, pm_message_t state) 324static int device_resume_noirq(struct device *dev, pm_message_t state)
325{ 325{
326 int error = 0; 326 int error = 0;
327 327
@@ -334,9 +334,6 @@ static int resume_device_noirq(struct device *dev, pm_message_t state)
334 if (dev->bus->pm) { 334 if (dev->bus->pm) {
335 pm_dev_dbg(dev, state, "EARLY "); 335 pm_dev_dbg(dev, state, "EARLY ");
336 error = pm_noirq_op(dev, dev->bus->pm, state); 336 error = pm_noirq_op(dev, dev->bus->pm, state);
337 } else if (dev->bus->resume_early) {
338 pm_dev_dbg(dev, state, "legacy EARLY ");
339 error = dev->bus->resume_early(dev);
340 } 337 }
341 End: 338 End:
342 TRACE_RESUME(error); 339 TRACE_RESUME(error);
@@ -344,16 +341,16 @@ static int resume_device_noirq(struct device *dev, pm_message_t state)
344} 341}
345 342
346/** 343/**
347 * dpm_power_up - Power on all regular (non-sysdev) devices. 344 * dpm_resume_noirq - Power on all regular (non-sysdev) devices.
348 * @state: PM transition of the system being carried out. 345 * @state: PM transition of the system being carried out.
349 * 346 *
350 * Execute the appropriate "noirq resume" callback for all devices marked 347 * Call the "noirq" resume handlers for all devices marked as
351 * as DPM_OFF_IRQ. 348 * DPM_OFF_IRQ and enable device drivers to receive interrupts.
352 * 349 *
353 * Must be called under dpm_list_mtx. Device drivers should not receive 350 * Must be called under dpm_list_mtx. Device drivers should not receive
354 * interrupts while it's being executed. 351 * interrupts while it's being executed.
355 */ 352 */
356static void dpm_power_up(pm_message_t state) 353void dpm_resume_noirq(pm_message_t state)
357{ 354{
358 struct device *dev; 355 struct device *dev;
359 356
@@ -363,33 +360,21 @@ static void dpm_power_up(pm_message_t state)
363 int error; 360 int error;
364 361
365 dev->power.status = DPM_OFF; 362 dev->power.status = DPM_OFF;
366 error = resume_device_noirq(dev, state); 363 error = device_resume_noirq(dev, state);
367 if (error) 364 if (error)
368 pm_dev_err(dev, state, " early", error); 365 pm_dev_err(dev, state, " early", error);
369 } 366 }
370 mutex_unlock(&dpm_list_mtx); 367 mutex_unlock(&dpm_list_mtx);
371}
372
373/**
374 * device_power_up - Turn on all devices that need special attention.
375 * @state: PM transition of the system being carried out.
376 *
377 * Call the "early" resume handlers and enable device drivers to receive
378 * interrupts.
379 */
380void device_power_up(pm_message_t state)
381{
382 dpm_power_up(state);
383 resume_device_irqs(); 368 resume_device_irqs();
384} 369}
385EXPORT_SYMBOL_GPL(device_power_up); 370EXPORT_SYMBOL_GPL(dpm_resume_noirq);
386 371
387/** 372/**
388 * resume_device - Restore state for one device. 373 * device_resume - Restore state for one device.
389 * @dev: Device. 374 * @dev: Device.
390 * @state: PM transition of the system being carried out. 375 * @state: PM transition of the system being carried out.
391 */ 376 */
392static int resume_device(struct device *dev, pm_message_t state) 377static int device_resume(struct device *dev, pm_message_t state)
393{ 378{
394 int error = 0; 379 int error = 0;
395 380
@@ -414,9 +399,6 @@ static int resume_device(struct device *dev, pm_message_t state)
414 if (dev->type->pm) { 399 if (dev->type->pm) {
415 pm_dev_dbg(dev, state, "type "); 400 pm_dev_dbg(dev, state, "type ");
416 error = pm_op(dev, dev->type->pm, state); 401 error = pm_op(dev, dev->type->pm, state);
417 } else if (dev->type->resume) {
418 pm_dev_dbg(dev, state, "legacy type ");
419 error = dev->type->resume(dev);
420 } 402 }
421 if (error) 403 if (error)
422 goto End; 404 goto End;
@@ -462,7 +444,7 @@ static void dpm_resume(pm_message_t state)
462 dev->power.status = DPM_RESUMING; 444 dev->power.status = DPM_RESUMING;
463 mutex_unlock(&dpm_list_mtx); 445 mutex_unlock(&dpm_list_mtx);
464 446
465 error = resume_device(dev, state); 447 error = device_resume(dev, state);
466 448
467 mutex_lock(&dpm_list_mtx); 449 mutex_lock(&dpm_list_mtx);
468 if (error) 450 if (error)
@@ -480,11 +462,11 @@ static void dpm_resume(pm_message_t state)
480} 462}
481 463
482/** 464/**
483 * complete_device - Complete a PM transition for given device 465 * device_complete - Complete a PM transition for given device
484 * @dev: Device. 466 * @dev: Device.
485 * @state: PM transition of the system being carried out. 467 * @state: PM transition of the system being carried out.
486 */ 468 */
487static void complete_device(struct device *dev, pm_message_t state) 469static void device_complete(struct device *dev, pm_message_t state)
488{ 470{
489 down(&dev->sem); 471 down(&dev->sem);
490 472
@@ -527,7 +509,7 @@ static void dpm_complete(pm_message_t state)
527 dev->power.status = DPM_ON; 509 dev->power.status = DPM_ON;
528 mutex_unlock(&dpm_list_mtx); 510 mutex_unlock(&dpm_list_mtx);
529 511
530 complete_device(dev, state); 512 device_complete(dev, state);
531 513
532 mutex_lock(&dpm_list_mtx); 514 mutex_lock(&dpm_list_mtx);
533 } 515 }
@@ -540,19 +522,19 @@ static void dpm_complete(pm_message_t state)
540} 522}
541 523
542/** 524/**
543 * device_resume - Restore state of each device in system. 525 * dpm_resume_end - Restore state of each device in system.
544 * @state: PM transition of the system being carried out. 526 * @state: PM transition of the system being carried out.
545 * 527 *
546 * Resume all the devices, unlock them all, and allow new 528 * Resume all the devices, unlock them all, and allow new
547 * devices to be registered once again. 529 * devices to be registered once again.
548 */ 530 */
549void device_resume(pm_message_t state) 531void dpm_resume_end(pm_message_t state)
550{ 532{
551 might_sleep(); 533 might_sleep();
552 dpm_resume(state); 534 dpm_resume(state);
553 dpm_complete(state); 535 dpm_complete(state);
554} 536}
555EXPORT_SYMBOL_GPL(device_resume); 537EXPORT_SYMBOL_GPL(dpm_resume_end);
556 538
557 539
558/*------------------------- Suspend routines -------------------------*/ 540/*------------------------- Suspend routines -------------------------*/
@@ -577,13 +559,13 @@ static pm_message_t resume_event(pm_message_t sleep_state)
577} 559}
578 560
579/** 561/**
580 * suspend_device_noirq - Shut down one device (late suspend). 562 * device_suspend_noirq - Shut down one device (late suspend).
581 * @dev: Device. 563 * @dev: Device.
582 * @state: PM transition of the system being carried out. 564 * @state: PM transition of the system being carried out.
583 * 565 *
584 * This is called with interrupts off and only a single CPU running. 566 * This is called with interrupts off and only a single CPU running.
585 */ 567 */
586static int suspend_device_noirq(struct device *dev, pm_message_t state) 568static int device_suspend_noirq(struct device *dev, pm_message_t state)
587{ 569{
588 int error = 0; 570 int error = 0;
589 571
@@ -593,24 +575,20 @@ static int suspend_device_noirq(struct device *dev, pm_message_t state)
593 if (dev->bus->pm) { 575 if (dev->bus->pm) {
594 pm_dev_dbg(dev, state, "LATE "); 576 pm_dev_dbg(dev, state, "LATE ");
595 error = pm_noirq_op(dev, dev->bus->pm, state); 577 error = pm_noirq_op(dev, dev->bus->pm, state);
596 } else if (dev->bus->suspend_late) {
597 pm_dev_dbg(dev, state, "legacy LATE ");
598 error = dev->bus->suspend_late(dev, state);
599 suspend_report_result(dev->bus->suspend_late, error);
600 } 578 }
601 return error; 579 return error;
602} 580}
603 581
604/** 582/**
605 * device_power_down - Shut down special devices. 583 * dpm_suspend_noirq - Power down all regular (non-sysdev) devices.
606 * @state: PM transition of the system being carried out. 584 * @state: PM transition of the system being carried out.
607 * 585 *
608 * Prevent device drivers from receiving interrupts and call the "late" 586 * Prevent device drivers from receiving interrupts and call the "noirq"
609 * suspend handlers. 587 * suspend handlers.
610 * 588 *
611 * Must be called under dpm_list_mtx. 589 * Must be called under dpm_list_mtx.
612 */ 590 */
613int device_power_down(pm_message_t state) 591int dpm_suspend_noirq(pm_message_t state)
614{ 592{
615 struct device *dev; 593 struct device *dev;
616 int error = 0; 594 int error = 0;
@@ -618,7 +596,7 @@ int device_power_down(pm_message_t state)
618 suspend_device_irqs(); 596 suspend_device_irqs();
619 mutex_lock(&dpm_list_mtx); 597 mutex_lock(&dpm_list_mtx);
620 list_for_each_entry_reverse(dev, &dpm_list, power.entry) { 598 list_for_each_entry_reverse(dev, &dpm_list, power.entry) {
621 error = suspend_device_noirq(dev, state); 599 error = device_suspend_noirq(dev, state);
622 if (error) { 600 if (error) {
623 pm_dev_err(dev, state, " late", error); 601 pm_dev_err(dev, state, " late", error);
624 break; 602 break;
@@ -627,17 +605,17 @@ int device_power_down(pm_message_t state)
627 } 605 }
628 mutex_unlock(&dpm_list_mtx); 606 mutex_unlock(&dpm_list_mtx);
629 if (error) 607 if (error)
630 device_power_up(resume_event(state)); 608 dpm_resume_noirq(resume_event(state));
631 return error; 609 return error;
632} 610}
633EXPORT_SYMBOL_GPL(device_power_down); 611EXPORT_SYMBOL_GPL(dpm_suspend_noirq);
634 612
635/** 613/**
636 * suspend_device - Save state of one device. 614 * device_suspend - Save state of one device.
637 * @dev: Device. 615 * @dev: Device.
638 * @state: PM transition of the system being carried out. 616 * @state: PM transition of the system being carried out.
639 */ 617 */
640static int suspend_device(struct device *dev, pm_message_t state) 618static int device_suspend(struct device *dev, pm_message_t state)
641{ 619{
642 int error = 0; 620 int error = 0;
643 621
@@ -660,10 +638,6 @@ static int suspend_device(struct device *dev, pm_message_t state)
660 if (dev->type->pm) { 638 if (dev->type->pm) {
661 pm_dev_dbg(dev, state, "type "); 639 pm_dev_dbg(dev, state, "type ");
662 error = pm_op(dev, dev->type->pm, state); 640 error = pm_op(dev, dev->type->pm, state);
663 } else if (dev->type->suspend) {
664 pm_dev_dbg(dev, state, "legacy type ");
665 error = dev->type->suspend(dev, state);
666 suspend_report_result(dev->type->suspend, error);
667 } 641 }
668 if (error) 642 if (error)
669 goto End; 643 goto End;
@@ -704,7 +678,7 @@ static int dpm_suspend(pm_message_t state)
704 get_device(dev); 678 get_device(dev);
705 mutex_unlock(&dpm_list_mtx); 679 mutex_unlock(&dpm_list_mtx);
706 680
707 error = suspend_device(dev, state); 681 error = device_suspend(dev, state);
708 682
709 mutex_lock(&dpm_list_mtx); 683 mutex_lock(&dpm_list_mtx);
710 if (error) { 684 if (error) {
@@ -723,11 +697,11 @@ static int dpm_suspend(pm_message_t state)
723} 697}
724 698
725/** 699/**
726 * prepare_device - Execute the ->prepare() callback(s) for given device. 700 * device_prepare - Execute the ->prepare() callback(s) for given device.
727 * @dev: Device. 701 * @dev: Device.
728 * @state: PM transition of the system being carried out. 702 * @state: PM transition of the system being carried out.
729 */ 703 */
730static int prepare_device(struct device *dev, pm_message_t state) 704static int device_prepare(struct device *dev, pm_message_t state)
731{ 705{
732 int error = 0; 706 int error = 0;
733 707
@@ -781,7 +755,7 @@ static int dpm_prepare(pm_message_t state)
781 dev->power.status = DPM_PREPARING; 755 dev->power.status = DPM_PREPARING;
782 mutex_unlock(&dpm_list_mtx); 756 mutex_unlock(&dpm_list_mtx);
783 757
784 error = prepare_device(dev, state); 758 error = device_prepare(dev, state);
785 759
786 mutex_lock(&dpm_list_mtx); 760 mutex_lock(&dpm_list_mtx);
787 if (error) { 761 if (error) {
@@ -807,12 +781,12 @@ static int dpm_prepare(pm_message_t state)
807} 781}
808 782
809/** 783/**
810 * device_suspend - Save state and stop all devices in system. 784 * dpm_suspend_start - Save state and stop all devices in system.
811 * @state: PM transition of the system being carried out. 785 * @state: PM transition of the system being carried out.
812 * 786 *
813 * Prepare and suspend all devices. 787 * Prepare and suspend all devices.
814 */ 788 */
815int device_suspend(pm_message_t state) 789int dpm_suspend_start(pm_message_t state)
816{ 790{
817 int error; 791 int error;
818 792
@@ -822,7 +796,7 @@ int device_suspend(pm_message_t state)
822 error = dpm_suspend(state); 796 error = dpm_suspend(state);
823 return error; 797 return error;
824} 798}
825EXPORT_SYMBOL_GPL(device_suspend); 799EXPORT_SYMBOL_GPL(dpm_suspend_start);
826 800
827void __suspend_report_result(const char *function, void *fn, int ret) 801void __suspend_report_result(const char *function, void *fn, int ret)
828{ 802{
diff --git a/drivers/base/sys.c b/drivers/base/sys.c
index 3236b434b964..9742a78c9fe4 100644
--- a/drivers/base/sys.c
+++ b/drivers/base/sys.c
@@ -343,11 +343,15 @@ static void __sysdev_resume(struct sys_device *dev)
343 /* First, call the class-specific one */ 343 /* First, call the class-specific one */
344 if (cls->resume) 344 if (cls->resume)
345 cls->resume(dev); 345 cls->resume(dev);
346 WARN_ONCE(!irqs_disabled(),
347 "Interrupts enabled after %pF\n", cls->resume);
346 348
347 /* Call auxillary drivers next. */ 349 /* Call auxillary drivers next. */
348 list_for_each_entry(drv, &cls->drivers, entry) { 350 list_for_each_entry(drv, &cls->drivers, entry) {
349 if (drv->resume) 351 if (drv->resume)
350 drv->resume(dev); 352 drv->resume(dev);
353 WARN_ONCE(!irqs_disabled(),
354 "Interrupts enabled after %pF\n", drv->resume);
351 } 355 }
352} 356}
353 357
@@ -377,6 +381,9 @@ int sysdev_suspend(pm_message_t state)
377 if (ret) 381 if (ret)
378 return ret; 382 return ret;
379 383
384 WARN_ONCE(!irqs_disabled(),
385 "Interrupts enabled while suspending system devices\n");
386
380 pr_debug("Suspending System Devices\n"); 387 pr_debug("Suspending System Devices\n");
381 388
382 list_for_each_entry_reverse(cls, &system_kset->list, kset.kobj.entry) { 389 list_for_each_entry_reverse(cls, &system_kset->list, kset.kobj.entry) {
@@ -393,6 +400,9 @@ int sysdev_suspend(pm_message_t state)
393 if (ret) 400 if (ret)
394 goto aux_driver; 401 goto aux_driver;
395 } 402 }
403 WARN_ONCE(!irqs_disabled(),
404 "Interrupts enabled after %pF\n",
405 drv->suspend);
396 } 406 }
397 407
398 /* Now call the generic one */ 408 /* Now call the generic one */
@@ -400,6 +410,9 @@ int sysdev_suspend(pm_message_t state)
400 ret = cls->suspend(sysdev, state); 410 ret = cls->suspend(sysdev, state);
401 if (ret) 411 if (ret)
402 goto cls_driver; 412 goto cls_driver;
413 WARN_ONCE(!irqs_disabled(),
414 "Interrupts enabled after %pF\n",
415 cls->suspend);
403 } 416 }
404 } 417 }
405 } 418 }
@@ -452,6 +465,9 @@ int sysdev_resume(void)
452{ 465{
453 struct sysdev_class *cls; 466 struct sysdev_class *cls;
454 467
468 WARN_ONCE(!irqs_disabled(),
469 "Interrupts enabled while resuming system devices\n");
470
455 pr_debug("Resuming System Devices\n"); 471 pr_debug("Resuming System Devices\n");
456 472
457 list_for_each_entry(cls, &system_kset->list, kset.kobj.entry) { 473 list_for_each_entry(cls, &system_kset->list, kset.kobj.entry) {