Diffstat (limited to 'drivers/base')
 -rw-r--r--  drivers/base/power/domain.c | 551
 1 file changed, 536 insertions, 15 deletions
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index f14ba32818dc..33086e9afaf6 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -21,7 +21,7 @@ static struct generic_pm_domain *dev_to_genpd(struct device *dev)
         if (IS_ERR_OR_NULL(dev->pm_domain))
                 return ERR_PTR(-EINVAL);
 
-        return container_of(dev->pm_domain, struct generic_pm_domain, domain);
+        return pd_to_genpd(dev->pm_domain);
 }
 
 static void genpd_sd_counter_dec(struct generic_pm_domain *genpd)
@@ -46,7 +46,8 @@ static int pm_genpd_poweron(struct generic_pm_domain *genpd)
         mutex_lock(&genpd->parent->lock);
         mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
 
-        if (!genpd->power_is_off)
+        if (!genpd->power_is_off
+            || (genpd->prepared_count > 0 && genpd->suspend_power_off))
                 goto out;
 
         if (genpd->parent && genpd->parent->power_is_off) {
@@ -155,7 +156,7 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
         unsigned int not_suspended;
         int ret;
 
-        if (genpd->power_is_off)
+        if (genpd->power_is_off || genpd->prepared_count > 0)
                 return 0;
 
         if (genpd->sd_count > 0)
@@ -260,6 +261,27 @@ static int pm_genpd_runtime_suspend(struct device *dev)
 }
 
 /**
+ * __pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
+ * @dev: Device to resume.
+ * @genpd: PM domain the device belongs to.
+ */
+static void __pm_genpd_runtime_resume(struct device *dev,
+                                      struct generic_pm_domain *genpd)
+{
+        struct dev_list_entry *dle;
+
+        list_for_each_entry(dle, &genpd->dev_list, node) {
+                if (dle->dev == dev) {
+                        __pm_genpd_restore_device(dle, genpd);
+                        break;
+                }
+        }
+
+        if (genpd->start_device)
+                genpd->start_device(dev);
+}
+
+/**
  * pm_genpd_runtime_resume - Resume a device belonging to I/O PM domain.
  * @dev: Device to resume.
  *
@@ -270,7 +292,6 @@ static int pm_genpd_runtime_suspend(struct device *dev)
 static int pm_genpd_runtime_resume(struct device *dev)
 {
         struct generic_pm_domain *genpd;
-        struct dev_list_entry *dle;
         int ret;
 
         dev_dbg(dev, "%s()\n", __func__);
@@ -284,17 +305,7 @@ static int pm_genpd_runtime_resume(struct device *dev)
                 return ret;
 
         mutex_lock(&genpd->lock);
-
-        list_for_each_entry(dle, &genpd->dev_list, node) {
-                if (dle->dev == dev) {
-                        __pm_genpd_restore_device(dle, genpd);
-                        break;
-                }
-        }
-
-        if (genpd->start_device)
-                genpd->start_device(dev);
-
+        __pm_genpd_runtime_resume(dev, genpd);
         mutex_unlock(&genpd->lock);
 
         return 0;
@@ -303,12 +314,493 @@ static int pm_genpd_runtime_resume(struct device *dev)
 #else
 
 static inline void genpd_power_off_work_fn(struct work_struct *work) {}
+static inline void __pm_genpd_runtime_resume(struct device *dev,
+                                             struct generic_pm_domain *genpd) {}
 
 #define pm_genpd_runtime_suspend NULL
 #define pm_genpd_runtime_resume NULL
 
 #endif /* CONFIG_PM_RUNTIME */
 
+#ifdef CONFIG_PM_SLEEP
+
+/**
+ * pm_genpd_sync_poweroff - Synchronously power off a PM domain and its parents.
+ * @genpd: PM domain to power off, if possible.
+ *
+ * Check if the given PM domain can be powered off (during system suspend or
+ * hibernation) and do that if so.  Also, in that case propagate to its parent.
+ *
+ * This function is only called in "noirq" stages of system power transitions,
+ * so it need not acquire locks (all of the "noirq" callbacks are executed
+ * sequentially, so it is guaranteed that it will never run twice in parallel).
+ */
+static void pm_genpd_sync_poweroff(struct generic_pm_domain *genpd)
+{
+        struct generic_pm_domain *parent = genpd->parent;
+
+        if (genpd->power_is_off)
+                return;
+
+        if (genpd->suspended_count != genpd->device_count || genpd->sd_count > 0)
+                return;
+
+        if (genpd->power_off)
+                genpd->power_off(genpd);
+
+        genpd->power_is_off = true;
+        if (parent) {
+                genpd_sd_counter_dec(parent);
+                pm_genpd_sync_poweroff(parent);
+        }
+}
+
+/**
+ * pm_genpd_prepare - Start power transition of a device in a PM domain.
+ * @dev: Device to start the transition of.
+ *
+ * Start a power transition of a device (during a system-wide power transition)
+ * under the assumption that its pm_domain field points to the domain member of
+ * an object of type struct generic_pm_domain representing a PM domain
+ * consisting of I/O devices.
+ */
+static int pm_genpd_prepare(struct device *dev)
+{
+        struct generic_pm_domain *genpd;
+
+        dev_dbg(dev, "%s()\n", __func__);
+
+        genpd = dev_to_genpd(dev);
+        if (IS_ERR(genpd))
+                return -EINVAL;
+
+        mutex_lock(&genpd->lock);
+
+        if (genpd->prepared_count++ == 0)
+                genpd->suspend_power_off = genpd->power_is_off;
+
+        if (genpd->suspend_power_off) {
+                mutex_unlock(&genpd->lock);
+                return 0;
+        }
+
+        /*
+         * If the device is in the (runtime) "suspended" state, call
+         * .start_device() for it, if defined.
+         */
+        if (pm_runtime_suspended(dev))
+                __pm_genpd_runtime_resume(dev, genpd);
+
+        /*
+         * Do not check if runtime resume is pending at this point, because it
+         * has been taken care of already and if pm_genpd_poweron() ran at this
+         * point as a result of the check, it would deadlock.
+         */
+        __pm_runtime_disable(dev, false);
+
+        mutex_unlock(&genpd->lock);
+
+        return pm_generic_prepare(dev);
+}
+
+/**
+ * pm_genpd_suspend - Suspend a device belonging to an I/O PM domain.
+ * @dev: Device to suspend.
+ *
+ * Suspend a device under the assumption that its pm_domain field points to the
+ * domain member of an object of type struct generic_pm_domain representing
+ * a PM domain consisting of I/O devices.
+ */
+static int pm_genpd_suspend(struct device *dev)
+{
+        struct generic_pm_domain *genpd;
+
+        dev_dbg(dev, "%s()\n", __func__);
+
+        genpd = dev_to_genpd(dev);
+        if (IS_ERR(genpd))
+                return -EINVAL;
+
+        return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
+}
+
+/**
+ * pm_genpd_suspend_noirq - Late suspend of a device from an I/O PM domain.
+ * @dev: Device to suspend.
+ *
+ * Carry out a late suspend of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a PM domain consisting of I/O devices.
+ */
+static int pm_genpd_suspend_noirq(struct device *dev)
+{
+        struct generic_pm_domain *genpd;
+        int ret;
+
+        dev_dbg(dev, "%s()\n", __func__);
+
+        genpd = dev_to_genpd(dev);
+        if (IS_ERR(genpd))
+                return -EINVAL;
+
+        if (genpd->suspend_power_off)
+                return 0;
+
+        ret = pm_generic_suspend_noirq(dev);
+        if (ret)
+                return ret;
+
+        if (genpd->stop_device)
+                genpd->stop_device(dev);
+
+        /*
+         * Since all of the "noirq" callbacks are executed sequentially, it is
+         * guaranteed that this function will never run twice in parallel for
+         * the same PM domain, so it is not necessary to use locking here.
+         */
+        genpd->suspended_count++;
+        pm_genpd_sync_poweroff(genpd);
+
+        return 0;
+}
+
+/**
+ * pm_genpd_resume_noirq - Early resume of a device from an I/O power domain.
+ * @dev: Device to resume.
+ *
+ * Carry out an early resume of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a power domain consisting of I/O
+ * devices.
+ */
+static int pm_genpd_resume_noirq(struct device *dev)
+{
+        struct generic_pm_domain *genpd;
+
+        dev_dbg(dev, "%s()\n", __func__);
+
+        genpd = dev_to_genpd(dev);
+        if (IS_ERR(genpd))
+                return -EINVAL;
+
+        if (genpd->suspend_power_off)
+                return 0;
+
+        /*
+         * Since all of the "noirq" callbacks are executed sequentially, it is
+         * guaranteed that this function will never run twice in parallel for
+         * the same PM domain, so it is not necessary to use locking here.
+         */
+        pm_genpd_poweron(genpd);
+        genpd->suspended_count--;
+        if (genpd->start_device)
+                genpd->start_device(dev);
+
+        return pm_generic_resume_noirq(dev);
+}
+
+/**
+ * pm_genpd_resume - Resume a device belonging to an I/O power domain.
+ * @dev: Device to resume.
+ *
+ * Resume a device under the assumption that its pm_domain field points to the
+ * domain member of an object of type struct generic_pm_domain representing
+ * a power domain consisting of I/O devices.
+ */
+static int pm_genpd_resume(struct device *dev)
+{
+        struct generic_pm_domain *genpd;
+
+        dev_dbg(dev, "%s()\n", __func__);
+
+        genpd = dev_to_genpd(dev);
+        if (IS_ERR(genpd))
+                return -EINVAL;
+
+        return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
+}
+
+/**
+ * pm_genpd_freeze - Freeze a device belonging to an I/O power domain.
+ * @dev: Device to freeze.
+ *
+ * Freeze a device under the assumption that its pm_domain field points to the
+ * domain member of an object of type struct generic_pm_domain representing
+ * a power domain consisting of I/O devices.
+ */
+static int pm_genpd_freeze(struct device *dev)
+{
+        struct generic_pm_domain *genpd;
+
+        dev_dbg(dev, "%s()\n", __func__);
+
+        genpd = dev_to_genpd(dev);
+        if (IS_ERR(genpd))
+                return -EINVAL;
+
+        return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
+}
+
+/**
+ * pm_genpd_freeze_noirq - Late freeze of a device from an I/O power domain.
+ * @dev: Device to freeze.
+ *
+ * Carry out a late freeze of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a power domain consisting of I/O
+ * devices.
+ */
+static int pm_genpd_freeze_noirq(struct device *dev)
+{
+        struct generic_pm_domain *genpd;
+        int ret;
+
+        dev_dbg(dev, "%s()\n", __func__);
+
+        genpd = dev_to_genpd(dev);
+        if (IS_ERR(genpd))
+                return -EINVAL;
+
+        if (genpd->suspend_power_off)
+                return 0;
+
+        ret = pm_generic_freeze_noirq(dev);
+        if (ret)
+                return ret;
+
+        if (genpd->stop_device)
+                genpd->stop_device(dev);
+
+        return 0;
+}
+
+/**
+ * pm_genpd_thaw_noirq - Early thaw of a device from an I/O power domain.
+ * @dev: Device to thaw.
+ *
+ * Carry out an early thaw of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a power domain consisting of I/O
+ * devices.
+ */
+static int pm_genpd_thaw_noirq(struct device *dev)
+{
+        struct generic_pm_domain *genpd;
+
+        dev_dbg(dev, "%s()\n", __func__);
+
+        genpd = dev_to_genpd(dev);
+        if (IS_ERR(genpd))
+                return -EINVAL;
+
+        if (genpd->suspend_power_off)
+                return 0;
+
+        if (genpd->start_device)
+                genpd->start_device(dev);
+
+        return pm_generic_thaw_noirq(dev);
+}
+
+/**
+ * pm_genpd_thaw - Thaw a device belonging to an I/O power domain.
+ * @dev: Device to thaw.
+ *
+ * Thaw a device under the assumption that its pm_domain field points to the
+ * domain member of an object of type struct generic_pm_domain representing
+ * a power domain consisting of I/O devices.
+ */
+static int pm_genpd_thaw(struct device *dev)
+{
+        struct generic_pm_domain *genpd;
+
+        dev_dbg(dev, "%s()\n", __func__);
+
+        genpd = dev_to_genpd(dev);
+        if (IS_ERR(genpd))
+                return -EINVAL;
+
+        return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
+}
+
+/**
+ * pm_genpd_dev_poweroff - Power off a device belonging to an I/O PM domain.
+ * @dev: Device to suspend.
+ *
+ * Power off a device under the assumption that its pm_domain field points to
+ * the domain member of an object of type struct generic_pm_domain representing
+ * a PM domain consisting of I/O devices.
+ */
+static int pm_genpd_dev_poweroff(struct device *dev)
+{
+        struct generic_pm_domain *genpd;
+
+        dev_dbg(dev, "%s()\n", __func__);
+
+        genpd = dev_to_genpd(dev);
+        if (IS_ERR(genpd))
+                return -EINVAL;
+
+        return genpd->suspend_power_off ? 0 : pm_generic_poweroff(dev);
+}
+
+/**
+ * pm_genpd_dev_poweroff_noirq - Late power off of a device from a PM domain.
+ * @dev: Device to suspend.
+ *
+ * Carry out a late powering off of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a PM domain consisting of I/O devices.
+ */
+static int pm_genpd_dev_poweroff_noirq(struct device *dev)
+{
+        struct generic_pm_domain *genpd;
+        int ret;
+
+        dev_dbg(dev, "%s()\n", __func__);
+
+        genpd = dev_to_genpd(dev);
+        if (IS_ERR(genpd))
+                return -EINVAL;
+
+        if (genpd->suspend_power_off)
+                return 0;
+
+        ret = pm_generic_poweroff_noirq(dev);
+        if (ret)
+                return ret;
+
+        if (genpd->stop_device)
+                genpd->stop_device(dev);
+
+        /*
+         * Since all of the "noirq" callbacks are executed sequentially, it is
+         * guaranteed that this function will never run twice in parallel for
+         * the same PM domain, so it is not necessary to use locking here.
+         */
+        genpd->suspended_count++;
+        pm_genpd_sync_poweroff(genpd);
+
+        return 0;
+}
+
+/**
+ * pm_genpd_restore_noirq - Early restore of a device from an I/O power domain.
+ * @dev: Device to resume.
+ *
+ * Carry out an early restore of a device under the assumption that its
+ * pm_domain field points to the domain member of an object of type
+ * struct generic_pm_domain representing a power domain consisting of I/O
+ * devices.
+ */
+static int pm_genpd_restore_noirq(struct device *dev)
+{
+        struct generic_pm_domain *genpd;
+
+        dev_dbg(dev, "%s()\n", __func__);
+
+        genpd = dev_to_genpd(dev);
+        if (IS_ERR(genpd))
+                return -EINVAL;
+
+        /*
+         * Since all of the "noirq" callbacks are executed sequentially, it is
+         * guaranteed that this function will never run twice in parallel for
+         * the same PM domain, so it is not necessary to use locking here.
+         */
+        genpd->power_is_off = true;
+        if (genpd->suspend_power_off) {
+                /*
+                 * The boot kernel might put the domain into the power on state,
+                 * so make sure it really is powered off.
+                 */
+                if (genpd->power_off)
+                        genpd->power_off(genpd);
+                return 0;
+        }
+
+        pm_genpd_poweron(genpd);
+        genpd->suspended_count--;
+        if (genpd->start_device)
+                genpd->start_device(dev);
+
+        return pm_generic_restore_noirq(dev);
+}
+
+/**
+ * pm_genpd_restore - Restore a device belonging to an I/O power domain.
+ * @dev: Device to resume.
+ *
+ * Restore a device under the assumption that its pm_domain field points to the
+ * domain member of an object of type struct generic_pm_domain representing
+ * a power domain consisting of I/O devices.
+ */
+static int pm_genpd_restore(struct device *dev)
+{
+        struct generic_pm_domain *genpd;
+
+        dev_dbg(dev, "%s()\n", __func__);
+
+        genpd = dev_to_genpd(dev);
+        if (IS_ERR(genpd))
+                return -EINVAL;
+
+        return genpd->suspend_power_off ? 0 : pm_generic_restore(dev);
+}
+
+/**
+ * pm_genpd_complete - Complete power transition of a device in a power domain.
+ * @dev: Device to complete the transition of.
+ *
+ * Complete a power transition of a device (during a system-wide power
+ * transition) under the assumption that its pm_domain field points to the
+ * domain member of an object of type struct generic_pm_domain representing
+ * a power domain consisting of I/O devices.
+ */
+static void pm_genpd_complete(struct device *dev)
+{
+        struct generic_pm_domain *genpd;
+        bool run_complete;
+
+        dev_dbg(dev, "%s()\n", __func__);
+
+        genpd = dev_to_genpd(dev);
+        if (IS_ERR(genpd))
+                return;
+
+        mutex_lock(&genpd->lock);
+
+        run_complete = !genpd->suspend_power_off;
+        if (--genpd->prepared_count == 0)
+                genpd->suspend_power_off = false;
+
+        mutex_unlock(&genpd->lock);
+
+        if (run_complete) {
+                pm_generic_complete(dev);
+                pm_runtime_enable(dev);
+        }
+}
+
+#else
+
+#define pm_genpd_prepare NULL
+#define pm_genpd_suspend NULL
+#define pm_genpd_suspend_noirq NULL
+#define pm_genpd_resume_noirq NULL
+#define pm_genpd_resume NULL
+#define pm_genpd_freeze NULL
+#define pm_genpd_freeze_noirq NULL
+#define pm_genpd_thaw_noirq NULL
+#define pm_genpd_thaw NULL
+#define pm_genpd_dev_poweroff_noirq NULL
+#define pm_genpd_dev_poweroff NULL
+#define pm_genpd_restore_noirq NULL
+#define pm_genpd_restore NULL
+#define pm_genpd_complete NULL
+
+#endif /* CONFIG_PM_SLEEP */
+
 /**
  * pm_genpd_add_device - Add a device to an I/O PM domain.
  * @genpd: PM domain to add the device to.
@@ -331,6 +823,11 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
                 goto out;
         }
 
+        if (genpd->prepared_count > 0) {
+                ret = -EAGAIN;
+                goto out;
+        }
+
         list_for_each_entry(dle, &genpd->dev_list, node)
                 if (dle->dev == dev) {
                         ret = -EINVAL;
@@ -346,6 +843,7 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
         dle->dev = dev;
         dle->need_restore = false;
         list_add_tail(&dle->node, &genpd->dev_list);
+        genpd->device_count++;
 
         spin_lock_irq(&dev->power.lock);
         dev->pm_domain = &genpd->domain;
@@ -375,6 +873,11 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
 
         mutex_lock(&genpd->lock);
 
+        if (genpd->prepared_count > 0) {
+                ret = -EAGAIN;
+                goto out;
+        }
+
         list_for_each_entry(dle, &genpd->dev_list, node) {
                 if (dle->dev != dev)
                         continue;
@@ -383,6 +886,7 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
                 dev->pm_domain = NULL;
                 spin_unlock_irq(&dev->power.lock);
 
+                genpd->device_count--;
                 list_del(&dle->node);
                 kfree(dle);
 
@@ -390,6 +894,7 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
                 break;
         }
 
+ out:
         mutex_unlock(&genpd->lock);
 
         return ret;
@@ -498,7 +1003,23 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
         genpd->in_progress = 0;
         genpd->sd_count = 0;
         genpd->power_is_off = is_off;
+        genpd->device_count = 0;
+        genpd->suspended_count = 0;
         genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
         genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
         genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
+        genpd->domain.ops.prepare = pm_genpd_prepare;
+        genpd->domain.ops.suspend = pm_genpd_suspend;
+        genpd->domain.ops.suspend_noirq = pm_genpd_suspend_noirq;
+        genpd->domain.ops.resume_noirq = pm_genpd_resume_noirq;
+        genpd->domain.ops.resume = pm_genpd_resume;
+        genpd->domain.ops.freeze = pm_genpd_freeze;
+        genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
+        genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
+        genpd->domain.ops.thaw = pm_genpd_thaw;
+        genpd->domain.ops.poweroff = pm_genpd_dev_poweroff;
+        genpd->domain.ops.poweroff_noirq = pm_genpd_dev_poweroff_noirq;
+        genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
+        genpd->domain.ops.restore = pm_genpd_restore;
+        genpd->domain.ops.complete = pm_genpd_complete;
 }
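
For context, a minimal, hypothetical sketch of how platform code might use the interface this patch extends. It is not part of the patch: the foo_* names and register/clock details are invented, and the three-argument pm_genpd_init(genpd, governor, is_off) signature, the <linux/pm_domain.h> header, and the power_on callback are assumptions carried over from the surrounding patch series rather than shown in this diff.

/* Hypothetical platform code sketched against this patch series. */
#include <linux/pm_domain.h>
#include <linux/device.h>

static int foo_pd_power_off(struct generic_pm_domain *domain)
{
        /* Platform-specific: gate clocks and cut power to the island. */
        return 0;
}

static int foo_pd_power_on(struct generic_pm_domain *domain)
{
        /* Platform-specific: restore power to the island. */
        return 0;
}

static struct generic_pm_domain foo_pd = {
        .power_off = foo_pd_power_off,
        .power_on = foo_pd_power_on,
};

/* Called once during platform setup with a device that lives in the domain. */
static int foo_pd_setup(struct device *uart_dev)
{
        /*
         * Initialize the domain as "initially on" (no governor).  With this
         * patch, pm_genpd_init() also fills in the system-sleep callbacks
         * (prepare, suspend_noirq, resume_noirq, complete, ...), so devices
         * in the domain need no extra dev_pm_ops glue for suspend/resume.
         */
        pm_genpd_init(&foo_pd, NULL, false);

        /*
         * Attaching (or detaching) devices now fails with -EAGAIN while a
         * system-wide transition is in progress (prepared_count > 0).
         */
        return pm_genpd_add_device(&foo_pd, uart_dev);
}

The point of the sketch is the wiring: once a device is attached, the per-domain prepare/complete bookkeeping (prepared_count, suspend_power_off, suspended_count, device_count) added by this patch decides when pm_genpd_sync_poweroff() may actually turn the domain off during the "noirq" phases.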