about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author Imre Deak <imre.deak@intel.com> 2016-04-20 13:27:54 -0400
committer Imre Deak <imre.deak@intel.com> 2016-04-22 08:11:39 -0400
commit 507e126e0700a71939935636a4d581a9323c5ec1 (patch)
tree aebb9b48e2e96220d8445a0abaaabba4b01d47ac
parent 80dbe9973afc6be722bb6dbc1a303f3a7aaa3e1f (diff)
drm/i915: Inline intel_suspend_complete
Initially we thought that the platform specific suspend/resume sequences can be shared between the runtime and system suspend/resume handlers. This turned out to be not true, we have quite a few differences on most of the platforms. This was realized already earlier by Paulo who inlined the platform specific resume_prepare handlers. We have the same problem with the corresponding suspend_complete handlers, there are platform differences that make it unfeasible to share the code between the runtime and system suspend paths. Also now we call functions that need to be paired like hsw_enable_pc8()/hsw_disable_pc8() from different levels of the call stack, which is confusing. Fix this by inlining the suspend_complete handlers too. This is also needed by the next patch that removes a redundant uninit/init call during system suspend/resume on BXT. No functional change. CC: Paulo Zanoni <przanoni@gmail.com> Signed-off-by: Imre Deak <imre.deak@intel.com> Reviewed-by: Bob Paauwe <bob.j.paauwe@intel.com> [s/uninline/inline in the commit message] Link: http://patchwork.freedesktop.org/patch/msgid/1461173277-16090-2-git-send-email-imre.deak@intel.com
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c | 83
1 file changed, 29 insertions(+), 54 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 1b449f96d2c1..191287394543 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -567,10 +567,9 @@ static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
567 drm_modeset_unlock_all(dev); 567 drm_modeset_unlock_all(dev);
568} 568}
569 569
570static int intel_suspend_complete(struct drm_i915_private *dev_priv);
571static int vlv_resume_prepare(struct drm_i915_private *dev_priv, 570static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
572 bool rpm_resume); 571 bool rpm_resume);
573static int bxt_resume_prepare(struct drm_i915_private *dev_priv); 572static int vlv_suspend_complete(struct drm_i915_private *dev_priv);
574 573
575static bool suspend_to_idle(struct drm_i915_private *dev_priv) 574static bool suspend_to_idle(struct drm_i915_private *dev_priv)
576{ 575{
@@ -668,7 +667,14 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
668 if (!fw_csr) 667 if (!fw_csr)
669 intel_power_domains_suspend(dev_priv); 668 intel_power_domains_suspend(dev_priv);
670 669
671 ret = intel_suspend_complete(dev_priv); 670 ret = 0;
671 if (IS_BROXTON(dev_priv)) {
672 bxt_display_core_uninit(dev_priv);
673 bxt_enable_dc9(dev_priv);
674 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
675 hsw_enable_pc8(dev_priv);
676 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
677 ret = vlv_suspend_complete(dev_priv);
672 678
673 if (ret) { 679 if (ret) {
674 DRM_ERROR("Suspend complete failed: %d\n", ret); 680 DRM_ERROR("Suspend complete failed: %d\n", ret);
@@ -862,9 +868,10 @@ static int i915_drm_resume_early(struct drm_device *dev)
862 868
863 intel_uncore_early_sanitize(dev, true); 869 intel_uncore_early_sanitize(dev, true);
864 870
865 if (IS_BROXTON(dev)) 871 if (IS_BROXTON(dev)) {
866 ret = bxt_resume_prepare(dev_priv); 872 bxt_disable_dc9(dev_priv);
867 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 873 bxt_display_core_init(dev_priv, true);
874 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
868 hsw_disable_pc8(dev_priv); 875 hsw_disable_pc8(dev_priv);
869 876
870 intel_uncore_sanitize(dev); 877 intel_uncore_sanitize(dev);
@@ -1102,29 +1109,6 @@ static int i915_pm_resume(struct device *dev)
1102 return i915_drm_resume(drm_dev); 1109 return i915_drm_resume(drm_dev);
1103} 1110}
1104 1111
1105static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
1106{
1107 hsw_enable_pc8(dev_priv);
1108
1109 return 0;
1110}
1111
1112static int bxt_suspend_complete(struct drm_i915_private *dev_priv)
1113{
1114 bxt_display_core_uninit(dev_priv);
1115 bxt_enable_dc9(dev_priv);
1116
1117 return 0;
1118}
1119
1120static int bxt_resume_prepare(struct drm_i915_private *dev_priv)
1121{
1122 bxt_disable_dc9(dev_priv);
1123 bxt_display_core_init(dev_priv, true);
1124
1125 return 0;
1126}
1127
1128/* 1112/*
1129 * Save all Gunit registers that may be lost after a D3 and a subsequent 1113 * Save all Gunit registers that may be lost after a D3 and a subsequent
1130 * S0i[R123] transition. The list of registers needing a save/restore is 1114 * S0i[R123] transition. The list of registers needing a save/restore is
@@ -1530,7 +1514,16 @@ static int intel_runtime_suspend(struct device *device)
1530 intel_suspend_gt_powersave(dev); 1514 intel_suspend_gt_powersave(dev);
1531 intel_runtime_pm_disable_interrupts(dev_priv); 1515 intel_runtime_pm_disable_interrupts(dev_priv);
1532 1516
1533 ret = intel_suspend_complete(dev_priv); 1517 ret = 0;
1518 if (IS_BROXTON(dev_priv)) {
1519 bxt_display_core_uninit(dev_priv);
1520 bxt_enable_dc9(dev_priv);
1521 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
1522 hsw_enable_pc8(dev_priv);
1523 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
1524 ret = vlv_suspend_complete(dev_priv);
1525 }
1526
1534 if (ret) { 1527 if (ret) {
1535 DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret); 1528 DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
1536 intel_runtime_pm_enable_interrupts(dev_priv); 1529 intel_runtime_pm_enable_interrupts(dev_priv);
@@ -1604,12 +1597,14 @@ static int intel_runtime_resume(struct device *device)
1604 if (IS_GEN6(dev_priv)) 1597 if (IS_GEN6(dev_priv))
1605 intel_init_pch_refclk(dev); 1598 intel_init_pch_refclk(dev);
1606 1599
1607 if (IS_BROXTON(dev)) 1600 if (IS_BROXTON(dev)) {
1608 ret = bxt_resume_prepare(dev_priv); 1601 bxt_disable_dc9(dev_priv);
1609 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 1602 bxt_display_core_init(dev_priv, true);
1603 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
1610 hsw_disable_pc8(dev_priv); 1604 hsw_disable_pc8(dev_priv);
1611 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 1605 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
1612 ret = vlv_resume_prepare(dev_priv, true); 1606 ret = vlv_resume_prepare(dev_priv, true);
1607 }
1613 1608
1614 /* 1609 /*
1615 * No point of rolling back things in case of an error, as the best 1610 * No point of rolling back things in case of an error, as the best
@@ -1640,26 +1635,6 @@ static int intel_runtime_resume(struct device *device)
1640 return ret; 1635 return ret;
1641} 1636}
1642 1637
1643/*
1644 * This function implements common functionality of runtime and system
1645 * suspend sequence.
1646 */
1647static int intel_suspend_complete(struct drm_i915_private *dev_priv)
1648{
1649 int ret;
1650
1651 if (IS_BROXTON(dev_priv))
1652 ret = bxt_suspend_complete(dev_priv);
1653 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
1654 ret = hsw_suspend_complete(dev_priv);
1655 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1656 ret = vlv_suspend_complete(dev_priv);
1657 else
1658 ret = 0;
1659
1660 return ret;
1661}
1662
1663static const struct dev_pm_ops i915_pm_ops = { 1638static const struct dev_pm_ops i915_pm_ops = {
1664 /* 1639 /*
1665 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND, 1640 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,