author	Daniel Vetter <daniel.vetter@ffwll.ch>	2014-07-11 13:30:16 -0400
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2014-07-23 01:05:19 -0400
commit	9ca153017e00550dbeda2718cfd69ca37de9c523 (patch)
tree	05d0a0d6f7be9d274d54e09fb8262c2961fc4e52
parent	f0355c4a9eaf4cb803930d9fe6a26fb46846e576 (diff)
drm/i915: Fix up PSR frontbuffer tracking
I've tried to split this up, but all the changes are so tightly
related that I didn't find a good way to do this without breaking
bisecting. Essentially this completely changes how psr is glued into
the overall driver, and there's not much you can do to soften such a
paradigm change.

- Use frontbuffer tracking bits stuff to separate disable and
  re-enable.

- Don't re-check everything in the psr work. We have now accurate
  tracking for everything, so no need to check for sprites or tiling
  really. Allows us to ditch tons of locks.

- That in turn allows us to properly cancel the work in the disable
  function - no more deadlocks.

- Add a check for HSW sprites and force a flush. Apparently the
  hardware doesn't forward the flushing when updating the sprite base
  address. We can do the same trick everywhere else we have such
  issues, e.g. on baytrail with ... everything.

- Don't re-enable psr with a delay in psr_exit. It really must be
  turned off forever if we detect a gtt write. At least with the
  current frontbuffer render tracking. Userspace can do a busy ioctl
  call or no-op pageflip to re-enable psr.

- Drop redundant checks for crtc and crtc->active - now that they're
  only called from enable this is guaranteed.

- Fix up the hsw port check. eDP can also happen on port D, but the
  issue is exactly that it doesn't work there. So an || check is
  wrong.

- We still schedule the psr work with a delay. The frontbuffer
  flushing interface mandates that we upload the next full frame, so
  need to wait a bit. Once we have single-shot frame uploads we can do
  better here.

v2: Don't enable psr initially, rely upon the fb flush of the initial
plane setup for that. Gives us more unified code flow and makes the
crtc enable sequence less a special case.

v3: s/psr_exit/psr_invalidate/ for consistency

v4: Fixup whitespace.

v5: Correctly bail out of psr_invalidate/flush when
dev_priv->psr.enabled is NULL. Spotted by Rodrigo.

v6:
- Only schedule work when there's work to do. Fixes WARNINGs reported
  by Rodrigo.
- Comments Chris requested to clarify the code.

v7: Fix conflict on rebase (Rodrigo)

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch> (v6)
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
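
To make the tracking scheme above easier to follow, here is a minimal userspace
sketch of the invalidate/flush state machine. This is only an illustration under
simplified assumptions - the struct and function names merely mirror the driver,
and the real code holds dev_priv->psr.lock around these updates and re-arms PSR
from a delayed work item, as the diff below shows.

#include <stdbool.h>
#include <stdio.h>

/*
 * Simplified model of the PSR frontbuffer tracking described above.
 * Illustrative only: the real driver protects this state with
 * dev_priv->psr.lock and re-enables PSR from a delayed work item.
 */
struct psr_state {
	bool enabled;                   /* PSR set up on an eDP panel */
	bool active;                    /* PSR currently armed in hardware */
	unsigned busy_frontbuffer_bits; /* invalidated but not yet flushed */
};

static void psr_invalidate(struct psr_state *psr, unsigned frontbuffer_bits)
{
	if (!psr->enabled)
		return;
	psr->active = false;	/* frontbuffer write: exit PSR right away */
	psr->busy_frontbuffer_bits |= frontbuffer_bits;
}

static void psr_flush(struct psr_state *psr, unsigned frontbuffer_bits)
{
	if (!psr->enabled)
		return;
	psr->busy_frontbuffer_bits &= ~frontbuffer_bits;
	/*
	 * Only re-arm once nothing is busy any more; the driver defers
	 * this by ~100 ms so the next full frame gets uploaded first.
	 */
	if (!psr->active && !psr->busy_frontbuffer_bits)
		psr->active = true;	/* stands in for the delayed work */
}

int main(void)
{
	struct psr_state psr = { .enabled = true, .active = true };

	psr_invalidate(&psr, 0x1);	/* e.g. a GTT write to the primary plane */
	psr_flush(&psr, 0x1);		/* flush re-arms PSR once nothing is busy */
	printf("active=%d busy=%#x\n", psr.active, psr.busy_frontbuffer_bits);
	return 0;
}
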
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.h	1
-rw-r--r--	drivers/gpu/drm/i915/intel_display.c	4
-rw-r--r--	drivers/gpu/drm/i915/intel_dp.c	124
-rw-r--r--	drivers/gpu/drm/i915/intel_drv.h	5
4 files changed, 85 insertions, 49 deletions
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index faa27d0044f8..0b9f7894ee82 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -666,6 +666,7 @@ struct i915_psr {
 	struct intel_dp *enabled;
 	bool active;
 	struct delayed_work work;
+	unsigned busy_frontbuffer_bits;
 };
 
 enum intel_pch {
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 7e0dc46ec505..9064dd9805cd 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -8942,7 +8942,7 @@ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
 
 	intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
 
-	intel_edp_psr_exit(dev);
+	intel_edp_psr_invalidate(dev, obj->frontbuffer_bits);
 }
 
 /**
@@ -8968,7 +8968,7 @@ void intel_frontbuffer_flush(struct drm_device *dev,
 
 	intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
 
-	intel_edp_psr_exit(dev);
+	intel_edp_psr_flush(dev, frontbuffer_bits);
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 3a3bb0904515..333471c4dcd1 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1764,8 +1764,6 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc = dig_port->base.base.crtc;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct drm_i915_gem_object *obj = intel_fb_obj(crtc->primary->fb);
-	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
 
 	lockdep_assert_held(&dev_priv->psr.lock);
 	lockdep_assert_held(&dev->struct_mutex);
@@ -1779,8 +1777,7 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
 		return false;
 	}
 
-	if (IS_HASWELL(dev) && (intel_encoder->type != INTEL_OUTPUT_EDP ||
-				dig_port->port != PORT_A)) {
+	if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
 		DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
 		return false;
 	}
@@ -1790,33 +1787,10 @@ static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
 		return false;
 	}
 
-	crtc = dig_port->base.base.crtc;
-	if (crtc == NULL) {
-		DRM_DEBUG_KMS("crtc not active for PSR\n");
-		return false;
-	}
-
-	intel_crtc = to_intel_crtc(crtc);
-	if (!intel_crtc_active(crtc)) {
-		DRM_DEBUG_KMS("crtc not active for PSR\n");
-		return false;
-	}
-
-	if (obj->tiling_mode != I915_TILING_X ||
-	    obj->fence_reg == I915_FENCE_REG_NONE) {
-		DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n");
-		return false;
-	}
-
 	/* Below limitations aren't valid for Broadwell */
 	if (IS_BROADWELL(dev))
 		goto out;
 
-	if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) {
-		DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n");
-		return false;
-	}
-
 	if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
 	    S3D_ENABLE) {
 		DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
@@ -1849,7 +1823,6 @@ static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
 	/* Enable PSR on the host */
 	intel_edp_psr_enable_source(intel_dp);
 
-	dev_priv->psr.enabled = intel_dp;
 	dev_priv->psr.active = true;
 }
 
@@ -1875,11 +1848,13 @@ void intel_edp_psr_enable(struct intel_dp *intel_dp)
 		return;
 	}
 
+	dev_priv->psr.busy_frontbuffer_bits = 0;
+
 	/* Setup PSR once */
 	intel_edp_psr_setup(intel_dp);
 
 	if (intel_edp_psr_match_conditions(intel_dp))
-		intel_edp_psr_do_enable(intel_dp);
+		dev_priv->psr.enabled = intel_dp;
 	mutex_unlock(&dev_priv->psr.lock);
 }
 
@@ -1913,42 +1888,39 @@ void intel_edp_psr_disable(struct intel_dp *intel_dp)
 
 	dev_priv->psr.enabled = NULL;
 	mutex_unlock(&dev_priv->psr.lock);
+
+	cancel_delayed_work_sync(&dev_priv->psr.work);
 }
 
 static void intel_edp_psr_work(struct work_struct *work)
 {
 	struct drm_i915_private *dev_priv =
 		container_of(work, typeof(*dev_priv), psr.work.work);
-	struct drm_device *dev = dev_priv->dev;
 	struct intel_dp *intel_dp = dev_priv->psr.enabled;
 
-	drm_modeset_lock_all(dev);
-	mutex_lock(&dev->struct_mutex);
 	mutex_lock(&dev_priv->psr.lock);
 	intel_dp = dev_priv->psr.enabled;
 
 	if (!intel_dp)
 		goto unlock;
 
-	if (intel_edp_psr_match_conditions(intel_dp))
-		intel_edp_psr_do_enable(intel_dp);
+	/*
+	 * The delayed work can race with an invalidate hence we need to
+	 * recheck. Since psr_flush first clears this and then reschedules we
+	 * won't ever miss a flush when bailing out here.
+	 */
+	if (dev_priv->psr.busy_frontbuffer_bits)
+		goto unlock;
+
+	intel_edp_psr_do_enable(intel_dp);
 unlock:
 	mutex_unlock(&dev_priv->psr.lock);
-	mutex_unlock(&dev->struct_mutex);
-	drm_modeset_unlock_all(dev);
 }
 
-void intel_edp_psr_exit(struct drm_device *dev)
+static void intel_edp_psr_do_exit(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (!HAS_PSR(dev))
-		return;
-
-	if (!dev_priv->psr.enabled)
-		return;
-
-	mutex_lock(&dev_priv->psr.lock);
 	if (dev_priv->psr.active) {
 		u32 val = I915_READ(EDP_PSR_CTL(dev));
 
@@ -1959,8 +1931,68 @@ void intel_edp_psr_exit(struct drm_device *dev)
 		dev_priv->psr.active = false;
 	}
 
-	schedule_delayed_work(&dev_priv->psr.work,
-			      msecs_to_jiffies(100));
+}
+
+void intel_edp_psr_invalidate(struct drm_device *dev,
+			      unsigned frontbuffer_bits)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc;
+	enum pipe pipe;
+
+	if (!HAS_PSR(dev))
+		return;
+
+	mutex_lock(&dev_priv->psr.lock);
+	if (!dev_priv->psr.enabled) {
+		mutex_unlock(&dev_priv->psr.lock);
+		return;
+	}
+
+	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
+	pipe = to_intel_crtc(crtc)->pipe;
+
+	intel_edp_psr_do_exit(dev);
+
+	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
+
+	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
+	mutex_unlock(&dev_priv->psr.lock);
+}
+
+void intel_edp_psr_flush(struct drm_device *dev,
+			 unsigned frontbuffer_bits)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc;
+	enum pipe pipe;
+
+	if (!HAS_PSR(dev))
+		return;
+
+	mutex_lock(&dev_priv->psr.lock);
+	if (!dev_priv->psr.enabled) {
+		mutex_unlock(&dev_priv->psr.lock);
+		return;
+	}
+
+	crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
+	pipe = to_intel_crtc(crtc)->pipe;
+	dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
+
+	/*
+	 * On Haswell sprite plane updates don't result in a psr invalidating
+	 * signal in the hardware. Which means we need to manually fake this in
+	 * software for all flushes, not just when we've seen a preceding
+	 * invalidation through frontbuffer rendering.
+	 */
+	if (IS_HASWELL(dev) &&
+	    (frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
+		intel_edp_psr_do_exit(dev);
+
+	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
+		schedule_delayed_work(&dev_priv->psr.work,
+				      msecs_to_jiffies(100));
 	mutex_unlock(&dev_priv->psr.lock);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index b9540c01bab3..3adcdd1de6c6 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -868,7 +868,10 @@ void intel_edp_panel_off(struct intel_dp *intel_dp);
 void intel_edp_psr_enable(struct intel_dp *intel_dp);
 void intel_edp_psr_disable(struct intel_dp *intel_dp);
 void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate);
-void intel_edp_psr_exit(struct drm_device *dev);
+void intel_edp_psr_invalidate(struct drm_device *dev,
+			      unsigned frontbuffer_bits);
+void intel_edp_psr_flush(struct drm_device *dev,
+			 unsigned frontbuffer_bits);
 void intel_edp_psr_init(struct drm_device *dev);
 
 /* intel_dsi.c */