Diffstat (limited to 'drivers/gpu/drm/i915/intel_pm.c')
-rw-r--r--	drivers/gpu/drm/i915/intel_pm.c	569
1 file changed, 259 insertions(+), 310 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index ddbb7ed0a193..d52a15df6917 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -52,82 +52,20 @@
 #define INTEL_RC6p_ENABLE			(1<<1)
 #define INTEL_RC6pp_ENABLE			(1<<2)
 
-static void gen9_init_clock_gating(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	/* WaEnableLbsSlaRetryTimerDecrement:skl */
-	I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
-		   GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
-
-	/* WaDisableKillLogic:bxt,skl */
-	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
-		   ECOCHK_DIS_TLB);
-}
-
-static void skl_init_clock_gating(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	gen9_init_clock_gating(dev);
-
-	if (INTEL_REVID(dev) <= SKL_REVID_B0) {
-		/*
-		 * WaDisableSDEUnitClockGating:skl
-		 * WaSetGAPSunitClckGateDisable:skl
-		 */
-		I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
-			   GEN8_GAPSUNIT_CLOCK_GATE_DISABLE |
-			   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
-
-		/* WaDisableVFUnitClockGating:skl */
-		I915_WRITE(GEN6_UCGCTL2, I915_READ(GEN6_UCGCTL2) |
-			   GEN6_VFUNIT_CLOCK_GATE_DISABLE);
-	}
-
-	if (INTEL_REVID(dev) <= SKL_REVID_D0) {
-		/* WaDisableHDCInvalidation:skl */
-		I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
-			   BDW_DISABLE_HDC_INVALIDATION);
-
-		/* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
-		I915_WRITE(FF_SLICE_CS_CHICKEN2,
-			   _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
-	}
-
-	/* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
-	 * involving this register should also be added to WA batch as required.
-	 */
-	if (INTEL_REVID(dev) <= SKL_REVID_E0)
-		/* WaDisableLSQCROPERFforOCL:skl */
-		I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
-			   GEN8_LQSC_RO_PERF_DIS);
-
-	/* WaEnableGapsTsvCreditFix:skl */
-	if (IS_SKYLAKE(dev) && (INTEL_REVID(dev) >= SKL_REVID_C0)) {
-		I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
-					   GEN9_GAPS_TSV_CREDIT_DISABLE));
-	}
-}
-
 static void bxt_init_clock_gating(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	gen9_init_clock_gating(dev);
+	/* WaDisableSDEUnitClockGating:bxt */
+	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
 
 	/*
 	 * FIXME:
-	 * GEN8_SDEUNIT_CLOCK_GATE_DISABLE applies on A0 only.
 	 * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
 	 */
-	/* WaDisableSDEUnitClockGating:bxt */
 	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
-		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE |
 		   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);
-
-	/* FIXME: apply on A0 only */
-	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
 }
 
 static void i915_pineview_get_mem_freq(struct drm_device *dev)
@@ -691,12 +629,9 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
 
 	crtc = single_enabled_crtc(dev);
 	if (crtc) {
-		const struct drm_display_mode *adjusted_mode;
+		const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
 		int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
-		int clock;
-
-		adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
-		clock = adjusted_mode->crtc_clock;
+		int clock = adjusted_mode->crtc_clock;
 
 		/* Display SR */
 		wm = intel_calculate_wm(clock, &pineview_display_wm,
@@ -1200,7 +1135,7 @@ static void vlv_compute_wm(struct intel_crtc *crtc)
 	case DRM_PLANE_TYPE_CURSOR:
 		for (level = 0; level < wm_state->num_levels; level++)
 			wm_state->sr[level].cursor =
-				wm_state->sr[level].cursor;
+				wm_state->wm[level].cursor;
 		break;
 	case DRM_PLANE_TYPE_PRIMARY:
 		for (level = 0; level < wm_state->num_levels; level++)
@@ -1490,8 +1425,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
 	if (crtc) {
 		/* self-refresh has much higher latency */
 		static const int sr_latency_ns = 12000;
-		const struct drm_display_mode *adjusted_mode =
-			&to_intel_crtc(crtc)->config->base.adjusted_mode;
+		const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
 		int clock = adjusted_mode->crtc_clock;
 		int htotal = adjusted_mode->crtc_htotal;
 		int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
@@ -1638,8 +1572,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
 	if (HAS_FW_BLC(dev) && enabled) {
 		/* self-refresh has much higher latency */
 		static const int sr_latency_ns = 6000;
-		const struct drm_display_mode *adjusted_mode =
-			&to_intel_crtc(enabled)->config->base.adjusted_mode;
+		const struct drm_display_mode *adjusted_mode = &to_intel_crtc(enabled)->config->base.adjusted_mode;
 		int clock = adjusted_mode->crtc_clock;
 		int htotal = adjusted_mode->crtc_htotal;
 		int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w;
@@ -1780,16 +1713,6 @@ struct skl_pipe_wm_parameters {
 	uint32_t pipe_htotal;
 	uint32_t pixel_rate; /* in KHz */
 	struct intel_plane_wm_parameters plane[I915_MAX_PLANES];
-	struct intel_plane_wm_parameters cursor;
-};
-
-struct ilk_pipe_wm_parameters {
-	bool active;
-	uint32_t pipe_htotal;
-	uint32_t pixel_rate;
-	struct intel_plane_wm_parameters pri;
-	struct intel_plane_wm_parameters spr;
-	struct intel_plane_wm_parameters cur;
 };
 
 struct ilk_wm_maximums {
@@ -1810,26 +1733,26 @@ struct intel_wm_config {
  * For both WM_PIPE and WM_LP.
  * mem_value must be in 0.1us units.
  */
-static uint32_t ilk_compute_pri_wm(const struct ilk_pipe_wm_parameters *params,
+static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
+				   const struct intel_plane_state *pstate,
 				   uint32_t mem_value,
 				   bool is_lp)
 {
+	int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;
 	uint32_t method1, method2;
 
-	if (!params->active || !params->pri.enabled)
+	if (!cstate->base.active || !pstate->visible)
 		return 0;
 
-	method1 = ilk_wm_method1(params->pixel_rate,
-				 params->pri.bytes_per_pixel,
-				 mem_value);
+	method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), bpp, mem_value);
 
 	if (!is_lp)
 		return method1;
 
-	method2 = ilk_wm_method2(params->pixel_rate,
-				 params->pipe_htotal,
-				 params->pri.horiz_pixels,
-				 params->pri.bytes_per_pixel,
+	method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
+				 cstate->base.adjusted_mode.crtc_htotal,
+				 drm_rect_width(&pstate->dst),
+				 bpp,
 				 mem_value);
 
 	return min(method1, method2);
@@ -1839,21 +1762,21 @@ static uint32_t ilk_compute_pri_wm(const struct ilk_pipe_wm_parameters *params,
  * For both WM_PIPE and WM_LP.
  * mem_value must be in 0.1us units.
  */
-static uint32_t ilk_compute_spr_wm(const struct ilk_pipe_wm_parameters *params,
+static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
+				   const struct intel_plane_state *pstate,
 				   uint32_t mem_value)
 {
+	int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;
 	uint32_t method1, method2;
 
-	if (!params->active || !params->spr.enabled)
+	if (!cstate->base.active || !pstate->visible)
 		return 0;
 
-	method1 = ilk_wm_method1(params->pixel_rate,
-				 params->spr.bytes_per_pixel,
-				 mem_value);
-	method2 = ilk_wm_method2(params->pixel_rate,
-				 params->pipe_htotal,
-				 params->spr.horiz_pixels,
-				 params->spr.bytes_per_pixel,
+	method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), bpp, mem_value);
+	method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
+				 cstate->base.adjusted_mode.crtc_htotal,
+				 drm_rect_width(&pstate->dst),
+				 bpp,
 				 mem_value);
 	return min(method1, method2);
 }
@@ -1862,29 +1785,33 @@ static uint32_t ilk_compute_spr_wm(const struct ilk_pipe_wm_parameters *params,
  * For both WM_PIPE and WM_LP.
  * mem_value must be in 0.1us units.
  */
-static uint32_t ilk_compute_cur_wm(const struct ilk_pipe_wm_parameters *params,
+static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
+				   const struct intel_plane_state *pstate,
 				   uint32_t mem_value)
 {
-	if (!params->active || !params->cur.enabled)
+	int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;
+
+	if (!cstate->base.active || !pstate->visible)
 		return 0;
 
-	return ilk_wm_method2(params->pixel_rate,
-			      params->pipe_htotal,
-			      params->cur.horiz_pixels,
-			      params->cur.bytes_per_pixel,
+	return ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
+			      cstate->base.adjusted_mode.crtc_htotal,
+			      drm_rect_width(&pstate->dst),
+			      bpp,
 			      mem_value);
 }
 
 /* Only for WM_LP. */
-static uint32_t ilk_compute_fbc_wm(const struct ilk_pipe_wm_parameters *params,
+static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
+				   const struct intel_plane_state *pstate,
 				   uint32_t pri_val)
 {
-	if (!params->active || !params->pri.enabled)
+	int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;
+
+	if (!cstate->base.active || !pstate->visible)
 		return 0;
 
-	return ilk_wm_fbc(pri_val,
-			  params->pri.horiz_pixels,
-			  params->pri.bytes_per_pixel);
+	return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->dst), bpp);
 }
 
 static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
@@ -2049,10 +1976,12 @@ static bool ilk_validate_wm_level(int level,
 }
 
 static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
+				 const struct intel_crtc *intel_crtc,
 				 int level,
-				 const struct ilk_pipe_wm_parameters *p,
+				 struct intel_crtc_state *cstate,
 				 struct intel_wm_level *result)
 {
+	struct intel_plane *intel_plane;
 	uint16_t pri_latency = dev_priv->wm.pri_latency[level];
 	uint16_t spr_latency = dev_priv->wm.spr_latency[level];
 	uint16_t cur_latency = dev_priv->wm.cur_latency[level];
@@ -2064,10 +1993,29 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
 		cur_latency *= 5;
 	}
 
-	result->pri_val = ilk_compute_pri_wm(p, pri_latency, level);
-	result->spr_val = ilk_compute_spr_wm(p, spr_latency);
-	result->cur_val = ilk_compute_cur_wm(p, cur_latency);
-	result->fbc_val = ilk_compute_fbc_wm(p, result->pri_val);
+	for_each_intel_plane_on_crtc(dev_priv->dev, intel_crtc, intel_plane) {
+		struct intel_plane_state *pstate =
+			to_intel_plane_state(intel_plane->base.state);
+
+		switch (intel_plane->base.type) {
+		case DRM_PLANE_TYPE_PRIMARY:
+			result->pri_val = ilk_compute_pri_wm(cstate, pstate,
+							     pri_latency,
+							     level);
+			result->fbc_val = ilk_compute_fbc_wm(cstate, pstate,
+							     result->pri_val);
+			break;
+		case DRM_PLANE_TYPE_OVERLAY:
+			result->spr_val = ilk_compute_spr_wm(cstate, pstate,
+							     spr_latency);
+			break;
+		case DRM_PLANE_TYPE_CURSOR:
+			result->cur_val = ilk_compute_cur_wm(cstate, pstate,
+							     cur_latency);
+			break;
+		}
+	}
+
 	result->enable = true;
 }
 
@@ -2076,7 +2024,7 @@ hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	struct drm_display_mode *mode = &intel_crtc->config->base.adjusted_mode;
+	const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
 	u32 linetime, ips_linetime;
 
 	if (!intel_crtc->active)
@@ -2085,9 +2033,9 @@ hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
 	/* The WM are computed with base on how long it takes to fill a single
 	 * row at the given clock rate, multiplied by 8.
 	 * */
-	linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
-				     mode->crtc_clock);
-	ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
+	linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
+				     adjusted_mode->crtc_clock);
+	ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
 				     dev_priv->cdclk_freq);
 
 	return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
@@ -2326,48 +2274,6 @@ static void skl_setup_wm_latency(struct drm_device *dev)
 	intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
 }
 
-static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
-				      struct ilk_pipe_wm_parameters *p)
-{
-	struct drm_device *dev = crtc->dev;
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	enum pipe pipe = intel_crtc->pipe;
-	struct drm_plane *plane;
-
-	if (!intel_crtc->active)
-		return;
-
-	p->active = true;
-	p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal;
-	p->pixel_rate = ilk_pipe_pixel_rate(intel_crtc->config);
-
-	if (crtc->primary->state->fb)
-		p->pri.bytes_per_pixel =
-			crtc->primary->state->fb->bits_per_pixel / 8;
-	else
-		p->pri.bytes_per_pixel = 4;
-
-	p->cur.bytes_per_pixel = 4;
-	/*
-	 * TODO: for now, assume primary and cursor planes are always enabled.
-	 * Setting them to false makes the screen flicker.
-	 */
-	p->pri.enabled = true;
-	p->cur.enabled = true;
-
-	p->pri.horiz_pixels = intel_crtc->config->pipe_src_w;
-	p->cur.horiz_pixels = intel_crtc->base.cursor->state->crtc_w;
-
-	drm_for_each_legacy_plane(plane, dev) {
-		struct intel_plane *intel_plane = to_intel_plane(plane);
-
-		if (intel_plane->pipe == pipe) {
-			p->spr = intel_plane->wm;
-			break;
-		}
-	}
-}
-
 static void ilk_compute_wm_config(struct drm_device *dev,
 				  struct intel_wm_config *config)
 {
@@ -2387,34 +2293,47 @@ static void ilk_compute_wm_config(struct drm_device *dev,
 }
 
 /* Compute new watermarks for the pipe */
-static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
-				  const struct ilk_pipe_wm_parameters *params,
+static bool intel_compute_pipe_wm(struct intel_crtc_state *cstate,
 				  struct intel_pipe_wm *pipe_wm)
 {
+	struct drm_crtc *crtc = cstate->base.crtc;
 	struct drm_device *dev = crtc->dev;
 	const struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_plane *intel_plane;
+	struct intel_plane_state *sprstate = NULL;
 	int level, max_level = ilk_wm_max_level(dev);
 	/* LP0 watermark maximums depend on this pipe alone */
 	struct intel_wm_config config = {
 		.num_pipes_active = 1,
-		.sprites_enabled = params->spr.enabled,
-		.sprites_scaled = params->spr.scaled,
 	};
 	struct ilk_wm_maximums max;
 
-	pipe_wm->pipe_enabled = params->active;
-	pipe_wm->sprites_enabled = params->spr.enabled;
-	pipe_wm->sprites_scaled = params->spr.scaled;
+	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
+		if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY) {
+			sprstate = to_intel_plane_state(intel_plane->base.state);
+			break;
+		}
+	}
+
+	config.sprites_enabled = sprstate->visible;
+	config.sprites_scaled = sprstate->visible &&
+		(drm_rect_width(&sprstate->dst) != drm_rect_width(&sprstate->src) >> 16 ||
+		 drm_rect_height(&sprstate->dst) != drm_rect_height(&sprstate->src) >> 16);
+
+	pipe_wm->pipe_enabled = cstate->base.active;
+	pipe_wm->sprites_enabled = sprstate->visible;
+	pipe_wm->sprites_scaled = config.sprites_scaled;
 
 	/* ILK/SNB: LP2+ watermarks only w/o sprites */
-	if (INTEL_INFO(dev)->gen <= 6 && params->spr.enabled)
+	if (INTEL_INFO(dev)->gen <= 6 && sprstate->visible)
 		max_level = 1;
 
 	/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
-	if (params->spr.scaled)
+	if (config.sprites_scaled)
 		max_level = 0;
 
-	ilk_compute_wm_level(dev_priv, 0, params, &pipe_wm->wm[0]);
+	ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate, &pipe_wm->wm[0]);
 
 	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
 		pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
@@ -2431,7 +2350,7 @@ static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
 	for (level = 1; level <= max_level; level++) {
 		struct intel_wm_level wm = {};
 
-		ilk_compute_wm_level(dev_priv, level, params, &wm);
+		ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate, &wm);
 
 		/*
 		 * Disable any watermark level that exceeds the
@@ -2899,7 +2818,12 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
 	int plane;
 	u32 val;
 
+	memset(ddb, 0, sizeof(*ddb));
+
 	for_each_pipe(dev_priv, pipe) {
+		if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe)))
+			continue;
+
 		for_each_plane(dev_priv, pipe, plane) {
 			val = I915_READ(PLANE_BUF_CFG(pipe, plane));
 			skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane],
@@ -2907,7 +2831,8 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
 		}
 
 		val = I915_READ(CUR_BUF_CFG(pipe));
-		skl_ddb_entry_init_from_hw(&ddb->cursor[pipe], val);
+		skl_ddb_entry_init_from_hw(&ddb->plane[pipe][PLANE_CURSOR],
+					   val);
 	}
 }
 
@@ -2976,13 +2901,14 @@ skl_allocate_pipe_ddb(struct drm_crtc *crtc,
 	alloc_size = skl_ddb_entry_size(alloc);
 	if (alloc_size == 0) {
 		memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
-		memset(&ddb->cursor[pipe], 0, sizeof(ddb->cursor[pipe]));
+		memset(&ddb->plane[pipe][PLANE_CURSOR], 0,
+		       sizeof(ddb->plane[pipe][PLANE_CURSOR]));
 		return;
 	}
 
 	cursor_blocks = skl_cursor_allocation(config);
-	ddb->cursor[pipe].start = alloc->end - cursor_blocks;
-	ddb->cursor[pipe].end = alloc->end;
+	ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - cursor_blocks;
+	ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;
 
 	alloc_size -= cursor_blocks;
 	alloc->end -= cursor_blocks;
@@ -3121,8 +3047,8 @@ static bool skl_ddb_allocation_changed(const struct skl_ddb_allocation *new_ddb,
 		   sizeof(new_ddb->plane[pipe])))
 		return true;
 
-	if (memcmp(&new_ddb->cursor[pipe], &cur_ddb->cursor[pipe],
-		   sizeof(new_ddb->cursor[pipe])))
+	if (memcmp(&new_ddb->plane[pipe][PLANE_CURSOR], &cur_ddb->plane[pipe][PLANE_CURSOR],
+		   sizeof(new_ddb->plane[pipe][PLANE_CURSOR])))
 		return true;
 
 	return false;
@@ -3166,7 +3092,8 @@ static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
 		if (fb) {
 			p->plane[0].enabled = true;
 			p->plane[0].bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ?
-				drm_format_plane_cpp(fb->pixel_format, 1) : fb->bits_per_pixel / 8;
+				drm_format_plane_cpp(fb->pixel_format, 1) :
+				drm_format_plane_cpp(fb->pixel_format, 0);
 			p->plane[0].y_bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ?
 				drm_format_plane_cpp(fb->pixel_format, 0) : 0;
 			p->plane[0].tiling = fb->modifier[0];
@@ -3181,17 +3108,17 @@ static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
 		p->plane[0].rotation = crtc->primary->state->rotation;
 
 		fb = crtc->cursor->state->fb;
-		p->cursor.y_bytes_per_pixel = 0;
+		p->plane[PLANE_CURSOR].y_bytes_per_pixel = 0;
 		if (fb) {
-			p->cursor.enabled = true;
-			p->cursor.bytes_per_pixel = fb->bits_per_pixel / 8;
-			p->cursor.horiz_pixels = crtc->cursor->state->crtc_w;
-			p->cursor.vert_pixels = crtc->cursor->state->crtc_h;
+			p->plane[PLANE_CURSOR].enabled = true;
+			p->plane[PLANE_CURSOR].bytes_per_pixel = fb->bits_per_pixel / 8;
+			p->plane[PLANE_CURSOR].horiz_pixels = crtc->cursor->state->crtc_w;
+			p->plane[PLANE_CURSOR].vert_pixels = crtc->cursor->state->crtc_h;
 		} else {
-			p->cursor.enabled = false;
-			p->cursor.bytes_per_pixel = 0;
-			p->cursor.horiz_pixels = 64;
-			p->cursor.vert_pixels = 64;
+			p->plane[PLANE_CURSOR].enabled = false;
+			p->plane[PLANE_CURSOR].bytes_per_pixel = 0;
+			p->plane[PLANE_CURSOR].horiz_pixels = 64;
+			p->plane[PLANE_CURSOR].vert_pixels = 64;
 		}
 	}
 
@@ -3305,11 +3232,12 @@ static void skl_compute_wm_level(const struct drm_i915_private *dev_priv,
 						&result->plane_res_l[i]);
 	}
 
-	ddb_blocks = skl_ddb_entry_size(&ddb->cursor[pipe]);
-	result->cursor_en = skl_compute_plane_wm(dev_priv, p, &p->cursor,
+	ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][PLANE_CURSOR]);
+	result->plane_en[PLANE_CURSOR] = skl_compute_plane_wm(dev_priv, p,
+						 &p->plane[PLANE_CURSOR],
 						 ddb_blocks, level,
-						 &result->cursor_res_b,
-						 &result->cursor_res_l);
+						 &result->plane_res_b[PLANE_CURSOR],
+						 &result->plane_res_l[PLANE_CURSOR]);
 }
 
 static uint32_t
@@ -3337,7 +3265,7 @@ static void skl_compute_transition_wm(struct drm_crtc *crtc,
 	/* Until we know more, just disable transition WMs */
 	for (i = 0; i < intel_num_planes(intel_crtc); i++)
 		trans_wm->plane_en[i] = false;
-	trans_wm->cursor_en = false;
+	trans_wm->plane_en[PLANE_CURSOR] = false;
 }
 
 static void skl_compute_pipe_wm(struct drm_crtc *crtc,
@@ -3386,13 +3314,13 @@ static void skl_compute_wm_results(struct drm_device *dev,
 
 		temp = 0;
 
-		temp |= p_wm->wm[level].cursor_res_l << PLANE_WM_LINES_SHIFT;
-		temp |= p_wm->wm[level].cursor_res_b;
+		temp |= p_wm->wm[level].plane_res_l[PLANE_CURSOR] << PLANE_WM_LINES_SHIFT;
+		temp |= p_wm->wm[level].plane_res_b[PLANE_CURSOR];
 
-		if (p_wm->wm[level].cursor_en)
+		if (p_wm->wm[level].plane_en[PLANE_CURSOR])
 			temp |= PLANE_WM_EN;
 
-		r->cursor[pipe][level] = temp;
+		r->plane[pipe][PLANE_CURSOR][level] = temp;
 
 	}
 
@@ -3408,12 +3336,12 @@ static void skl_compute_wm_results(struct drm_device *dev,
 	}
 
 	temp = 0;
-	temp |= p_wm->trans_wm.cursor_res_l << PLANE_WM_LINES_SHIFT;
-	temp |= p_wm->trans_wm.cursor_res_b;
-	if (p_wm->trans_wm.cursor_en)
+	temp |= p_wm->trans_wm.plane_res_l[PLANE_CURSOR] << PLANE_WM_LINES_SHIFT;
+	temp |= p_wm->trans_wm.plane_res_b[PLANE_CURSOR];
+	if (p_wm->trans_wm.plane_en[PLANE_CURSOR])
 		temp |= PLANE_WM_EN;
 
-	r->cursor_trans[pipe] = temp;
+	r->plane_trans[pipe][PLANE_CURSOR] = temp;
 
 	r->wm_linetime[pipe] = p_wm->linetime;
 }
@@ -3447,12 +3375,13 @@ static void skl_write_wm_values(struct drm_i915_private *dev_priv,
 			I915_WRITE(PLANE_WM(pipe, i, level),
 				   new->plane[pipe][i][level]);
 			I915_WRITE(CUR_WM(pipe, level),
-				   new->cursor[pipe][level]);
+				   new->plane[pipe][PLANE_CURSOR][level]);
 		}
 		for (i = 0; i < intel_num_planes(crtc); i++)
 			I915_WRITE(PLANE_WM_TRANS(pipe, i),
 				   new->plane_trans[pipe][i]);
-		I915_WRITE(CUR_WM_TRANS(pipe), new->cursor_trans[pipe]);
+		I915_WRITE(CUR_WM_TRANS(pipe),
+			   new->plane_trans[pipe][PLANE_CURSOR]);
 
 		for (i = 0; i < intel_num_planes(crtc); i++) {
 			skl_ddb_entry_write(dev_priv,
@@ -3464,7 +3393,7 @@ static void skl_write_wm_values(struct drm_i915_private *dev_priv,
 		}
 
 		skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
-				    &new->ddb.cursor[pipe]);
+				    &new->ddb.plane[pipe][PLANE_CURSOR]);
 	}
 }
 
@@ -3672,6 +3601,26 @@ static void skl_update_other_pipe_wm(struct drm_device *dev,
 	}
 }
 
+static void skl_clear_wm(struct skl_wm_values *watermarks, enum pipe pipe)
+{
+	watermarks->wm_linetime[pipe] = 0;
+	memset(watermarks->plane[pipe], 0,
+	       sizeof(uint32_t) * 8 * I915_MAX_PLANES);
+	memset(watermarks->plane_trans[pipe],
+	       0, sizeof(uint32_t) * I915_MAX_PLANES);
+	watermarks->plane_trans[pipe][PLANE_CURSOR] = 0;
+
+	/* Clear ddb entries for pipe */
+	memset(&watermarks->ddb.pipe[pipe], 0, sizeof(struct skl_ddb_entry));
+	memset(&watermarks->ddb.plane[pipe], 0,
+	       sizeof(struct skl_ddb_entry) * I915_MAX_PLANES);
+	memset(&watermarks->ddb.y_plane[pipe], 0,
+	       sizeof(struct skl_ddb_entry) * I915_MAX_PLANES);
+	memset(&watermarks->ddb.plane[pipe][PLANE_CURSOR], 0,
+	       sizeof(struct skl_ddb_entry));
+
+}
+
 static void skl_update_wm(struct drm_crtc *crtc)
 {
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -3682,7 +3631,11 @@ static void skl_update_wm(struct drm_crtc *crtc)
 	struct skl_pipe_wm pipe_wm = {};
 	struct intel_wm_config config = {};
 
-	memset(results, 0, sizeof(*results));
+
+	/* Clear all dirty flags */
+	memset(results->dirty, 0, sizeof(bool) * I915_MAX_PIPES);
+
+	skl_clear_wm(results, intel_crtc->pipe);
 
 	skl_compute_wm_global_parameters(dev, &config);
 
@@ -3737,19 +3690,19 @@ skl_update_sprite_wm(struct drm_plane *plane, struct drm_crtc *crtc,
 static void ilk_update_wm(struct drm_crtc *crtc)
 {
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct ilk_wm_maximums max;
-	struct ilk_pipe_wm_parameters params = {};
 	struct ilk_wm_values results = {};
 	enum intel_ddb_partitioning partitioning;
 	struct intel_pipe_wm pipe_wm = {};
 	struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
 	struct intel_wm_config config = {};
 
-	ilk_compute_wm_parameters(crtc, &params);
+	WARN_ON(cstate->base.active != intel_crtc->active);
 
-	intel_compute_pipe_wm(crtc, &params, &pipe_wm);
+	intel_compute_pipe_wm(cstate, &pipe_wm);
 
 	if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm)))
 		return;
@@ -3789,12 +3742,6 @@ ilk_update_sprite_wm(struct drm_plane *plane,
 	struct drm_device *dev = plane->dev;
 	struct intel_plane *intel_plane = to_intel_plane(plane);
 
-	intel_plane->wm.enabled = enabled;
-	intel_plane->wm.scaled = scaled;
-	intel_plane->wm.horiz_pixels = sprite_width;
-	intel_plane->wm.vert_pixels = sprite_width;
-	intel_plane->wm.bytes_per_pixel = pixel_size;
-
 	/*
 	 * IVB workaround: must disable low power watermarks for at least
 	 * one frame before enabling scaling. LP watermarks can be re-enabled
@@ -3826,10 +3773,10 @@ static void skl_pipe_wm_active_state(uint32_t val,
 				(val >> PLANE_WM_LINES_SHIFT) &
 				PLANE_WM_LINES_MASK;
 	} else {
-		active->wm[level].cursor_en = is_enabled;
-		active->wm[level].cursor_res_b =
+		active->wm[level].plane_en[PLANE_CURSOR] = is_enabled;
+		active->wm[level].plane_res_b[PLANE_CURSOR] =
 			val & PLANE_WM_BLOCKS_MASK;
-		active->wm[level].cursor_res_l =
+		active->wm[level].plane_res_l[PLANE_CURSOR] =
 			(val >> PLANE_WM_LINES_SHIFT) &
 			PLANE_WM_LINES_MASK;
 	}
@@ -3842,10 +3789,10 @@ static void skl_pipe_wm_active_state(uint32_t val,
 				(val >> PLANE_WM_LINES_SHIFT) &
 				PLANE_WM_LINES_MASK;
 	} else {
-		active->trans_wm.cursor_en = is_enabled;
-		active->trans_wm.cursor_res_b =
+		active->trans_wm.plane_en[PLANE_CURSOR] = is_enabled;
+		active->trans_wm.plane_res_b[PLANE_CURSOR] =
 			val & PLANE_WM_BLOCKS_MASK;
-		active->trans_wm.cursor_res_l =
+		active->trans_wm.plane_res_l[PLANE_CURSOR] =
 			(val >> PLANE_WM_LINES_SHIFT) &
 			PLANE_WM_LINES_MASK;
 	}
@@ -3871,12 +3818,12 @@ static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
 		for (i = 0; i < intel_num_planes(intel_crtc); i++)
 			hw->plane[pipe][i][level] =
 				I915_READ(PLANE_WM(pipe, i, level));
-		hw->cursor[pipe][level] = I915_READ(CUR_WM(pipe, level));
+		hw->plane[pipe][PLANE_CURSOR][level] = I915_READ(CUR_WM(pipe, level));
 	}
 
 	for (i = 0; i < intel_num_planes(intel_crtc); i++)
 		hw->plane_trans[pipe][i] = I915_READ(PLANE_WM_TRANS(pipe, i));
-	hw->cursor_trans[pipe] = I915_READ(CUR_WM_TRANS(pipe));
+	hw->plane_trans[pipe][PLANE_CURSOR] = I915_READ(CUR_WM_TRANS(pipe));
 
 	if (!intel_crtc->active)
 		return;
@@ -3891,7 +3838,7 @@ static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
 			skl_pipe_wm_active_state(temp, active, false,
 						false, i, level);
 		}
-		temp = hw->cursor[pipe][level];
+		temp = hw->plane[pipe][PLANE_CURSOR][level];
 		skl_pipe_wm_active_state(temp, active, false, true, i, level);
 	}
 
@@ -3900,7 +3847,7 @@ static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
 		skl_pipe_wm_active_state(temp, active, true, false, i, 0);
 	}
 
-	temp = hw->cursor_trans[pipe];
+	temp = hw->plane_trans[pipe][PLANE_CURSOR];
 	skl_pipe_wm_active_state(temp, active, true, true, i, 0);
 }
 
@@ -4261,7 +4208,7 @@ static void ironlake_enable_drps(struct drm_device *dev)
 	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
 		MEMMODE_FSTART_SHIFT;
 
-	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
+	vstart = (I915_READ(PXVFREQ(fstart)) & PXVFREQ_PX_MASK) >>
 		PXVFREQ_PX_SHIFT;
 
 	dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
@@ -4292,10 +4239,10 @@ static void ironlake_enable_drps(struct drm_device *dev)
 
 	ironlake_set_drps(dev, fstart);
 
-	dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
-		I915_READ(0x112e0);
+	dev_priv->ips.last_count1 = I915_READ(DMIEC) +
+		I915_READ(DDREC) + I915_READ(CSIEC);
 	dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
-	dev_priv->ips.last_count2 = I915_READ(0x112f4);
+	dev_priv->ips.last_count2 = I915_READ(GFXEC);
 	dev_priv->ips.last_time2 = ktime_get_raw_ns();
 
 	spin_unlock_irq(&mchdev_lock);
@@ -4466,6 +4413,10 @@ static void gen6_set_rps(struct drm_device *dev, u8 val)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	/* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
+	if (IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0))
+		return;
+
 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 	WARN_ON(val > dev_priv->rps.max_freq);
 	WARN_ON(val < dev_priv->rps.min_freq);
@@ -4786,6 +4737,12 @@ static void gen9_enable_rps(struct drm_device *dev)
 
 	gen6_init_rps_frequencies(dev);
 
+	/* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
+	if (IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) {
+		intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+		return;
+	}
+
 	/* Program defaults and thresholds for RPS*/
 	I915_WRITE(GEN6_RC_VIDEO_FREQ,
 		   GEN9_FREQUENCY(dev_priv->rps.rp1_freq));
@@ -4823,13 +4780,22 @@ static void gen9_enable_rc6(struct drm_device *dev)
 	I915_WRITE(GEN6_RC_CONTROL, 0);
 
 	/* 2b: Program RC6 thresholds.*/
-	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
+
+	/* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */
+	if (IS_SKYLAKE(dev) && !((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) &&
+				 (INTEL_REVID(dev) <= SKL_REVID_E0)))
+		I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
+	else
+		I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
 	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
 	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
 	for_each_ring(ring, dev_priv, unused)
 		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+
+	if (HAS_GUC_UCODE(dev))
+		I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
+
 	I915_WRITE(GEN6_RC_SLEEP, 0);
-	I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
 
 	/* 2c: Program Coarse Power Gating Policies. */
 	I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 25);
@@ -4840,17 +4806,30 @@ static void gen9_enable_rc6(struct drm_device *dev)
 		rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
 	DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
 			"on" : "off");
-	I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
-				   GEN6_RC_CTL_EI_MODE(1) |
-				   rc6_mask);
+	/* WaRsUseTimeoutMode */
+	if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_D0) ||
+	    (IS_BROXTON(dev) && INTEL_REVID(dev) <= BXT_REVID_A0)) {
+		I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */
+		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
+			   GEN7_RC_CTL_TO_MODE |
+			   rc6_mask);
+	} else {
+		I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
+		I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
+			   GEN6_RC_CTL_EI_MODE(1) |
+			   rc6_mask);
+	}
 
 	/*
 	 * 3b: Enable Coarse Power Gating only when RC6 is enabled.
-	 * WaDisableRenderPowerGating:skl,bxt - Render PG need to be disabled with RC6.
+	 * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
 	 */
-	I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
-			GEN9_MEDIA_PG_ENABLE : 0);
-
+	if ((IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) ||
+	    ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && (INTEL_REVID(dev) <= SKL_REVID_E0)))
+		I915_WRITE(GEN9_PG_ENABLE, 0);
+	else
+		I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
+				(GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0);
 
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 
@@ -5148,32 +5127,27 @@ static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
 	struct drm_device *dev = dev_priv->dev;
 	u32 val, rp0;
 
-	if (dev->pdev->revision >= 0x20) {
-		val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
+	val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
 
 	switch (INTEL_INFO(dev)->eu_total) {
 	case 8:
		/* (2 * 4) config */
 		rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
 		break;
 	case 12:
		/* (2 * 6) config */
 		rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT);
 		break;
 	case 16:
		/* (2 * 8) config */
 	default:
 		/* Setting (2 * 8) Min RP0 for any other combination */
 		rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT);
 		break;
-		}
-		rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK);
-	} else {
-		/* For pre-production hardware */
-		val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
-		rp0 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) &
-		       PUNIT_GPU_STATUS_MAX_FREQ_MASK;
 	}
+
+	rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK);
+
 	return rp0;
 }
 
@@ -5189,18 +5163,11 @@ static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
 
 static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
 {
-	struct drm_device *dev = dev_priv->dev;
 	u32 val, rp1;
 
-	if (dev->pdev->revision >= 0x20) {
-		val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
-		rp1 = (val & FB_GFX_FREQ_FUSE_MASK);
-	} else {
-		/* For pre-production hardware */
-		val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
-		rp1 = ((val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) &
-		       PUNIT_GPU_STATUS_MAX_FREQ_MASK);
-	}
+	val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
+	rp1 = (val & FB_GFX_FREQ_FUSE_MASK);
+
 	return rp1;
 }
 
@@ -5415,25 +5382,10 @@ static void cherryview_init_gt_powersave(struct drm_device *dev)
 	mutex_unlock(&dev_priv->sb_lock);
 
 	switch ((val >> 2) & 0x7) {
-	case 0:
-	case 1:
-		dev_priv->rps.cz_freq = 200;
-		dev_priv->mem_freq = 1600;
-		break;
-	case 2:
-		dev_priv->rps.cz_freq = 267;
-		dev_priv->mem_freq = 1600;
-		break;
 	case 3:
-		dev_priv->rps.cz_freq = 333;
 		dev_priv->mem_freq = 2000;
 		break;
-	case 4:
-		dev_priv->rps.cz_freq = 320;
-		dev_priv->mem_freq = 1600;
-		break;
-	case 5:
-		dev_priv->rps.cz_freq = 400;
+	default:
 		dev_priv->mem_freq = 1600;
 		break;
 	}
@@ -5565,7 +5517,7 @@ static void cherryview_enable_rps(struct drm_device *dev)
 	/* RPS code assumes GPLL is used */
 	WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
 
-	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & GPLLENABLE ? "yes" : "no");
+	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
 	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
 
 	dev_priv->rps.cur_freq = (val >> 8) & 0xff;
@@ -5655,7 +5607,7 @@ static void valleyview_enable_rps(struct drm_device *dev)
 	/* RPS code assumes GPLL is used */
 	WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
 
-	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & GPLLENABLE ? "yes" : "no");
+	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
 	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
 
 	dev_priv->rps.cur_freq = (val >> 8) & 0xff;
@@ -5864,7 +5816,7 @@ static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
 
 	assert_spin_locked(&mchdev_lock);
 
-	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_freq * 4));
+	pxvid = I915_READ(PXVFREQ(dev_priv->rps.cur_freq));
 	pxvid = (pxvid >> 24) & 0x7f;
 	ext_v = pvid_to_extvid(dev_priv, pxvid);
 
@@ -6107,13 +6059,13 @@ static void intel_init_emon(struct drm_device *dev)
 	I915_WRITE(CSIEW2, 0x04000004);
 
 	for (i = 0; i < 5; i++)
-		I915_WRITE(PEW + (i * 4), 0);
+		I915_WRITE(PEW(i), 0);
 	for (i = 0; i < 3; i++)
-		I915_WRITE(DEW + (i * 4), 0);
+		I915_WRITE(DEW(i), 0);
 
 	/* Program P-state weights to account for frequency power adjustment */
 	for (i = 0; i < 16; i++) {
-		u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
+		u32 pxvidfreq = I915_READ(PXVFREQ(i));
 		unsigned long freq = intel_pxfreq(pxvidfreq);
 		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
 			PXVFREQ_PX_SHIFT;
@@ -6134,7 +6086,7 @@ static void intel_init_emon(struct drm_device *dev)
 	for (i = 0; i < 4; i++) {
 		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
 			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
-		I915_WRITE(PXW + (i * 4), val);
+		I915_WRITE(PXW(i), val);
 	}
 
 	/* Adjust magic regs to magic values (more experimental results) */
@@ -6150,7 +6102,7 @@ static void intel_init_emon(struct drm_device *dev)
 	I915_WRITE(EG7, 0);
 
 	for (i = 0; i < 8; i++)
-		I915_WRITE(PXWL + (i * 4), 0);
+		I915_WRITE(PXWL(i), 0);
 
 	/* Enable PMON + select events */
 	I915_WRITE(ECR, 0x80000019);
@@ -6604,14 +6556,14 @@ static void lpt_init_clock_gating(struct drm_device *dev)
 	 * TODO: this bit should only be enabled when really needed, then
 	 * disabled when not needed anymore in order to save power.
 	 */
-	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
+	if (HAS_PCH_LPT_LP(dev))
 		I915_WRITE(SOUTH_DSPCLK_GATE_D,
 			   I915_READ(SOUTH_DSPCLK_GATE_D) |
 			   PCH_LP_PARTITION_LEVEL_DISABLE);
 
 	/* WADPOClockGatingDisable:hsw */
-	I915_WRITE(_TRANSA_CHICKEN1,
-		   I915_READ(_TRANSA_CHICKEN1) |
+	I915_WRITE(TRANS_CHICKEN1(PIPE_A),
+		   I915_READ(TRANS_CHICKEN1(PIPE_A)) |
 		   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
 }
 
@@ -6619,7 +6571,7 @@ static void lpt_suspend_hw(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
+	if (HAS_PCH_LPT_LP(dev)) {
 		uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
 
 		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
@@ -7105,9 +7057,6 @@ void intel_init_pm(struct drm_device *dev)
 		if (IS_BROXTON(dev))
 			dev_priv->display.init_clock_gating =
 				bxt_init_clock_gating;
-		else if (IS_SKYLAKE(dev))
-			dev_priv->display.init_clock_gating =
-				skl_init_clock_gating;
 		dev_priv->display.update_wm = skl_update_wm;
 		dev_priv->display.update_sprite_wm = skl_update_sprite_wm;
 	} else if (HAS_PCH_SPLIT(dev)) {
@@ -7260,7 +7209,7 @@ static int vlv_gpu_freq_div(unsigned int czclk_freq)
 
 static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
 {
-	int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->mem_freq, 4);
+	int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);
 
 	div = vlv_gpu_freq_div(czclk_freq);
 	if (div < 0)
@@ -7271,7 +7220,7 @@ static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
 
 static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
 {
-	int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->mem_freq, 4);
+	int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);
 
 	mul = vlv_gpu_freq_div(czclk_freq);
 	if (mul < 0)
@@ -7282,7 +7231,7 @@ static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
 
 static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
 {
-	int div, czclk_freq = dev_priv->rps.cz_freq;
+	int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);
 
 	div = vlv_gpu_freq_div(czclk_freq) / 2;
 	if (div < 0)
@@ -7293,7 +7242,7 @@ static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
 
 static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
 {
-	int mul, czclk_freq = dev_priv->rps.cz_freq;
+	int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);
 
 	mul = vlv_gpu_freq_div(czclk_freq) / 2;
 	if (mul < 0)