Diffstat (limited to 'drivers/gpu/drm/i915/intel_pm.c')

 drivers/gpu/drm/i915/intel_pm.c | 174 +++++++++---------------
 1 file changed, 63 insertions(+), 111 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index f4a4e9496893..4d2cd432f739 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -75,9 +75,6 @@ static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
 	I915_WRITE(CHICKEN_PAR1_1,
 		   I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
 
-	I915_WRITE(GEN8_CONFIG0,
-		   I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES);
-
 	/* WaEnableChickenDCPR:skl,bxt,kbl,glk,cfl */
 	I915_WRITE(GEN8_CHICKEN_DCPR_1,
 		   I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
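Almost every line this series touches in the clock-gating code is the same read-modify-write idiom on an MMIO register. A minimal illustrative helper, not part of the driver (I915_READ/I915_WRITE are the driver's existing MMIO accessors):

	static inline void i915_set_bits(struct drm_i915_private *dev_priv,
					 i915_reg_t reg, u32 bits)
	{
		/* read-modify-write: OR the requested bits into the register */
		I915_WRITE(reg, I915_READ(reg) | bits);
	}

With it, the surviving WaEnableChickenDCPR write above would read i915_set_bits(dev_priv, GEN8_CHICKEN_DCPR_1, MASK_WAKEMEM).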
@@ -124,7 +121,6 @@ static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
 
 static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
 {
-	u32 val;
 	gen9_init_clock_gating(dev_priv);
 
 	/*
@@ -144,11 +140,6 @@ static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
 		I915_WRITE(CHICKEN_MISC_2, val);
 	}
 
-	/* Display WA #1133: WaFbcSkipSegments:glk */
-	val = I915_READ(ILK_DPFC_CHICKEN);
-	val &= ~GLK_SKIP_SEG_COUNT_MASK;
-	val |= GLK_SKIP_SEG_EN | GLK_SKIP_SEG_COUNT(1);
-	I915_WRITE(ILK_DPFC_CHICKEN, val);
 }
 
 static void i915_pineview_get_mem_freq(struct drm_i915_private *dev_priv)
@@ -928,7 +919,7 @@ static void pineview_update_wm(struct intel_crtc *unused_crtc)
  * and the size of 8 whole lines. This adjustment is always performed
  * in the actual pixel depth regardless of whether FBC is enabled or not."
  */
-static int g4x_tlb_miss_wa(int fifo_size, int width, int cpp)
+static unsigned int g4x_tlb_miss_wa(int fifo_size, int width, int cpp)
 {
 	int tlb_miss = fifo_size * 64 - width * cpp * 8;
 
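For context, the rest of this helper (reconstructed from the upstream tree of this era; only the prototype is part of the hunk): the slack is clamped at zero, which is also why the switch to an unsigned return type is safe.

	static unsigned int g4x_tlb_miss_wa(int fifo_size, int width, int cpp)
	{
		int tlb_miss = fifo_size * 64 - width * cpp * 8;

		/* only apply the adjustment when the FIFO is undersized */
		return max(0, tlb_miss);
	}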
@@ -1105,8 +1096,8 @@ static uint16_t g4x_compute_wm(const struct intel_crtc_state *crtc_state,
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	const struct drm_display_mode *adjusted_mode =
 		&crtc_state->base.adjusted_mode;
-	int clock, htotal, cpp, width, wm;
-	int latency = dev_priv->wm.pri_latency[level] * 10;
+	unsigned int latency = dev_priv->wm.pri_latency[level] * 10;
+	unsigned int clock, htotal, cpp, width, wm;
 
 	if (latency == 0)
 		return USHRT_MAX;
@@ -1145,7 +1136,7 @@ static uint16_t g4x_compute_wm(const struct intel_crtc_state *crtc_state,
 	    level == G4X_WM_LEVEL_NORMAL) {
 		wm = intel_wm_method1(clock, cpp, latency);
 	} else {
-		int small, large;
+		unsigned int small, large;
 
 		small = intel_wm_method1(clock, cpp, latency);
 		large = intel_wm_method2(clock, htotal, width, cpp, latency);
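intel_wm_method1/method2 are plain rate-times-latency computations; a sketch of their shape, reconstructed from the upstream tree of this era rather than from this diff (method 2 quantizes to whole lines, which is why it also needs htotal and width):

	/* method 1: bytes fetched during the latency window */
	static unsigned int intel_wm_method1(unsigned int pixel_rate,
					     unsigned int cpp,
					     unsigned int latency)
	{
		u64 ret = (u64)pixel_rate * cpp * latency;

		return DIV_ROUND_UP_ULL(ret, 10000);
	}

	/* method 2: lines scanned out during the latency window, in bytes */
	static unsigned int intel_wm_method2(unsigned int pixel_rate,
					     unsigned int htotal,
					     unsigned int width,
					     unsigned int cpp,
					     unsigned int latency)
	{
		unsigned int ret;

		ret = (latency * pixel_rate) / (htotal * 10000);
		ret = (ret + 1) * width * cpp;

		return ret;
	}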
@@ -1158,7 +1149,7 @@ static uint16_t g4x_compute_wm(const struct intel_crtc_state *crtc_state,
 
 	wm = DIV_ROUND_UP(wm, 64) + 2;
 
-	return min_t(int, wm, USHRT_MAX);
+	return min_t(unsigned int, wm, USHRT_MAX);
 }
 
 static bool g4x_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
@@ -1409,17 +1400,29 @@ static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state)
 
 static int g4x_compute_intermediate_wm(struct drm_device *dev,
 				       struct intel_crtc *crtc,
-				       struct intel_crtc_state *crtc_state)
+				       struct intel_crtc_state *new_crtc_state)
 {
-	struct g4x_wm_state *intermediate = &crtc_state->wm.g4x.intermediate;
-	const struct g4x_wm_state *optimal = &crtc_state->wm.g4x.optimal;
-	const struct g4x_wm_state *active = &crtc->wm.active.g4x;
+	struct g4x_wm_state *intermediate = &new_crtc_state->wm.g4x.intermediate;
+	const struct g4x_wm_state *optimal = &new_crtc_state->wm.g4x.optimal;
+	struct intel_atomic_state *intel_state =
+		to_intel_atomic_state(new_crtc_state->base.state);
+	const struct intel_crtc_state *old_crtc_state =
+		intel_atomic_get_old_crtc_state(intel_state, crtc);
+	const struct g4x_wm_state *active = &old_crtc_state->wm.g4x.optimal;
 	enum plane_id plane_id;
 
-	intermediate->cxsr = optimal->cxsr && active->cxsr &&
-		!crtc_state->disable_cxsr;
-	intermediate->hpll_en = optimal->hpll_en && active->hpll_en &&
-		!crtc_state->disable_cxsr;
+	if (!new_crtc_state->base.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->base)) {
+		*intermediate = *optimal;
+
+		intermediate->cxsr = false;
+		intermediate->hpll_en = false;
+		goto out;
+	}
+
+	intermediate->cxsr = optimal->cxsr && active->cxsr &&
+		!new_crtc_state->disable_cxsr;
+	intermediate->hpll_en = optimal->hpll_en && active->hpll_en &&
+		!new_crtc_state->disable_cxsr;
 	intermediate->fbc_en = optimal->fbc_en && active->fbc_en;
 
 	for_each_plane_id_on_crtc(crtc, plane_id) {
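The unchanged remainder of the function merges old and new plane watermarks by taking the worse (larger) of the two, so the intermediate state stays safe on both sides of the flip. Roughly (reconstructed for context, not part of this hunk; the sr and hpll fields are merged the same way):

	for_each_plane_id_on_crtc(crtc, plane_id) {
		intermediate->wm.plane[plane_id] =
			max(optimal->wm.plane[plane_id],
			    active->wm.plane[plane_id]);
	}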
@@ -1461,12 +1464,13 @@ static int g4x_compute_intermediate_wm(struct drm_device *dev,
 	WARN_ON(intermediate->hpll.fbc > g4x_fbc_fifo_size(2) &&
 		intermediate->fbc_en && intermediate->hpll_en);
 
+out:
 	/*
 	 * If our intermediate WM are identical to the final WM, then we can
 	 * omit the post-vblank programming; only update if it's different.
 	 */
 	if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
-		crtc_state->wm.need_postvbl_update = true;
+		new_crtc_state->wm.need_postvbl_update = true;
 
 	return 0;
 }
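need_postvbl_update is consumed after the vblank that latches the new state; a hedged sketch of the consumer (its shape is assumed from the comment above, not shown in this diff):

	/* post-vblank optimize step */
	if (crtc_state->wm.need_postvbl_update) {
		crtc->wm.active.g4x = crtc_state->wm.g4x.optimal;
		g4x_program_watermarks(dev_priv);
	}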
@@ -1602,7 +1606,7 @@ static uint16_t vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
 	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
 	const struct drm_display_mode *adjusted_mode =
 		&crtc_state->base.adjusted_mode;
-	int clock, htotal, cpp, width, wm;
+	unsigned int clock, htotal, cpp, width, wm;
 
 	if (dev_priv->wm.pri_latency[level] == 0)
 		return USHRT_MAX;
@@ -1628,7 +1632,7 @@ static uint16_t vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
 				dev_priv->wm.pri_latency[level] * 10);
 	}
 
-	return min_t(int, wm, USHRT_MAX);
+	return min_t(unsigned int, wm, USHRT_MAX);
 }
 
 static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes)
@@ -2029,16 +2033,27 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
 
 static int vlv_compute_intermediate_wm(struct drm_device *dev,
 				       struct intel_crtc *crtc,
-				       struct intel_crtc_state *crtc_state)
+				       struct intel_crtc_state *new_crtc_state)
 {
-	struct vlv_wm_state *intermediate = &crtc_state->wm.vlv.intermediate;
-	const struct vlv_wm_state *optimal = &crtc_state->wm.vlv.optimal;
-	const struct vlv_wm_state *active = &crtc->wm.active.vlv;
+	struct vlv_wm_state *intermediate = &new_crtc_state->wm.vlv.intermediate;
+	const struct vlv_wm_state *optimal = &new_crtc_state->wm.vlv.optimal;
+	struct intel_atomic_state *intel_state =
+		to_intel_atomic_state(new_crtc_state->base.state);
+	const struct intel_crtc_state *old_crtc_state =
+		intel_atomic_get_old_crtc_state(intel_state, crtc);
+	const struct vlv_wm_state *active = &old_crtc_state->wm.vlv.optimal;
 	int level;
 
+	if (!new_crtc_state->base.active || drm_atomic_crtc_needs_modeset(&new_crtc_state->base)) {
+		*intermediate = *optimal;
+
+		intermediate->cxsr = false;
+		goto out;
+	}
+
 	intermediate->num_levels = min(optimal->num_levels, active->num_levels);
 	intermediate->cxsr = optimal->cxsr && active->cxsr &&
-		!crtc_state->disable_cxsr;
+		!new_crtc_state->disable_cxsr;
 
 	for (level = 0; level < intermediate->num_levels; level++) {
 		enum plane_id plane_id;
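As on the g4x side, the loop below merges the old and new states level by level; note that upstream merges with min() here rather than max(), matching the different encoding of VLV/CHV watermark values. Reconstructed for context, not part of this hunk:

	for_each_plane_id_on_crtc(crtc, plane_id) {
		intermediate->wm[level].plane[plane_id] =
			min(optimal->wm[level].plane[plane_id],
			    active->wm[level].plane[plane_id]);
	}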
@@ -2057,12 +2072,13 @@ static int vlv_compute_intermediate_wm(struct drm_device *dev,
 
 	vlv_invalidate_wms(crtc, intermediate, level);
 
+out:
 	/*
 	 * If our intermediate WM are identical to the final WM, then we can
 	 * omit the post-vblank programming; only update if it's different.
 	 */
 	if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
-		crtc_state->wm.need_postvbl_update = true;
+		new_crtc_state->wm.need_postvbl_update = true;
 
 	return 0;
 }
@@ -3930,6 +3946,7 @@ skl_pipe_downscale_amount(const struct intel_crtc_state *crtc_state)
 int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
 				  struct intel_crtc_state *cstate)
 {
+	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
 	struct drm_crtc_state *crtc_state = &cstate->base;
 	struct drm_atomic_state *state = crtc_state->state;
 	struct drm_plane *plane;
@@ -3972,7 +3989,7 @@ int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
 	crtc_clock = crtc_state->adjusted_mode.crtc_clock;
 	dotclk = to_intel_atomic_state(state)->cdclk.logical.cdclk;
 
-	if (IS_GEMINILAKE(to_i915(intel_crtc->base.dev)))
+	if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
 		dotclk *= 2;
 
 	pipe_max_pixel_rate = div_round_up_u32_fixed16(dotclk, pipe_downscale);
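The comparison runs in the driver's .16 fixed-point format (uint_fixed_16_16_t). A standalone sketch of the arithmetic including the new Gen10 branch; the helper name and sample numbers are hypothetical:

	static u32 max_pipe_pixel_rate(u32 cdclk_khz, u32 downscale_fp16,
				       bool glk_or_gen10_plus)
	{
		u64 dotclk = cdclk_khz;

		/* GLK and Gen10+ can drive the pipe at up to 2x cdclk */
		if (glk_or_gen10_plus)
			dotclk *= 2;

		/* round-up division by a .16 fixed-point downscale factor */
		return (u32)div64_u64((dotclk << 16) + downscale_fp16 - 1,
				      downscale_fp16);
	}

For example, with a logical cdclk of 316800 kHz and a total 1.5x downscale (0x18000 in .16), the pipe tops out at 633600 / 1.5 = 422400 kHz.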
@@ -6620,12 +6637,19 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
 	I915_WRITE(GEN6_RC_CONTROL, 0);
 
 	/* 2b: Program RC6 thresholds.*/
-
-	/* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */
-	if (IS_SKYLAKE(dev_priv))
+	if (INTEL_GEN(dev_priv) >= 10) {
+		I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
+		I915_WRITE(GEN10_MEDIA_WAKE_RATE_LIMIT, 150);
+	} else if (IS_SKYLAKE(dev_priv)) {
+		/*
+		 * WaRsDoubleRc6WrlWithCoarsePowerGating:skl Doubling WRL only
+		 * when CPG is enabled
+		 */
 		I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
-	else
+	} else {
 		I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
+	}
+
 	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
 	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
 	for_each_engine(engine, dev_priv, id)
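The field layout implied by the shifts above (assumed from the code, not from documentation): the RC6 wake rate limit sits in bits 31:16 of GEN6_RC6_WAKE_RATE_LIMIT, with gen10 also programming the low 16 bits and moving the media limit to its own register. As a packing helper:

	static inline u32 rc6_wake_rate_limit(u32 rc6_wrl, u32 low)
	{
		return rc6_wrl << 16 | low;
	}

	/*
	 * gen10+:       rc6_wake_rate_limit(54, 85), plus GEN10_MEDIA_WAKE_RATE_LIMIT
	 * skl (CPG WA): rc6_wake_rate_limit(108, 0), the doubled WRL
	 * otherwise:    rc6_wake_rate_limit(54, 0)
	 */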
@@ -7910,7 +7934,6 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
 		intel_runtime_pm_get(dev_priv);
 	}
 
-	mutex_lock(&dev_priv->drm.struct_mutex);
 	mutex_lock(&dev_priv->pcu_lock);
 
 	/* Initialize RPS limits (for userspace) */
@@ -7952,9 +7975,6 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
 	rps->boost_freq = rps->max_freq;
 
 	mutex_unlock(&dev_priv->pcu_lock);
-	mutex_unlock(&dev_priv->drm.struct_mutex);
-
-	intel_autoenable_gt_powersave(dev_priv);
 }
 
 void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
@@ -7979,9 +7999,6 @@ void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
 	if (INTEL_GEN(dev_priv) < 6)
 		return;
 
-	if (cancel_delayed_work_sync(&dev_priv->gt_pm.autoenable_work))
-		intel_runtime_pm_put(dev_priv);
-
 	/* gen6_rps_idle() will be called later to disable interrupts */
 }
 
@@ -8140,65 +8157,6 @@ void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
 	mutex_unlock(&dev_priv->pcu_lock);
 }
 
-static void __intel_autoenable_gt_powersave(struct work_struct *work)
-{
-	struct drm_i915_private *dev_priv =
-		container_of(work,
-			     typeof(*dev_priv),
-			     gt_pm.autoenable_work.work);
-	struct intel_engine_cs *rcs;
-	struct drm_i915_gem_request *req;
-
-	rcs = dev_priv->engine[RCS];
-	if (rcs->last_retired_context)
-		goto out;
-
-	if (!rcs->init_context)
-		goto out;
-
-	mutex_lock(&dev_priv->drm.struct_mutex);
-
-	req = i915_gem_request_alloc(rcs, dev_priv->kernel_context);
-	if (IS_ERR(req))
-		goto unlock;
-
-	if (!i915_modparams.enable_execlists && i915_switch_context(req) == 0)
-		rcs->init_context(req);
-
-	/* Mark the device busy, calling intel_enable_gt_powersave() */
-	i915_add_request(req);
-
-unlock:
-	mutex_unlock(&dev_priv->drm.struct_mutex);
-out:
-	intel_runtime_pm_put(dev_priv);
-}
-
-void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv)
-{
-	if (IS_IRONLAKE_M(dev_priv)) {
-		ironlake_enable_drps(dev_priv);
-		intel_init_emon(dev_priv);
-	} else if (INTEL_INFO(dev_priv)->gen >= 6) {
-		/*
-		 * PCU communication is slow and this doesn't need to be
-		 * done at any specific time, so do this out of our fast path
-		 * to make resume and init faster.
-		 *
-		 * We depend on the HW RC6 power context save/restore
-		 * mechanism when entering D3 through runtime PM suspend. So
-		 * disable RPM until RPS/RC6 is properly setup. We can only
-		 * get here via the driver load/system resume/runtime resume
-		 * paths, so the _noresume version is enough (and in case of
-		 * runtime resume it's necessary).
-		 */
-		if (queue_delayed_work(dev_priv->wq,
-				       &dev_priv->gt_pm.autoenable_work,
-				       round_jiffies_up_relative(HZ)))
-			intel_runtime_pm_get_noresume(dev_priv);
-	}
-}
-
 static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
 {
 	/*
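The deleted autoenable machinery kept runtime PM balanced around the delayed work: a successful queue took a wakeref with the _noresume variant, and every exit path of the worker (the out: label above), as well as a successful cancel in intel_suspend_gt_powersave, dropped it. The pairing, extracted from the removed lines:

	/* on queue: */
	if (queue_delayed_work(dev_priv->wq,
			       &dev_priv->gt_pm.autoenable_work,
			       round_jiffies_up_relative(HZ)))
		intel_runtime_pm_get_noresume(dev_priv);

	/* on completion or successful cancel: */
	intel_runtime_pm_put(dev_priv);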
@@ -8532,17 +8490,13 @@ static void cnl_init_clock_gating(struct drm_i915_private *dev_priv)
 	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
 		   DISP_FBC_MEMORY_WAKE);
 
+	val = I915_READ(SLICE_UNIT_LEVEL_CLKGATE);
+	/* ReadHitWriteOnlyDisable:cnl */
+	val |= RCCUNIT_CLKGATE_DIS;
 	/* WaSarbUnitClockGatingDisable:cnl (pre-prod) */
 	if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_B0))
-		I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE,
-			   I915_READ(SLICE_UNIT_LEVEL_CLKGATE) |
-			   SARBUNIT_CLKGATE_DIS);
-
-	/* Display WA #1133: WaFbcSkipSegments:cnl */
-	val = I915_READ(ILK_DPFC_CHICKEN);
-	val &= ~GLK_SKIP_SEG_COUNT_MASK;
-	val |= GLK_SKIP_SEG_EN | GLK_SKIP_SEG_COUNT(1);
-	I915_WRITE(ILK_DPFC_CHICKEN, val);
+		val |= SARBUNIT_CLKGATE_DIS;
+	I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE, val);
 }
 
 static void cfl_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -9429,8 +9383,6 @@ void intel_pm_setup(struct drm_i915_private *dev_priv)
 {
 	mutex_init(&dev_priv->pcu_lock);
 
-	INIT_DELAYED_WORK(&dev_priv->gt_pm.autoenable_work,
-			  __intel_autoenable_gt_powersave);
 	atomic_set(&dev_priv->gt_pm.rps.num_waiters, 0);
 
 	dev_priv->runtime_pm.suspended = false;