path: root/drivers/gpu/drm/i915/intel_pm.c
author		Dave Airlie <airlied@redhat.com>	2016-07-14 23:50:58 -0400
committer	Dave Airlie <airlied@redhat.com>	2016-07-14 23:50:58 -0400
commit		ff37c05a996bb96eccc21f4fb1b32ba0e24f3443 (patch)
tree		c09b09b37521f2f8f3f7a9bb3b0a33a2b3bde1a1 /drivers/gpu/drm/i915/intel_pm.c
parent		6c181c82106e12dced317e93a7a396cbb8c64f75 (diff)
parent		0b2c0582f1570bfc95aa9ac1cd340a215d8e8335 (diff)
Merge tag 'drm-intel-next-2016-07-11' of git://anongit.freedesktop.org/drm-intel into drm-next
- select igt testing dependencies for CONFIG_DRM_I915_DEBUG (Chris)
- track outputs in crtc state and clean up all our ad-hoc connector/encoder walking in modeset code (Ville)
- demidlayer drm_device/drm_i915_private (Chris Wilson) -- see the sketch after this list
- thundering herd fix from Chris Wilson, with lots of help from Tvrtko Ursulin
- piles of assorted cleanups and fallout from the thundering herd fix
- documentation and more tuning for waitboosting (Chris)
- pooled EU support on bxt (Arun Siluvery)
- bxt support is no longer considered preliminary!
- ring/engine vfunc cleanup from Tvrtko
- introduce intel_wait_for_register helper (Chris) -- see the sketch after the diffstat
- opregion updates (Jani Nikula)
- tuning and fixes for wait_for macros (Tvrtko & Imre)
- more kabylake pci ids (Rodrigo)
- pps cleanup and fixes for bxt (Imre)
- move sink crc support over to atomic state (Maarten)
- fix up async fbdev init ordering (Chris)
- fbc fixes from Paulo and Chris

* tag 'drm-intel-next-2016-07-11' of git://anongit.freedesktop.org/drm-intel: (223 commits)
  drm/i915: Update DRIVER_DATE to 20160711
  drm/i915: Select DRM_VGEM for igt
  drm/i915: Select X86_MSR for igt
  drm/i915: Fill unused GGTT with scratch pages for VT-d
  drm/i915: Introduce Kabypoint PCH for Kabylake H/DT.
  drm/i915:gen9: implement WaMediaPoolStateCmdInWABB
  drm/i915: Check for invalid cloning earlier during modeset
  drm/i915: Simplify hdmi_12bpc_possible()
  drm/i915: Kill has_dsi_encoder
  drm/i915: s/INTEL_OUTPUT_DISPLAYPORT/INTEL_OUTPUT_DP/
  drm/i915: Replace some open coded intel_crtc_has_dp_encoder()s
  drm/i915: Kill has_dp_encoder from pipe_config
  drm/i915: Replace manual lvds and sdvo/hdmi counting with intel_crtc_has_type()
  drm/i915: Unify intel_pipe_has_type() and intel_pipe_will_have_type()
  drm/i915: Add output_types bitmask into the crtc state
  drm/i915: Remove encoder type checks from MST suspend/resume
  drm/i915: Don't mark eDP encoders as MST capable
  drm/i915: avoid wait_for_atomic() in non-atomic host2guc_action()
  drm/i915: Group the irq breadcrumb variables into the same cacheline
  drm/i915: Wake up the bottom-half if we steal their interrupt
  ...
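Most of the churn in the diff below is the demidlayering item above: call sites stop reaching through dev->dev_private and instead convert explicitly between struct drm_device and the embedding struct drm_i915_private via to_i915(dev) and &dev_priv->drm. A minimal standalone sketch of that pattern follows; it assumes drm_device is embedded as the "drm" member (the real to_i915() lives in i915_drv.h and uses container_of()), and the i915_to_drm() helper name here is purely illustrative since the diff simply open-codes &dev_priv->drm.

```c
#include <stddef.h>	/* offsetof() */

/* Stand-ins for the real structures; only the embedding relationship matters. */
struct drm_device {
	void *dev_private;	/* legacy midlayer pointer being retired */
};

struct drm_i915_private {
	struct drm_device drm;	/* drm_device embedded in the driver struct */
	/* ... the rest of the driver state ... */
};

/* drm_device -> drm_i915_private, replacing "dev->dev_private" lookups. */
static inline struct drm_i915_private *to_i915(struct drm_device *dev)
{
	/* container_of() by hand: step back over the embedded member. */
	return (struct drm_i915_private *)((char *)dev -
					   offsetof(struct drm_i915_private, drm));
}

/* drm_i915_private -> drm_device, replacing "dev_priv->dev". */
static inline struct drm_device *i915_to_drm(struct drm_i915_private *dev_priv)
{
	return &dev_priv->drm;	/* the diff open-codes &dev_priv->drm */
}
```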
Diffstat (limited to 'drivers/gpu/drm/i915/intel_pm.c')
-rw-r--r--	drivers/gpu/drm/i915/intel_pm.c | 310
1 file changed, 190 insertions(+), 120 deletions(-)
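Several hunks near the end of the diff (sandybridge_pcode_read/write) replace an open-coded wait_for() poll with the new intel_wait_for_register_fw() helper called out in the tag: poll a register until (value & mask) == expected, or fail with -ETIMEDOUT after a timeout in milliseconds. The sketch below only illustrates that contract under those assumptions; the real helper additionally handles forcewake and adaptive sleeping, and here the "register" is modelled as a plain volatile pointer rather than an MMIO accessor.

```c
#include <errno.h>
#include <stdint.h>
#include <time.h>

/*
 * Poll *reg until (*reg & mask) == expected or timeout_ms elapses.
 * Returns 0 on success, -ETIMEDOUT otherwise -- the same contract the
 * pcode hunks rely on when they check the helper's return value.
 */
static int wait_for_register(const volatile uint32_t *reg, uint32_t mask,
			     uint32_t expected, unsigned int timeout_ms)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		if ((*reg & mask) == expected)
			return 0;

		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000L +
		    (now.tv_nsec - start.tv_nsec) / 1000000L >= (long)timeout_ms)
			return -ETIMEDOUT;
		/* A real implementation would cpu_relax()/sleep between reads. */
	}
}
```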
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 658a75659657..5a8ee0c76593 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -57,7 +57,7 @@
 
 static void gen9_init_clock_gating(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl */
 	I915_WRITE(CHICKEN_PAR1_1,
@@ -83,7 +83,7 @@ static void gen9_init_clock_gating(struct drm_device *dev)
 
 static void bxt_init_clock_gating(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	gen9_init_clock_gating(dev);
 
@@ -109,7 +109,7 @@ static void bxt_init_clock_gating(struct drm_device *dev)
 
 static void i915_pineview_get_mem_freq(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 tmp;
 
 	tmp = I915_READ(CLKCFG);
@@ -148,7 +148,7 @@ static void i915_pineview_get_mem_freq(struct drm_device *dev)
 
 static void i915_ironlake_get_mem_freq(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u16 ddrpll, csipll;
 
 	ddrpll = I915_READ16(DDRMPLL1);
@@ -319,7 +319,7 @@ static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
 
 void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	u32 val;
 
 	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
@@ -375,7 +375,7 @@ static const int pessimal_latency_ns = 5000;
 static int vlv_get_fifo_size(struct drm_device *dev,
			      enum pipe pipe, int plane)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int sprite0_start, sprite1_start, size;
 
 	switch (pipe) {
@@ -426,7 +426,7 @@ static int vlv_get_fifo_size(struct drm_device *dev,
 
 static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t dsparb = I915_READ(DSPARB);
 	int size;
 
@@ -442,7 +442,7 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
 
 static int i830_get_fifo_size(struct drm_device *dev, int plane)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t dsparb = I915_READ(DSPARB);
 	int size;
 
@@ -459,7 +459,7 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
 
 static int i845_get_fifo_size(struct drm_device *dev, int plane)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t dsparb = I915_READ(DSPARB);
 	int size;
 
@@ -637,7 +637,7 @@ static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
 static void pineview_update_wm(struct drm_crtc *unused_crtc)
 {
 	struct drm_device *dev = unused_crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_crtc *crtc;
 	const struct cxsr_latency *latency;
 	u32 reg;
@@ -934,7 +934,7 @@ static unsigned int vlv_wm_method2(unsigned int pixel_rate,
 
 static void vlv_setup_wm_latency(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	/* all latencies in usec */
 	dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
@@ -1325,7 +1325,7 @@ static void vlv_merge_wm(struct drm_device *dev,
 static void vlv_update_wm(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	enum pipe pipe = intel_crtc->pipe;
 	struct vlv_wm_values wm = {};
@@ -1381,7 +1381,7 @@ static void g4x_update_wm(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
 	static const int sr_latency_ns = 12000;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
 	int plane_sr, cursor_sr;
 	unsigned int enabled = 0;
@@ -1438,7 +1438,7 @@ static void g4x_update_wm(struct drm_crtc *crtc)
 static void i965_update_wm(struct drm_crtc *unused_crtc)
 {
 	struct drm_device *dev = unused_crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_crtc *crtc;
 	int srwm = 1;
 	int cursor_sr = 16;
@@ -1512,7 +1512,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
 static void i9xx_update_wm(struct drm_crtc *unused_crtc)
 {
 	struct drm_device *dev = unused_crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	const struct intel_watermark_params *wm_info;
 	uint32_t fwater_lo;
 	uint32_t fwater_hi;
@@ -1642,7 +1642,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
 static void i845_update_wm(struct drm_crtc *unused_crtc)
 {
 	struct drm_device *dev = unused_crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_crtc *crtc;
 	const struct drm_display_mode *adjusted_mode;
 	uint32_t fwater_lo;
@@ -2070,7 +2070,7 @@ hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
 
 static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	if (IS_GEN9(dev)) {
 		uint32_t val;
@@ -2236,7 +2236,7 @@ static void intel_print_wm_latency(struct drm_device *dev,
 static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
				    uint16_t wm[5], uint16_t min)
 {
-	int level, max_level = ilk_wm_max_level(dev_priv->dev);
+	int level, max_level = ilk_wm_max_level(&dev_priv->drm);
 
 	if (wm[0] >= min)
 		return false;
@@ -2250,7 +2250,7 @@ static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
 
 static void snb_wm_latency_quirk(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	bool changed;
 
 	/*
@@ -2272,7 +2272,7 @@ static void snb_wm_latency_quirk(struct drm_device *dev)
 
 static void ilk_setup_wm_latency(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
 
@@ -2294,7 +2294,7 @@ static void ilk_setup_wm_latency(struct drm_device *dev)
 
 static void skl_setup_wm_latency(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	intel_read_wm_latency(dev, dev_priv->wm.skl_latency);
 	intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
@@ -2330,7 +2330,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
 	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
 	struct intel_pipe_wm *pipe_wm;
 	struct drm_device *dev = state->dev;
-	const struct drm_i915_private *dev_priv = dev->dev_private;
+	const struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_plane *intel_plane;
 	struct intel_plane_state *pristate = NULL;
 	struct intel_plane_state *sprstate = NULL;
@@ -2505,7 +2505,7 @@ static void ilk_wm_merge(struct drm_device *dev,
			 const struct ilk_wm_maximums *max,
			 struct intel_pipe_wm *merged)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int level, max_level = ilk_wm_max_level(dev);
 	int last_enabled_level = max_level;
 
@@ -2565,7 +2565,7 @@ static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
 /* The value we need to program into the WM_LPx latency field */
 static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
 		return 2 * level;
@@ -2765,7 +2765,7 @@ static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
 static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
				struct ilk_wm_values *results)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	struct ilk_wm_values *previous = &dev_priv->wm.hw;
 	unsigned int dirty;
 	uint32_t val;
@@ -2840,7 +2840,7 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
 
 bool ilk_disable_lp_wm(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
 }
@@ -3498,7 +3498,6 @@ skl_compute_wm_level(const struct drm_i915_private *dev_priv,
		     int level,
		     struct skl_wm_level *result)
 {
-	struct drm_device *dev = dev_priv->dev;
 	struct drm_atomic_state *state = cstate->base.state;
 	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
 	struct drm_plane *plane;
@@ -3514,7 +3513,9 @@ skl_compute_wm_level(const struct drm_i915_private *dev_priv,
	 */
 	memset(result, 0, sizeof(*result));
 
-	for_each_intel_plane_mask(dev, intel_plane, cstate->base.plane_mask) {
+	for_each_intel_plane_mask(&dev_priv->drm,
+				  intel_plane,
+				  cstate->base.plane_mask) {
 		int i = skl_wm_plane_id(intel_plane);
 
 		plane = &intel_plane->base;
@@ -3595,7 +3596,7 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
			     struct skl_pipe_wm *pipe_wm)
 {
 	struct drm_device *dev = cstate->base.crtc->dev;
-	const struct drm_i915_private *dev_priv = dev->dev_private;
+	const struct drm_i915_private *dev_priv = to_i915(dev);
 	int level, max_level = ilk_wm_max_level(dev);
 	int ret;
 
@@ -3682,7 +3683,7 @@ static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
 static void skl_write_wm_values(struct drm_i915_private *dev_priv,
				const struct skl_wm_values *new)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	struct intel_crtc *crtc;
 
 	for_each_intel_crtc(dev, crtc) {
@@ -3779,7 +3780,7 @@ skl_ddb_allocation_included(const struct skl_ddb_allocation *old,
 static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
				struct skl_wm_values *new_values)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	struct skl_ddb_allocation *cur_ddb, *new_ddb;
 	bool reallocated[I915_MAX_PIPES] = {};
 	struct intel_crtc *crtc;
@@ -3879,6 +3880,19 @@ static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
 	return 0;
 }
 
+static uint32_t
+pipes_modified(struct drm_atomic_state *state)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *cstate;
+	uint32_t i, ret = 0;
+
+	for_each_crtc_in_state(state, crtc, cstate, i)
+		ret |= drm_crtc_mask(crtc);
+
+	return ret;
+}
+
 static int
 skl_compute_ddb(struct drm_atomic_state *state)
 {
@@ -3887,7 +3901,7 @@ skl_compute_ddb(struct drm_atomic_state *state)
 	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
 	struct intel_crtc *intel_crtc;
 	struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb;
-	unsigned realloc_pipes = dev_priv->active_crtcs;
+	uint32_t realloc_pipes = pipes_modified(state);
 	int ret;
 
 	/*
@@ -4002,7 +4016,7 @@ static void skl_update_wm(struct drm_crtc *crtc)
 {
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct skl_wm_values *results = &dev_priv->wm.skl_results;
 	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
 	struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
@@ -4043,7 +4057,7 @@ static void ilk_compute_wm_config(struct drm_device *dev,
 
 static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
 {
-	struct drm_device *dev = dev_priv->dev;
+	struct drm_device *dev = &dev_priv->drm;
 	struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
 	struct ilk_wm_maximums max;
 	struct intel_wm_config config = {};
@@ -4145,7 +4159,7 @@ static void skl_pipe_wm_active_state(uint32_t val,
 static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
@@ -4199,7 +4213,7 @@ static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
 
 void skl_wm_get_hw_state(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
 	struct drm_crtc *crtc;
 
@@ -4219,7 +4233,7 @@ void skl_wm_get_hw_state(struct drm_device *dev)
 static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct ilk_wm_values *hw = &dev_priv->wm.hw;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
@@ -4423,7 +4437,7 @@ void vlv_wm_get_hw_state(struct drm_device *dev)
 
 void ilk_wm_get_hw_state(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct ilk_wm_values *hw = &dev_priv->wm.hw;
 	struct drm_crtc *crtc;
 
@@ -4485,7 +4499,7 @@ void ilk_wm_get_hw_state(struct drm_device *dev)
 */
 void intel_update_watermarks(struct drm_crtc *crtc)
 {
-	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
 
 	if (dev_priv->display.update_wm)
 		dev_priv->display.update_wm(crtc);
@@ -4654,19 +4668,23 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
 	new_power = dev_priv->rps.power;
 	switch (dev_priv->rps.power) {
 	case LOW_POWER:
-		if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq)
+		if (val > dev_priv->rps.efficient_freq + 1 &&
+		    val > dev_priv->rps.cur_freq)
 			new_power = BETWEEN;
 		break;
 
 	case BETWEEN:
-		if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq)
+		if (val <= dev_priv->rps.efficient_freq &&
+		    val < dev_priv->rps.cur_freq)
 			new_power = LOW_POWER;
-		else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq)
+		else if (val >= dev_priv->rps.rp0_freq &&
+			 val > dev_priv->rps.cur_freq)
 			new_power = HIGH_POWER;
 		break;
 
 	case HIGH_POWER:
-		if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq)
+		if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 &&
+		    val < dev_priv->rps.cur_freq)
 			new_power = BETWEEN;
 		break;
 	}
@@ -4712,22 +4730,24 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
 	}
 
 	I915_WRITE(GEN6_RP_UP_EI,
		   GT_INTERVAL_FROM_US(dev_priv, ei_up));
 	I915_WRITE(GEN6_RP_UP_THRESHOLD,
-		   GT_INTERVAL_FROM_US(dev_priv, (ei_up * threshold_up / 100)));
+		   GT_INTERVAL_FROM_US(dev_priv,
+				       ei_up * threshold_up / 100));
 
 	I915_WRITE(GEN6_RP_DOWN_EI,
		   GT_INTERVAL_FROM_US(dev_priv, ei_down));
 	I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
-		   GT_INTERVAL_FROM_US(dev_priv, (ei_down * threshold_down / 100)));
+		   GT_INTERVAL_FROM_US(dev_priv,
+				       ei_down * threshold_down / 100));
 
 	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_AVG);
 
 	dev_priv->rps.power = new_power;
 	dev_priv->rps.up_threshold = threshold_up;
@@ -4844,12 +4864,27 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv)
 		gen6_rps_reset_ei(dev_priv);
 		I915_WRITE(GEN6_PMINTRMSK,
			   gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
+
+		gen6_enable_rps_interrupts(dev_priv);
+
+		/* Ensure we start at the user's desired frequency */
+		intel_set_rps(dev_priv,
+			      clamp(dev_priv->rps.cur_freq,
+				    dev_priv->rps.min_freq_softlimit,
+				    dev_priv->rps.max_freq_softlimit));
 	}
 	mutex_unlock(&dev_priv->rps.hw_lock);
 }
 
 void gen6_rps_idle(struct drm_i915_private *dev_priv)
 {
+	/* Flush our bottom-half so that it does not race with us
+	 * setting the idle frequency and so that it is bounded by
+	 * our rpm wakeref. And then disable the interrupts to stop any
+	 * futher RPS reclocking whilst we are asleep.
+	 */
+	gen6_disable_rps_interrupts(dev_priv);
+
 	mutex_lock(&dev_priv->rps.hw_lock);
 	if (dev_priv->rps.enabled) {
 		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
@@ -4874,7 +4909,7 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv,
 	/* This is intentionally racy! We peek at the state here, then
	 * validate inside the RPS worker.
	 */
-	if (!(dev_priv->mm.busy &&
+	if (!(dev_priv->gt.awake &&
	      dev_priv->rps.enabled &&
	      dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit))
 		return;
@@ -4890,7 +4925,7 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv,
 	spin_lock_irq(&dev_priv->irq_lock);
 	if (dev_priv->rps.interrupts_enabled) {
 		dev_priv->rps.client_boost = true;
-		queue_work(dev_priv->wq, &dev_priv->rps.work);
+		schedule_work(&dev_priv->rps.work);
 	}
 	spin_unlock_irq(&dev_priv->irq_lock);
 
@@ -4954,14 +4989,15 @@ static void intel_print_rc6_info(struct drm_i915_private *dev_priv, u32 mode)
 			mode = 0;
 	}
 	if (HAS_RC6p(dev_priv))
-		DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n",
-			      onoff(mode & GEN6_RC_CTL_RC6_ENABLE),
-			      onoff(mode & GEN6_RC_CTL_RC6p_ENABLE),
-			      onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE));
+		DRM_DEBUG_DRIVER("Enabling RC6 states: "
+				 "RC6 %s RC6p %s RC6pp %s\n",
+				 onoff(mode & GEN6_RC_CTL_RC6_ENABLE),
+				 onoff(mode & GEN6_RC_CTL_RC6p_ENABLE),
+				 onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE));
 
 	else
-		DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s\n",
-			      onoff(mode & GEN6_RC_CTL_RC6_ENABLE));
+		DRM_DEBUG_DRIVER("Enabling RC6 states: RC6 %s\n",
+				 onoff(mode & GEN6_RC_CTL_RC6_ENABLE));
 }
 
 static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
@@ -4969,9 +5005,20 @@ static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	bool enable_rc6 = true;
 	unsigned long rc6_ctx_base;
+	u32 rc_ctl;
+	int rc_sw_target;
+
+	rc_ctl = I915_READ(GEN6_RC_CONTROL);
+	rc_sw_target = (I915_READ(GEN6_RC_STATE) & RC_SW_TARGET_STATE_MASK) >>
+		       RC_SW_TARGET_STATE_SHIFT;
+	DRM_DEBUG_DRIVER("BIOS enabled RC states: "
+			 "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n",
+			 onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE),
+			 onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE),
+			 rc_sw_target);
 
 	if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
-		DRM_DEBUG_KMS("RC6 Base location not set properly.\n");
+		DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n");
 		enable_rc6 = false;
 	}
 
@@ -4983,7 +5030,7 @@ static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
 	if (!((rc6_ctx_base >= ggtt->stolen_reserved_base) &&
	      (rc6_ctx_base + PAGE_SIZE <= ggtt->stolen_reserved_base +
					ggtt->stolen_reserved_size))) {
-		DRM_DEBUG_KMS("RC6 Base address not as expected.\n");
+		DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n");
 		enable_rc6 = false;
 	}
 
@@ -4991,15 +5038,24 @@ static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
	      ((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) &&
	      ((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) &&
	      ((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) {
-		DRM_DEBUG_KMS("Engine Idle wait time not set properly.\n");
+		DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n");
 		enable_rc6 = false;
 	}
 
-	if (!(I915_READ(GEN6_RC_CONTROL) & (GEN6_RC_CTL_RC6_ENABLE |
-					    GEN6_RC_CTL_HW_ENABLE)) &&
-	    ((I915_READ(GEN6_RC_CONTROL) & GEN6_RC_CTL_HW_ENABLE) ||
-	     !(I915_READ(GEN6_RC_STATE) & RC6_STATE))) {
-		DRM_DEBUG_KMS("HW/SW RC6 is not enabled by BIOS.\n");
+	if (!I915_READ(GEN8_PUSHBUS_CONTROL) ||
+	    !I915_READ(GEN8_PUSHBUS_ENABLE) ||
+	    !I915_READ(GEN8_PUSHBUS_SHIFT)) {
+		DRM_DEBUG_DRIVER("Pushbus not setup properly.\n");
+		enable_rc6 = false;
+	}
+
+	if (!I915_READ(GEN6_GFXPAUSE)) {
+		DRM_DEBUG_DRIVER("GFX pause not setup properly.\n");
+		enable_rc6 = false;
+	}
+
+	if (!I915_READ(GEN8_MISC_CTRL0)) {
+		DRM_DEBUG_DRIVER("GPM control not setup properly.\n");
 		enable_rc6 = false;
 	}
 
@@ -5031,8 +5087,9 @@ int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6)
 		mask = INTEL_RC6_ENABLE;
 
 	if ((enable_rc6 & mask) != enable_rc6)
-		DRM_DEBUG_KMS("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
-			      enable_rc6 & mask, enable_rc6, mask);
+		DRM_DEBUG_DRIVER("Adjusting RC6 mask to %d "
+				 "(requested %d, valid %d)\n",
+				 enable_rc6 & mask, enable_rc6, mask);
 
 	return enable_rc6 & mask;
 }
@@ -5643,7 +5700,7 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
 	u32 pcbr;
 	int pctx_size = 24*1024;
 
-	mutex_lock(&dev_priv->dev->struct_mutex);
+	mutex_lock(&dev_priv->drm.struct_mutex);
 
 	pcbr = I915_READ(VLV_PCBR);
 	if (pcbr) {
@@ -5651,7 +5708,7 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
 		int pcbr_offset;
 
 		pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
-		pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
+		pctx = i915_gem_object_create_stolen_for_preallocated(&dev_priv->drm,
								      pcbr_offset,
								      I915_GTT_OFFSET_NONE,
								      pctx_size);
@@ -5668,7 +5725,7 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
	 * overlap with other ranges, such as the frame buffer, protected
	 * memory, or any other relevant ranges.
	 */
-	pctx = i915_gem_object_create_stolen(dev_priv->dev, pctx_size);
+	pctx = i915_gem_object_create_stolen(&dev_priv->drm, pctx_size);
 	if (!pctx) {
 		DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
 		goto out;
@@ -5680,7 +5737,7 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
 out:
 	DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
 	dev_priv->vlv_pctx = pctx;
-	mutex_unlock(&dev_priv->dev->struct_mutex);
+	mutex_unlock(&dev_priv->drm.struct_mutex);
 }
 
 static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
@@ -6624,9 +6681,9 @@ void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
 
 	if (IS_IRONLAKE_M(dev_priv)) {
 		ironlake_enable_drps(dev_priv);
-		mutex_lock(&dev_priv->dev->struct_mutex);
+		mutex_lock(&dev_priv->drm.struct_mutex);
 		intel_init_emon(dev_priv);
-		mutex_unlock(&dev_priv->dev->struct_mutex);
+		mutex_unlock(&dev_priv->drm.struct_mutex);
 	} else if (INTEL_INFO(dev_priv)->gen >= 6) {
 		/*
		 * PCU communication is slow and this doesn't need to be
@@ -6657,7 +6714,7 @@ void intel_reset_gt_powersave(struct drm_i915_private *dev_priv)
 
 static void ibx_init_clock_gating(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
@@ -6669,7 +6726,7 @@ static void ibx_init_clock_gating(struct drm_device *dev)
 
 static void g4x_disable_trickle_feed(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum pipe pipe;
 
 	for_each_pipe(dev_priv, pipe) {
@@ -6684,7 +6741,7 @@ static void g4x_disable_trickle_feed(struct drm_device *dev)
 
 static void ilk_init_lp_watermarks(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
 	I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
@@ -6698,7 +6755,7 @@ static void ilk_init_lp_watermarks(struct drm_device *dev)
 
 static void ironlake_init_clock_gating(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
 
 	/*
@@ -6772,7 +6829,7 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
 
 static void cpt_init_clock_gating(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int pipe;
 	uint32_t val;
 
@@ -6809,7 +6866,7 @@ static void cpt_init_clock_gating(struct drm_device *dev)
 
 static void gen6_check_mch_setup(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t tmp;
 
 	tmp = I915_READ(MCH_SSKPD);
@@ -6820,7 +6877,7 @@ static void gen6_check_mch_setup(struct drm_device *dev)
 
 static void gen6_init_clock_gating(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
 
 	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
@@ -6935,7 +6992,7 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
 
 static void lpt_init_clock_gating(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	/*
	 * TODO: this bit should only be enabled when really needed, then
@@ -6954,7 +7011,7 @@ static void lpt_init_clock_gating(struct drm_device *dev)
 
 static void lpt_suspend_hw(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	if (HAS_PCH_LPT_LP(dev)) {
 		uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
@@ -6989,7 +7046,7 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
 
 static void kabylake_init_clock_gating(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	gen9_init_clock_gating(dev);
 
@@ -7010,7 +7067,7 @@ static void kabylake_init_clock_gating(struct drm_device *dev)
 
 static void skylake_init_clock_gating(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	gen9_init_clock_gating(dev);
 
@@ -7025,7 +7082,7 @@ static void skylake_init_clock_gating(struct drm_device *dev)
 
 static void broadwell_init_clock_gating(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum pipe pipe;
 
 	ilk_init_lp_watermarks(dev);
@@ -7076,7 +7133,7 @@ static void broadwell_init_clock_gating(struct drm_device *dev)
 
 static void haswell_init_clock_gating(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	ilk_init_lp_watermarks(dev);
 
@@ -7132,7 +7189,7 @@ static void haswell_init_clock_gating(struct drm_device *dev)
 
 static void ivybridge_init_clock_gating(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t snpcr;
 
 	ilk_init_lp_watermarks(dev);
@@ -7230,7 +7287,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
 
 static void valleyview_init_clock_gating(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	/* WaDisableEarlyCull:vlv */
 	I915_WRITE(_3D_CHICKEN3,
@@ -7312,7 +7369,7 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
 
 static void cherryview_init_clock_gating(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	/* WaVSRefCountFullforceMissDisable:chv */
 	/* WaDSRefCountFullforceMissDisable:chv */
@@ -7348,7 +7405,7 @@ static void cherryview_init_clock_gating(struct drm_device *dev)
 
 static void g4x_init_clock_gating(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	uint32_t dspclk_gate;
 
 	I915_WRITE(RENCLK_GATE_D1, 0);
@@ -7375,7 +7432,7 @@ static void g4x_init_clock_gating(struct drm_device *dev)
 
 static void crestline_init_clock_gating(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
 	I915_WRITE(RENCLK_GATE_D2, 0);
@@ -7391,7 +7448,7 @@ static void crestline_init_clock_gating(struct drm_device *dev)
 
 static void broadwater_init_clock_gating(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
@@ -7408,7 +7465,7 @@ static void broadwater_init_clock_gating(struct drm_device *dev)
 
 static void gen3_init_clock_gating(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u32 dstate = I915_READ(D_STATE);
 
 	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
@@ -7433,7 +7490,7 @@ static void gen3_init_clock_gating(struct drm_device *dev)
 
 static void i85x_init_clock_gating(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
 
@@ -7447,7 +7504,7 @@ static void i85x_init_clock_gating(struct drm_device *dev)
 
 static void i830_init_clock_gating(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
 
@@ -7458,7 +7515,7 @@ static void i830_init_clock_gating(struct drm_device *dev)
 
 void intel_init_clock_gating(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	dev_priv->display.init_clock_gating(dev);
 }
@@ -7526,7 +7583,7 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
 /* Set up chip specific power management-related functions */
 void intel_init_pm(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	intel_fbc_init(dev_priv);
 
@@ -7604,46 +7661,59 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val
 {
 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
-	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
+	/* GEN6_PCODE_* are outside of the forcewake domain, we can
+	 * use te fw I915_READ variants to reduce the amount of work
+	 * required when reading/writing.
+	 */
+
+	if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
 		DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
 		return -EAGAIN;
 	}
 
-	I915_WRITE(GEN6_PCODE_DATA, *val);
-	I915_WRITE(GEN6_PCODE_DATA1, 0);
-	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
+	I915_WRITE_FW(GEN6_PCODE_DATA, *val);
+	I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
+	I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
 
-	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
-		     500)) {
+	if (intel_wait_for_register_fw(dev_priv,
+				       GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
+				       500)) {
 		DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
 		return -ETIMEDOUT;
 	}
 
-	*val = I915_READ(GEN6_PCODE_DATA);
-	I915_WRITE(GEN6_PCODE_DATA, 0);
+	*val = I915_READ_FW(GEN6_PCODE_DATA);
+	I915_WRITE_FW(GEN6_PCODE_DATA, 0);
 
 	return 0;
 }
 
-int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val)
+int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
+			    u32 mbox, u32 val)
 {
 	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
-	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
+	/* GEN6_PCODE_* are outside of the forcewake domain, we can
+	 * use te fw I915_READ variants to reduce the amount of work
+	 * required when reading/writing.
+	 */
+
+	if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
 		DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
 		return -EAGAIN;
 	}
 
-	I915_WRITE(GEN6_PCODE_DATA, val);
-	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
+	I915_WRITE_FW(GEN6_PCODE_DATA, val);
+	I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
 
-	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
-		     500)) {
+	if (intel_wait_for_register_fw(dev_priv,
+				       GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
+				       500)) {
 		DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
 		return -ETIMEDOUT;
 	}
 
-	I915_WRITE(GEN6_PCODE_DATA, 0);
+	I915_WRITE_FW(GEN6_PCODE_DATA, 0);
 
 	return 0;
 }
@@ -7713,7 +7783,7 @@ static void __intel_rps_boost_work(struct work_struct *work)
 	struct request_boost *boost = container_of(work, struct request_boost, work);
 	struct drm_i915_gem_request *req = boost->req;
 
-	if (!i915_gem_request_completed(req, true))
+	if (!i915_gem_request_completed(req))
 		gen6_rps_boost(req->i915, NULL, req->emitted_jiffies);
 
 	i915_gem_request_unreference(req);
@@ -7727,7 +7797,7 @@ void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req)
 	if (req == NULL || INTEL_GEN(req->i915) < 6)
 		return;
 
-	if (i915_gem_request_completed(req, true))
+	if (i915_gem_request_completed(req))
 		return;
 
 	boost = kmalloc(sizeof(*boost), GFP_ATOMIC);
@@ -7743,7 +7813,7 @@ void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req)
 
 void intel_pm_setup(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	mutex_init(&dev_priv->rps.hw_lock);
 	spin_lock_init(&dev_priv->rps.client_lock);