diff options
author | Sagar Arun Kamble <sagar.a.kamble@intel.com> | 2017-10-10 17:30:05 -0400 |
---|---|---|
committer | Chris Wilson <chris@chris-wilson.co.uk> | 2017-10-11 03:56:56 -0400 |
commit | 9f817501bd7facfe2bffacd637f4332e5991e57a (patch) | |
tree | f87b873df21964993b89bcc10754274bbf2cf9be | |
parent | ad1443f0f3dd1b2434af897af8b8f942e47cf8c3 (diff) |
drm/i915: Move rps.hw_lock to dev_priv and s/hw_lock/pcu_lock
In order to separate GT PM related functionality into a new structure,
we are updating the rps structure. The hw_lock in it is also used for
display-related PCU communication, hence move it to dev_priv.
Signed-off-by: Sagar Arun Kamble <sagar.a.kamble@intel.com>
Cc: Imre Deak <imre.deak@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Radoslaw Szwichtenberg <radoslaw.szwichtenberg@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/1507360055-19948-8-git-send-email-sagar.a.kamble@intel.com
Acked-by: Imre Deak <imre.deak@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20171010213010.7415-7-chris@chris-wilson.co.uk
-rw-r--r-- | drivers/gpu/drm/i915/i915_debugfs.c | 24 | ||||
-rw-r--r-- | drivers/gpu/drm/i915/i915_drv.h | 16 | ||||
-rw-r--r-- | drivers/gpu/drm/i915/i915_irq.c | 4 | ||||
-rw-r--r-- | drivers/gpu/drm/i915/i915_sysfs.c | 20 | ||||
-rw-r--r-- | drivers/gpu/drm/i915/intel_cdclk.c | 40 | ||||
-rw-r--r-- | drivers/gpu/drm/i915/intel_display.c | 12 | ||||
-rw-r--r-- | drivers/gpu/drm/i915/intel_pm.c | 72 | ||||
-rw-r--r-- | drivers/gpu/drm/i915/intel_runtime_pm.c | 16 | ||||
-rw-r--r-- | drivers/gpu/drm/i915/intel_sideband.c | 6 |
9 files changed, 105 insertions, 105 deletions
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 31ab92eda45d..e733097fa647 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c | |||
@@ -1097,7 +1097,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused) | |||
1097 | } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { | 1097 | } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { |
1098 | u32 rpmodectl, freq_sts; | 1098 | u32 rpmodectl, freq_sts; |
1099 | 1099 | ||
1100 | mutex_lock(&dev_priv->rps.hw_lock); | 1100 | mutex_lock(&dev_priv->pcu_lock); |
1101 | 1101 | ||
1102 | rpmodectl = I915_READ(GEN6_RP_CONTROL); | 1102 | rpmodectl = I915_READ(GEN6_RP_CONTROL); |
1103 | seq_printf(m, "Video Turbo Mode: %s\n", | 1103 | seq_printf(m, "Video Turbo Mode: %s\n", |
@@ -1130,7 +1130,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused) | |||
1130 | seq_printf(m, | 1130 | seq_printf(m, |
1131 | "efficient (RPe) frequency: %d MHz\n", | 1131 | "efficient (RPe) frequency: %d MHz\n", |
1132 | intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq)); | 1132 | intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq)); |
1133 | mutex_unlock(&dev_priv->rps.hw_lock); | 1133 | mutex_unlock(&dev_priv->pcu_lock); |
1134 | } else if (INTEL_GEN(dev_priv) >= 6) { | 1134 | } else if (INTEL_GEN(dev_priv) >= 6) { |
1135 | u32 rp_state_limits; | 1135 | u32 rp_state_limits; |
1136 | u32 gt_perf_status; | 1136 | u32 gt_perf_status; |
@@ -1565,9 +1565,9 @@ static int gen6_drpc_info(struct seq_file *m) | |||
1565 | gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS); | 1565 | gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS); |
1566 | } | 1566 | } |
1567 | 1567 | ||
1568 | mutex_lock(&dev_priv->rps.hw_lock); | 1568 | mutex_lock(&dev_priv->pcu_lock); |
1569 | sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); | 1569 | sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); |
1570 | mutex_unlock(&dev_priv->rps.hw_lock); | 1570 | mutex_unlock(&dev_priv->pcu_lock); |
1571 | 1571 | ||
1572 | seq_printf(m, "RC1e Enabled: %s\n", | 1572 | seq_printf(m, "RC1e Enabled: %s\n", |
1573 | yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE)); | 1573 | yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE)); |
@@ -1842,7 +1842,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) | |||
1842 | 1842 | ||
1843 | intel_runtime_pm_get(dev_priv); | 1843 | intel_runtime_pm_get(dev_priv); |
1844 | 1844 | ||
1845 | ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); | 1845 | ret = mutex_lock_interruptible(&dev_priv->pcu_lock); |
1846 | if (ret) | 1846 | if (ret) |
1847 | goto out; | 1847 | goto out; |
1848 | 1848 | ||
@@ -1873,7 +1873,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) | |||
1873 | ((ia_freq >> 8) & 0xff) * 100); | 1873 | ((ia_freq >> 8) & 0xff) * 100); |
1874 | } | 1874 | } |
1875 | 1875 | ||
1876 | mutex_unlock(&dev_priv->rps.hw_lock); | 1876 | mutex_unlock(&dev_priv->pcu_lock); |
1877 | 1877 | ||
1878 | out: | 1878 | out: |
1879 | intel_runtime_pm_put(dev_priv); | 1879 | intel_runtime_pm_put(dev_priv); |
@@ -4320,7 +4320,7 @@ i915_max_freq_set(void *data, u64 val) | |||
4320 | 4320 | ||
4321 | DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val); | 4321 | DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val); |
4322 | 4322 | ||
4323 | ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); | 4323 | ret = mutex_lock_interruptible(&dev_priv->pcu_lock); |
4324 | if (ret) | 4324 | if (ret) |
4325 | return ret; | 4325 | return ret; |
4326 | 4326 | ||
@@ -4333,7 +4333,7 @@ i915_max_freq_set(void *data, u64 val) | |||
4333 | hw_min = dev_priv->rps.min_freq; | 4333 | hw_min = dev_priv->rps.min_freq; |
4334 | 4334 | ||
4335 | if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) { | 4335 | if (val < hw_min || val > hw_max || val < dev_priv->rps.min_freq_softlimit) { |
4336 | mutex_unlock(&dev_priv->rps.hw_lock); | 4336 | mutex_unlock(&dev_priv->pcu_lock); |
4337 | return -EINVAL; | 4337 | return -EINVAL; |
4338 | } | 4338 | } |
4339 | 4339 | ||
@@ -4342,7 +4342,7 @@ i915_max_freq_set(void *data, u64 val) | |||
4342 | if (intel_set_rps(dev_priv, val)) | 4342 | if (intel_set_rps(dev_priv, val)) |
4343 | DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n"); | 4343 | DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n"); |
4344 | 4344 | ||
4345 | mutex_unlock(&dev_priv->rps.hw_lock); | 4345 | mutex_unlock(&dev_priv->pcu_lock); |
4346 | 4346 | ||
4347 | return 0; | 4347 | return 0; |
4348 | } | 4348 | } |
@@ -4375,7 +4375,7 @@ i915_min_freq_set(void *data, u64 val) | |||
4375 | 4375 | ||
4376 | DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val); | 4376 | DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val); |
4377 | 4377 | ||
4378 | ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock); | 4378 | ret = mutex_lock_interruptible(&dev_priv->pcu_lock); |
4379 | if (ret) | 4379 | if (ret) |
4380 | return ret; | 4380 | return ret; |
4381 | 4381 | ||
@@ -4389,7 +4389,7 @@ i915_min_freq_set(void *data, u64 val) | |||
4389 | 4389 | ||
4390 | if (val < hw_min || | 4390 | if (val < hw_min || |
4391 | val > hw_max || val > dev_priv->rps.max_freq_softlimit) { | 4391 | val > hw_max || val > dev_priv->rps.max_freq_softlimit) { |
4392 | mutex_unlock(&dev_priv->rps.hw_lock); | 4392 | mutex_unlock(&dev_priv->pcu_lock); |
4393 | return -EINVAL; | 4393 | return -EINVAL; |
4394 | } | 4394 | } |
4395 | 4395 | ||
@@ -4398,7 +4398,7 @@ i915_min_freq_set(void *data, u64 val) | |||
4398 | if (intel_set_rps(dev_priv, val)) | 4398 | if (intel_set_rps(dev_priv, val)) |
4399 | DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n"); | 4399 | DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n"); |
4400 | 4400 | ||
4401 | mutex_unlock(&dev_priv->rps.hw_lock); | 4401 | mutex_unlock(&dev_priv->pcu_lock); |
4402 | 4402 | ||
4403 | return 0; | 4403 | return 0; |
4404 | } | 4404 | } |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index f44027f6e5e1..fca7b939495f 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -1364,14 +1364,6 @@ struct intel_gen6_power_mgmt { | |||
1364 | 1364 | ||
1365 | /* manual wa residency calculations */ | 1365 | /* manual wa residency calculations */ |
1366 | struct intel_rps_ei ei; | 1366 | struct intel_rps_ei ei; |
1367 | |||
1368 | /* | ||
1369 | * Protects RPS/RC6 register access and PCU communication. | ||
1370 | * Must be taken after struct_mutex if nested. Note that | ||
1371 | * this lock may be held for long periods of time when | ||
1372 | * talking to hw - so only take it when talking to hw! | ||
1373 | */ | ||
1374 | struct mutex hw_lock; | ||
1375 | }; | 1367 | }; |
1376 | 1368 | ||
1377 | /* defined intel_pm.c */ | 1369 | /* defined intel_pm.c */ |
@@ -2421,6 +2413,14 @@ struct drm_i915_private { | |||
2421 | /* Cannot be determined by PCIID. You must always read a register. */ | 2413 | /* Cannot be determined by PCIID. You must always read a register. */ |
2422 | u32 edram_cap; | 2414 | u32 edram_cap; |
2423 | 2415 | ||
2416 | /* | ||
2417 | * Protects RPS/RC6 register access and PCU communication. | ||
2418 | * Must be taken after struct_mutex if nested. Note that | ||
2419 | * this lock may be held for long periods of time when | ||
2420 | * talking to hw - so only take it when talking to hw! | ||
2421 | */ | ||
2422 | struct mutex pcu_lock; | ||
2423 | |||
2424 | /* gen6+ rps state */ | 2424 | /* gen6+ rps state */ |
2425 | struct intel_gen6_power_mgmt rps; | 2425 | struct intel_gen6_power_mgmt rps; |
2426 | 2426 | ||
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 915c5b9dc547..1844d3fe8f1f 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -1181,7 +1181,7 @@ static void gen6_pm_rps_work(struct work_struct *work) | |||
1181 | if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost) | 1181 | if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost) |
1182 | goto out; | 1182 | goto out; |
1183 | 1183 | ||
1184 | mutex_lock(&dev_priv->rps.hw_lock); | 1184 | mutex_lock(&dev_priv->pcu_lock); |
1185 | 1185 | ||
1186 | pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir); | 1186 | pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir); |
1187 | 1187 | ||
@@ -1235,7 +1235,7 @@ static void gen6_pm_rps_work(struct work_struct *work) | |||
1235 | dev_priv->rps.last_adj = 0; | 1235 | dev_priv->rps.last_adj = 0; |
1236 | } | 1236 | } |
1237 | 1237 | ||
1238 | mutex_unlock(&dev_priv->rps.hw_lock); | 1238 | mutex_unlock(&dev_priv->pcu_lock); |
1239 | 1239 | ||
1240 | out: | 1240 | out: |
1241 | /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */ | 1241 | /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */ |
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index d61c8727f756..79fbab49d1d0 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c | |||
@@ -246,7 +246,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev, | |||
246 | 246 | ||
247 | intel_runtime_pm_get(dev_priv); | 247 | intel_runtime_pm_get(dev_priv); |
248 | 248 | ||
249 | mutex_lock(&dev_priv->rps.hw_lock); | 249 | mutex_lock(&dev_priv->pcu_lock); |
250 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { | 250 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { |
251 | u32 freq; | 251 | u32 freq; |
252 | freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); | 252 | freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); |
@@ -261,7 +261,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev, | |||
261 | ret = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT; | 261 | ret = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT; |
262 | ret = intel_gpu_freq(dev_priv, ret); | 262 | ret = intel_gpu_freq(dev_priv, ret); |
263 | } | 263 | } |
264 | mutex_unlock(&dev_priv->rps.hw_lock); | 264 | mutex_unlock(&dev_priv->pcu_lock); |
265 | 265 | ||
266 | intel_runtime_pm_put(dev_priv); | 266 | intel_runtime_pm_put(dev_priv); |
267 | 267 | ||
@@ -304,9 +304,9 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev, | |||
304 | if (val < dev_priv->rps.min_freq || val > dev_priv->rps.max_freq) | 304 | if (val < dev_priv->rps.min_freq || val > dev_priv->rps.max_freq) |
305 | return -EINVAL; | 305 | return -EINVAL; |
306 | 306 | ||
307 | mutex_lock(&dev_priv->rps.hw_lock); | 307 | mutex_lock(&dev_priv->pcu_lock); |
308 | dev_priv->rps.boost_freq = val; | 308 | dev_priv->rps.boost_freq = val; |
309 | mutex_unlock(&dev_priv->rps.hw_lock); | 309 | mutex_unlock(&dev_priv->pcu_lock); |
310 | 310 | ||
311 | return count; | 311 | return count; |
312 | } | 312 | } |
@@ -344,14 +344,14 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, | |||
344 | 344 | ||
345 | intel_runtime_pm_get(dev_priv); | 345 | intel_runtime_pm_get(dev_priv); |
346 | 346 | ||
347 | mutex_lock(&dev_priv->rps.hw_lock); | 347 | mutex_lock(&dev_priv->pcu_lock); |
348 | 348 | ||
349 | val = intel_freq_opcode(dev_priv, val); | 349 | val = intel_freq_opcode(dev_priv, val); |
350 | 350 | ||
351 | if (val < dev_priv->rps.min_freq || | 351 | if (val < dev_priv->rps.min_freq || |
352 | val > dev_priv->rps.max_freq || | 352 | val > dev_priv->rps.max_freq || |
353 | val < dev_priv->rps.min_freq_softlimit) { | 353 | val < dev_priv->rps.min_freq_softlimit) { |
354 | mutex_unlock(&dev_priv->rps.hw_lock); | 354 | mutex_unlock(&dev_priv->pcu_lock); |
355 | intel_runtime_pm_put(dev_priv); | 355 | intel_runtime_pm_put(dev_priv); |
356 | return -EINVAL; | 356 | return -EINVAL; |
357 | } | 357 | } |
@@ -371,7 +371,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, | |||
371 | * frequency request may be unchanged. */ | 371 | * frequency request may be unchanged. */ |
372 | ret = intel_set_rps(dev_priv, val); | 372 | ret = intel_set_rps(dev_priv, val); |
373 | 373 | ||
374 | mutex_unlock(&dev_priv->rps.hw_lock); | 374 | mutex_unlock(&dev_priv->pcu_lock); |
375 | 375 | ||
376 | intel_runtime_pm_put(dev_priv); | 376 | intel_runtime_pm_put(dev_priv); |
377 | 377 | ||
@@ -401,14 +401,14 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, | |||
401 | 401 | ||
402 | intel_runtime_pm_get(dev_priv); | 402 | intel_runtime_pm_get(dev_priv); |
403 | 403 | ||
404 | mutex_lock(&dev_priv->rps.hw_lock); | 404 | mutex_lock(&dev_priv->pcu_lock); |
405 | 405 | ||
406 | val = intel_freq_opcode(dev_priv, val); | 406 | val = intel_freq_opcode(dev_priv, val); |
407 | 407 | ||
408 | if (val < dev_priv->rps.min_freq || | 408 | if (val < dev_priv->rps.min_freq || |
409 | val > dev_priv->rps.max_freq || | 409 | val > dev_priv->rps.max_freq || |
410 | val > dev_priv->rps.max_freq_softlimit) { | 410 | val > dev_priv->rps.max_freq_softlimit) { |
411 | mutex_unlock(&dev_priv->rps.hw_lock); | 411 | mutex_unlock(&dev_priv->pcu_lock); |
412 | intel_runtime_pm_put(dev_priv); | 412 | intel_runtime_pm_put(dev_priv); |
413 | return -EINVAL; | 413 | return -EINVAL; |
414 | } | 414 | } |
@@ -424,7 +424,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, | |||
424 | * frequency request may be unchanged. */ | 424 | * frequency request may be unchanged. */ |
425 | ret = intel_set_rps(dev_priv, val); | 425 | ret = intel_set_rps(dev_priv, val); |
426 | 426 | ||
427 | mutex_unlock(&dev_priv->rps.hw_lock); | 427 | mutex_unlock(&dev_priv->pcu_lock); |
428 | 428 | ||
429 | intel_runtime_pm_put(dev_priv); | 429 | intel_runtime_pm_put(dev_priv); |
430 | 430 | ||
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c index 87fc42b19336..b2a6d62b71c0 100644 --- a/drivers/gpu/drm/i915/intel_cdclk.c +++ b/drivers/gpu/drm/i915/intel_cdclk.c | |||
@@ -503,7 +503,7 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv, | |||
503 | else | 503 | else |
504 | cmd = 0; | 504 | cmd = 0; |
505 | 505 | ||
506 | mutex_lock(&dev_priv->rps.hw_lock); | 506 | mutex_lock(&dev_priv->pcu_lock); |
507 | val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); | 507 | val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); |
508 | val &= ~DSPFREQGUAR_MASK; | 508 | val &= ~DSPFREQGUAR_MASK; |
509 | val |= (cmd << DSPFREQGUAR_SHIFT); | 509 | val |= (cmd << DSPFREQGUAR_SHIFT); |
@@ -513,7 +513,7 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv, | |||
513 | 50)) { | 513 | 50)) { |
514 | DRM_ERROR("timed out waiting for CDclk change\n"); | 514 | DRM_ERROR("timed out waiting for CDclk change\n"); |
515 | } | 515 | } |
516 | mutex_unlock(&dev_priv->rps.hw_lock); | 516 | mutex_unlock(&dev_priv->pcu_lock); |
517 | 517 | ||
518 | mutex_lock(&dev_priv->sb_lock); | 518 | mutex_lock(&dev_priv->sb_lock); |
519 | 519 | ||
@@ -590,7 +590,7 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv, | |||
590 | */ | 590 | */ |
591 | cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1; | 591 | cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1; |
592 | 592 | ||
593 | mutex_lock(&dev_priv->rps.hw_lock); | 593 | mutex_lock(&dev_priv->pcu_lock); |
594 | val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); | 594 | val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); |
595 | val &= ~DSPFREQGUAR_MASK_CHV; | 595 | val &= ~DSPFREQGUAR_MASK_CHV; |
596 | val |= (cmd << DSPFREQGUAR_SHIFT_CHV); | 596 | val |= (cmd << DSPFREQGUAR_SHIFT_CHV); |
@@ -600,7 +600,7 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv, | |||
600 | 50)) { | 600 | 50)) { |
601 | DRM_ERROR("timed out waiting for CDclk change\n"); | 601 | DRM_ERROR("timed out waiting for CDclk change\n"); |
602 | } | 602 | } |
603 | mutex_unlock(&dev_priv->rps.hw_lock); | 603 | mutex_unlock(&dev_priv->pcu_lock); |
604 | 604 | ||
605 | intel_update_cdclk(dev_priv); | 605 | intel_update_cdclk(dev_priv); |
606 | 606 | ||
@@ -656,10 +656,10 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv, | |||
656 | "trying to change cdclk frequency with cdclk not enabled\n")) | 656 | "trying to change cdclk frequency with cdclk not enabled\n")) |
657 | return; | 657 | return; |
658 | 658 | ||
659 | mutex_lock(&dev_priv->rps.hw_lock); | 659 | mutex_lock(&dev_priv->pcu_lock); |
660 | ret = sandybridge_pcode_write(dev_priv, | 660 | ret = sandybridge_pcode_write(dev_priv, |
661 | BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0); | 661 | BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0); |
662 | mutex_unlock(&dev_priv->rps.hw_lock); | 662 | mutex_unlock(&dev_priv->pcu_lock); |
663 | if (ret) { | 663 | if (ret) { |
664 | DRM_ERROR("failed to inform pcode about cdclk change\n"); | 664 | DRM_ERROR("failed to inform pcode about cdclk change\n"); |
665 | return; | 665 | return; |
@@ -712,9 +712,9 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv, | |||
712 | LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) | 712 | LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) |
713 | DRM_ERROR("Switching back to LCPLL failed\n"); | 713 | DRM_ERROR("Switching back to LCPLL failed\n"); |
714 | 714 | ||
715 | mutex_lock(&dev_priv->rps.hw_lock); | 715 | mutex_lock(&dev_priv->pcu_lock); |
716 | sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data); | 716 | sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data); |
717 | mutex_unlock(&dev_priv->rps.hw_lock); | 717 | mutex_unlock(&dev_priv->pcu_lock); |
718 | 718 | ||
719 | I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1); | 719 | I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1); |
720 | 720 | ||
@@ -928,12 +928,12 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, | |||
928 | 928 | ||
929 | WARN_ON((cdclk == 24000) != (vco == 0)); | 929 | WARN_ON((cdclk == 24000) != (vco == 0)); |
930 | 930 | ||
931 | mutex_lock(&dev_priv->rps.hw_lock); | 931 | mutex_lock(&dev_priv->pcu_lock); |
932 | ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL, | 932 | ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL, |
933 | SKL_CDCLK_PREPARE_FOR_CHANGE, | 933 | SKL_CDCLK_PREPARE_FOR_CHANGE, |
934 | SKL_CDCLK_READY_FOR_CHANGE, | 934 | SKL_CDCLK_READY_FOR_CHANGE, |
935 | SKL_CDCLK_READY_FOR_CHANGE, 3); | 935 | SKL_CDCLK_READY_FOR_CHANGE, 3); |
936 | mutex_unlock(&dev_priv->rps.hw_lock); | 936 | mutex_unlock(&dev_priv->pcu_lock); |
937 | if (ret) { | 937 | if (ret) { |
938 | DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n", | 938 | DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n", |
939 | ret); | 939 | ret); |
@@ -975,9 +975,9 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, | |||
975 | POSTING_READ(CDCLK_CTL); | 975 | POSTING_READ(CDCLK_CTL); |
976 | 976 | ||
977 | /* inform PCU of the change */ | 977 | /* inform PCU of the change */ |
978 | mutex_lock(&dev_priv->rps.hw_lock); | 978 | mutex_lock(&dev_priv->pcu_lock); |
979 | sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack); | 979 | sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack); |
980 | mutex_unlock(&dev_priv->rps.hw_lock); | 980 | mutex_unlock(&dev_priv->pcu_lock); |
981 | 981 | ||
982 | intel_update_cdclk(dev_priv); | 982 | intel_update_cdclk(dev_priv); |
983 | } | 983 | } |
@@ -1268,10 +1268,10 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, | |||
1268 | } | 1268 | } |
1269 | 1269 | ||
1270 | /* Inform power controller of upcoming frequency change */ | 1270 | /* Inform power controller of upcoming frequency change */ |
1271 | mutex_lock(&dev_priv->rps.hw_lock); | 1271 | mutex_lock(&dev_priv->pcu_lock); |
1272 | ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, | 1272 | ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, |
1273 | 0x80000000); | 1273 | 0x80000000); |
1274 | mutex_unlock(&dev_priv->rps.hw_lock); | 1274 | mutex_unlock(&dev_priv->pcu_lock); |
1275 | 1275 | ||
1276 | if (ret) { | 1276 | if (ret) { |
1277 | DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n", | 1277 | DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n", |
@@ -1300,10 +1300,10 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, | |||
1300 | val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE; | 1300 | val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE; |
1301 | I915_WRITE(CDCLK_CTL, val); | 1301 | I915_WRITE(CDCLK_CTL, val); |
1302 | 1302 | ||
1303 | mutex_lock(&dev_priv->rps.hw_lock); | 1303 | mutex_lock(&dev_priv->pcu_lock); |
1304 | ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, | 1304 | ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, |
1305 | DIV_ROUND_UP(cdclk, 25000)); | 1305 | DIV_ROUND_UP(cdclk, 25000)); |
1306 | mutex_unlock(&dev_priv->rps.hw_lock); | 1306 | mutex_unlock(&dev_priv->pcu_lock); |
1307 | 1307 | ||
1308 | if (ret) { | 1308 | if (ret) { |
1309 | DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n", | 1309 | DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n", |
@@ -1518,12 +1518,12 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv, | |||
1518 | u32 val, divider, pcu_ack; | 1518 | u32 val, divider, pcu_ack; |
1519 | int ret; | 1519 | int ret; |
1520 | 1520 | ||
1521 | mutex_lock(&dev_priv->rps.hw_lock); | 1521 | mutex_lock(&dev_priv->pcu_lock); |
1522 | ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL, | 1522 | ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL, |
1523 | SKL_CDCLK_PREPARE_FOR_CHANGE, | 1523 | SKL_CDCLK_PREPARE_FOR_CHANGE, |
1524 | SKL_CDCLK_READY_FOR_CHANGE, | 1524 | SKL_CDCLK_READY_FOR_CHANGE, |
1525 | SKL_CDCLK_READY_FOR_CHANGE, 3); | 1525 | SKL_CDCLK_READY_FOR_CHANGE, 3); |
1526 | mutex_unlock(&dev_priv->rps.hw_lock); | 1526 | mutex_unlock(&dev_priv->pcu_lock); |
1527 | if (ret) { | 1527 | if (ret) { |
1528 | DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n", | 1528 | DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n", |
1529 | ret); | 1529 | ret); |
@@ -1575,9 +1575,9 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv, | |||
1575 | I915_WRITE(CDCLK_CTL, val); | 1575 | I915_WRITE(CDCLK_CTL, val); |
1576 | 1576 | ||
1577 | /* inform PCU of the change */ | 1577 | /* inform PCU of the change */ |
1578 | mutex_lock(&dev_priv->rps.hw_lock); | 1578 | mutex_lock(&dev_priv->pcu_lock); |
1579 | sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack); | 1579 | sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack); |
1580 | mutex_unlock(&dev_priv->rps.hw_lock); | 1580 | mutex_unlock(&dev_priv->pcu_lock); |
1581 | 1581 | ||
1582 | intel_update_cdclk(dev_priv); | 1582 | intel_update_cdclk(dev_priv); |
1583 | } | 1583 | } |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index e03b0c3d6f9f..b2c5fba102e1 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -4946,10 +4946,10 @@ void hsw_enable_ips(struct intel_crtc *crtc) | |||
4946 | 4946 | ||
4947 | assert_plane_enabled(dev_priv, crtc->plane); | 4947 | assert_plane_enabled(dev_priv, crtc->plane); |
4948 | if (IS_BROADWELL(dev_priv)) { | 4948 | if (IS_BROADWELL(dev_priv)) { |
4949 | mutex_lock(&dev_priv->rps.hw_lock); | 4949 | mutex_lock(&dev_priv->pcu_lock); |
4950 | WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, | 4950 | WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, |
4951 | IPS_ENABLE | IPS_PCODE_CONTROL)); | 4951 | IPS_ENABLE | IPS_PCODE_CONTROL)); |
4952 | mutex_unlock(&dev_priv->rps.hw_lock); | 4952 | mutex_unlock(&dev_priv->pcu_lock); |
4953 | /* Quoting Art Runyan: "its not safe to expect any particular | 4953 | /* Quoting Art Runyan: "its not safe to expect any particular |
4954 | * value in IPS_CTL bit 31 after enabling IPS through the | 4954 | * value in IPS_CTL bit 31 after enabling IPS through the |
4955 | * mailbox." Moreover, the mailbox may return a bogus state, | 4955 | * mailbox." Moreover, the mailbox may return a bogus state, |
@@ -4979,9 +4979,9 @@ void hsw_disable_ips(struct intel_crtc *crtc) | |||
4979 | 4979 | ||
4980 | assert_plane_enabled(dev_priv, crtc->plane); | 4980 | assert_plane_enabled(dev_priv, crtc->plane); |
4981 | if (IS_BROADWELL(dev_priv)) { | 4981 | if (IS_BROADWELL(dev_priv)) { |
4982 | mutex_lock(&dev_priv->rps.hw_lock); | 4982 | mutex_lock(&dev_priv->pcu_lock); |
4983 | WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0)); | 4983 | WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0)); |
4984 | mutex_unlock(&dev_priv->rps.hw_lock); | 4984 | mutex_unlock(&dev_priv->pcu_lock); |
4985 | /* wait for pcode to finish disabling IPS, which may take up to 42ms */ | 4985 | /* wait for pcode to finish disabling IPS, which may take up to 42ms */ |
4986 | if (intel_wait_for_register(dev_priv, | 4986 | if (intel_wait_for_register(dev_priv, |
4987 | IPS_CTL, IPS_ENABLE, 0, | 4987 | IPS_CTL, IPS_ENABLE, 0, |
@@ -8839,11 +8839,11 @@ static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv) | |||
8839 | static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val) | 8839 | static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val) |
8840 | { | 8840 | { |
8841 | if (IS_HASWELL(dev_priv)) { | 8841 | if (IS_HASWELL(dev_priv)) { |
8842 | mutex_lock(&dev_priv->rps.hw_lock); | 8842 | mutex_lock(&dev_priv->pcu_lock); |
8843 | if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, | 8843 | if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, |
8844 | val)) | 8844 | val)) |
8845 | DRM_DEBUG_KMS("Failed to write to D_COMP\n"); | 8845 | DRM_DEBUG_KMS("Failed to write to D_COMP\n"); |
8846 | mutex_unlock(&dev_priv->rps.hw_lock); | 8846 | mutex_unlock(&dev_priv->pcu_lock); |
8847 | } else { | 8847 | } else { |
8848 | I915_WRITE(D_COMP_BDW, val); | 8848 | I915_WRITE(D_COMP_BDW, val); |
8849 | POSTING_READ(D_COMP_BDW); | 8849 | POSTING_READ(D_COMP_BDW); |
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 831054084fb7..512f2b0513e0 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
@@ -322,7 +322,7 @@ static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable) | |||
322 | { | 322 | { |
323 | u32 val; | 323 | u32 val; |
324 | 324 | ||
325 | mutex_lock(&dev_priv->rps.hw_lock); | 325 | mutex_lock(&dev_priv->pcu_lock); |
326 | 326 | ||
327 | val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); | 327 | val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); |
328 | if (enable) | 328 | if (enable) |
@@ -337,14 +337,14 @@ static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable) | |||
337 | FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) | 337 | FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) |
338 | DRM_ERROR("timed out waiting for Punit DDR DVFS request\n"); | 338 | DRM_ERROR("timed out waiting for Punit DDR DVFS request\n"); |
339 | 339 | ||
340 | mutex_unlock(&dev_priv->rps.hw_lock); | 340 | mutex_unlock(&dev_priv->pcu_lock); |
341 | } | 341 | } |
342 | 342 | ||
343 | static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable) | 343 | static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable) |
344 | { | 344 | { |
345 | u32 val; | 345 | u32 val; |
346 | 346 | ||
347 | mutex_lock(&dev_priv->rps.hw_lock); | 347 | mutex_lock(&dev_priv->pcu_lock); |
348 | 348 | ||
349 | val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); | 349 | val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); |
350 | if (enable) | 350 | if (enable) |
@@ -353,7 +353,7 @@ static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable) | |||
353 | val &= ~DSP_MAXFIFO_PM5_ENABLE; | 353 | val &= ~DSP_MAXFIFO_PM5_ENABLE; |
354 | vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val); | 354 | vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val); |
355 | 355 | ||
356 | mutex_unlock(&dev_priv->rps.hw_lock); | 356 | mutex_unlock(&dev_priv->pcu_lock); |
357 | } | 357 | } |
358 | 358 | ||
359 | #define FW_WM(value, plane) \ | 359 | #define FW_WM(value, plane) \ |
@@ -2790,11 +2790,11 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv, | |||
2790 | 2790 | ||
2791 | /* read the first set of memory latencies[0:3] */ | 2791 | /* read the first set of memory latencies[0:3] */ |
2792 | val = 0; /* data0 to be programmed to 0 for first set */ | 2792 | val = 0; /* data0 to be programmed to 0 for first set */ |
2793 | mutex_lock(&dev_priv->rps.hw_lock); | 2793 | mutex_lock(&dev_priv->pcu_lock); |
2794 | ret = sandybridge_pcode_read(dev_priv, | 2794 | ret = sandybridge_pcode_read(dev_priv, |
2795 | GEN9_PCODE_READ_MEM_LATENCY, | 2795 | GEN9_PCODE_READ_MEM_LATENCY, |
2796 | &val); | 2796 | &val); |
2797 | mutex_unlock(&dev_priv->rps.hw_lock); | 2797 | mutex_unlock(&dev_priv->pcu_lock); |
2798 | 2798 | ||
2799 | if (ret) { | 2799 | if (ret) { |
2800 | DRM_ERROR("SKL Mailbox read error = %d\n", ret); | 2800 | DRM_ERROR("SKL Mailbox read error = %d\n", ret); |
@@ -2811,11 +2811,11 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv, | |||
2811 | 2811 | ||
2812 | /* read the second set of memory latencies[4:7] */ | 2812 | /* read the second set of memory latencies[4:7] */ |
2813 | val = 1; /* data0 to be programmed to 1 for second set */ | 2813 | val = 1; /* data0 to be programmed to 1 for second set */ |
2814 | mutex_lock(&dev_priv->rps.hw_lock); | 2814 | mutex_lock(&dev_priv->pcu_lock); |
2815 | ret = sandybridge_pcode_read(dev_priv, | 2815 | ret = sandybridge_pcode_read(dev_priv, |
2816 | GEN9_PCODE_READ_MEM_LATENCY, | 2816 | GEN9_PCODE_READ_MEM_LATENCY, |
2817 | &val); | 2817 | &val); |
2818 | mutex_unlock(&dev_priv->rps.hw_lock); | 2818 | mutex_unlock(&dev_priv->pcu_lock); |
2819 | if (ret) { | 2819 | if (ret) { |
2820 | DRM_ERROR("SKL Mailbox read error = %d\n", ret); | 2820 | DRM_ERROR("SKL Mailbox read error = %d\n", ret); |
2821 | return; | 2821 | return; |
@@ -3608,13 +3608,13 @@ intel_enable_sagv(struct drm_i915_private *dev_priv) | |||
3608 | return 0; | 3608 | return 0; |
3609 | 3609 | ||
3610 | DRM_DEBUG_KMS("Enabling the SAGV\n"); | 3610 | DRM_DEBUG_KMS("Enabling the SAGV\n"); |
3611 | mutex_lock(&dev_priv->rps.hw_lock); | 3611 | mutex_lock(&dev_priv->pcu_lock); |
3612 | 3612 | ||
3613 | ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL, | 3613 | ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL, |
3614 | GEN9_SAGV_ENABLE); | 3614 | GEN9_SAGV_ENABLE); |
3615 | 3615 | ||
3616 | /* We don't need to wait for the SAGV when enabling */ | 3616 | /* We don't need to wait for the SAGV when enabling */ |
3617 | mutex_unlock(&dev_priv->rps.hw_lock); | 3617 | mutex_unlock(&dev_priv->pcu_lock); |
3618 | 3618 | ||
3619 | /* | 3619 | /* |
3620 | * Some skl systems, pre-release machines in particular, | 3620 | * Some skl systems, pre-release machines in particular, |
@@ -3645,14 +3645,14 @@ intel_disable_sagv(struct drm_i915_private *dev_priv) | |||
3645 | return 0; | 3645 | return 0; |
3646 | 3646 | ||
3647 | DRM_DEBUG_KMS("Disabling the SAGV\n"); | 3647 | DRM_DEBUG_KMS("Disabling the SAGV\n"); |
3648 | mutex_lock(&dev_priv->rps.hw_lock); | 3648 | mutex_lock(&dev_priv->pcu_lock); |
3649 | 3649 | ||
3650 | /* bspec says to keep retrying for at least 1 ms */ | 3650 | /* bspec says to keep retrying for at least 1 ms */ |
3651 | ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL, | 3651 | ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL, |
3652 | GEN9_SAGV_DISABLE, | 3652 | GEN9_SAGV_DISABLE, |
3653 | GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED, | 3653 | GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED, |
3654 | 1); | 3654 | 1); |
3655 | mutex_unlock(&dev_priv->rps.hw_lock); | 3655 | mutex_unlock(&dev_priv->pcu_lock); |
3656 | 3656 | ||
3657 | /* | 3657 | /* |
3658 | * Some skl systems, pre-release machines in particular, | 3658 | * Some skl systems, pre-release machines in particular, |
@@ -5621,7 +5621,7 @@ void vlv_wm_get_hw_state(struct drm_device *dev) | |||
5621 | wm->level = VLV_WM_LEVEL_PM2; | 5621 | wm->level = VLV_WM_LEVEL_PM2; |
5622 | 5622 | ||
5623 | if (IS_CHERRYVIEW(dev_priv)) { | 5623 | if (IS_CHERRYVIEW(dev_priv)) { |
5624 | mutex_lock(&dev_priv->rps.hw_lock); | 5624 | mutex_lock(&dev_priv->pcu_lock); |
5625 | 5625 | ||
5626 | val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); | 5626 | val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); |
5627 | if (val & DSP_MAXFIFO_PM5_ENABLE) | 5627 | if (val & DSP_MAXFIFO_PM5_ENABLE) |
@@ -5651,7 +5651,7 @@ void vlv_wm_get_hw_state(struct drm_device *dev) | |||
5651 | wm->level = VLV_WM_LEVEL_DDR_DVFS; | 5651 | wm->level = VLV_WM_LEVEL_DDR_DVFS; |
5652 | } | 5652 | } |
5653 | 5653 | ||
5654 | mutex_unlock(&dev_priv->rps.hw_lock); | 5654 | mutex_unlock(&dev_priv->pcu_lock); |
5655 | } | 5655 | } |
5656 | 5656 | ||
5657 | for_each_intel_crtc(dev, crtc) { | 5657 | for_each_intel_crtc(dev, crtc) { |
@@ -6224,7 +6224,7 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv) | |||
6224 | 6224 | ||
6225 | void gen6_rps_busy(struct drm_i915_private *dev_priv) | 6225 | void gen6_rps_busy(struct drm_i915_private *dev_priv) |
6226 | { | 6226 | { |
6227 | mutex_lock(&dev_priv->rps.hw_lock); | 6227 | mutex_lock(&dev_priv->pcu_lock); |
6228 | if (dev_priv->rps.enabled) { | 6228 | if (dev_priv->rps.enabled) { |
6229 | u8 freq; | 6229 | u8 freq; |
6230 | 6230 | ||
@@ -6247,7 +6247,7 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv) | |||
6247 | dev_priv->rps.max_freq_softlimit))) | 6247 | dev_priv->rps.max_freq_softlimit))) |
6248 | DRM_DEBUG_DRIVER("Failed to set idle frequency\n"); | 6248 | DRM_DEBUG_DRIVER("Failed to set idle frequency\n"); |
6249 | } | 6249 | } |
6250 | mutex_unlock(&dev_priv->rps.hw_lock); | 6250 | mutex_unlock(&dev_priv->pcu_lock); |
6251 | } | 6251 | } |
6252 | 6252 | ||
6253 | void gen6_rps_idle(struct drm_i915_private *dev_priv) | 6253 | void gen6_rps_idle(struct drm_i915_private *dev_priv) |
@@ -6259,7 +6259,7 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv) | |||
6259 | */ | 6259 | */ |
6260 | gen6_disable_rps_interrupts(dev_priv); | 6260 | gen6_disable_rps_interrupts(dev_priv); |
6261 | 6261 | ||
6262 | mutex_lock(&dev_priv->rps.hw_lock); | 6262 | mutex_lock(&dev_priv->pcu_lock); |
6263 | if (dev_priv->rps.enabled) { | 6263 | if (dev_priv->rps.enabled) { |
6264 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) | 6264 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) |
6265 | vlv_set_rps_idle(dev_priv); | 6265 | vlv_set_rps_idle(dev_priv); |
@@ -6269,7 +6269,7 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv) | |||
6269 | I915_WRITE(GEN6_PMINTRMSK, | 6269 | I915_WRITE(GEN6_PMINTRMSK, |
6270 | gen6_sanitize_rps_pm_mask(dev_priv, ~0)); | 6270 | gen6_sanitize_rps_pm_mask(dev_priv, ~0)); |
6271 | } | 6271 | } |
6272 | mutex_unlock(&dev_priv->rps.hw_lock); | 6272 | mutex_unlock(&dev_priv->pcu_lock); |
6273 | } | 6273 | } |
6274 | 6274 | ||
6275 | void gen6_rps_boost(struct drm_i915_gem_request *rq, | 6275 | void gen6_rps_boost(struct drm_i915_gem_request *rq, |
@@ -6306,7 +6306,7 @@ int intel_set_rps(struct drm_i915_private *dev_priv, u8 val) | |||
6306 | { | 6306 | { |
6307 | int err; | 6307 | int err; |
6308 | 6308 | ||
6309 | lockdep_assert_held(&dev_priv->rps.hw_lock); | 6309 | lockdep_assert_held(&dev_priv->pcu_lock); |
6310 | GEM_BUG_ON(val > dev_priv->rps.max_freq); | 6310 | GEM_BUG_ON(val > dev_priv->rps.max_freq); |
6311 | GEM_BUG_ON(val < dev_priv->rps.min_freq); | 6311 | GEM_BUG_ON(val < dev_priv->rps.min_freq); |
6312 | 6312 | ||
@@ -6715,7 +6715,7 @@ static void gen6_enable_rc6(struct drm_i915_private *dev_priv) | |||
6715 | int rc6_mode; | 6715 | int rc6_mode; |
6716 | int ret; | 6716 | int ret; |
6717 | 6717 | ||
6718 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); | 6718 | WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock)); |
6719 | 6719 | ||
6720 | I915_WRITE(GEN6_RC_STATE, 0); | 6720 | I915_WRITE(GEN6_RC_STATE, 0); |
6721 | 6721 | ||
@@ -6789,7 +6789,7 @@ static void gen6_enable_rc6(struct drm_i915_private *dev_priv) | |||
6789 | 6789 | ||
6790 | static void gen6_enable_rps(struct drm_i915_private *dev_priv) | 6790 | static void gen6_enable_rps(struct drm_i915_private *dev_priv) |
6791 | { | 6791 | { |
6792 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); | 6792 | WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock)); |
6793 | 6793 | ||
6794 | /* Here begins a magic sequence of register writes to enable | 6794 | /* Here begins a magic sequence of register writes to enable |
6795 | * auto-downclocking. | 6795 | * auto-downclocking. |
@@ -6817,7 +6817,7 @@ static void gen6_update_ring_freq(struct drm_i915_private *dev_priv) | |||
6817 | int scaling_factor = 180; | 6817 | int scaling_factor = 180; |
6818 | struct cpufreq_policy *policy; | 6818 | struct cpufreq_policy *policy; |
6819 | 6819 | ||
6820 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); | 6820 | WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock)); |
6821 | 6821 | ||
6822 | policy = cpufreq_cpu_get(0); | 6822 | policy = cpufreq_cpu_get(0); |
6823 | if (policy) { | 6823 | if (policy) { |
@@ -7210,7 +7210,7 @@ static void cherryview_enable_rc6(struct drm_i915_private *dev_priv) | |||
7210 | enum intel_engine_id id; | 7210 | enum intel_engine_id id; |
7211 | u32 gtfifodbg, rc6_mode = 0, pcbr; | 7211 | u32 gtfifodbg, rc6_mode = 0, pcbr; |
7212 | 7212 | ||
7213 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); | 7213 | WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock)); |
7214 | 7214 | ||
7215 | gtfifodbg = I915_READ(GTFIFODBG) & ~(GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV | | 7215 | gtfifodbg = I915_READ(GTFIFODBG) & ~(GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV | |
7216 | GT_FIFO_FREE_ENTRIES_CHV); | 7216 | GT_FIFO_FREE_ENTRIES_CHV); |
@@ -7264,7 +7264,7 @@ static void cherryview_enable_rps(struct drm_i915_private *dev_priv) | |||
7264 | { | 7264 | { |
7265 | u32 val; | 7265 | u32 val; |
7266 | 7266 | ||
7267 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); | 7267 | WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock)); |
7268 | 7268 | ||
7269 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); | 7269 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); |
7270 | 7270 | ||
@@ -7310,7 +7310,7 @@ static void valleyview_enable_rc6(struct drm_i915_private *dev_priv) | |||
7310 | enum intel_engine_id id; | 7310 | enum intel_engine_id id; |
7311 | u32 gtfifodbg, rc6_mode = 0; | 7311 | u32 gtfifodbg, rc6_mode = 0; |
7312 | 7312 | ||
7313 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); | 7313 | WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock)); |
7314 | 7314 | ||
7315 | valleyview_check_pctx(dev_priv); | 7315 | valleyview_check_pctx(dev_priv); |
7316 | 7316 | ||
@@ -7357,7 +7357,7 @@ static void valleyview_enable_rps(struct drm_i915_private *dev_priv) | |||
7357 | { | 7357 | { |
7358 | u32 val; | 7358 | u32 val; |
7359 | 7359 | ||
7360 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); | 7360 | WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock)); |
7361 | 7361 | ||
7362 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); | 7362 | intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); |
7363 | 7363 | ||
@@ -7881,7 +7881,7 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv) | |||
7881 | } | 7881 | } |
7882 | 7882 | ||
7883 | mutex_lock(&dev_priv->drm.struct_mutex); | 7883 | mutex_lock(&dev_priv->drm.struct_mutex); |
7884 | mutex_lock(&dev_priv->rps.hw_lock); | 7884 | mutex_lock(&dev_priv->pcu_lock); |
7885 | 7885 | ||
7886 | /* Initialize RPS limits (for userspace) */ | 7886 | /* Initialize RPS limits (for userspace) */ |
7887 | if (IS_CHERRYVIEW(dev_priv)) | 7887 | if (IS_CHERRYVIEW(dev_priv)) |
@@ -7921,7 +7921,7 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv) | |||
7921 | /* Finally allow us to boost to max by default */ | 7921 | /* Finally allow us to boost to max by default */ |
7922 | dev_priv->rps.boost_freq = dev_priv->rps.max_freq; | 7922 | dev_priv->rps.boost_freq = dev_priv->rps.max_freq; |
7923 | 7923 | ||
7924 | mutex_unlock(&dev_priv->rps.hw_lock); | 7924 | mutex_unlock(&dev_priv->pcu_lock); |
7925 | mutex_unlock(&dev_priv->drm.struct_mutex); | 7925 | mutex_unlock(&dev_priv->drm.struct_mutex); |
7926 | 7926 | ||
7927 | intel_autoenable_gt_powersave(dev_priv); | 7927 | intel_autoenable_gt_powersave(dev_priv); |
@@ -7968,7 +7968,7 @@ void intel_disable_gt_powersave(struct drm_i915_private *dev_priv) | |||
7968 | if (!READ_ONCE(dev_priv->rps.enabled)) | 7968 | if (!READ_ONCE(dev_priv->rps.enabled)) |
7969 | return; | 7969 | return; |
7970 | 7970 | ||
7971 | mutex_lock(&dev_priv->rps.hw_lock); | 7971 | mutex_lock(&dev_priv->pcu_lock); |
7972 | 7972 | ||
7973 | if (INTEL_GEN(dev_priv) >= 9) { | 7973 | if (INTEL_GEN(dev_priv) >= 9) { |
7974 | gen9_disable_rc6(dev_priv); | 7974 | gen9_disable_rc6(dev_priv); |
@@ -7987,7 +7987,7 @@ void intel_disable_gt_powersave(struct drm_i915_private *dev_priv) | |||
7987 | } | 7987 | } |
7988 | 7988 | ||
7989 | dev_priv->rps.enabled = false; | 7989 | dev_priv->rps.enabled = false; |
7990 | mutex_unlock(&dev_priv->rps.hw_lock); | 7990 | mutex_unlock(&dev_priv->pcu_lock); |
7991 | } | 7991 | } |
7992 | 7992 | ||
7993 | void intel_enable_gt_powersave(struct drm_i915_private *dev_priv) | 7993 | void intel_enable_gt_powersave(struct drm_i915_private *dev_priv) |
@@ -8002,7 +8002,7 @@ void intel_enable_gt_powersave(struct drm_i915_private *dev_priv) | |||
8002 | if (intel_vgpu_active(dev_priv)) | 8002 | if (intel_vgpu_active(dev_priv)) |
8003 | return; | 8003 | return; |
8004 | 8004 | ||
8005 | mutex_lock(&dev_priv->rps.hw_lock); | 8005 | mutex_lock(&dev_priv->pcu_lock); |
8006 | 8006 | ||
8007 | if (IS_CHERRYVIEW(dev_priv)) { | 8007 | if (IS_CHERRYVIEW(dev_priv)) { |
8008 | cherryview_enable_rc6(dev_priv); | 8008 | cherryview_enable_rc6(dev_priv); |
@@ -8035,7 +8035,7 @@ void intel_enable_gt_powersave(struct drm_i915_private *dev_priv) | |||
8035 | WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq); | 8035 | WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq); |
8036 | 8036 | ||
8037 | dev_priv->rps.enabled = true; | 8037 | dev_priv->rps.enabled = true; |
8038 | mutex_unlock(&dev_priv->rps.hw_lock); | 8038 | mutex_unlock(&dev_priv->pcu_lock); |
8039 | } | 8039 | } |
8040 | 8040 | ||
8041 | static void __intel_autoenable_gt_powersave(struct work_struct *work) | 8041 | static void __intel_autoenable_gt_powersave(struct work_struct *work) |
@@ -9123,7 +9123,7 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val | |||
9123 | { | 9123 | { |
9124 | int status; | 9124 | int status; |
9125 | 9125 | ||
9126 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); | 9126 | WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock)); |
9127 | 9127 | ||
9128 | /* GEN6_PCODE_* are outside of the forcewake domain, we can | 9128 | /* GEN6_PCODE_* are outside of the forcewake domain, we can |
9129 | * use te fw I915_READ variants to reduce the amount of work | 9129 | * use te fw I915_READ variants to reduce the amount of work |
@@ -9170,7 +9170,7 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, | |||
9170 | { | 9170 | { |
9171 | int status; | 9171 | int status; |
9172 | 9172 | ||
9173 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); | 9173 | WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock)); |
9174 | 9174 | ||
9175 | /* GEN6_PCODE_* are outside of the forcewake domain, we can | 9175 | /* GEN6_PCODE_* are outside of the forcewake domain, we can |
9176 | * use te fw I915_READ variants to reduce the amount of work | 9176 | * use te fw I915_READ variants to reduce the amount of work |
@@ -9247,7 +9247,7 @@ int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request, | |||
9247 | u32 status; | 9247 | u32 status; |
9248 | int ret; | 9248 | int ret; |
9249 | 9249 | ||
9250 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); | 9250 | WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock)); |
9251 | 9251 | ||
9252 | #define COND skl_pcode_try_request(dev_priv, mbox, request, reply_mask, reply, \ | 9252 | #define COND skl_pcode_try_request(dev_priv, mbox, request, reply_mask, reply, \ |
9253 | &status) | 9253 | &status) |
@@ -9344,7 +9344,7 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val) | |||
9344 | 9344 | ||
9345 | void intel_pm_setup(struct drm_i915_private *dev_priv) | 9345 | void intel_pm_setup(struct drm_i915_private *dev_priv) |
9346 | { | 9346 | { |
9347 | mutex_init(&dev_priv->rps.hw_lock); | 9347 | mutex_init(&dev_priv->pcu_lock); |
9348 | 9348 | ||
9349 | INIT_DELAYED_WORK(&dev_priv->rps.autoenable_work, | 9349 | INIT_DELAYED_WORK(&dev_priv->rps.autoenable_work, |
9350 | __intel_autoenable_gt_powersave); | 9350 | __intel_autoenable_gt_powersave); |
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 7348c16c4a96..8af286c63d3b 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c | |||
@@ -785,7 +785,7 @@ static void vlv_set_power_well(struct drm_i915_private *dev_priv, | |||
785 | state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) : | 785 | state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) : |
786 | PUNIT_PWRGT_PWR_GATE(power_well_id); | 786 | PUNIT_PWRGT_PWR_GATE(power_well_id); |
787 | 787 | ||
788 | mutex_lock(&dev_priv->rps.hw_lock); | 788 | mutex_lock(&dev_priv->pcu_lock); |
789 | 789 | ||
790 | #define COND \ | 790 | #define COND \ |
791 | ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state) | 791 | ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state) |
@@ -806,7 +806,7 @@ static void vlv_set_power_well(struct drm_i915_private *dev_priv, | |||
806 | #undef COND | 806 | #undef COND |
807 | 807 | ||
808 | out: | 808 | out: |
809 | mutex_unlock(&dev_priv->rps.hw_lock); | 809 | mutex_unlock(&dev_priv->pcu_lock); |
810 | } | 810 | } |
811 | 811 | ||
812 | static void vlv_power_well_enable(struct drm_i915_private *dev_priv, | 812 | static void vlv_power_well_enable(struct drm_i915_private *dev_priv, |
@@ -833,7 +833,7 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv, | |||
833 | mask = PUNIT_PWRGT_MASK(power_well_id); | 833 | mask = PUNIT_PWRGT_MASK(power_well_id); |
834 | ctrl = PUNIT_PWRGT_PWR_ON(power_well_id); | 834 | ctrl = PUNIT_PWRGT_PWR_ON(power_well_id); |
835 | 835 | ||
836 | mutex_lock(&dev_priv->rps.hw_lock); | 836 | mutex_lock(&dev_priv->pcu_lock); |
837 | 837 | ||
838 | state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask; | 838 | state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask; |
839 | /* | 839 | /* |
@@ -852,7 +852,7 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv, | |||
852 | ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask; | 852 | ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask; |
853 | WARN_ON(ctrl != state); | 853 | WARN_ON(ctrl != state); |
854 | 854 | ||
855 | mutex_unlock(&dev_priv->rps.hw_lock); | 855 | mutex_unlock(&dev_priv->pcu_lock); |
856 | 856 | ||
857 | return enabled; | 857 | return enabled; |
858 | } | 858 | } |
@@ -1364,7 +1364,7 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv, | |||
1364 | bool enabled; | 1364 | bool enabled; |
1365 | u32 state, ctrl; | 1365 | u32 state, ctrl; |
1366 | 1366 | ||
1367 | mutex_lock(&dev_priv->rps.hw_lock); | 1367 | mutex_lock(&dev_priv->pcu_lock); |
1368 | 1368 | ||
1369 | state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe); | 1369 | state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe); |
1370 | /* | 1370 | /* |
@@ -1381,7 +1381,7 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv, | |||
1381 | ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe); | 1381 | ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe); |
1382 | WARN_ON(ctrl << 16 != state); | 1382 | WARN_ON(ctrl << 16 != state); |
1383 | 1383 | ||
1384 | mutex_unlock(&dev_priv->rps.hw_lock); | 1384 | mutex_unlock(&dev_priv->pcu_lock); |
1385 | 1385 | ||
1386 | return enabled; | 1386 | return enabled; |
1387 | } | 1387 | } |
@@ -1396,7 +1396,7 @@ static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv, | |||
1396 | 1396 | ||
1397 | state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe); | 1397 | state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe); |
1398 | 1398 | ||
1399 | mutex_lock(&dev_priv->rps.hw_lock); | 1399 | mutex_lock(&dev_priv->pcu_lock); |
1400 | 1400 | ||
1401 | #define COND \ | 1401 | #define COND \ |
1402 | ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state) | 1402 | ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state) |
@@ -1417,7 +1417,7 @@ static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv, | |||
1417 | #undef COND | 1417 | #undef COND |
1418 | 1418 | ||
1419 | out: | 1419 | out: |
1420 | mutex_unlock(&dev_priv->rps.hw_lock); | 1420 | mutex_unlock(&dev_priv->pcu_lock); |
1421 | } | 1421 | } |
1422 | 1422 | ||
1423 | static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv, | 1423 | static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv, |
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c index 7d971cb56116..75c872bb8cc9 100644 --- a/drivers/gpu/drm/i915/intel_sideband.c +++ b/drivers/gpu/drm/i915/intel_sideband.c | |||
@@ -81,7 +81,7 @@ u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr) | |||
81 | { | 81 | { |
82 | u32 val = 0; | 82 | u32 val = 0; |
83 | 83 | ||
84 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); | 84 | WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock)); |
85 | 85 | ||
86 | mutex_lock(&dev_priv->sb_lock); | 86 | mutex_lock(&dev_priv->sb_lock); |
87 | vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT, | 87 | vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT, |
@@ -95,7 +95,7 @@ int vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val) | |||
95 | { | 95 | { |
96 | int err; | 96 | int err; |
97 | 97 | ||
98 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); | 98 | WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock)); |
99 | 99 | ||
100 | mutex_lock(&dev_priv->sb_lock); | 100 | mutex_lock(&dev_priv->sb_lock); |
101 | err = vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT, | 101 | err = vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT, |
@@ -125,7 +125,7 @@ u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr) | |||
125 | { | 125 | { |
126 | u32 val = 0; | 126 | u32 val = 0; |
127 | 127 | ||
128 | WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); | 128 | WARN_ON(!mutex_is_locked(&dev_priv->pcu_lock)); |
129 | 129 | ||
130 | mutex_lock(&dev_priv->sb_lock); | 130 | mutex_lock(&dev_priv->sb_lock); |
131 | vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_NC, | 131 | vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_NC, |