author     Daniel Vetter <daniel.vetter@ffwll.ch>  2012-08-08 17:35:35 -0400
committer  Daniel Vetter <daniel.vetter@ffwll.ch>  2012-08-09 15:52:22 -0400
commit     c6a828d3269a5d0b9adda1438cf8d7cb2007ca71
tree       461339ff364393d241388e597a04fe07c911b7b6
parent     22bcfc6a4b2288675a54b761ebcd85b9613ab9a6
drm/i915: move all rps state into dev_priv->rps
This way it's easier to see what belongs together, and what is used
by the ilk ips code. Also add some comments that explain the locking.
Note that (cur|min|max)_delay need to be duplicated, because
they're also used by the ips code.
v2: Missed one place that the dev_priv->ips change caught ...
Reviewed-by: Ben Widawsky <ben@bwidawsk.net>
Reviewed-by: Damien Lespiau <damien.lespiau@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
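
The locking split this patch introduces is worth spelling out before diving
into the hunks. The sketch below is editorial illustration only, not code
from the tree; it condenses the IRQ and worker paths from i915_irq.c further
down, and the sketch_* names are made up for the example.

/* Which lock covers which part of dev_priv->rps (illustrative sketch). */

/* IRQ path: only rps.lock may be taken -- struct_mutex can sleep. */
static void sketch_irq_path(drm_i915_private_t *dev_priv, u32 pm_iir)
{
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->rps.lock, flags);
	/* rps.lock protects exactly rps.work and rps.pm_iir */
	dev_priv->rps.pm_iir |= pm_iir;
	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

	queue_work(dev_priv->wq, &dev_priv->rps.work);
}

/* Worker path: snapshot and clear pm_iir under the spinlock, then take
 * dev->struct_mutex before touching the delay fields or rps hw state. */
static void sketch_worker_path(drm_i915_private_t *dev_priv)
{
	u32 pm_iir;

	spin_lock_irq(&dev_priv->rps.lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	I915_WRITE(GEN6_PMIMR, 0);
	spin_unlock_irq(&dev_priv->rps.lock);

	mutex_lock(&dev_priv->dev->struct_mutex);
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
		gen6_set_rps(dev_priv->dev, dev_priv->rps.cur_delay + 1);
	mutex_unlock(&dev_priv->dev->struct_mutex);
}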
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c  | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c      |  2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h      | 18
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c      | 32
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c |  2
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c      | 47
6 files changed, 63 insertions(+), 49 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 9c9e199970af..ed4bc98095b1 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1274,7 +1274,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
 
 	seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\n");
 
-	for (gpu_freq = dev_priv->min_delay; gpu_freq <= dev_priv->max_delay;
+	for (gpu_freq = dev_priv->rps.min_delay;
+	     gpu_freq <= dev_priv->rps.max_delay;
 	     gpu_freq++) {
 		I915_WRITE(GEN6_PCODE_DATA, gpu_freq);
 		I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
@@ -1712,7 +1713,7 @@ i915_max_freq_read(struct file *filp,
 		return ret;
 
 	len = snprintf(buf, sizeof(buf),
-		       "max freq: %d\n", dev_priv->max_delay * 50);
+		       "max freq: %d\n", dev_priv->rps.max_delay * 50);
 	mutex_unlock(&dev->struct_mutex);
 
 	if (len > sizeof(buf))
@@ -1755,7 +1756,7 @@ i915_max_freq_write(struct file *filp,
 	/*
 	 * Turbo will still be enabled, but won't go above the set value.
 	 */
-	dev_priv->max_delay = val / 50;
+	dev_priv->rps.max_delay = val / 50;
 
 	gen6_set_rps(dev, val / 50);
 	mutex_unlock(&dev->struct_mutex);
@@ -1788,7 +1789,7 @@ i915_min_freq_read(struct file *filp, char __user *ubuf, size_t max,
 		return ret;
 
 	len = snprintf(buf, sizeof(buf),
-		       "min freq: %d\n", dev_priv->min_delay * 50);
+		       "min freq: %d\n", dev_priv->rps.min_delay * 50);
 	mutex_unlock(&dev->struct_mutex);
 
 	if (len > sizeof(buf))
@@ -1829,7 +1830,7 @@ i915_min_freq_write(struct file *filp, const char __user *ubuf, size_t cnt,
 	/*
 	 * Turbo will still be enabled, but won't go below the set value.
 	 */
-	dev_priv->min_delay = val / 50;
+	dev_priv->rps.min_delay = val / 50;
 
 	gen6_set_rps(dev, val / 50);
 	mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index a7a213cf06fb..0a1b64f8d442 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1605,7 +1605,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
 	spin_lock_init(&dev_priv->irq_lock);
 	spin_lock_init(&dev_priv->error_lock);
-	spin_lock_init(&dev_priv->rps_lock);
+	spin_lock_init(&dev_priv->rps.lock);
 
 	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
 		dev_priv->num_pipe = 3;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 26a2cf6da3a2..1614097f975a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -819,9 +819,21 @@ typedef struct drm_i915_private {
 
 	bool mchbar_need_disable;
 
-	struct work_struct rps_work;
-	spinlock_t rps_lock;
-	u32 pm_iir;
+	/* gen6+ rps state */
+	struct {
+		struct work_struct work;
+		u32 pm_iir;
+		/* lock - irqsave spinlock that protects the work_struct and
+		 * pm_iir. */
+		spinlock_t lock;
+
+		/* The below variables and all the rps hw state are protected
+		 * by dev->struct_mutex. */
+		u8 cur_delay;
+		u8 min_delay;
+		u8 max_delay;
+	} rps;
+
 
 	u8 cur_delay;
 	u8 min_delay;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 440c9051aa9b..74c9a0e52507 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -349,16 +349,16 @@ static void notify_ring(struct drm_device *dev,
 static void gen6_pm_rps_work(struct work_struct *work)
 {
 	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
-						    rps_work);
+						    rps.work);
 	u32 pm_iir, pm_imr;
 	u8 new_delay;
 
-	spin_lock_irq(&dev_priv->rps_lock);
-	pm_iir = dev_priv->pm_iir;
-	dev_priv->pm_iir = 0;
+	spin_lock_irq(&dev_priv->rps.lock);
+	pm_iir = dev_priv->rps.pm_iir;
+	dev_priv->rps.pm_iir = 0;
 	pm_imr = I915_READ(GEN6_PMIMR);
 	I915_WRITE(GEN6_PMIMR, 0);
-	spin_unlock_irq(&dev_priv->rps_lock);
+	spin_unlock_irq(&dev_priv->rps.lock);
 
 	if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
 		return;
@@ -366,9 +366,9 @@ static void gen6_pm_rps_work(struct work_struct *work)
 	mutex_lock(&dev_priv->dev->struct_mutex);
 
 	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
-		new_delay = dev_priv->cur_delay + 1;
+		new_delay = dev_priv->rps.cur_delay + 1;
 	else
-		new_delay = dev_priv->cur_delay - 1;
+		new_delay = dev_priv->rps.cur_delay - 1;
 
 	gen6_set_rps(dev_priv->dev, new_delay);
 
@@ -488,20 +488,20 @@ static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
 	 * IIR bits should never already be set because IMR should
 	 * prevent an interrupt from being shown in IIR. The warning
 	 * displays a case where we've unsafely cleared
-	 * dev_priv->pm_iir. Although missing an interrupt of the same
+	 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
 	 * type is not a problem, it displays a problem in the logic.
 	 *
-	 * The mask bit in IMR is cleared by rps_work.
+	 * The mask bit in IMR is cleared by dev_priv->rps.work.
 	 */
 
-	spin_lock_irqsave(&dev_priv->rps_lock, flags);
-	WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
-	dev_priv->pm_iir |= pm_iir;
-	I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
+	spin_lock_irqsave(&dev_priv->rps.lock, flags);
+	WARN(dev_priv->rps.pm_iir & pm_iir, "Missed a PM interrupt\n");
+	dev_priv->rps.pm_iir |= pm_iir;
+	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
 	POSTING_READ(GEN6_PMIMR);
-	spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
+	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
 
-	queue_work(dev_priv->wq, &dev_priv->rps_work);
+	queue_work(dev_priv->wq, &dev_priv->rps.work);
 }
 
 static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
@@ -2649,7 +2649,7 @@ void intel_irq_init(struct drm_device *dev)
 
 	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
 	INIT_WORK(&dev_priv->error_work, i915_error_work_func);
-	INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
+	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
 	INIT_WORK(&dev_priv->parity_error_work, ivybridge_parity_work);
 
 	dev->driver->get_vblank_counter = i915_get_vblank_counter;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 2e1f28f38ee9..bddb29002c03 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -7218,7 +7218,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
 	 * enqueue unpin/hotplug work. */
 	drm_irq_uninstall(dev);
 	cancel_work_sync(&dev_priv->hotplug_work);
-	cancel_work_sync(&dev_priv->rps_work);
+	cancel_work_sync(&dev_priv->rps.work);
 
 	/* flush any delayed tasks or pending work */
 	flush_scheduled_work();
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 4fe8d0a9975b..e1439ae3556b 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2277,9 +2277,10 @@ static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val)
 	u32 limits;
 
 	limits = 0;
-	if (*val >= dev_priv->max_delay)
-		*val = dev_priv->max_delay;
-	limits |= dev_priv->max_delay << 24;
+
+	if (*val >= dev_priv->rps.max_delay)
+		*val = dev_priv->rps.max_delay;
+	limits |= dev_priv->rps.max_delay << 24;
 
 	/* Only set the down limit when we've reached the lowest level to avoid
 	 * getting more interrupts, otherwise leave this clear. This prevents a
@@ -2287,9 +2288,9 @@ static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val)
 	 * the hw runs at the minimal clock before selecting the desired
 	 * frequency, if the down threshold expires in that window we will not
 	 * receive a down interrupt. */
-	if (*val <= dev_priv->min_delay) {
-		*val = dev_priv->min_delay;
-		limits |= dev_priv->min_delay << 16;
+	if (*val <= dev_priv->rps.min_delay) {
+		*val = dev_priv->rps.min_delay;
+		limits |= dev_priv->rps.min_delay << 16;
 	}
 
 	return limits;
@@ -2302,7 +2303,7 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-	if (val == dev_priv->cur_delay)
+	if (val == dev_priv->rps.cur_delay)
 		return;
 
 	I915_WRITE(GEN6_RPNSWREQ,
@@ -2315,7 +2316,7 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
 	 */
 	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
 
-	dev_priv->cur_delay = val;
+	dev_priv->rps.cur_delay = val;
 }
 
 static void gen6_disable_rps(struct drm_device *dev)
@@ -2331,9 +2332,9 @@ static void gen6_disable_rps(struct drm_device *dev)
 	 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
 	 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
 
-	spin_lock_irq(&dev_priv->rps_lock);
-	dev_priv->pm_iir = 0;
-	spin_unlock_irq(&dev_priv->rps_lock);
+	spin_lock_irq(&dev_priv->rps.lock);
+	dev_priv->rps.pm_iir = 0;
+	spin_unlock_irq(&dev_priv->rps.lock);
 
 	I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
 }
@@ -2402,9 +2403,9 @@ static void gen6_enable_rps(struct drm_device *dev)
 	gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
 
 	/* In units of 100MHz */
-	dev_priv->max_delay = rp_state_cap & 0xff;
-	dev_priv->min_delay = (rp_state_cap & 0xff0000) >> 16;
-	dev_priv->cur_delay = 0;
+	dev_priv->rps.max_delay = rp_state_cap & 0xff;
+	dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16;
+	dev_priv->rps.cur_delay = 0;
 
 	/* disable the counters and set deterministic thresholds */
 	I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -2457,8 +2458,8 @@ static void gen6_enable_rps(struct drm_device *dev)
 
 	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
 	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
-		   dev_priv->max_delay << 24 |
-		   dev_priv->min_delay << 16);
+		   dev_priv->rps.max_delay << 24 |
+		   dev_priv->rps.min_delay << 16);
 
 	if (IS_HASWELL(dev)) {
 		I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
@@ -2503,7 +2504,7 @@ static void gen6_enable_rps(struct drm_device *dev)
 			     500))
 		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
 	if (pcu_mbox & (1<<31)) { /* OC supported */
-		dev_priv->max_delay = pcu_mbox & 0xff;
+		dev_priv->rps.max_delay = pcu_mbox & 0xff;
 		DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
 	}
 
@@ -2511,10 +2512,10 @@ static void gen6_enable_rps(struct drm_device *dev)
 
 	/* requires MSI enabled */
 	I915_WRITE(GEN6_PMIER, GEN6_PM_DEFERRED_EVENTS);
-	spin_lock_irq(&dev_priv->rps_lock);
-	WARN_ON(dev_priv->pm_iir != 0);
+	spin_lock_irq(&dev_priv->rps.lock);
+	WARN_ON(dev_priv->rps.pm_iir != 0);
 	I915_WRITE(GEN6_PMIMR, 0);
-	spin_unlock_irq(&dev_priv->rps_lock);
+	spin_unlock_irq(&dev_priv->rps.lock);
 	/* enable all PM interrupts */
 	I915_WRITE(GEN6_PMINTRMSK, 0);
 
@@ -2546,9 +2547,9 @@ static void gen6_update_ring_freq(struct drm_device *dev)
 	 * to use for memory access. We do this by specifying the IA frequency
 	 * the PCU should use as a reference to determine the ring frequency.
 	 */
-	for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
+	for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay;
 	     gpu_freq--) {
-		int diff = dev_priv->max_delay - gpu_freq;
+		int diff = dev_priv->rps.max_delay - gpu_freq;
 
 		/*
 		 * For GPU frequencies less than 750MHz, just use the lowest
@@ -2991,7 +2992,7 @@ unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
 
 	assert_spin_locked(&mchdev_lock);
 
-	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
+	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_delay * 4));
 	pxvid = (pxvid >> 24) & 0x7f;
 	ext_v = pvid_to_extvid(dev_priv, pxvid);
 