author     Daniel Vetter <daniel.vetter@ffwll.ch>   2014-07-16 03:49:40 -0400
committer  Daniel Vetter <daniel.vetter@ffwll.ch>   2014-07-23 01:05:30 -0400
commit     480c80338618867851659710d1a27c9cc85833d2
tree       c98457de347c52aecbaca5d51a25c106a8199af2
parent     ca1381b55b715ae3435a0d600a345bad90233a9b
drm/i915: Use genX_ prefix for gt irq enable/disable functions
Traditionally we use genX_ for GT/render stuff and the codenames for
display stuff. But the gt and pm interrupt handling functions on
gen5/6+ stuck out as exceptions, so convert them.
Looking at the diff, this nicely realigns our ducks since almost all
the callers are already platform-specific functions following the
genX_ pattern.
Spotted while reviewing some internal rps patches.
No functional change in this patch.
Acked-by: Damien Lespiau <damien.lespiau@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c          | 24
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h         | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c          |  4
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c  | 12
4 files changed, 26 insertions, 26 deletions
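
The functions being renamed are thin enable/disable wrappers around a shared masked-interrupt update routine: as the first i915_irq.c hunk below shows, enable calls the update helper with (mask, mask) and disable with (mask, 0), which clears or sets the corresponding bits in the cached interrupt mask register (a set IMR bit blocks that source). The following is a minimal, self-contained sketch of that pattern for illustration only; the struct, the _model names and main() are assumptions added here, not the driver's actual code, which takes dev_priv->irq_lock and writes GTIMR/GEN6_PMIMR via I915_WRITE().

#include <stdint.h>
#include <stdio.h>

/*
 * Simplified userspace model of the masked-IRQ update pattern behind the
 * renamed helpers.  A set bit in the (simulated) interrupt mask register
 * blocks that interrupt source, so the update routine clears bits to
 * enable and sets them to disable.  Illustration only -- the real driver
 * holds dev_priv->irq_lock and writes the hardware register, followed by
 * a posting read.
 */
struct fake_i915 {
        uint32_t gt_irq_mask;   /* cached copy of the simulated IMR */
};

static void update_gt_irq(struct fake_i915 *i915,
                          uint32_t interrupt_mask,
                          uint32_t enabled_irq_mask)
{
        /* Clear the affected bits, then re-set those that stay disabled. */
        i915->gt_irq_mask &= ~interrupt_mask;
        i915->gt_irq_mask |= ~enabled_irq_mask & interrupt_mask;
        /* The real code would write the register and do a posting read here. */
}

/* enable: unmask every bit in @mask -> update(mask, mask) */
static void gen5_enable_gt_irq_model(struct fake_i915 *i915, uint32_t mask)
{
        update_gt_irq(i915, mask, mask);
}

/* disable: mask every bit in @mask -> update(mask, 0) */
static void gen5_disable_gt_irq_model(struct fake_i915 *i915, uint32_t mask)
{
        update_gt_irq(i915, mask, 0);
}

int main(void)
{
        struct fake_i915 i915 = { .gt_irq_mask = 0xffffffff }; /* all masked */

        gen5_enable_gt_irq_model(&i915, 0x3);   /* unmask sources 0 and 1 */
        printf("after enable:  %#010x\n", i915.gt_irq_mask);

        gen5_disable_gt_irq_model(&i915, 0x1);  /* mask source 0 again */
        printf("after disable: %#010x\n", i915.gt_irq_mask);
        return 0;
}
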
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 7d61ca2a01df..dfe923a3cb92 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -182,12 +182,12 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
         POSTING_READ(GTIMR);
 }
 
-void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
 {
         ilk_update_gt_irq(dev_priv, mask, mask);
 }
 
-void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
 {
         ilk_update_gt_irq(dev_priv, mask, 0);
 }
@@ -220,12 +220,12 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
         }
 }
 
-void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
 {
         snb_update_pm_irq(dev_priv, mask, mask);
 }
 
-void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
 {
         snb_update_pm_irq(dev_priv, mask, 0);
 }
@@ -278,12 +278,12 @@ static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
         }
 }
 
-void bdw_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
 {
         bdw_update_pm_irq(dev_priv, mask, mask);
 }
 
-void bdw_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
 {
         bdw_update_pm_irq(dev_priv, mask, 0);
 }
@@ -1408,10 +1408,10 @@ static void gen6_pm_rps_work(struct work_struct *work)
         pm_iir = dev_priv->rps.pm_iir;
         dev_priv->rps.pm_iir = 0;
         if (INTEL_INFO(dev_priv->dev)->gen >= 8)
-                bdw_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+                gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
         else {
                 /* Make sure not to corrupt PMIMR state used by ringbuffer */
-                snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+                gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
         }
         spin_unlock_irq(&dev_priv->irq_lock);
 
@@ -1553,7 +1553,7 @@ static void ivybridge_parity_work(struct work_struct *work)
 out:
         WARN_ON(dev_priv->l3_parity.which_slice);
         spin_lock_irqsave(&dev_priv->irq_lock, flags);
-        ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
+        gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
         mutex_unlock(&dev_priv->dev->struct_mutex);
@@ -1567,7 +1567,7 @@ static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
                 return;
 
         spin_lock(&dev_priv->irq_lock);
-        ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
+        gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
         spin_unlock(&dev_priv->irq_lock);
 
         iir &= GT_PARITY_ERROR(dev);
@@ -1622,7 +1622,7 @@ static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
 
         spin_lock(&dev_priv->irq_lock);
         dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
-        bdw_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
+        gen8_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
         spin_unlock(&dev_priv->irq_lock);
 
         queue_work(dev_priv->wq, &dev_priv->rps.work);
@@ -1969,7 +1969,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
         if (pm_iir & dev_priv->pm_rps_events) {
                 spin_lock(&dev_priv->irq_lock);
                 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
-                snb_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
+                gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
                 spin_unlock(&dev_priv->irq_lock);
 
                 queue_work(dev_priv->wq, &dev_priv->rps.work);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 6093ebdeb7cf..9d97a50cae4b 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -683,12 +683,12 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
 bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
                                            enum transcoder pch_transcoder,
                                            bool enable);
-void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void bdw_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
-void bdw_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
 void intel_runtime_pm_disable_interrupts(struct drm_device *dev);
 void intel_runtime_pm_restore_interrupts(struct drm_device *dev);
 int intel_get_crtc_scanline(struct intel_crtc *crtc);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 6e03851a4fa4..25ae4e6d3dd6 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3474,7 +3474,7 @@ static void gen8_enable_rps_interrupts(struct drm_device *dev)
 
         spin_lock_irq(&dev_priv->irq_lock);
         WARN_ON(dev_priv->rps.pm_iir);
-        bdw_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+        gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
         I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
         spin_unlock_irq(&dev_priv->irq_lock);
 }
@@ -3485,7 +3485,7 @@ static void gen6_enable_rps_interrupts(struct drm_device *dev)
 
         spin_lock_irq(&dev_priv->irq_lock);
         WARN_ON(dev_priv->rps.pm_iir);
-        snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+        gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
         I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
         spin_unlock_irq(&dev_priv->irq_lock);
 }
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 599709e80a16..b3d8f766fa7f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1004,7 +1004,7 @@ gen5_ring_get_irq(struct intel_engine_cs *ring)
 
         spin_lock_irqsave(&dev_priv->irq_lock, flags);
         if (ring->irq_refcount++ == 0)
-                ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
+                gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
         return true;
@@ -1019,7 +1019,7 @@ gen5_ring_put_irq(struct intel_engine_cs *ring)
 
         spin_lock_irqsave(&dev_priv->irq_lock, flags);
         if (--ring->irq_refcount == 0)
-                ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
+                gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
 
@@ -1212,7 +1212,7 @@ gen6_ring_get_irq(struct intel_engine_cs *ring)
                                          GT_PARITY_ERROR(dev)));
                 else
                         I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
-                ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
+                gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask);
         }
         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
@@ -1232,7 +1232,7 @@ gen6_ring_put_irq(struct intel_engine_cs *ring)
                         I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
                 else
                         I915_WRITE_IMR(ring, ~0);
-                ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
+                gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask);
         }
         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }
@@ -1250,7 +1250,7 @@ hsw_vebox_get_irq(struct intel_engine_cs *ring)
         spin_lock_irqsave(&dev_priv->irq_lock, flags);
         if (ring->irq_refcount++ == 0) {
                 I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
-                snb_enable_pm_irq(dev_priv, ring->irq_enable_mask);
+                gen6_enable_pm_irq(dev_priv, ring->irq_enable_mask);
         }
         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
@@ -1270,7 +1270,7 @@ hsw_vebox_put_irq(struct intel_engine_cs *ring)
         spin_lock_irqsave(&dev_priv->irq_lock, flags);
         if (--ring->irq_refcount == 0) {
                 I915_WRITE_IMR(ring, ~0);
-                snb_disable_pm_irq(dev_priv, ring->irq_enable_mask);
+                gen6_disable_pm_irq(dev_priv, ring->irq_enable_mask);
         }
         spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 }