author     Ben Widawsky <benjamin.widawsky@intel.com>   2012-07-24 23:47:31 -0400
committer  Daniel Vetter <daniel.vetter@ffwll.ch>       2012-07-25 12:23:56 -0400
commit     e1ef7cc299839e68dae3f1843f62e52acda04538
tree       bebf6699022aa920bcbb02adf3c2536194f225f4 /drivers/gpu/drm/i915/intel_ringbuffer.c
parent     2e4291e0bc6cff9514515a899a8158ea62b3ff90
drm/i915: Macro to determine DPF support
Originally I had a macro specifically for DPF support, and Daniel, with
good reason, asked me to change it to this. It's not the way I would
have gone (and indeed I didn't), but for now there is no distinction,
as all platforms with L3 also have DPF.
Note: The good reason is that DPF is an L3$ feature (at least on
current hw), hence I don't expect one to go without the other.
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
[danvet: added note]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
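The check itself comes from a platform macro added in i915_drv.h, which falls outside this diffstat (limited to intel_ringbuffer.c). As a minimal sketch, and assuming Ivy Bridge is the only L3$-equipped platform at this point, the definition could look roughly like the following; the exact form in the real header may differ:

/* Sketch only (assumed definition, drivers/gpu/drm/i915/i915_drv.h):
 * DPF is treated here as a property of the GPU L3 cache, so a single
 * macro gates both features for now.
 */
#define HAS_L3_GPU_CACHE(dev)	(IS_IVYBRIDGE(dev))

If L3 and DPF ever diverge on future hardware, a dedicated DPF macro can be reintroduced and the parity-error interrupt paths in the diff below switched over to it.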
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c  6
1 file changed, 3 insertions, 3 deletions
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 8b7085e4cf84..c58f1b91d08b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -454,7 +454,7 @@ static int init_render_ring(struct intel_ring_buffer *ring)
 	if (INTEL_INFO(dev)->gen >= 6)
 		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
-	if (IS_IVYBRIDGE(dev))
+	if (HAS_L3_GPU_CACHE(dev))
 		I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
 
 	return ret;
@@ -844,7 +844,7 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (ring->irq_refcount++ == 0) {
-		if (IS_IVYBRIDGE(dev) && ring->id == RCS)
+		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
 			I915_WRITE_IMR(ring, ~(ring->irq_enable_mask |
 					       GEN6_RENDER_L3_PARITY_ERROR));
 		else
@@ -867,7 +867,7 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
 
 	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 	if (--ring->irq_refcount == 0) {
-		if (IS_IVYBRIDGE(dev) && ring->id == RCS)
+		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
 			I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
 		else
 			I915_WRITE_IMR(ring, ~0);