Diffstat (limited to 'drivers/gpu/drm/radeon/evergreen.c')
 drivers/gpu/drm/radeon/evergreen.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 4fedd14e670a..e50807c29f69 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2869,7 +2869,7 @@ static int evergreen_cp_start(struct radeon_device *rdev)
 	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
 	radeon_ring_write(ring, 0);
 	radeon_ring_write(ring, 0);
-	radeon_ring_unlock_commit(rdev, ring);
+	radeon_ring_unlock_commit(rdev, ring, false);
 
 	cp_me = 0xff;
 	WREG32(CP_ME_CNTL, cp_me);
@@ -2912,7 +2912,7 @@ static int evergreen_cp_start(struct radeon_device *rdev)
 	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
 	radeon_ring_write(ring, 0x00000010); /* */
 
-	radeon_ring_unlock_commit(rdev, ring);
+	radeon_ring_unlock_commit(rdev, ring, false);
 
 	return 0;
 }
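
Editorial note: both call sites above gain a third argument to radeon_ring_unlock_commit(). A minimal sketch of the calling convention this implies, assuming the new boolean selects whether an HDP flush is issued on commit (the hdp_flush name is an assumption, not taken from this diff):

/* Sketch only: assumed prototype after this change; the parameter name
 * hdp_flush is illustrative and not confirmed by this diff. */
void radeon_ring_unlock_commit(struct radeon_device *rdev,
			       struct radeon_ring *ring,
			       bool hdp_flush);

/* Internal CP start-up submissions, as in the two hunks above, pass false. */
radeon_ring_unlock_commit(rdev, ring, false);
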
@@ -4749,17 +4749,17 @@ static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
 	wptr = RREG32(IH_RB_WPTR);
 
 	if (wptr & RB_OVERFLOW) {
+		wptr &= ~RB_OVERFLOW;
 		/* When a ring buffer overflow happen start parsing interrupt
 		 * from the last not overwritten vector (wptr + 16). Hopefully
 		 * this should allow us to catchup.
 		 */
-		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
-			 wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
+		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+			 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
 		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
 		tmp = RREG32(IH_RB_CNTL);
 		tmp |= IH_WPTR_OVERFLOW_CLEAR;
 		WREG32(IH_RB_CNTL, tmp);
-		wptr &= ~RB_OVERFLOW;
 	}
 	return (wptr & rdev->ih.ptr_mask);
 }
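
Editorial note: the overflow branch now strips RB_OVERFLOW from wptr before the value is logged or reused, so the recovery read pointer is computed from the plain ring offset rather than from a value that still carries the status flag. A sketch of the branch as it reads after this hunk, with comments added here (they are not part of the driver source):

if (wptr & RB_OVERFLOW) {
	/* Strip the status bit first so every later use of wptr sees
	 * only the ring offset. */
	wptr &= ~RB_OVERFLOW;
	/* Resume parsing at the oldest vector that has not been
	 * overwritten (wptr + 16); report all three values in hex. */
	dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
		 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
	rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
	/* Acknowledge the overflow condition in the IH ring control register. */
	tmp = RREG32(IH_RB_CNTL);
	tmp |= IH_WPTR_OVERFLOW_CLEAR;
	WREG32(IH_RB_CNTL, tmp);
}
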
@@ -5137,6 +5137,7 @@ restart_ih:
 		/* wptr/rptr are in bytes! */
 		rptr += 16;
 		rptr &= rdev->ih.ptr_mask;
+		WREG32(IH_RB_RPTR, rptr);
 	}
 	if (queue_hotplug)
 		schedule_work(&rdev->hotplug_work);
@@ -5145,7 +5146,6 @@ restart_ih:
 	if (queue_thermal && rdev->pm.dpm_enabled)
 		schedule_work(&rdev->pm.dpm.thermal.work);
 	rdev->ih.rptr = rptr;
-	WREG32(IH_RB_RPTR, rdev->ih.rptr);
 	atomic_set(&rdev->ih.lock, 0);
 
 	/* make sure wptr hasn't changed while processing */
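
Editorial note: taken together, the last two hunks move the IH_RB_RPTR register write from after the interrupt-processing loop to the point where rptr advances inside it. A rough sketch of the resulting shape, assuming the usual while (rptr != wptr) loop around the vector decoding in evergreen_irq_process() (the loop body is elided):

/* Sketch of evergreen_irq_process() after this change; loop shape assumed. */
while (rptr != wptr) {
	/* ... decode and handle one 16-byte IH vector ... */

	/* wptr/rptr are in bytes! */
	rptr += 16;
	rptr &= rdev->ih.ptr_mask;
	WREG32(IH_RB_RPTR, rptr);	/* hardware sees each vector retired */
}
/* ... hotplug/thermal work scheduling ... */
rdev->ih.rptr = rptr;			/* software copy still updated after the loop */
atomic_set(&rdev->ih.lock, 0);
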