author     Jerome Glisse <jglisse@redhat.com>   2010-01-07 06:39:21 -0500
committer  Dave Airlie <airlied@redhat.com>     2010-01-07 22:09:59 -0500
commit     cafe6609d6dc0a6a278f9fdbb59ce4d761a35ddd (patch)
tree       a3e15eabffd6e10bed1ef639fc2f2e087c67b047 /drivers/gpu/drm/radeon/r100.c
parent     62cdc0c20663ef840a94850892517b2b7f584904 (diff)
drm/radeon/kms: Schedule host path read cache flush through the ring V2
R300 family will hard lockup if the host path read cache flush is
done through MMIO to HOST_PATH_CNTL, but scheduling the same flush
through the ring seems harmless. This patch removes the hdp_flush
callback and adds a flush after each fence emission, which means
a flush after each IB schedule. Thus we should get the same behavior
without the hard lockup.
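
In effect, the patch replaces a direct MMIO register write with the same
write emitted as ring packets. A minimal sketch of the before/after using
the driver's own macros (surrounding ring setup assumed):

    /* Before: flush via a direct MMIO write; hard-locks R300-family chips. */
    u32 tmp = RREG32(RADEON_HOST_PATH_CNTL);
    WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_READ_BUFFER_INVALIDATE);

    /* After: the same flush scheduled through the CP ring, so it executes
     * in command-stream order, right after the fence's idle/clean wait. */
    radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
    radeon_ring_write(rdev, rdev->config.r100.hdp_cntl |
                            RADEON_HDP_READ_BUFFER_INVALIDATE);
    radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
    radeon_ring_write(rdev, rdev->config.r100.hdp_cntl);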
Tested on the R100, R200, R300, R400, R500, R600 and R700 families.
V2: Adjust fence counts in r600_blit_prepare_copy()
Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Reviewed-by: Alex Deucher <alexdeucher@gmail.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/radeon/r100.c')
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 14 ++++++--------
1 file changed, 6 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 71727460968f..1a056b774eec 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -356,6 +356,11 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
 	/* Wait until IDLE & CLEAN */
 	radeon_ring_write(rdev, PACKET0(0x1720, 0));
 	radeon_ring_write(rdev, (1 << 16) | (1 << 17));
+	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+	radeon_ring_write(rdev, rdev->config.r100.hdp_cntl |
+				RADEON_HDP_READ_BUFFER_INVALIDATE);
+	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+	radeon_ring_write(rdev, rdev->config.r100.hdp_cntl);
 	/* Emit fence sequence & fire IRQ */
 	radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
 	radeon_ring_write(rdev, fence->seq);
@@ -1713,14 +1718,6 @@ void r100_gpu_init(struct radeon_device *rdev)
 	r100_hdp_reset(rdev);
 }
 
-void r100_hdp_flush(struct radeon_device *rdev)
-{
-	u32 tmp;
-	tmp = RREG32(RADEON_HOST_PATH_CNTL);
-	tmp |= RADEON_HDP_READ_BUFFER_INVALIDATE;
-	WREG32(RADEON_HOST_PATH_CNTL, tmp);
-}
-
 void r100_hdp_reset(struct radeon_device *rdev)
 {
 	uint32_t tmp;
@@ -3313,6 +3310,7 @@ static int r100_startup(struct radeon_device *rdev)
 	}
 	/* Enable IRQ */
 	r100_irq_set(rdev);
+	rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
 	if (r) {
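
For context, a PACKET0 header tells the CP to write the dword(s) that
follow to the named register, so each pair of radeon_ring_write() calls
added above is the ring-scheduled equivalent of one WREG32(). The value
written back afterwards is the snapshot taken in the r100_startup() hunk,
which spares the fence path an MMIO read. A hypothetical wrapper for the
pattern (the patch open-codes it in r100_fence_ring_emit() instead):

    /* Hypothetical helper: schedule the equivalent of WREG32(reg, val)
     * through the CP ring.  PACKET0(reg, 0) announces one data dword
     * for register 'reg'. */
    static void radeon_ring_wreg(struct radeon_device *rdev, u32 reg, u32 val)
    {
            radeon_ring_write(rdev, PACKET0(reg, 0));
            radeon_ring_write(rdev, val);
    }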