author    Jerome Glisse <jglisse@redhat.com>   2010-01-07 06:39:21 -0500
committer Dave Airlie <airlied@redhat.com>     2010-01-07 22:09:59 -0500
commit    cafe6609d6dc0a6a278f9fdbb59ce4d761a35ddd (patch)
tree      a3e15eabffd6e10bed1ef639fc2f2e087c67b047
parent    62cdc0c20663ef840a94850892517b2b7f584904 (diff)
drm/radeon/kms: Schedule host path read cache flush through the ring V2
The R300 family will hard lockup if the host path read cache flush is done through an MMIO write to HOST_PATH_CNTL, but scheduling the same flush through the ring seems harmless. This patch removes the hdp_flush callback and adds a flush after each fence emission, which means a flush after each IB schedule. Thus we should get the same behavior without the hard lockup.

Tested on the R100, R200, R300, R400, R500, R600 and R700 families.

V2: Adjust fence counts in r600_blit_prepare_copy()

Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Reviewed-by: Alex Deucher <alexdeucher@gmail.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
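For reference, a minimal sketch of the new scheme, lifted from the r100 hunks in the diff below (the r300 and r600 paths follow the same pattern with their own registers): each *_startup() saves HOST_PATH_CNTL once, and the fence emit then schedules the HDP read cache invalidate through the CP ring instead of doing an MMIO write:

	/* at startup: remember the current HOST_PATH_CNTL value */
	rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);

	/* in r100_fence_ring_emit(): flush the host path read cache via the ring */
	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(rdev, rdev->config.r100.hdp_cntl |
				RADEON_HDP_READ_BUFFER_INVALIDATE);
	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
	radeon_ring_write(rdev, rdev->config.r100.hdp_cntl);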
-rw-r--r--  drivers/gpu/drm/radeon/r100.c           | 14
-rw-r--r--  drivers/gpu/drm/radeon/r300.c           | 16
-rw-r--r--  drivers/gpu/drm/radeon/r420.c           |  1
-rw-r--r--  drivers/gpu/drm/radeon/r520.c           |  1
-rw-r--r--  drivers/gpu/drm/radeon/r600.c           |  7
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_kms.c  |  4
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h         |  4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h    | 12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c     |  2
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c          |  1
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c          |  1
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c          |  1
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c          |  1
13 files changed, 33 insertions(+), 32 deletions(-)
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 71727460968f..1a056b774eec 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -356,6 +356,11 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
 	/* Wait until IDLE & CLEAN */
 	radeon_ring_write(rdev, PACKET0(0x1720, 0));
 	radeon_ring_write(rdev, (1 << 16) | (1 << 17));
+	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+	radeon_ring_write(rdev, rdev->config.r100.hdp_cntl |
+				RADEON_HDP_READ_BUFFER_INVALIDATE);
+	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+	radeon_ring_write(rdev, rdev->config.r100.hdp_cntl);
 	/* Emit fence sequence & fire IRQ */
 	radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
 	radeon_ring_write(rdev, fence->seq);
@@ -1713,14 +1718,6 @@ void r100_gpu_init(struct radeon_device *rdev)
 	r100_hdp_reset(rdev);
 }
 
-void r100_hdp_flush(struct radeon_device *rdev)
-{
-	u32 tmp;
-	tmp = RREG32(RADEON_HOST_PATH_CNTL);
-	tmp |= RADEON_HDP_READ_BUFFER_INVALIDATE;
-	WREG32(RADEON_HOST_PATH_CNTL, tmp);
-}
-
 void r100_hdp_reset(struct radeon_device *rdev)
 {
 	uint32_t tmp;
@@ -3313,6 +3310,7 @@ static int r100_startup(struct radeon_device *rdev)
 	}
 	/* Enable IRQ */
 	r100_irq_set(rdev);
+	rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
 	if (r) {
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 3f2cc9e2e8d9..b8623b734a27 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -36,7 +36,15 @@
 #include "rv350d.h"
 #include "r300_reg_safe.h"
 
-/* This files gather functions specifics to: r300,r350,rv350,rv370,rv380 */
+/* This files gather functions specifics to: r300,r350,rv350,rv370,rv380
+ *
+ * GPU Errata:
+ * - HOST_PATH_CNTL: r300 family seems to dislike write to HOST_PATH_CNTL
+ *   using MMIO to flush host path read cache, this lead to HARDLOCKUP.
+ *   However, scheduling such write to the ring seems harmless, i suspect
+ *   the CP read collide with the flush somehow, or maybe the MC, hard to
+ *   tell. (Jerome Glisse)
+ */
 
 /*
  * rv370,rv380 PCIE GART
@@ -178,6 +186,11 @@ void r300_fence_ring_emit(struct radeon_device *rdev,
 	/* Wait until IDLE & CLEAN */
 	radeon_ring_write(rdev, PACKET0(0x1720, 0));
 	radeon_ring_write(rdev, (1 << 17) | (1 << 16) | (1 << 9));
+	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+	radeon_ring_write(rdev, rdev->config.r300.hdp_cntl |
+				RADEON_HDP_READ_BUFFER_INVALIDATE);
+	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+	radeon_ring_write(rdev, rdev->config.r300.hdp_cntl);
 	/* Emit fence sequence & fire IRQ */
 	radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
 	radeon_ring_write(rdev, fence->seq);
@@ -1258,6 +1271,7 @@ static int r300_startup(struct radeon_device *rdev)
 	}
 	/* Enable IRQ */
 	r100_irq_set(rdev);
+	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
 	if (r) {
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index f46502a253ec..1d4d16ed7db1 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -219,6 +219,7 @@ static int r420_startup(struct radeon_device *rdev)
 	r420_pipes_init(rdev);
 	/* Enable IRQ */
 	r100_irq_set(rdev);
+	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
 	if (r) {
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 0f3843b6dac7..9a189072f2b9 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -186,6 +186,7 @@ static int r520_startup(struct radeon_device *rdev)
 	}
 	/* Enable IRQ */
 	rs600_irq_set(rdev);
+	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
 	if (r) {
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 921926f3d1f3..e2f43c184aa9 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1388,11 +1388,6 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
 	(void)RREG32(PCIE_PORT_DATA);
 }
 
-void r600_hdp_flush(struct radeon_device *rdev)
-{
-	WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
-}
-
 /*
  * CP & Ring
  */
@@ -1789,6 +1784,8 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
 	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
 	radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
 	radeon_ring_write(rdev, fence->seq);
+	radeon_ring_write(rdev, PACKET0(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
+	radeon_ring_write(rdev, 1);
 	/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
 	radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
 	radeon_ring_write(rdev, RB_INT_STAT);
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index 9aecafb51b66..8787ea89dc6e 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -577,9 +577,9 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
 	ring_size = num_loops * dwords_per_loop;
 	/* set default + shaders */
 	ring_size += 40; /* shaders + def state */
-	ring_size += 5; /* fence emit for VB IB */
+	ring_size += 7; /* fence emit for VB IB */
 	ring_size += 5; /* done copy */
-	ring_size += 5; /* fence emit for done copy */
+	ring_size += 7; /* fence emit for done copy */
 	r = radeon_ring_lock(rdev, ring_size);
 	WARN_ON(r);
 
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index cee8bdc6c9ff..eb5f99b9469d 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -656,7 +656,6 @@ struct radeon_asic {
 			       uint32_t offset, uint32_t obj_size);
 	int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
 	void (*bandwidth_update)(struct radeon_device *rdev);
-	void (*hdp_flush)(struct radeon_device *rdev);
 	void (*hpd_init)(struct radeon_device *rdev);
 	void (*hpd_fini)(struct radeon_device *rdev);
 	bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -669,12 +668,14 @@ struct radeon_asic {
 struct r100_asic {
 	const unsigned *reg_safe_bm;
 	unsigned reg_safe_bm_size;
+	u32 hdp_cntl;
 };
 
 struct r300_asic {
 	const unsigned *reg_safe_bm;
 	unsigned reg_safe_bm_size;
 	u32 resync_scratch;
+	u32 hdp_cntl;
 };
 
 struct r600_asic {
@@ -1010,7 +1011,6 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
 #define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s)))
 #define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r)))
 #define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev))
-#define radeon_hdp_flush(rdev) (rdev)->asic->hdp_flush((rdev))
 #define radeon_hpd_init(rdev) (rdev)->asic->hpd_init((rdev))
 #define radeon_hpd_fini(rdev) (rdev)->asic->hpd_fini((rdev))
 #define radeon_hpd_sense(rdev, hpd) (rdev)->asic->hpd_sense((rdev), (hpd))
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index eb29217bbf1d..f2fbd2e4e9df 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -77,7 +77,6 @@ int r100_clear_surface_reg(struct radeon_device *rdev, int reg);
 void r100_bandwidth_update(struct radeon_device *rdev);
 void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 int r100_ring_test(struct radeon_device *rdev);
-void r100_hdp_flush(struct radeon_device *rdev);
 void r100_hpd_init(struct radeon_device *rdev);
 void r100_hpd_fini(struct radeon_device *rdev);
 bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -114,7 +113,6 @@ static struct radeon_asic r100_asic = {
 	.set_surface_reg = r100_set_surface_reg,
 	.clear_surface_reg = r100_clear_surface_reg,
 	.bandwidth_update = &r100_bandwidth_update,
-	.hdp_flush = &r100_hdp_flush,
 	.hpd_init = &r100_hpd_init,
 	.hpd_fini = &r100_hpd_fini,
 	.hpd_sense = &r100_hpd_sense,
@@ -174,7 +172,6 @@ static struct radeon_asic r300_asic = {
 	.set_surface_reg = r100_set_surface_reg,
 	.clear_surface_reg = r100_clear_surface_reg,
 	.bandwidth_update = &r100_bandwidth_update,
-	.hdp_flush = &r100_hdp_flush,
 	.hpd_init = &r100_hpd_init,
 	.hpd_fini = &r100_hpd_fini,
 	.hpd_sense = &r100_hpd_sense,
@@ -218,7 +215,6 @@ static struct radeon_asic r420_asic = {
 	.set_surface_reg = r100_set_surface_reg,
 	.clear_surface_reg = r100_clear_surface_reg,
 	.bandwidth_update = &r100_bandwidth_update,
-	.hdp_flush = &r100_hdp_flush,
 	.hpd_init = &r100_hpd_init,
 	.hpd_fini = &r100_hpd_fini,
 	.hpd_sense = &r100_hpd_sense,
@@ -267,7 +263,6 @@ static struct radeon_asic rs400_asic = {
 	.set_surface_reg = r100_set_surface_reg,
 	.clear_surface_reg = r100_clear_surface_reg,
 	.bandwidth_update = &r100_bandwidth_update,
-	.hdp_flush = &r100_hdp_flush,
 	.hpd_init = &r100_hpd_init,
 	.hpd_fini = &r100_hpd_fini,
 	.hpd_sense = &r100_hpd_sense,
@@ -324,7 +319,6 @@ static struct radeon_asic rs600_asic = {
 	.set_pcie_lanes = NULL,
 	.set_clock_gating = &radeon_atom_set_clock_gating,
 	.bandwidth_update = &rs600_bandwidth_update,
-	.hdp_flush = &r100_hdp_flush,
 	.hpd_init = &rs600_hpd_init,
 	.hpd_fini = &rs600_hpd_fini,
 	.hpd_sense = &rs600_hpd_sense,
@@ -372,7 +366,6 @@ static struct radeon_asic rs690_asic = {
 	.set_surface_reg = r100_set_surface_reg,
 	.clear_surface_reg = r100_clear_surface_reg,
 	.bandwidth_update = &rs690_bandwidth_update,
-	.hdp_flush = &r100_hdp_flush,
 	.hpd_init = &rs600_hpd_init,
 	.hpd_fini = &rs600_hpd_fini,
 	.hpd_sense = &rs600_hpd_sense,
@@ -424,7 +417,6 @@ static struct radeon_asic rv515_asic = {
 	.set_surface_reg = r100_set_surface_reg,
 	.clear_surface_reg = r100_clear_surface_reg,
 	.bandwidth_update = &rv515_bandwidth_update,
-	.hdp_flush = &r100_hdp_flush,
 	.hpd_init = &rs600_hpd_init,
 	.hpd_fini = &rs600_hpd_fini,
 	.hpd_sense = &rs600_hpd_sense,
@@ -467,7 +459,6 @@ static struct radeon_asic r520_asic = {
 	.set_surface_reg = r100_set_surface_reg,
 	.clear_surface_reg = r100_clear_surface_reg,
 	.bandwidth_update = &rv515_bandwidth_update,
-	.hdp_flush = &r100_hdp_flush,
 	.hpd_init = &rs600_hpd_init,
 	.hpd_fini = &rs600_hpd_fini,
 	.hpd_sense = &rs600_hpd_sense,
@@ -508,7 +499,6 @@ int r600_ring_test(struct radeon_device *rdev);
 int r600_copy_blit(struct radeon_device *rdev,
 		   uint64_t src_offset, uint64_t dst_offset,
 		   unsigned num_pages, struct radeon_fence *fence);
-void r600_hdp_flush(struct radeon_device *rdev);
 void r600_hpd_init(struct radeon_device *rdev);
 void r600_hpd_fini(struct radeon_device *rdev);
 bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -544,7 +534,6 @@ static struct radeon_asic r600_asic = {
 	.set_surface_reg = r600_set_surface_reg,
 	.clear_surface_reg = r600_clear_surface_reg,
 	.bandwidth_update = &rv515_bandwidth_update,
-	.hdp_flush = &r600_hdp_flush,
 	.hpd_init = &r600_hpd_init,
 	.hpd_fini = &r600_hpd_fini,
 	.hpd_sense = &r600_hpd_sense,
@@ -589,7 +578,6 @@ static struct radeon_asic rv770_asic = {
 	.set_surface_reg = r600_set_surface_reg,
 	.clear_surface_reg = r600_clear_surface_reg,
 	.bandwidth_update = &rv515_bandwidth_update,
-	.hdp_flush = &r600_hdp_flush,
 	.hpd_init = &r600_hpd_init,
 	.hpd_fini = &r600_hpd_fini,
 	.hpd_sense = &r600_hpd_sense,
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 60df2d7e7e4c..0e1325e18534 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -131,7 +131,6 @@ int radeon_gem_set_domain(struct drm_gem_object *gobj,
 			printk(KERN_ERR "Failed to wait for object !\n");
 			return r;
 		}
-		radeon_hdp_flush(robj->rdev);
 	}
 	return 0;
 }
@@ -312,7 +311,6 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 	mutex_lock(&dev->struct_mutex);
 	drm_gem_object_unreference(gobj);
 	mutex_unlock(&dev->struct_mutex);
-	radeon_hdp_flush(robj->rdev);
 	return r;
 }
 
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index bf7b3cf80ed7..9f5418983e2a 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -396,6 +396,7 @@ static int rs400_startup(struct radeon_device *rdev)
 		return r;
 	/* Enable IRQ */
 	r100_irq_set(rdev);
+	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
 	if (r) {
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 19258943a370..a0378c57e4ec 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -554,6 +554,7 @@ static int rs600_startup(struct radeon_device *rdev)
 		return r;
 	/* Enable IRQ */
 	rs600_irq_set(rdev);
+	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
 	if (r) {
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 42f27205a597..cd31da913771 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -626,6 +626,7 @@ static int rs690_startup(struct radeon_device *rdev)
 		return r;
 	/* Enable IRQ */
 	rs600_irq_set(rdev);
+	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
 	if (r) {
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 59632a506b46..62756717b044 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -479,6 +479,7 @@ static int rv515_startup(struct radeon_device *rdev)
 	}
 	/* Enable IRQ */
 	rs600_irq_set(rdev);
+	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
 	if (r) {