author    Alex Deucher <alexdeucher@gmail.com>    2011-06-13 17:39:06 -0400
committer Dave Airlie <airlied@redhat.com>    2011-06-16 19:30:04 -0400
commit    b81157d016a48b8025ccfcb286827679b35f16aa (patch)
tree      c5097ff1d8b4af1be98f58302ba7c80d06d7bb73 /drivers/gpu/drm/radeon
parent    11b0a5b89adbfaf4e7d31f2482f49471dd983692 (diff)
drm/radeon/kms: use helper functions for fence read/write
The existing code assumed scratch registers in a number of places, while in most cases we are using writeback and events rather than scratch registers.

Signed-off-by: Alex Deucher <alexdeucher@gmail.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
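The change is easiest to see as a pattern: every call site that used WREG32()/RREG32() on the fence scratch register now goes through a single writer and a single reader, and only those two helpers know whether the sequence number lives in the writeback buffer or in the register. Below is a minimal userspace sketch of that pattern; the demo_* names and fields are illustrative stand-ins, not the driver's real API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the device state the helpers consult. */
struct demo_device {
	bool wb_enabled;       /* writeback buffer in use instead of the register */
	uint32_t wb[16];       /* stands in for rdev->wb.wb (CPU-visible buffer)  */
	unsigned wb_index;     /* word slot that mirrors this fence's scratch reg */
	uint32_t scratch_reg;  /* stands in for the MMIO scratch register         */
};

/* Writer: the only place that knows where the sequence number is stored. */
static void demo_fence_write(struct demo_device *dev, uint32_t seq)
{
	if (dev->wb_enabled)
		dev->wb[dev->wb_index] = seq;   /* wb.wb[...] = cpu_to_le32(seq) in the driver */
	else
		dev->scratch_reg = seq;         /* WREG32(scratch_reg, seq) in the driver */
}

/* Reader: mirrors the writer, so call sites never check wb_enabled themselves. */
static uint32_t demo_fence_read(struct demo_device *dev)
{
	if (dev->wb_enabled)
		return dev->wb[dev->wb_index];  /* le32_to_cpu(wb.wb[...]) in the driver */
	return dev->scratch_reg;            /* RREG32(scratch_reg) in the driver */
}

int main(void)
{
	struct demo_device dev = { .wb_enabled = true, .wb_index = 3 };

	demo_fence_write(&dev, 42);
	printf("last signaled fence: 0x%08X\n", demo_fence_read(&dev));
	return 0;
}

Centralizing the decision means a later change to the backing store, such as the event-based writeback offset handled here, touches two functions instead of every call site.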
Diffstat (limited to 'drivers/gpu/drm/radeon')
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c  |  51
1 file changed, 36 insertions, 15 deletions
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 1f8229436570..021d2b6b556f 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -40,6 +40,35 @@
 #include "radeon.h"
 #include "radeon_trace.h"
 
+static void radeon_fence_write(struct radeon_device *rdev, u32 seq)
+{
+	if (rdev->wb.enabled) {
+		u32 scratch_index;
+		if (rdev->wb.use_event)
+			scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
+		else
+			scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
+		rdev->wb.wb[scratch_index/4] = cpu_to_le32(seq);
+	} else
+		WREG32(rdev->fence_drv.scratch_reg, seq);
+}
+
+static u32 radeon_fence_read(struct radeon_device *rdev)
+{
+	u32 seq;
+
+	if (rdev->wb.enabled) {
+		u32 scratch_index;
+		if (rdev->wb.use_event)
+			scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
+		else
+			scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
+		seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]);
+	} else
+		seq = RREG32(rdev->fence_drv.scratch_reg);
+	return seq;
+}
+
 int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
 {
 	unsigned long irq_flags;
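A note on the helpers just added: scratch_index is a byte offset into the writeback area (the event or scratch region offset plus this register's distance from rdev->scratch.reg_base), and wb.wb[] is indexed in 32-bit words, hence the division by 4. A small standalone sketch of that mapping, with invented offsets and register numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* All values below are invented for illustration. */
	uint32_t wb_region_offset = 1024;   /* offset of the event/scratch area in the wb page */
	uint32_t scratch_reg      = 0x15e4; /* this fence's scratch register                   */
	uint32_t reg_base         = 0x15e0; /* base of the scratch register block              */

	/* Byte offset within the writeback area, then the 32-bit word index
	 * that wb.wb[scratch_index/4] would use. */
	uint32_t scratch_index = wb_region_offset + scratch_reg - reg_base;
	printf("byte offset %u -> wb.wb[%u]\n", scratch_index, scratch_index / 4);
	return 0;
}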
@@ -50,12 +79,12 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
 		return 0;
 	}
 	fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
-	if (!rdev->cp.ready) {
+	if (!rdev->cp.ready)
 		/* FIXME: cp is not running assume everythings is done right
 		 * away
 		 */
-		WREG32(rdev->fence_drv.scratch_reg, fence->seq);
-	} else
+		radeon_fence_write(rdev, fence->seq);
+	else
 		radeon_fence_ring_emit(rdev, fence);
 
 	trace_radeon_fence_emit(rdev->ddev, fence->seq);
@@ -73,15 +102,7 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
 	bool wake = false;
 	unsigned long cjiffies;
 
-	if (rdev->wb.enabled) {
-		u32 scratch_index;
-		if (rdev->wb.use_event)
-			scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
-		else
-			scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
-		seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]);
-	} else
-		seq = RREG32(rdev->fence_drv.scratch_reg);
+	seq = radeon_fence_read(rdev);
 	if (seq != rdev->fence_drv.last_seq) {
 		rdev->fence_drv.last_seq = seq;
 		rdev->fence_drv.last_jiffies = jiffies;
@@ -251,7 +272,7 @@ retry:
 			r = radeon_gpu_reset(rdev);
 			if (r)
 				return r;
-			WREG32(rdev->fence_drv.scratch_reg, fence->seq);
+			radeon_fence_write(rdev, fence->seq);
 			rdev->gpu_lockup = false;
 		}
 		timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
@@ -351,7 +372,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
 		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
 		return r;
 	}
-	WREG32(rdev->fence_drv.scratch_reg, 0);
+	radeon_fence_write(rdev, 0);
 	atomic_set(&rdev->fence_drv.seq, 0);
 	INIT_LIST_HEAD(&rdev->fence_drv.created);
 	INIT_LIST_HEAD(&rdev->fence_drv.emited);
@@ -391,7 +412,7 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
 	struct radeon_fence *fence;
 
 	seq_printf(m, "Last signaled fence 0x%08X\n",
-		   RREG32(rdev->fence_drv.scratch_reg));
+		   radeon_fence_read(rdev));
 	if (!list_empty(&rdev->fence_drv.emited)) {
 		fence = list_entry(rdev->fence_drv.emited.prev,
 				   struct radeon_fence, list);