author	Alex Deucher <alexander.deucher@amd.com>	2011-08-25 13:39:48 -0400
committer	Dave Airlie <airlied@redhat.com>	2011-12-20 14:49:28 -0500
commit	7465280c076d6440e5908c158c83b542dc063a30 (patch)
tree	7781cffcc3784293e5bb97f20fb4a6c8109684ec /drivers/gpu/drm/radeon/radeon_fence.c
parent	851a6bd99edda0094def3b0b81bb1c7c0e886e65 (diff)
drm/radeon/kms: add support for multiple fence queues v2
For supporting multiple CP ring buffers, async DMA engines and UVD. We still
need a way to synchronize between engines.

v2: initialize unused fence driver rings to avoid issues in suspend/unload

Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Christian König <deathsimple@vodafone.de>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
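The reworked entry points take the target ring explicitly. As a rough illustration only (not part of this patch), a caller fencing work on a single ring might look like the sketch below; the helper name example_fence_ring is invented, error handling is trimmed, and the ring index is assumed to be a valid index into rdev->fence_drv[]:

/* Illustrative sketch, not from the patch: fence work on one ring. */
static int example_fence_ring(struct radeon_device *rdev, int ring)
{
	struct radeon_fence *fence = NULL;
	int r;

	/* fences are now created against an explicit ring */
	r = radeon_fence_create(rdev, &fence, ring);
	if (r)
		return r;

	/* ... submit commands on that ring, then emit the fence ... */
	r = radeon_fence_emit(rdev, fence);
	if (r) {
		radeon_fence_unref(&fence);
		return r;
	}

	/* block (non-interruptibly) until the ring passes the fence */
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}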
Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_fence.c')
-rw-r--r--	drivers/gpu/drm/radeon/radeon_fence.c	232
1 files changed, 130 insertions, 102 deletions
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 7027766ec2a5..086b8a399118 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -40,32 +40,37 @@
 #include "radeon.h"
 #include "radeon_trace.h"
 
-static void radeon_fence_write(struct radeon_device *rdev, u32 seq)
+static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
 {
+	u32 scratch_index;
+
 	if (rdev->wb.enabled) {
-		u32 scratch_index;
 		if (rdev->wb.use_event)
-			scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
+			scratch_index = R600_WB_EVENT_OFFSET +
+				rdev->fence_drv[ring].scratch_reg - rdev->scratch.reg_base;
 		else
-			scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
+			scratch_index = RADEON_WB_SCRATCH_OFFSET +
+				rdev->fence_drv[ring].scratch_reg - rdev->scratch.reg_base;
 		rdev->wb.wb[scratch_index/4] = cpu_to_le32(seq);
 	} else
-		WREG32(rdev->fence_drv.scratch_reg, seq);
+		WREG32(rdev->fence_drv[ring].scratch_reg, seq);
 }
 
-static u32 radeon_fence_read(struct radeon_device *rdev)
+static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
 {
-	u32 seq;
+	u32 seq = 0;
+	u32 scratch_index;
 
 	if (rdev->wb.enabled) {
-		u32 scratch_index;
 		if (rdev->wb.use_event)
-			scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
+			scratch_index = R600_WB_EVENT_OFFSET +
+				rdev->fence_drv[ring].scratch_reg - rdev->scratch.reg_base;
 		else
-			scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
+			scratch_index = RADEON_WB_SCRATCH_OFFSET +
+				rdev->fence_drv[ring].scratch_reg - rdev->scratch.reg_base;
 		seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]);
 	} else
-		seq = RREG32(rdev->fence_drv.scratch_reg);
+		seq = RREG32(rdev->fence_drv[ring].scratch_reg);
 	return seq;
 }
 
@@ -73,28 +78,28 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
 {
 	unsigned long irq_flags;
 
-	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
+	write_lock_irqsave(&rdev->fence_lock, irq_flags);
 	if (fence->emitted) {
-		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
 		return 0;
 	}
-	fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
+	fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
 	if (!rdev->cp.ready)
 		/* FIXME: cp is not running assume everythings is done right
 		 * away
 		 */
-		radeon_fence_write(rdev, fence->seq);
+		radeon_fence_write(rdev, fence->seq, fence->ring);
 	else
 		radeon_fence_ring_emit(rdev, fence);
 
 	trace_radeon_fence_emit(rdev->ddev, fence->seq);
 	fence->emitted = true;
-	list_move_tail(&fence->list, &rdev->fence_drv.emitted);
-	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+	list_move_tail(&fence->list, &rdev->fence_drv[fence->ring].emitted);
+	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
 	return 0;
 }
 
-static bool radeon_fence_poll_locked(struct radeon_device *rdev)
+static bool radeon_fence_poll_locked(struct radeon_device *rdev, int ring)
 {
 	struct radeon_fence *fence;
 	struct list_head *i, *n;
@@ -102,34 +107,34 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
 	bool wake = false;
 	unsigned long cjiffies;
 
-	seq = radeon_fence_read(rdev);
-	if (seq != rdev->fence_drv.last_seq) {
-		rdev->fence_drv.last_seq = seq;
-		rdev->fence_drv.last_jiffies = jiffies;
-		rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
+	seq = radeon_fence_read(rdev, ring);
+	if (seq != rdev->fence_drv[ring].last_seq) {
+		rdev->fence_drv[ring].last_seq = seq;
+		rdev->fence_drv[ring].last_jiffies = jiffies;
+		rdev->fence_drv[ring].last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
 	} else {
 		cjiffies = jiffies;
-		if (time_after(cjiffies, rdev->fence_drv.last_jiffies)) {
-			cjiffies -= rdev->fence_drv.last_jiffies;
-			if (time_after(rdev->fence_drv.last_timeout, cjiffies)) {
+		if (time_after(cjiffies, rdev->fence_drv[ring].last_jiffies)) {
+			cjiffies -= rdev->fence_drv[ring].last_jiffies;
+			if (time_after(rdev->fence_drv[ring].last_timeout, cjiffies)) {
 				/* update the timeout */
-				rdev->fence_drv.last_timeout -= cjiffies;
+				rdev->fence_drv[ring].last_timeout -= cjiffies;
 			} else {
 				/* the 500ms timeout is elapsed we should test
 				 * for GPU lockup
 				 */
-				rdev->fence_drv.last_timeout = 1;
+				rdev->fence_drv[ring].last_timeout = 1;
 			}
 		} else {
 			/* wrap around update last jiffies, we will just wait
 			 * a little longer
 			 */
-			rdev->fence_drv.last_jiffies = cjiffies;
+			rdev->fence_drv[ring].last_jiffies = cjiffies;
 		}
 		return false;
 	}
 	n = NULL;
-	list_for_each(i, &rdev->fence_drv.emitted) {
+	list_for_each(i, &rdev->fence_drv[ring].emitted) {
 		fence = list_entry(i, struct radeon_fence, list);
 		if (fence->seq == seq) {
 			n = i;
@@ -141,11 +146,11 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
 		i = n;
 		do {
 			n = i->prev;
-			list_move_tail(i, &rdev->fence_drv.signaled);
+			list_move_tail(i, &rdev->fence_drv[ring].signaled);
 			fence = list_entry(i, struct radeon_fence, list);
 			fence->signaled = true;
 			i = n;
-		} while (i != &rdev->fence_drv.emitted);
+		} while (i != &rdev->fence_drv[ring].emitted);
 		wake = true;
 	}
 	return wake;
@@ -157,14 +162,16 @@ static void radeon_fence_destroy(struct kref *kref)
 	struct radeon_fence *fence;
 
 	fence = container_of(kref, struct radeon_fence, kref);
-	write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
+	write_lock_irqsave(&fence->rdev->fence_lock, irq_flags);
 	list_del(&fence->list);
 	fence->emitted = false;
-	write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
+	write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
 	kfree(fence);
 }
 
-int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
+int radeon_fence_create(struct radeon_device *rdev,
+			struct radeon_fence **fence,
+			int ring)
 {
 	unsigned long irq_flags;
 
@@ -177,15 +184,15 @@ int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
 	(*fence)->emitted = false;
 	(*fence)->signaled = false;
 	(*fence)->seq = 0;
+	(*fence)->ring = ring;
 	INIT_LIST_HEAD(&(*fence)->list);
 
-	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-	list_add_tail(&(*fence)->list, &rdev->fence_drv.created);
-	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+	write_lock_irqsave(&rdev->fence_lock, irq_flags);
+	list_add_tail(&(*fence)->list, &rdev->fence_drv[ring].created);
+	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
 	return 0;
 }
 
-
 bool radeon_fence_signaled(struct radeon_fence *fence)
 {
 	unsigned long irq_flags;
@@ -197,7 +204,7 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
 	if (fence->rdev->gpu_lockup)
 		return true;
 
-	write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
+	write_lock_irqsave(&fence->rdev->fence_lock, irq_flags);
 	signaled = fence->signaled;
 	/* if we are shuting down report all fence as signaled */
 	if (fence->rdev->shutdown) {
@@ -208,10 +215,10 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
 		signaled = true;
 	}
 	if (!signaled) {
-		radeon_fence_poll_locked(fence->rdev);
+		radeon_fence_poll_locked(fence->rdev, fence->ring);
 		signaled = fence->signaled;
 	}
-	write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
+	write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
 	return signaled;
 }
 
@@ -230,14 +237,14 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
 	if (radeon_fence_signaled(fence)) {
 		return 0;
 	}
-	timeout = rdev->fence_drv.last_timeout;
+	timeout = rdev->fence_drv[fence->ring].last_timeout;
 retry:
 	/* save current sequence used to check for GPU lockup */
-	seq = rdev->fence_drv.last_seq;
+	seq = rdev->fence_drv[fence->ring].last_seq;
 	trace_radeon_fence_wait_begin(rdev->ddev, seq);
 	if (intr) {
 		radeon_irq_kms_sw_irq_get(rdev);
-		r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
+		r = wait_event_interruptible_timeout(rdev->fence_drv[fence->ring].queue,
 				radeon_fence_signaled(fence), timeout);
 		radeon_irq_kms_sw_irq_put(rdev);
 		if (unlikely(r < 0)) {
@@ -245,7 +252,7 @@ retry:
 		}
 	} else {
 		radeon_irq_kms_sw_irq_get(rdev);
-		r = wait_event_timeout(rdev->fence_drv.queue,
+		r = wait_event_timeout(rdev->fence_drv[fence->ring].queue,
 			radeon_fence_signaled(fence), timeout);
 		radeon_irq_kms_sw_irq_put(rdev);
 	}
@@ -258,10 +265,11 @@ retry:
 			timeout = r;
 			goto retry;
 		}
-		/* don't protect read access to rdev->fence_drv.last_seq
+		/* don't protect read access to rdev->fence_drv[t].last_seq
 		 * if we experiencing a lockup the value doesn't change
 		 */
-		if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
+		if (seq == rdev->fence_drv[fence->ring].last_seq &&
+		    radeon_gpu_is_lockup(rdev)) {
 			/* good news we believe it's a lockup */
 			printk(KERN_WARNING "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
 				fence->seq, seq);
@@ -272,20 +280,20 @@ retry:
 			r = radeon_gpu_reset(rdev);
 			if (r)
 				return r;
-			radeon_fence_write(rdev, fence->seq);
+			radeon_fence_write(rdev, fence->seq, fence->ring);
 			rdev->gpu_lockup = false;
 		}
 		timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
-		write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-		rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
-		rdev->fence_drv.last_jiffies = jiffies;
-		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+		write_lock_irqsave(&rdev->fence_lock, irq_flags);
+		rdev->fence_drv[fence->ring].last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
+		rdev->fence_drv[fence->ring].last_jiffies = jiffies;
+		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
 		goto retry;
 	}
 	return 0;
 }
 
-int radeon_fence_wait_next(struct radeon_device *rdev)
+int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
 {
 	unsigned long irq_flags;
 	struct radeon_fence *fence;
@@ -294,21 +302,21 @@ int radeon_fence_wait_next(struct radeon_device *rdev)
 	if (rdev->gpu_lockup) {
 		return 0;
 	}
-	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-	if (list_empty(&rdev->fence_drv.emitted)) {
-		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+	write_lock_irqsave(&rdev->fence_lock, irq_flags);
+	if (list_empty(&rdev->fence_drv[ring].emitted)) {
+		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
 		return 0;
 	}
-	fence = list_entry(rdev->fence_drv.emitted.next,
+	fence = list_entry(rdev->fence_drv[ring].emitted.next,
 			   struct radeon_fence, list);
 	radeon_fence_ref(fence);
-	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
 	r = radeon_fence_wait(fence, false);
 	radeon_fence_unref(&fence);
 	return r;
 }
 
-int radeon_fence_wait_last(struct radeon_device *rdev)
+int radeon_fence_wait_last(struct radeon_device *rdev, int ring)
 {
 	unsigned long irq_flags;
 	struct radeon_fence *fence;
@@ -317,15 +325,15 @@ int radeon_fence_wait_last(struct radeon_device *rdev)
 	if (rdev->gpu_lockup) {
 		return 0;
 	}
-	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-	if (list_empty(&rdev->fence_drv.emitted)) {
-		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+	write_lock_irqsave(&rdev->fence_lock, irq_flags);
+	if (list_empty(&rdev->fence_drv[ring].emitted)) {
+		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
 		return 0;
 	}
-	fence = list_entry(rdev->fence_drv.emitted.prev,
+	fence = list_entry(rdev->fence_drv[ring].emitted.prev,
 			   struct radeon_fence, list);
 	radeon_fence_ref(fence);
-	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
 	r = radeon_fence_wait(fence, false);
 	radeon_fence_unref(&fence);
 	return r;
@@ -347,39 +355,49 @@ void radeon_fence_unref(struct radeon_fence **fence)
 	}
 }
 
-void radeon_fence_process(struct radeon_device *rdev)
+void radeon_fence_process(struct radeon_device *rdev, int ring)
 {
 	unsigned long irq_flags;
 	bool wake;
 
-	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-	wake = radeon_fence_poll_locked(rdev);
-	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+	write_lock_irqsave(&rdev->fence_lock, irq_flags);
+	wake = radeon_fence_poll_locked(rdev, ring);
+	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
 	if (wake) {
-		wake_up_all(&rdev->fence_drv.queue);
+		wake_up_all(&rdev->fence_drv[ring].queue);
 	}
 }
 
-int radeon_fence_driver_init(struct radeon_device *rdev)
+int radeon_fence_driver_init(struct radeon_device *rdev, int num_rings)
 {
 	unsigned long irq_flags;
-	int r;
+	int r, ring;
 
-	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-	r = radeon_scratch_get(rdev, &rdev->fence_drv.scratch_reg);
-	if (r) {
-		dev_err(rdev->dev, "fence failed to get scratch register\n");
-		write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
-		return r;
+	for (ring = 0; ring < num_rings; ring++) {
+		write_lock_irqsave(&rdev->fence_lock, irq_flags);
+		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
+		if (r) {
+			dev_err(rdev->dev, "fence failed to get scratch register\n");
+			write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
			return r;
+		}
+		radeon_fence_write(rdev, 0, ring);
+		atomic_set(&rdev->fence_drv[ring].seq, 0);
+		INIT_LIST_HEAD(&rdev->fence_drv[ring].created);
+		INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
+		INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
+		init_waitqueue_head(&rdev->fence_drv[ring].queue);
+		rdev->fence_drv[ring].initialized = true;
+		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+	}
+	for (ring = num_rings; ring < RADEON_NUM_RINGS; ring++) {
+		write_lock_irqsave(&rdev->fence_lock, irq_flags);
+		INIT_LIST_HEAD(&rdev->fence_drv[ring].created);
+		INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
+		INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
+		rdev->fence_drv[ring].initialized = false;
+		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
 	}
-	radeon_fence_write(rdev, 0);
-	atomic_set(&rdev->fence_drv.seq, 0);
-	INIT_LIST_HEAD(&rdev->fence_drv.created);
-	INIT_LIST_HEAD(&rdev->fence_drv.emitted);
-	INIT_LIST_HEAD(&rdev->fence_drv.signaled);
-	init_waitqueue_head(&rdev->fence_drv.queue);
-	rdev->fence_drv.initialized = true;
-	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
 	if (radeon_debugfs_fence_init(rdev)) {
 		dev_err(rdev->dev, "fence debugfs file creation failed\n");
 	}
@@ -389,14 +407,17 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
 void radeon_fence_driver_fini(struct radeon_device *rdev)
 {
 	unsigned long irq_flags;
-
-	if (!rdev->fence_drv.initialized)
-		return;
-	wake_up_all(&rdev->fence_drv.queue);
-	write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
-	radeon_scratch_free(rdev, rdev->fence_drv.scratch_reg);
-	write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
-	rdev->fence_drv.initialized = false;
+	int ring;
+
+	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
+		if (!rdev->fence_drv[ring].initialized)
+			continue;
+		wake_up_all(&rdev->fence_drv[ring].queue);
+		write_lock_irqsave(&rdev->fence_lock, irq_flags);
+		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
+		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+		rdev->fence_drv[ring].initialized = false;
+	}
 }
 
 
@@ -410,14 +431,21 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
 	struct drm_device *dev = node->minor->dev;
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_fence *fence;
-
-	seq_printf(m, "Last signaled fence 0x%08X\n",
-		   radeon_fence_read(rdev));
-	if (!list_empty(&rdev->fence_drv.emitted)) {
-		fence = list_entry(rdev->fence_drv.emitted.prev,
-				   struct radeon_fence, list);
-		seq_printf(m, "Last emitted fence %p with 0x%08X\n",
-			   fence, fence->seq);
+	int i;
+
+	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+		if (!rdev->fence_drv[i].initialized)
+			continue;
+
+		seq_printf(m, "--- ring %d ---\n", i);
+		seq_printf(m, "Last signaled fence 0x%08X\n",
+			   radeon_fence_read(rdev, i));
+		if (!list_empty(&rdev->fence_drv[i].emitted)) {
+			fence = list_entry(rdev->fence_drv[i].emitted.prev,
+					   struct radeon_fence, list);
+			seq_printf(m, "Last emitted fence %p with 0x%08X\n",
+				   fence, fence->seq);
+		}
 	}
 	return 0;
 }